diff --git a/.codecov.yml b/.codecov.yml
new file mode 100644
index 0000000000..e00ce3d698
--- /dev/null
+++ b/.codecov.yml
@@ -0,0 +1,2 @@
+github_checks:
+ annotations: false
diff --git a/.githooks/README.md b/.githooks/README.md
new file mode 100644
index 0000000000..c2de4599ff
--- /dev/null
+++ b/.githooks/README.md
@@ -0,0 +1,8 @@
+# Git Hooks
+
+This directory contains useful Git hooks for working with go-libp2p.
+
+Install them by running
+```bash
+git config core.hooksPath .githooks
+```
diff --git a/.githooks/pre-commit b/.githooks/pre-commit
new file mode 100755
index 0000000000..27af2ba8e8
--- /dev/null
+++ b/.githooks/pre-commit
@@ -0,0 +1,13 @@
+#!/bin/bash
+
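+# Keep the test-plans module tidy: run "go mod tidy" there and fail the commit if go.mod/go.sum change.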
+pushd ./test-plans > /dev/null
+go mod tidy
+if [[ -n $(git diff --name-only -- "go.mod" "go.sum") ]]; then
+ echo "go.mod / go.sum in test-plans not tidied"
+ errored=true
+fi
+popd > /dev/null
+
+if [ "$errored" = true ]; then
+ exit 1
+fi
diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md
new file mode 100644
index 0000000000..47facff2c3
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug-report.md
@@ -0,0 +1,20 @@
+---
+name: 'Bug Report'
+about: 'Report a bug in go-libp2p.'
+labels: bug
+---
+
+
+
+
+Version Information
+
+
+
+
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 0000000000..1ba3fd8360
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,8 @@
+blank_issues_enabled: true
+contact_links:
+ - name: Technical Questions
+ url: https://github.com/libp2p/go-libp2p/discussions/new?category=q-a
+ about: Please ask technical questions in the go-libp2p GitHub Discussions forum.
+ - name: Community-wide libp2p Discussion
+ url: https://discuss.libp2p.io
+ about: Discussions and questions about the libp2p community.
diff --git a/.github/ISSUE_TEMPLATE/doc.md b/.github/ISSUE_TEMPLATE/doc.md
new file mode 100644
index 0000000000..5a0d198218
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/doc.md
@@ -0,0 +1,13 @@
+---
+name: 'Documentation Issue'
+about: 'Report missing/erroneous documentation, propose new documentation, report broken links, etc.'
+labels: documentation
+---
+
+#### Location
+
+
+
+#### Description
+
+
diff --git a/.github/ISSUE_TEMPLATE/enhancement.md b/.github/ISSUE_TEMPLATE/enhancement.md
new file mode 100644
index 0000000000..517b84fa48
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/enhancement.md
@@ -0,0 +1,11 @@
+---
+name: 'Enhancement'
+about: 'Suggest an improvement to an existing go-libp2p feature.'
+labels: enhancement
+---
+
+
diff --git a/.github/ISSUE_TEMPLATE/feature.md b/.github/ISSUE_TEMPLATE/feature.md
new file mode 100644
index 0000000000..2dc1ad986e
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature.md
@@ -0,0 +1,15 @@
+---
+name: 'Feature'
+about: 'Suggest a new feature in go-libp2p.'
+labels: feature
+---
+
+
diff --git a/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md
new file mode 100644
index 0000000000..97d8ca5faf
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/question.md
@@ -0,0 +1,9 @@
+---
+name: 'Question/Support'
+about: 'Ask a question about go-libp2p or request support.'
+labels: question, invalid
+---
+
+This bug tracker is only for actionable bug reports and feature requests. Please direct any questions to https://discuss.libp2p.io or to our Matrix (#libp2p:matrix.org) or IRC (#libp2p on freenode) channels.
+
+If you don't get an immediate response, please keep trying.
diff --git a/.github/ISSUE_TEMPLATE/release.md b/.github/ISSUE_TEMPLATE/release.md
new file mode 100644
index 0000000000..5326b7069a
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/release.md
@@ -0,0 +1,34 @@
+---
+name: 'Libp2p Release'
+about: 'Start a new libp2p release.'
+---
+
+## 🗺 What's left for release
+
+
+
+## 🔦 Highlights
+
+< top highlights for these release notes >
+
+## Changelog
+
+< changelog generated by scripts/mkreleaselog >
+
+## ✅ Release Checklist
+
+- [ ] **Stage 0 - Finishing Touches**
+ - [ ] Go through relevant libp2p repos looking for unreleased changes that should make it into the release. If you find any, cut releases.
+ - [ ] Run `go get -u ./...` to see if there are any out-of-date deps that look important. If there are, bubble them. Try to avoid _directly_ updating indirect deps in go-libp2p's `go.mod` when possible.
+- [ ] **Stage 1 - Release**
+ - [ ] Publish the release through the GitHub UI, adding the release notes. Some users rely on this to receive notifications of new releases.
+ - [ ] Announce the release on [discuss.libp2p.io](https://discuss.libp2p.io).
+- [ ] **Stage 2 - Update Upstream**
+ - [ ] Update the examples to the final release
+ - [ ] Update the upstream dependencies to the final release and create PRs.
+ - [ ] [filecoin-project/lotus](https://github.com/filecoin-project/lotus)
+ - [ ] [go-libp2p-kad-dht](https://github.com/libp2p/go-libp2p-kad-dht/)
+ - [ ] [go-libp2p-pubsub](https://github.com/libp2p/go-libp2p-pubsub) (In case of breaking changes.)
+ - [ ] [ipfs/kubo](https://github.com/ipfs/kubo)
+ - [ ] Add new release to interop tester in [test-plans](https://github.com/libp2p/test-plans/)
+- [ ] Make required changes to the release process.
diff --git a/.github/actions/go-check-setup/action.yml b/.github/actions/go-check-setup/action.yml
new file mode 100644
index 0000000000..e674678db5
--- /dev/null
+++ b/.github/actions/go-check-setup/action.yml
@@ -0,0 +1,11 @@
+runs:
+ using: "composite"
+ steps:
+ - name: Install Protoc
+ uses: trail-of-forks/setup-protoc@a97892a429d98fae78d26f40334ab7eb616d08b9 # include https://github.com/arduino/setup-protoc/pull/58
+ with:
+ version: '21.12'
+ repo-token: ${{ github.token }}
+ - name: Install Protobuf compiler
+ shell: bash
+ run: go install google.golang.org/protobuf/cmd/protoc-gen-go
diff --git a/.github/actions/go-test-setup/action.yml b/.github/actions/go-test-setup/action.yml
new file mode 100644
index 0000000000..1836ae8dc3
--- /dev/null
+++ b/.github/actions/go-test-setup/action.yml
@@ -0,0 +1,21 @@
+runs:
+ using: "composite"
+ steps:
+ - name: increase the UDP receive buffer size # see https://github.com/lucas-clemente/quic-go/wiki/UDP-Receive-Buffer-Size
+ shell: bash
+ run: sysctl -w net.core.rmem_max=2500000
+ if: ${{ matrix.os == 'ubuntu' }}
+ - name: Run nocover tests. These are tests that require the coverage analysis to be off # See https://github.com/protocol/.github/issues/460
+ shell: bash
+ # This matches only tests with "NoCover" in their test name to avoid running all tests again.
+ run: go test -tags nocover -run NoCover -v ./...
+ - name: Run synctests tests. These are tests that require go 1.24 and the experimental testing/synctest package
+ shell: bash
+ if: ${{ contains(matrix.go, '1.24') }}
+ run: go test -tags goexperiment.synctest -run "_synctest$" -v ./...
+ - name: Install testing tools
+ shell: bash
+ run: cd scripts/test_analysis && go install ./cmd/gotest2sql
+ - name: Install test_analysis
+ shell: bash
+ run: cd scripts/test_analysis && go install .
diff --git a/.github/workflows/generated-pr.yml b/.github/workflows/generated-pr.yml
new file mode 100644
index 0000000000..b8c5cc6311
--- /dev/null
+++ b/.github/workflows/generated-pr.yml
@@ -0,0 +1,14 @@
+name: Close Generated PRs
+
+on:
+ schedule:
+ - cron: '0 0 * * *'
+ workflow_dispatch:
+
+permissions:
+ issues: write
+ pull-requests: write
+
+jobs:
+ stale:
+ uses: ipdxco/unified-github-workflows/.github/workflows/reusable-generated-pr.yml@v1
diff --git a/.github/workflows/go-check-config.json b/.github/workflows/go-check-config.json
new file mode 100644
index 0000000000..4b37308d50
--- /dev/null
+++ b/.github/workflows/go-check-config.json
@@ -0,0 +1,3 @@
+{
+ "gogenerate": true
+}
diff --git a/.github/workflows/go-check.yml b/.github/workflows/go-check.yml
new file mode 100644
index 0000000000..8de0c4c487
--- /dev/null
+++ b/.github/workflows/go-check.yml
@@ -0,0 +1,21 @@
+name: Go Checks
+
+on:
+ pull_request:
+ push:
+ branches: ["master", "release-v0[0-9][0-9]"]
+ workflow_dispatch:
+
+permissions:
+ contents: read
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ go-check:
+ uses: ipdxco/unified-github-workflows/.github/workflows/go-check.yml@v1.0
+ with:
+ go-version: "1.25.x"
+ go-generate-ignore-protoc-version-comments: true
diff --git a/.github/workflows/go-test-config.json b/.github/workflows/go-test-config.json
new file mode 100644
index 0000000000..699fa72ea1
--- /dev/null
+++ b/.github/workflows/go-test-config.json
@@ -0,0 +1,3 @@
+{
+ "skip32bit": true
+}
diff --git a/.github/workflows/go-test-template.yml b/.github/workflows/go-test-template.yml
new file mode 100644
index 0000000000..f79719ec4b
--- /dev/null
+++ b/.github/workflows/go-test-template.yml
@@ -0,0 +1,154 @@
+name: Go Test
+on:
+ workflow_call:
+ inputs:
+ go-versions:
+ required: false
+ type: string
+ default: '["this", "next"]'
+ secrets:
+ CODECOV_TOKEN:
+ required: false
+
+defaults:
+ run:
+ shell: bash
+
+jobs:
+ unit:
+ strategy:
+ fail-fast: false
+ matrix:
+ os: ["ubuntu", "macos", "windows"]
+ go: ${{ fromJSON(inputs.go-versions) }}
+ env:
+ GOTESTFLAGS: -cover -coverprofile=module-coverage.txt -coverpkg=./...
+ GO386FLAGS: ""
+ GORACEFLAGS: ""
+ runs-on: ${{ fromJSON(vars[format('UCI_GO_TEST_RUNNER_{0}', matrix.os)] || format('"{0}-latest"', matrix.os)) }}
+ name: ${{ matrix.os }} (go ${{ matrix.go }})
+ steps:
+ - name: Use msys2 on windows
+ if: matrix.os == 'windows'
+ # The executable for msys2 is also called bash.cmd
+ # https://github.com/actions/virtual-environments/blob/main/images/win/Windows2019-Readme.md#shells
+ # If we prepend its location to the PATH
+ # subsequent 'shell: bash' steps will use msys2 instead of gitbash
+ run: echo "C:/msys64/usr/bin" >> $GITHUB_PATH
+ - name: Check out the repository
+ uses: actions/checkout@v4
+ with:
+ submodules: recursive
+ - name: Check out the latest stable version of Go
+ uses: actions/setup-go@v5
+ with:
+ go-version: stable
+ cache: ${{ matrix.os != 'windows' }} # Windows VMs are slow to use caching. Can add ~15m to the job
+ - name: Read the Unified GitHub Workflows configuration
+ id: config
+ uses: ipdxco/unified-github-workflows/.github/actions/read-config@main
+ - name: Read the go.mod file
+ id: go-mod
+ uses: ipdxco/unified-github-workflows/.github/actions/read-go-mod@main
+ - name: Determine the Go version to use based on the go.mod file
+ id: go
+ env:
+ MATRIX_GO: ${{ matrix.go }}
+ GO_MOD_VERSION: ${{ fromJSON(steps.go-mod.outputs.json).Go }}
+ run: |
+ if [[ "$MATRIX_GO" == "this" ]]; then
+ echo "version=$GO_MOD_VERSION.x" >> $GITHUB_OUTPUT
+ elif [[ "$MATRIX_GO" == "next" ]]; then
+ MAJOR="${GO_MOD_VERSION%.[0-9]*}"
+ MINOR="${GO_MOD_VERSION#[0-9]*.}"
+ echo "version=$MAJOR.$(($MINOR+1)).x" >> $GITHUB_OUTPUT
+ elif [[ "$MATRIX_GO" == "prev" ]]; then
+ MAJOR="${GO_MOD_VERSION%.[0-9]*}"
+ MINOR="${GO_MOD_VERSION#[0-9]*.}"
+ echo "version=$MAJOR.$(($MINOR-1)).x" >> $GITHUB_OUTPUT
+ else
+ echo "version=$MATRIX_GO" >> $GITHUB_OUTPUT
+ fi
+ - name: Enable shuffle flag for go test command
+ if: toJSON(fromJSON(steps.config.outputs.json).shuffle) != 'false'
+ run: |
+ echo "GOTESTFLAGS=-shuffle=on $GOTESTFLAGS" >> $GITHUB_ENV
+ echo "GO386FLAGS=-shuffle=on $GO386FLAGS" >> $GITHUB_ENV
+ echo "GORACEFLAGS=-shuffle=on $GORACEFLAGS" >> $GITHUB_ENV
+ - name: Enable verbose flag for go test command
+ if: toJSON(fromJSON(steps.config.outputs.json).verbose) != 'false'
+ run: |
+ echo "GOTESTFLAGS=-v $GOTESTFLAGS" >> $GITHUB_ENV
+ echo "GO386FLAGS=-v $GO386FLAGS" >> $GITHUB_ENV
+ echo "GORACEFLAGS=-v $GORACEFLAGS" >> $GITHUB_ENV
+ - name: Set extra flags for go test command
+ if: fromJSON(steps.config.outputs.json).gotestflags != ''
+ run: |
+ echo "GOTESTFLAGS=${{ fromJSON(steps.config.outputs.json).gotestflags }} $GOTESTFLAGS" >> $GITHUB_ENV
+ - name: Set extra flags for go test race command
+ if: fromJSON(steps.config.outputs.json).goraceflags != ''
+ run: |
+ echo "GORACEFLAGS=${{ fromJSON(steps.config.outputs.json).goraceflags }} $GORACEFLAGS" >> $GITHUB_ENV
+ - name: Set up the Go version read from the go.mod file
+ uses: actions/setup-go@v5
+ with:
+ go-version: ${{ steps.go.outputs.version }}
+ cache: ${{ matrix.os != 'windows' }} # Windows VMs are slow to use caching. Can add ~15m to the job
+ - name: Display the Go version and environment
+ run: |
+ go version
+ go env
+ - name: Run repo-specific setup
+ uses: ./.github/actions/go-test-setup
+ if: hashFiles('./.github/actions/go-test-setup') != ''
+ - name: Run tests
+ id: test
+ if: contains(fromJSON(steps.config.outputs.json).skipOSes, matrix.os) == false
+ uses: protocol/multiple-go-modules@v1.4
+ with:
+ run: test_analysis ${{ env.GOTESTFLAGS }}
+ - name: Upload test results
+ if: always()
+ uses: actions/upload-artifact@v4
+ with:
+ name: ${{ matrix.os }}_${{ matrix.go }}_test_results.db
+ path: ./test_results.db
+ - name: Add failure summary
+ if: always()
+ run: |
+ echo "### Failure Summary" >> $GITHUB_STEP_SUMMARY
+ test_analysis summarize >> $GITHUB_STEP_SUMMARY
+ - name: Remove test results
+ run: rm ./test_results.db
+ - name: Run tests with race detector
+ # speed things up. Windows and OSX VMs are slow
+ if: matrix.os == 'ubuntu' &&
+ fromJSON(steps.config.outputs.json).skipRace != true &&
+ contains(fromJSON(steps.config.outputs.json).skipOSes, matrix.os) == false
+ uses: protocol/multiple-go-modules@v1.4
+ id: race
+ with:
+ run: test_analysis -race ${{ env.GORACEFLAGS }} ./...
+ - name: Upload test results (Race)
+ if: (steps.race.conclusion == 'success' || steps.race.conclusion == 'failure')
+ uses: actions/upload-artifact@v4
+ with:
+ name: ${{ matrix.os }}_${{ matrix.go }}_test_results_race.db
+ path: ./test_results.db
+ - name: Add failure summary
+ if: (steps.race.conclusion == 'success' || steps.race.conclusion == 'failure')
+ run: |
+ echo "# Tests with race detector failure summary" >> $GITHUB_STEP_SUMMARY
+ test_analysis summarize >> $GITHUB_STEP_SUMMARY
+ - name: Adding Link to Run Analysis
+ run: echo "### [Test flakiness analysis](https://observablehq.com/d/d74435ea5bbf24c7?run-id=$GITHUB_RUN_ID)" >> $GITHUB_STEP_SUMMARY
+ - name: Collect coverage files
+ id: coverages
+ run: echo "files=$(find . -type f -name 'module-coverage.txt' | tr -s '\n' ',' | sed 's/,$//')" >> $GITHUB_OUTPUT
+ - name: Upload coverage to Codecov
+ uses: codecov/codecov-action@54bcd8715eee62d40e33596ef5e8f0f48dbbccab # v4.1.0
+ with:
+ files: ${{ steps.coverages.outputs.files }}
+ env_vars: OS=${{ matrix.os }}, GO=${{ steps.go.outputs.version }}
+ token: ${{ secrets.CODECOV_TOKEN }}
+ fail_ci_if_error: false
diff --git a/.github/workflows/go-test.yml b/.github/workflows/go-test.yml
new file mode 100644
index 0000000000..0c5cd4f96c
--- /dev/null
+++ b/.github/workflows/go-test.yml
@@ -0,0 +1,22 @@
+name: Go Test
+
+on:
+ pull_request:
+ push:
+ branches: ["master", "release-v0[0-9][0-9]"]
+ workflow_dispatch:
+
+permissions:
+ contents: read
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ go-test:
+ uses: ./.github/workflows/go-test-template.yml
+ with:
+ go-versions: '["1.24.x", "1.25.x"]'
+ secrets:
+ CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
diff --git a/.github/workflows/interop-test.yml b/.github/workflows/interop-test.yml
new file mode 100644
index 0000000000..1c5be0a5c4
--- /dev/null
+++ b/.github/workflows/interop-test.yml
@@ -0,0 +1,42 @@
+name: Interoperability Testing
+
+on:
+ workflow_dispatch:
+ pull_request:
+ paths:
+ - 'config/**'
+ - 'core/**'
+ - 'internal/**'
+ - 'p2p/**'
+ - 'test-plans/**'
+ - '.github/workflows/interop-test.yml'
+ push:
+ branches:
+ - "master"
+ paths:
+ - 'config/**'
+ - 'core/**'
+ - 'internal/**'
+ - 'p2p/**'
+ - 'test-plans/**'
+
+jobs:
+ run-transport-interop:
+ name: Run transport interoperability tests
+ runs-on: ${{ fromJSON(vars['INTEROP_TEST_RUNNER_UBUNTU'] || '"ubuntu-22.04"') }}
+ steps:
+ - name: Free Disk Space (Ubuntu)
+ if: vars['INTEROP_TEST_RUNNER_UBUNTU'] == ''
+ uses: jlumbroso/free-disk-space@54081f138730dfa15788a46383842cd2f914a1be # v1.3.1
+ with:
+ tool-cache: true
+ - uses: actions/checkout@v4
+ - name: Build image
+ run: docker build -t go-libp2p-head -f test-plans/PingDockerfile .
+ - uses: libp2p/test-plans/.github/actions/run-transport-interop-test@master
+ with:
+ test-filter: go-libp2p-head
+ extra-versions: ${{ github.workspace }}/test-plans/ping-version.json
+ s3-cache-bucket: ${{ vars.S3_LIBP2P_BUILD_CACHE_BUCKET_NAME }}
+ s3-access-key-id: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_ACCESS_KEY_ID }}
+ s3-secret-access-key: ${{ secrets.S3_LIBP2P_BUILD_CACHE_AWS_SECRET_ACCESS_KEY }}
diff --git a/.github/workflows/link-check.yml b/.github/workflows/link-check.yml
new file mode 100644
index 0000000000..cd006dd51d
--- /dev/null
+++ b/.github/workflows/link-check.yml
@@ -0,0 +1,17 @@
+name: Markdown Link Checking
+on:
+ pull_request:
+ push:
+ branches:
+ - "master"
+
+jobs:
+ check-links:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - uses: tcort/github-action-markdown-link-check@v1
+ with:
+ use-quiet-mode: 'yes' # show only broken links
+ use-verbose-mode: 'yes'
+ config-file: .github/workflows/markdown-links-config.json # for removing any false positives
diff --git a/.github/workflows/markdown-links-config.json b/.github/workflows/markdown-links-config.json
new file mode 100644
index 0000000000..505831a198
--- /dev/null
+++ b/.github/workflows/markdown-links-config.json
@@ -0,0 +1,22 @@
+{
+ "ignorePatterns": [
+ {
+ "pattern": "^http://localhost"
+ },
+ {
+ "pattern": "^https://twitter.com/"
+ },
+ {
+ "pattern": "^https://opensource.org/"
+ }
+ ],
+ "aliveStatusCodes": [200],
+ "httpHeaders": [
+ {
+ "urls": ["https://docs.github.com/"],
+ "headers": {
+ "Accept-Encoding": "*"
+ }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/.github/workflows/release-check.yml b/.github/workflows/release-check.yml
new file mode 100644
index 0000000000..0b5ff6070f
--- /dev/null
+++ b/.github/workflows/release-check.yml
@@ -0,0 +1,19 @@
+name: Release Checker
+
+on:
+ pull_request_target:
+ paths: [ 'version.json' ]
+ types: [ opened, synchronize, reopened, labeled, unlabeled ]
+ workflow_dispatch:
+
+permissions:
+ contents: write
+ pull-requests: write
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ release-check:
+ uses: ipdxco/unified-github-workflows/.github/workflows/release-check.yml@v1.0
diff --git a/.github/workflows/releaser.yml b/.github/workflows/releaser.yml
new file mode 100644
index 0000000000..2ebdbed31a
--- /dev/null
+++ b/.github/workflows/releaser.yml
@@ -0,0 +1,17 @@
+name: Releaser
+
+on:
+ push:
+ paths: [ 'version.json' ]
+ workflow_dispatch:
+
+permissions:
+ contents: write
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.sha }}
+ cancel-in-progress: true
+
+jobs:
+ releaser:
+ uses: ipdxco/unified-github-workflows/.github/workflows/releaser.yml@v1.0
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
new file mode 100644
index 0000000000..7c955c4143
--- /dev/null
+++ b/.github/workflows/stale.yml
@@ -0,0 +1,14 @@
+name: Close Stale Issues
+
+on:
+ schedule:
+ - cron: '0 0 * * *'
+ workflow_dispatch:
+
+permissions:
+ issues: write
+ pull-requests: write
+
+jobs:
+ stale:
+ uses: ipdxco/unified-github-workflows/.github/workflows/reusable-stale-issue.yml@v1
diff --git a/.github/workflows/tagpush.yml b/.github/workflows/tagpush.yml
new file mode 100644
index 0000000000..5ef3fb9ede
--- /dev/null
+++ b/.github/workflows/tagpush.yml
@@ -0,0 +1,18 @@
+name: Tag Push Checker
+
+on:
+ push:
+ tags:
+ - v*
+
+permissions:
+ contents: read
+ issues: write
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ releaser:
+ uses: ipdxco/unified-github-workflows/.github/workflows/tagpush.yml@v1.0
diff --git a/.github/workflows/upstream.yml b/.github/workflows/upstream.yml
new file mode 100644
index 0000000000..a9c005de88
--- /dev/null
+++ b/.github/workflows/upstream.yml
@@ -0,0 +1,59 @@
+on:
+ pull_request:
+ branches:
+ - master
+ push:
+ branches:
+ - master
+name: Upstream Test
+
+jobs:
+ unit:
+ strategy:
+ fail-fast: false
+ matrix:
+ os:
+ - 'ubuntu'
+ go:
+ # - '1.15.x'
+ - '1.16.x'
+ upstream:
+ # - 'libp2p/go-libp2p-pubsub' flaky
+ # - 'ipfs/go-bitswap' flaky
+ # - 'libp2p/go-libp2p-kad-dht'
+ - 'libp2p/go-libp2p-daemon'
+ runs-on: ${{ matrix.os }}-latest
+ name: ${{ matrix.upstream }} unit tests (${{ matrix.os }}, Go ${{ matrix.go }})
+ steps:
+ - uses: actions/setup-go@v2
+ with:
+ go-version: ${{ matrix.go }}
+ - name: Go information
+ run: |
+ go version
+ go env
+ - uses: actions/checkout@v4
+ with:
+ path: 'libp2p'
+ - uses: actions/checkout@v4
+ with:
+ repository: ${{ matrix.upstream }}
+ path: upstream
+ - name: Patch in new go-libp2p
+ working-directory: upstream
+ run: |
+ go mod edit -replace "github.com/libp2p/go-libp2p=../libp2p"
+ go mod tidy
+ - name: Run tests
+ working-directory: upstream
+ run: go test -v ./...
+ - name: Run tests (32 bit)
+ working-directory: upstream
+ if: ${{ matrix.os != 'macos' }} # can't run 32 bit tests on OSX.
+ env:
+ GOARCH: 386
+ run: go test -v ./...
+ - name: Run tests with race detector
+ working-directory: upstream
+ if: ${{ matrix.os == 'ubuntu' }} # speed things up. Windows and OSX VMs are slow
+ run: go test -v -race ./...
diff --git a/.gitignore b/.gitignore
index 895adf1394..64c6d853dd 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,6 @@
*.swp
-examples/echo/echo
-examples/multicodecs/multicodecs
-.idea
\ No newline at end of file
+.idea
+*.qlog
+*.sqlog
+*.qlog.zst
+*.sqlog.zst
diff --git a/.golangci.yml b/.golangci.yml
new file mode 100644
index 0000000000..13697f72b6
--- /dev/null
+++ b/.golangci.yml
@@ -0,0 +1,27 @@
+version: "2"
+
+run:
+ timeout: 5m
+
+issues:
+ max-issues-per-linter: 0
+ max-same-issues: 0
+
+linters:
+ enable:
+ - revive
+ - unused
+ - prealloc
+ disable:
+ - errcheck
+ - staticcheck
+
+ settings:
+ revive:
+ severity: warning
+ rules:
+ - name: unused-parameter
+ severity: warning
+
+severity:
+ default: warning
diff --git a/.gx/lastpubver b/.gx/lastpubver
deleted file mode 100644
index 23914f7206..0000000000
--- a/.gx/lastpubver
+++ /dev/null
@@ -1 +0,0 @@
-5.0.17: QmWsV6kzPaYGBDVyuUfWBvyQygEc9Qrv9vzo8vZ7X4mdLN
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index 7be88a9369..0000000000
--- a/.travis.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-os:
- - linux
- - osx
-
-sudo: false
-
-language: go
-
-go:
- - 1.9.x
-
-install:
- - make deps-protocol-muxing
-
-script:
- - bash <(curl -s https://raw.githubusercontent.com/ipfs/ci-helpers/master/travis-ci/run-standard-tests.sh)
-
-cache:
- directories:
- - $GOPATH/src/gx
-
-notifications:
- email: false
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000000..b6ab14c177
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,293 @@
+# Table Of Contents
+- [v0.28.0](#v0280)
+- [v0.27.0](#v0270)
+- [v0.26.4](#v0264)
+- [v0.26.3](#v0263)
+- [v0.26.2](#v0262)
+- [v0.26.1](#v0261)
+- [v0.26.0](#v0260)
+- [v0.25.1](#v0251)
+- [v0.25.0](#v0250)
+
+# [v0.28.0](https://github.com/libp2p/go-libp2p/releases/tag/v0.28.0)
+
+## 🔦 Highlights
+
+### Smart Dialing
+
+This release introduces smart dialing logic. Currently, libp2p dials all addresses of a remote peer in parallel, and
+aborts all outstanding dials as soon as the first one succeeds.
+Dialing many addresses in parallel creates a lot of churn on the client side, and unnecessary load on the network and
+on the server side, and is heavily discouraged by the networking community (see [RFC 8305](https://www.rfc-editor.org/rfc/rfc8305) for example).
+
+When connecting to a peer we first determine the order to dial its addresses. This ranking logic considers a number of corner cases
+described in detail in the documentation of the swarm package (`swarm.DefaultDialRanker`).
+At a high level, this is what happens:
+* If a peer offers a WebTransport and a QUIC address (on the same IP:port), the QUIC address is preferred.
+* If a peer has a QUIC and a TCP address, the QUIC address is dialed first. A TCP connection attempt is only started if the QUIC attempt doesn't succeed within 250ms.
+
+Our measurements on the IPFS network show that for >90% of established libp2p connections, the first connection attempt succeeds,
+leading to a dramatic decrease in the number of aborted connection attempts.
+
+We also added new metrics to the swarm Grafana dashboard, showing:
+* The number of connection attempts it took to establish a connection
+* The delay introduced by the ranking logic
+
+This feature should be safe to enable for nodes running in data centers and for most nodes in home networks.
+However, some networks (mostly home and corporate networks) block all UDP traffic. In those networks, the current implementation
+of the smart dialing logic will lead to a regression, since it prefers QUIC addresses over TCP addresses. Nodes would still be
+able to connect, but establishment of the TCP connection would be delayed by 250ms.
+
+In a future release (see #1605 for details), we will introduce a feature called blackhole detection. By observing the outcome of
+QUIC connection attempts, we can determine if UDP traffic is blocked (namely, if all QUIC connection attempts fail), and stop
+dialing QUIC in this case altogether. Once this detection logic is in place, smart dialing will be enabled by default.
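+
+If you want to try smart dialing before it becomes the default, you can opt in when constructing your host. The following is a minimal, hedged sketch: it assumes the `swarm.WithDialRanker` option together with the `swarm.DefaultDialRanker` mentioned above, wired up via `libp2p.SwarmOpts`; see the swarm package documentation for the authoritative API.
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/libp2p/go-libp2p"
+	"github.com/libp2p/go-libp2p/p2p/net/swarm"
+)
+
+func main() {
+	// Opt in to smart dialing: rank addresses (QUIC before TCP, with a
+	// 250ms head start) instead of dialing everything in parallel.
+	host, err := libp2p.New(
+		libp2p.SwarmOpts(swarm.WithDialRanker(swarm.DefaultDialRanker)),
+	)
+	if err != nil {
+		panic(err)
+	}
+	defer host.Close()
+	fmt.Println("host with smart dialing:", host.ID())
+}
+```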
+
+### More Metrics!
+Since the last release, we've added metrics for:
+* [Holepunching](https://github.com/libp2p/go-libp2p/pull/2246)
+* Smart Dialing (see above)
+
+### WebTransport
+* [#2251](https://github.com/libp2p/go-libp2p/pull/2251): Infer the public WebTransport address from `quic-v1` addresses if both transports are listening on the same port.
+* [#2271](https://github.com/libp2p/go-libp2p/pull/2271): Only add certificate hashes to the WebTransport multiaddress when actually listening on WebTransport
+
+## Housekeeping updates
+* Identify
+ * [#2303](https://github.com/libp2p/go-libp2p/pull/2303): Don't send default protocol version
+ * Prevent polluting PeerStore with local addrs
+ * [#2325](https://github.com/libp2p/go-libp2p/pull/2325): Don't save signed peer records
+ * [#2300](https://github.com/libp2p/go-libp2p/pull/2300): Filter received addresses based on the node's remote address
+* WebSocket
+ * [#2280](https://github.com/libp2p/go-libp2p/pull/2280): Reverted back to the Gorilla library for WebSocket
+* NAT
+ * [#2248](https://github.com/libp2p/go-libp2p/pull/2248): Move NAT mapping logic out of the host
+
+## 🐞 Bugfixes
+* Identify
+ * [Reject signed peer records on peer ID mismatch](https://github.com/libp2p/go-libp2p/commit/8d771355b41297623e05b04a865d029a2522a074)
+ * [#2299](https://github.com/libp2p/go-libp2p/pull/2299): Avoid spuriously pushing updates
+* Swarm
+ * [#2322](https://github.com/libp2p/go-libp2p/pull/2322): Dedup addresses to dial
+ * [#2284](https://github.com/libp2p/go-libp2p/pull/2284): Change maps with multiaddress keys to use strings
+* QUIC
+ * [#2262](https://github.com/libp2p/go-libp2p/pull/2262): Prioritize listen connections for reuse
+ * [#2276](https://github.com/libp2p/go-libp2p/pull/2276): Don't panic when quic-go's accept call errors
+ * [#2263](https://github.com/libp2p/go-libp2p/pull/2263): Fix race condition when generating random holepunch packet
+
+**Full Changelog**: https://github.com/libp2p/go-libp2p/compare/v0.27.0...v0.28.0
+
+# [v0.27.0](https://github.com/libp2p/go-libp2p/releases/tag/v0.27.0)
+
+### Breaking Changes
+
+* The `LocalPrivateKey` method was removed from the `network.Conn` interface. [#2144](https://github.com/libp2p/go-libp2p/pull/2144)
+
+## 🔦 Highlights
+
+### Additional metrics
+Since the last release, we've added metrics for:
+* [Relay Service](https://github.com/libp2p/go-libp2p/pull/2154): RequestStatus, RequestCounts, RejectionReasons for Reservation and Connection Requests,
+ConnectionDuration, BytesTransferred, Relay Service Status.
+* [Autorelay](https://github.com/libp2p/go-libp2p/pull/2185): relay finder status, reservation request outcomes, current reservations, candidate circuit v2 support, current candidates, relay addresses updated, number of relay addresses, and scheduled work times
+
+## 🐞 Bugfixes
+
+* autonat: don't change status on dial request refused [2225](https://github.com/libp2p/go-libp2p/pull/2225)
+* relaysvc: fix flaky TestReachabilityChangeEvent [2215](https://github.com/libp2p/go-libp2p/pull/2215)
+* basichost: prevent duplicate dials [2196](https://github.com/libp2p/go-libp2p/pull/2196)
+* websocket: don't set a WSS multiaddr for accepted unencrypted conns [2199](https://github.com/libp2p/go-libp2p/pull/2199)
+* identify: Fix IdentifyWait when Connected events happen out of order [2173](https://github.com/libp2p/go-libp2p/pull/2173)
+* circuitv2: cleanup relay service properly [2164](https://github.com/libp2p/go-libp2p/pull/2164)
+
+**Full Changelog**: https://github.com/libp2p/go-libp2p/compare/v0.26.4...v0.27.0
+
+# [v0.26.4](https://github.com/libp2p/go-libp2p/releases/tag/v0.26.4)
+
+This patch release fixes a busy loop inside AutoRelay on private nodes, see [2208](https://github.com/libp2p/go-libp2p/pull/2208).
+
+**Full Changelog**: https://github.com/libp2p/go-libp2p/compare/v0.26.0...v0.26.4
+
+# [v0.26.3](https://github.com/libp2p/go-libp2p/releases/tag/v0.26.3)
+
+* rcmgr: fix JSON marshalling of ResourceManagerStat peer map [2156](https://github.com/libp2p/go-libp2p/pull/2156)
+* websocket: Don't limit message sizes in the websocket reader [2193](https://github.com/libp2p/go-libp2p/pull/2193)
+
+**Full Changelog**: https://github.com/libp2p/go-libp2p/compare/v0.26.0...v0.26.3
+
+# [v0.26.2](https://github.com/libp2p/go-libp2p/releases/tag/v0.26.2)
+
+This patch release fixes two bugs:
+* A panic in WebTransport: https://github.com/quic-go/webtransport-go/releases/tag/v0.5.2
+* Incorrect accounting of accepted connections in the swarm metrics: [#2147](https://github.com/libp2p/go-libp2p/pull/2147)
+
+**Full Changelog**: https://github.com/libp2p/go-libp2p/compare/v0.26.0...v0.26.2
+
+# v0.26.1
+
+This version was retracted due to errors when publishing the release.
+
+# [v0.26.0](https://github.com/libp2p/go-libp2p/releases/tag/v0.26.0)
+
+## 🔦 Highlights
+
+### Circuit Relay Changes
+
+#### [Removed Circuit Relay v1](https://github.com/libp2p/go-libp2p/pull/2107)
+
+We've decided to remove support for Circuit Relay v1 in this release. v1 relays were retired a few months ago. Notably, running the Relay v1 protocol was expensive, so only a small number of nodes in the network offered it. Users had to either manually configure these nodes as static relays, or discover them from the DHT.
+Furthermore, rust-libp2p [has dropped support](https://github.com/libp2p/rust-libp2p/pull/2549) and js-libp2p [is dropping support](https://github.com/libp2p/js-libp2p/pull/1533) for Relay v1.
+
+Support for Relay v2 was first added in [late 2021 in v0.16.0](https://github.com/libp2p/go-libp2p/releases/tag/v0.16.0). With Circuit Relay v2 it became cheap to run (limited) relays, and public nodes also started offering the relay service by default. There's now a massive number of Relay v2 nodes on the IPFS network, and they don't advertise their service to the DHT any more. Because there are so many of these nodes, a node that connects to even a small number of peers (e.g. by joining the DHT) is statistically guaranteed to connect to some relays.
+
+#### [Unlimited Relay v2](https://github.com/libp2p/go-libp2p/pull/2125)
+
+In conjunction with removing relay v1, we also added an option to Circuit Relay v2 to disable limits.
+This is done by enabling `WithInfiniteLimits`. When enabled, this gives users a drop-in replacement for Relay v1 using Relay v2.
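+
+A minimal sketch of running such an unlimited relay, assuming the option lives in the circuitv2 relay package (`relayv2.WithInfiniteLimits`) and is passed through `libp2p.EnableRelayService`:
+
+```go
+package main
+
+import (
+	"github.com/libp2p/go-libp2p"
+	relayv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay"
+)
+
+func main() {
+	// Offer a Relay v2 service without the default duration/data limits,
+	// i.e. a drop-in replacement for an unlimited Relay v1 node.
+	host, err := libp2p.New(
+		libp2p.EnableRelayService(relayv2.WithInfiniteLimits()),
+	)
+	if err != nil {
+		panic(err)
+	}
+	defer host.Close()
+	select {} // keep the relay running
+}
+```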
+
+### Additional metrics
+
+Since the last release, we've added additional metrics to different components.
+Metrics were added to:
+* [AutoNat](https://github.com/libp2p/go-libp2p/pull/2086): Current Reachability Status and Confidence, Client and Server DialResponses, Server DialRejections. The dashboard is [available here](https://github.com/libp2p/go-libp2p/blob/master/dashboards/autonat/autonat.json).
+* Swarm:
+ - [Early Muxer Selection](https://github.com/libp2p/go-libp2p/pull/2119): Added early_muxer label indicating whether a connection was established using early muxer selection.
+ - [IP Version](https://github.com/libp2p/go-libp2p/pull/2114): Added ip_version label to connection metrics
+* Identify:
+ - Metrics for Identify, IdentifyPush, PushesTriggered (https://github.com/libp2p/go-libp2p/pull/2069)
+ - Address Count, Protocol Count, Connection IDPush Support (https://github.com/libp2p/go-libp2p/pull/2126)
+
+
+We also migrated the metric dashboards to a top-level [dashboards](https://github.com/libp2p/go-libp2p/tree/master/dashboards) directory.
+
+## 🐞 Bugfixes
+
+### AutoNat
+* [Fixed a bug](https://github.com/libp2p/go-libp2p/issues/2091) where AutoNat would emit events when the observed address changed, even though the node's reachability hadn't.
+
+### Relay Manager
+* [Fixed a bug](https://github.com/libp2p/go-libp2p/pull/2093) where the Relay Manager would start a new relay even though the node's reachability was `Public` or a relay already existed.
+
+### [Stop sending detailed error messages on closing QUIC connections](https://github.com/libp2p/go-libp2p/pull/2112)
+
+Users reported seeing confusing error messages and could not determine the root cause, or whether the error originated from the local or the remote peer:
+
+```{12D... Application error 0x0: conn-27571160: system: cannot reserve inbound connection: resource limit exceeded}```
+
+This error occurred when a connection had been established with a remote peer, but the remote peer then dropped the connection (because it exceeded the remote's resource limits).
+The `Application error` was emitted by `quic-go`, and it was a bug in go-libp2p to forward the whole detailed message.
+For now, we decided to stop sending this confusing error message. In the future, we will report such errors via [error codes](https://github.com/libp2p/specs/issues/479).
+
+**Full Changelog**: https://github.com/libp2p/go-libp2p/compare/v0.25.1...v0.26.0
+
+# [v0.25.1](https://github.com/libp2p/go-libp2p/releases/tag/v0.25.1)
+
+This patch release fixes some test utilities used by https://github.com/libp2p/go-libp2p-kad-dht:
+
+* mocknet: Start host in mocknet by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2078
+* chore: update go-multistream by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2081
+
+**Full Changelog**: https://github.com/libp2p/go-libp2p/compare/v0.25.0...v0.25.1
+
+# [v0.25.0](https://github.com/libp2p/go-libp2p/releases/tag/v0.25.0)
+
+## 🔦 Highlights
+
+### Metrics
+
+We've started instrumenting the entire stack. In this release, we're adding metrics for:
+* the swarm: tracking incoming and outgoing connections, transports, security protocols and stream multiplexers in use: (https://github.com/libp2p/go-libp2p/blob/master/dashboards/swarm/swarm.json)
+* the event bus: tracking how different events are propagated through the stack and to external consumers (https://github.com/libp2p/go-libp2p/blob/master/dashboards/eventbus/eventbus.json)
+
+Our metrics effort is still ongoing, see https://github.com/libp2p/go-libp2p/issues/1356 for progress. We'll add metrics and dashboards for more libp2p components in a future release.
+
+### Switching to Google's official Protobuf compiler
+
+So far, we were using GoGo Protobuf to compile our Protobuf definitions to Go code. However, this library was deprecated in October last year: https://twitter.com/awalterschulze/status/1584553056100057088. We [benchmarked](https://github.com/libp2p/go-libp2p/issues/1976#issuecomment-1371527732) serialization and deserialization, and found that it's (only) 20% slower than GoGo. Since the vast majority of go-libp2p's CPU time is spent in code paths other than Protobuf handling, switching to the official compiler seemed like a worthwhile tradeoff.
+
+### Removal of OpenSSL
+
+Before this release, go-libp2p had an option to use OpenSSL bindings for certain cryptographic primitives, mostly to speed up the generation of signatures and their verification. When building go-libp2p using `go build`, we'd use the standard library crypto packages. OpenSSL was only used when passing in a build tag: `go build -tags openssl`.
+Maintaining our own fork of the long unmaintained [go-openssl package](https://github.com/libp2p/go-openssl) has proven to place a larger-than-expected maintenance burden on the libp2p stewards, and when we recently discovered a range of new bugs ([this](https://github.com/libp2p/go-openssl/issues/38) and [this](https://github.com/libp2p/go-libp2p/issues/1892) and [this](https://github.com/libp2p/go-libp2p/issues/1951)), we decided to re-evaluate if this code path is really worth it. The results surprised us; it turns out that:
+* The Go standard library is faster than OpenSSL for all key types that are not RSA.
+* Verifying RSA signatures is as fast as Ed25519 signatures using the Go standard library, and even faster in OpenSSL.
+* Generating RSA signatures is painfully slow, both using Go standard library crypto and using OpenSSL (but even slower using Go standard library).
+
+The good news is that if your node is not using an RSA key, it will never create any RSA signatures (it might need to verify them though, when it connects to a node that uses RSA keys). If you're concerned about CPU performance, it's a good idea to avoid RSA keys (the same applies to bandwidth; RSA keys are huge!). Even for nodes using RSA keys, it turns out that generating signatures is not a significant part of their CPU load, as verified by profiling one of Kubo's bootstrap nodes.
+
+We therefore concluded that it's safe to drop this code path altogether, and thereby reduce our maintenance burden.
+
+### New Resource Manager types
+
+* Introduces a new type `LimitVal` which can explicitly specify "use default", "unlimited", "block all", as well as any positive number. The zero value of `LimitVal` (the value when you create the object in Go) is "Use default".
+ * The JSON marshalling of this is straightforward.
+* Introduces a new `ResourceLimits` type which uses `LimitVal` instead of ints so it can encode the above for the resources.
+* Changes `LimitConfig` to `PartialLimitConfig` and uses `ResourceLimits`. This along with the marshalling changes means you can now marshal the fact that some resource limit is set to block all.
+ * Because the zero value means "use defaults", this avoids the footgun of accidentally initializing the resource manager with all limits set to 0 (which would block everything).
+
+In general, you can go from a resource config with defaults to a concrete one with `.Build()`. e.g. `ResourceLimits.Build() => BaseLimit`, `PartialLimitConfig.Build() => ConcreteLimitConfig`, `LimitVal.Build() => int`. See PR #2000 for more details.
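+
+As a hedged sketch of how these pieces fit together (field and constant names as we understand them from PR #2000; double-check against the rcmgr package):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
+)
+
+func main() {
+	// A partial config: anything left at the zero value means "use default".
+	partial := rcmgr.PartialLimitConfig{
+		System: rcmgr.ResourceLimits{
+			Conns:   rcmgr.Unlimited,     // no limit on system-wide connections
+			Streams: rcmgr.BlockAllLimit, // block all streams (for illustration)
+		},
+	}
+	// Build fills in every unset value from the supplied defaults.
+	concrete := partial.Build(rcmgr.DefaultLimits.AutoScale())
+	// The concrete config can then be passed to rcmgr.NewFixedLimiter.
+	fmt.Printf("%+v\n", concrete)
+}
+```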
+
+If you're using the defaults for the resource manager, there should be no changes needed.
+
+### Other Breaking Changes
+
+We've cleaned up our API to consistently use `protocol.ID` for libp2p and application protocols. Specifically, this means that the peer store now uses `protocol.ID`s, and so does the host's `SetStreamHandler`.
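+
+As a hedged illustration (the `/echo/1.0.0` protocol name here is hypothetical), registering a stream handler now takes a typed `protocol.ID` instead of a plain string:
+
+```go
+package main
+
+import (
+	"github.com/libp2p/go-libp2p"
+	"github.com/libp2p/go-libp2p/core/network"
+	"github.com/libp2p/go-libp2p/core/protocol"
+)
+
+func main() {
+	h, err := libp2p.New()
+	if err != nil {
+		panic(err)
+	}
+	defer h.Close()
+
+	// SetStreamHandler now takes a protocol.ID, not a string.
+	h.SetStreamHandler(protocol.ID("/echo/1.0.0"), func(s network.Stream) {
+		defer s.Close()
+		// handle the stream...
+	})
+}
+```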
+
+## What's Changed
+* chore: use generic LRU cache by @muXxer in https://github.com/libp2p/go-libp2p/pull/1980
+* core/crypto: drop all OpenSSL code paths by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1953
+* add WebTransport to the list of default transports by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1915
+* identify: remove old code targeting Go 1.17 by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1964
+* core: remove introspection package by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1978
+* identify: remove support for Identify Delta by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1975
+* roadmap: remove optimizations of the TCP-based handshake by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1959
+* circuitv2: correctly set the transport in the ConnectionState by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1972
+* switch to Google's Protobuf library, make protobufs compile with go generate by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1979
+* ci: run go generate as part of the go-check workflow by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1986
+* ci: use GitHub token to install protoc by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1996
+* feat: add some users to the readme by @p-shahi in https://github.com/libp2p/go-libp2p/pull/1981
+* CI: Fast multidimensional Interop tests by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/1991
+* Fix: Ignore zero values when marshalling Limits. by @ajnavarro in https://github.com/libp2p/go-libp2p/pull/1998
+* feat: add ci flakiness score to readme by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2002
+* peerstore: make it possible to use an empty peer ID by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2006
+* feat: rcmgr: Export resource manager errors by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2008
+* feat: ci test-plans: Parse test timeout parameter for interop test by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2014
+* Clean addresses with peer id before adding to addrbook by @sukunrt in https://github.com/libp2p/go-libp2p/pull/2007
+* Expose muxer ids by @aschmahmann in https://github.com/libp2p/go-libp2p/pull/2012
+* swarm: add a basic metrics tracer by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1973
+* consistently use protocol.ID instead of strings by @sukunrt in https://github.com/libp2p/go-libp2p/pull/2004
+* swarm metrics: fix datasource for dashboard by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2024
+* chore: remove textual roadmap in favor for Starmap by @p-shahi in https://github.com/libp2p/go-libp2p/pull/2036
+* rcmgr: *: Always close connscope by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2037
+* chore: remove license files from the eventbus package by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2042
+* Migrate to test-plan composite action by @thomaseizinger in https://github.com/libp2p/go-libp2p/pull/2039
+* use quic-go and webtransport-go from quic-go organization by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2040
+* holepunch: fix flaky test by not removing holepunch protocol handler by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1948
+* quic / webtransport: extend test to test dialing a draft-29 and a v1 by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1957
+* p2p/test: add test for EvtLocalAddressesUpdated event by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2016
+* quic, tcp: only register Prometheus counters when metrics are enabled by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1971
+* p2p/test: fix flaky notification test by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2051
+* quic: disable sending of Version Negotiation packets by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2015
+* eventbus: add metrics by @sukunrt in https://github.com/libp2p/go-libp2p/pull/2038
+* metrics: use a single slice pool for all metrics tracer by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2054
+* webtransport: tidy up some test output by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2053
+* set names for eventbus event subscriptions by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2057
+* autorelay: Split libp2p.EnableAutoRelay into 2 functions by @sukunrt in https://github.com/libp2p/go-libp2p/pull/2022
+* rcmgr: Use prometheus SDK for rcmgr metrics by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2044
+* websocket: Replace gorilla websocket transport with nhooyr websocket transport by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/1982
+* rcmgr: add libp2p prefix to all metrics by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2063
+* chore: git-ignore various flavors of qlog files by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2064
+* interop: Update interop test to match spec by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2049
+* chore: update webtransport-go to v0.5.1 by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2072
+* identify: refactor sending of Identify pushes by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1984
+* feat!: rcmgr: Change LimitConfig to use LimitVal type by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2000
+* p2p/test/quic: use contexts with a timeout for Connect calls by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2070
+* identify: add some basic metrics by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2069
+* chore: Release v0.25.0 by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2077
+
+## New Contributors
+* @muXxer made their first contribution in https://github.com/libp2p/go-libp2p/pull/1980
+* @ajnavarro made their first contribution in https://github.com/libp2p/go-libp2p/pull/1998
+* @sukunrt made their first contribution in https://github.com/libp2p/go-libp2p/pull/2007
+* @thomaseizinger made their first contribution in https://github.com/libp2p/go-libp2p/pull/2039
+
+**Full Changelog**: https://github.com/libp2p/go-libp2p/compare/v0.24.2...v0.25.0
diff --git a/FUNDING.json b/FUNDING.json
new file mode 100644
index 0000000000..5952e90cb0
--- /dev/null
+++ b/FUNDING.json
@@ -0,0 +1,10 @@
+{
+ "opRetro": {
+ "projectId": "0xc71faa1bcb4ceb785816c3f22823377e9e5e7c48649badd9f0a0ce491f20d4b3"
+ },
+ "drips": {
+ "filecoin": {
+ "ownedBy": "0x53DCAf729e11022D5b8949946f6601211C662B38"
+ }
+ }
+ }
diff --git a/Makefile b/Makefile
deleted file mode 100644
index 075e51f7f5..0000000000
--- a/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-gx:
- go get github.com/whyrusleeping/gx
- go get github.com/whyrusleeping/gx-go
-
-deps-protocol-muxing: deps
- go get -u github.com/multiformats/go-multicodec
- go get -u github.com/libp2p/go-msgio
-
-deps: gx
- gx --verbose install --global
- gx-go rewrite
-
-publish:
- gx-go rewrite --undo
diff --git a/README.md b/README.md
index b44b25dec3..8eeb68b6a1 100644
--- a/README.md
+++ b/README.md
@@ -1,197 +1,150 @@
-
+
The Go implementation of the libp2p Networking Stack.
-
-
-
-
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-# Project status
-
-[](https://waffle.io/libp2p/go-libp2p/metrics/throughput)
-
-# Table of Contents
-
+# Table of Contents
- [Background](#background)
-- [Bundles](#bundles)
- [Usage](#usage)
- - [Install](#install)
- - [API](#api)
- [Examples](#examples)
-- [Development](#development)
- - [Tests](#tests)
- - [Packages](#packages)
+ - [Dashboards](#dashboards)
- [Contribute](#contribute)
-- [License](#license)
+ - [Supported Go Versions](#supported-go-versions)
+- [Notable Users](#notable-users)
-## Background
+# Background
[libp2p](https://github.com/libp2p/specs) is a networking stack and library modularized out of [The IPFS Project](https://github.com/ipfs/ipfs), and bundled separately for other tools to use.
>
-libp2p is the product of a long, and arduous quest of understanding -- a deep dive into the internet's network stack, and plentiful peer-to-peer protocols from the past. Building large scale peer-to-peer systems has been complex and difficult in the last 15 years, and libp2p is a way to fix that. It is a "network stack" -- a protocol suite -- that cleanly separates concerns, and enables sophisticated applications to only use the protocols they absolutely need, without giving up interoperability and upgradeability. libp2p grew out of IPFS, but it is built so that lots of people can use it, for lots of different projects.
->
-> We will be writing a set of docs, posts, tutorials, and talks to explain what p2p is, why it is tremendously useful, and how it can help your existing and new projects. But in the meantime, check out
->
-> - [**The libp2p Specification**](https://github.com/libp2p/specs)
-> - [**go-libp2p implementation**](https://github.com/libp2p/go-libp2p)
-> - [**js-libp2p implementation**](https://github.com/libp2p/js-libp2p)
-
+libp2p is the product of a long and arduous quest of understanding -- a deep dive into the internet's network stack, and plentiful peer-to-peer protocols from the past. Building large-scale peer-to-peer systems has been complex and difficult in the last 15 years, and libp2p is a way to fix that. It is a "network stack" -- a protocol suite -- that cleanly separates concerns, and enables sophisticated applications to only use the protocols they absolutely need, without giving up interoperability and upgradeability. libp2p grew out of IPFS, but it is built so that lots of people can use it, for lots of different projects.
-## Bundles
+To learn more, check out the following resources:
+- [**Our documentation**](https://docs.libp2p.io)
+- [**Our community discussion forum**](https://discuss.libp2p.io)
+- [**The libp2p Specification**](https://github.com/libp2p/specs)
+- [**js-libp2p implementation**](https://github.com/libp2p/js-libp2p)
+- [**rust-libp2p implementation**](https://github.com/libp2p/rust-libp2p)
-There is currently only one bundle of `go-libp2p`, this package. This bundle is used by [`go-ipfs`](https://github.com/ipfs/go-ipfs).
+# Usage
-## Usage
+This repository (`go-libp2p`) serves as the entrypoint to the universe of packages that compose the Go implementation of the libp2p stack.
-`go-libp2p` repo is a place holder for the list of Go modules that compose Go libp2p, as well as its entry point.
+You can start using go-libp2p in your Go application simply by adding imports from our repos, e.g.:
-### Install
-
-```bash
-> go get -d github.com/libp2p/go-libp2p/...
-> cd $GOPATH/src/github.com/libp2p/go-libp2p
-> make
-> make deps
+```go
+import "github.com/libp2p/go-libp2p"
```
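+
+As a minimal, hedged sketch of what a first program might look like (the default options shown here are a reasonable starting point; error handling kept deliberately simple):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/libp2p/go-libp2p"
+)
+
+func main() {
+	// Create a host with the default transports, muxers, and security.
+	host, err := libp2p.New()
+	if err != nil {
+		panic(err)
+	}
+	defer host.Close()
+
+	fmt.Println("PeerID:", host.ID())
+	for _, addr := range host.Addrs() {
+		fmt.Println("Listening on:", addr)
+	}
+}
+```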
-### API
-
-[](https://godoc.org/github.com/libp2p/go-libp2p)
+## Examples
-### Examples
+Examples can be found in the [examples folder](examples).
-Examples can be found on the [examples folder](examples).
+## Dashboards
-## Development
+We provide prebuilt Grafana dashboards so that applications can better monitor libp2p in production.
+You can find the [dashboard JSON files here](https://github.com/libp2p/go-libp2p/tree/master/dashboards).
-### Dependencies
-
-While developing, you need to use [gx to install and link your dependencies](https://github.com/whyrusleeping/gx#dependencies), to do that, run:
-
-```sh
-> make deps
-```
+We also have live [Public Dashboards](https://github.com/libp2p/go-libp2p/tree/master/dashboards/README.md#public-dashboards) that you can check out to see real-time monitoring in action.
-Before commiting and pushing to Github, make sure to rewind the gx'ify of dependencies. You can do that with:
-
-```sh
-> make publish
-```
-
-### Tests
-
-Running of individual tests is done through `gx test `
-
-```bash
-$ cd $GOPATH/src/github.com/libp2p/go-libp2p
-$ make deps
-$ gx test ./p2p/
-```
-
-### Packages
-
-> **WIP**
-
-List of packages currently in existence for libp2p:
-
-| Package | Version | CI |
-|--------------------|---------|---------------------|
-| **Transports** |
-| **Connection Upgrades** |
-| **Stream Muxers** |
-| **Discovery** |
-| **Crypto Channels** |
-| **Peer Routing** |
-| **Content Routing** |
-| **Miscellaneous** |
-| **Data Types** |
# Contribute
-go-libp2p is part of [The IPFS Project](https://github.com/ipfs/ipfs), and is MIT licensed open source software. We welcome contributions big and small! Take a look at the [community contributing notes](https://github.com/ipfs/community/blob/master/contributing.md). Please make sure to check the [issues](https://github.com/ipfs/go-libp2p/issues). Search the closed ones before reporting things, and help us with the open ones.
+go-libp2p is MIT-licensed open source software. We welcome contributions big and small! Take a look at the [community contributing notes](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md). Please make sure to check the [issues](https://github.com/libp2p/go-libp2p/issues). Search the closed ones before reporting things, and help us with the open ones.
Guidelines:
- read the [libp2p spec](https://github.com/libp2p/specs)
-- please make branches + pull-request, even if working on the main repository
-- ask questions or talk about things in [Issues](https://github.com/libp2p/go-libp2p/issues) or #ipfs on freenode.
-- ensure you are able to contribute (no legal issues please-- we use the DCO)
-- run `go fmt` before pushing any code
-- run `golint` and `go vet` too -- some things (like protobuf files) are expected to fail.
-- get in touch with @jbenet and @diasdavid about how best to contribute
+- ask questions or talk about things in our [discussion forums](https://discuss.libp2p.io), or open an [issue](https://github.com/libp2p/go-libp2p/issues) for bug reports, or #libp2p-implementers on [Filecoin slack](https://filecoin.io/slack).
+- ensure you are able to contribute (no legal issues please -- we use the DCO)
+- get in touch with @libp2p/go-libp2p-maintainers about how best to contribute
+- No drive-by contributions seeking to collect airdrops.
+ - Many projects aim to reward contributors to common goods. Great. However,
+ this creates an unfortunate incentive for low-effort PRs, submitted solely to
+ claim rewards. These PRs consume maintainers' time and energy to triage, with
+ little to no impact on end users. If we suspect this is the intent of a PR,
+ we may close it without comment. If you believe this was done in error,
+ contact us via email. Reference this README section and explain why your PR
+ is not a "drive-by contribution."
- have fun!
There's a few things you can do right now to help out:
- - Go through the modules below and **check out existing issues**. This would be especially useful for modules in active development. Some knowledge of IPFS/libp2p may be required, as well as the infrasture behind it - for instance, you may need to read up on p2p and more complex operations like muxing to be able to help technically.
- **Perform code reviews**.
- **Add tests**. There can never be enough tests.
+ - Go through the modules below and **check out existing issues**. This would
+ be especially useful for modules in active development. Some knowledge of
+ IPFS/libp2p may be required, as well as the infrastructure behind it - for
+ instance, you may need to read up on p2p and more complex operations like
+ muxing to be able to help technically.
-## Modularizing go-libp2p
-
-We have currently a work in progress of modularizing go-libp2p from a repo monolith to several packages in different repos that can be reused for other projects of swapped for custom libp2p builds.
-
-We want to maintain history, so we'll use git-subtree for extracting packages. Find instructions below:
-
-```sh
-# 1) create the extracted tree (has the directory specified as -P as its root)
-> cd go-libp2p/
-> git subtree split -P p2p/crypto/secio/ -b libp2p-secio
-62b0a5c21574bcbe06c422785cd5feff378ae5bd
-# important to delete the tree now, so that outdated imports fail in step 5
-> git rm -r p2p/crypto/secio/
-> git commit
-> cd ../
-
-# 2) make the new repo
-> mkdir go-libp2p-secio
-> cd go-libp2p-secio/
-> git init && git commit --allow-empty
-
-# 3) fetch the extracted tree from the previous repo
-> git remote add libp2p ../go-libp2p
-> git fetch libp2p
-> git reset --hard libp2p/libp2p-secio
-
-# 4) update self import paths
-> sed -someflagsidontknow 'go-libp2p/p2p/crypto/secio' 'golibp2p-secio'
-> git commit
-
-# 5) create package.json and check all imports are correct
-> vim package.json
-> gx --verbose install --global
-> gx-go rewrite
-> go test ./...
-> gx-go rewrite --undo
-> git commit
-
-# 4) make the package ready
-> vim README.md LICENSE
-> git commit
-
-# 5) bump the version separately
-> vim package.json
-> gx publish
-> git add package.json .gx/
-> git commit -m 'Publish 1.2.3'
-
-# 6) clean up and push
-> git remote rm libp2p
-> git push origin master
-```
+## AI Assistance Notice
+
+> [!IMPORTANT]
+>
+> If you are using **any kind of AI assistance** to contribute to libp2p,
+> it must be disclosed in the pull request.
+
+If you are using any kind of AI assistance while contributing to libp2p,
+**this must be disclosed in the pull request**, along with the extent to
+which AI assistance was used (e.g. docs only vs. code generation).
+If PR responses are being generated by an AI, disclose that as well.
+As a small exception, trivial tab-completion doesn't need to be disclosed,
+so long as it is limited to single keywords or short phrases.
+
+An example disclosure:
+
+> This PR was written primarily by Claude Code.
+
+Or a more detailed disclosure:
+
+> I consulted ChatGPT to understand the codebase but the solution
+> was fully authored manually by myself.
+
+Failure to disclose this is first and foremost rude to the human operators
+on the other end of the pull request, but it also makes it difficult to
+determine how much scrutiny to apply to the contribution.
+
+In a perfect world, AI assistance would produce work of equal or higher
+quality than any human's. That isn't the world we live in today, and in most
+cases it's generating slop. I say this despite being a fan of these tools and
+using them successfully myself (with heavy supervision)!
+
+Please be respectful to maintainers and disclose AI assistance.
+
+# Supported Go Versions
+
+We test against and support the two most recent major releases of Go. This is
+informed by Go's own [security policy](https://go.dev/doc/security/policy).
+
+# Notable Users
+Some notable users of go-libp2p are:
+- [Kubo](https://github.com/ipfs/kubo) - The original Go implementation of IPFS
+- [Lotus](https://github.com/filecoin-project/lotus) - An implementation of the Filecoin protocol
+- [Drand](https://github.com/drand/drand) - A distributed random beacon daemon
+- [Prysm](https://github.com/prysmaticlabs/prysm) - An Ethereum Beacon Chain consensus client built by [Prysmatic Labs](https://prysmaticlabs.com/)
+- [Berty](https://github.com/berty/berty) - An open, secure, offline-first, peer-to-peer and zero trust messaging app.
+- [Wasp](https://github.com/iotaledger/wasp) - A node that runs IOTA Smart Contracts built by the [IOTA Foundation](https://www.iota.org/)
+- [Mina](https://github.com/minaprotocol/mina) - A lightweight, constant-sized blockchain that runs zero-knowledge smart contracts
+- [Polygon Edge](https://github.com/0xPolygon/polygon-edge) - A modular, extensible framework for building Ethereum compatible networks
+- [Celestia Node](https://github.com/celestiaorg/celestia-node) - The Go implementation of Celestia's data availability nodes
+- [Status go](https://github.com/status-im/status-go) - Status bindings for go-ethereum, built by [Status.im](https://status.im/)
+- [Flow](https://github.com/onflow/flow-go) - A blockchain built to support games, apps, and digital assets built by [Dapper Labs](https://www.dapperlabs.com/)
+- [Swarm Bee](https://github.com/ethersphere/bee) - A client for connecting to the [Swarm network](https://www.ethswarm.org/)
+- [MultiversX Node](https://github.com/multiversx/mx-chain-go) - The Go implementation of the MultiversX network protocol
+- [Sonr](https://github.com/sonr-io/sonr) - A platform to integrate DID Documents, WebAuthn, and IPFS and manage digital identity and assets.
+- [EdgeVPN](https://github.com/mudler/edgevpn) - A decentralized, immutable, portable VPN and reverse proxy over p2p.
+- [Kairos](https://github.com/kairos-io/kairos) - A Kubernetes-focused, Cloud Native Linux meta-distribution.
+- [Oasis Core](https://github.com/oasisprotocol/oasis-core) - The consensus and runtime layers of the [Oasis protocol](https://oasisprotocol.org/).
+- [Spacemesh](https://github.com/spacemeshos/go-spacemesh/) - The Go implementation of the [Spacemesh protocol](https://spacemesh.io/), a novel layer one blockchain
+- [Tau](https://github.com/taubyte/tau/) - Open source distributed Platform as a Service (PaaS)
+
+Please open a pull request if you want your project (min. 250 GitHub stars) to be added here.
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 0000000000..0ecad4301b
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,20 @@
+# Security Policy
+
+go-libp2p is still in development. This means that there may be problems in our protocols,
+or there may be mistakes in our implementations.
+We take security vulnerabilities very seriously. If you discover a security issue,
+please bring it to our attention right away!
+
+## Reporting a Vulnerability
+
+If you find a vulnerability that may affect live deployments -- for example, by exposing
+a remote execution exploit -- please [**report privately**](https://github.com/libp2p/go-libp2p/security/advisories/new).
+Please **DO NOT file a public issue**.
+
+If the issue is an implementation weakness that cannot be immediately exploited or
+something not yet deployed, just discuss it openly.
+If you need assistance, please reach out to [security@libp2p.io](mailto:security@libp2p.io).
+
+## Reporting a non security bug
+
+For non-security bugs, please simply file a GitHub [issue](https://github.com/libp2p/go-libp2p/issues/new).
diff --git a/ci/Jenkinsfile b/ci/Jenkinsfile
deleted file mode 100644
index b2067e6232..0000000000
--- a/ci/Jenkinsfile
+++ /dev/null
@@ -1 +0,0 @@
-golang()
diff --git a/codecov.yml b/codecov.yml
deleted file mode 100644
index 5f88a9ea27..0000000000
--- a/codecov.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-coverage:
- range: "50...100"
-comment: off
diff --git a/config/config.go b/config/config.go
new file mode 100644
index 0000000000..2b3b6ee772
--- /dev/null
+++ b/config/config.go
@@ -0,0 +1,780 @@
+package config
+
+import (
+ "context"
+ "crypto/rand"
+ "errors"
+ "fmt"
+ "net"
+ "slices"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/connmgr"
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/event"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/metrics"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/libp2p/go-libp2p/core/pnet"
+ "github.com/libp2p/go-libp2p/core/protocol"
+ "github.com/libp2p/go-libp2p/core/routing"
+ "github.com/libp2p/go-libp2p/core/sec"
+ "github.com/libp2p/go-libp2p/core/sec/insecure"
+ "github.com/libp2p/go-libp2p/core/transport"
+ logging "github.com/libp2p/go-libp2p/gologshim"
+ "github.com/libp2p/go-libp2p/p2p/host/autonat"
+ "github.com/libp2p/go-libp2p/p2p/host/autorelay"
+ bhost "github.com/libp2p/go-libp2p/p2p/host/basic"
+ blankhost "github.com/libp2p/go-libp2p/p2p/host/blank"
+ "github.com/libp2p/go-libp2p/p2p/host/eventbus"
+ "github.com/libp2p/go-libp2p/p2p/host/observedaddrs"
+ "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem"
+ rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
+ routed "github.com/libp2p/go-libp2p/p2p/host/routed"
+ "github.com/libp2p/go-libp2p/p2p/net/swarm"
+ tptu "github.com/libp2p/go-libp2p/p2p/net/upgrader"
+ "github.com/libp2p/go-libp2p/p2p/protocol/autonatv2"
+ circuitv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client"
+ relayv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay"
+ "github.com/libp2p/go-libp2p/p2p/protocol/holepunch"
+ "github.com/libp2p/go-libp2p/p2p/protocol/identify"
+ "github.com/libp2p/go-libp2p/p2p/transport/quicreuse"
+ "github.com/libp2p/go-libp2p/p2p/transport/tcpreuse"
+ libp2pwebrtc "github.com/libp2p/go-libp2p/p2p/transport/webrtc"
+ "github.com/prometheus/client_golang/prometheus"
+
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+ "github.com/quic-go/quic-go"
+ "go.uber.org/fx"
+ "go.uber.org/fx/fxevent"
+)
+
+var log = logging.Logger("p2p-config")
+
+// AddrsFactory is a function that takes a set of multiaddrs we're listening on and
+// returns the set of multiaddrs we should advertise to the network.
+type AddrsFactory = bhost.AddrsFactory
+
+// NATManagerC is a NATManager constructor.
+type NATManagerC func(network.Network) bhost.NATManager
+
+type RoutingC func(host.Host) (routing.PeerRouting, error)
+
+// AutoNATConfig defines the AutoNAT behavior for the libp2p host.
+type AutoNATConfig struct {
+ ForceReachability *network.Reachability
+ EnableService bool
+ ThrottleGlobalLimit int
+ ThrottlePeerLimit int
+ ThrottleInterval time.Duration
+}
+
+type Security struct {
+ ID protocol.ID
+ Constructor interface{}
+}
+
+// Config describes a set of settings for a libp2p node
+//
+// This is *not* a stable interface. Use the options defined in the root
+// package.
+type Config struct {
+ // UserAgent is the identifier this node will send to other peers when
+ // identifying itself, e.g. via the identify protocol.
+ //
+ // Set it via the UserAgent option function.
+ UserAgent string
+
+ // ProtocolVersion is the protocol version that identifies the family
+ // of protocols used by the peer in the Identify protocol. It is set
+ // using the [ProtocolVersion] option.
+ ProtocolVersion string
+
+ PeerKey crypto.PrivKey
+
+ QUICReuse []fx.Option
+ Transports []fx.Option
+ Muxers []tptu.StreamMuxer
+ SecurityTransports []Security
+ Insecure bool
+ PSK pnet.PSK
+
+ DialTimeout time.Duration
+
+ RelayCustom bool
+ Relay bool // should the relay transport be used
+
+ EnableRelayService bool // should we run a circuitv2 relay (if publicly reachable)
+ RelayServiceOpts []relayv2.Option
+
+ ListenAddrs []ma.Multiaddr
+ AddrsFactory bhost.AddrsFactory
+ ConnectionGater connmgr.ConnectionGater
+
+ ConnManager connmgr.ConnManager
+ ResourceManager network.ResourceManager
+
+ NATManager NATManagerC
+ Peerstore peerstore.Peerstore
+ Reporter metrics.Reporter
+
+ MultiaddrResolver network.MultiaddrDNSResolver
+
+ DisablePing bool
+
+ Routing RoutingC
+
+ EnableAutoRelay bool
+ AutoRelayOpts []autorelay.Option
+ AutoNATConfig
+
+ EnableHolePunching bool
+ HolePunchingOptions []holepunch.Option
+
+ DisableMetrics bool
+ PrometheusRegisterer prometheus.Registerer
+
+ DialRanker network.DialRanker
+
+ SwarmOpts []swarm.Option
+
+ DisableIdentifyAddressDiscovery bool
+
+ EnableAutoNATv2 bool
+
+ UDPBlackHoleSuccessCounter *swarm.BlackHoleSuccessCounter
+ CustomUDPBlackHoleSuccessCounter bool
+ IPv6BlackHoleSuccessCounter *swarm.BlackHoleSuccessCounter
+ CustomIPv6BlackHoleSuccessCounter bool
+
+ UserFxOptions []fx.Option
+
+ ShareTCPListener bool
+}
+
+func (cfg *Config) makeSwarm(eventBus event.Bus, enableMetrics bool) (*swarm.Swarm, error) {
+ if cfg.Peerstore == nil {
+ return nil, fmt.Errorf("no peerstore specified")
+ }
+
+ // Check this early. Prevents us from even *starting* without verifying this.
+ if pnet.ForcePrivateNetwork && len(cfg.PSK) == 0 {
+ log.Error("tried to create a libp2p node with no Private Network Protector but usage of Private Networks is forced by the environment")
+ // Note: This is *also* checked by the upgrader itself, so it'll be
+ // enforced even *if* you don't use the libp2p constructor.
+ return nil, pnet.ErrNotInPrivateNetwork
+ }
+
+ if cfg.PeerKey == nil {
+ return nil, fmt.Errorf("no peer key specified")
+ }
+
+ // Obtain Peer ID from public key
+ pid, err := peer.IDFromPublicKey(cfg.PeerKey.GetPublic())
+ if err != nil {
+ return nil, err
+ }
+
+ if err := cfg.Peerstore.AddPrivKey(pid, cfg.PeerKey); err != nil {
+ return nil, err
+ }
+ if err := cfg.Peerstore.AddPubKey(pid, cfg.PeerKey.GetPublic()); err != nil {
+ return nil, err
+ }
+
+ opts := append(cfg.SwarmOpts,
+ swarm.WithUDPBlackHoleSuccessCounter(cfg.UDPBlackHoleSuccessCounter),
+ swarm.WithIPv6BlackHoleSuccessCounter(cfg.IPv6BlackHoleSuccessCounter),
+ )
+ if cfg.Reporter != nil {
+ opts = append(opts, swarm.WithMetrics(cfg.Reporter))
+ }
+ if cfg.ConnectionGater != nil {
+ opts = append(opts, swarm.WithConnectionGater(cfg.ConnectionGater))
+ }
+ if cfg.DialTimeout != 0 {
+ opts = append(opts, swarm.WithDialTimeout(cfg.DialTimeout))
+ }
+ if cfg.ResourceManager != nil {
+ opts = append(opts, swarm.WithResourceManager(cfg.ResourceManager))
+ }
+ if cfg.MultiaddrResolver != nil {
+ opts = append(opts, swarm.WithMultiaddrResolver(cfg.MultiaddrResolver))
+ }
+ if cfg.DialRanker != nil {
+ opts = append(opts, swarm.WithDialRanker(cfg.DialRanker))
+ }
+
+ if enableMetrics {
+ opts = append(opts,
+ swarm.WithMetricsTracer(swarm.NewMetricsTracer(swarm.WithRegisterer(cfg.PrometheusRegisterer))))
+ }
+ // TODO: Make the swarm implementation configurable.
+ return swarm.NewSwarm(pid, cfg.Peerstore, eventBus, opts...)
+}
+
+func (cfg *Config) makeAutoNATV2Host() (host.Host, error) {
+ autonatPrivKey, _, err := crypto.GenerateEd25519Key(rand.Reader)
+ if err != nil {
+ return nil, err
+ }
+ ps, err := pstoremem.NewPeerstore()
+ if err != nil {
+ return nil, err
+ }
+
+ autoNatCfg := Config{
+ Transports: cfg.Transports,
+ Muxers: cfg.Muxers,
+ SecurityTransports: cfg.SecurityTransports,
+ Insecure: cfg.Insecure,
+ PSK: cfg.PSK,
+ ConnectionGater: cfg.ConnectionGater,
+ Reporter: cfg.Reporter,
+ PeerKey: autonatPrivKey,
+ Peerstore: ps,
+ DialRanker: swarm.NoDelayDialRanker,
+ UDPBlackHoleSuccessCounter: cfg.UDPBlackHoleSuccessCounter,
+ IPv6BlackHoleSuccessCounter: cfg.IPv6BlackHoleSuccessCounter,
+ ResourceManager: cfg.ResourceManager,
+ SwarmOpts: []swarm.Option{
+ // Don't update black hole state for failed autonat dials
+ swarm.WithReadOnlyBlackHoleDetector(),
+ },
+ }
+ fxopts, err := autoNatCfg.addTransports()
+ if err != nil {
+ return nil, err
+ }
+ var dialerHost host.Host
+ fxopts = append(fxopts,
+ fx.Provide(eventbus.NewBus),
+ fx.Provide(func(lifecycle fx.Lifecycle, b event.Bus) (*swarm.Swarm, error) {
+ lifecycle.Append(fx.Hook{
+ OnStop: func(context.Context) error {
+ return ps.Close()
+ }})
+ sw, err := autoNatCfg.makeSwarm(b, false)
+ return sw, err
+ }),
+ fx.Provide(func(sw *swarm.Swarm) *blankhost.BlankHost {
+ return blankhost.NewBlankHost(sw)
+ }),
+ fx.Provide(func(bh *blankhost.BlankHost) host.Host {
+ return bh
+ }),
+ fx.Provide(func() crypto.PrivKey { return autonatPrivKey }),
+ fx.Provide(func(bh host.Host) peer.ID { return bh.ID() }),
+ fx.Invoke(func(bh *blankhost.BlankHost) {
+ dialerHost = bh
+ }),
+ )
+ app := fx.New(fxopts...)
+ if err := app.Err(); err != nil {
+ return nil, err
+ }
+ err = app.Start(context.Background())
+ if err != nil {
+ return nil, err
+ }
+ go func() {
+ <-dialerHost.Network().(*swarm.Swarm).Done()
+ app.Stop(context.Background())
+ }()
+ return dialerHost, nil
+}
+
+func (cfg *Config) addTransports() ([]fx.Option, error) {
+ fxopts := []fx.Option{
+ fx.WithLogger(func() fxevent.Logger {
+ return &fxevent.SlogLogger{
+ Logger: log.With("system", "fx"),
+ }
+ }),
+ fx.Provide(fx.Annotate(tptu.New, fx.ParamTags(`name:"security"`))),
+ fx.Supply(cfg.Muxers),
+ fx.Provide(func() connmgr.ConnectionGater { return cfg.ConnectionGater }),
+ fx.Provide(func() pnet.PSK { return cfg.PSK }),
+ fx.Provide(func() network.ResourceManager { return cfg.ResourceManager }),
+ fx.Provide(func(upgrader transport.Upgrader) *tcpreuse.ConnMgr {
+ if !cfg.ShareTCPListener {
+ return nil
+ }
+ return tcpreuse.NewConnMgr(tcpreuse.EnvReuseportVal, upgrader)
+ }),
+ fx.Provide(func(cm *quicreuse.ConnManager, sw *swarm.Swarm) libp2pwebrtc.ListenUDPFn {
+ hasQuicAddrPortFor := func(network string, laddr *net.UDPAddr) bool {
+ quicAddrPorts := map[string]struct{}{}
+ for _, addr := range sw.ListenAddresses() {
+ if _, err := addr.ValueForProtocol(ma.P_QUIC_V1); err == nil {
+ netw, addr, err := manet.DialArgs(addr)
+ if err != nil {
+ return false
+ }
+ quicAddrPorts[netw+"_"+addr] = struct{}{}
+ }
+ }
+ _, ok := quicAddrPorts[network+"_"+laddr.String()]
+ return ok
+ }
+
+ return func(network string, laddr *net.UDPAddr) (net.PacketConn, error) {
+ if hasQuicAddrPortFor(network, laddr) {
+ return cm.SharedNonQUICPacketConn(network, laddr)
+ }
+ return net.ListenUDP(network, laddr)
+ }
+ }),
+ }
+ fxopts = append(fxopts, cfg.Transports...)
+ if cfg.Insecure {
+ fxopts = append(fxopts,
+ fx.Provide(
+ fx.Annotate(
+ func(id peer.ID, priv crypto.PrivKey) []sec.SecureTransport {
+ return []sec.SecureTransport{insecure.NewWithIdentity(insecure.ID, id, priv)}
+ },
+ fx.ResultTags(`name:"security"`),
+ ),
+ ),
+ )
+ } else {
+ // fx groups are unordered, but we need to preserve the order of the security transports
+ // First of all, we construct the security transports that are needed,
+ // and save them to a group call security_unordered.
+ for _, s := range cfg.SecurityTransports {
+ fxName := fmt.Sprintf(`name:"security_%s"`, s.ID)
+ fxopts = append(fxopts, fx.Supply(fx.Annotate(s.ID, fx.ResultTags(fxName))))
+ fxopts = append(fxopts,
+ fx.Provide(fx.Annotate(
+ s.Constructor,
+ fx.ParamTags(fxName),
+ fx.As(new(sec.SecureTransport)),
+ fx.ResultTags(`group:"security_unordered"`),
+ )),
+ )
+ }
+ // Then we consume the group security_unordered, and order them by the user's preference.
+ fxopts = append(fxopts, fx.Provide(
+ fx.Annotate(
+ func(secs []sec.SecureTransport) ([]sec.SecureTransport, error) {
+ if len(secs) != len(cfg.SecurityTransports) {
+ return nil, errors.New("inconsistent length for security transports")
+ }
+ t := make([]sec.SecureTransport, 0, len(secs))
+ for _, s := range cfg.SecurityTransports {
+ for _, st := range secs {
+ if s.ID != st.ID() {
+ continue
+ }
+ t = append(t, st)
+ }
+ }
+ return t, nil
+ },
+ fx.ParamTags(`group:"security_unordered"`),
+ fx.ResultTags(`name:"security"`),
+ )))
+ }
+
+ fxopts = append(fxopts, fx.Provide(PrivKeyToStatelessResetKey))
+ fxopts = append(fxopts, fx.Provide(PrivKeyToTokenGeneratorKey))
+ if cfg.QUICReuse != nil {
+ fxopts = append(fxopts, cfg.QUICReuse...)
+ } else {
+ fxopts = append(fxopts,
+ fx.Provide(func(key quic.StatelessResetKey, tokenGenerator quic.TokenGeneratorKey, rcmgr network.ResourceManager, lifecycle fx.Lifecycle) (*quicreuse.ConnManager, error) {
+ opts := []quicreuse.Option{
+ quicreuse.ConnContext(func(ctx context.Context, clientInfo *quic.ClientInfo) (context.Context, error) {
+ // even if creating the quic maddr fails, let the rcmgr decide what to do with the connection
+ addr, err := quicreuse.ToQuicMultiaddr(clientInfo.RemoteAddr, quic.Version1)
+ if err != nil {
+ addr = nil
+ }
+ scope, err := rcmgr.OpenConnection(network.DirInbound, false, addr)
+ if err != nil {
+ return ctx, err
+ }
+ ctx = network.WithConnManagementScope(ctx, scope)
+ context.AfterFunc(ctx, func() {
+ scope.Done()
+ })
+ return ctx, nil
+ }),
+ quicreuse.VerifySourceAddress(func(addr net.Addr) bool {
+ return rcmgr.VerifySourceAddress(addr)
+ }),
+ }
+ if !cfg.DisableMetrics {
+ opts = append(opts, quicreuse.EnableMetrics(cfg.PrometheusRegisterer))
+ }
+ cm, err := quicreuse.NewConnManager(key, tokenGenerator, opts...)
+ if err != nil {
+ return nil, err
+ }
+ lifecycle.Append(fx.StopHook(cm.Close))
+ return cm, nil
+ }),
+ )
+ }
+
+ fxopts = append(fxopts, fx.Invoke(
+ fx.Annotate(
+ func(swrm *swarm.Swarm, tpts []transport.Transport) error {
+ for _, t := range tpts {
+ if err := swrm.AddTransport(t); err != nil {
+ return err
+ }
+ }
+ return nil
+ },
+ fx.ParamTags("", `group:"transport"`),
+ )),
+ )
+ if cfg.Relay {
+ fxopts = append(fxopts, fx.Invoke(circuitv2.AddTransport))
+ }
+ return fxopts, nil
+}
+
+func (cfg *Config) newBasicHost(swrm *swarm.Swarm, eventBus event.Bus, an *autonatv2.AutoNAT, o bhost.ObservedAddrsManager) (*bhost.BasicHost, error) {
+ h, err := bhost.NewHost(swrm, &bhost.HostOpts{
+ EventBus: eventBus,
+ ConnManager: cfg.ConnManager,
+ AddrsFactory: cfg.AddrsFactory,
+ NATManager: cfg.NATManager,
+ EnablePing: !cfg.DisablePing,
+ UserAgent: cfg.UserAgent,
+ ProtocolVersion: cfg.ProtocolVersion,
+ EnableHolePunching: cfg.EnableHolePunching,
+ HolePunchingOptions: cfg.HolePunchingOptions,
+ EnableRelayService: cfg.EnableRelayService,
+ RelayServiceOpts: cfg.RelayServiceOpts,
+ EnableMetrics: !cfg.DisableMetrics,
+ PrometheusRegisterer: cfg.PrometheusRegisterer,
+ AutoNATv2: an,
+ ObservedAddrsManager: o,
+ })
+ if err != nil {
+ return nil, err
+ }
+ return h, nil
+}
+
+func (cfg *Config) validate() error {
+ if cfg.EnableAutoRelay && !cfg.Relay {
+ return fmt.Errorf("cannot enable autorelay; relay is not enabled")
+ }
+ // If possible check that the resource manager conn limit is higher than the
+ // limit set in the conn manager.
+ if l, ok := cfg.ResourceManager.(connmgr.GetConnLimiter); ok {
+ err := cfg.ConnManager.CheckLimit(l)
+ if err != nil {
+ log.Warn("rcmgr limit conflicts with connmgr limit", "err", err)
+ }
+ }
+
+ if len(cfg.PSK) > 0 && cfg.ShareTCPListener {
+ return errors.New("cannot use shared TCP listener with PSK")
+ }
+
+ return nil
+}
+
+// NewNode constructs a new libp2p Host from the Config.
+//
+// This function consumes the config. Do not reuse it (really!).
+func (cfg *Config) NewNode() (host.Host, error) {
+
+ validateErr := cfg.validate()
+ if validateErr != nil {
+ if cfg.ResourceManager != nil {
+ cfg.ResourceManager.Close()
+ }
+ if cfg.ConnManager != nil {
+ cfg.ConnManager.Close()
+ }
+ if cfg.Peerstore != nil {
+ cfg.Peerstore.Close()
+ }
+
+ return nil, validateErr
+ }
+
+ if !cfg.DisableMetrics {
+ rcmgr.MustRegisterWith(cfg.PrometheusRegisterer)
+ }
+
+ fxopts := []fx.Option{
+ fx.Provide(func() event.Bus {
+ return eventbus.NewBus(eventbus.WithMetricsTracer(eventbus.NewMetricsTracer(eventbus.WithRegisterer(cfg.PrometheusRegisterer))))
+ }),
+ fx.Provide(func() crypto.PrivKey {
+ return cfg.PeerKey
+ }),
+ // Make sure the swarm constructor depends on the quicreuse.ConnManager.
+ // That way, the ConnManager will be started before the swarm, and more importantly,
+ // the swarm will be stopped before the ConnManager.
+ fx.Provide(func(eventBus event.Bus, _ *quicreuse.ConnManager, lifecycle fx.Lifecycle) (*swarm.Swarm, error) {
+ sw, err := cfg.makeSwarm(eventBus, !cfg.DisableMetrics)
+ if err != nil {
+ return nil, err
+ }
+ lifecycle.Append(fx.Hook{
+ OnStart: func(context.Context) error {
+ // TODO: This method succeeds if listening on one address succeeds. We
+ // should probably fail if listening on *any* addr fails.
+ return sw.Listen(cfg.ListenAddrs...)
+ },
+ OnStop: func(context.Context) error {
+ return sw.Close()
+ },
+ })
+ return sw, nil
+ }),
+ fx.Provide(func(eventBus event.Bus, s *swarm.Swarm, lifecycle fx.Lifecycle) (bhost.ObservedAddrsManager, error) {
+ if cfg.DisableIdentifyAddressDiscovery {
+ return nil, nil
+ }
+ o, err := observedaddrs.NewManager(eventBus, s)
+ if err != nil {
+ return nil, err
+ }
+ lifecycle.Append(fx.Hook{
+ OnStart: func(context.Context) error {
+ o.Start(s)
+ return nil
+ },
+ OnStop: func(context.Context) error {
+ return o.Close()
+ },
+ })
+ return o, nil
+ }),
+ fx.Provide(func() (*autonatv2.AutoNAT, error) {
+ if !cfg.EnableAutoNATv2 {
+ return nil, nil
+ }
+ ah, err := cfg.makeAutoNATV2Host()
+ if err != nil {
+ return nil, err
+ }
+ var mt autonatv2.MetricsTracer
+ if !cfg.DisableMetrics {
+ mt = autonatv2.NewMetricsTracer(cfg.PrometheusRegisterer)
+ }
+ autoNATv2, err := autonatv2.New(ah, autonatv2.WithMetricsTracer(mt))
+ if err != nil {
+ return nil, fmt.Errorf("failed to create autonatv2: %w", err)
+ }
+ return autoNATv2, nil
+ }),
+ fx.Provide(cfg.newBasicHost),
+ fx.Provide(func(bh *bhost.BasicHost) identify.IDService {
+ return bh.IDService()
+ }),
+ fx.Provide(func(bh *bhost.BasicHost) host.Host {
+ return bh
+ }),
+ fx.Provide(func(h *swarm.Swarm) peer.ID { return h.LocalPeer() }),
+ }
+ transportOpts, err := cfg.addTransports()
+ if err != nil {
+ return nil, err
+ }
+ fxopts = append(fxopts, transportOpts...)
+
+ // Configure routing
+ if cfg.Routing != nil {
+ fxopts = append(fxopts,
+ fx.Provide(cfg.Routing),
+ fx.Provide(func(h host.Host, router routing.PeerRouting) *routed.RoutedHost {
+ return routed.Wrap(h, router)
+ }),
+ )
+ }
+
+ // enable autorelay
+ fxopts = append(fxopts,
+ fx.Invoke(func(h *bhost.BasicHost, lifecycle fx.Lifecycle) error {
+ if cfg.EnableAutoRelay {
+ if !cfg.DisableMetrics {
+ mt := autorelay.WithMetricsTracer(
+ autorelay.NewMetricsTracer(autorelay.WithRegisterer(cfg.PrometheusRegisterer)))
+ mtOpts := []autorelay.Option{mt}
+ cfg.AutoRelayOpts = append(mtOpts, cfg.AutoRelayOpts...)
+ }
+
+ ar, err := autorelay.NewAutoRelay(h, cfg.AutoRelayOpts...)
+ if err != nil {
+ return err
+ }
+ lifecycle.Append(fx.StartStopHook(ar.Start, ar.Close))
+ return nil
+ }
+ return nil
+ }),
+ )
+
+ var bh *bhost.BasicHost
+ fxopts = append(fxopts, fx.Invoke(func(bho *bhost.BasicHost) { bh = bho }))
+ fxopts = append(fxopts, fx.Invoke(func(h *bhost.BasicHost, lifecycle fx.Lifecycle) {
+ lifecycle.Append(fx.StartHook(h.Start))
+ }))
+
+ var rh *routed.RoutedHost
+ if cfg.Routing != nil {
+ fxopts = append(fxopts, fx.Invoke(func(bho *routed.RoutedHost) { rh = bho }))
+ }
+
+ fxopts = append(fxopts, cfg.UserFxOptions...)
+
+ app := fx.New(fxopts...)
+ if err := app.Start(context.Background()); err != nil {
+ return nil, err
+ }
+
+ if err := cfg.addAutoNAT(bh); err != nil {
+ app.Stop(context.Background())
+ if cfg.Routing != nil {
+ rh.Close()
+ } else {
+ bh.Close()
+ }
+ return nil, err
+ }
+
+ if cfg.Routing != nil {
+ return &closableRoutedHost{
+ closableBasicHost: closableBasicHost{
+ App: app,
+ BasicHost: bh,
+ },
+ RoutedHost: rh,
+ }, nil
+ }
+ return &closableBasicHost{App: app, BasicHost: bh}, nil
+}
+
+func (cfg *Config) addAutoNAT(h *bhost.BasicHost) error {
+ // Only use public addresses for autonat
+ addrFunc := func() []ma.Multiaddr {
+ return slices.DeleteFunc(h.AllAddrs(), func(a ma.Multiaddr) bool { return !manet.IsPublicAddr(a) })
+ }
+ if cfg.AddrsFactory != nil {
+ addrFunc = func() []ma.Multiaddr {
+ return slices.DeleteFunc(
+ slices.Clone(cfg.AddrsFactory(h.AllAddrs())),
+ func(a ma.Multiaddr) bool { return !manet.IsPublicAddr(a) })
+ }
+ }
+ autonatOpts := []autonat.Option{
+ autonat.UsingAddresses(addrFunc),
+ }
+ if !cfg.DisableMetrics {
+ autonatOpts = append(autonatOpts, autonat.WithMetricsTracer(
+ autonat.NewMetricsTracer(autonat.WithRegisterer(cfg.PrometheusRegisterer)),
+ ))
+ }
+ if cfg.AutoNATConfig.ThrottleInterval != 0 {
+ autonatOpts = append(autonatOpts,
+ autonat.WithThrottling(cfg.AutoNATConfig.ThrottleGlobalLimit, cfg.AutoNATConfig.ThrottleInterval),
+ autonat.WithPeerThrottling(cfg.AutoNATConfig.ThrottlePeerLimit))
+ }
+ if cfg.AutoNATConfig.EnableService {
+ autonatPrivKey, _, err := crypto.GenerateEd25519Key(rand.Reader)
+ if err != nil {
+ return err
+ }
+ ps, err := pstoremem.NewPeerstore()
+ if err != nil {
+ return err
+ }
+
+ // Pull out the pieces of the config that we _actually_ care about.
+ // Specifically, don't set up things like listeners, identify, etc.
+ autoNatCfg := Config{
+ Transports: cfg.Transports,
+ Muxers: cfg.Muxers,
+ SecurityTransports: cfg.SecurityTransports,
+ Insecure: cfg.Insecure,
+ PSK: cfg.PSK,
+ ConnectionGater: cfg.ConnectionGater,
+ Reporter: cfg.Reporter,
+ PeerKey: autonatPrivKey,
+ Peerstore: ps,
+ DialRanker: swarm.NoDelayDialRanker,
+ ResourceManager: cfg.ResourceManager,
+ SwarmOpts: []swarm.Option{
+ swarm.WithUDPBlackHoleSuccessCounter(nil),
+ swarm.WithIPv6BlackHoleSuccessCounter(nil),
+ },
+ }
+
+ fxopts, err := autoNatCfg.addTransports()
+ if err != nil {
+ return err
+ }
+ var dialer *swarm.Swarm
+
+ fxopts = append(fxopts,
+ fx.Provide(eventbus.NewBus),
+ fx.Provide(func(lifecycle fx.Lifecycle, b event.Bus) (*swarm.Swarm, error) {
+ lifecycle.Append(fx.Hook{
+ OnStop: func(context.Context) error {
+ return ps.Close()
+ }})
+ var err error
+ dialer, err = autoNatCfg.makeSwarm(b, false)
+ return dialer, err
+
+ }),
+ fx.Provide(func(s *swarm.Swarm) peer.ID { return s.LocalPeer() }),
+ fx.Provide(func() crypto.PrivKey { return autonatPrivKey }),
+ )
+ app := fx.New(fxopts...)
+ if err := app.Err(); err != nil {
+ return err
+ }
+ err = app.Start(context.Background())
+ if err != nil {
+ return err
+ }
+ go func() {
+ <-dialer.Done() // The swarm used for autonat has closed, we can cleanup now
+ app.Stop(context.Background())
+ }()
+ autonatOpts = append(autonatOpts, autonat.EnableService(dialer))
+ }
+ if cfg.AutoNATConfig.ForceReachability != nil {
+ autonatOpts = append(autonatOpts, autonat.WithReachability(*cfg.AutoNATConfig.ForceReachability))
+ }
+
+ autonat, err := autonat.New(h, autonatOpts...)
+ if err != nil {
+ return fmt.Errorf("autonat init failed: %w", err)
+ }
+ h.SetAutoNat(autonat)
+ return nil
+}
+
+// Option is a libp2p config option that can be given to the libp2p constructor
+// (`libp2p.New`).
+type Option func(cfg *Config) error
+
+// Apply applies the given options to the config, returning the first error
+// encountered (if any).
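+//
+// A minimal sketch (someOpt and anotherOpt are illustrative, not real API):
+//
+//    var cfg Config
+//    if err := cfg.Apply(someOpt, nil, anotherOpt); err != nil {
+//        // handle the first error returned by an option
+//    }
+//
+// Nil options are skipped, so callers may pass conditionally built options
+// without filtering.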
+func (cfg *Config) Apply(opts ...Option) error {
+ for _, opt := range opts {
+ if opt == nil {
+ continue
+ }
+ if err := opt(cfg); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/config/config_test.go b/config/config_test.go
new file mode 100644
index 0000000000..652d807d00
--- /dev/null
+++ b/config/config_test.go
@@ -0,0 +1,23 @@
+package config
+
+import (
+ "testing"
+)
+
+func TestNilOption(t *testing.T) {
+ var cfg Config
+ optsRun := 0
+ opt := func(_ *Config) error {
+ optsRun++
+ return nil
+ }
+ if err := cfg.Apply(nil); err != nil {
+ t.Fatal(err)
+ }
+ if err := cfg.Apply(opt, nil, nil, opt, opt, nil); err != nil {
+ t.Fatal(err)
+ }
+ if optsRun != 3 {
+ t.Fatalf("expected to have handled 3 options, handled %d", optsRun)
+ }
+}
diff --git a/config/host.go b/config/host.go
new file mode 100644
index 0000000000..804dcdd0e2
--- /dev/null
+++ b/config/host.go
@@ -0,0 +1,33 @@
+package config
+
+import (
+ "context"
+
+ basichost "github.com/libp2p/go-libp2p/p2p/host/basic"
+ routed "github.com/libp2p/go-libp2p/p2p/host/routed"
+
+ "go.uber.org/fx"
+)
+
+type closableBasicHost struct {
+ *fx.App
+ *basichost.BasicHost
+}
+
+func (h *closableBasicHost) Close() error {
+ _ = h.App.Stop(context.Background())
+ return h.BasicHost.Close()
+}
+
+type closableRoutedHost struct {
+ // closableBasicHost is embedded here so that interface assertions on
+ // BasicHost exported methods work correctly.
+ closableBasicHost
+ *routed.RoutedHost
+}
+
+func (h *closableRoutedHost) Close() error {
+ _ = h.App.Stop(context.Background())
+ // The routed host will close the basic host
+ return h.RoutedHost.Close()
+}
diff --git a/config/quic.go b/config/quic.go
new file mode 100644
index 0000000000..66c40da97f
--- /dev/null
+++ b/config/quic.go
@@ -0,0 +1,43 @@
+package config
+
+import (
+ "crypto/sha256"
+ "io"
+
+ "golang.org/x/crypto/hkdf"
+
+ "github.com/libp2p/go-libp2p/core/crypto"
+
+ "github.com/quic-go/quic-go"
+)
+
+const (
+ statelessResetKeyInfo = "libp2p quic stateless reset key"
+ tokenGeneratorKeyInfo = "libp2p quic token generator key"
+)
+
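+// PrivKeyToStatelessResetKey deterministically derives a QUIC stateless reset
+// key from the host's private key via HKDF-SHA256, so the same identity key
+// yields the same reset key across restarts.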
+func PrivKeyToStatelessResetKey(key crypto.PrivKey) (quic.StatelessResetKey, error) {
+ var statelessResetKey quic.StatelessResetKey
+ keyBytes, err := key.Raw()
+ if err != nil {
+ return statelessResetKey, err
+ }
+ keyReader := hkdf.New(sha256.New, keyBytes, nil, []byte(statelessResetKeyInfo))
+ if _, err := io.ReadFull(keyReader, statelessResetKey[:]); err != nil {
+ return statelessResetKey, err
+ }
+ return statelessResetKey, nil
+}
+
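+// PrivKeyToTokenGeneratorKey deterministically derives a QUIC token generator
+// key from the host's private key via HKDF-SHA256, using a distinct info
+// string so it stays independent of the stateless reset key.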
+func PrivKeyToTokenGeneratorKey(key crypto.PrivKey) (quic.TokenGeneratorKey, error) {
+ var tokenKey quic.TokenGeneratorKey
+ keyBytes, err := key.Raw()
+ if err != nil {
+ return tokenKey, err
+ }
+ keyReader := hkdf.New(sha256.New, keyBytes, nil, []byte(tokenGeneratorKeyInfo))
+ if _, err := io.ReadFull(keyReader, tokenKey[:]); err != nil {
+ return tokenKey, err
+ }
+ return tokenKey, nil
+}
diff --git a/core/alias.go b/core/alias.go
new file mode 100644
index 0000000000..515083bb71
--- /dev/null
+++ b/core/alias.go
@@ -0,0 +1,51 @@
+// Package core provides convenient access to foundational, central go-libp2p primitives via type aliases.
+package core
+
+import (
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/protocol"
+
+ "github.com/multiformats/go-multiaddr"
+)
+
+// Multiaddr aliases the Multiaddr type from github.com/multiformats/go-multiaddr.
+//
+// Refer to the docs on that type for more info.
+type Multiaddr = multiaddr.Multiaddr
+
+// PeerID aliases peer.ID.
+//
+// Refer to the docs on that type for more info.
+type PeerID = peer.ID
+
+// ProtocolID aliases protocol.ID.
+//
+// Refer to the docs on that type for more info.
+type ProtocolID = protocol.ID
+
+// PeerAddrInfo aliases peer.AddrInfo.
+//
+// Refer to the docs on that type for more info.
+type PeerAddrInfo = peer.AddrInfo
+
+// Host aliases host.Host.
+//
+// Refer to the docs on that type for more info.
+type Host = host.Host
+
+// Network aliases network.Network.
+//
+// Refer to the docs on that type for more info.
+type Network = network.Network
+
+// Conn aliases network.Conn.
+//
+// Refer to the docs on that type for more info.
+type Conn = network.Conn
+
+// Stream aliases network.Stream.
+//
+// Refer to the docs on that type for more info.
+type Stream = network.Stream
diff --git a/core/canonicallog/canonicallog.go b/core/canonicallog/canonicallog.go
new file mode 100644
index 0000000000..9c31e0d877
--- /dev/null
+++ b/core/canonicallog/canonicallog.go
@@ -0,0 +1,92 @@
+package canonicallog
+
+import (
+ "context"
+ "log/slog"
+ "math/rand"
+ "net"
+ "os"
+ "runtime"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+
+ logging "github.com/libp2p/go-libp2p/gologshim"
+ "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
+var log = slog.New(
+ slog.NewTextHandler(
+ os.Stderr,
+ &slog.HandlerOptions{
+ Level: logging.ConfigFromEnv().LevelForSystem("canonical-log"),
+ AddSource: true}))
+
+// logWithSkip logs at level with AddSource pointing to the caller `skip` frames up
+// from *this* function's caller (so skip=0 => the immediate caller of logWithSkip).
+func logWithSkip(ctx context.Context, l *slog.Logger, level slog.Level, skip int, msg string, args ...any) {
+ if !l.Enabled(ctx, level) {
+ return
+ }
+
+ var pcs [1]uintptr
+ // +2 to skip runtime.Callers and logWithSkip itself.
+ runtime.Callers(skip+2, pcs[:])
+
+ r := slog.NewRecord(time.Now(), level, msg, pcs[0])
+ r.Add(args...)
+ _ = l.Handler().Handle(ctx, r)
+}
+
+// LogMisbehavingPeer is the canonical way to log a misbehaving peer.
+// Protocols should use this to identify a misbehaving peer to allow the end
+// user to easily identify these nodes across protocols and libp2p.
+func LogMisbehavingPeer(p peer.ID, peerAddr multiaddr.Multiaddr, component string, err error, msg string) {
+ logWithSkip(context.Background(), log, slog.LevelWarn, 1, "CANONICAL_MISBEHAVING_PEER",
+ "peer", p,
+ "addr", peerAddr,
+ "component", component,
+ "err", err,
+ "msg", msg)
+}
+
+// LogMisbehavingPeerNetAddr is the canonical way to log a misbehaving peer.
+// Protocols should use this to identify a misbehaving peer to allow the end
+// user to easily identify these nodes across protocols and libp2p.
+func LogMisbehavingPeerNetAddr(p peer.ID, peerAddr net.Addr, component string, originalErr error, msg string) {
+ ma, err := manet.FromNetAddr(peerAddr)
+ if err != nil {
+ logWithSkip(context.Background(), log, slog.LevelWarn, 1, "CANONICAL_MISBEHAVING_PEER",
+ "peer", p,
+ "net_addr", peerAddr.String(),
+ "component", component,
+ "err", originalErr,
+ "msg", msg)
+ return
+ }
+
+ LogMisbehavingPeer(p, ma, component, originalErr, msg)
+}
+
+// LogPeerStatus logs any useful information about a peer. It takes in a sample
+// rate and will only log one in every sampleRate messages (randomly). This is
+// useful in surfacing events that are normal in isolation, but may be abnormal
+// in large quantities. For example, a successful connection from an IP address
+// is normal. 10,000 connections from that same IP address is not normal. libp2p
+// itself does nothing besides emitting this log. Hook this up to another tool
+// like fail2ban to act on the log.
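+//
+// For example, to log roughly one in every 100 established connections
+// (p and addr are illustrative):
+//
+//    LogPeerStatus(100, p, addr, "connection_status", "established")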
+func LogPeerStatus(sampleRate int, p peer.ID, peerAddr multiaddr.Multiaddr, keyVals ...string) {
+ if rand.Intn(sampleRate) == 0 {
+ args := []any{
+ "peer", p,
+ "addr", peerAddr.String(),
+ "sample_rate", sampleRate,
+ }
+ // Add the additional key-value pairs
+ for _, kv := range keyVals {
+ args = append(args, kv)
+ }
+ logWithSkip(context.Background(), log, slog.LevelInfo, 1, "CANONICAL_PEER_STATUS", args...)
+ }
+}
diff --git a/core/canonicallog/canonicallog_test.go b/core/canonicallog/canonicallog_test.go
new file mode 100644
index 0000000000..5a79975c43
--- /dev/null
+++ b/core/canonicallog/canonicallog_test.go
@@ -0,0 +1,29 @@
+package canonicallog
+
+import (
+ "fmt"
+ "log/slog"
+ "net"
+ "os"
+ "testing"
+
+ "github.com/libp2p/go-libp2p/core/test"
+
+ "github.com/multiformats/go-multiaddr"
+)
+
+func TestLogs(t *testing.T) {
+ originalLogger := log
+ defer func() {
+ log = originalLogger
+ }()
+ // Override to print debug logs
+ log = slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelInfo, AddSource: true}))
+
+ LogMisbehavingPeer(test.RandPeerIDFatal(t), multiaddr.StringCast("/ip4/1.2.3.4"), "somecomponent", fmt.Errorf("something"), "hi")
+
+ netAddr := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 80}
+ LogMisbehavingPeerNetAddr(test.RandPeerIDFatal(t), netAddr, "somecomponent", fmt.Errorf("something"), "hello \"world\"")
+
+ LogPeerStatus(1, test.RandPeerIDFatal(t), multiaddr.StringCast("/ip4/1.2.3.4"), "extra", "info")
+}
diff --git a/core/connmgr/decay.go b/core/connmgr/decay.go
new file mode 100644
index 0000000000..783a06c3a8
--- /dev/null
+++ b/core/connmgr/decay.go
@@ -0,0 +1,109 @@
+package connmgr
+
+import (
+ "io"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+)
+
+// Decayer is implemented by connection managers supporting decaying tags. A
+// decaying tag is one whose value automatically decays over time.
+//
+// The actual application of the decay behaviour is encapsulated in a
+// user-provided decaying function (DecayFn). The function is called on every
+// tick (determined by the interval parameter), and returns either the new value
+// of the tag, or whether it should be erased altogether.
+//
+// We do not set values on a decaying tag. Rather, we "bump" decaying tags by a
+// delta. This calls the BumpFn with the old value and the delta, to determine
+// the new value.
+//
+// Such a pluggable design affords a great deal of flexibility and versatility.
+// Behaviours that are straightforward to implement include:
+//
+// - Decay a tag by -1, or by half its current value, on every tick.
+// - Every time a value is bumped, sum it to its current value.
+// - Exponentially boost a score with every bump.
+// - Sum the incoming score, but keep it within min, max bounds.
+//
+// Commonly used DecayFns and BumpFns are provided in this package.
+type Decayer interface {
+ io.Closer
+
+ // RegisterDecayingTag creates and registers a new decaying tag, if and only
+ // if a tag with the supplied name doesn't exist yet. Otherwise, an error is
+ // returned.
+ //
+ // The caller provides the interval at which the tag is refreshed, as well
+ // as the decay function and the bump function. Refer to godocs on DecayFn
+ // and BumpFn for more info.
+ RegisterDecayingTag(name string, interval time.Duration, decayFn DecayFn, bumpFn BumpFn) (DecayingTag, error)
+}
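+
+// A minimal usage sketch, assuming mgr is a ConnManager whose implementation
+// supports decay (mgr and p are illustrative):
+//
+//    if d, ok := SupportsDecay(mgr); ok {
+//        tag, err := d.RegisterDecayingTag("usefulness", time.Minute, DecayFixed(1), BumpSumBounded(0, 100))
+//        if err == nil {
+//            _ = tag.Bump(p, 10)
+//        }
+//    }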
+
+// DecayFn applies a decay to the peer's score. The implementation must call
+// DecayFn at the interval supplied when registering the tag.
+//
+// It receives a copy of the decaying value, and returns the score after
+// applying the decay, as well as a flag to signal if the tag should be erased.
+type DecayFn func(value DecayingValue) (after int, rm bool)
+
+// BumpFn applies a delta onto an existing score, and returns the new score.
+//
+// Non-trivial bump functions include exponential boosting, moving averages,
+// ceilings, etc.
+type BumpFn func(value DecayingValue, delta int) (after int)
+
+// DecayingTag represents a decaying tag. The tag is a long-lived general
+// object, used to operate on tag values for peers.
+type DecayingTag interface {
+ // Name returns the name of the tag.
+ Name() string
+
+ // Interval is the effective interval at which this tag will tick. Upon
+ // registration, the desired interval may be overwritten depending on the
+ // decayer's resolution, and this method allows you to obtain the effective
+ // interval.
+ Interval() time.Duration
+
+ // Bump applies a delta to a tag value, calling its bump function. The bump
+ // will be applied asynchronously, and a non-nil error indicates a fault
+ // when queuing.
+ Bump(peer peer.ID, delta int) error
+
+ // Remove removes a decaying tag from a peer. The removal will be applied
+ // asynchronously, and a non-nil error indicates a fault when queuing.
+ Remove(peer peer.ID) error
+
+ // Close closes a decaying tag. The Decayer will stop tracking this tag,
+ // and the state of all peers in the Connection Manager holding this tag
+ // will be updated.
+ //
+ // The deletion is performed asynchronously.
+ //
+ // Once deleted, a tag should not be used, and further calls to Bump/Remove
+ // will error.
+ //
+ // Duplicate calls to Remove will not return errors, but a failure to queue
+ // the first actual removal will (e.g. when the system is backlogged).
+ Close() error
+}
+
+// DecayingValue represents a value for a decaying tag.
+type DecayingValue struct {
+ // Tag points to the tag this value belongs to.
+ Tag DecayingTag
+
+ // Peer is the peer ID to whom this value is associated.
+ Peer peer.ID
+
+ // Added is the timestamp when this value was added for the first time for
+ // a tag and a peer.
+ Added time.Time
+
+ // LastVisit is the timestamp of the last visit.
+ LastVisit time.Time
+
+ // Value is the current value of the tag.
+ Value int
+}
diff --git a/core/connmgr/gater.go b/core/connmgr/gater.go
new file mode 100644
index 0000000000..82fa56a876
--- /dev/null
+++ b/core/connmgr/gater.go
@@ -0,0 +1,89 @@
+package connmgr
+
+import (
+ ma "github.com/multiformats/go-multiaddr"
+
+ "github.com/libp2p/go-libp2p/core/control"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+)
+
+// ConnectionGater can be implemented by a type that supports active
+// inbound or outbound connection gating.
+//
+// ConnectionGaters are active, whereas ConnManagers tend to be passive.
+//
+// A ConnectionGater will be consulted during different states in the lifecycle
+// of a connection being established/upgraded. Specific functions will be called
+// throughout the process, to allow you to intercept the connection at that stage.
+//
+// InterceptPeerDial is called on an imminent outbound peer dial request, prior
+// to the addresses of that peer being available/resolved. Blocking connections
+// at this stage is typical for blacklisting scenarios.
+//
+// InterceptAddrDial is called on an imminent outbound dial to a peer on a
+// particular address. Blocking connections at this stage is typical for
+// address filtering.
+//
+// InterceptAccept is called as soon as a transport listener receives an
+// inbound connection request, before any upgrade takes place. Transports that
+// accept already secure and/or multiplexed connections (e.g. QUIC)
+// MUST call this method regardless, for correctness/consistency.
+//
+// InterceptSecured is called for both inbound and outbound connections,
+// after a security handshake has taken place and we've authenticated the peer.
+//
+// InterceptUpgraded is called for inbound and outbound connections, after
+// libp2p has finished upgrading the connection entirely to a secure,
+// multiplexed channel.
+//
+// This interface can be used to implement *strict/active* connection management
+// policies, such as hard limiting of connections once a maximum count has been
+// reached, maintaining a peer blacklist, or limiting connections by transport
+// quotas.
+//
+// EXPERIMENTAL: a DISCONNECT protocol/message will be supported in the future.
+// This allows gaters and other components to communicate the intention behind
+// a connection closure, to curtail potential reconnection attempts.
+//
+// For now, InterceptUpgraded can return a non-zero DisconnectReason when
+// blocking a connection, but this interface is likely to change in the future
+// as we solidify this feature. The reason why only this method can handle
+// DisconnectReasons is that we require stream multiplexing capability to open a
+// control protocol stream to transmit the message.
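+//
+// A minimal allow-everything gater sketch (illustrative only; a real gater
+// would apply policy inside these hooks):
+//
+//    type allowAll struct{}
+//
+//    func (allowAll) InterceptPeerDial(peer.ID) bool               { return true }
+//    func (allowAll) InterceptAddrDial(peer.ID, ma.Multiaddr) bool { return true }
+//    func (allowAll) InterceptAccept(network.ConnMultiaddrs) bool  { return true }
+//    func (allowAll) InterceptSecured(network.Direction, peer.ID, network.ConnMultiaddrs) bool {
+//        return true
+//    }
+//    func (allowAll) InterceptUpgraded(network.Conn) (bool, control.DisconnectReason) {
+//        return true, 0
+//    }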
+type ConnectionGater interface {
+ // InterceptPeerDial tests whether we're permitted to Dial the specified peer.
+ //
+ // This is called by the network.Network implementation when dialling a peer.
+ InterceptPeerDial(p peer.ID) (allow bool)
+
+ // InterceptAddrDial tests whether we're permitted to dial the specified
+ // multiaddr for the given peer.
+ //
+ // This is called by the network.Network implementation after it has
+ // resolved the peer's addrs, and prior to dialling each.
+ InterceptAddrDial(peer.ID, ma.Multiaddr) (allow bool)
+
+ // InterceptAccept tests whether an incipient inbound connection is allowed.
+ //
+ // This is called by the upgrader, or by the transport directly (e.g. QUIC,
+ // Bluetooth), straight after it has accepted a connection from its socket.
+ InterceptAccept(network.ConnMultiaddrs) (allow bool)
+
+ // InterceptSecured tests whether a given connection, now authenticated,
+ // is allowed.
+ //
+ // This is called by the upgrader, after it has performed the security
+ // handshake, and before it negotiates the muxer, or directly by the
+ // transport, at the exact same checkpoint.
+ InterceptSecured(network.Direction, peer.ID, network.ConnMultiaddrs) (allow bool)
+
+ // InterceptUpgraded tests whether a fully capable connection is allowed.
+ //
+ // At this point, the connection has been secured and a multiplexer has
+ // been selected.
+ // When rejecting a connection, the gater can return a DisconnectReason.
+ // Refer to the godoc on the ConnectionGater type for more information.
+ //
+ // NOTE: the go-libp2p implementation currently IGNORES the disconnect reason.
+ InterceptUpgraded(network.Conn) (allow bool, reason control.DisconnectReason)
+}
diff --git a/core/connmgr/manager.go b/core/connmgr/manager.go
new file mode 100644
index 0000000000..c4d796b39a
--- /dev/null
+++ b/core/connmgr/manager.go
@@ -0,0 +1,101 @@
+// Package connmgr provides connection tracking and management interfaces for libp2p.
+//
+// The ConnManager interface exported from this package allows libp2p to enforce an
+// upper bound on the total number of open connections. To avoid service disruptions,
+// connections can be tagged with metadata and optionally "protected" to ensure that
+// essential connections are not arbitrarily cut.
+package connmgr
+
+import (
+ "context"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+)
+
+// SupportsDecay evaluates if the provided ConnManager supports decay, and if
+// so, it returns the Decayer object. Refer to godocs on Decayer for more info.
+func SupportsDecay(mgr ConnManager) (Decayer, bool) {
+ d, ok := mgr.(Decayer)
+ return d, ok
+}
+
+// ConnManager tracks connections to peers, and allows consumers to associate
+// metadata with each peer.
+//
+// It enables connections to be trimmed based on implementation-defined
+// heuristics. The ConnManager allows libp2p to enforce an upper bound on the
+// total number of open connections.
+//
+// ConnManagers supporting decaying tags implement Decayer. Use the
+// SupportsDecay function to safely cast an instance to Decayer, if supported.
+type ConnManager interface {
+ // TagPeer tags a peer with a string, associating a weight with the tag.
+ TagPeer(peer.ID, string, int)
+
+ // UntagPeer removes the tagged value from the peer.
+ UntagPeer(p peer.ID, tag string)
+
+ // UpsertTag updates an existing tag or inserts a new one.
+ //
+ // The connection manager calls the upsert function supplying the current
+ // value of the tag (or zero if inexistent). The return value is used as
+ // the new value of the tag.
+ UpsertTag(p peer.ID, tag string, upsert func(int) int)
+
+ // GetTagInfo returns the metadata associated with the peer,
+ // or nil if no metadata has been recorded for the peer.
+ GetTagInfo(p peer.ID) *TagInfo
+
+ // TrimOpenConns terminates open connections based on an implementation-defined
+ // heuristic.
+ TrimOpenConns(ctx context.Context)
+
+ // Notifee returns an implementation that can be called back to inform of
+ // opened and closed connections.
+ Notifee() network.Notifiee
+
+ // Protect protects a peer from having its connection(s) pruned.
+ //
+ // Tagging allows different parts of the system to manage protections without interfering with one another.
+ //
+ // Calls to Protect() with the same tag are idempotent. They are not refcounted, so after multiple calls
+ // to Protect() with the same tag, a single Unprotect() call bearing the same tag will revoke the protection.
+ Protect(id peer.ID, tag string)
+
+ // Unprotect removes a protection that may have been placed on a peer, under the specified tag.
+ //
+ // The return value indicates whether the peer continues to be protected after this call, by way of a different tag.
+ // See notes on Protect() for more info.
+ Unprotect(id peer.ID, tag string) (protected bool)
+
+ // IsProtected returns true if the peer is protected for some tag; if the tag is the empty string
+ // then it will return true if the peer is protected for any tag
+ IsProtected(id peer.ID, tag string) (protected bool)
+
+ // CheckLimit will return an error if the connection manager's internal
+ // connection limit exceeds the provided system limit.
+ CheckLimit(l GetConnLimiter) error
+
+ // Close closes the connection manager and stops background processes.
+ Close() error
+}
+
+// TagInfo stores metadata associated with a peer.
+type TagInfo struct {
+ FirstSeen time.Time
+ Value int
+
+ // Tags maps tag ids to the numerical values.
+ Tags map[string]int
+
+ // Conns maps connection ids (such as remote multiaddr) to their creation time.
+ Conns map[string]time.Time
+}
+
+// GetConnLimiter provides access to a component's total connection limit.
+type GetConnLimiter interface {
+ // GetConnLimit returns the total connection limit of the implementing component.
+ GetConnLimit() int
+}
diff --git a/core/connmgr/null.go b/core/connmgr/null.go
new file mode 100644
index 0000000000..3c7e6aef05
--- /dev/null
+++ b/core/connmgr/null.go
@@ -0,0 +1,25 @@
+package connmgr
+
+import (
+ "context"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+)
+
+// NullConnMgr is a ConnMgr that provides no functionality.
+type NullConnMgr struct{}
+
+var _ ConnManager = (*NullConnMgr)(nil)
+
+func (NullConnMgr) TagPeer(peer.ID, string, int) {}
+func (NullConnMgr) UntagPeer(peer.ID, string) {}
+func (NullConnMgr) UpsertTag(peer.ID, string, func(int) int) {}
+func (NullConnMgr) GetTagInfo(peer.ID) *TagInfo { return &TagInfo{} }
+func (NullConnMgr) TrimOpenConns(_ context.Context) {}
+func (NullConnMgr) Notifee() network.Notifiee { return network.GlobalNoopNotifiee }
+func (NullConnMgr) Protect(peer.ID, string) {}
+func (NullConnMgr) Unprotect(peer.ID, string) bool { return false }
+func (NullConnMgr) IsProtected(peer.ID, string) bool { return false }
+func (NullConnMgr) CheckLimit(_ GetConnLimiter) error { return nil }
+func (NullConnMgr) Close() error { return nil }
diff --git a/core/connmgr/presets.go b/core/connmgr/presets.go
new file mode 100644
index 0000000000..b4f2631315
--- /dev/null
+++ b/core/connmgr/presets.go
@@ -0,0 +1,67 @@
+package connmgr
+
+import (
+ "math"
+ "time"
+)
+
+// DecayNone applies no decay.
+func DecayNone() DecayFn {
+ return func(value DecayingValue) (_ int, rm bool) {
+ return value.Value, false
+ }
+}
+
+// DecayFixed subtracts the provided minuend from the current value, and
+// deletes the tag when the result first reaches 0 or goes negative.
+func DecayFixed(minuend int) DecayFn {
+ return func(value DecayingValue) (_ int, rm bool) {
+ v := value.Value - minuend
+ return v, v <= 0
+ }
+}
+
+// DecayLinear applies a fractional coefficient to the value of the current tag,
+// rounding down via math.Floor. It erases the tag when the result is zero.
+func DecayLinear(coef float64) DecayFn {
+ return func(value DecayingValue) (after int, rm bool) {
+ v := math.Floor(float64(value.Value) * coef)
+ return int(v), v <= 0
+ }
+}
+
+// DecayExpireWhenInactive expires a tag after a certain period of no bumps.
+func DecayExpireWhenInactive(after time.Duration) DecayFn {
+ return func(value DecayingValue) (_ int, rm bool) {
+ rm = time.Since(value.LastVisit) >= after
+ return 0, rm
+ }
+}
+
+// BumpSumUnbounded adds the incoming value to the peer's score.
+func BumpSumUnbounded() BumpFn {
+ return func(value DecayingValue, delta int) (after int) {
+ return value.Value + delta
+ }
+}
+
+// BumpSumBounded keeps summing the incoming score, keeping it within a
+// [min, max] range.
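+//
+// For example, BumpSumBounded(0, 100) keeps the running sum clamped to
+// [0, 100].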
+func BumpSumBounded(min, max int) BumpFn {
+ return func(value DecayingValue, delta int) (after int) {
+ v := value.Value + delta
+ if v >= max {
+ return max
+ } else if v <= min {
+ return min
+ }
+ return v
+ }
+}
+
+// BumpOverwrite replaces the current value of the tag with the incoming one.
+func BumpOverwrite() BumpFn {
+ return func(_ DecayingValue, delta int) (after int) {
+ return delta
+ }
+}
diff --git a/core/control/disconnect.go b/core/control/disconnect.go
new file mode 100644
index 0000000000..ad1fc5b8a6
--- /dev/null
+++ b/core/control/disconnect.go
@@ -0,0 +1,9 @@
+package control
+
+// DisconnectReason communicates the reason why a connection is being closed.
+//
+// A zero value stands for "no reason" / NA.
+//
+// This is an EXPERIMENTAL type. It will change in the future. Refer to the
+// connmgr.ConnectionGater godoc for more info.
+type DisconnectReason int
diff --git a/core/crypto/bench_test.go b/core/crypto/bench_test.go
new file mode 100644
index 0000000000..e1ed4b76fe
--- /dev/null
+++ b/core/crypto/bench_test.go
@@ -0,0 +1,84 @@
+package crypto
+
+import "testing"
+
+func BenchmarkSignRSA1B(b *testing.B) { RunBenchmarkSignRSA(b, 1) }
+func BenchmarkSignRSA10B(b *testing.B) { RunBenchmarkSignRSA(b, 10) }
+func BenchmarkSignRSA100B(b *testing.B) { RunBenchmarkSignRSA(b, 100) }
+func BenchmarkSignRSA1000B(b *testing.B) { RunBenchmarkSignRSA(b, 1000) }
+func BenchmarkSignRSA10000B(b *testing.B) { RunBenchmarkSignRSA(b, 10000) }
+func BenchmarkSignRSA100000B(b *testing.B) { RunBenchmarkSignRSA(b, 100000) }
+
+func BenchmarkVerifyRSA1B(b *testing.B) { RunBenchmarkVerifyRSA(b, 1) }
+func BenchmarkVerifyRSA10B(b *testing.B) { RunBenchmarkVerifyRSA(b, 10) }
+func BenchmarkVerifyRSA100B(b *testing.B) { RunBenchmarkVerifyRSA(b, 100) }
+func BenchmarkVerifyRSA1000B(b *testing.B) { RunBenchmarkVerifyRSA(b, 1000) }
+func BenchmarkVerifyRSA10000B(b *testing.B) { RunBenchmarkVerifyRSA(b, 10000) }
+func BenchmarkVerifyRSA100000B(b *testing.B) { RunBenchmarkVerifyRSA(b, 100000) }
+
+func BenchmarkSignEd255191B(b *testing.B) { RunBenchmarkSignEd25519(b, 1) }
+func BenchmarkSignEd2551910B(b *testing.B) { RunBenchmarkSignEd25519(b, 10) }
+func BenchmarkSignEd25519100B(b *testing.B) { RunBenchmarkSignEd25519(b, 100) }
+func BenchmarkSignEd255191000B(b *testing.B) { RunBenchmarkSignEd25519(b, 1000) }
+func BenchmarkSignEd2551910000B(b *testing.B) { RunBenchmarkSignEd25519(b, 10000) }
+func BenchmarkSignEd25519100000B(b *testing.B) { RunBenchmarkSignEd25519(b, 100000) }
+
+func BenchmarkVerifyEd255191B(b *testing.B) { RunBenchmarkVerifyEd25519(b, 1) }
+func BenchmarkVerifyEd2551910B(b *testing.B) { RunBenchmarkVerifyEd25519(b, 10) }
+func BenchmarkVerifyEd25519100B(b *testing.B) { RunBenchmarkVerifyEd25519(b, 100) }
+func BenchmarkVerifyEd255191000B(b *testing.B) { RunBenchmarkVerifyEd25519(b, 1000) }
+func BenchmarkVerifyEd2551910000B(b *testing.B) { RunBenchmarkVerifyEd25519(b, 10000) }
+func BenchmarkVerifyEd25519100000B(b *testing.B) { RunBenchmarkVerifyEd25519(b, 100000) }
+
+func RunBenchmarkSignRSA(b *testing.B, numBytes int) {
+ runBenchmarkSign(b, numBytes, RSA)
+}
+
+func RunBenchmarkSignEd25519(b *testing.B, numBytes int) {
+ runBenchmarkSign(b, numBytes, Ed25519)
+}
+
+func runBenchmarkSign(b *testing.B, numBytes int, t int) {
+ secret, _, err := GenerateKeyPair(t, 2048)
+ if err != nil {
+ b.Fatal(err)
+ }
+ someData := make([]byte, numBytes)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := secret.Sign(someData)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func RunBenchmarkVerifyRSA(b *testing.B, numBytes int) {
+ runBenchmarkVerify(b, numBytes, RSA)
+}
+
+func RunBenchmarkVerifyEd25519(b *testing.B, numBytes int) {
+ runBenchmarkVerify(b, numBytes, Ed25519)
+}
+
+func runBenchmarkVerify(b *testing.B, numBytes int, t int) {
+ secret, public, err := GenerateKeyPair(t, 2048)
+ if err != nil {
+ b.Fatal(err)
+ }
+ someData := make([]byte, numBytes)
+ signature, err := secret.Sign(someData)
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ valid, err := public.Verify(someData, signature)
+ if err != nil {
+ b.Fatal(err)
+ }
+ if !valid {
+ b.Fatal("signature should be valid")
+ }
+ }
+}
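These benchmarks follow the standard Go naming convention, so they can be driven with the usual tooling; a minimal sketch (assuming the file lands in `core/crypto` as in this diff):

```bash
# Run only the RSA signing benchmarks, skipping regular tests.
go test -bench 'BenchmarkSignRSA' -run '^$' ./core/crypto

# Run every sign/verify benchmark with allocation stats.
go test -bench . -benchmem -run '^$' ./core/crypto
```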
diff --git a/core/crypto/ecdsa.go b/core/crypto/ecdsa.go
new file mode 100644
index 0000000000..c890afe836
--- /dev/null
+++ b/core/crypto/ecdsa.go
@@ -0,0 +1,186 @@
+package crypto
+
+import (
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/sha256"
+ "crypto/x509"
+ "encoding/asn1"
+ "errors"
+ "io"
+ "math/big"
+
+ pb "github.com/libp2p/go-libp2p/core/crypto/pb"
+ "github.com/libp2p/go-libp2p/core/internal/catch"
+)
+
+// ECDSAPrivateKey is an implementation of an ECDSA private key
+type ECDSAPrivateKey struct {
+ priv *ecdsa.PrivateKey
+}
+
+// ECDSAPublicKey is an implementation of an ECDSA public key
+type ECDSAPublicKey struct {
+ pub *ecdsa.PublicKey
+}
+
+// ECDSASig holds the r and s values of an ECDSA signature
+type ECDSASig struct {
+ R, S *big.Int
+}
+
+var (
+ // ErrNotECDSAPubKey is returned when the public key passed is not an ecdsa public key
+ ErrNotECDSAPubKey = errors.New("not an ecdsa public key")
+ // ErrNilSig is returned when the signature is nil
+ ErrNilSig = errors.New("sig is nil")
+ // ErrNilPrivateKey is returned when a nil private key is provided
+ ErrNilPrivateKey = errors.New("private key is nil")
+ // ErrNilPublicKey is returned when a nil public key is provided
+ ErrNilPublicKey = errors.New("public key is nil")
+ // ECDSACurve is the default ecdsa curve used
+ ECDSACurve = elliptic.P256()
+)
+
+// GenerateECDSAKeyPair generates a new ecdsa private and public key
+func GenerateECDSAKeyPair(src io.Reader) (PrivKey, PubKey, error) {
+ return GenerateECDSAKeyPairWithCurve(ECDSACurve, src)
+}
+
+// GenerateECDSAKeyPairWithCurve generates a new ecdsa private and public key with a specified curve
+func GenerateECDSAKeyPairWithCurve(curve elliptic.Curve, src io.Reader) (PrivKey, PubKey, error) {
+ priv, err := ecdsa.GenerateKey(curve, src)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return &ECDSAPrivateKey{priv}, &ECDSAPublicKey{&priv.PublicKey}, nil
+}
+
+// ECDSAKeyPairFromKey generates a new ecdsa private and public key from an input private key
+func ECDSAKeyPairFromKey(priv *ecdsa.PrivateKey) (PrivKey, PubKey, error) {
+ if priv == nil {
+ return nil, nil, ErrNilPrivateKey
+ }
+
+ return &ECDSAPrivateKey{priv}, &ECDSAPublicKey{&priv.PublicKey}, nil
+}
+
+// ECDSAPublicKeyFromPubKey generates a new ecdsa public key from an input public key
+func ECDSAPublicKeyFromPubKey(pub ecdsa.PublicKey) (PubKey, error) {
+ return &ECDSAPublicKey{pub: &pub}, nil
+}
+
+// MarshalECDSAPrivateKey returns x509 bytes from a private key
+func MarshalECDSAPrivateKey(ePriv ECDSAPrivateKey) (res []byte, err error) {
+ defer func() { catch.HandlePanic(recover(), &err, "ECDSA private-key marshal") }()
+ return x509.MarshalECPrivateKey(ePriv.priv)
+}
+
+// MarshalECDSAPublicKey returns x509 bytes from a public key
+func MarshalECDSAPublicKey(ePub ECDSAPublicKey) (res []byte, err error) {
+ defer func() { catch.HandlePanic(recover(), &err, "ECDSA public-key marshal") }()
+ return x509.MarshalPKIXPublicKey(ePub.pub)
+}
+
+// UnmarshalECDSAPrivateKey returns a private key from x509 bytes
+func UnmarshalECDSAPrivateKey(data []byte) (res PrivKey, err error) {
+ defer func() { catch.HandlePanic(recover(), &err, "ECDSA private-key unmarshal") }()
+
+ priv, err := x509.ParseECPrivateKey(data)
+ if err != nil {
+ return nil, err
+ }
+
+ return &ECDSAPrivateKey{priv}, nil
+}
+
+// UnmarshalECDSAPublicKey returns the public key from x509 bytes
+func UnmarshalECDSAPublicKey(data []byte) (key PubKey, err error) {
+ defer func() { catch.HandlePanic(recover(), &err, "ECDSA public-key unmarshal") }()
+
+ pubIfc, err := x509.ParsePKIXPublicKey(data)
+ if err != nil {
+ return nil, err
+ }
+
+ pub, ok := pubIfc.(*ecdsa.PublicKey)
+ if !ok {
+ return nil, ErrNotECDSAPubKey
+ }
+
+ return &ECDSAPublicKey{pub}, nil
+}
+
+// Type returns the key type
+func (ePriv *ECDSAPrivateKey) Type() pb.KeyType {
+ return pb.KeyType_ECDSA
+}
+
+// Raw returns x509 bytes from a private key
+func (ePriv *ECDSAPrivateKey) Raw() (res []byte, err error) {
+ defer func() { catch.HandlePanic(recover(), &err, "ECDSA private-key marshal") }()
+ return x509.MarshalECPrivateKey(ePriv.priv)
+}
+
+// Equals compares two private keys
+func (ePriv *ECDSAPrivateKey) Equals(o Key) bool {
+ return basicEquals(ePriv, o)
+}
+
+// Sign returns the signature of the input data
+func (ePriv *ECDSAPrivateKey) Sign(data []byte) (sig []byte, err error) {
+ defer func() { catch.HandlePanic(recover(), &err, "ECDSA signing") }()
+ hash := sha256.Sum256(data)
+ r, s, err := ecdsa.Sign(rand.Reader, ePriv.priv, hash[:])
+ if err != nil {
+ return nil, err
+ }
+
+ return asn1.Marshal(ECDSASig{
+ R: r,
+ S: s,
+ })
+}
+
+// GetPublic returns a public key
+func (ePriv *ECDSAPrivateKey) GetPublic() PubKey {
+ return &ECDSAPublicKey{&ePriv.priv.PublicKey}
+}
+
+// Type returns the key type
+func (ePub *ECDSAPublicKey) Type() pb.KeyType {
+ return pb.KeyType_ECDSA
+}
+
+// Raw returns x509 bytes from a public key
+func (ePub *ECDSAPublicKey) Raw() ([]byte, error) {
+ return x509.MarshalPKIXPublicKey(ePub.pub)
+}
+
+// Equals compares two public keys
+func (ePub *ECDSAPublicKey) Equals(o Key) bool {
+ return basicEquals(ePub, o)
+}
+
+// Verify compares data to a signature
+func (ePub *ECDSAPublicKey) Verify(data, sigBytes []byte) (success bool, err error) {
+ defer func() {
+ catch.HandlePanic(recover(), &err, "ECDSA signature verification")
+
+ // Just to be extra paranoid.
+ if err != nil {
+ success = false
+ }
+ }()
+
+ sig := new(ECDSASig)
+ if _, err := asn1.Unmarshal(sigBytes, sig); err != nil {
+ return false, err
+ }
+
+ hash := sha256.Sum256(data)
+
+ return ecdsa.Verify(ePub.pub, hash[:], sig.R, sig.S), nil
+}
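As a small illustration of the signature format produced by `Sign` above: the bytes are a DER-encoded ASN.1 SEQUENCE of the `r` and `s` values, so they can be decoded with the exported `ECDSASig` type and the standard library alone. A minimal sketch (the `main` wrapper is illustrative, not part of this diff):

```go
package main

import (
	"crypto/rand"
	"encoding/asn1"
	"fmt"

	"github.com/libp2p/go-libp2p/core/crypto"
)

func main() {
	priv, _, err := crypto.GenerateECDSAKeyPair(rand.Reader)
	if err != nil {
		panic(err)
	}

	sig, err := priv.Sign([]byte("payload"))
	if err != nil {
		panic(err)
	}

	// Decode the DER-encoded (r, s) pair back out of the signature.
	var parsed crypto.ECDSASig
	if _, err := asn1.Unmarshal(sig, &parsed); err != nil {
		panic(err)
	}
	fmt.Printf("r=%s s=%s\n", parsed.R, parsed.S)
}
```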
diff --git a/core/crypto/ecdsa_test.go b/core/crypto/ecdsa_test.go
new file mode 100644
index 0000000000..6e4720d7c2
--- /dev/null
+++ b/core/crypto/ecdsa_test.go
@@ -0,0 +1,142 @@
+package crypto
+
+import (
+ "crypto/ecdsa"
+ "crypto/rand"
+ "testing"
+)
+
+func TestECDSABasicSignAndVerify(t *testing.T) {
+ priv, pub, err := GenerateECDSAKeyPair(rand.Reader)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ data := []byte("hello! and welcome to some awesome crypto primitives")
+
+ sig, err := priv.Sign(data)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ok, err := pub.Verify(data, sig)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !ok {
+ t.Fatal("signature didn't match")
+ }
+
+ // change data
+ data[0] = ^data[0]
+ ok, err = pub.Verify(data, sig)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if ok {
+ t.Fatal("signature matched and shouldn't")
+ }
+}
+
+func TestECDSASignZero(t *testing.T) {
+ priv, pub, err := GenerateECDSAKeyPair(rand.Reader)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ data := make([]byte, 0)
+ sig, err := priv.Sign(data)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ok, err := pub.Verify(data, sig)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !ok {
+ t.Fatal("signature didn't match")
+ }
+}
+
+func TestECDSAMarshalLoop(t *testing.T) {
+ priv, pub, err := GenerateECDSAKeyPair(rand.Reader)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ privB, err := MarshalPrivateKey(priv)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ privNew, err := UnmarshalPrivateKey(privB)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !priv.Equals(privNew) || !privNew.Equals(priv) {
+ t.Fatal("keys are not equal")
+ }
+
+ pubB, err := MarshalPublicKey(pub)
+ if err != nil {
+ t.Fatal(err)
+ }
+ pubNew, err := UnmarshalPublicKey(pubB)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !pub.Equals(pubNew) || !pubNew.Equals(pub) {
+ t.Fatal("keys are not equal")
+ }
+
+}
+
+func TestECDSAPublicKeyFromPubKey(t *testing.T) {
+ ecdsaPrivK, err := ecdsa.GenerateKey(ECDSACurve, rand.Reader)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ privK, _, err := ECDSAKeyPairFromKey(ecdsaPrivK)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ data := []byte("Hello world!")
+ signature, err := privK.Sign(data)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ pubKey, err := ECDSAPublicKeyFromPubKey(ecdsaPrivK.PublicKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ok, err := pubKey.Verify(data, signature)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !ok {
+ t.Fatal("signature didn't match")
+ }
+
+ pubB, err := MarshalPublicKey(pubKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+ pubNew, err := UnmarshalPublicKey(pubB)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !pubKey.Equals(pubNew) || !pubNew.Equals(pubKey) {
+ t.Fatal("keys are not equal")
+ }
+}
diff --git a/core/crypto/ed25519.go b/core/crypto/ed25519.go
new file mode 100644
index 0000000000..d6e3031c03
--- /dev/null
+++ b/core/crypto/ed25519.go
@@ -0,0 +1,156 @@
+package crypto
+
+import (
+ "bytes"
+ "crypto/ed25519"
+ "crypto/subtle"
+ "errors"
+ "fmt"
+ "io"
+
+ pb "github.com/libp2p/go-libp2p/core/crypto/pb"
+ "github.com/libp2p/go-libp2p/core/internal/catch"
+)
+
+// Ed25519PrivateKey is an ed25519 private key.
+type Ed25519PrivateKey struct {
+ k ed25519.PrivateKey
+}
+
+// Ed25519PublicKey is an ed25519 public key.
+type Ed25519PublicKey struct {
+ k ed25519.PublicKey
+}
+
+// GenerateEd25519Key generates a new ed25519 private and public key pair.
+func GenerateEd25519Key(src io.Reader) (PrivKey, PubKey, error) {
+ pub, priv, err := ed25519.GenerateKey(src)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return &Ed25519PrivateKey{
+ k: priv,
+ },
+ &Ed25519PublicKey{
+ k: pub,
+ },
+ nil
+}
+
+// Type of the private key (Ed25519).
+func (k *Ed25519PrivateKey) Type() pb.KeyType {
+ return pb.KeyType_Ed25519
+}
+
+// Raw private key bytes.
+func (k *Ed25519PrivateKey) Raw() ([]byte, error) {
+ // The Ed25519 private key contains two 32-byte values: the private key
+ // seed and the public key.
+ // Keeping the public key here makes it cheap to retrieve without
+ // re-computing an elliptic curve scalar multiplication.
+ buf := make([]byte, len(k.k))
+ copy(buf, k.k)
+
+ return buf, nil
+}
+
+func (k *Ed25519PrivateKey) pubKeyBytes() []byte {
+ return k.k[ed25519.PrivateKeySize-ed25519.PublicKeySize:]
+}
+
+// Equals compares two ed25519 private keys.
+func (k *Ed25519PrivateKey) Equals(o Key) bool {
+ edk, ok := o.(*Ed25519PrivateKey)
+ if !ok {
+ return basicEquals(k, o)
+ }
+
+ return subtle.ConstantTimeCompare(k.k, edk.k) == 1
+}
+
+// GetPublic returns an ed25519 public key from a private key.
+func (k *Ed25519PrivateKey) GetPublic() PubKey {
+ return &Ed25519PublicKey{k: k.pubKeyBytes()}
+}
+
+// Sign returns a signature from an input message.
+func (k *Ed25519PrivateKey) Sign(msg []byte) (res []byte, err error) {
+ defer func() { catch.HandlePanic(recover(), &err, "ed25519 signing") }()
+
+ return ed25519.Sign(k.k, msg), nil
+}
+
+// Type of the public key (Ed25519).
+func (k *Ed25519PublicKey) Type() pb.KeyType {
+ return pb.KeyType_Ed25519
+}
+
+// Raw public key bytes.
+func (k *Ed25519PublicKey) Raw() ([]byte, error) {
+ return k.k, nil
+}
+
+// Equals compares two ed25519 public keys.
+func (k *Ed25519PublicKey) Equals(o Key) bool {
+ edk, ok := o.(*Ed25519PublicKey)
+ if !ok {
+ return basicEquals(k, o)
+ }
+
+ return bytes.Equal(k.k, edk.k)
+}
+
+// Verify checks a signature against the input data.
+func (k *Ed25519PublicKey) Verify(data []byte, sig []byte) (success bool, err error) {
+ defer func() {
+ catch.HandlePanic(recover(), &err, "ed15519 signature verification")
+
+ // To be safe.
+ if err != nil {
+ success = false
+ }
+ }()
+ return ed25519.Verify(k.k, data, sig), nil
+}
+
+// UnmarshalEd25519PublicKey returns a public key from input bytes.
+func UnmarshalEd25519PublicKey(data []byte) (PubKey, error) {
+ if len(data) != 32 {
+ return nil, errors.New("expect ed25519 public key data size to be 32")
+ }
+
+ return &Ed25519PublicKey{
+ k: ed25519.PublicKey(data),
+ }, nil
+}
+
+// UnmarshalEd25519PrivateKey returns a private key from input bytes.
+func UnmarshalEd25519PrivateKey(data []byte) (PrivKey, error) {
+ switch len(data) {
+ case ed25519.PrivateKeySize + ed25519.PublicKeySize:
+ // Remove the redundant public key. See issue #36.
+ redundantPk := data[ed25519.PrivateKeySize:]
+ pk := data[ed25519.PrivateKeySize-ed25519.PublicKeySize : ed25519.PrivateKeySize]
+ if subtle.ConstantTimeCompare(pk, redundantPk) == 0 {
+ return nil, errors.New("expected redundant ed25519 public key to be redundant")
+ }
+
+ // No point in storing the extra data.
+ newKey := make([]byte, ed25519.PrivateKeySize)
+ copy(newKey, data[:ed25519.PrivateKeySize])
+ data = newKey
+ case ed25519.PrivateKeySize:
+ default:
+ return nil, fmt.Errorf(
+ "expected ed25519 data size to be %d or %d, got %d",
+ ed25519.PrivateKeySize,
+ ed25519.PrivateKeySize+ed25519.PublicKeySize,
+ len(data),
+ )
+ }
+
+ return &Ed25519PrivateKey{
+ k: ed25519.PrivateKey(data),
+ }, nil
+}
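The legacy 96-byte layout handled by `UnmarshalEd25519PrivateKey` above is easy to reproduce directly, which may make the redundancy check clearer: `Raw()` yields 64 bytes (the 32-byte seed followed by the 32-byte public key), and the legacy format simply appended the public key once more. A minimal sketch (the `main` wrapper is illustrative only):

```go
package main

import (
	"crypto/rand"
	"fmt"

	"github.com/libp2p/go-libp2p/core/crypto"
)

func main() {
	priv, _, err := crypto.GenerateEd25519Key(rand.Reader)
	if err != nil {
		panic(err)
	}

	// 64 bytes: seed || public key.
	raw, err := priv.Raw()
	if err != nil {
		panic(err)
	}

	// Legacy 96-byte form (issue #36): private key || public key again.
	legacy := append(append([]byte{}, raw...), raw[32:]...)

	restored, err := crypto.UnmarshalEd25519PrivateKey(legacy)
	if err != nil {
		panic(err)
	}
	fmt.Println("round-trip equal:", priv.Equals(restored))
}
```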
diff --git a/core/crypto/ed25519_test.go b/core/crypto/ed25519_test.go
new file mode 100644
index 0000000000..dd24b42234
--- /dev/null
+++ b/core/crypto/ed25519_test.go
@@ -0,0 +1,214 @@
+package crypto
+
+import (
+ "crypto/ed25519"
+ "crypto/rand"
+ "testing"
+
+ "github.com/libp2p/go-libp2p/core/crypto/pb"
+
+ "google.golang.org/protobuf/proto"
+)
+
+func TestBasicSignAndVerify(t *testing.T) {
+ priv, pub, err := GenerateEd25519Key(rand.Reader)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ data := []byte("hello! and welcome to some awesome crypto primitives")
+
+ sig, err := priv.Sign(data)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ok, err := pub.Verify(data, sig)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !ok {
+ t.Fatal("signature didn't match")
+ }
+
+ // change data
+ data[0] = ^data[0]
+ ok, err = pub.Verify(data, sig)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if ok {
+ t.Fatal("signature matched and shouldn't")
+ }
+}
+
+func TestSignZero(t *testing.T) {
+ priv, pub, err := GenerateEd25519Key(rand.Reader)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ data := make([]byte, 0)
+ sig, err := priv.Sign(data)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ok, err := pub.Verify(data, sig)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !ok {
+ t.Fatal("signature didn't match")
+ }
+}
+
+func TestMarshalLoop(t *testing.T) {
+ priv, pub, err := GenerateEd25519Key(rand.Reader)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ t.Run("PrivateKey", func(t *testing.T) {
+ for name, f := range map[string]func() ([]byte, error){
+ "Marshal": func() ([]byte, error) {
+ return MarshalPrivateKey(priv)
+ },
+ "Redundant": func() ([]byte, error) {
+ // See issue #36.
+ // Ed25519 private keys used to contain the public key twice.
+ // For backwards-compatibility, we need to continue supporting
+ // that scenario.
+ data, err := priv.Raw()
+ if err != nil {
+ t.Fatal(err)
+ }
+ data = append(data, data[len(data)-ed25519.PublicKeySize:]...)
+ return proto.Marshal(&pb.PrivateKey{
+ Type: priv.Type().Enum(),
+ Data: data,
+ })
+ },
+ } {
+ t.Run(name, func(t *testing.T) {
+ bts, err := f()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ privNew, err := UnmarshalPrivateKey(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !priv.Equals(privNew) || !privNew.Equals(priv) {
+ t.Fatal("keys are not equal")
+ }
+
+ msg := []byte("My child, my sister,\nThink of the rapture\nOf living together there!")
+ signed, err := privNew.Sign(msg)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ok, err := privNew.GetPublic().Verify(msg, signed)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !ok {
+ t.Fatal("signature didn't match")
+ }
+ })
+ }
+ })
+
+ t.Run("PublicKey", func(t *testing.T) {
+ for name, f := range map[string]func() ([]byte, error){
+ "Marshal": func() ([]byte, error) {
+ return MarshalPublicKey(pub)
+ },
+ } {
+ t.Run(name, func(t *testing.T) {
+ bts, err := f()
+ if err != nil {
+ t.Fatal(err)
+ }
+ pubNew, err := UnmarshalPublicKey(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !pub.Equals(pubNew) || !pubNew.Equals(pub) {
+ t.Fatal("keys are not equal")
+ }
+ })
+ }
+ })
+}
+
+func TestUnmarshalErrors(t *testing.T) {
+ t.Run("PublicKey", func(t *testing.T) {
+ t.Run("Invalid data length", func(t *testing.T) {
+ data, err := proto.Marshal(&pb.PublicKey{
+ Type: pb.KeyType_Ed25519.Enum(),
+ Data: []byte{42},
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := UnmarshalPublicKey(data); err == nil {
+ t.Fatal("expected an error")
+ }
+ })
+ })
+
+ t.Run("PrivateKey", func(t *testing.T) {
+ t.Run("Redundant public key mismatch", func(t *testing.T) {
+ priv, _, err := GenerateEd25519Key(rand.Reader)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ data, err := priv.Raw()
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Append the private key instead of the public key.
+ data = append(data, data[:ed25519.PublicKeySize]...)
+
+ b, err := proto.Marshal(&pb.PrivateKey{
+ Type: priv.Type().Enum(),
+ Data: data,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = UnmarshalPrivateKey(b)
+ if err == nil {
+ t.Fatal("expected an error")
+ }
+ if err.Error() != "expected redundant ed25519 public key to be redundant" {
+ t.Fatalf("invalid error received: %s", err.Error())
+ }
+ })
+
+ t.Run("Invalid data length", func(t *testing.T) {
+ data, err := proto.Marshal(&pb.PrivateKey{
+ Type: pb.KeyType_Ed25519.Enum(),
+ Data: []byte{42},
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = UnmarshalPrivateKey(data)
+ if err == nil {
+ t.Fatal("expected an error")
+ }
+ })
+ })
+}
diff --git a/core/crypto/fixture_test.go b/core/crypto/fixture_test.go
new file mode 100644
index 0000000000..aa4f51bcc6
--- /dev/null
+++ b/core/crypto/fixture_test.go
@@ -0,0 +1,132 @@
+package crypto_test
+
+import (
+ "bytes"
+ "crypto/rand"
+ "fmt"
+ "io"
+ "os"
+ "testing"
+
+ "github.com/libp2p/go-libp2p/core/crypto"
+ crypto_pb "github.com/libp2p/go-libp2p/core/crypto/pb"
+)
+
+var message = []byte("Libp2p is the _best_!")
+
+type testCase struct {
+ keyType crypto_pb.KeyType
+ gen func(i io.Reader) (crypto.PrivKey, crypto.PubKey, error)
+ sigDeterministic bool
+}
+
+var keyTypes = []testCase{
+ {
+ keyType: crypto_pb.KeyType_ECDSA,
+ gen: crypto.GenerateECDSAKeyPair,
+ },
+ {
+ keyType: crypto_pb.KeyType_Secp256k1,
+ sigDeterministic: true,
+ gen: crypto.GenerateSecp256k1Key,
+ },
+ {
+ keyType: crypto_pb.KeyType_RSA,
+ sigDeterministic: true,
+ gen: func(i io.Reader) (crypto.PrivKey, crypto.PubKey, error) {
+ return crypto.GenerateRSAKeyPair(2048, i)
+ },
+ },
+}
+
+func fname(kt crypto_pb.KeyType, ext string) string {
+ return fmt.Sprintf("test_data/%d.%s", kt, ext)
+}
+
+func TestFixtures(t *testing.T) {
+ for _, tc := range keyTypes {
+ t.Run(tc.keyType.String(), func(t *testing.T) {
+ pubBytes, err := os.ReadFile(fname(tc.keyType, "pub"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ privBytes, err := os.ReadFile(fname(tc.keyType, "priv"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ sigBytes, err := os.ReadFile(fname(tc.keyType, "sig"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ pub, err := crypto.UnmarshalPublicKey(pubBytes)
+ if err != nil {
+ t.Fatal(err)
+ }
+ pubBytes2, err := crypto.MarshalPublicKey(pub)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(pubBytes2, pubBytes) {
+ t.Fatal("encoding round-trip failed")
+ }
+ priv, err := crypto.UnmarshalPrivateKey(privBytes)
+ if err != nil {
+ t.Fatal(err)
+ }
+ privBytes2, err := crypto.MarshalPrivateKey(priv)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(privBytes2, privBytes) {
+ t.Fatal("encoding round-trip failed")
+ }
+ ok, err := pub.Verify(message, sigBytes)
+ if !ok || err != nil {
+ t.Fatal("failed to validate signature with public key")
+ }
+
+ if tc.sigDeterministic {
+ sigBytes2, err := priv.Sign(message)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(sigBytes2, sigBytes) {
+ t.Fatal("signature not deterministic")
+ }
+ }
+ })
+ }
+}
+
+func init() {
+ // set to true to re-generate test data
+ if false {
+ generate()
+ panic("generated")
+ }
+}
+
+// generate re-generates test data
+func generate() {
+ for _, tc := range keyTypes {
+ priv, pub, err := tc.gen(rand.Reader)
+ if err != nil {
+ panic(err)
+ }
+ pubb, err := crypto.MarshalPublicKey(pub)
+ if err != nil {
+ panic(err)
+ }
+ privb, err := crypto.MarshalPrivateKey(priv)
+ if err != nil {
+ panic(err)
+ }
+ sig, err := priv.Sign(message)
+ if err != nil {
+ panic(err)
+ }
+ os.WriteFile(fname(tc.keyType, "pub"), pubb, 0666)
+ os.WriteFile(fname(tc.keyType, "priv"), privb, 0666)
+ os.WriteFile(fname(tc.keyType, "sig"), sig, 0666)
+ }
+}
diff --git a/core/crypto/key.go b/core/crypto/key.go
new file mode 100644
index 0000000000..ef697ad6ff
--- /dev/null
+++ b/core/crypto/key.go
@@ -0,0 +1,242 @@
+// Package crypto implements various cryptographic utilities used by libp2p.
+// This includes a Public and Private key interface and key implementations
+// for supported key algorithms.
+package crypto
+
+import (
+ "crypto/rand"
+ "crypto/subtle"
+ "encoding/base64"
+ "errors"
+ "io"
+
+ "github.com/libp2p/go-libp2p/core/crypto/pb"
+
+ "google.golang.org/protobuf/proto"
+)
+
+const (
+ // RSA is an enum for the supported RSA key type
+ RSA = iota
+ // Ed25519 is an enum for the supported Ed25519 key type
+ Ed25519
+ // Secp256k1 is an enum for the supported Secp256k1 key type
+ Secp256k1
+ // ECDSA is an enum for the supported ECDSA key type
+ ECDSA
+)
+
+var (
+ // ErrBadKeyType is returned when a key is not supported
+ ErrBadKeyType = errors.New("invalid or unsupported key type")
+ // KeyTypes is a list of supported keys
+ KeyTypes = []int{
+ RSA,
+ Ed25519,
+ Secp256k1,
+ ECDSA,
+ }
+)
+
+// PubKeyUnmarshaller is a func that creates a PubKey from a given slice of bytes
+type PubKeyUnmarshaller func(data []byte) (PubKey, error)
+
+// PrivKeyUnmarshaller is a func that creates a PrivKey from a given slice of bytes
+type PrivKeyUnmarshaller func(data []byte) (PrivKey, error)
+
+// PubKeyUnmarshallers is a map of unmarshallers by key type
+var PubKeyUnmarshallers = map[pb.KeyType]PubKeyUnmarshaller{
+ pb.KeyType_RSA: UnmarshalRsaPublicKey,
+ pb.KeyType_Ed25519: UnmarshalEd25519PublicKey,
+ pb.KeyType_Secp256k1: UnmarshalSecp256k1PublicKey,
+ pb.KeyType_ECDSA: UnmarshalECDSAPublicKey,
+}
+
+// PrivKeyUnmarshallers is a map of unmarshallers by key type
+var PrivKeyUnmarshallers = map[pb.KeyType]PrivKeyUnmarshaller{
+ pb.KeyType_RSA: UnmarshalRsaPrivateKey,
+ pb.KeyType_Ed25519: UnmarshalEd25519PrivateKey,
+ pb.KeyType_Secp256k1: UnmarshalSecp256k1PrivateKey,
+ pb.KeyType_ECDSA: UnmarshalECDSAPrivateKey,
+}
+
+// Key represents a crypto key that can be compared to another key
+type Key interface {
+ // Equals checks whether two PubKeys are the same
+ Equals(Key) bool
+
+ // Raw returns the raw bytes of the key (not wrapped in the
+ // libp2p-crypto protobuf).
+ //
+ // This function is the inverse of {Priv,Pub}KeyUnmarshaller.
+ Raw() ([]byte, error)
+
+ // Type returns the protobuf key type.
+ Type() pb.KeyType
+}
+
+// PrivKey represents a private key that can be used to generate a public key and sign data
+type PrivKey interface {
+ Key
+
+ // Cryptographically sign the given bytes
+ Sign([]byte) ([]byte, error)
+
+ // Return a public key paired with this private key
+ GetPublic() PubKey
+}
+
+// PubKey is a public key that can be used to verify data signed with the corresponding private key
+type PubKey interface {
+ Key
+
+ // Verify that 'sig' is the signed hash of 'data'
+ Verify(data []byte, sig []byte) (bool, error)
+}
+
+// GenSharedKey generates the shared key from a given private key
+type GenSharedKey func([]byte) ([]byte, error)
+
+// GenerateKeyPair generates a private and public key
+func GenerateKeyPair(typ, bits int) (PrivKey, PubKey, error) {
+ return GenerateKeyPairWithReader(typ, bits, rand.Reader)
+}
+
+// GenerateKeyPairWithReader returns a keypair of the given type and bit-size
+func GenerateKeyPairWithReader(typ, bits int, src io.Reader) (PrivKey, PubKey, error) {
+ switch typ {
+ case RSA:
+ return GenerateRSAKeyPair(bits, src)
+ case Ed25519:
+ return GenerateEd25519Key(src)
+ case Secp256k1:
+ return GenerateSecp256k1Key(src)
+ case ECDSA:
+ return GenerateECDSAKeyPair(src)
+ default:
+ return nil, nil, ErrBadKeyType
+ }
+}
+
+// UnmarshalPublicKey converts a protobuf serialized public key into its
+// representative object
+func UnmarshalPublicKey(data []byte) (PubKey, error) {
+ pmes := new(pb.PublicKey)
+ err := proto.Unmarshal(data, pmes)
+ if err != nil {
+ return nil, err
+ }
+
+ return PublicKeyFromProto(pmes)
+}
+
+// PublicKeyFromProto converts an unserialized protobuf PublicKey message
+// into its representative object.
+func PublicKeyFromProto(pmes *pb.PublicKey) (PubKey, error) {
+ um, ok := PubKeyUnmarshallers[pmes.GetType()]
+ if !ok {
+ return nil, ErrBadKeyType
+ }
+
+ data := pmes.GetData()
+
+ pk, err := um(data)
+ if err != nil {
+ return nil, err
+ }
+
+ switch tpk := pk.(type) {
+ case *RsaPublicKey:
+ tpk.cached, _ = proto.Marshal(pmes)
+ }
+
+ return pk, nil
+}
+
+// MarshalPublicKey converts a public key object into a protobuf serialized
+// public key
+func MarshalPublicKey(k PubKey) ([]byte, error) {
+ pbmes, err := PublicKeyToProto(k)
+ if err != nil {
+ return nil, err
+ }
+
+ return proto.Marshal(pbmes)
+}
+
+// PublicKeyToProto converts a public key object into an unserialized
+// protobuf PublicKey message.
+func PublicKeyToProto(k PubKey) (*pb.PublicKey, error) {
+ data, err := k.Raw()
+ if err != nil {
+ return nil, err
+ }
+ return &pb.PublicKey{
+ Type: k.Type().Enum(),
+ Data: data,
+ }, nil
+}
+
+// UnmarshalPrivateKey converts a protobuf serialized private key into its
+// representative object
+func UnmarshalPrivateKey(data []byte) (PrivKey, error) {
+ pmes := new(pb.PrivateKey)
+ err := proto.Unmarshal(data, pmes)
+ if err != nil {
+ return nil, err
+ }
+
+ um, ok := PrivKeyUnmarshallers[pmes.GetType()]
+ if !ok {
+ return nil, ErrBadKeyType
+ }
+
+ return um(pmes.GetData())
+}
+
+// MarshalPrivateKey converts a key object into its protobuf serialized form.
+func MarshalPrivateKey(k PrivKey) ([]byte, error) {
+ data, err := k.Raw()
+ if err != nil {
+ return nil, err
+ }
+ return proto.Marshal(&pb.PrivateKey{
+ Type: k.Type().Enum(),
+ Data: data,
+ })
+}
+
+// ConfigDecodeKey decodes from b64 (for config file) to a byte array that can be unmarshalled.
+func ConfigDecodeKey(b string) ([]byte, error) {
+ return base64.StdEncoding.DecodeString(b)
+}
+
+// ConfigEncodeKey encodes a marshalled key to b64 (for config file).
+func ConfigEncodeKey(b []byte) string {
+ return base64.StdEncoding.EncodeToString(b)
+}
+
+// KeyEqual checks whether two Keys are equivalent (have identical byte representations).
+func KeyEqual(k1, k2 Key) bool {
+ if k1 == k2 {
+ return true
+ }
+
+ return k1.Equals(k2)
+}
+
+func basicEquals(k1, k2 Key) bool {
+ if k1.Type() != k2.Type() {
+ return false
+ }
+
+ a, err := k1.Raw()
+ if err != nil {
+ return false
+ }
+ b, err := k2.Raw()
+ if err != nil {
+ return false
+ }
+ return subtle.ConstantTimeCompare(a, b) == 1
+}
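Taken together, the helpers above give a compact round trip from key generation to a config-file string and back. A minimal sketch using only the exported API from this diff (the `main` wrapper is illustrative):

```go
package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p/core/crypto"
)

func main() {
	// The bit-size argument is ignored for Ed25519.
	priv, _, err := crypto.GenerateKeyPair(crypto.Ed25519, -1)
	if err != nil {
		panic(err)
	}

	// Protobuf-serialize, then base64 for storage in a config file.
	raw, err := crypto.MarshalPrivateKey(priv)
	if err != nil {
		panic(err)
	}
	encoded := crypto.ConfigEncodeKey(raw)

	// Later: decode the config string and restore the key.
	decoded, err := crypto.ConfigDecodeKey(encoded)
	if err != nil {
		panic(err)
	}
	restored, err := crypto.UnmarshalPrivateKey(decoded)
	if err != nil {
		panic(err)
	}
	fmt.Println("round-trip equal:", priv.Equals(restored))
}
```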
diff --git a/core/crypto/key_test.go b/core/crypto/key_test.go
new file mode 100644
index 0000000000..efd8ed5149
--- /dev/null
+++ b/core/crypto/key_test.go
@@ -0,0 +1,293 @@
+package crypto_test
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha256"
+ "crypto/x509"
+ "fmt"
+ "reflect"
+ "testing"
+
+ . "github.com/libp2p/go-libp2p/core/crypto"
+ pb "github.com/libp2p/go-libp2p/core/crypto/pb"
+ "github.com/libp2p/go-libp2p/core/test"
+
+ "github.com/decred/dcrd/dcrec/secp256k1/v4"
+ secp256k1ecdsa "github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa"
+)
+
+func TestKeys(t *testing.T) {
+ for _, typ := range KeyTypes {
+ testKeyType(typ, t)
+ }
+}
+
+func TestKeyPairFromKey(t *testing.T) {
+ var (
+ data = []byte(`hello world`)
+ hashed = sha256.Sum256(data)
+ )
+
+ privk, err := secp256k1.GeneratePrivateKey()
+ if err != nil {
+ t.Fatalf("err generating btcec priv key:\n%v", err)
+ }
+ sigK := secp256k1ecdsa.Sign(privk, hashed[:])
+
+ eKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ if err != nil {
+ t.Fatalf("err generating ecdsa priv key:\n%v", err)
+ }
+ sigE, err := eKey.Sign(rand.Reader, hashed[:], crypto.SHA256)
+ if err != nil {
+ t.Fatalf("err generating ecdsa sig:\n%v", err)
+ }
+
+ rKey, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ t.Fatalf("err generating rsa priv key:\n%v", err)
+ }
+ sigR, err := rKey.Sign(rand.Reader, hashed[:], crypto.SHA256)
+ if err != nil {
+ t.Fatalf("err generating rsa sig:\n%v", err)
+ }
+
+ _, edKey, err := ed25519.GenerateKey(rand.Reader)
+ if err != nil {
+ t.Fatalf("err generating ed25519 priv key:\n%v", err)
+ }
+ sigEd := ed25519.Sign(edKey, data[:])
+
+ for i, tt := range []struct {
+ in crypto.PrivateKey
+ typ pb.KeyType
+ sig []byte
+ }{
+ {
+ eKey,
+ ECDSA,
+ sigE,
+ },
+ {
+ privk,
+ Secp256k1,
+ sigK.Serialize(),
+ },
+ {
+ rKey,
+ RSA,
+ sigR,
+ },
+ {
+ &edKey,
+ Ed25519,
+ sigEd,
+ },
+ } {
+ t.Run(fmt.Sprintf("%v", i), func(t *testing.T) {
+ priv, pub, err := KeyPairFromStdKey(tt.in)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if priv == nil || pub == nil {
+ t.Errorf("received nil private key or public key: %v, %v", priv, pub)
+ }
+
+ if priv == nil || priv.Type() != tt.typ {
+ t.Errorf("want %v; got %v", tt.typ, priv.Type())
+ }
+
+ v, err := pub.Verify(data[:], tt.sig)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if !v {
+ t.Error("signature was not verified")
+ }
+
+ stdPub, err := PubKeyToStdKey(pub)
+ if stdPub == nil {
+ t.Errorf("err getting std public key from key: %v", err)
+ }
+
+ var stdPubBytes []byte
+
+ switch p := stdPub.(type) {
+ case *Secp256k1PublicKey:
+ stdPubBytes, err = p.Raw()
+ case ed25519.PublicKey:
+ stdPubBytes = []byte(p)
+ default:
+ stdPubBytes, err = x509.MarshalPKIXPublicKey(stdPub)
+ }
+
+ if err != nil {
+ t.Errorf("Error while marshaling %v key: %v", reflect.TypeOf(stdPub), err)
+ }
+
+ pubBytes, err := pub.Raw()
+ if err != nil {
+ t.Errorf("err getting raw bytes for %v key: %v", reflect.TypeOf(pub), err)
+ }
+ if !bytes.Equal(stdPubBytes, pubBytes) {
+ t.Errorf("err roundtripping %v key", reflect.TypeOf(pub))
+ }
+
+ stdPriv, err := PrivKeyToStdKey(priv)
+ if stdPriv == nil {
+ t.Errorf("err getting std private key from key: %v", err)
+ }
+
+ var stdPrivBytes []byte
+
+ switch p := stdPriv.(type) {
+ case *Secp256k1PrivateKey:
+ stdPrivBytes, err = p.Raw()
+ case *ecdsa.PrivateKey:
+ stdPrivBytes, err = x509.MarshalECPrivateKey(p)
+ case *ed25519.PrivateKey:
+ stdPrivBytes = *p
+ case *rsa.PrivateKey:
+ stdPrivBytes = x509.MarshalPKCS1PrivateKey(p)
+ }
+
+ if err != nil {
+ t.Errorf("err marshaling %v key: %v", reflect.TypeOf(stdPriv), err)
+ }
+
+ privBytes, err := priv.Raw()
+ if err != nil {
+ t.Errorf("err getting raw bytes for %v key: %v", reflect.TypeOf(priv), err)
+ }
+
+ if !bytes.Equal(stdPrivBytes, privBytes) {
+ t.Errorf("err roundtripping %v key", reflect.TypeOf(priv))
+ }
+ })
+ }
+}
+
+func testKeyType(typ int, t *testing.T) {
+ bits := 512
+ if typ == RSA {
+ bits = 2048
+ }
+ sk, pk, err := test.RandTestKeyPair(typ, bits)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ testKeySignature(t, sk)
+ testKeyEncoding(t, sk)
+ testKeyEquals(t, sk)
+ testKeyEquals(t, pk)
+}
+
+func testKeySignature(t *testing.T, sk PrivKey) {
+ pk := sk.GetPublic()
+
+ text := make([]byte, 16)
+ if _, err := rand.Read(text); err != nil {
+ t.Fatal(err)
+ }
+
+ sig, err := sk.Sign(text)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ valid, err := pk.Verify(text, sig)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !valid {
+ t.Fatal("Invalid signature.")
+ }
+}
+
+func testKeyEncoding(t *testing.T, sk PrivKey) {
+ skbm, err := MarshalPrivateKey(sk)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ sk2, err := UnmarshalPrivateKey(skbm)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !sk.Equals(sk2) {
+ t.Error("Unmarshaled private key didn't match original.\n")
+ }
+
+ skbm2, err := MarshalPrivateKey(sk2)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !bytes.Equal(skbm, skbm2) {
+ t.Error("skb -> marshal -> unmarshal -> skb failed.\n", skbm, "\n", skbm2)
+ }
+
+ pk := sk.GetPublic()
+ pkbm, err := MarshalPublicKey(pk)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ pk2, err := UnmarshalPublicKey(pkbm)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !pk.Equals(pk2) {
+ t.Error("Unmarshaled public key didn't match original.\n")
+ }
+
+ pkbm2, err := MarshalPublicKey(pk)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !bytes.Equal(pkbm, pkbm2) {
+ t.Error("skb -> marshal -> unmarshal -> skb failed.\n", pkbm, "\n", pkbm2)
+ }
+}
+
+func testKeyEquals(t *testing.T, k Key) {
+ // kb, err := k.Raw()
+ // if err != nil {
+ // t.Fatal(err)
+ // }
+
+ if !KeyEqual(k, k) {
+ t.Fatal("Key not equal to itself.")
+ }
+
+ // bad test, relies on deep internals..
+ // if !KeyEqual(k, testkey(kb)) {
+ // t.Fatal("Key not equal to key with same bytes.")
+ // }
+
+ sk, pk, err := test.RandTestKeyPair(RSA, 2048)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if KeyEqual(k, sk) {
+ t.Fatal("Keys should not equal.")
+ }
+
+ if KeyEqual(k, pk) {
+ t.Fatal("Keys should not equal.")
+ }
+}
diff --git a/core/crypto/key_to_stdlib.go b/core/crypto/key_to_stdlib.go
new file mode 100644
index 0000000000..aead1d2513
--- /dev/null
+++ b/core/crypto/key_to_stdlib.go
@@ -0,0 +1,78 @@
+package crypto
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/rsa"
+
+ "github.com/decred/dcrd/dcrec/secp256k1/v4"
+)
+
+// KeyPairFromStdKey wraps standard library (and secp256k1) private keys in libp2p/go-libp2p/core/crypto keys
+func KeyPairFromStdKey(priv crypto.PrivateKey) (PrivKey, PubKey, error) {
+ if priv == nil {
+ return nil, nil, ErrNilPrivateKey
+ }
+
+ switch p := priv.(type) {
+ case *rsa.PrivateKey:
+ return &RsaPrivateKey{*p}, &RsaPublicKey{k: p.PublicKey}, nil
+
+ case *ecdsa.PrivateKey:
+ return &ECDSAPrivateKey{p}, &ECDSAPublicKey{&p.PublicKey}, nil
+
+ case *ed25519.PrivateKey:
+ pubIfc := p.Public()
+ pub, _ := pubIfc.(ed25519.PublicKey)
+ return &Ed25519PrivateKey{*p}, &Ed25519PublicKey{pub}, nil
+
+ case *secp256k1.PrivateKey:
+ sPriv := Secp256k1PrivateKey(*p)
+ sPub := Secp256k1PublicKey(*p.PubKey())
+ return &sPriv, &sPub, nil
+
+ default:
+ return nil, nil, ErrBadKeyType
+ }
+}
+
+// PrivKeyToStdKey converts libp2p/go-libp2p/core/crypto private keys to standard library (and secp256k1) private keys
+func PrivKeyToStdKey(priv PrivKey) (crypto.PrivateKey, error) {
+ if priv == nil {
+ return nil, ErrNilPrivateKey
+ }
+
+ switch p := priv.(type) {
+ case *RsaPrivateKey:
+ return &p.sk, nil
+ case *ECDSAPrivateKey:
+ return p.priv, nil
+ case *Ed25519PrivateKey:
+ return &p.k, nil
+ case *Secp256k1PrivateKey:
+ return p, nil
+ default:
+ return nil, ErrBadKeyType
+ }
+}
+
+// PubKeyToStdKey converts libp2p/go-libp2p/core/crypto public keys to standard library (and secp256k1) public keys
+func PubKeyToStdKey(pub PubKey) (crypto.PublicKey, error) {
+ if pub == nil {
+ return nil, ErrNilPublicKey
+ }
+
+ switch p := pub.(type) {
+ case *RsaPublicKey:
+ return &p.k, nil
+ case *ECDSAPublicKey:
+ return p.pub, nil
+ case *Ed25519PublicKey:
+ return p.k, nil
+ case *Secp256k1PublicKey:
+ return p, nil
+ default:
+ return nil, ErrBadKeyType
+ }
+}
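A short sketch of the bridging direction most callers need: wrapping a standard-library key into the libp2p types. Note that `KeyPairFromStdKey` matches on `*ed25519.PrivateKey`, so the pointer is required (the `main` wrapper is illustrative only):

```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"

	"github.com/libp2p/go-libp2p/core/crypto"
)

func main() {
	// A key pair created with the standard library.
	_, stdPriv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	// Wrap it into libp2p key types; note the pointer.
	priv, pub, err := crypto.KeyPairFromStdKey(&stdPriv)
	if err != nil {
		panic(err)
	}

	msg := []byte("hello")
	sig, err := priv.Sign(msg)
	if err != nil {
		panic(err)
	}
	ok, err := pub.Verify(msg, sig)
	fmt.Println(ok, err) // true <nil>
}
```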
diff --git a/core/crypto/pb/crypto.pb.go b/core/crypto/pb/crypto.pb.go
new file mode 100644
index 0000000000..c4aa78f9fd
--- /dev/null
+++ b/core/crypto/pb/crypto.pb.go
@@ -0,0 +1,260 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.6
+// protoc v5.29.2
+// source: core/crypto/pb/crypto.proto
+
+package pb
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+ unsafe "unsafe"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type KeyType int32
+
+const (
+ KeyType_RSA KeyType = 0
+ KeyType_Ed25519 KeyType = 1
+ KeyType_Secp256k1 KeyType = 2
+ KeyType_ECDSA KeyType = 3
+)
+
+// Enum value maps for KeyType.
+var (
+ KeyType_name = map[int32]string{
+ 0: "RSA",
+ 1: "Ed25519",
+ 2: "Secp256k1",
+ 3: "ECDSA",
+ }
+ KeyType_value = map[string]int32{
+ "RSA": 0,
+ "Ed25519": 1,
+ "Secp256k1": 2,
+ "ECDSA": 3,
+ }
+)
+
+func (x KeyType) Enum() *KeyType {
+ p := new(KeyType)
+ *p = x
+ return p
+}
+
+func (x KeyType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (KeyType) Descriptor() protoreflect.EnumDescriptor {
+ return file_core_crypto_pb_crypto_proto_enumTypes[0].Descriptor()
+}
+
+func (KeyType) Type() protoreflect.EnumType {
+ return &file_core_crypto_pb_crypto_proto_enumTypes[0]
+}
+
+func (x KeyType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *KeyType) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+ if err != nil {
+ return err
+ }
+ *x = KeyType(num)
+ return nil
+}
+
+// Deprecated: Use KeyType.Descriptor instead.
+func (KeyType) EnumDescriptor() ([]byte, []int) {
+ return file_core_crypto_pb_crypto_proto_rawDescGZIP(), []int{0}
+}
+
+type PublicKey struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Type *KeyType `protobuf:"varint,1,req,name=Type,enum=crypto.pb.KeyType" json:"Type,omitempty"`
+ Data []byte `protobuf:"bytes,2,req,name=Data" json:"Data,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *PublicKey) Reset() {
+ *x = PublicKey{}
+ mi := &file_core_crypto_pb_crypto_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *PublicKey) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PublicKey) ProtoMessage() {}
+
+func (x *PublicKey) ProtoReflect() protoreflect.Message {
+ mi := &file_core_crypto_pb_crypto_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PublicKey.ProtoReflect.Descriptor instead.
+func (*PublicKey) Descriptor() ([]byte, []int) {
+ return file_core_crypto_pb_crypto_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *PublicKey) GetType() KeyType {
+ if x != nil && x.Type != nil {
+ return *x.Type
+ }
+ return KeyType_RSA
+}
+
+func (x *PublicKey) GetData() []byte {
+ if x != nil {
+ return x.Data
+ }
+ return nil
+}
+
+type PrivateKey struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Type *KeyType `protobuf:"varint,1,req,name=Type,enum=crypto.pb.KeyType" json:"Type,omitempty"`
+ Data []byte `protobuf:"bytes,2,req,name=Data" json:"Data,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *PrivateKey) Reset() {
+ *x = PrivateKey{}
+ mi := &file_core_crypto_pb_crypto_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *PrivateKey) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PrivateKey) ProtoMessage() {}
+
+func (x *PrivateKey) ProtoReflect() protoreflect.Message {
+ mi := &file_core_crypto_pb_crypto_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PrivateKey.ProtoReflect.Descriptor instead.
+func (*PrivateKey) Descriptor() ([]byte, []int) {
+ return file_core_crypto_pb_crypto_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *PrivateKey) GetType() KeyType {
+ if x != nil && x.Type != nil {
+ return *x.Type
+ }
+ return KeyType_RSA
+}
+
+func (x *PrivateKey) GetData() []byte {
+ if x != nil {
+ return x.Data
+ }
+ return nil
+}
+
+var File_core_crypto_pb_crypto_proto protoreflect.FileDescriptor
+
+const file_core_crypto_pb_crypto_proto_rawDesc = "" +
+ "\n" +
+ "\x1bcore/crypto/pb/crypto.proto\x12\tcrypto.pb\"G\n" +
+ "\tPublicKey\x12&\n" +
+ "\x04Type\x18\x01 \x02(\x0e2\x12.crypto.pb.KeyTypeR\x04Type\x12\x12\n" +
+ "\x04Data\x18\x02 \x02(\fR\x04Data\"H\n" +
+ "\n" +
+ "PrivateKey\x12&\n" +
+ "\x04Type\x18\x01 \x02(\x0e2\x12.crypto.pb.KeyTypeR\x04Type\x12\x12\n" +
+ "\x04Data\x18\x02 \x02(\fR\x04Data*9\n" +
+ "\aKeyType\x12\a\n" +
+ "\x03RSA\x10\x00\x12\v\n" +
+ "\aEd25519\x10\x01\x12\r\n" +
+ "\tSecp256k1\x10\x02\x12\t\n" +
+ "\x05ECDSA\x10\x03B,Z*github.com/libp2p/go-libp2p/core/crypto/pb"
+
+var (
+ file_core_crypto_pb_crypto_proto_rawDescOnce sync.Once
+ file_core_crypto_pb_crypto_proto_rawDescData []byte
+)
+
+func file_core_crypto_pb_crypto_proto_rawDescGZIP() []byte {
+ file_core_crypto_pb_crypto_proto_rawDescOnce.Do(func() {
+ file_core_crypto_pb_crypto_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_core_crypto_pb_crypto_proto_rawDesc), len(file_core_crypto_pb_crypto_proto_rawDesc)))
+ })
+ return file_core_crypto_pb_crypto_proto_rawDescData
+}
+
+var file_core_crypto_pb_crypto_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_core_crypto_pb_crypto_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_core_crypto_pb_crypto_proto_goTypes = []any{
+ (KeyType)(0), // 0: crypto.pb.KeyType
+ (*PublicKey)(nil), // 1: crypto.pb.PublicKey
+ (*PrivateKey)(nil), // 2: crypto.pb.PrivateKey
+}
+var file_core_crypto_pb_crypto_proto_depIdxs = []int32{
+ 0, // 0: crypto.pb.PublicKey.Type:type_name -> crypto.pb.KeyType
+ 0, // 1: crypto.pb.PrivateKey.Type:type_name -> crypto.pb.KeyType
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_core_crypto_pb_crypto_proto_init() }
+func file_core_crypto_pb_crypto_proto_init() {
+ if File_core_crypto_pb_crypto_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_core_crypto_pb_crypto_proto_rawDesc), len(file_core_crypto_pb_crypto_proto_rawDesc)),
+ NumEnums: 1,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_core_crypto_pb_crypto_proto_goTypes,
+ DependencyIndexes: file_core_crypto_pb_crypto_proto_depIdxs,
+ EnumInfos: file_core_crypto_pb_crypto_proto_enumTypes,
+ MessageInfos: file_core_crypto_pb_crypto_proto_msgTypes,
+ }.Build()
+ File_core_crypto_pb_crypto_proto = out.File
+ file_core_crypto_pb_crypto_proto_goTypes = nil
+ file_core_crypto_pb_crypto_proto_depIdxs = nil
+}
diff --git a/core/crypto/pb/crypto.proto b/core/crypto/pb/crypto.proto
new file mode 100644
index 0000000000..44e504423b
--- /dev/null
+++ b/core/crypto/pb/crypto.proto
@@ -0,0 +1,22 @@
+syntax = "proto2";
+
+package crypto.pb;
+
+option go_package = "github.com/libp2p/go-libp2p/core/crypto/pb";
+
+enum KeyType {
+ RSA = 0;
+ Ed25519 = 1;
+ Secp256k1 = 2;
+ ECDSA = 3;
+}
+
+message PublicKey {
+ required KeyType Type = 1;
+ required bytes Data = 2;
+}
+
+message PrivateKey {
+ required KeyType Type = 1;
+ required bytes Data = 2;
+}
diff --git a/core/crypto/rsa_common.go b/core/crypto/rsa_common.go
new file mode 100644
index 0000000000..2b05eb6a35
--- /dev/null
+++ b/core/crypto/rsa_common.go
@@ -0,0 +1,28 @@
+package crypto
+
+import (
+ "fmt"
+ "os"
+)
+
+// WeakRsaKeyEnv is an environment variable which, when set, lowers the
+// minimum required bits of RSA keys to 512. This should be used exclusively in
+// test situations.
+const WeakRsaKeyEnv = "LIBP2P_ALLOW_WEAK_RSA_KEYS"
+
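+// MinRsaKeyBits is the smallest RSA key size (in bits) that this package
+// will generate or accept. It is lowered to 512 when WeakRsaKeyEnv is set.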
+var MinRsaKeyBits = 2048
+
+var maxRsaKeyBits = 8192
+
+// ErrRsaKeyTooSmall is returned when trying to generate or parse an RSA key
+// that's smaller than MinRsaKeyBits bits.
+var ErrRsaKeyTooSmall error
+var ErrRsaKeyTooBig error = fmt.Errorf("rsa keys must be <= %d bits", maxRsaKeyBits)
+
+func init() {
+ if _, ok := os.LookupEnv(WeakRsaKeyEnv); ok {
+ MinRsaKeyBits = 512
+ }
+
+ ErrRsaKeyTooSmall = fmt.Errorf("rsa keys must be >= %d bits to be useful", MinRsaKeyBits)
+}
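Because the check happens in `init`, the variable must be set before the test process starts; for example:

```bash
# Allow 512-bit RSA keys for this test run only.
LIBP2P_ALLOW_WEAK_RSA_KEYS=1 go test ./core/crypto/...
```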
diff --git a/core/crypto/rsa_go.go b/core/crypto/rsa_go.go
new file mode 100644
index 0000000000..845dae926a
--- /dev/null
+++ b/core/crypto/rsa_go.go
@@ -0,0 +1,154 @@
+package crypto
+
+import (
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha256"
+ "crypto/x509"
+ "errors"
+ "io"
+
+ pb "github.com/libp2p/go-libp2p/core/crypto/pb"
+ "github.com/libp2p/go-libp2p/core/internal/catch"
+)
+
+// RsaPrivateKey is an RSA private key
+type RsaPrivateKey struct {
+ sk rsa.PrivateKey
+}
+
+// RsaPublicKey is an RSA public key
+type RsaPublicKey struct {
+ k rsa.PublicKey
+
+ cached []byte
+}
+
+// GenerateRSAKeyPair generates a new rsa private and public key
+func GenerateRSAKeyPair(bits int, src io.Reader) (PrivKey, PubKey, error) {
+ if bits < MinRsaKeyBits {
+ return nil, nil, ErrRsaKeyTooSmall
+ }
+ if bits > maxRsaKeyBits {
+ return nil, nil, ErrRsaKeyTooBig
+ }
+ priv, err := rsa.GenerateKey(src, bits)
+ if err != nil {
+ return nil, nil, err
+ }
+ pk := priv.PublicKey
+ return &RsaPrivateKey{sk: *priv}, &RsaPublicKey{k: pk}, nil
+}
+
+// Verify compares a signature against input data
+func (pk *RsaPublicKey) Verify(data, sig []byte) (success bool, err error) {
+ defer func() {
+ catch.HandlePanic(recover(), &err, "RSA signature verification")
+
+ // To be safe
+ if err != nil {
+ success = false
+ }
+ }()
+ hashed := sha256.Sum256(data)
+ err = rsa.VerifyPKCS1v15(&pk.k, crypto.SHA256, hashed[:], sig)
+ if err != nil {
+ return false, err
+ }
+ return true, nil
+}
+
+func (pk *RsaPublicKey) Type() pb.KeyType {
+ return pb.KeyType_RSA
+}
+
+func (pk *RsaPublicKey) Raw() (res []byte, err error) {
+ defer func() { catch.HandlePanic(recover(), &err, "RSA public-key marshaling") }()
+ return x509.MarshalPKIXPublicKey(&pk.k)
+}
+
+// Equals checks whether this key is equal to another
+func (pk *RsaPublicKey) Equals(k Key) bool {
+ // make sure this is an RSA public key
+ other, ok := (k).(*RsaPublicKey)
+ if !ok {
+ return basicEquals(pk, k)
+ }
+
+ return pk.k.N.Cmp(other.k.N) == 0 && pk.k.E == other.k.E
+}
+
+// Sign returns a signature of the input data
+func (sk *RsaPrivateKey) Sign(message []byte) (sig []byte, err error) {
+ defer func() { catch.HandlePanic(recover(), &err, "RSA signing") }()
+ hashed := sha256.Sum256(message)
+ return rsa.SignPKCS1v15(rand.Reader, &sk.sk, crypto.SHA256, hashed[:])
+}
+
+// GetPublic returns a public key
+func (sk *RsaPrivateKey) GetPublic() PubKey {
+ return &RsaPublicKey{k: sk.sk.PublicKey}
+}
+
+func (sk *RsaPrivateKey) Type() pb.KeyType {
+ return pb.KeyType_RSA
+}
+
+func (sk *RsaPrivateKey) Raw() (res []byte, err error) {
+ defer func() { catch.HandlePanic(recover(), &err, "RSA private-key marshaling") }()
+ b := x509.MarshalPKCS1PrivateKey(&sk.sk)
+ return b, nil
+}
+
+// Equals checks whether this key is equal to another
+func (sk *RsaPrivateKey) Equals(k Key) bool {
+ // make sure this is an RSA private key
+ other, ok := (k).(*RsaPrivateKey)
+ if !ok {
+ return basicEquals(sk, k)
+ }
+
+ a := sk.sk
+ b := other.sk
+
+ // Don't care about constant time. We're only comparing the public half.
+ return a.PublicKey.N.Cmp(b.PublicKey.N) == 0 && a.PublicKey.E == b.PublicKey.E
+}
+
+// UnmarshalRsaPrivateKey returns a private key from the input x509 bytes
+func UnmarshalRsaPrivateKey(b []byte) (key PrivKey, err error) {
+ defer func() { catch.HandlePanic(recover(), &err, "RSA private-key unmarshaling") }()
+ sk, err := x509.ParsePKCS1PrivateKey(b)
+ if err != nil {
+ return nil, err
+ }
+ if sk.N.BitLen() < MinRsaKeyBits {
+ return nil, ErrRsaKeyTooSmall
+ }
+ if sk.N.BitLen() > maxRsaKeyBits {
+ return nil, ErrRsaKeyTooBig
+ }
+ return &RsaPrivateKey{sk: *sk}, nil
+}
+
+// UnmarshalRsaPublicKey returns a public key from the input x509 bytes
+func UnmarshalRsaPublicKey(b []byte) (key PubKey, err error) {
+ defer func() { catch.HandlePanic(recover(), &err, "RSA public-key unmarshaling") }()
+ pub, err := x509.ParsePKIXPublicKey(b)
+ if err != nil {
+ return nil, err
+ }
+ pk, ok := pub.(*rsa.PublicKey)
+ if !ok {
+ return nil, errors.New("not actually an rsa public key")
+ }
+ if pk.N.BitLen() < MinRsaKeyBits {
+ return nil, ErrRsaKeyTooSmall
+ }
+ if pk.N.BitLen() > maxRsaKeyBits {
+ return nil, ErrRsaKeyTooBig
+ }
+
+ return &RsaPublicKey{k: *pk}, nil
+}
diff --git a/core/crypto/rsa_test.go b/core/crypto/rsa_test.go
new file mode 100644
index 0000000000..dbc88dda25
--- /dev/null
+++ b/core/crypto/rsa_test.go
@@ -0,0 +1,162 @@
+package crypto
+
+import (
+ "crypto/rand"
+ "testing"
+)
+
+func TestRSABasicSignAndVerify(t *testing.T) {
+ priv, pub, err := GenerateRSAKeyPair(2048, rand.Reader)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ data := []byte("hello! and welcome to some awesome crypto primitives")
+
+ sig, err := priv.Sign(data)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ok, err := pub.Verify(data, sig)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !ok {
+ t.Fatal("signature didn't match")
+ }
+
+ // change data
+ data[0] = ^data[0]
+ ok, err = pub.Verify(data, sig)
+ if err == nil {
+ t.Fatal("should have produced a verification error")
+ }
+
+ if ok {
+ t.Fatal("signature matched and shouldn't")
+ }
+}
+
+func TestRSASmallKey(t *testing.T) {
+ _, _, err := GenerateRSAKeyPair(MinRsaKeyBits/2, rand.Reader)
+ if err != ErrRsaKeyTooSmall {
+ t.Fatal("should have refused to create small RSA key")
+ }
+ MinRsaKeyBits /= 2
+ badPriv, badPub, err := GenerateRSAKeyPair(MinRsaKeyBits, rand.Reader)
+ if err != nil {
+ t.Fatalf("should have succeeded, got: %s", err)
+ }
+ pubBytes, err := MarshalPublicKey(badPub)
+ if err != nil {
+ t.Fatal(err)
+ }
+ privBytes, err := MarshalPrivateKey(badPriv)
+ if err != nil {
+ t.Fatal(err)
+ }
+ MinRsaKeyBits *= 2
+ _, err = UnmarshalPublicKey(pubBytes)
+ if err != ErrRsaKeyTooSmall {
+ t.Fatal("should have refused to unmarshal a weak key")
+ }
+ _, err = UnmarshalPrivateKey(privBytes)
+ if err != ErrRsaKeyTooSmall {
+ t.Fatal("should have refused to unmarshal a weak key")
+ }
+}
+
+func TestRSABigKeyFailsToGenerate(t *testing.T) {
+ _, _, err := GenerateRSAKeyPair(maxRsaKeyBits*2, rand.Reader)
+ if err != ErrRsaKeyTooBig {
+ t.Fatal("should have refused to create too big RSA key")
+ }
+}
+
+func TestRSABigKey(t *testing.T) {
+ // Shrink the global limit so this test runs faster.
+ // Note that the limit is also adjusted below: it is doubled so the key
+ // can be generated, then halved again so that same key exceeds it.
+ origSize := maxRsaKeyBits
+ maxRsaKeyBits = 2048
+ defer func() { maxRsaKeyBits = origSize }()
+
+ maxRsaKeyBits *= 2
+ badPriv, badPub, err := GenerateRSAKeyPair(maxRsaKeyBits, rand.Reader)
+ if err != nil {
+ t.Fatalf("should have succeeded, got: %s", err)
+ }
+ pubBytes, err := MarshalPublicKey(badPub)
+ if err != nil {
+ t.Fatal(err)
+ }
+ privBytes, err := MarshalPrivateKey(badPriv)
+ if err != nil {
+ t.Fatal(err)
+ }
+ maxRsaKeyBits /= 2
+ _, err = UnmarshalPublicKey(pubBytes)
+ if err != ErrRsaKeyTooBig {
+ t.Fatal("should have refused to unmarshal a too big key")
+ }
+ _, err = UnmarshalPrivateKey(privBytes)
+ if err != ErrRsaKeyTooBig {
+ t.Fatal("should have refused to unmarshal a too big key")
+ }
+}
+
+func TestRSASignZero(t *testing.T) {
+ priv, pub, err := GenerateRSAKeyPair(2048, rand.Reader)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ data := make([]byte, 0)
+ sig, err := priv.Sign(data)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ok, err := pub.Verify(data, sig)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !ok {
+ t.Fatal("signature didn't match")
+ }
+}
+
+func TestRSAMarshalLoop(t *testing.T) {
+ priv, pub, err := GenerateRSAKeyPair(2048, rand.Reader)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ privB, err := MarshalPrivateKey(priv)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ privNew, err := UnmarshalPrivateKey(privB)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !priv.Equals(privNew) || !privNew.Equals(priv) {
+ t.Fatal("keys are not equal")
+ }
+
+ pubB, err := MarshalPublicKey(pub)
+ if err != nil {
+ t.Fatal(err)
+ }
+ pubNew, err := UnmarshalPublicKey(pubB)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !pub.Equals(pubNew) || !pubNew.Equals(pub) {
+ t.Fatal("keys are not equal")
+ }
+}
diff --git a/core/crypto/secp256k1.go b/core/crypto/secp256k1.go
new file mode 100644
index 0000000000..4798f39587
--- /dev/null
+++ b/core/crypto/secp256k1.go
@@ -0,0 +1,127 @@
+package crypto
+
+import (
+ "crypto/sha256"
+ "fmt"
+ "io"
+
+ pb "github.com/libp2p/go-libp2p/core/crypto/pb"
+ "github.com/libp2p/go-libp2p/core/internal/catch"
+
+ "github.com/decred/dcrd/dcrec/secp256k1/v4"
+ "github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa"
+)
+
+// Secp256k1PrivateKey is a Secp256k1 private key
+type Secp256k1PrivateKey secp256k1.PrivateKey
+
+// Secp256k1PublicKey is a Secp256k1 public key
+type Secp256k1PublicKey secp256k1.PublicKey
+
+// GenerateSecp256k1Key generates a new Secp256k1 private and public key pair
+func GenerateSecp256k1Key(_ io.Reader) (PrivKey, PubKey, error) {
+ privk, err := secp256k1.GeneratePrivateKey()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ k := (*Secp256k1PrivateKey)(privk)
+ return k, k.GetPublic(), nil
+}
+
+// UnmarshalSecp256k1PrivateKey returns a private key from bytes
+func UnmarshalSecp256k1PrivateKey(data []byte) (k PrivKey, err error) {
+ if len(data) != secp256k1.PrivKeyBytesLen {
+ return nil, fmt.Errorf("expected secp256k1 data size to be %d", secp256k1.PrivKeyBytesLen)
+ }
+ defer func() { catch.HandlePanic(recover(), &err, "secp256k1 private-key unmarshal") }()
+
+ privk := secp256k1.PrivKeyFromBytes(data)
+ return (*Secp256k1PrivateKey)(privk), nil
+}
+
+// UnmarshalSecp256k1PublicKey returns a public key from bytes
+func UnmarshalSecp256k1PublicKey(data []byte) (_k PubKey, err error) {
+ defer func() { catch.HandlePanic(recover(), &err, "secp256k1 public-key unmarshal") }()
+ k, err := secp256k1.ParsePubKey(data)
+ if err != nil {
+ return nil, err
+ }
+
+ return (*Secp256k1PublicKey)(k), nil
+}
+
+// Type returns the private key type
+func (k *Secp256k1PrivateKey) Type() pb.KeyType {
+ return pb.KeyType_Secp256k1
+}
+
+// Raw returns the bytes of the key
+func (k *Secp256k1PrivateKey) Raw() ([]byte, error) {
+ return (*secp256k1.PrivateKey)(k).Serialize(), nil
+}
+
+// Equals compares two private keys
+func (k *Secp256k1PrivateKey) Equals(o Key) bool {
+ sk, ok := o.(*Secp256k1PrivateKey)
+ if !ok {
+ return basicEquals(k, o)
+ }
+
+ return k.GetPublic().Equals(sk.GetPublic())
+}
+
+// Sign returns a signature from input data
+func (k *Secp256k1PrivateKey) Sign(data []byte) (_sig []byte, err error) {
+ defer func() { catch.HandlePanic(recover(), &err, "secp256k1 signing") }()
+ key := (*secp256k1.PrivateKey)(k)
+ hash := sha256.Sum256(data)
+ sig := ecdsa.Sign(key, hash[:])
+
+ return sig.Serialize(), nil
+}
+
+// GetPublic returns a public key
+func (k *Secp256k1PrivateKey) GetPublic() PubKey {
+ return (*Secp256k1PublicKey)((*secp256k1.PrivateKey)(k).PubKey())
+}
+
+// Type returns the public key type
+func (k *Secp256k1PublicKey) Type() pb.KeyType {
+ return pb.KeyType_Secp256k1
+}
+
+// Raw returns the bytes of the key
+func (k *Secp256k1PublicKey) Raw() (res []byte, err error) {
+ defer func() { catch.HandlePanic(recover(), &err, "secp256k1 public key marshaling") }()
+ return (*secp256k1.PublicKey)(k).SerializeCompressed(), nil
+}
+
+// Equals compares two public keys
+func (k *Secp256k1PublicKey) Equals(o Key) bool {
+ sk, ok := o.(*Secp256k1PublicKey)
+ if !ok {
+ return basicEquals(k, o)
+ }
+
+ return (*secp256k1.PublicKey)(k).IsEqual((*secp256k1.PublicKey)(sk))
+}
+
+// Verify compares a signature against the input data
+func (k *Secp256k1PublicKey) Verify(data []byte, sigStr []byte) (success bool, err error) {
+ defer func() {
+ catch.HandlePanic(recover(), &err, "secp256k1 signature verification")
+
+ // To be extra safe.
+ if err != nil {
+ success = false
+ }
+ }()
+ sig, err := ecdsa.ParseDERSignature(sigStr)
+ if err != nil {
+ return false, err
+ }
+
+ hash := sha256.Sum256(data)
+ return sig.Verify(hash[:], (*secp256k1.PublicKey)(k)), nil
+}
diff --git a/core/crypto/secp256k1_test.go b/core/crypto/secp256k1_test.go
new file mode 100644
index 0000000000..67bbe470b9
--- /dev/null
+++ b/core/crypto/secp256k1_test.go
@@ -0,0 +1,96 @@
+package crypto
+
+import (
+ "crypto/rand"
+ "testing"
+)
+
+func TestSecp256k1BasicSignAndVerify(t *testing.T) {
+ priv, pub, err := GenerateSecp256k1Key(rand.Reader)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ data := []byte("hello! and welcome to some awesome crypto primitives")
+
+ sig, err := priv.Sign(data)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ok, err := pub.Verify(data, sig)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !ok {
+ t.Fatal("signature didn't match")
+ }
+
+ // change data
+ data[0] = ^data[0]
+ ok, err = pub.Verify(data, sig)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if ok {
+ t.Fatal("signature matched and shouldn't")
+ }
+}
+
+func TestSecp256k1SignZero(t *testing.T) {
+ priv, pub, err := GenerateSecp256k1Key(rand.Reader)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ data := make([]byte, 0)
+ sig, err := priv.Sign(data)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ok, err := pub.Verify(data, sig)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !ok {
+ t.Fatal("signature didn't match")
+ }
+}
+
+func TestSecp256k1MarshalLoop(t *testing.T) {
+ priv, pub, err := GenerateSecp256k1Key(rand.Reader)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ privB, err := MarshalPrivateKey(priv)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ privNew, err := UnmarshalPrivateKey(privB)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !priv.Equals(privNew) || !privNew.Equals(priv) {
+ t.Fatal("keys are not equal")
+ }
+
+ pubB, err := MarshalPublicKey(pub)
+ if err != nil {
+ t.Fatal(err)
+ }
+ pubNew, err := UnmarshalPublicKey(pubB)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !pub.Equals(pubNew) || !pubNew.Equals(pub) {
+ t.Fatal("keys are not equal")
+ }
+}
diff --git a/core/crypto/test_data/0.priv b/core/crypto/test_data/0.priv
new file mode 100644
index 0000000000..9047d5d95d
Binary files /dev/null and b/core/crypto/test_data/0.priv differ
diff --git a/core/crypto/test_data/0.pub b/core/crypto/test_data/0.pub
new file mode 100644
index 0000000000..d4295e8893
Binary files /dev/null and b/core/crypto/test_data/0.pub differ
diff --git a/core/crypto/test_data/0.sig b/core/crypto/test_data/0.sig
new file mode 100644
index 0000000000..2f16825274
Binary files /dev/null and b/core/crypto/test_data/0.sig differ
diff --git a/core/crypto/test_data/2.priv b/core/crypto/test_data/2.priv
new file mode 100644
index 0000000000..1004ab9153
Binary files /dev/null and b/core/crypto/test_data/2.priv differ
diff --git a/core/crypto/test_data/2.pub b/core/crypto/test_data/2.pub
new file mode 100644
index 0000000000..dd984bdcc8
Binary files /dev/null and b/core/crypto/test_data/2.pub differ
diff --git a/core/crypto/test_data/2.sig b/core/crypto/test_data/2.sig
new file mode 100644
index 0000000000..b96001bc8b
Binary files /dev/null and b/core/crypto/test_data/2.sig differ
diff --git a/core/crypto/test_data/3.priv b/core/crypto/test_data/3.priv
new file mode 100644
index 0000000000..7a05f359f8
Binary files /dev/null and b/core/crypto/test_data/3.priv differ
diff --git a/core/crypto/test_data/3.pub b/core/crypto/test_data/3.pub
new file mode 100644
index 0000000000..f4551f8811
Binary files /dev/null and b/core/crypto/test_data/3.pub differ
diff --git a/core/crypto/test_data/3.sig b/core/crypto/test_data/3.sig
new file mode 100644
index 0000000000..253c09f6d7
Binary files /dev/null and b/core/crypto/test_data/3.sig differ
diff --git a/core/discovery/discovery.go b/core/discovery/discovery.go
new file mode 100644
index 0000000000..feeb2c7970
--- /dev/null
+++ b/core/discovery/discovery.go
@@ -0,0 +1,27 @@
+// Package discovery provides service advertisement and peer discovery interfaces for libp2p.
+package discovery
+
+import (
+ "context"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+)
+
+// Advertiser is an interface for advertising services
+type Advertiser interface {
+ // Advertise advertises a service
+ Advertise(ctx context.Context, ns string, opts ...Option) (time.Duration, error)
+}
+
+// Discoverer is an interface for peer discovery
+type Discoverer interface {
+ // FindPeers discovers peers providing a service
+ FindPeers(ctx context.Context, ns string, opts ...Option) (<-chan peer.AddrInfo, error)
+}
+
+// Discovery is an interface that combines service advertisement and peer discovery
+type Discovery interface {
+ Advertiser
+ Discoverer
+}
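
The two interfaces compose naturally: a caller advertises under a namespace and then queries that same namespace. A minimal sketch of driving these interfaces, where `svc` stands in for any concrete `Discovery` implementation (e.g. a routing- or mDNS-based one) and the namespace string is illustrative:

```go
package main

import (
	"context"
	"fmt"

	"github.com/libp2p/go-libp2p/core/discovery"
)

// findPeers is a sketch; svc is any concrete discovery.Discovery implementation.
func findPeers(ctx context.Context, svc discovery.Discovery) error {
	// Advertise ourselves under a namespace; the returned duration hints
	// when we should re-advertise.
	ttl, err := svc.Advertise(ctx, "my-app/v1")
	if err != nil {
		return err
	}
	fmt.Println("re-advertise after:", ttl)

	// Discover peers in the same namespace; implementations typically close
	// the channel when discovery finishes or the context is done.
	ch, err := svc.FindPeers(ctx, "my-app/v1")
	if err != nil {
		return err
	}
	for ai := range ch {
		fmt.Println("found peer:", ai.ID, ai.Addrs)
	}
	return nil
}
```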
diff --git a/core/discovery/options.go b/core/discovery/options.go
new file mode 100644
index 0000000000..7b28305268
--- /dev/null
+++ b/core/discovery/options.go
@@ -0,0 +1,41 @@
+package discovery
+
+import "time"
+
+// Option is a single discovery option.
+type Option func(opts *Options) error
+
+// Options is a set of discovery options.
+type Options struct {
+ Ttl time.Duration
+ Limit int
+
+ // Other (implementation-specific) options
+ Other map[interface{}]interface{}
+}
+
+// Apply applies the given options to this Options struct.
+func (opts *Options) Apply(options ...Option) error {
+ for _, o := range options {
+ if err := o(opts); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// TTL is an option that provides a hint for the duration of an advertisement
+func TTL(ttl time.Duration) Option {
+ return func(opts *Options) error {
+ opts.Ttl = ttl
+ return nil
+ }
+}
+
+// Limit is an option that provides an upper bound on the peer count for discovery
+func Limit(limit int) Option {
+ return func(opts *Options) error {
+ opts.Limit = limit
+ return nil
+ }
+}
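
On the implementation side, `Apply` folds a caller's variadic options into a single `Options` value. A sketch of how an implementation might consume them (the function name and the 3-hour fallback are illustrative only, not a defined default):

```go
package discovery

import "time"

// advertiseWithOpts sketches how an implementation might consume options.
func advertiseWithOpts(ns string, options ...Option) (time.Duration, error) {
	var opts Options
	if err := opts.Apply(options...); err != nil {
		return 0, err
	}
	ttl := opts.Ttl
	if ttl == 0 {
		ttl = 3 * time.Hour // illustrative fallback, not a defined default
	}
	// ... register ns for ttl, honoring opts.Limit where relevant ...
	return ttl, nil
}
```

Callers combine options freely, e.g. `svc.Advertise(ctx, ns, discovery.TTL(time.Hour))` or `svc.FindPeers(ctx, ns, discovery.Limit(10))`.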
diff --git a/core/event/addrs.go b/core/event/addrs.go
new file mode 100644
index 0000000000..67849aedb3
--- /dev/null
+++ b/core/event/addrs.go
@@ -0,0 +1,88 @@
+package event
+
+import (
+ "github.com/libp2p/go-libp2p/core/record"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+// AddrAction represents an action taken on one of a Host's listen addresses.
+// It is used to add context to address change events in EvtLocalAddressesUpdated.
+type AddrAction int
+
+const (
+ // Unknown means that the event producer was unable to determine why the address
+ // is in the current state.
+ Unknown AddrAction = iota
+
+ // Added means that the address is new and was not present prior to the event.
+ Added
+
+ // Maintained means that the address was not altered between the current and
+ // previous states.
+ Maintained
+
+ // Removed means that the address was removed from the Host.
+ Removed
+)
+
+// UpdatedAddress is used in the EvtLocalAddressesUpdated event to convey
+// address change information.
+type UpdatedAddress struct {
+ // Address contains the address that was updated.
+ Address ma.Multiaddr
+
+ // Action indicates what action was taken on the address during the
+ // event. May be Unknown if the event producer cannot produce diffs.
+ Action AddrAction
+}
+
+// EvtLocalAddressesUpdated should be emitted when the set of listen addresses for
+// the local host changes. This may happen for a number of reasons. For example,
+// we may have opened a new relay connection, established a new NAT mapping via
+// UPnP, or been informed of our observed address by another peer.
+//
+// EvtLocalAddressesUpdated contains a snapshot of the current listen addresses,
+// and may also contain a diff between the current state and the previous state.
+// If the event producer is capable of creating a diff, the Diffs field will be
+// true, and event consumers can inspect the Action field of each UpdatedAddress
+// to see how each address was modified.
+//
+// For example, the Action will tell you whether an address in
+// the Current list was Added by the event producer, or was Maintained without
+// changes. Addresses that were removed from the Host will have the AddrAction
+// of Removed, and will be in the Removed list.
+//
+// If the event producer is not capable of producing diffs, the Diffs field will
+// be false, the Removed list will always be empty, and the Action for each
+// UpdatedAddress in the Current list will be Unknown.
+//
+// In addition to the above, EvtLocalAddressesUpdated also contains the updated peer.PeerRecord
+// for the Current set of listen addresses, wrapped in a record.Envelope and signed by the Host's private key.
+// This record can be shared with other peers to inform them of what we believe are our diallable addresses
+// in a secure and authenticated way.
+type EvtLocalAddressesUpdated struct {
+
+ // Diffs indicates whether this event contains a diff of the Host's previous
+ // address set.
+ Diffs bool
+
+ // Current contains all current listen addresses for the Host.
+ // If Diffs == true, the Action field of each UpdatedAddress will tell
+ // you whether an address was Added, or was Maintained from the previous
+ // state.
+ Current []UpdatedAddress
+
+ // Removed contains addresses that were removed from the Host.
+ // This field is only set when Diffs == true.
+ Removed []UpdatedAddress
+
+ // SignedPeerRecord contains our own updated peer.PeerRecord, listing the addresses enumerated in Current,
+ // wrapped in a record.Envelope and signed by the Host's private key.
+ SignedPeerRecord *record.Envelope
+}
+
+// EvtAutoRelayAddrsUpdated is sent by the autorelay when the node's relay addresses are updated
+type EvtAutoRelayAddrsUpdated struct {
+ RelayAddrs []ma.Multiaddr
+}
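
A sketch of consuming EvtLocalAddressesUpdated from a host's bus; events are delivered by value, and `h` is assumed to be any `host.Host`:

```go
package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p/core/event"
	"github.com/libp2p/go-libp2p/core/host"
)

// watchAddrs is a sketch; h is any host.Host.
func watchAddrs(h host.Host) error {
	sub, err := h.EventBus().Subscribe(new(event.EvtLocalAddressesUpdated))
	if err != nil {
		return err
	}
	defer sub.Close()

	for e := range sub.Out() {
		evt := e.(event.EvtLocalAddressesUpdated) // delivered by value
		for _, ua := range evt.Current {
			if evt.Diffs && ua.Action == event.Added {
				fmt.Println("new listen addr:", ua.Address)
			}
		}
		for _, ua := range evt.Removed {
			fmt.Println("removed listen addr:", ua.Address)
		}
	}
	return nil
}
```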
diff --git a/core/event/bus.go b/core/event/bus.go
new file mode 100644
index 0000000000..1929f064d2
--- /dev/null
+++ b/core/event/bus.go
@@ -0,0 +1,100 @@
+package event
+
+import (
+ "io"
+ "reflect"
+)
+
+// SubscriptionOpt represents a subscriber option. Use the options exposed by the implementation of choice.
+type SubscriptionOpt = func(interface{}) error
+
+// EmitterOpt represents an emitter option. Use the options exposed by the implementation of choice.
+type EmitterOpt = func(interface{}) error
+
+// CancelFunc closes a subscriber.
+type CancelFunc = func()
+
+// wildcardSubscriptionType is a virtual type to represent wildcard
+// subscriptions.
+type wildcardSubscriptionType interface{}
+
+// WildcardSubscription is the type to subscribe to receive all events
+// emitted in the eventbus.
+var WildcardSubscription = new(wildcardSubscriptionType)
+
+// Emitter represents an actor that emits events onto the eventbus.
+type Emitter interface {
+ io.Closer
+
+ // Emit emits an event onto the eventbus. If any channel subscribed to the topic is blocked,
+ // calls to Emit will block.
+ //
+ // Calling this function with the wrong event type will cause a panic.
+ Emit(evt interface{}) error
+}
+
+// Subscription represents a subscription to one or multiple event types.
+type Subscription interface {
+ io.Closer
+
+ // Out returns the channel from which to consume events.
+ Out() <-chan interface{}
+
+ // Name returns the name for the subscription
+ Name() string
+}
+
+// Bus is an interface for a type-based event delivery system.
+type Bus interface {
+ // Subscribe creates a new Subscription.
+ //
+ // eventType can be either a pointer to a single event type, or a slice of pointers to
+ // subscribe to multiple event types at once, under a single subscription (and channel).
+ //
+ // Failing to drain the channel may cause publishers to block.
+ //
+ // If you want to subscribe to ALL events emitted in the bus, use
+ // `WildcardSubscription` as the `eventType`:
+ //
+ // eventbus.Subscribe(WildcardSubscription)
+ //
+ // Simple example
+ //
+ // sub, err := eventbus.Subscribe(new(EventType))
+ // defer sub.Close()
+ // for e := range sub.Out() {
+ // event := e.(EventType) // guaranteed safe
+ // [...]
+ // }
+ //
+ // Multi-type example
+ //
+ // sub, err := eventbus.Subscribe([]interface{}{new(EventA), new(EventB)})
+ // defer sub.Close()
+ // for e := range sub.Out() {
+ // switch e.(type) {
+ // case EventA:
+ // [...]
+ // case EventB:
+ // [...]
+ // }
+ // }
+ Subscribe(eventType interface{}, opts ...SubscriptionOpt) (Subscription, error)
+
+ // Emitter creates a new event emitter.
+ //
+ // eventType accepts typed nil pointers, and uses the type information for wiring purposes.
+ //
+ // Example:
+ // em, err := eventbus.Emitter(new(EventT))
+ // defer em.Close() // MUST call this after being done with the emitter
+ // em.Emit(EventT{})
+ Emitter(eventType interface{}, opts ...EmitterOpt) (Emitter, error)
+
+ // GetAllEventTypes returns all the event types that this bus knows about
+ // (having emitters and subscribers). It omits the WildcardSubscription.
+ //
+ // The caller is guaranteed that this function will only return value types;
+ // no pointer types will be returned.
+ GetAllEventTypes() []reflect.Type
+}
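
Beyond the single- and multi-type forms documented above, the wildcard subscription receives every event on the bus; a type switch then fans them out. A sketch (the logging and the handled event type are illustrative):

```go
package main

import (
	"log"

	"github.com/libp2p/go-libp2p/core/event"
)

// logAllEvents is a sketch; bus is any event.Bus implementation.
func logAllEvents(bus event.Bus) error {
	sub, err := bus.Subscribe(event.WildcardSubscription)
	if err != nil {
		return err
	}
	defer sub.Close()

	for e := range sub.Out() {
		switch evt := e.(type) {
		case event.EvtLocalProtocolsUpdated:
			log.Println("local protocols changed:", evt.Added, evt.Removed)
		default:
			log.Printf("event: %T", evt)
		}
	}
	return nil
}
```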
diff --git a/core/event/dht.go b/core/event/dht.go
new file mode 100644
index 0000000000..22e924e256
--- /dev/null
+++ b/core/event/dht.go
@@ -0,0 +1,21 @@
+package event
+
+// RawJSON is a type that contains a raw JSON string.
+type RawJSON string
+
+// GenericDHTEvent is a type that encapsulates an actual DHT event by carrying
+// its raw JSON.
+//
+// Context: the DHT event system is rather bespoke and a bit messy at the time of writing,
+// so until we unify/clean that up, this event bridges the gap. It should only
+// be consumed for informational purposes.
+//
+// EXPERIMENTAL: this will likely be removed if/when the DHT event types are
+// hoisted to core, and the DHT event system is reconciled with the eventbus.
+type GenericDHTEvent struct {
+ // Type is the type of the DHT event that occurred.
+ Type string
+
+ // Raw is the raw JSON representation of the event payload.
+ Raw RawJSON
+}
diff --git a/core/event/doc.go b/core/event/doc.go
new file mode 100644
index 0000000000..7ba4bd6f45
--- /dev/null
+++ b/core/event/doc.go
@@ -0,0 +1,11 @@
+// Package event contains the abstractions for a local event bus, along with the standard events
+// that libp2p subsystems may emit.
+//
+// Source code is arranged as follows:
+// - doc.go: this file.
+// - bus.go: abstractions for the event bus.
+// - rest: event structs, sensibly categorised in files by entity, and following this naming convention:
+// Evt[Entity (noun)][Event (verb past tense / gerund)]
+// The past tense is used to convey that something happened, whereas the gerund form of the verb (-ing)
+// expresses that a process is in progress. Examples: EvtConnEstablishing, EvtConnEstablished.
+package event
diff --git a/core/event/identify.go b/core/event/identify.go
new file mode 100644
index 0000000000..888572a2d5
--- /dev/null
+++ b/core/event/identify.go
@@ -0,0 +1,46 @@
+package event
+
+import (
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/protocol"
+ "github.com/libp2p/go-libp2p/core/record"
+ "github.com/multiformats/go-multiaddr"
+)
+
+// EvtPeerIdentificationCompleted is emitted when the initial identification round for a peer is completed.
+type EvtPeerIdentificationCompleted struct {
+ // Peer is the ID of the peer whose identification succeeded.
+ Peer peer.ID
+
+ // Conn is the connection we identified.
+ Conn network.Conn
+
+ // ListenAddrs is the list of addresses the peer is listening on.
+ ListenAddrs []multiaddr.Multiaddr
+
+ // Protocols is the list of protocols the peer advertised on this connection.
+ Protocols []protocol.ID
+
+ // SignedPeerRecord is the provided signed peer record of the peer. May be nil.
+ SignedPeerRecord *record.Envelope
+
+ // AgentVersion is like a UserAgent string in browsers, or the client version in
+ // bittorrent; it includes the client name and version.
+ AgentVersion string
+
+ // ProtocolVersion is the protocolVersion field in the identify message
+ ProtocolVersion string
+
+ // ObservedAddr is our side's connection address as observed by the
+ // peer. This is not verified, the peer could return anything here.
+ ObservedAddr multiaddr.Multiaddr
+}
+
+// EvtPeerIdentificationFailed is emitted when the initial identification round for a peer failed.
+type EvtPeerIdentificationFailed struct {
+ // Peer is the ID of the peer whose identification failed.
+ Peer peer.ID
+ // Reason is the reason why identification failed.
+ Reason error
+}
diff --git a/core/event/nattype.go b/core/event/nattype.go
new file mode 100644
index 0000000000..5ac4c525de
--- /dev/null
+++ b/core/event/nattype.go
@@ -0,0 +1,18 @@
+package event
+
+import "github.com/libp2p/go-libp2p/core/network"
+
+// EvtNATDeviceTypeChanged is an event struct to be emitted when the type of the NAT device changes for a Transport Protocol.
+//
+// Note: This event is meaningful ONLY if the AutoNAT Reachability is Private.
+// Consumers of this event should ALSO consume the `EvtLocalReachabilityChanged` event and interpret
+// this event ONLY if the Reachability on the `EvtLocalReachabilityChanged` is Private.
+type EvtNATDeviceTypeChanged struct {
+ // TransportProtocol is the Transport Protocol for which the NAT Device Type has been determined.
+ TransportProtocol network.NATTransportProtocol
+ // NatDeviceType indicates the type of the NAT Device for the Transport Protocol.
+ // Currently, it can be either an `EndpointIndependent NAT` or an `EndpointDependent NAT`. Please see the detailed documentation
+ // on `network.NATDeviceType` enumerations for a better understanding of what these types mean and
+ // how they impact Connectivity and Hole Punching.
+ NatDeviceType network.NATDeviceType
+}
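
Putting the note above into practice, a consumer keeps the latest reachability and drops NAT-type events unless reachability is Private. A sketch, assuming `reachSub` and `natSub` are subscriptions to the two event types:

```go
package main

import (
	"log"

	"github.com/libp2p/go-libp2p/core/event"
	"github.com/libp2p/go-libp2p/core/network"
)

// watchNATType is a sketch; reachSub and natSub are subscriptions to
// EvtLocalReachabilityChanged and EvtNATDeviceTypeChanged respectively.
func watchNATType(reachSub, natSub event.Subscription) {
	reachability := network.ReachabilityUnknown
	for {
		select {
		case e, ok := <-reachSub.Out():
			if !ok {
				return
			}
			reachability = e.(event.EvtLocalReachabilityChanged).Reachability
		case e, ok := <-natSub.Out():
			if !ok {
				return
			}
			if reachability != network.ReachabilityPrivate {
				continue // only meaningful when we are behind a NAT
			}
			evt := e.(event.EvtNATDeviceTypeChanged)
			log.Println(evt.TransportProtocol, "NAT type:", evt.NatDeviceType)
		}
	}
}
```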
diff --git a/core/event/network.go b/core/event/network.go
new file mode 100644
index 0000000000..37dd09ca9a
--- /dev/null
+++ b/core/event/network.go
@@ -0,0 +1,55 @@
+package event
+
+import (
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+)
+
+// EvtPeerConnectednessChanged should be emitted every time the "connectedness" to a
+// given peer changes. Specifically, this event is emitted in the following
+// cases:
+//
+// - Connectedness = Connected: Every time we transition from having no
+// connections to a peer to having at least one connection to the peer.
+// - Connectedness = NotConnected: Every time we transition from having at least
+// one connection to a peer to having no connections to the peer.
+//
+// Additional connectedness states may be added in the future. This list should
+// not be considered exhaustive.
+//
+// Take note:
+//
+// - It's possible to have _multiple_ connections to a given peer.
+// - Both libp2p and networks are asynchronous.
+//
+// This means that all the following situations are possible:
+//
+// A connection is cut and is re-established:
+//
+// - Peer A observes a transition from Connected -> NotConnected -> Connected
+// - Peer B observes a transition from Connected -> NotConnected -> Connected
+//
+// Explanation: Both peers observe the connection die. This is the "nice" case.
+//
+// A connection is cut and is re-established.
+//
+// - Peer A observes a transition from Connected -> NotConnected -> Connected.
+// - Peer B observes no transition.
+//
+// Explanation: Peer A re-establishes the dead connection. Peer B observes the
+// new connection form before it observes the old connection die.
+//
+// A connection is cut:
+//
+// - Peer A observes no transition.
+// - Peer B observes no transition.
+//
+// Explanation: There were two connections and one was cut. This connection
+// might have been in active use but neither peer will observe a change in
+// "connectedness". Peers should always make sure to retry network requests.
+type EvtPeerConnectednessChanged struct {
+ // Peer is the remote peer whose connectedness has changed.
+ Peer peer.ID
+ // Connectedness is the new connectedness state.
+ Connectedness network.Connectedness
+}
diff --git a/core/event/protocol.go b/core/event/protocol.go
new file mode 100644
index 0000000000..b8f1fe637c
--- /dev/null
+++ b/core/event/protocol.go
@@ -0,0 +1,26 @@
+package event
+
+import (
+ peer "github.com/libp2p/go-libp2p/core/peer"
+ protocol "github.com/libp2p/go-libp2p/core/protocol"
+)
+
+// EvtPeerProtocolsUpdated should be emitted when a peer we're connected to adds or removes protocols from their stack.
+type EvtPeerProtocolsUpdated struct {
+ // Peer is the peer whose protocols were updated.
+ Peer peer.ID
+ // Added enumerates the protocols that were added by this peer.
+ Added []protocol.ID
+ // Removed enumerates the protocols that were removed by this peer.
+ Removed []protocol.ID
+}
+
+// EvtLocalProtocolsUpdated should be emitted when stream handlers are attached or detached from the local host.
+// For handlers attached with a matcher predicate (host.SetStreamHandlerMatch()), only the protocol ID will be
+// included in this event.
+type EvtLocalProtocolsUpdated struct {
+ // Added enumerates the protocols that were added locally.
+ Added []protocol.ID
+ // Removed enumerates the protocols that were removed locally.
+ Removed []protocol.ID
+}
diff --git a/core/event/reachability.go b/core/event/reachability.go
new file mode 100644
index 0000000000..18032544eb
--- /dev/null
+++ b/core/event/reachability.go
@@ -0,0 +1,24 @@
+package event
+
+import (
+ "github.com/libp2p/go-libp2p/core/network"
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+// EvtLocalReachabilityChanged is an event struct to be emitted when the local
+// node's reachability changes state.
+//
+// This event is usually emitted by the AutoNAT subsystem.
+type EvtLocalReachabilityChanged struct {
+ Reachability network.Reachability
+}
+
+// EvtHostReachableAddrsChanged is sent when the host's reachable or unreachable addresses change.
+// Reachable, Unreachable, and Unknown only contain public IP or DNS addresses.
+//
+// Experimental: This API is unstable. Any changes to this event will be done without a deprecation notice.
+type EvtHostReachableAddrsChanged struct {
+ Reachable []ma.Multiaddr
+ Unreachable []ma.Multiaddr
+ Unknown []ma.Multiaddr
+}
diff --git a/core/host/helpers.go b/core/host/helpers.go
new file mode 100644
index 0000000000..e57f326a7c
--- /dev/null
+++ b/core/host/helpers.go
@@ -0,0 +1,11 @@
+package host
+
+import "github.com/libp2p/go-libp2p/core/peer"
+
+// InfoFromHost returns a peer.AddrInfo struct with the Host's ID and all of its Addrs.
+func InfoFromHost(h Host) *peer.AddrInfo {
+ return &peer.AddrInfo{
+ ID: h.ID(),
+ Addrs: h.Addrs(),
+ }
+}
diff --git a/core/host/host.go b/core/host/host.go
new file mode 100644
index 0000000000..ca0a7e00d9
--- /dev/null
+++ b/core/host/host.go
@@ -0,0 +1,75 @@
+// Package host provides the core Host interface for libp2p.
+//
+// Host represents a single libp2p node in a peer-to-peer network.
+package host
+
+import (
+ "context"
+
+ "github.com/libp2p/go-libp2p/core/connmgr"
+ "github.com/libp2p/go-libp2p/core/event"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/libp2p/go-libp2p/core/protocol"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+// Host is an object participating in a p2p network, which
+// implements protocols or provides services. It handles
+// requests like a Server, and issues requests like a Client.
+// It is called Host because it is both Server and Client (and Peer
+// may be confusing).
+type Host interface {
+ // ID returns the (local) peer.ID associated with this Host
+ ID() peer.ID
+
+ // Peerstore returns the Host's repository of Peer Addresses and Keys.
+ Peerstore() peerstore.Peerstore
+
+ // Addrs returns the listen addresses of the Host
+ Addrs() []ma.Multiaddr
+
+ // Network returns the Network interface of the Host
+ Network() network.Network
+
+ // Mux returns the Mux multiplexing incoming streams to protocol handlers
+ Mux() protocol.Switch
+
+ // Connect ensures there is a connection between this host and the peer with
+ // given peer.ID. Connect will absorb the addresses in pi into its internal
+ // peerstore. If there is not an active connection, Connect will issue a
+ // h.Network.Dial, and block until a connection is open, or an error is
+ // returned.
+ Connect(ctx context.Context, pi peer.AddrInfo) error
+
+ // SetStreamHandler sets the protocol handler on the Host's Mux.
+ // This is equivalent to:
+ // host.Mux().SetHandler(proto, handler)
+ // (Thread-safe)
+ SetStreamHandler(pid protocol.ID, handler network.StreamHandler)
+
+ // SetStreamHandlerMatch sets the protocol handler on the Host's Mux
+ // using a matching function for protocol selection.
+ SetStreamHandlerMatch(protocol.ID, func(protocol.ID) bool, network.StreamHandler)
+
+ // RemoveStreamHandler removes a handler on the mux that was set by
+ // SetStreamHandler
+ RemoveStreamHandler(pid protocol.ID)
+
+ // NewStream opens a new stream to given peer p, and writes a p2p/protocol
+ // header with given ProtocolID. If there is no connection to p, attempts
+ // to create one. If ProtocolID is "", writes no header.
+ // (Thread-safe)
+ NewStream(ctx context.Context, p peer.ID, pids ...protocol.ID) (network.Stream, error)
+
+ // Close shuts down the host, its Network, and services.
+ Close() error
+
+ // ConnManager returns this host's connection manager.
+ ConnManager() connmgr.ConnManager
+
+ // EventBus returns the host's eventbus.
+ EventBus() event.Bus
+}
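
The stream-handler and NewStream halves of the interface mirror each other. A sketch of a trivial echo protocol between two hosts; the protocol ID is illustrative and `h1`, `h2` are assumed to be connected:

```go
package main

import (
	"context"
	"io"

	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/protocol"
)

const echoID = protocol.ID("/echo/1.0.0") // illustrative protocol ID

// runEcho is a sketch; h1 and h2 are two connected hosts.
func runEcho(ctx context.Context, h1, h2 host.Host) error {
	// Server side: echo every byte back on the same stream.
	h1.SetStreamHandler(echoID, func(s network.Stream) {
		defer s.Close()
		io.Copy(s, s)
	})

	// Client side: open a stream speaking our protocol and write to it.
	s, err := h2.NewStream(ctx, h1.ID(), echoID)
	if err != nil {
		return err
	}
	defer s.Close()
	_, err = s.Write([]byte("hello"))
	return err
}
```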
diff --git a/core/internal/catch/catch.go b/core/internal/catch/catch.go
new file mode 100644
index 0000000000..c61ee2aa3d
--- /dev/null
+++ b/core/internal/catch/catch.go
@@ -0,0 +1,18 @@
+package catch
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "runtime/debug"
+)
+
+var panicWriter io.Writer = os.Stderr
+
+// HandlePanic handles and logs panics.
+func HandlePanic(rerr interface{}, err *error, where string) {
+ if rerr != nil {
+ fmt.Fprintf(panicWriter, "caught panic: %s\n%s\n", rerr, debug.Stack())
+ *err = fmt.Errorf("panic in %s: %s", where, rerr)
+ }
+}
diff --git a/core/internal/catch/catch_test.go b/core/internal/catch/catch_test.go
new file mode 100644
index 0000000000..1674f4f4df
--- /dev/null
+++ b/core/internal/catch/catch_test.go
@@ -0,0 +1,28 @@
+package catch
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestCatch(t *testing.T) {
+ buf := new(bytes.Buffer)
+
+ oldPanicWriter := panicWriter
+ t.Cleanup(func() { panicWriter = oldPanicWriter })
+ panicWriter = buf
+
+ panicAndCatch := func() (err error) {
+ defer func() { HandlePanic(recover(), &err, "somewhere") }()
+
+ panic("here")
+ }
+
+ err := panicAndCatch()
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "panic in somewhere: here")
+
+ require.Contains(t, buf.String(), "caught panic: here")
+}
diff --git a/core/metrics/bandwidth.go b/core/metrics/bandwidth.go
new file mode 100644
index 0000000000..84360bd7c3
--- /dev/null
+++ b/core/metrics/bandwidth.go
@@ -0,0 +1,176 @@
+// Package metrics provides metrics collection and reporting interfaces for libp2p.
+package metrics
+
+import (
+ "time"
+
+ "github.com/libp2p/go-flow-metrics"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/protocol"
+)
+
+// BandwidthCounter tracks incoming and outgoing data transferred by the local peer.
+// Metrics are available for total bandwidth across all peers / protocols, as well
+// as segmented by remote peer ID and protocol ID.
+type BandwidthCounter struct {
+ totalIn flow.Meter
+ totalOut flow.Meter
+
+ protocolIn flow.MeterRegistry
+ protocolOut flow.MeterRegistry
+
+ peerIn flow.MeterRegistry
+ peerOut flow.MeterRegistry
+}
+
+// NewBandwidthCounter creates a new BandwidthCounter.
+func NewBandwidthCounter() *BandwidthCounter {
+ return new(BandwidthCounter)
+}
+
+// LogSentMessage records the size of an outgoing message
+// without associating the bandwidth to a specific peer or protocol.
+func (bwc *BandwidthCounter) LogSentMessage(size int64) {
+ bwc.totalOut.Mark(uint64(size))
+}
+
+// LogRecvMessage records the size of an incoming message
+// without associating the bandwidth to a specific peer or protocol.
+func (bwc *BandwidthCounter) LogRecvMessage(size int64) {
+ bwc.totalIn.Mark(uint64(size))
+}
+
+// LogSentMessageStream records the size of an outgoing message over a single logical stream.
+// Bandwidth is associated with the given protocol.ID and peer.ID.
+func (bwc *BandwidthCounter) LogSentMessageStream(size int64, proto protocol.ID, p peer.ID) {
+ bwc.protocolOut.Get(string(proto)).Mark(uint64(size))
+ bwc.peerOut.Get(string(p)).Mark(uint64(size))
+}
+
+// LogRecvMessageStream records the size of an incoming message over a single logical stream.
+// Bandwidth is associated with the given protocol.ID and peer.ID.
+func (bwc *BandwidthCounter) LogRecvMessageStream(size int64, proto protocol.ID, p peer.ID) {
+ bwc.protocolIn.Get(string(proto)).Mark(uint64(size))
+ bwc.peerIn.Get(string(p)).Mark(uint64(size))
+}
+
+// GetBandwidthForPeer returns a Stats struct with bandwidth metrics associated with the given peer.ID.
+// The metrics returned include all traffic sent / received for the peer, regardless of protocol.
+func (bwc *BandwidthCounter) GetBandwidthForPeer(p peer.ID) (out Stats) {
+ inSnap := bwc.peerIn.Get(string(p)).Snapshot()
+ outSnap := bwc.peerOut.Get(string(p)).Snapshot()
+
+ return Stats{
+ TotalIn: int64(inSnap.Total),
+ TotalOut: int64(outSnap.Total),
+ RateIn: inSnap.Rate,
+ RateOut: outSnap.Rate,
+ }
+}
+
+// GetBandwidthForProtocol returns a Stats struct with bandwidth metrics associated with the given protocol.ID.
+// The metrics returned include all traffic sent / received for the protocol, regardless of which peers were
+// involved.
+func (bwc *BandwidthCounter) GetBandwidthForProtocol(proto protocol.ID) (out Stats) {
+ inSnap := bwc.protocolIn.Get(string(proto)).Snapshot()
+ outSnap := bwc.protocolOut.Get(string(proto)).Snapshot()
+
+ return Stats{
+ TotalIn: int64(inSnap.Total),
+ TotalOut: int64(outSnap.Total),
+ RateIn: inSnap.Rate,
+ RateOut: outSnap.Rate,
+ }
+}
+
+// GetBandwidthTotals returns a Stats struct with bandwidth metrics for all data sent / received by the
+// local peer, regardless of protocol or remote peer IDs.
+func (bwc *BandwidthCounter) GetBandwidthTotals() (out Stats) {
+ inSnap := bwc.totalIn.Snapshot()
+ outSnap := bwc.totalOut.Snapshot()
+
+ return Stats{
+ TotalIn: int64(inSnap.Total),
+ TotalOut: int64(outSnap.Total),
+ RateIn: inSnap.Rate,
+ RateOut: outSnap.Rate,
+ }
+}
+
+// GetBandwidthByPeer returns a map of all remembered peers and the bandwidth
+// metrics with respect to each. This method may be very expensive.
+func (bwc *BandwidthCounter) GetBandwidthByPeer() map[peer.ID]Stats {
+ peers := make(map[peer.ID]Stats)
+
+ bwc.peerIn.ForEach(func(p string, meter *flow.Meter) {
+ id := peer.ID(p)
+ snap := meter.Snapshot()
+
+ stat := peers[id]
+ stat.TotalIn = int64(snap.Total)
+ stat.RateIn = snap.Rate
+ peers[id] = stat
+ })
+
+ bwc.peerOut.ForEach(func(p string, meter *flow.Meter) {
+ id := peer.ID(p)
+ snap := meter.Snapshot()
+
+ stat := peers[id]
+ stat.TotalOut = int64(snap.Total)
+ stat.RateOut = snap.Rate
+ peers[id] = stat
+ })
+
+ return peers
+}
+
+// GetBandwidthByProtocol returns a map of all remembered protocols and
+// the bandwidth metrics with respect to each. This method may be moderately
+// expensive.
+func (bwc *BandwidthCounter) GetBandwidthByProtocol() map[protocol.ID]Stats {
+ protocols := make(map[protocol.ID]Stats)
+
+ bwc.protocolIn.ForEach(func(p string, meter *flow.Meter) {
+ id := protocol.ID(p)
+ snap := meter.Snapshot()
+
+ stat := protocols[id]
+ stat.TotalIn = int64(snap.Total)
+ stat.RateIn = snap.Rate
+ protocols[id] = stat
+ })
+
+ bwc.protocolOut.ForEach(func(p string, meter *flow.Meter) {
+ id := protocol.ID(p)
+ snap := meter.Snapshot()
+
+ stat := protocols[id]
+ stat.TotalOut = int64(snap.Total)
+ stat.RateOut = snap.Rate
+ protocols[id] = stat
+ })
+
+ return protocols
+}
+
+// Reset clears all stats.
+func (bwc *BandwidthCounter) Reset() {
+ bwc.totalIn.Reset()
+ bwc.totalOut.Reset()
+
+ bwc.protocolIn.Clear()
+ bwc.protocolOut.Clear()
+
+ bwc.peerIn.Clear()
+ bwc.peerOut.Clear()
+}
+
+// TrimIdle trims all timers idle since the given time.
+func (bwc *BandwidthCounter) TrimIdle(since time.Time) {
+ bwc.peerIn.TrimIdle(since)
+ bwc.peerOut.TrimIdle(since)
+ bwc.protocolIn.TrimIdle(since)
+ bwc.protocolOut.TrimIdle(since)
+}
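
A sketch of the intended wiring: the counter is handed to the host at construction time and queried later. This assumes the top-level `libp2p.New` / `libp2p.BandwidthReporter` option as the entry point:

```go
package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p"
	"github.com/libp2p/go-libp2p/core/metrics"
)

func main() {
	bwc := metrics.NewBandwidthCounter()
	h, err := libp2p.New(libp2p.BandwidthReporter(bwc))
	if err != nil {
		panic(err)
	}
	defer h.Close()

	// ... after some traffic has flowed ...
	totals := bwc.GetBandwidthTotals()
	fmt.Printf("in: %d B (%.1f B/s), out: %d B (%.1f B/s)\n",
		totals.TotalIn, totals.RateIn, totals.TotalOut, totals.RateOut)
}
```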
diff --git a/core/metrics/bandwidth_test.go b/core/metrics/bandwidth_test.go
new file mode 100644
index 0000000000..84c507ecc5
--- /dev/null
+++ b/core/metrics/bandwidth_test.go
@@ -0,0 +1,170 @@
+package metrics
+
+import (
+ "fmt"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/protocol"
+
+ "github.com/libp2p/go-flow-metrics"
+
+ "github.com/benbjohnson/clock"
+ "github.com/stretchr/testify/require"
+)
+
+var cl = clock.NewMock()
+
+func init() {
+ flow.SetClock(cl)
+}
+
+func BenchmarkBandwidthCounter(b *testing.B) {
+ b.StopTimer()
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ bwc := NewBandwidthCounter()
+ round(bwc, b)
+ }
+}
+
+func round(bwc *BandwidthCounter, b *testing.B) {
+ start := make(chan struct{})
+ var wg sync.WaitGroup
+ wg.Add(10000)
+ for i := 0; i < 1000; i++ {
+ p := peer.ID(fmt.Sprintf("peer-%d", i))
+ for j := 0; j < 10; j++ {
+ proto := protocol.ID(fmt.Sprintf("bitswap-%d", j))
+ go func() {
+ defer wg.Done()
+ <-start
+
+ for i := 0; i < 1000; i++ {
+ bwc.LogSentMessage(100)
+ bwc.LogSentMessageStream(100, proto, p)
+ time.Sleep(1 * time.Millisecond)
+ }
+ }()
+ }
+ }
+
+ b.StartTimer()
+ close(start)
+ wg.Wait()
+ b.StopTimer()
+}
+
+func TestBandwidthCounter(t *testing.T) {
+ bwc := NewBandwidthCounter()
+ for i := 0; i < 40; i++ {
+ for i := 0; i < 100; i++ {
+ p := peer.ID(fmt.Sprintf("peer-%d", i))
+ for j := 0; j < 2; j++ {
+ proto := protocol.ID(fmt.Sprintf("proto-%d", j))
+
+ // make sure the bandwidth counters are active
+ bwc.LogSentMessage(100)
+ bwc.LogRecvMessage(50)
+ bwc.LogSentMessageStream(100, proto, p)
+ bwc.LogRecvMessageStream(50, proto, p)
+ }
+ }
+ cl.Add(100 * time.Millisecond)
+ }
+
+ assertProtocols := func(check func(Stats)) {
+ byProtocol := bwc.GetBandwidthByProtocol()
+ require.Len(t, byProtocol, 2, "expected 2 protocols")
+ for i := 0; i < 2; i++ {
+ p := protocol.ID(fmt.Sprintf("proto-%d", i))
+ for _, stats := range [...]Stats{bwc.GetBandwidthForProtocol(p), byProtocol[p]} {
+ check(stats)
+ }
+ }
+ }
+
+ assertPeers := func(check func(Stats)) {
+ byPeer := bwc.GetBandwidthByPeer()
+ require.Len(t, byPeer, 100, "expected 100 peers")
+ for i := 0; i < 100; i++ {
+ p := peer.ID(fmt.Sprintf("peer-%d", i))
+ for _, stats := range [...]Stats{bwc.GetBandwidthForPeer(p), byPeer[p]} {
+ check(stats)
+ }
+ }
+ }
+
+ assertPeers(func(stats Stats) {
+ require.Equal(t, int64(8000), stats.TotalOut)
+ require.Equal(t, int64(4000), stats.TotalIn)
+ })
+
+ assertProtocols(func(stats Stats) {
+ require.Equal(t, int64(400000), stats.TotalOut)
+ require.Equal(t, int64(200000), stats.TotalIn)
+ })
+
+ stats := bwc.GetBandwidthTotals()
+ require.Equal(t, int64(800000), stats.TotalOut)
+ require.Equal(t, int64(400000), stats.TotalIn)
+}
+
+func TestResetBandwidthCounter(t *testing.T) {
+ bwc := NewBandwidthCounter()
+
+ p := peer.ID("peer-0")
+ proto := protocol.ID("proto-0")
+
+ // We don't calculate bandwidth till we've been active for a second.
+ bwc.LogSentMessage(42)
+ bwc.LogRecvMessage(24)
+ bwc.LogSentMessageStream(100, proto, p)
+ bwc.LogRecvMessageStream(50, proto, p)
+
+ time.Sleep(200 * time.Millisecond) // make sure the meters are registered with the sweeper
+ cl.Add(time.Second)
+
+ bwc.LogSentMessage(42)
+ bwc.LogRecvMessage(24)
+ bwc.LogSentMessageStream(100, proto, p)
+ bwc.LogRecvMessageStream(50, proto, p)
+
+ cl.Add(time.Second)
+
+ {
+ stats := bwc.GetBandwidthTotals()
+ require.Equal(t, int64(84), stats.TotalOut)
+ require.Equal(t, int64(48), stats.TotalIn)
+ }
+
+ {
+ stats := bwc.GetBandwidthByProtocol()
+ require.Len(t, stats, 1)
+ stat := stats[proto]
+ require.Equal(t, float64(100), stat.RateOut)
+ require.Equal(t, float64(50), stat.RateIn)
+ }
+
+ {
+ stats := bwc.GetBandwidthByPeer()
+ require.Len(t, stats, 1)
+ stat := stats[p]
+ require.Equal(t, float64(100), stat.RateOut)
+ require.Equal(t, float64(50), stat.RateIn)
+ }
+
+ bwc.Reset()
+ {
+ stats := bwc.GetBandwidthTotals()
+ require.Zero(t, stats.TotalOut)
+ require.Zero(t, stats.TotalIn)
+ require.Empty(t, bwc.GetBandwidthByProtocol(), "expected 0 protocols")
+ require.Empty(t, bwc.GetBandwidthByPeer(), "expected 0 peers")
+ }
+}
diff --git a/core/metrics/reporter.go b/core/metrics/reporter.go
new file mode 100644
index 0000000000..c0e10d8e31
--- /dev/null
+++ b/core/metrics/reporter.go
@@ -0,0 +1,34 @@
+// Package metrics provides metrics collection and reporting interfaces for libp2p.
+package metrics
+
+import (
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/protocol"
+)
+
+// Stats represents a point-in-time snapshot of bandwidth metrics.
+//
+// The TotalIn and TotalOut fields record cumulative bytes sent / received.
+// The RateIn and RateOut fields record bytes sent / received per second.
+type Stats struct {
+ TotalIn int64
+ TotalOut int64
+ RateIn float64
+ RateOut float64
+}
+
+// Reporter provides methods for logging and retrieving metrics.
+type Reporter interface {
+ LogSentMessage(int64)
+ LogRecvMessage(int64)
+ LogSentMessageStream(int64, protocol.ID, peer.ID)
+ LogRecvMessageStream(int64, protocol.ID, peer.ID)
+ GetBandwidthForPeer(peer.ID) Stats
+ GetBandwidthForProtocol(protocol.ID) Stats
+ GetBandwidthTotals() Stats
+ GetBandwidthByPeer() map[peer.ID]Stats
+ GetBandwidthByProtocol() map[protocol.ID]Stats
+ Reset()
+ TrimIdle(since time.Time)
+}
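
BandwidthCounter is intended to satisfy this interface; the usual compile-time check for that, in package metrics, would be:

```go
// Compile-time proof that BandwidthCounter implements Reporter.
var _ Reporter = (*BandwidthCounter)(nil)
```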
diff --git a/core/network/conn.go b/core/network/conn.go
new file mode 100644
index 0000000000..aa6b96f718
--- /dev/null
+++ b/core/network/conn.go
@@ -0,0 +1,143 @@
+package network
+
+import (
+ "context"
+ "fmt"
+ "io"
+
+ ic "github.com/libp2p/go-libp2p/core/crypto"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/protocol"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+type ConnErrorCode uint32
+
+type ConnError struct {
+ Remote bool
+ ErrorCode ConnErrorCode
+ TransportError error
+}
+
+func (c *ConnError) Error() string {
+ side := "local"
+ if c.Remote {
+ side = "remote"
+ }
+ if c.TransportError != nil {
+ return fmt.Sprintf("connection closed (%s): code: 0x%x: transport error: %s", side, c.ErrorCode, c.TransportError)
+ }
+ return fmt.Sprintf("connection closed (%s): code: 0x%x", side, c.ErrorCode)
+}
+
+func (c *ConnError) Is(target error) bool {
+ if tce, ok := target.(*ConnError); ok {
+ return tce.ErrorCode == c.ErrorCode && tce.Remote == c.Remote
+ }
+ return false
+}
+
+func (c *ConnError) Unwrap() []error {
+ return []error{ErrReset, c.TransportError}
+}
+
+const (
+ ConnNoError ConnErrorCode = 0
+ ConnProtocolNegotiationFailed ConnErrorCode = 0x1000
+ ConnResourceLimitExceeded ConnErrorCode = 0x1001
+ ConnRateLimited ConnErrorCode = 0x1002
+ ConnProtocolViolation ConnErrorCode = 0x1003
+ ConnSupplanted ConnErrorCode = 0x1004
+ ConnGarbageCollected ConnErrorCode = 0x1005
+ ConnShutdown ConnErrorCode = 0x1006
+ ConnGated ConnErrorCode = 0x1007
+ ConnCodeOutOfRange ConnErrorCode = 0x1008
+)
+
+// Conn is a connection to a remote peer. It multiplexes streams.
+// Usually there is no need to use a Conn directly, but it may
+// be useful to get information about the peer on the other side:
+//
+// stream.Conn().RemotePeer()
+type Conn interface {
+ io.Closer
+
+ ConnSecurity
+ ConnMultiaddrs
+ ConnStat
+ ConnScoper
+
+ // CloseWithError closes the connection with errCode. The errCode is sent to the
+ // peer on a best effort basis. For transports that do not support sending error
+ // codes on connection close, the behavior is identical to calling Close.
+ CloseWithError(errCode ConnErrorCode) error
+
+ // ID returns an identifier that uniquely identifies this Conn within this
+ // host, during this run. Connection IDs may repeat across restarts.
+ ID() string
+
+ // NewStream constructs a new Stream over this conn.
+ NewStream(context.Context) (Stream, error)
+
+ // GetStreams returns all open streams over this conn.
+ GetStreams() []Stream
+
+ // IsClosed returns whether a connection is fully closed, so it can
+ // be garbage collected.
+ IsClosed() bool
+}
+
+// ConnectionState holds information about the connection.
+type ConnectionState struct {
+ // The stream multiplexer used on this connection (if any). For example: /yamux/1.0.0
+ StreamMultiplexer protocol.ID
+ // The security protocol used on this connection (if any). For example: /tls/1.0.0
+ Security protocol.ID
+ // The transport used on this connection. For example: tcp
+ Transport string
+ // Indicates whether StreamMultiplexer was selected using inlined muxer negotiation.
+ UsedEarlyMuxerNegotiation bool
+}
+
+// ConnSecurity is the interface that one can mix into a connection interface to
+// give it the security methods.
+type ConnSecurity interface {
+ // LocalPeer returns our peer ID
+ LocalPeer() peer.ID
+
+ // RemotePeer returns the peer ID of the remote peer.
+ RemotePeer() peer.ID
+
+ // RemotePublicKey returns the public key of the remote peer.
+ RemotePublicKey() ic.PubKey
+
+ // ConnState returns information about the connection state.
+ ConnState() ConnectionState
+}
+
+// ConnMultiaddrs is an interface mixin for connection types that provide multiaddr
+// addresses for the endpoints.
+type ConnMultiaddrs interface {
+ // LocalMultiaddr returns the local Multiaddr associated
+ // with this connection
+ LocalMultiaddr() ma.Multiaddr
+
+ // RemoteMultiaddr returns the remote Multiaddr associated
+ // with this connection
+ RemoteMultiaddr() ma.Multiaddr
+}
+
+// ConnStat is an interface mixin for connection types that provide connection statistics.
+type ConnStat interface {
+ // Stat stores metadata pertaining to this conn.
+ Stat() ConnStats
+}
+
+// ConnScoper is the interface that one can mix into a connection interface to give it a resource
+// management scope
+type ConnScoper interface {
+ // Scope returns the user view of this connection's resource scope
+ Scope() ConnScope
+}
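
Because ConnError implements Is and Unwrap, callers can match close reasons with the standard errors helpers. A sketch, where `err` is whatever a stream read or open returned:

```go
package main

import (
	"errors"

	"github.com/libp2p/go-libp2p/core/network"
)

// classifyClose is a sketch; err is an error surfaced by a read or open.
func classifyClose(err error) string {
	var ce *network.ConnError
	if errors.As(err, &ce) && ce.ErrorCode == network.ConnRateLimited {
		return "rate limited; back off before redialing"
	}
	// ConnError unwraps to ErrReset, so generic reset checks keep working.
	if errors.Is(err, network.ErrReset) {
		return "connection reset"
	}
	return "other"
}
```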
diff --git a/core/network/context.go b/core/network/context.go
new file mode 100644
index 0000000000..75db775932
--- /dev/null
+++ b/core/network/context.go
@@ -0,0 +1,130 @@
+package network
+
+import (
+ "context"
+ "time"
+)
+
+// DialPeerTimeout is the default timeout for a single call to `DialPeer`. When
+// there are multiple concurrent calls to `DialPeer`, this timeout will apply to
+// each independently.
+var DialPeerTimeout = 60 * time.Second
+
+type noDialCtxKey struct{}
+type dialPeerTimeoutCtxKey struct{}
+type forceDirectDialCtxKey struct{}
+type allowLimitedConnCtxKey struct{}
+type simConnectCtxKey struct{ isClient bool }
+
+var noDial = noDialCtxKey{}
+var forceDirectDial = forceDirectDialCtxKey{}
+var allowLimitedConn = allowLimitedConnCtxKey{}
+var simConnectIsServer = simConnectCtxKey{}
+var simConnectIsClient = simConnectCtxKey{isClient: true}
+
+// EXPERIMENTAL
+// WithForceDirectDial constructs a new context with an option that instructs the network
+// to attempt to force a direct connection to a peer via a dial even if a proxied connection to it already exists.
+func WithForceDirectDial(ctx context.Context, reason string) context.Context {
+ return context.WithValue(ctx, forceDirectDial, reason)
+}
+
+// EXPERIMENTAL
+// GetForceDirectDial returns true if the force direct dial option is set in the context.
+func GetForceDirectDial(ctx context.Context) (forceDirect bool, reason string) {
+ v := ctx.Value(forceDirectDial)
+ if v != nil {
+ return true, v.(string)
+ }
+
+ return false, ""
+}
+
+// WithSimultaneousConnect constructs a new context with an option that instructs the transport
+// to apply hole punching logic where applicable.
+// EXPERIMENTAL
+func WithSimultaneousConnect(ctx context.Context, isClient bool, reason string) context.Context {
+ if isClient {
+ return context.WithValue(ctx, simConnectIsClient, reason)
+ }
+ return context.WithValue(ctx, simConnectIsServer, reason)
+}
+
+// GetSimultaneousConnect returns true if the simultaneous connect option is set in the context.
+// EXPERIMENTAL
+func GetSimultaneousConnect(ctx context.Context) (simconnect bool, isClient bool, reason string) {
+ if v := ctx.Value(simConnectIsClient); v != nil {
+ return true, true, v.(string)
+ }
+ if v := ctx.Value(simConnectIsServer); v != nil {
+ return true, false, v.(string)
+ }
+ return false, false, ""
+}
+
+// WithNoDial constructs a new context with an option that instructs the network
+// to not attempt a new dial when opening a stream.
+func WithNoDial(ctx context.Context, reason string) context.Context {
+ return context.WithValue(ctx, noDial, reason)
+}
+
+// GetNoDial returns true if the no dial option is set in the context.
+func GetNoDial(ctx context.Context) (nodial bool, reason string) {
+ v := ctx.Value(noDial)
+ if v != nil {
+ return true, v.(string)
+ }
+
+ return false, ""
+}
+
+// GetDialPeerTimeout returns the current DialPeer timeout (or the default).
+func GetDialPeerTimeout(ctx context.Context) time.Duration {
+ if to, ok := ctx.Value(dialPeerTimeoutCtxKey{}).(time.Duration); ok {
+ return to
+ }
+ return DialPeerTimeout
+}
+
+// WithDialPeerTimeout returns a new context with the DialPeer timeout applied.
+//
+// This timeout overrides the default DialPeerTimeout and applies per-dial
+// independently.
+func WithDialPeerTimeout(ctx context.Context, timeout time.Duration) context.Context {
+ return context.WithValue(ctx, dialPeerTimeoutCtxKey{}, timeout)
+}
+
+// WithAllowLimitedConn constructs a new context with an option that instructs
+// the network that it is acceptable to use a limited connection when opening a
+// new stream.
+func WithAllowLimitedConn(ctx context.Context, reason string) context.Context {
+ return context.WithValue(ctx, allowLimitedConn, reason)
+}
+
+// WithUseTransient constructs a new context with an option that instructs the network
+// that it is acceptable to use a transient connection when opening a new stream.
+//
+// Deprecated: Use WithAllowLimitedConn instead.
+func WithUseTransient(ctx context.Context, reason string) context.Context {
+ return context.WithValue(ctx, allowLimitedConn, reason)
+}
+
+// GetAllowLimitedConn returns true if the allow limited conn option is set in the context.
+func GetAllowLimitedConn(ctx context.Context) (usetransient bool, reason string) {
+ v := ctx.Value(allowLimitedConn)
+ if v != nil {
+ return true, v.(string)
+ }
+ return false, ""
+}
+
+// GetUseTransient returns true if the use transient option is set in the context.
+//
+// Deprecated: Use GetAllowLimitedConn instead.
+func GetUseTransient(ctx context.Context) (usetransient bool, reason string) {
+ v := ctx.Value(allowLimitedConn)
+ if v != nil {
+ return true, v.(string)
+ }
+ return false, ""
+}
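
A sketch of the limited-connection option in use; `h`, `p`, and `protoID` are assumed inputs, with `p` reachable only via a relayed (limited) connection:

```go
package main

import (
	"context"

	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/protocol"
)

// openOverRelay is a sketch; p is reachable only via a limited connection.
func openOverRelay(h host.Host, p peer.ID, protoID protocol.ID) (network.Stream, error) {
	// Without this option the host would refuse with ErrLimitedConn.
	ctx := network.WithAllowLimitedConn(context.Background(), "app-ping")
	return h.NewStream(ctx, p, protoID)
}
```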
diff --git a/core/network/context_test.go b/core/network/context_test.go
new file mode 100644
index 0000000000..65a865498d
--- /dev/null
+++ b/core/network/context_test.go
@@ -0,0 +1,59 @@
+package network
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestDefaultTimeout(t *testing.T) {
+ ctx := context.Background()
+ dur := GetDialPeerTimeout(ctx)
+ if dur != DialPeerTimeout {
+ t.Fatal("expected default peer timeout")
+ }
+}
+
+func TestNonDefaultTimeout(t *testing.T) {
+ customTimeout := time.Duration(1)
+ ctx := context.WithValue(
+ context.Background(),
+ dialPeerTimeoutCtxKey{},
+ customTimeout,
+ )
+ dur := GetDialPeerTimeout(ctx)
+ if dur != customTimeout {
+ t.Fatal("peer timeout doesn't match set timeout")
+ }
+}
+
+func TestSettingTimeout(t *testing.T) {
+ customTimeout := time.Duration(1)
+ ctx := WithDialPeerTimeout(
+ context.Background(),
+ customTimeout,
+ )
+ dur := GetDialPeerTimeout(ctx)
+ if dur != customTimeout {
+ t.Fatal("peer timeout doesn't match set timeout")
+ }
+}
+
+func TestSimultaneousConnect(t *testing.T) {
+ t.Run("for the server", func(t *testing.T) {
+ serverCtx := WithSimultaneousConnect(context.Background(), false, "foobar")
+ ok, isClient, reason := GetSimultaneousConnect(serverCtx)
+ require.True(t, ok)
+ require.False(t, isClient)
+ require.Equal(t, "foobar", reason)
+ })
+ t.Run("for the client", func(t *testing.T) {
+ serverCtx := WithSimultaneousConnect(context.Background(), true, "foo")
+ ok, isClient, reason := GetSimultaneousConnect(serverCtx)
+ require.True(t, ok)
+ require.True(t, isClient)
+ require.Equal(t, "foo", reason)
+ })
+}
diff --git a/core/network/errors.go b/core/network/errors.go
new file mode 100644
index 0000000000..0f98cd5a28
--- /dev/null
+++ b/core/network/errors.go
@@ -0,0 +1,39 @@
+package network
+
+import (
+ "errors"
+ "net"
+)
+
+type temporaryError string
+
+func (e temporaryError) Error() string { return string(e) }
+func (e temporaryError) Temporary() bool { return true }
+func (e temporaryError) Timeout() bool { return false }
+
+var _ net.Error = temporaryError("")
+
+// ErrNoRemoteAddrs is returned when there are no addresses associated with a peer during a dial.
+var ErrNoRemoteAddrs = errors.New("no remote addresses")
+
+// ErrNoConn is returned when attempting to open a stream to a peer with the NoDial
+// option and no usable connection is available.
+var ErrNoConn = errors.New("no usable connection to peer")
+
+// ErrTransientConn is returned when attempting to open a stream to a peer with only a transient
+// connection, without specifying the UseTransient option.
+//
+// Deprecated: Use ErrLimitedConn instead.
+var ErrTransientConn = ErrLimitedConn
+
+// ErrLimitedConn is returned when attempting to open a stream to a peer with only a limited
+// connection, without specifying the AllowLimitedConn option.
+var ErrLimitedConn = errors.New("limited connection to peer")
+
+// ErrResourceLimitExceeded is returned when attempting to perform an operation that would
+// exceed system resource limits.
+var ErrResourceLimitExceeded = temporaryError("resource limit exceeded")
+
+// ErrResourceScopeClosed is returned when attempting to reserve resources in a closed resource
+// scope.
+var ErrResourceScopeClosed = errors.New("resource scope closed")
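
ErrResourceLimitExceeded is deliberately a net.Error whose Temporary() reports true, so callers can treat it as retryable. A sketch of that check (Temporary() is deprecated in the standard library but still usable; the backoff handling is illustrative):

```go
package main

import (
	"errors"
	"net"
	"time"
)

// retryIfTemporary is a sketch of treating resource-limit errors as retryable.
func retryIfTemporary(err error, backoff time.Duration) bool {
	var ne net.Error
	if errors.As(err, &ne) && ne.Temporary() {
		time.Sleep(backoff) // e.g. ErrResourceLimitExceeded reports Temporary() == true
		return true
	}
	return false
}
```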
diff --git a/core/network/mocks/mock_conn_management_scope.go b/core/network/mocks/mock_conn_management_scope.go
new file mode 100644
index 0000000000..649da0aa0f
--- /dev/null
+++ b/core/network/mocks/mock_conn_management_scope.go
@@ -0,0 +1,137 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: github.com/libp2p/go-libp2p/core/network (interfaces: ConnManagementScope)
+//
+// Generated by this command:
+//
+// mockgen -package mocknetwork -destination mock_conn_management_scope.go github.com/libp2p/go-libp2p/core/network ConnManagementScope
+//
+
+// Package mocknetwork is a generated GoMock package.
+package mocknetwork
+
+import (
+ reflect "reflect"
+
+ network "github.com/libp2p/go-libp2p/core/network"
+ peer "github.com/libp2p/go-libp2p/core/peer"
+ gomock "go.uber.org/mock/gomock"
+)
+
+// MockConnManagementScope is a mock of ConnManagementScope interface.
+type MockConnManagementScope struct {
+ ctrl *gomock.Controller
+ recorder *MockConnManagementScopeMockRecorder
+ isgomock struct{}
+}
+
+// MockConnManagementScopeMockRecorder is the mock recorder for MockConnManagementScope.
+type MockConnManagementScopeMockRecorder struct {
+ mock *MockConnManagementScope
+}
+
+// NewMockConnManagementScope creates a new mock instance.
+func NewMockConnManagementScope(ctrl *gomock.Controller) *MockConnManagementScope {
+ mock := &MockConnManagementScope{ctrl: ctrl}
+ mock.recorder = &MockConnManagementScopeMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockConnManagementScope) EXPECT() *MockConnManagementScopeMockRecorder {
+ return m.recorder
+}
+
+// BeginSpan mocks base method.
+func (m *MockConnManagementScope) BeginSpan() (network.ResourceScopeSpan, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "BeginSpan")
+ ret0, _ := ret[0].(network.ResourceScopeSpan)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// BeginSpan indicates an expected call of BeginSpan.
+func (mr *MockConnManagementScopeMockRecorder) BeginSpan() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeginSpan", reflect.TypeOf((*MockConnManagementScope)(nil).BeginSpan))
+}
+
+// Done mocks base method.
+func (m *MockConnManagementScope) Done() {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "Done")
+}
+
+// Done indicates an expected call of Done.
+func (mr *MockConnManagementScopeMockRecorder) Done() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Done", reflect.TypeOf((*MockConnManagementScope)(nil).Done))
+}
+
+// PeerScope mocks base method.
+func (m *MockConnManagementScope) PeerScope() network.PeerScope {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PeerScope")
+ ret0, _ := ret[0].(network.PeerScope)
+ return ret0
+}
+
+// PeerScope indicates an expected call of PeerScope.
+func (mr *MockConnManagementScopeMockRecorder) PeerScope() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PeerScope", reflect.TypeOf((*MockConnManagementScope)(nil).PeerScope))
+}
+
+// ReleaseMemory mocks base method.
+func (m *MockConnManagementScope) ReleaseMemory(size int) {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "ReleaseMemory", size)
+}
+
+// ReleaseMemory indicates an expected call of ReleaseMemory.
+func (mr *MockConnManagementScopeMockRecorder) ReleaseMemory(size any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReleaseMemory", reflect.TypeOf((*MockConnManagementScope)(nil).ReleaseMemory), size)
+}
+
+// ReserveMemory mocks base method.
+func (m *MockConnManagementScope) ReserveMemory(size int, prio uint8) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ReserveMemory", size, prio)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ReserveMemory indicates an expected call of ReserveMemory.
+func (mr *MockConnManagementScopeMockRecorder) ReserveMemory(size, prio any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReserveMemory", reflect.TypeOf((*MockConnManagementScope)(nil).ReserveMemory), size, prio)
+}
+
+// SetPeer mocks base method.
+func (m *MockConnManagementScope) SetPeer(arg0 peer.ID) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SetPeer", arg0)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// SetPeer indicates an expected call of SetPeer.
+func (mr *MockConnManagementScopeMockRecorder) SetPeer(arg0 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPeer", reflect.TypeOf((*MockConnManagementScope)(nil).SetPeer), arg0)
+}
+
+// Stat mocks base method.
+func (m *MockConnManagementScope) Stat() network.ScopeStat {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Stat")
+ ret0, _ := ret[0].(network.ScopeStat)
+ return ret0
+}
+
+// Stat indicates an expected call of Stat.
+func (mr *MockConnManagementScopeMockRecorder) Stat() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stat", reflect.TypeOf((*MockConnManagementScope)(nil).Stat))
+}
diff --git a/core/network/mocks/mock_peer_scope.go b/core/network/mocks/mock_peer_scope.go
new file mode 100644
index 0000000000..e5d0e70c1f
--- /dev/null
+++ b/core/network/mocks/mock_peer_scope.go
@@ -0,0 +1,111 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: github.com/libp2p/go-libp2p/core/network (interfaces: PeerScope)
+//
+// Generated by this command:
+//
+// mockgen -package mocknetwork -destination mock_peer_scope.go github.com/libp2p/go-libp2p/core/network PeerScope
+//
+
+// Package mocknetwork is a generated GoMock package.
+package mocknetwork
+
+import (
+ reflect "reflect"
+
+ network "github.com/libp2p/go-libp2p/core/network"
+ peer "github.com/libp2p/go-libp2p/core/peer"
+ gomock "go.uber.org/mock/gomock"
+)
+
+// MockPeerScope is a mock of PeerScope interface.
+type MockPeerScope struct {
+ ctrl *gomock.Controller
+ recorder *MockPeerScopeMockRecorder
+ isgomock struct{}
+}
+
+// MockPeerScopeMockRecorder is the mock recorder for MockPeerScope.
+type MockPeerScopeMockRecorder struct {
+ mock *MockPeerScope
+}
+
+// NewMockPeerScope creates a new mock instance.
+func NewMockPeerScope(ctrl *gomock.Controller) *MockPeerScope {
+ mock := &MockPeerScope{ctrl: ctrl}
+ mock.recorder = &MockPeerScopeMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockPeerScope) EXPECT() *MockPeerScopeMockRecorder {
+ return m.recorder
+}
+
+// BeginSpan mocks base method.
+func (m *MockPeerScope) BeginSpan() (network.ResourceScopeSpan, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "BeginSpan")
+ ret0, _ := ret[0].(network.ResourceScopeSpan)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// BeginSpan indicates an expected call of BeginSpan.
+func (mr *MockPeerScopeMockRecorder) BeginSpan() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeginSpan", reflect.TypeOf((*MockPeerScope)(nil).BeginSpan))
+}
+
+// Peer mocks base method.
+func (m *MockPeerScope) Peer() peer.ID {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Peer")
+ ret0, _ := ret[0].(peer.ID)
+ return ret0
+}
+
+// Peer indicates an expected call of Peer.
+func (mr *MockPeerScopeMockRecorder) Peer() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Peer", reflect.TypeOf((*MockPeerScope)(nil).Peer))
+}
+
+// ReleaseMemory mocks base method.
+func (m *MockPeerScope) ReleaseMemory(size int) {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "ReleaseMemory", size)
+}
+
+// ReleaseMemory indicates an expected call of ReleaseMemory.
+func (mr *MockPeerScopeMockRecorder) ReleaseMemory(size any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReleaseMemory", reflect.TypeOf((*MockPeerScope)(nil).ReleaseMemory), size)
+}
+
+// ReserveMemory mocks base method.
+func (m *MockPeerScope) ReserveMemory(size int, prio uint8) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ReserveMemory", size, prio)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ReserveMemory indicates an expected call of ReserveMemory.
+func (mr *MockPeerScopeMockRecorder) ReserveMemory(size, prio any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReserveMemory", reflect.TypeOf((*MockPeerScope)(nil).ReserveMemory), size, prio)
+}
+
+// Stat mocks base method.
+func (m *MockPeerScope) Stat() network.ScopeStat {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Stat")
+ ret0, _ := ret[0].(network.ScopeStat)
+ return ret0
+}
+
+// Stat indicates an expected call of Stat.
+func (mr *MockPeerScopeMockRecorder) Stat() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stat", reflect.TypeOf((*MockPeerScope)(nil).Stat))
+}
diff --git a/core/network/mocks/mock_protocol_scope.go b/core/network/mocks/mock_protocol_scope.go
new file mode 100644
index 0000000000..237c3daf0a
--- /dev/null
+++ b/core/network/mocks/mock_protocol_scope.go
@@ -0,0 +1,111 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: github.com/libp2p/go-libp2p/core/network (interfaces: ProtocolScope)
+//
+// Generated by this command:
+//
+// mockgen -package mocknetwork -destination mock_protocol_scope.go github.com/libp2p/go-libp2p/core/network ProtocolScope
+//
+
+// Package mocknetwork is a generated GoMock package.
+package mocknetwork
+
+import (
+ reflect "reflect"
+
+ network "github.com/libp2p/go-libp2p/core/network"
+ protocol "github.com/libp2p/go-libp2p/core/protocol"
+ gomock "go.uber.org/mock/gomock"
+)
+
+// MockProtocolScope is a mock of ProtocolScope interface.
+type MockProtocolScope struct {
+ ctrl *gomock.Controller
+ recorder *MockProtocolScopeMockRecorder
+ isgomock struct{}
+}
+
+// MockProtocolScopeMockRecorder is the mock recorder for MockProtocolScope.
+type MockProtocolScopeMockRecorder struct {
+ mock *MockProtocolScope
+}
+
+// NewMockProtocolScope creates a new mock instance.
+func NewMockProtocolScope(ctrl *gomock.Controller) *MockProtocolScope {
+ mock := &MockProtocolScope{ctrl: ctrl}
+ mock.recorder = &MockProtocolScopeMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockProtocolScope) EXPECT() *MockProtocolScopeMockRecorder {
+ return m.recorder
+}
+
+// BeginSpan mocks base method.
+func (m *MockProtocolScope) BeginSpan() (network.ResourceScopeSpan, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "BeginSpan")
+ ret0, _ := ret[0].(network.ResourceScopeSpan)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// BeginSpan indicates an expected call of BeginSpan.
+func (mr *MockProtocolScopeMockRecorder) BeginSpan() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeginSpan", reflect.TypeOf((*MockProtocolScope)(nil).BeginSpan))
+}
+
+// Protocol mocks base method.
+func (m *MockProtocolScope) Protocol() protocol.ID {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Protocol")
+ ret0, _ := ret[0].(protocol.ID)
+ return ret0
+}
+
+// Protocol indicates an expected call of Protocol.
+func (mr *MockProtocolScopeMockRecorder) Protocol() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Protocol", reflect.TypeOf((*MockProtocolScope)(nil).Protocol))
+}
+
+// ReleaseMemory mocks base method.
+func (m *MockProtocolScope) ReleaseMemory(size int) {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "ReleaseMemory", size)
+}
+
+// ReleaseMemory indicates an expected call of ReleaseMemory.
+func (mr *MockProtocolScopeMockRecorder) ReleaseMemory(size any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReleaseMemory", reflect.TypeOf((*MockProtocolScope)(nil).ReleaseMemory), size)
+}
+
+// ReserveMemory mocks base method.
+func (m *MockProtocolScope) ReserveMemory(size int, prio uint8) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ReserveMemory", size, prio)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ReserveMemory indicates an expected call of ReserveMemory.
+func (mr *MockProtocolScopeMockRecorder) ReserveMemory(size, prio any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReserveMemory", reflect.TypeOf((*MockProtocolScope)(nil).ReserveMemory), size, prio)
+}
+
+// Stat mocks base method.
+func (m *MockProtocolScope) Stat() network.ScopeStat {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Stat")
+ ret0, _ := ret[0].(network.ScopeStat)
+ return ret0
+}
+
+// Stat indicates an expected call of Stat.
+func (mr *MockProtocolScopeMockRecorder) Stat() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stat", reflect.TypeOf((*MockProtocolScope)(nil).Stat))
+}
diff --git a/core/network/mocks/mock_resource_manager.go b/core/network/mocks/mock_resource_manager.go
new file mode 100644
index 0000000000..db06023a7e
--- /dev/null
+++ b/core/network/mocks/mock_resource_manager.go
@@ -0,0 +1,173 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: github.com/libp2p/go-libp2p/core/network (interfaces: ResourceManager)
+//
+// Generated by this command:
+//
+// mockgen -package mocknetwork -destination mock_resource_manager.go github.com/libp2p/go-libp2p/core/network ResourceManager
+//
+
+// Package mocknetwork is a generated GoMock package.
+package mocknetwork
+
+import (
+ net "net"
+ reflect "reflect"
+
+ network "github.com/libp2p/go-libp2p/core/network"
+ peer "github.com/libp2p/go-libp2p/core/peer"
+ protocol "github.com/libp2p/go-libp2p/core/protocol"
+ multiaddr "github.com/multiformats/go-multiaddr"
+ gomock "go.uber.org/mock/gomock"
+)
+
+// MockResourceManager is a mock of ResourceManager interface.
+type MockResourceManager struct {
+ ctrl *gomock.Controller
+ recorder *MockResourceManagerMockRecorder
+ isgomock struct{}
+}
+
+// MockResourceManagerMockRecorder is the mock recorder for MockResourceManager.
+type MockResourceManagerMockRecorder struct {
+ mock *MockResourceManager
+}
+
+// NewMockResourceManager creates a new mock instance.
+func NewMockResourceManager(ctrl *gomock.Controller) *MockResourceManager {
+ mock := &MockResourceManager{ctrl: ctrl}
+ mock.recorder = &MockResourceManagerMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockResourceManager) EXPECT() *MockResourceManagerMockRecorder {
+ return m.recorder
+}
+
+// Close mocks base method.
+func (m *MockResourceManager) Close() error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Close")
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Close indicates an expected call of Close.
+func (mr *MockResourceManagerMockRecorder) Close() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockResourceManager)(nil).Close))
+}
+
+// OpenConnection mocks base method.
+func (m *MockResourceManager) OpenConnection(dir network.Direction, usefd bool, endpoint multiaddr.Multiaddr) (network.ConnManagementScope, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "OpenConnection", dir, usefd, endpoint)
+ ret0, _ := ret[0].(network.ConnManagementScope)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// OpenConnection indicates an expected call of OpenConnection.
+func (mr *MockResourceManagerMockRecorder) OpenConnection(dir, usefd, endpoint any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OpenConnection", reflect.TypeOf((*MockResourceManager)(nil).OpenConnection), dir, usefd, endpoint)
+}
+
+// OpenStream mocks base method.
+func (m *MockResourceManager) OpenStream(p peer.ID, dir network.Direction) (network.StreamManagementScope, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "OpenStream", p, dir)
+ ret0, _ := ret[0].(network.StreamManagementScope)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// OpenStream indicates an expected call of OpenStream.
+func (mr *MockResourceManagerMockRecorder) OpenStream(p, dir any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OpenStream", reflect.TypeOf((*MockResourceManager)(nil).OpenStream), p, dir)
+}
+
+// VerifySourceAddress mocks base method.
+func (m *MockResourceManager) VerifySourceAddress(addr net.Addr) bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "VerifySourceAddress", addr)
+ ret0, _ := ret[0].(bool)
+ return ret0
+}
+
+// VerifySourceAddress indicates an expected call of VerifySourceAddress.
+func (mr *MockResourceManagerMockRecorder) VerifySourceAddress(addr any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifySourceAddress", reflect.TypeOf((*MockResourceManager)(nil).VerifySourceAddress), addr)
+}
+
+// ViewPeer mocks base method.
+func (m *MockResourceManager) ViewPeer(arg0 peer.ID, arg1 func(network.PeerScope) error) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ViewPeer", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ViewPeer indicates an expected call of ViewPeer.
+func (mr *MockResourceManagerMockRecorder) ViewPeer(arg0, arg1 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ViewPeer", reflect.TypeOf((*MockResourceManager)(nil).ViewPeer), arg0, arg1)
+}
+
+// ViewProtocol mocks base method.
+func (m *MockResourceManager) ViewProtocol(arg0 protocol.ID, arg1 func(network.ProtocolScope) error) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ViewProtocol", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ViewProtocol indicates an expected call of ViewProtocol.
+func (mr *MockResourceManagerMockRecorder) ViewProtocol(arg0, arg1 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ViewProtocol", reflect.TypeOf((*MockResourceManager)(nil).ViewProtocol), arg0, arg1)
+}
+
+// ViewService mocks base method.
+func (m *MockResourceManager) ViewService(arg0 string, arg1 func(network.ServiceScope) error) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ViewService", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ViewService indicates an expected call of ViewService.
+func (mr *MockResourceManagerMockRecorder) ViewService(arg0, arg1 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ViewService", reflect.TypeOf((*MockResourceManager)(nil).ViewService), arg0, arg1)
+}
+
+// ViewSystem mocks base method.
+func (m *MockResourceManager) ViewSystem(arg0 func(network.ResourceScope) error) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ViewSystem", arg0)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ViewSystem indicates an expected call of ViewSystem.
+func (mr *MockResourceManagerMockRecorder) ViewSystem(arg0 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ViewSystem", reflect.TypeOf((*MockResourceManager)(nil).ViewSystem), arg0)
+}
+
+// ViewTransient mocks base method.
+func (m *MockResourceManager) ViewTransient(arg0 func(network.ResourceScope) error) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ViewTransient", arg0)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ViewTransient indicates an expected call of ViewTransient.
+func (mr *MockResourceManagerMockRecorder) ViewTransient(arg0 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ViewTransient", reflect.TypeOf((*MockResourceManager)(nil).ViewTransient), arg0)
+}
diff --git a/core/network/mocks/mock_resource_scope_span.go b/core/network/mocks/mock_resource_scope_span.go
new file mode 100644
index 0000000000..0a1db10953
--- /dev/null
+++ b/core/network/mocks/mock_resource_scope_span.go
@@ -0,0 +1,108 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: github.com/libp2p/go-libp2p/core/network (interfaces: ResourceScopeSpan)
+//
+// Generated by this command:
+//
+// mockgen -package mocknetwork -destination mock_resource_scope_span.go github.com/libp2p/go-libp2p/core/network ResourceScopeSpan
+//
+
+// Package mocknetwork is a generated GoMock package.
+package mocknetwork
+
+import (
+ reflect "reflect"
+
+ network "github.com/libp2p/go-libp2p/core/network"
+ gomock "go.uber.org/mock/gomock"
+)
+
+// MockResourceScopeSpan is a mock of ResourceScopeSpan interface.
+type MockResourceScopeSpan struct {
+ ctrl *gomock.Controller
+ recorder *MockResourceScopeSpanMockRecorder
+ isgomock struct{}
+}
+
+// MockResourceScopeSpanMockRecorder is the mock recorder for MockResourceScopeSpan.
+type MockResourceScopeSpanMockRecorder struct {
+ mock *MockResourceScopeSpan
+}
+
+// NewMockResourceScopeSpan creates a new mock instance.
+func NewMockResourceScopeSpan(ctrl *gomock.Controller) *MockResourceScopeSpan {
+ mock := &MockResourceScopeSpan{ctrl: ctrl}
+ mock.recorder = &MockResourceScopeSpanMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockResourceScopeSpan) EXPECT() *MockResourceScopeSpanMockRecorder {
+ return m.recorder
+}
+
+// BeginSpan mocks base method.
+func (m *MockResourceScopeSpan) BeginSpan() (network.ResourceScopeSpan, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "BeginSpan")
+ ret0, _ := ret[0].(network.ResourceScopeSpan)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// BeginSpan indicates an expected call of BeginSpan.
+func (mr *MockResourceScopeSpanMockRecorder) BeginSpan() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeginSpan", reflect.TypeOf((*MockResourceScopeSpan)(nil).BeginSpan))
+}
+
+// Done mocks base method.
+func (m *MockResourceScopeSpan) Done() {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "Done")
+}
+
+// Done indicates an expected call of Done.
+func (mr *MockResourceScopeSpanMockRecorder) Done() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Done", reflect.TypeOf((*MockResourceScopeSpan)(nil).Done))
+}
+
+// ReleaseMemory mocks base method.
+func (m *MockResourceScopeSpan) ReleaseMemory(size int) {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "ReleaseMemory", size)
+}
+
+// ReleaseMemory indicates an expected call of ReleaseMemory.
+func (mr *MockResourceScopeSpanMockRecorder) ReleaseMemory(size any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReleaseMemory", reflect.TypeOf((*MockResourceScopeSpan)(nil).ReleaseMemory), size)
+}
+
+// ReserveMemory mocks base method.
+func (m *MockResourceScopeSpan) ReserveMemory(size int, prio uint8) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ReserveMemory", size, prio)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ReserveMemory indicates an expected call of ReserveMemory.
+func (mr *MockResourceScopeSpanMockRecorder) ReserveMemory(size, prio any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReserveMemory", reflect.TypeOf((*MockResourceScopeSpan)(nil).ReserveMemory), size, prio)
+}
+
+// Stat mocks base method.
+func (m *MockResourceScopeSpan) Stat() network.ScopeStat {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Stat")
+ ret0, _ := ret[0].(network.ScopeStat)
+ return ret0
+}
+
+// Stat indicates an expected call of Stat.
+func (mr *MockResourceScopeSpanMockRecorder) Stat() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stat", reflect.TypeOf((*MockResourceScopeSpan)(nil).Stat))
+}
diff --git a/core/network/mocks/mock_stream_management_scope.go b/core/network/mocks/mock_stream_management_scope.go
new file mode 100644
index 0000000000..9336cac2f4
--- /dev/null
+++ b/core/network/mocks/mock_stream_management_scope.go
@@ -0,0 +1,179 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: github.com/libp2p/go-libp2p/core/network (interfaces: StreamManagementScope)
+//
+// Generated by this command:
+//
+// mockgen -package mocknetwork -destination mock_stream_management_scope.go github.com/libp2p/go-libp2p/core/network StreamManagementScope
+//
+
+// Package mocknetwork is a generated GoMock package.
+package mocknetwork
+
+import (
+ reflect "reflect"
+
+ network "github.com/libp2p/go-libp2p/core/network"
+ protocol "github.com/libp2p/go-libp2p/core/protocol"
+ gomock "go.uber.org/mock/gomock"
+)
+
+// MockStreamManagementScope is a mock of StreamManagementScope interface.
+type MockStreamManagementScope struct {
+ ctrl *gomock.Controller
+ recorder *MockStreamManagementScopeMockRecorder
+ isgomock struct{}
+}
+
+// MockStreamManagementScopeMockRecorder is the mock recorder for MockStreamManagementScope.
+type MockStreamManagementScopeMockRecorder struct {
+ mock *MockStreamManagementScope
+}
+
+// NewMockStreamManagementScope creates a new mock instance.
+func NewMockStreamManagementScope(ctrl *gomock.Controller) *MockStreamManagementScope {
+ mock := &MockStreamManagementScope{ctrl: ctrl}
+ mock.recorder = &MockStreamManagementScopeMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockStreamManagementScope) EXPECT() *MockStreamManagementScopeMockRecorder {
+ return m.recorder
+}
+
+// BeginSpan mocks base method.
+func (m *MockStreamManagementScope) BeginSpan() (network.ResourceScopeSpan, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "BeginSpan")
+ ret0, _ := ret[0].(network.ResourceScopeSpan)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// BeginSpan indicates an expected call of BeginSpan.
+func (mr *MockStreamManagementScopeMockRecorder) BeginSpan() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeginSpan", reflect.TypeOf((*MockStreamManagementScope)(nil).BeginSpan))
+}
+
+// Done mocks base method.
+func (m *MockStreamManagementScope) Done() {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "Done")
+}
+
+// Done indicates an expected call of Done.
+func (mr *MockStreamManagementScopeMockRecorder) Done() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Done", reflect.TypeOf((*MockStreamManagementScope)(nil).Done))
+}
+
+// PeerScope mocks base method.
+func (m *MockStreamManagementScope) PeerScope() network.PeerScope {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PeerScope")
+ ret0, _ := ret[0].(network.PeerScope)
+ return ret0
+}
+
+// PeerScope indicates an expected call of PeerScope.
+func (mr *MockStreamManagementScopeMockRecorder) PeerScope() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PeerScope", reflect.TypeOf((*MockStreamManagementScope)(nil).PeerScope))
+}
+
+// ProtocolScope mocks base method.
+func (m *MockStreamManagementScope) ProtocolScope() network.ProtocolScope {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ProtocolScope")
+ ret0, _ := ret[0].(network.ProtocolScope)
+ return ret0
+}
+
+// ProtocolScope indicates an expected call of ProtocolScope.
+func (mr *MockStreamManagementScopeMockRecorder) ProtocolScope() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProtocolScope", reflect.TypeOf((*MockStreamManagementScope)(nil).ProtocolScope))
+}
+
+// ReleaseMemory mocks base method.
+func (m *MockStreamManagementScope) ReleaseMemory(size int) {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "ReleaseMemory", size)
+}
+
+// ReleaseMemory indicates an expected call of ReleaseMemory.
+func (mr *MockStreamManagementScopeMockRecorder) ReleaseMemory(size any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReleaseMemory", reflect.TypeOf((*MockStreamManagementScope)(nil).ReleaseMemory), size)
+}
+
+// ReserveMemory mocks base method.
+func (m *MockStreamManagementScope) ReserveMemory(size int, prio uint8) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ReserveMemory", size, prio)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ReserveMemory indicates an expected call of ReserveMemory.
+func (mr *MockStreamManagementScopeMockRecorder) ReserveMemory(size, prio any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReserveMemory", reflect.TypeOf((*MockStreamManagementScope)(nil).ReserveMemory), size, prio)
+}
+
+// ServiceScope mocks base method.
+func (m *MockStreamManagementScope) ServiceScope() network.ServiceScope {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ServiceScope")
+ ret0, _ := ret[0].(network.ServiceScope)
+ return ret0
+}
+
+// ServiceScope indicates an expected call of ServiceScope.
+func (mr *MockStreamManagementScopeMockRecorder) ServiceScope() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ServiceScope", reflect.TypeOf((*MockStreamManagementScope)(nil).ServiceScope))
+}
+
+// SetProtocol mocks base method.
+func (m *MockStreamManagementScope) SetProtocol(proto protocol.ID) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SetProtocol", proto)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// SetProtocol indicates an expected call of SetProtocol.
+func (mr *MockStreamManagementScopeMockRecorder) SetProtocol(proto any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetProtocol", reflect.TypeOf((*MockStreamManagementScope)(nil).SetProtocol), proto)
+}
+
+// SetService mocks base method.
+func (m *MockStreamManagementScope) SetService(srv string) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SetService", srv)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// SetService indicates an expected call of SetService.
+func (mr *MockStreamManagementScopeMockRecorder) SetService(srv any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetService", reflect.TypeOf((*MockStreamManagementScope)(nil).SetService), srv)
+}
+
+// Stat mocks base method.
+func (m *MockStreamManagementScope) Stat() network.ScopeStat {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Stat")
+ ret0, _ := ret[0].(network.ScopeStat)
+ return ret0
+}
+
+// Stat indicates an expected call of Stat.
+func (mr *MockStreamManagementScopeMockRecorder) Stat() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stat", reflect.TypeOf((*MockStreamManagementScope)(nil).Stat))
+}
diff --git a/core/network/mocks/network.go b/core/network/mocks/network.go
new file mode 100644
index 0000000000..73025bd288
--- /dev/null
+++ b/core/network/mocks/network.go
@@ -0,0 +1,8 @@
+package mocknetwork
+
+//go:generate sh -c "go run go.uber.org/mock/mockgen -package mocknetwork -destination mock_resource_manager.go github.com/libp2p/go-libp2p/core/network ResourceManager"
+//go:generate sh -c "go run go.uber.org/mock/mockgen -package mocknetwork -destination mock_conn_management_scope.go github.com/libp2p/go-libp2p/core/network ConnManagementScope"
+//go:generate sh -c "go run go.uber.org/mock/mockgen -package mocknetwork -destination mock_stream_management_scope.go github.com/libp2p/go-libp2p/core/network StreamManagementScope"
+//go:generate sh -c "go run go.uber.org/mock/mockgen -package mocknetwork -destination mock_peer_scope.go github.com/libp2p/go-libp2p/core/network PeerScope"
+//go:generate sh -c "go run go.uber.org/mock/mockgen -package mocknetwork -destination mock_protocol_scope.go github.com/libp2p/go-libp2p/core/network ProtocolScope"
+//go:generate sh -c "go run go.uber.org/mock/mockgen -package mocknetwork -destination mock_resource_scope_span.go github.com/libp2p/go-libp2p/core/network ResourceScopeSpan"
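+
+// Illustrative sketch (not part of the generated code): the mocks generated by
+// the directives above can be used with gomock in tests; `t` is assumed to be
+// the enclosing *testing.T:
+//
+//    ctrl := gomock.NewController(t)
+//    rcmgr := NewMockResourceManager(ctrl)
+//    rcmgr.EXPECT().Close().Return(nil)
+//    if err := rcmgr.Close(); err != nil {
+//        t.Fatal(err)
+//    }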
diff --git a/core/network/mux.go b/core/network/mux.go
new file mode 100644
index 0000000000..be61ccf62a
--- /dev/null
+++ b/core/network/mux.go
@@ -0,0 +1,148 @@
+package network
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "time"
+)
+
+// ErrReset is returned when reading or writing on a reset stream.
+var ErrReset = errors.New("stream reset")
+
+type StreamErrorCode uint32
+
+type StreamError struct {
+ ErrorCode StreamErrorCode
+ Remote bool
+ TransportError error
+}
+
+func (s *StreamError) Error() string {
+ side := "local"
+ if s.Remote {
+ side = "remote"
+ }
+ if s.TransportError != nil {
+ return fmt.Sprintf("stream reset (%s): code: 0x%x: transport error: %s", side, s.ErrorCode, s.TransportError)
+ }
+ return fmt.Sprintf("stream reset (%s): code: 0x%x", side, s.ErrorCode)
+}
+
+func (s *StreamError) Is(target error) bool {
+ if tse, ok := target.(*StreamError); ok {
+ return tse.ErrorCode == s.ErrorCode && tse.Remote == s.Remote
+ }
+ return false
+}
+
+func (s *StreamError) Unwrap() []error {
+ return []error{ErrReset, s.TransportError}
+}
+
+const (
+ StreamNoError StreamErrorCode = 0
+ StreamProtocolNegotiationFailed StreamErrorCode = 0x1001
+ StreamResourceLimitExceeded StreamErrorCode = 0x1002
+ StreamRateLimited StreamErrorCode = 0x1003
+ StreamProtocolViolation StreamErrorCode = 0x1004
+ StreamSupplanted StreamErrorCode = 0x1005
+ StreamGarbageCollected StreamErrorCode = 0x1006
+ StreamShutdown StreamErrorCode = 0x1007
+ StreamGated StreamErrorCode = 0x1008
+ StreamCodeOutOfRange StreamErrorCode = 0x1009
+)
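+
+// Illustrative sketch: because StreamError unwraps to ErrReset, callers can
+// match stream resets generically or by a specific code:
+//
+//    if errors.Is(err, ErrReset) {
+//        // any stream reset, local or remote
+//    }
+//    if errors.Is(err, &StreamError{ErrorCode: StreamRateLimited, Remote: true}) {
+//        // the remote peer rate-limited this stream
+//    }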
+
+// MuxedStream is a bidirectional io pipe within a connection.
+type MuxedStream interface {
+ io.Reader
+ io.Writer
+
+ // Close closes the stream.
+ //
+ // * Any buffered data for writing will be flushed.
+ // * Future reads will fail.
+ // * Any in-progress reads/writes will be interrupted.
+ //
+ // Close may be asynchronous and _does not_ guarantee receipt of the
+ // data.
+ //
+ // Close closes the stream for both reading and writing.
+ // Close is equivalent to calling `CloseRead` and `CloseWrite`. Importantly, Close will not wait for any form of acknowledgment.
+ // If acknowledgment is required, the caller must call `CloseWrite`, then wait on the stream for a response (or an EOF),
+ // then call Close() to free the stream object.
+ //
+ // When done with a stream, the user must call either Close() or `Reset()` to discard the stream, even after calling `CloseRead` and/or `CloseWrite`.
+ io.Closer
+
+ // CloseWrite closes the stream for writing but leaves it open for
+ // reading.
+ //
+ // CloseWrite does not free the stream, users must still call Close or
+ // Reset.
+ CloseWrite() error
+
+ // CloseRead closes the stream for reading but leaves it open for
+ // writing.
+ //
+ // When CloseRead is called, all in-progress Read calls are interrupted with a non-EOF error and
+ // no further calls to Read will succeed.
+ //
+ // The handling of new incoming data on the stream after calling this function is implementation defined.
+ //
+ // CloseRead does not free the stream, users must still call Close or
+ // Reset.
+ CloseRead() error
+
+ // Reset closes both ends of the stream. Use this to tell the remote
+ // side to hang up and go away.
+ Reset() error
+
+ // ResetWithError aborts both ends of the stream with `errCode`. `errCode` is sent
+ // to the peer on a best-effort basis. For transports that do not support sending
+ // error codes to the remote peer, the behavior is identical to calling Reset.
+ ResetWithError(errCode StreamErrorCode) error
+
+ SetDeadline(time.Time) error
+ SetReadDeadline(time.Time) error
+ SetWriteDeadline(time.Time) error
+}
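+
+// Illustrative sketch of the graceful-close pattern described in the Close
+// documentation above, for a hypothetical stream `s` carrying a
+// request/response exchange:
+//
+//    if _, err := s.Write(req); err != nil {
+//        s.Reset()
+//        return err
+//    }
+//    if err := s.CloseWrite(); err != nil { // signal EOF to the remote side
+//        s.Reset()
+//        return err
+//    }
+//    resp, err := io.ReadAll(s) // wait for the response (or EOF)
+//    if err != nil {
+//        s.Reset()
+//        return err
+//    }
+//    // ... handle resp ...
+//    return s.Close() // free the stream object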
+
+// MuxedConn represents a connection to a remote peer that has been
+// extended to support stream multiplexing.
+//
+// A MuxedConn allows a single net.Conn connection to carry many logically
+// independent bidirectional streams of binary data.
+//
+// Together with network.ConnSecurity, MuxedConn is a component of the
+// transport.CapableConn interface, which represents a "raw" network
+// connection that has been "upgraded" to support the libp2p capabilities
+// of secure communication and stream multiplexing.
+type MuxedConn interface {
+ // Close closes the stream muxer and the underlying net.Conn.
+ io.Closer
+
+ // CloseWithError closes the connection with errCode. The errCode is sent
+ // to the peer.
+ CloseWithError(errCode ConnErrorCode) error
+
+ // IsClosed returns whether a connection is fully closed, so it can
+ // be garbage collected.
+ IsClosed() bool
+
+ // OpenStream creates a new stream.
+ OpenStream(context.Context) (MuxedStream, error)
+
+ // AcceptStream accepts a stream opened by the other side.
+ AcceptStream() (MuxedStream, error)
+}
+
+// Multiplexer wraps a net.Conn with a stream multiplexing
+// implementation and returns a MuxedConn that supports opening
+// multiple streams over the underlying net.Conn
+type Multiplexer interface {
+ // NewConn constructs a new connection
+ NewConn(c net.Conn, isServer bool, scope PeerScope) (MuxedConn, error)
+}
diff --git a/core/network/nattype.go b/core/network/nattype.go
new file mode 100644
index 0000000000..ab62886afb
--- /dev/null
+++ b/core/network/nattype.go
@@ -0,0 +1,77 @@
+package network
+
+// NATDeviceType indicates the type of the NAT device.
+type NATDeviceType int
+
+const (
+ // NATDeviceTypeUnknown indicates that the type of the NAT device is unknown.
+ NATDeviceTypeUnknown NATDeviceType = iota
+
+ // NATDeviceTypeEndpointIndependent is a NAT device that maps addresses
+ // independent of the destination address. An EndpointIndependent NAT is
+ // a NAT where all outgoing connections from the same source IP address
+ // and port are mapped by the NAT device to the same IP address and port
+ // irrespective of the destination endpoint.
+ //
+ // NAT traversal with hole punching is possible with an
+ // EndpointIndependent NAT ONLY if the remote peer is ALSO behind an
+ // EndpointIndependent NAT. If the remote peer is behind an
+ // EndpointDependent NAT, hole punching will fail.
+ NATDeviceTypeEndpointIndependent
+
+ // NATDeviceTypeEndpointDependent is a NAT device that maps addresses
+ // depending on the destination address. An EndpointDependent NAT maps
+ // outgoing connections with different destination addresses to
+ // different IP addresses and ports, even if they originate from the
+ // same source IP address and port.
+ //
+ // NAT traversal with hole-punching is currently NOT possible in libp2p
+ // with EndpointDependent NATs irrespective of the remote peer's NAT
+ // type.
+ NATDeviceTypeEndpointDependent
+)
+
+const (
+ // NATDeviceTypeCone is an alias for NATDeviceTypeEndpointIndependent.
+ //
+ // Deprecated: Use NATDeviceTypeEndpointIndependent instead.
+ NATDeviceTypeCone = NATDeviceTypeEndpointIndependent
+ // NATDeviceTypeSymmetric is an alias for NATDeviceTypeEndpointDependent.
+ //
+ // Deprecated: Use NATDeviceTypeEndpointDependent instead.
+ NATDeviceTypeSymmetric = NATDeviceTypeEndpointDependent
+)
+
+func (r NATDeviceType) String() string {
+ switch r {
+ case 0:
+ return "Unknown"
+ case 1:
+ return "Endpoint Independent"
+ case 2:
+ return "Endpoint Dependent"
+ default:
+ return "unrecognized"
+ }
+}
+
+// NATTransportProtocol is the transport protocol for which the NAT Device Type has been determined.
+type NATTransportProtocol int
+
+const (
+ // NATTransportUDP means that the NAT Device Type has been determined for the UDP Protocol.
+ NATTransportUDP NATTransportProtocol = iota
+ // NATTransportTCP means that the NAT Device Type has been determined for the TCP Protocol.
+ NATTransportTCP
+)
+
+func (n NATTransportProtocol) String() string {
+ switch n {
+ case 0:
+ return "UDP"
+ case 1:
+ return "TCP"
+ default:
+ return "unrecognized"
+ }
+}
diff --git a/core/network/network.go b/core/network/network.go
new file mode 100644
index 0000000000..7c0ad949e2
--- /dev/null
+++ b/core/network/network.go
@@ -0,0 +1,218 @@
+// Package network provides core networking abstractions for libp2p.
+//
+// The network package provides the high-level Network interface for interacting
+// with other libp2p peers, which is the primary public API for initiating and
+// accepting connections to remote peers.
+package network
+
+import (
+ "context"
+ "io"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+// MessageSizeMax is a soft (recommended) maximum for network messages.
+// One can write more, as the interface is a stream, but when the whole message
+// is a single, large serialized object, it is useful to split it across
+// multiple reads/writes.
+const MessageSizeMax = 1 << 22 // 4 MB
+
+// Direction represents which peer in a stream initiated a connection.
+type Direction int
+
+const (
+ // DirUnknown is the default direction.
+ DirUnknown Direction = iota
+ // DirInbound is for when the remote peer initiated a connection.
+ DirInbound
+ // DirOutbound is for when the local peer initiated a connection.
+ DirOutbound
+)
+
+const unrecognized = "(unrecognized)"
+
+func (d Direction) String() string {
+ str := [...]string{"Unknown", "Inbound", "Outbound"}
+ if d < 0 || int(d) >= len(str) {
+ return unrecognized
+ }
+ return str[d]
+}
+
+// Connectedness signals the capacity for a connection with a given node.
+// It is used to signal to services and other peers whether a node is reachable.
+type Connectedness int
+
+const (
+ // NotConnected means no connection to peer, and no extra information (default)
+ NotConnected Connectedness = iota
+
+ // Connected means has an open, live connection to peer
+ Connected
+
+ // Deprecated: CanConnect is deprecated and will be removed in a future release.
+ //
+ // CanConnect means recently connected to peer, terminated gracefully
+ CanConnect
+
+ // Deprecated: CannotConnect is deprecated and will be removed in a future release.
+ //
+ // CannotConnect means recently attempted connecting but failed to connect.
+ // (should signal "made effort, failed")
+ CannotConnect
+
+ // Limited means we have a transient connection to the peer, but aren't fully connected.
+ Limited
+)
+
+func (c Connectedness) String() string {
+ str := [...]string{"NotConnected", "Connected", "CanConnect", "CannotConnect", "Limited"}
+ if c < 0 || int(c) >= len(str) {
+ return unrecognized
+ }
+ return str[c]
+}
+
+// Reachability indicates how reachable a node is.
+type Reachability int
+
+const (
+ // ReachabilityUnknown indicates that the reachability status of the
+ // node is unknown.
+ ReachabilityUnknown Reachability = iota
+
+ // ReachabilityPublic indicates that the node is reachable from the
+ // public internet.
+ ReachabilityPublic
+
+ // ReachabilityPrivate indicates that the node is not reachable from the
+ // public internet.
+ //
+ // NOTE: This node may _still_ be reachable via relays.
+ ReachabilityPrivate
+)
+
+func (r Reachability) String() string {
+ str := [...]string{"Unknown", "Public", "Private"}
+ if r < 0 || int(r) >= len(str) {
+ return unrecognized
+ }
+ return str[r]
+}
+
+// ConnStats stores metadata pertaining to a given Conn.
+type ConnStats struct {
+ Stats
+ // NumStreams is the number of streams on the connection.
+ NumStreams int
+}
+
+// Stats stores metadata pertaining to a given Stream / Conn.
+type Stats struct {
+ // Direction specifies whether this is an inbound or an outbound connection.
+ Direction Direction
+ // Opened is the timestamp when this connection was opened.
+ Opened time.Time
+ // Limited indicates that this connection is limited. It may be limited by
+ // bytes or time. In practice, this is a connection formed over a circuit v2
+ // relay.
+ Limited bool
+ // Extra stores additional metadata about this connection.
+ Extra map[interface{}]interface{}
+}
+
+// StreamHandler is the type of function used to listen for
+// streams opened by the remote side.
+type StreamHandler func(Stream)
+
+// Network is the interface used to connect to the outside world.
+// It dials and listens for connections. It uses a Swarm to pool
+// connections (see swarm pkg, and peerstream.Swarm). Connections
+// are encrypted with a TLS-like protocol.
+type Network interface {
+ Dialer
+ io.Closer
+
+ // SetStreamHandler sets the handler for new streams opened by the
+ // remote side. This operation is thread-safe.
+ SetStreamHandler(StreamHandler)
+
+ // NewStream returns a new stream to given peer p.
+ // If there is no connection to p, attempts to create one.
+ NewStream(context.Context, peer.ID) (Stream, error)
+
+ // Listen tells the network to start listening on given multiaddrs.
+ Listen(...ma.Multiaddr) error
+
+ // ListenAddresses returns a list of addresses at which this network listens.
+ ListenAddresses() []ma.Multiaddr
+
+ // InterfaceListenAddresses returns a list of addresses at which this network
+ // listens. It expands "any interface" addresses (/ip4/0.0.0.0, /ip6/::) to
+ // use the known local interfaces.
+ InterfaceListenAddresses() ([]ma.Multiaddr, error)
+
+ // ResourceManager returns the ResourceManager associated with this network
+ ResourceManager() ResourceManager
+}
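+
+// Illustrative sketch of opening a stream to a peer `p` over a Network `n`
+// (both assumed to exist); error handling is elided:
+//
+//    s, err := n.NewStream(ctx, p)
+//    if err != nil { /* ... */ }
+//    defer s.Close()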
+
+type MultiaddrDNSResolver interface {
+ // ResolveDNSAddr resolves the first /dnsaddr component in a multiaddr.
+ // It recursively resolves DNSADDRs up to the recursion limit.
+ ResolveDNSAddr(ctx context.Context, expectedPeerID peer.ID, maddr ma.Multiaddr, recursionLimit, outputLimit int) ([]ma.Multiaddr, error)
+ // ResolveDNSComponent resolves the first /{dns,dns4,dns6} component in a multiaddr.
+ ResolveDNSComponent(ctx context.Context, maddr ma.Multiaddr, outputLimit int) ([]ma.Multiaddr, error)
+}
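+
+// Illustrative sketch of resolving a /dnsaddr with a MultiaddrDNSResolver `r`;
+// the address, peer ID, and limits below are hypothetical:
+//
+//    maddr, _ := ma.NewMultiaddr("/dnsaddr/bootstrap.example.com")
+//    addrs, err := r.ResolveDNSAddr(ctx, expectedPeerID, maddr, 4, 32)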
+
+// Dialer represents a service that can dial out to peers
+// (this is usually just a Network, but other services may not need the whole
+// stack, and thus it becomes easier to mock)
+type Dialer interface {
+ // Peerstore returns the internal peerstore.
+ // This is useful for telling the dialer about a new address for a peer,
+ // or for using one of the public keys found on the network.
+ Peerstore() peerstore.Peerstore
+
+ // LocalPeer returns the local peer associated with this network
+ LocalPeer() peer.ID
+
+ // DialPeer establishes a connection to a given peer
+ DialPeer(context.Context, peer.ID) (Conn, error)
+
+ // ClosePeer closes the connection to a given peer
+ ClosePeer(peer.ID) error
+
+ // Connectedness returns a state signaling connection capabilities
+ Connectedness(peer.ID) Connectedness
+
+ // Peers returns the peers connected
+ Peers() []peer.ID
+
+ // Conns returns the connections in this Network
+ Conns() []Conn
+
+ // ConnsToPeer returns the connections in this Network for given peer.
+ ConnsToPeer(p peer.ID) []Conn
+
+ // Notify/StopNotify register and unregister a notifiee for signals
+ Notify(Notifiee)
+ StopNotify(Notifiee)
+
+ // CanDial returns whether the dialer can dial peer p at addr
+ CanDial(p peer.ID, addr ma.Multiaddr) bool
+}
+
+// AddrDelay provides an address along with the delay after which the address
+// should be dialed
+type AddrDelay struct {
+ Addr ma.Multiaddr
+ Delay time.Duration
+}
+
+// DialRanker provides a schedule of dialing the provided addresses
+type DialRanker func([]ma.Multiaddr) []AddrDelay
diff --git a/core/network/notifee.go b/core/network/notifee.go
new file mode 100644
index 0000000000..525190e59a
--- /dev/null
+++ b/core/network/notifee.go
@@ -0,0 +1,67 @@
+package network
+
+import (
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+// Notifiee is an interface for an object wishing to receive
+// notifications from a Network.
+type Notifiee interface {
+ Listen(Network, ma.Multiaddr) // called when network starts listening on an addr
+ ListenClose(Network, ma.Multiaddr) // called when network stops listening on an addr
+ Connected(Network, Conn) // called when a connection opened
+ Disconnected(Network, Conn) // called when a connection closed
+}
+
+// NotifyBundle implements Notifiee by calling any of the functions set on it,
+// and doing nothing if they are unset. This is the easy way to register for
+// notifications.
+type NotifyBundle struct {
+ ListenF func(Network, ma.Multiaddr)
+ ListenCloseF func(Network, ma.Multiaddr)
+
+ ConnectedF func(Network, Conn)
+ DisconnectedF func(Network, Conn)
+}
+
+var _ Notifiee = (*NotifyBundle)(nil)
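+
+// Illustrative sketch of registering a NotifyBundle on a Network `n` to log
+// connection events; only the callbacks of interest need to be set:
+//
+//    n.Notify(&NotifyBundle{
+//        ConnectedF: func(_ Network, c Conn) {
+//            fmt.Println("connected to", c.RemotePeer())
+//        },
+//    })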
+
+// Listen calls ListenF if it is not nil.
+func (nb *NotifyBundle) Listen(n Network, a ma.Multiaddr) {
+ if nb.ListenF != nil {
+ nb.ListenF(n, a)
+ }
+}
+
+// ListenClose calls ListenCloseF if it is not nil.
+func (nb *NotifyBundle) ListenClose(n Network, a ma.Multiaddr) {
+ if nb.ListenCloseF != nil {
+ nb.ListenCloseF(n, a)
+ }
+}
+
+// Connected calls ConnectedF if it is not nil.
+func (nb *NotifyBundle) Connected(n Network, c Conn) {
+ if nb.ConnectedF != nil {
+ nb.ConnectedF(n, c)
+ }
+}
+
+// Disconnected calls DisconnectedF if it is not nil.
+func (nb *NotifyBundle) Disconnected(n Network, c Conn) {
+ if nb.DisconnectedF != nil {
+ nb.DisconnectedF(n, c)
+ }
+}
+
+// Global noop notifiee. Do not change.
+var GlobalNoopNotifiee = &NoopNotifiee{}
+
+type NoopNotifiee struct{}
+
+var _ Notifiee = (*NoopNotifiee)(nil)
+
+func (nn *NoopNotifiee) Connected(_ Network, _ Conn) {}
+func (nn *NoopNotifiee) Disconnected(_ Network, _ Conn) {}
+func (nn *NoopNotifiee) Listen(_ Network, _ ma.Multiaddr) {}
+func (nn *NoopNotifiee) ListenClose(_ Network, _ ma.Multiaddr) {}
diff --git a/core/network/notifee_test.go b/core/network/notifee_test.go
new file mode 100644
index 0000000000..12090d1f19
--- /dev/null
+++ b/core/network/notifee_test.go
@@ -0,0 +1,87 @@
+package network
+
+import (
+ "testing"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+func TestListen(T *testing.T) {
+ var notifee NotifyBundle
+ addr, err := ma.NewMultiaddr("/ip4/127.0.0.1/udp/1234")
+ if err != nil {
+ T.Fatal("unexpected multiaddr error")
+ }
+ notifee.Listen(nil, addr)
+
+ called := false
+ notifee.ListenF = func(Network, ma.Multiaddr) {
+ called = true
+ }
+ if called {
+ T.Fatal("called should be false")
+ }
+
+ notifee.Listen(nil, addr)
+ if !called {
+ T.Fatal("Listen should have been called")
+ }
+}
+
+func TestListenClose(T *testing.T) {
+ var notifee NotifyBundle
+ addr, err := ma.NewMultiaddr("/ip4/127.0.0.1/udp/1234")
+ if err != nil {
+ T.Fatal("unexpected multiaddr error")
+ }
+ notifee.ListenClose(nil, addr)
+
+ called := false
+ notifee.ListenCloseF = func(Network, ma.Multiaddr) {
+ called = true
+ }
+ if called {
+ T.Fatal("called should be false")
+ }
+
+ notifee.ListenClose(nil, addr)
+ if !called {
+ T.Fatal("ListenClose should have been called")
+ }
+}
+
+func TestConnected(T *testing.T) {
+ var notifee NotifyBundle
+ notifee.Connected(nil, nil)
+
+ called := false
+ notifee.ConnectedF = func(Network, Conn) {
+ called = true
+ }
+ if called {
+ T.Fatal("called should be false")
+ }
+
+ notifee.Connected(nil, nil)
+ if !called {
+ T.Fatal("Connected should have been called")
+ }
+}
+
+func TestDisconnected(T *testing.T) {
+ var notifee NotifyBundle
+ notifee.Disconnected(nil, nil)
+
+ called := false
+ notifee.DisconnectedF = func(Network, Conn) {
+ called = true
+ }
+ if called {
+ T.Fatal("called should be false")
+ }
+
+ notifee.Disconnected(nil, nil)
+ if !called {
+ T.Fatal("Disconnected should have been called")
+ }
+}
diff --git a/core/network/rcmgr.go b/core/network/rcmgr.go
new file mode 100644
index 0000000000..83bbf4f1f4
--- /dev/null
+++ b/core/network/rcmgr.go
@@ -0,0 +1,360 @@
+package network
+
+import (
+ "context"
+ "errors"
+ "net"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/protocol"
+ "github.com/multiformats/go-multiaddr"
+)
+
+// ResourceManager is the interface to the network resource management subsystem.
+// The ResourceManager tracks and accounts for resource usage in the stack, from the internals
+// to the application, and provides a mechanism to limit resource usage according to a user
+// configurable policy.
+//
+// Resource Management through the ResourceManager is based on the concept of Resource
+// Management Scopes, whereby resource usage is constrained by a DAG of scopes.
+// The following diagram illustrates the structure of the resource constraint DAG:
+//
+//   System
+//     +------------> Transient.............+................+
+//     |                                    .                .
+//     +------------> Service------------- . ----------+     .
+//     |                                    .           |     .
+//     +-------------> Protocol----------- . ----------+     .
+//     |                                    .           |     .
+//     +--------------> Peer                \           |     .
+//                      +------------> Connection       |     .
+//                      |                                \     \
+//                      +---------------------------> Stream
+//
+// The basic resources accounted by the ResourceManager include memory, streams, connections,
+// and file descriptors. These account for both space and time used by
+// the stack, as each resource has a direct effect on the system's
+// availability and performance.
+//
+// The modus operandi of the resource manager is to restrict resource usage at the time of
+// reservation. When a component of the stack needs to use a resource, it reserves it in the
+// appropriate scope. The resource manager gates the reservation against the scope's applicable
+// limits; if the limit is exceeded, an error (wrapping ErrResourceLimitExceeded) is returned,
+// and it is up to the component to act accordingly. At the lower levels of the stack, this
+// will normally signal a failure of some sort, like failing to open a stream or a connection,
+// which will propagate to the programmer. Some components may be able to handle resource
+// reservation failure more gracefully; for instance, a muxer trying to grow a buffer for a
+// window change will simply retain the existing window size and continue to operate normally,
+// albeit with some degraded throughput.
+// All resources reserved in some scope are released when the scope is closed. For low level
+// scopes, mainly Connection and Stream scopes, this happens when the connection or stream is
+// closed.
+//
+// Service programmers will typically use the resource manager to reserve memory
+// for their subsystem.
+// There are two avenues for this: the programmer can attach a stream to a service, whereby
+// resources reserved by the stream are automatically accounted for in the service budget; or the
+// programmer may directly interact with the service scope, by using ViewService through the
+// resource manager interface.
+//
+// Application programmers can also directly reserve memory in some applicable scope. In order
+// to facilitate control flow delimited resource accounting, all scopes defined in the system
+// allow for the user to create spans. Spans are temporary scopes rooted at some
+// other scope and release their resources when the programmer is done with them. Span
+// scopes can form trees, with nested spans.
+//
+// Typical Usage:
+// - Low level components of the system (transports, muxers) all have access to the resource
+// manager and create connection and stream scopes through it. These scopes are accessible
+// to the user, albeit with a narrower interface, through Conn and Stream objects, which have
+// a Scope method.
+// - Services typically center around streams, where the programmer can attach streams to a
+// particular service. They can also directly reserve memory for a service by accessing the
+// service scope using the ResourceManager interface.
+// - Applications that want to account for their network resource usage can reserve memory,
+// typically using a span, directly in the System or a Service scope; they can also
+// opt to use appropriate stream scopes for streams that they create or own.
+//
+// User Serviceable Parts: the user has the option to specify their own implementation of the
+// interface. We provide a canonical implementation in the go-libp2p-resource-manager package.
+// The user of that package can specify limits for the various scopes, which can be static
+// or dynamic.
+//
+// WARNING: The ResourceManager interface is considered experimental and subject to change
+// in subsequent releases.
+type ResourceManager interface {
+ ResourceScopeViewer
+
+ // OpenConnection creates a new connection scope not yet associated with any peer; the connection
+ // is scoped at the transient scope.
+ // The caller owns the returned scope and is responsible for calling Done in order to signify
+ // the end of the scope's span.
+ OpenConnection(dir Direction, usefd bool, endpoint multiaddr.Multiaddr) (ConnManagementScope, error)
+
+ // VerifySourceAddress tells the transport to verify the source address for an incoming connection
+ // before gating the connection with OpenConnection.
+ VerifySourceAddress(addr net.Addr) bool
+
+ // OpenStream creates a new stream scope, initially unnegotiated.
+ // An unnegotiated stream will be initially unattached to any protocol scope
+ // and constrained by the transient scope.
+ // The caller owns the returned scope and is responsible for calling Done in order to signify
+ // the end of the scope's span.
+ OpenStream(p peer.ID, dir Direction) (StreamManagementScope, error)
+
+ // Close closes the resource manager
+ Close() error
+}
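+
+// Illustrative sketch of the low-level connection accounting flow; the
+// direction, fd flag, address, and peer below are hypothetical:
+//
+//    connScope, err := rcmgr.OpenConnection(DirOutbound, true, raddr)
+//    if err != nil { /* over limits, abort the dial */ }
+//    defer connScope.Done()
+//    // ... after the security handshake identifies the peer:
+//    if err := connScope.SetPeer(p); err != nil { /* peer over limits */ }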
+
+// ResourceScopeViewer is a mixin interface providing view methods for accessing top level
+// scopes.
+type ResourceScopeViewer interface {
+ // ViewSystem views the system-wide resource scope.
+ // The system scope is the top level scope that accounts for global
+ // resource usage at all levels of the system. This scope constrains all
+ // other scopes and institutes global hard limits.
+ ViewSystem(func(ResourceScope) error) error
+
+ // ViewTransient views the transient (DMZ) resource scope.
+ // The transient scope accounts for resources that are in the process of
+ // being fully established. For instance, a new connection prior to the
+ // handshake does not belong to any peer, but it still needs to be
+ // constrained as this opens an avenue for attacks in transient resource
+ // usage. Similarly, a stream that has not negotiated a protocol yet is
+ // constrained by the transient scope.
+ ViewTransient(func(ResourceScope) error) error
+
+ // ViewService retrieves a service-specific scope.
+ ViewService(string, func(ServiceScope) error) error
+
+ // ViewProtocol views the resource management scope for a specific protocol.
+ ViewProtocol(protocol.ID, func(ProtocolScope) error) error
+
+ // ViewPeer views the resource management scope for a specific peer.
+ ViewPeer(peer.ID, func(PeerScope) error) error
+}
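+
+// Illustrative sketch of reserving memory in a service scope through the
+// viewer interface; "my-service" and the size are hypothetical, and a real
+// service would later release the memory with ReleaseMemory:
+//
+//    err := rcmgr.ViewService("my-service", func(s ServiceScope) error {
+//        return s.ReserveMemory(4096, ReservationPriorityMedium)
+//    })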
+
+const (
+ // ReservationPriorityLow is a reservation priority that allows a reservation
+ // only if the scope's memory utilization is at 40% or less.
+ ReservationPriorityLow uint8 = 101
+ // ReservationPriorityMedium is a reservation priority that allows a reservation
+ // only if the scope's memory utilization is at 60% or less.
+ ReservationPriorityMedium uint8 = 152
+ // ReservationPriorityHigh is a reservation priority that allows a reservation
+ // only if the scope's memory utilization is at 80% or less.
+ ReservationPriorityHigh uint8 = 203
+ // ReservationPriorityAlways is a reservation priority that allows a reservation
+ // as long as there is enough memory, regardless of scope utilization.
+ ReservationPriorityAlways uint8 = 255
+)
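+
+// For example, with prio = ReservationPriorityLow (101), reservations are
+// admitted only while total reserved memory stays at or below
+// (1+101)/256 ≈ 40% of the scope's limit: roughly 25.5 MiB of a 64 MiB limit.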
+
+// ResourceScope is the interface for all scopes.
+type ResourceScope interface {
+ // ReserveMemory reserves memory/buffer space in the scope; the unit is bytes.
+ //
+ // If ReserveMemory returns an error, then no memory was reserved and the caller should handle
+ // the failure condition.
+ //
+ // The priority argument indicates the priority of the memory reservation. A reservation
+ // will fail if it would raise the scope's memory utilization above (1+prio)/256 of the
+ // scope's limit, providing a mechanism to gracefully handle optional reservations that
+ // might overload the system.
+ // For instance, a muxer growing a window buffer will use a low priority and only grow the buffer
+ // if there is no memory pressure in the system.
+ //
+ // There are 4 predefined priority levels, Low, Medium, High and Always,
+ // capturing common patterns, but the user is free to use any granularity applicable to their case.
+ ReserveMemory(size int, prio uint8) error
+
+ // ReleaseMemory explicitly releases memory previously reserved with ReserveMemory
+ ReleaseMemory(size int)
+
+ // Stat retrieves current resource usage for the scope.
+ Stat() ScopeStat
+
+ // BeginSpan creates a new span scope rooted at this scope
+ BeginSpan() (ResourceScopeSpan, error)
+}
+
+// ResourceScopeSpan is a ResourceScope with a delimited span.
+// Span scopes are control flow delimited and release all their associated resources
+// when the programmer calls Done.
+//
+// Example:
+//
+// s, err := someScope.BeginSpan()
+// if err != nil { ... }
+// defer s.Done()
+//
+// if err := s.ReserveMemory(...); err != nil { ... }
+// // ... use memory
+type ResourceScopeSpan interface {
+ ResourceScope
+ // Done ends the span and releases associated resources.
+ Done()
+}
+
+// ServiceScope is the interface for service resource scopes
+type ServiceScope interface {
+ ResourceScope
+
+ // Name returns the name of this service
+ Name() string
+}
+
+// ProtocolScope is the interface for protocol resource scopes.
+type ProtocolScope interface {
+ ResourceScope
+
+ // Protocol returns the protocol for this scope
+ Protocol() protocol.ID
+}
+
+// PeerScope is the interface for peer resource scopes.
+type PeerScope interface {
+ ResourceScope
+
+ // Peer returns the peer ID for this scope
+ Peer() peer.ID
+}
+
+// ConnManagementScope is the low level interface for connection resource scopes.
+// This interface is used by the low level components of the system who create and own
+// the span of a connection scope.
+type ConnManagementScope interface {
+ ResourceScopeSpan
+
+ // PeerScope returns the peer scope associated with this connection.
+ // It returns nil if the connection is not yet associated with any peer.
+ PeerScope() PeerScope
+
+ // SetPeer sets the peer for a previously unassociated connection
+ SetPeer(peer.ID) error
+}
+
+// ConnScope is the user view of a connection scope
+type ConnScope interface {
+ ResourceScope
+}
+
+// StreamManagementScope is the interface for stream resource scopes.
+// This interface is used by the low level components of the system who create and own
+// the span of a stream scope.
+type StreamManagementScope interface {
+ ResourceScopeSpan
+
+ // ProtocolScope returns the protocol resource scope associated with this stream.
+ // It returns nil if the stream is not associated with any protocol scope.
+ ProtocolScope() ProtocolScope
+ // SetProtocol sets the protocol for a previously unnegotiated stream
+ SetProtocol(proto protocol.ID) error
+
+ // ServiceScope returns the service owning the stream, if any.
+ ServiceScope() ServiceScope
+ // SetService sets the service owning this stream.
+ SetService(srv string) error
+
+ // PeerScope returns the peer resource scope associated with this stream.
+ PeerScope() PeerScope
+}
+
+// StreamScope is the user view of a StreamScope.
+type StreamScope interface {
+ ResourceScope
+
+ // SetService sets the service owning this stream.
+ SetService(srv string) error
+}
+
+// ScopeStat is a struct containing resource accounting information.
+type ScopeStat struct {
+ NumStreamsInbound int
+ NumStreamsOutbound int
+ NumConnsInbound int
+ NumConnsOutbound int
+ NumFD int
+
+ Memory int64
+}
+
+// connManagementScopeKey is the key to store Scope in contexts
+type connManagementScopeKey struct{}
+
+func WithConnManagementScope(ctx context.Context, scope ConnManagementScope) context.Context {
+ return context.WithValue(ctx, connManagementScopeKey{}, scope)
+}
+
+func UnwrapConnManagementScope(ctx context.Context) (ConnManagementScope, error) {
+ v := ctx.Value(connManagementScopeKey{})
+ if v == nil {
+ return nil, errors.New("context has no ConnManagementScope")
+ }
+ scope, ok := v.(ConnManagementScope)
+ if !ok {
+ return nil, errors.New("context has no ConnManagementScope")
+ }
+ return scope, nil
+}
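+
+// Illustrative sketch of threading a ConnManagementScope through a context,
+// e.g. from a dialer into a transport; `ctx` and `scope` are assumed to exist:
+//
+//    ctx = WithConnManagementScope(ctx, scope)
+//    // ... later, inside the transport:
+//    scope, err := UnwrapConnManagementScope(ctx)
+//    if err != nil { /* no scope attached */ }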
+
+// NullResourceManager is a stub for tests and initialization of default values
+type NullResourceManager struct{}
+
+var _ ResourceManager = (*NullResourceManager)(nil)
+
+var _ ResourceScope = (*NullScope)(nil)
+var _ ResourceScopeSpan = (*NullScope)(nil)
+var _ ServiceScope = (*NullScope)(nil)
+var _ ProtocolScope = (*NullScope)(nil)
+var _ PeerScope = (*NullScope)(nil)
+var _ ConnManagementScope = (*NullScope)(nil)
+var _ ConnScope = (*NullScope)(nil)
+var _ StreamManagementScope = (*NullScope)(nil)
+var _ StreamScope = (*NullScope)(nil)
+
+// NullScope is a stub for tests and initialization of default values
+type NullScope struct{}
+
+func (n *NullResourceManager) ViewSystem(f func(ResourceScope) error) error {
+ return f(&NullScope{})
+}
+func (n *NullResourceManager) ViewTransient(f func(ResourceScope) error) error {
+ return f(&NullScope{})
+}
+func (n *NullResourceManager) ViewService(_ string, f func(ServiceScope) error) error {
+ return f(&NullScope{})
+}
+func (n *NullResourceManager) ViewProtocol(_ protocol.ID, f func(ProtocolScope) error) error {
+ return f(&NullScope{})
+}
+func (n *NullResourceManager) ViewPeer(_ peer.ID, f func(PeerScope) error) error {
+ return f(&NullScope{})
+}
+func (n *NullResourceManager) OpenConnection(_ Direction, _ bool, _ multiaddr.Multiaddr) (ConnManagementScope, error) {
+ return &NullScope{}, nil
+}
+func (n *NullResourceManager) OpenStream(_ peer.ID, _ Direction) (StreamManagementScope, error) {
+ return &NullScope{}, nil
+}
+func (*NullResourceManager) VerifySourceAddress(_ net.Addr) bool {
+ return false
+}
+
+func (n *NullResourceManager) Close() error {
+ return nil
+}
+
+func (n *NullScope) ReserveMemory(_ int, _ uint8) error { return nil }
+func (n *NullScope) ReleaseMemory(_ int) {}
+func (n *NullScope) Stat() ScopeStat { return ScopeStat{} }
+func (n *NullScope) BeginSpan() (ResourceScopeSpan, error) { return &NullScope{}, nil }
+func (n *NullScope) Done() {}
+func (n *NullScope) Name() string { return "" }
+func (n *NullScope) Protocol() protocol.ID { return "" }
+func (n *NullScope) Peer() peer.ID { return "" }
+func (n *NullScope) PeerScope() PeerScope { return &NullScope{} }
+func (n *NullScope) SetPeer(peer.ID) error { return nil }
+func (n *NullScope) ProtocolScope() ProtocolScope { return &NullScope{} }
+func (n *NullScope) SetProtocol(_ protocol.ID) error { return nil }
+func (n *NullScope) ServiceScope() ServiceScope { return &NullScope{} }
+func (n *NullScope) SetService(_ string) error { return nil }
+func (n *NullScope) VerifySourceAddress(_ net.Addr) bool { return false }
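For illustration, a minimal sketch of how the context helpers above compose (hypothetical caller; it uses only the APIs defined in this file, with the NullScope stub standing in for a real scope):

```go
package main

import (
	"context"
	"fmt"

	"github.com/libp2p/go-libp2p/core/network"
)

func main() {
	// A dialer can attach the connection's management scope to the dial
	// context; here we use the NullScope stub as a stand-in.
	ctx := network.WithConnManagementScope(context.Background(), &network.NullScope{})

	// A lower layer that only receives the context recovers the scope later.
	scope, err := network.UnwrapConnManagementScope(ctx)
	if err != nil {
		fmt.Println("no scope attached:", err)
		return
	}
	fmt.Printf("scope stat: %+v\n", scope.Stat())
}
```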
diff --git a/core/network/stream.go b/core/network/stream.go
new file mode 100644
index 0000000000..f2b6cbcb88
--- /dev/null
+++ b/core/network/stream.go
@@ -0,0 +1,34 @@
+package network
+
+import (
+ "github.com/libp2p/go-libp2p/core/protocol"
+)
+
+// Stream represents a bidirectional channel between two agents in
+// a libp2p network. "agent" is as granular as desired, potentially
+// being a "request -> reply" pair, or whole protocols.
+//
+// Streams are backed by a stream multiplexer under the hood.
+type Stream interface {
+ MuxedStream
+
+ // ID returns an identifier that uniquely identifies this Stream within this
+ // host, during this run. Stream IDs may repeat across restarts.
+ ID() string
+
+ Protocol() protocol.ID
+ SetProtocol(id protocol.ID) error
+
+ // Stat returns metadata pertaining to this stream.
+ Stat() Stats
+
+ // Conn returns the connection this stream is part of.
+ Conn() Conn
+
+ // Scope returns the user's view of this stream's resource scope
+ Scope() StreamScope
+
+ // ResetWithError closes both ends of the stream with errCode. The errCode is sent
+ // to the peer.
+ ResetWithError(errCode StreamErrorCode) error
+}
diff --git a/core/peer/addrinfo.go b/core/peer/addrinfo.go
new file mode 100644
index 0000000000..0c0d34fdae
--- /dev/null
+++ b/core/peer/addrinfo.go
@@ -0,0 +1,135 @@
+package peer
+
+import (
+ "fmt"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+// AddrInfo is a small struct used to pass around a peer with
+// a set of addresses (and later, keys?).
+type AddrInfo struct {
+ ID ID
+ Addrs []ma.Multiaddr
+}
+
+var _ fmt.Stringer = AddrInfo{}
+
+func (pi AddrInfo) String() string {
+ return fmt.Sprintf("{%v: %v}", pi.ID, pi.Addrs)
+}
+
+var ErrInvalidAddr = fmt.Errorf("invalid p2p multiaddr")
+
+// AddrInfosFromP2pAddrs converts a set of Multiaddrs to a set of AddrInfos.
+func AddrInfosFromP2pAddrs(maddrs ...ma.Multiaddr) ([]AddrInfo, error) {
+ m := make(map[ID][]ma.Multiaddr)
+ for _, maddr := range maddrs {
+ transport, id := SplitAddr(maddr)
+ if id == "" {
+ return nil, ErrInvalidAddr
+ }
+ if transport == nil {
+ if _, ok := m[id]; !ok {
+ m[id] = nil
+ }
+ } else {
+ m[id] = append(m[id], transport)
+ }
+ }
+ ais := make([]AddrInfo, 0, len(m))
+ for id, maddrs := range m {
+ ais = append(ais, AddrInfo{ID: id, Addrs: maddrs})
+ }
+ return ais, nil
+}
+
+// SplitAddr splits a p2p Multiaddr into a transport multiaddr and a peer ID.
+//
+// * Returns a nil transport if the address only contains a /p2p part.
+// * Returns an empty peer ID if the address doesn't contain a /p2p part.
+func SplitAddr(m ma.Multiaddr) (transport ma.Multiaddr, id ID) {
+ if m == nil {
+ return nil, ""
+ }
+
+ transport, p2ppart := ma.SplitLast(m)
+ if p2ppart == nil || p2ppart.Protocol().Code != ma.P_P2P {
+ return m, ""
+ }
+ id = ID(p2ppart.RawValue()) // already validated by the multiaddr library.
+ return transport, id
+}
+
+// IDFromP2PAddr extracts the peer ID from a p2p Multiaddr
+func IDFromP2PAddr(m ma.Multiaddr) (ID, error) {
+ if m == nil {
+ return "", ErrInvalidAddr
+ }
+ var lastComponent ma.Component
+ ma.ForEach(m, func(c ma.Component) bool {
+ lastComponent = c
+ return true
+ })
+ if lastComponent.Protocol().Code != ma.P_P2P {
+ return "", ErrInvalidAddr
+ }
+
+ id := ID(lastComponent.RawValue()) // already validated by the multiaddr library.
+ return id, nil
+}
+
+// AddrInfoFromString builds an AddrInfo from the string representation of a Multiaddr
+func AddrInfoFromString(s string) (*AddrInfo, error) {
+ a, err := ma.NewMultiaddr(s)
+ if err != nil {
+ return nil, err
+ }
+
+ return AddrInfoFromP2pAddr(a)
+}
+
+// AddrInfoFromP2pAddr converts a Multiaddr to an AddrInfo.
+func AddrInfoFromP2pAddr(m ma.Multiaddr) (*AddrInfo, error) {
+ transport, id := SplitAddr(m)
+ if id == "" {
+ return nil, ErrInvalidAddr
+ }
+ info := &AddrInfo{ID: id}
+ if transport != nil {
+ info.Addrs = []ma.Multiaddr{transport}
+ }
+ return info, nil
+}
+
+// AddrInfoToP2pAddrs converts an AddrInfo to a list of Multiaddrs.
+func AddrInfoToP2pAddrs(pi *AddrInfo) ([]ma.Multiaddr, error) {
+ p2ppart, err := ma.NewComponent("p2p", pi.ID.String())
+ if err != nil {
+ return nil, err
+ }
+ if len(pi.Addrs) == 0 {
+ return []ma.Multiaddr{p2ppart.Multiaddr()}, nil
+ }
+ addrs := make([]ma.Multiaddr, 0, len(pi.Addrs))
+ for _, addr := range pi.Addrs {
+ addrs = append(addrs, addr.Encapsulate(p2ppart))
+ }
+ return addrs, nil
+}
+
+func (pi *AddrInfo) Loggable() map[string]interface{} {
+ return map[string]interface{}{
+ "peerID": pi.ID.String(),
+ "addrs": pi.Addrs,
+ }
+}
+
+// AddrInfosToIDs extracts the peer IDs from the passed AddrInfos and returns them in-order.
+func AddrInfosToIDs(pis []AddrInfo) []ID {
+ ps := make([]ID, len(pis))
+ for i, pi := range pis {
+ ps[i] = pi.ID
+ }
+ return ps
+}
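A quick usage sketch for the helpers above, round-tripping a full /p2p multiaddr through an AddrInfo (the peer ID string is the test fixture used later in this diff):

```go
package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p/core/peer"
)

func main() {
	// Split a full /p2p multiaddr into a peer ID plus transport address...
	ai, err := peer.AddrInfoFromString("/ip4/127.0.0.1/tcp/1234/p2p/QmS3zcG7LhYZYSJMhyRZvTddvbNUqtt8BJpaSs6mi1K5Va")
	if err != nil {
		panic(err)
	}
	fmt.Println(ai) // {<peer ID>: [/ip4/127.0.0.1/tcp/1234]}

	// ...and re-encapsulate the transport addresses into dialable /p2p form.
	addrs, err := peer.AddrInfoToP2pAddrs(ai)
	if err != nil {
		panic(err)
	}
	fmt.Println(addrs) // [/ip4/127.0.0.1/tcp/1234/p2p/QmS3z...]
}
```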
diff --git a/core/peer/addrinfo_serde.go b/core/peer/addrinfo_serde.go
new file mode 100644
index 0000000000..a848ea6c66
--- /dev/null
+++ b/core/peer/addrinfo_serde.go
@@ -0,0 +1,48 @@
+package peer
+
+import (
+ "encoding/json"
+
+ "github.com/libp2p/go-libp2p/core/internal/catch"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+// Helper struct for decoding, as we can't unmarshal into an interface (Multiaddr).
+type addrInfoJson struct {
+ ID ID
+ Addrs []string
+}
+
+func (pi AddrInfo) MarshalJSON() (res []byte, err error) {
+ defer func() { catch.HandlePanic(recover(), &err, "libp2p addr info marshal") }()
+
+ addrs := make([]string, len(pi.Addrs))
+ for i, addr := range pi.Addrs {
+ addrs[i] = addr.String()
+ }
+ return json.Marshal(&addrInfoJson{
+ ID: pi.ID,
+ Addrs: addrs,
+ })
+}
+
+func (pi *AddrInfo) UnmarshalJSON(b []byte) (err error) {
+ defer func() { catch.HandlePanic(recover(), &err, "libp2p addr info unmarshal") }()
+ var data addrInfoJson
+ if err := json.Unmarshal(b, &data); err != nil {
+ return err
+ }
+ addrs := make([]ma.Multiaddr, len(data.Addrs))
+ for i, addr := range data.Addrs {
+ maddr, err := ma.NewMultiaddr(addr)
+ if err != nil {
+ return err
+ }
+ addrs[i] = maddr
+ }
+
+ pi.ID = data.ID
+ pi.Addrs = addrs
+ return nil
+}
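Because these are standard MarshalJSON/UnmarshalJSON methods, AddrInfo plugs straight into encoding/json; a small sketch (hypothetical values):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/libp2p/go-libp2p/core/peer"
	ma "github.com/multiformats/go-multiaddr"
)

func main() {
	id, _ := peer.Decode("QmS3zcG7LhYZYSJMhyRZvTddvbNUqtt8BJpaSs6mi1K5Va")
	ai := peer.AddrInfo{ID: id, Addrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/1234")}}

	// Addrs serialize as strings, so the output stays human-readable.
	b, err := json.Marshal(ai)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"ID":"QmS3z...","Addrs":["/ip4/127.0.0.1/tcp/1234"]}

	var out peer.AddrInfo
	if err := json.Unmarshal(b, &out); err != nil {
		panic(err)
	}
}
```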
diff --git a/core/peer/addrinfo_test.go b/core/peer/addrinfo_test.go
new file mode 100644
index 0000000000..614747fa69
--- /dev/null
+++ b/core/peer/addrinfo_test.go
@@ -0,0 +1,166 @@
+package peer_test
+
+import (
+ "testing"
+
+ . "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/stretchr/testify/require"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+var (
+ testID ID
+ maddrFull, maddrTpt, maddrPeer ma.Multiaddr
+)
+
+func init() {
+ var err error
+ testID, err = Decode("QmS3zcG7LhYZYSJMhyRZvTddvbNUqtt8BJpaSs6mi1K5Va")
+ if err != nil {
+ panic(err)
+ }
+ maddrPeer = ma.StringCast("/p2p/" + testID.String())
+ maddrTpt = ma.StringCast("/ip4/127.0.0.1/tcp/1234")
+ maddrFull = maddrTpt.Encapsulate(maddrPeer)
+}
+
+func TestSplitAddr(t *testing.T) {
+ tpt, id := SplitAddr(maddrFull)
+ if !tpt.Equal(maddrTpt) {
+ t.Fatal("expected transport")
+ }
+ if id != testID {
+ t.Fatalf("%s != %s", id, testID)
+ }
+
+ tpt, id = SplitAddr(maddrPeer)
+ if tpt != nil {
+ t.Fatal("expected no transport")
+ }
+ if id != testID {
+ t.Fatalf("%s != %s", id, testID)
+ }
+
+ tpt, id = SplitAddr(maddrTpt)
+ if !tpt.Equal(maddrTpt) {
+ t.Fatal("expected a transport")
+ }
+ if id != "" {
+ t.Fatal("expected no peer ID")
+ }
+}
+
+func TestIDFromP2PAddr(t *testing.T) {
+ id, err := IDFromP2PAddr(maddrFull)
+ require.NoError(t, err)
+ require.Equal(t, testID, id)
+
+ id, err = IDFromP2PAddr(maddrPeer)
+ require.NoError(t, err)
+ require.Equal(t, testID, id)
+
+ _, err = IDFromP2PAddr(maddrTpt)
+ require.ErrorIs(t, err, ErrInvalidAddr)
+}
+
+func TestAddrInfoFromP2pAddr(t *testing.T) {
+ ai, err := AddrInfoFromP2pAddr(maddrFull)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(ai.Addrs) != 1 || !ai.Addrs[0].Equal(maddrTpt) {
+ t.Fatal("expected transport")
+ }
+ if ai.ID != testID {
+ t.Fatalf("%s != %s", ai.ID, testID)
+ }
+
+ ai, err = AddrInfoFromP2pAddr(maddrPeer)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(ai.Addrs) != 0 {
+ t.Fatal("expected transport")
+ }
+ if ai.ID != testID {
+ t.Fatalf("%s != %s", ai.ID, testID)
+ }
+
+ _, err = AddrInfoFromP2pAddr(maddrTpt)
+ if err != ErrInvalidAddr {
+ t.Fatalf("wrong error: %s", err)
+ }
+}
+
+func TestAddrInfosFromP2pAddrs(t *testing.T) {
+ infos, err := AddrInfosFromP2pAddrs()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(infos) != 0 {
+ t.Fatal("expected no addrs")
+ }
+ if _, err = AddrInfosFromP2pAddrs(nil); err == nil {
+ t.Fatal("expected nil multiaddr to fail")
+ }
+
+ addrs := []ma.Multiaddr{
+ ma.StringCast("/ip4/128.199.219.111/tcp/4001/ipfs/QmSoLV4Bbm51jM9C4gDYZQ9Cy3U6aXMJDAbzgu2fzaDs64"),
+ ma.StringCast("/ip4/104.236.76.40/tcp/4001/ipfs/QmSoLV4Bbm51jM9C4gDYZQ9Cy3U6aXMJDAbzgu2fzaDs64"),
+
+ ma.StringCast("/ipfs/QmSoLer265NRgSp2LA3dPaeykiS1J6DifTC88f5uVQKNAd"),
+ ma.StringCast("/ip4/178.62.158.247/tcp/4001/ipfs/QmSoLer265NRgSp2LA3dPaeykiS1J6DifTC88f5uVQKNAd"),
+
+ ma.StringCast("/ipfs/QmSoLPppuBtQSGwKDZT2M73ULpjvfd3aZ6ha4oFGL1KrGM"),
+ }
+ expected := map[string][]ma.Multiaddr{
+ "QmSoLV4Bbm51jM9C4gDYZQ9Cy3U6aXMJDAbzgu2fzaDs64": {
+ ma.StringCast("/ip4/128.199.219.111/tcp/4001"),
+ ma.StringCast("/ip4/104.236.76.40/tcp/4001"),
+ },
+ "QmSoLer265NRgSp2LA3dPaeykiS1J6DifTC88f5uVQKNAd": {
+ ma.StringCast("/ip4/178.62.158.247/tcp/4001"),
+ },
+ "QmSoLPppuBtQSGwKDZT2M73ULpjvfd3aZ6ha4oFGL1KrGM": nil,
+ }
+ infos, err = AddrInfosFromP2pAddrs(addrs...)
+ if err != nil {
+ t.Fatal(err)
+ }
+ for _, info := range infos {
+ exaddrs, ok := expected[info.ID.String()]
+ if !ok {
+ t.Fatalf("didn't expect peer %s", info.ID)
+ }
+ if len(info.Addrs) != len(exaddrs) {
+ t.Fatalf("got %d addrs, expected %d", len(info.Addrs), len(exaddrs))
+ }
+ // AddrInfosFromP2pAddrs preserves order. I'd like to keep this
+ // guarantee for now.
+ for i, addr := range info.Addrs {
+ if !exaddrs[i].Equal(addr) {
+ t.Fatalf("expected %s, got %s", exaddrs[i], addr)
+ }
+ }
+ delete(expected, info.ID.String())
+ }
+}
+
+func TestAddrInfoJSON(t *testing.T) {
+ ai := AddrInfo{ID: testID, Addrs: []ma.Multiaddr{maddrFull}}
+ out, err := ai.MarshalJSON()
+ if err != nil {
+ t.Fatal(err)
+ }
+ var addrInfo AddrInfo
+ if err := addrInfo.UnmarshalJSON(out); err != nil {
+ t.Fatal(err)
+ }
+ if addrInfo.ID != testID {
+ t.Fatalf("expected ID to equal %s, got %s", testID, addrInfo.ID)
+ }
+ if len(addrInfo.Addrs) != 1 || !addrInfo.Addrs[0].Equal(maddrFull) {
+ t.Fatalf("expected addrs to match %v, got %v", maddrFull, addrInfo.Addrs)
+ }
+}
diff --git a/core/peer/pb/peer_record.pb.go b/core/peer/pb/peer_record.pb.go
new file mode 100644
index 0000000000..06dc225341
--- /dev/null
+++ b/core/peer/pb/peer_record.pb.go
@@ -0,0 +1,202 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.6
+// protoc v5.29.2
+// source: core/peer/pb/peer_record.proto
+
+package pb
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+ unsafe "unsafe"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// PeerRecord messages contain information that is useful to share with other peers.
+// Currently, a PeerRecord contains the public listen addresses for a peer, but this
+// is expected to expand to include other information in the future.
+//
+// PeerRecords are designed to be serialized to bytes and placed inside of
+// SignedEnvelopes before sharing with other peers.
+// See https://github.com/libp2p/go-libp2p/blob/master/core/record/pb/envelope.proto for
+// the SignedEnvelope definition.
+type PeerRecord struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // peer_id contains a libp2p peer id in its binary representation.
+ PeerId []byte `protobuf:"bytes,1,opt,name=peer_id,json=peerId,proto3" json:"peer_id,omitempty"`
+ // seq contains a monotonically-increasing sequence counter to order PeerRecords in time.
+ Seq uint64 `protobuf:"varint,2,opt,name=seq,proto3" json:"seq,omitempty"`
+ // addresses is a list of public listen addresses for the peer.
+ Addresses []*PeerRecord_AddressInfo `protobuf:"bytes,3,rep,name=addresses,proto3" json:"addresses,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *PeerRecord) Reset() {
+ *x = PeerRecord{}
+ mi := &file_core_peer_pb_peer_record_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *PeerRecord) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PeerRecord) ProtoMessage() {}
+
+func (x *PeerRecord) ProtoReflect() protoreflect.Message {
+ mi := &file_core_peer_pb_peer_record_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PeerRecord.ProtoReflect.Descriptor instead.
+func (*PeerRecord) Descriptor() ([]byte, []int) {
+ return file_core_peer_pb_peer_record_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *PeerRecord) GetPeerId() []byte {
+ if x != nil {
+ return x.PeerId
+ }
+ return nil
+}
+
+func (x *PeerRecord) GetSeq() uint64 {
+ if x != nil {
+ return x.Seq
+ }
+ return 0
+}
+
+func (x *PeerRecord) GetAddresses() []*PeerRecord_AddressInfo {
+ if x != nil {
+ return x.Addresses
+ }
+ return nil
+}
+
+// AddressInfo is a wrapper around a binary multiaddr. It is defined as a
+// separate message to allow us to add per-address metadata in the future.
+type PeerRecord_AddressInfo struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Multiaddr []byte `protobuf:"bytes,1,opt,name=multiaddr,proto3" json:"multiaddr,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *PeerRecord_AddressInfo) Reset() {
+ *x = PeerRecord_AddressInfo{}
+ mi := &file_core_peer_pb_peer_record_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *PeerRecord_AddressInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PeerRecord_AddressInfo) ProtoMessage() {}
+
+func (x *PeerRecord_AddressInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_core_peer_pb_peer_record_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PeerRecord_AddressInfo.ProtoReflect.Descriptor instead.
+func (*PeerRecord_AddressInfo) Descriptor() ([]byte, []int) {
+ return file_core_peer_pb_peer_record_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *PeerRecord_AddressInfo) GetMultiaddr() []byte {
+ if x != nil {
+ return x.Multiaddr
+ }
+ return nil
+}
+
+var File_core_peer_pb_peer_record_proto protoreflect.FileDescriptor
+
+const file_core_peer_pb_peer_record_proto_rawDesc = "" +
+ "\n" +
+ "\x1ecore/peer/pb/peer_record.proto\x12\apeer.pb\"\xa3\x01\n" +
+ "\n" +
+ "PeerRecord\x12\x17\n" +
+ "\apeer_id\x18\x01 \x01(\fR\x06peerId\x12\x10\n" +
+ "\x03seq\x18\x02 \x01(\x04R\x03seq\x12=\n" +
+ "\taddresses\x18\x03 \x03(\v2\x1f.peer.pb.PeerRecord.AddressInfoR\taddresses\x1a+\n" +
+ "\vAddressInfo\x12\x1c\n" +
+ "\tmultiaddr\x18\x01 \x01(\fR\tmultiaddrB*Z(github.com/libp2p/go-libp2p/core/peer/pbb\x06proto3"
+
+var (
+ file_core_peer_pb_peer_record_proto_rawDescOnce sync.Once
+ file_core_peer_pb_peer_record_proto_rawDescData []byte
+)
+
+func file_core_peer_pb_peer_record_proto_rawDescGZIP() []byte {
+ file_core_peer_pb_peer_record_proto_rawDescOnce.Do(func() {
+ file_core_peer_pb_peer_record_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_core_peer_pb_peer_record_proto_rawDesc), len(file_core_peer_pb_peer_record_proto_rawDesc)))
+ })
+ return file_core_peer_pb_peer_record_proto_rawDescData
+}
+
+var file_core_peer_pb_peer_record_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_core_peer_pb_peer_record_proto_goTypes = []any{
+ (*PeerRecord)(nil), // 0: peer.pb.PeerRecord
+ (*PeerRecord_AddressInfo)(nil), // 1: peer.pb.PeerRecord.AddressInfo
+}
+var file_core_peer_pb_peer_record_proto_depIdxs = []int32{
+ 1, // 0: peer.pb.PeerRecord.addresses:type_name -> peer.pb.PeerRecord.AddressInfo
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_core_peer_pb_peer_record_proto_init() }
+func file_core_peer_pb_peer_record_proto_init() {
+ if File_core_peer_pb_peer_record_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_core_peer_pb_peer_record_proto_rawDesc), len(file_core_peer_pb_peer_record_proto_rawDesc)),
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_core_peer_pb_peer_record_proto_goTypes,
+ DependencyIndexes: file_core_peer_pb_peer_record_proto_depIdxs,
+ MessageInfos: file_core_peer_pb_peer_record_proto_msgTypes,
+ }.Build()
+ File_core_peer_pb_peer_record_proto = out.File
+ file_core_peer_pb_peer_record_proto_goTypes = nil
+ file_core_peer_pb_peer_record_proto_depIdxs = nil
+}
diff --git a/core/peer/pb/peer_record.proto b/core/peer/pb/peer_record.proto
new file mode 100644
index 0000000000..c5022f49ed
--- /dev/null
+++ b/core/peer/pb/peer_record.proto
@@ -0,0 +1,31 @@
+syntax = "proto3";
+
+package peer.pb;
+
+option go_package = "github.com/libp2p/go-libp2p/core/peer/pb";
+
+// PeerRecord messages contain information that is useful to share with other peers.
+// Currently, a PeerRecord contains the public listen addresses for a peer, but this
+// is expected to expand to include other information in the future.
+//
+// PeerRecords are designed to be serialized to bytes and placed inside of
+// SignedEnvelopes before sharing with other peers.
+// See https://github.com/libp2p/go-libp2p/blob/master/core/record/pb/envelope.proto for
+// the SignedEnvelope definition.
+message PeerRecord {
+
+ // AddressInfo is a wrapper around a binary multiaddr. It is defined as a
+ // separate message to allow us to add per-address metadata in the future.
+ message AddressInfo {
+ bytes multiaddr = 1;
+ }
+
+ // peer_id contains a libp2p peer id in its binary representation.
+ bytes peer_id = 1;
+
+ // seq contains a monotonically-increasing sequence counter to order PeerRecords in time.
+ uint64 seq = 2;
+
+ // addresses is a list of public listen addresses for the peer.
+ repeated AddressInfo addresses = 3;
+}
diff --git a/core/peer/peer.go b/core/peer/peer.go
new file mode 100644
index 0000000000..b77fb684ec
--- /dev/null
+++ b/core/peer/peer.go
@@ -0,0 +1,194 @@
+// Package peer implements an object used to represent peers in the libp2p network.
+package peer
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/ipfs/go-cid"
+ ic "github.com/libp2p/go-libp2p/core/crypto"
+ b58 "github.com/mr-tron/base58/base58"
+ mc "github.com/multiformats/go-multicodec"
+ mh "github.com/multiformats/go-multihash"
+)
+
+var (
+ // ErrEmptyPeerID is an error for empty peer ID.
+ ErrEmptyPeerID = errors.New("empty peer ID")
+ // ErrNoPublicKey is an error for peer IDs that don't embed public keys
+ ErrNoPublicKey = errors.New("public key is not embedded in peer ID")
+)
+
+// AdvancedEnableInlining enables automatically inlining keys shorter than
+// 42 bytes into the peer ID (using the "identity" multihash function).
+//
+// WARNING: This flag will likely be set to false in the future and eventually
+// be removed in favor of using a hash function specified by the key itself.
+// See: https://github.com/libp2p/specs/issues/138
+//
+// DO NOT change this flag unless you know what you're doing.
+//
+// This currently defaults to true for backwards compatibility but will likely
+// be set to false by default when an upgrade path is determined.
+var AdvancedEnableInlining = true
+
+const maxInlineKeyLength = 42
+
+// ID is a libp2p peer identity.
+//
+// Peer IDs are derived by hashing a peer's public key and encoding the
+// hash output as a multihash. See IDFromPublicKey for details.
+type ID string
+
+// Loggable returns a pretty peer ID string in loggable JSON format.
+func (id ID) Loggable() map[string]interface{} {
+ return map[string]interface{}{
+ "peerID": id.String(),
+ }
+}
+
+func (id ID) String() string {
+ return b58.Encode([]byte(id))
+}
+
+// ShortString prints out the peer ID.
+//
+// TODO(brian): ensure correctness at ID generation and
+// enforce this by only exposing functions that generate
+// IDs safely. Then any peer.ID type found in the
+// codebase is known to be correct.
+func (id ID) ShortString() string {
+ pid := id.String()
+ if len(pid) <= 10 {
+ return fmt.Sprintf("", pid)
+ }
+ return fmt.Sprintf("", pid[:2], pid[len(pid)-6:])
+}
+
+// MatchesPrivateKey tests whether this ID was derived from the secret key sk.
+func (id ID) MatchesPrivateKey(sk ic.PrivKey) bool {
+ return id.MatchesPublicKey(sk.GetPublic())
+}
+
+// MatchesPublicKey tests whether this ID was derived from the public key pk.
+func (id ID) MatchesPublicKey(pk ic.PubKey) bool {
+ oid, err := IDFromPublicKey(pk)
+ if err != nil {
+ return false
+ }
+ return oid == id
+}
+
+// ExtractPublicKey attempts to extract the public key from an ID.
+//
+// This method returns ErrNoPublicKey if the peer ID looks valid, but it can't extract
+// the public key.
+func (id ID) ExtractPublicKey() (ic.PubKey, error) {
+ decoded, err := mh.Decode([]byte(id))
+ if err != nil {
+ return nil, err
+ }
+ if decoded.Code != mh.IDENTITY {
+ return nil, ErrNoPublicKey
+ }
+ pk, err := ic.UnmarshalPublicKey(decoded.Digest)
+ if err != nil {
+ return nil, err
+ }
+ return pk, nil
+}
+
+// Validate checks if ID is empty or not.
+func (id ID) Validate() error {
+ if id == ID("") {
+ return ErrEmptyPeerID
+ }
+
+ return nil
+}
+
+// IDFromBytes casts a byte slice to the ID type, and validates
+// the value to make sure it is a multihash.
+func IDFromBytes(b []byte) (ID, error) {
+ if _, err := mh.Cast(b); err != nil {
+ return ID(""), err
+ }
+ return ID(b), nil
+}
+
+// Decode accepts an encoded peer ID and returns the decoded ID if the input is
+// valid.
+//
+// The encoded peer ID can either be a CID of a key or a raw multihash (identity
+// or sha256-256).
+func Decode(s string) (ID, error) {
+ if strings.HasPrefix(s, "Qm") || strings.HasPrefix(s, "1") {
+ // base58 encoded sha256 or identity multihash
+ m, err := mh.FromB58String(s)
+ if err != nil {
+ return "", fmt.Errorf("failed to parse peer ID: %s", err)
+ }
+ return ID(m), nil
+ }
+
+ c, err := cid.Decode(s)
+ if err != nil {
+ return "", fmt.Errorf("failed to parse peer ID: %s", err)
+ }
+ return FromCid(c)
+}
+
+// FromCid converts a CID to a peer ID, if possible.
+func FromCid(c cid.Cid) (ID, error) {
+ code := mc.Code(c.Type())
+ if code != mc.Libp2pKey {
+ return "", fmt.Errorf("can't convert CID of type %q to a peer ID", code)
+ }
+ return ID(c.Hash()), nil
+}
+
+// ToCid encodes a peer ID as a CID of the public key.
+//
+// If the peer ID is invalid (e.g., empty), this will return the empty CID.
+func ToCid(id ID) cid.Cid {
+ m, err := mh.Cast([]byte(id))
+ if err != nil {
+ return cid.Cid{}
+ }
+ return cid.NewCidV1(cid.Libp2pKey, m)
+}
+
+// IDFromPublicKey returns the Peer ID corresponding to the public key pk.
+func IDFromPublicKey(pk ic.PubKey) (ID, error) {
+ b, err := ic.MarshalPublicKey(pk)
+ if err != nil {
+ return "", err
+ }
+ var alg uint64 = mh.SHA2_256
+ if AdvancedEnableInlining && len(b) <= maxInlineKeyLength {
+ alg = mh.IDENTITY
+ }
+ hash, _ := mh.Sum(b, alg, -1)
+ return ID(hash), nil
+}
+
+// IDFromPrivateKey returns the Peer ID corresponding to the secret key sk.
+func IDFromPrivateKey(sk ic.PrivKey) (ID, error) {
+ return IDFromPublicKey(sk.GetPublic())
+}
+
+// IDSlice for sorting peers.
+type IDSlice []ID
+
+func (es IDSlice) Len() int { return len(es) }
+func (es IDSlice) Swap(i, j int) { es[i], es[j] = es[j], es[i] }
+func (es IDSlice) Less(i, j int) bool { return string(es[i]) < string(es[j]) }
+
+func (es IDSlice) String() string {
+ peersStrings := make([]string, len(es))
+ for i, id := range es {
+ peersStrings[i] = id.String()
+ }
+ return strings.Join(peersStrings, ", ")
+}
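A short sketch of deriving and re-parsing a peer ID with the functions above (Ed25519 keys are small enough to trigger the identity-multihash inlining path; key generation via GenerateEd25519Key as used in the tests later in this diff):

```go
package main

import (
	"crypto/rand"
	"fmt"

	ic "github.com/libp2p/go-libp2p/core/crypto"
	"github.com/libp2p/go-libp2p/core/peer"
)

func main() {
	// Ed25519 public keys are inlined into the ID with the identity
	// multihash (see AdvancedEnableInlining above).
	_, pub, err := ic.GenerateEd25519Key(rand.Reader)
	if err != nil {
		panic(err)
	}
	id, err := peer.IDFromPublicKey(pub)
	if err != nil {
		panic(err)
	}

	// String base58-encodes the multihash; Decode reverses it.
	roundTripped, err := peer.Decode(id.String())
	if err != nil || roundTripped != id {
		panic("round trip failed")
	}
	fmt.Println(id, peer.ToCid(id)) // base58 peer ID and its CIDv1 encoding
}
```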
diff --git a/core/peer/peer_serde.go b/core/peer/peer_serde.go
new file mode 100644
index 0000000000..3e2f71793e
--- /dev/null
+++ b/core/peer/peer_serde.go
@@ -0,0 +1,73 @@
+// Package peer contains Protobuf and JSON serialization/deserialization methods for peer IDs.
+package peer
+
+import (
+ "encoding"
+ "encoding/json"
+)
+
+// Interface assertions commented out to avoid introducing hard dependencies on protobuf.
+// var _ proto.Marshaler = (*ID)(nil)
+// var _ proto.Unmarshaler = (*ID)(nil)
+var _ json.Marshaler = (*ID)(nil)
+var _ json.Unmarshaler = (*ID)(nil)
+
+var _ encoding.BinaryMarshaler = (*ID)(nil)
+var _ encoding.BinaryUnmarshaler = (*ID)(nil)
+var _ encoding.TextMarshaler = (*ID)(nil)
+var _ encoding.TextUnmarshaler = (*ID)(nil)
+
+func (id ID) Marshal() ([]byte, error) {
+ return []byte(id), nil
+}
+
+// MarshalBinary returns the byte representation of the peer ID.
+func (id ID) MarshalBinary() ([]byte, error) {
+ return id.Marshal()
+}
+
+func (id ID) MarshalTo(data []byte) (n int, err error) {
+ return copy(data, []byte(id)), nil
+}
+
+func (id *ID) Unmarshal(data []byte) (err error) {
+ *id, err = IDFromBytes(data)
+ return err
+}
+
+// UnmarshalBinary sets the ID from its binary representation.
+func (id *ID) UnmarshalBinary(data []byte) error {
+ return id.Unmarshal(data)
+}
+
+func (id ID) Size() int {
+ return len([]byte(id))
+}
+
+func (id ID) MarshalJSON() ([]byte, error) {
+ return json.Marshal(id.String())
+}
+
+func (id *ID) UnmarshalJSON(data []byte) (err error) {
+ var v string
+ if err = json.Unmarshal(data, &v); err != nil {
+ return err
+ }
+ *id, err = Decode(v)
+ return err
+}
+
+// MarshalText returns the text encoding of the ID.
+func (id ID) MarshalText() ([]byte, error) {
+ return []byte(id.String()), nil
+}
+
+// UnmarshalText restores the ID from its text encoding.
+func (id *ID) UnmarshalText(data []byte) error {
+ pid, err := Decode(string(data))
+ if err != nil {
+ return err
+ }
+ *id = pid
+ return nil
+}
diff --git a/core/peer/peer_serde_test.go b/core/peer/peer_serde_test.go
new file mode 100644
index 0000000000..c503d66b9e
--- /dev/null
+++ b/core/peer/peer_serde_test.go
@@ -0,0 +1,83 @@
+package peer_test
+
+import (
+ "testing"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+ . "github.com/libp2p/go-libp2p/core/test"
+)
+
+func TestPeerSerdePB(t *testing.T) {
+ id, err := RandPeerID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ b, err := id.Marshal()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var id2 peer.ID
+ if err = id2.Unmarshal(b); err != nil {
+ t.Fatal(err)
+ }
+ if id != id2 {
+ t.Error("expected equal ids in circular serde test")
+ }
+}
+
+func TestPeerSerdeJSON(t *testing.T) {
+ id, err := RandPeerID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ b, err := id.MarshalJSON()
+ if err != nil {
+ t.Fatal(err)
+ }
+ var id2 peer.ID
+ if err = id2.UnmarshalJSON(b); err != nil {
+ t.Fatal(err)
+ }
+ if id != id2 {
+ t.Error("expected equal ids in circular serde test")
+ }
+}
+
+func TestBinaryMarshaler(t *testing.T) {
+ id, err := RandPeerID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ b, err := id.MarshalBinary()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var id2 peer.ID
+ if err = id2.UnmarshalBinary(b); err != nil {
+ t.Fatal(err)
+ }
+ if id != id2 {
+ t.Error("expected equal ids in circular serde test")
+ }
+}
+
+func TestTextMarshaler(t *testing.T) {
+ id, err := RandPeerID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ b, err := id.MarshalText()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var id2 peer.ID
+ if err = id2.UnmarshalText(b); err != nil {
+ t.Fatal(err)
+ }
+ if id != id2 {
+ t.Error("expected equal ids in circular serde test")
+ }
+}
diff --git a/core/peer/peer_test.go b/core/peer/peer_test.go
new file mode 100644
index 0000000000..d7fa930bcb
--- /dev/null
+++ b/core/peer/peer_test.go
@@ -0,0 +1,300 @@
+package peer_test
+
+import (
+ "crypto/rand"
+ "encoding/base64"
+ "fmt"
+ "strings"
+ "testing"
+
+ ic "github.com/libp2p/go-libp2p/core/crypto"
+ . "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/test"
+
+ b58 "github.com/mr-tron/base58/base58"
+ mh "github.com/multiformats/go-multihash"
+)
+
+var gen1 keyset // generated
+var gen2 keyset // generated
+var man keyset // manual
+
+func hash(b []byte) []byte {
+ h, _ := mh.Sum(b, mh.SHA2_256, -1)
+ return []byte(h)
+}
+
+func init() {
+ if err := gen1.generate(); err != nil {
+ panic(err)
+ }
+ if err := gen2.generate(); err != nil {
+ panic(err)
+ }
+
+ skManBytes = strings.Replace(skManBytes, "\n", "", -1)
+ if err := man.load(hpkpMan, skManBytes); err != nil {
+ panic(err)
+ }
+}
+
+type keyset struct {
+ sk ic.PrivKey
+ pk ic.PubKey
+ hpk string
+ hpkp string
+}
+
+func (ks *keyset) generate() error {
+ var err error
+ ks.sk, ks.pk, err = test.RandTestKeyPair(ic.RSA, 2048)
+ if err != nil {
+ return err
+ }
+
+ bpk, err := ic.MarshalPublicKey(ks.pk)
+ if err != nil {
+ return err
+ }
+
+ ks.hpk = string(hash(bpk))
+ ks.hpkp = b58.Encode([]byte(ks.hpk))
+ return nil
+}
+
+func (ks *keyset) load(hpkp, skBytesStr string) error {
+ skBytes, err := base64.StdEncoding.DecodeString(skBytesStr)
+ if err != nil {
+ return err
+ }
+
+ ks.sk, err = ic.UnmarshalPrivateKey(skBytes)
+ if err != nil {
+ return err
+ }
+
+ ks.pk = ks.sk.GetPublic()
+ bpk, err := ic.MarshalPublicKey(ks.pk)
+ if err != nil {
+ return err
+ }
+
+ ks.hpk = string(hash(bpk))
+ ks.hpkp = b58.Encode([]byte(ks.hpk))
+ if ks.hpkp != hpkp {
+ return fmt.Errorf("hpkp doesn't match key. %s", hpkp)
+ }
+ return nil
+}
+
+func TestIDMatchesPublicKey(t *testing.T) {
+ test := func(ks keyset) {
+ p1, err := Decode(ks.hpkp)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if ks.hpk != string(p1) {
+ t.Error("p1 and hpk differ")
+ }
+
+ if !p1.MatchesPublicKey(ks.pk) {
+ t.Fatal("p1 does not match pk")
+ }
+
+ p2, err := IDFromPublicKey(ks.pk)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if p1 != p2 {
+ t.Error("p1 and p2 differ", p1.String(), p2.String())
+ }
+
+ if p2.String() != ks.hpkp {
+ t.Error("hpkp and p2.String differ", ks.hpkp, p2.String())
+ }
+ }
+
+ test(gen1)
+ test(gen2)
+ test(man)
+}
+
+func TestIDMatchesPrivateKey(t *testing.T) {
+
+ test := func(ks keyset) {
+ p1, err := Decode(ks.hpkp)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if ks.hpk != string(p1) {
+ t.Error("p1 and hpk differ")
+ }
+
+ if !p1.MatchesPrivateKey(ks.sk) {
+ t.Fatal("p1 does not match sk")
+ }
+
+ p2, err := IDFromPrivateKey(ks.sk)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if p1 != p2 {
+ t.Error("p1 and p2 differ", p1.String(), p2.String())
+ }
+ }
+
+ test(gen1)
+ test(gen2)
+ test(man)
+}
+
+func TestIDEncoding(t *testing.T) {
+ test := func(ks keyset) {
+ p1, err := Decode(ks.hpkp)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if ks.hpk != string(p1) {
+ t.Error("p1 and hpk differ")
+ }
+
+ c := ToCid(p1)
+ p2, err := FromCid(c)
+ if err != nil || p1 != p2 {
+ t.Fatal("failed to round-trip through CID:", err)
+ }
+ p3, err := Decode(c.String())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if p3 != p1 {
+ t.Fatal("failed to round trip through CID string")
+ }
+
+ if ks.hpkp != p1.String() {
+ t.Fatal("should always encode peer IDs as base58 by default")
+ }
+ }
+
+ test(gen1)
+ test(gen2)
+ test(man)
+
+ exampleCid := "bafkreifoybygix7fh3r3g5rqle3wcnhqldgdg4shzf4k3ulyw3gn7mabt4"
+ _, err := Decode(exampleCid)
+ if err == nil {
+ t.Fatal("should refuse to decode a non-peer ID CID")
+ }
+
+ c := ToCid("")
+ if c.Defined() {
+ t.Fatal("cid of empty peer ID should have been undefined")
+ }
+}
+
+func TestPublicKeyExtraction(t *testing.T) {
+ t.Skip("disabled until libp2p/go-libp2p-crypto#51 is fixed")
+ // Happy path
+ _, originalPub, err := ic.GenerateEd25519Key(rand.Reader)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ id, err := IDFromPublicKey(originalPub)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ extractedPub, err := id.ExtractPublicKey()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if extractedPub == nil {
+ t.Fatal("failed to extract public key")
+ }
+ if !originalPub.Equals(extractedPub) {
+ t.Fatal("extracted public key doesn't match")
+ }
+
+ // Test invalid multihash (invariant of the type of public key)
+ pk, err := ID("").ExtractPublicKey()
+ if err == nil {
+ t.Fatal("expected an error")
+ }
+ if pk != nil {
+ t.Fatal("expected a nil public key")
+ }
+
+ // Shouldn't work for, e.g., RSA keys (too large)
+
+ _, rsaPub, err := ic.GenerateKeyPair(ic.RSA, 2048)
+ if err != nil {
+ t.Fatal(err)
+ }
+ rsaId, err := IDFromPublicKey(rsaPub)
+ if err != nil {
+ t.Fatal(err)
+ }
+ extractedRsaPub, err := rsaId.ExtractPublicKey()
+ if err != ErrNoPublicKey {
+ t.Fatal(err)
+ }
+ if extractedRsaPub != nil {
+ t.Fatal("expected to fail to extract public key from rsa ID")
+ }
+}
+
+func TestValidate(t *testing.T) {
+ // Empty peer ID invalidates
+ err := ID("").Validate()
+ if err == nil {
+ t.Error("expected error")
+ } else if err != ErrEmptyPeerID {
+ t.Error("expected error message: " + ErrEmptyPeerID.Error())
+ }
+
+ // Non-empty peer ID validates
+ p, err := test.RandPeerID()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = p.Validate()
+ if err != nil {
+ t.Error("expected nil, but found " + err.Error())
+ }
+}
+
+var hpkpMan = `QmcJeseojbPW9hSejUM1sQ1a2QmbrryPK4Z8pWbRUPaYEn`
+var skManBytes = `
+CAASqAkwggSkAgEAAoIBAQC3hjPtPli71gFNzGJ6rUhYdb65BDwW7IrniEaZKi6z
+tW4Iz0MouEJY8GPG1iQfqZKp5w9H2ENh4I1bk2dsezrJ7Nneg4Eqd78CmeHTAgaP
+3PKsxohdMo/TOFNxwl8SkEF8FyVbio2TCoijYNHUuprZuq7MPEAJYr3Z1eEkM/xR
+pMp3YI9S2SYsZQxbmmQ0/GfHOEvYajdow1qttreVTQkvmCppKtNLEU5InpX/W5fe
+aQCj0pd7l74daZgM2WWz3juEUCVG7tdRUPg7ix1TYosbN96CKC3q2MJxe/wJ9gR5
+Jvjnaaaoon+mci5vrKzxdKBDmZ/ZbLiHDfVljMkbdOQLAgMBAAECggEAEULaF3JJ
+vkD+lmamzIsHxuosKhKv5CgTWHuEyFsjUVu7IbD8zBOoidzyRX1WoHO+i6Rj14oL
+rGUGZpqSm61rdhqE01zjBS+GE6SNjN8f5uANIxr5MGrVBDTEBGsXrhNLVXSH2vhJ
+II9ZEqTEl5GFhvz7+9Ge5EMZQCfRqSoKjVMdrs+Rueuusr9p0wNg9PH1myA+cXGt
+iNZA17Rj2IiWVZLDgYNo4DVQUt4mFb+wTJW4NSspGKaFebpn0hf4z21laoGoJqTC
+cNETJw+QwQ0uDaRoYotTLT2/55e8XBFTdcTg5cmbZoKgMyGqZEHfRyD9reVDAZlM
+EZwKtrm41kz94QKBgQDmPp5zVtFXQNONmje1NE0IjCaUKcqURXk4ZiILztfT9XLC
+OXAUCs3TCq21jirCkZZ6gLfo12Wx0xJYmsKlaUOGNTa8FI5Xa7OyheYKixUvV6FW
+J95P/sNuWscTjh7oZHgZk/L3yKrNzNBz7awComwV6qciXW7EP1uACHf5fS/RdQKB
+gQDMDa38W9OeegRDrhCeYGsniJK7btOCzhNooruQKPPXxk+O4dyJm7VBbC/3Ch55
+a83W66T4k0Q7ysLVRT5Vqd5z3AM0sEM3ZoxUKCinG3NwPxVeXcoLasyEiq1vOFK6
+GqZKCMThCj7ZpbkWy0DPJagnYfZGC62lammuj+XQx7mvfwKBgQCTKhka/bXmgD/3
+9UeAIcLPIM2TzDZ4mQNHIjjGtVnMV8kXDaFung06xEuNjSYVoPq+qEFkqTCN/axv
+R9P76BFJ2f93LehhRizggacsvAM5dFhh+i+lj+AYTBuMiz2EKpt9NcyJxhAuZKgk
+QRi9wlU1mPtlArVG6HwylLcil3qV9QKBgQDJHtaU/KEY+2TGnIMuxxP2lEsjyLla
+nOlOYc8C6Qpma8UwrHelfj5p7Eteb6/Xt6Tbp8kjZGuFj3T3plcpMdPbWEgkn3Kw
+4TeBH0/qXUkrolHagBDLrglEvjbxf48ydV/fasM6l9GYzhofWFhZk+EoaArHwWz2
+tGrTrmsynBjt2wKBgErdYe+zZ2Wo+wXQGAoZi4pfcwiw4a97Kdh0dx+WZz7acHms
+h+V20VRmEHm5h8WnJ/Wv5uK94t6NY17wzjQ7y2BN5mY5cA2cZAcpeqtv/N06tH4S
+cn1UEuRB8VpwkjaPUNZhqtYK40qff2OTdJy8taFtQiN7fz9euWTC78zjph2s
+`
diff --git a/core/peer/record.go b/core/peer/record.go
new file mode 100644
index 0000000000..fce69ce00c
--- /dev/null
+++ b/core/peer/record.go
@@ -0,0 +1,251 @@
+package peer
+
+import (
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/internal/catch"
+ "github.com/libp2p/go-libp2p/core/peer/pb"
+ "github.com/libp2p/go-libp2p/core/record"
+
+ ma "github.com/multiformats/go-multiaddr"
+
+ "google.golang.org/protobuf/proto"
+)
+
+var _ record.Record = (*PeerRecord)(nil)
+
+func init() {
+ record.RegisterType(&PeerRecord{})
+}
+
+// PeerRecordEnvelopeDomain is the domain string used for peer records contained in an Envelope.
+const PeerRecordEnvelopeDomain = "libp2p-peer-record"
+
+// PeerRecordEnvelopePayloadType is the type hint used to identify peer records in an Envelope.
+// Defined in https://github.com/multiformats/multicodec/blob/master/table.csv
+// with name "libp2p-peer-record".
+var PeerRecordEnvelopePayloadType = []byte{0x03, 0x01}
+
+// PeerRecord contains information that is broadly useful to share with other peers,
+// either through a direct exchange (as in the libp2p identify protocol), or through
+// a Peer Routing provider, such as a DHT.
+//
+// Currently, a PeerRecord contains the public listen addresses for a peer, but this
+// is expected to expand to include other information in the future.
+//
+// PeerRecords are ordered in time by their Seq field. Newer PeerRecords must have
+// greater Seq values than older records. The NewPeerRecord function will create
+// a PeerRecord with a timestamp-based Seq value. The other PeerRecord fields should
+// be set by the caller:
+//
+// rec := peer.NewPeerRecord()
+// rec.PeerID = aPeerID
+// rec.Addrs = someAddrs
+//
+// Alternatively, you can construct a PeerRecord struct directly and use the TimestampSeq
+// helper to set the Seq field:
+//
+// rec := peer.PeerRecord{PeerID: aPeerID, Addrs: someAddrs, Seq: peer.TimestampSeq()}
+//
+// Failing to set the Seq field will not result in an error, however, a PeerRecord with a
+// Seq value of zero may be ignored or rejected by other peers.
+//
+// PeerRecords are intended to be shared with other peers inside a signed
+// routing.Envelope, and PeerRecord implements the routing.Record interface
+// to facilitate this.
+//
+// To share a PeerRecord, first call Sign to wrap the record in an Envelope
+// and sign it with the local peer's private key:
+//
+// rec := &PeerRecord{PeerID: myPeerId, Addrs: myAddrs}
+// envelope, err := rec.Sign(myPrivateKey)
+//
+// The resulting record.Envelope can be marshalled to a []byte and shared
+// publicly. As a convenience, the MarshalSigned method will produce the
+// Envelope and marshal it to a []byte in one go:
+//
+// rec := &PeerRecord{PeerID: myPeerId, Addrs: myAddrs}
+// recordBytes, err := rec.MarshalSigned(myPrivateKey)
+//
+// To validate and unmarshal a signed PeerRecord from a remote peer,
+// "consume" the containing envelope, which will return both the
+// routing.Envelope and the inner Record. The Record must be cast to
+// a PeerRecord pointer before use:
+//
+// envelope, untypedRecord, err := ConsumeEnvelope(envelopeBytes, PeerRecordEnvelopeDomain)
+// if err != nil {
+// handleError(err)
+// return
+// }
+// peerRec := untypedRecord.(*PeerRecord)
+type PeerRecord struct {
+ // PeerID is the ID of the peer this record pertains to.
+ PeerID ID
+
+ // Addrs contains the public addresses of the peer this record pertains to.
+ Addrs []ma.Multiaddr
+
+ // Seq is a monotonically-increasing sequence counter that's used to order
+ // PeerRecords in time. The interval between Seq values is unspecified,
+ // but newer PeerRecords MUST have a greater Seq value than older records
+ // for the same peer.
+ Seq uint64
+}
+
+// NewPeerRecord returns a PeerRecord with a timestamp-based sequence number.
+// The returned record is otherwise empty and should be populated by the caller.
+func NewPeerRecord() *PeerRecord {
+ return &PeerRecord{Seq: TimestampSeq()}
+}
+
+// PeerRecordFromAddrInfo creates a PeerRecord from an AddrInfo struct.
+// The returned record will have a timestamp-based sequence number.
+func PeerRecordFromAddrInfo(info AddrInfo) *PeerRecord {
+ rec := NewPeerRecord()
+ rec.PeerID = info.ID
+ rec.Addrs = info.Addrs
+ return rec
+}
+
+// PeerRecordFromProtobuf creates a PeerRecord from a protobuf PeerRecord
+// struct.
+func PeerRecordFromProtobuf(msg *pb.PeerRecord) (*PeerRecord, error) {
+ record := &PeerRecord{}
+
+ var id ID
+ if err := id.UnmarshalBinary(msg.PeerId); err != nil {
+ return nil, err
+ }
+
+ record.PeerID = id
+ record.Addrs = addrsFromProtobuf(msg.Addresses)
+ record.Seq = msg.Seq
+
+ return record, nil
+}
+
+var (
+ lastTimestampMu sync.Mutex
+ lastTimestamp uint64
+)
+
+// TimestampSeq is a helper to generate a timestamp-based sequence number for a PeerRecord.
+func TimestampSeq() uint64 {
+ now := uint64(time.Now().UnixNano())
+ lastTimestampMu.Lock()
+ defer lastTimestampMu.Unlock()
+ // Not all clocks are strictly increasing, but we need these sequence numbers to be strictly
+ // increasing.
+ if now <= lastTimestamp {
+ now = lastTimestamp + 1
+ }
+ lastTimestamp = now
+ return now
+}
+
+// Domain is used when signing and validating PeerRecords contained in Envelopes.
+// It is constant for all PeerRecord instances.
+func (r *PeerRecord) Domain() string {
+ return PeerRecordEnvelopeDomain
+}
+
+// Codec is a binary identifier for the PeerRecord type. It is constant for all PeerRecord instances.
+func (r *PeerRecord) Codec() []byte {
+ return PeerRecordEnvelopePayloadType
+}
+
+// UnmarshalRecord parses a PeerRecord from a byte slice.
+// This method is called automatically when consuming a record.Envelope
+// whose PayloadType indicates that it contains a PeerRecord.
+// It is generally not necessary or recommended to call this method directly.
+func (r *PeerRecord) UnmarshalRecord(bytes []byte) (err error) {
+ if r == nil {
+ return fmt.Errorf("cannot unmarshal PeerRecord to nil receiver")
+ }
+
+ defer func() { catch.HandlePanic(recover(), &err, "libp2p peer record unmarshal") }()
+
+ var msg pb.PeerRecord
+ err = proto.Unmarshal(bytes, &msg)
+ if err != nil {
+ return err
+ }
+
+ rPtr, err := PeerRecordFromProtobuf(&msg)
+ if err != nil {
+ return err
+ }
+ *r = *rPtr
+
+ return nil
+}
+
+// MarshalRecord serializes a PeerRecord to a byte slice.
+// This method is called automatically when constructing a routing.Envelope
+// using Seal or PeerRecord.Sign.
+func (r *PeerRecord) MarshalRecord() (res []byte, err error) {
+ defer func() { catch.HandlePanic(recover(), &err, "libp2p peer record marshal") }()
+
+ msg, err := r.ToProtobuf()
+ if err != nil {
+ return nil, err
+ }
+ return proto.Marshal(msg)
+}
+
+// Equal returns true if the other PeerRecord is identical to this one.
+func (r *PeerRecord) Equal(other *PeerRecord) bool {
+ if other == nil {
+ return r == nil
+ }
+ if r.PeerID != other.PeerID {
+ return false
+ }
+ if r.Seq != other.Seq {
+ return false
+ }
+ if len(r.Addrs) != len(other.Addrs) {
+ return false
+ }
+ for i := range r.Addrs {
+ if !r.Addrs[i].Equal(other.Addrs[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// ToProtobuf returns the equivalent Protocol Buffer struct object of a PeerRecord.
+func (r *PeerRecord) ToProtobuf() (*pb.PeerRecord, error) {
+ idBytes, err := r.PeerID.MarshalBinary()
+ if err != nil {
+ return nil, err
+ }
+ return &pb.PeerRecord{
+ PeerId: idBytes,
+ Addresses: addrsToProtobuf(r.Addrs),
+ Seq: r.Seq,
+ }, nil
+}
+
+func addrsFromProtobuf(addrs []*pb.PeerRecord_AddressInfo) []ma.Multiaddr {
+ out := make([]ma.Multiaddr, 0, len(addrs))
+ for _, addr := range addrs {
+ a, err := ma.NewMultiaddrBytes(addr.Multiaddr)
+ if err != nil {
+ continue
+ }
+ out = append(out, a)
+ }
+ return out
+}
+
+func addrsToProtobuf(addrs []ma.Multiaddr) []*pb.PeerRecord_AddressInfo {
+ out := make([]*pb.PeerRecord_AddressInfo, 0, len(addrs))
+ for _, addr := range addrs {
+ out = append(out, &pb.PeerRecord_AddressInfo{Multiaddr: addr.Bytes()})
+ }
+ return out
+}
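An end-to-end sketch of the signing flow this file is built for, mirroring the test that follows: seal a PeerRecord into a signed envelope, marshal it, then verify and recover the typed record on the receiving side:

```go
package main

import (
	"crypto/rand"
	"fmt"

	ic "github.com/libp2p/go-libp2p/core/crypto"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/record"
	ma "github.com/multiformats/go-multiaddr"
)

func main() {
	priv, _, err := ic.GenerateEd25519Key(rand.Reader)
	if err != nil {
		panic(err)
	}
	id, err := peer.IDFromPrivateKey(priv)
	if err != nil {
		panic(err)
	}

	rec := &peer.PeerRecord{
		PeerID: id,
		Addrs:  []ma.Multiaddr{ma.StringCast("/ip4/1.2.3.4/tcp/4001")},
		Seq:    peer.TimestampSeq(),
	}

	// Seal signs the record into an envelope; Marshal makes it shareable.
	env, err := record.Seal(rec, priv)
	if err != nil {
		panic(err)
	}
	envBytes, err := env.Marshal()
	if err != nil {
		panic(err)
	}

	// The receiver verifies the signature and recovers the typed record.
	_, untyped, err := record.ConsumeEnvelope(envBytes, peer.PeerRecordEnvelopeDomain)
	if err != nil {
		panic(err)
	}
	fmt.Println(untyped.(*peer.PeerRecord).Seq)
}
```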
diff --git a/core/peer/record_test.go b/core/peer/record_test.go
new file mode 100644
index 0000000000..4ac8ffba24
--- /dev/null
+++ b/core/peer/record_test.go
@@ -0,0 +1,67 @@
+package peer_test
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/libp2p/go-libp2p/core/crypto"
+ . "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/record"
+ "github.com/libp2p/go-libp2p/core/test"
+)
+
+func TestPeerRecordConstants(t *testing.T) {
+ msgf := "Changing the %s may cause peer records to be incompatible with older versions. " +
+ "If you've already thought that through, please update this test so that it passes with the new values."
+ rec := PeerRecord{}
+ if rec.Domain() != "libp2p-peer-record" {
+ t.Errorf(msgf, "signing domain")
+ }
+ if !bytes.Equal(rec.Codec(), []byte{0x03, 0x01}) {
+ t.Errorf(msgf, "codec value")
+ }
+}
+
+func TestSignedPeerRecordFromEnvelope(t *testing.T) {
+ priv, _, err := test.RandTestKeyPair(crypto.Ed25519, 256)
+ test.AssertNilError(t, err)
+
+ addrs := test.GenerateTestAddrs(10)
+ id, err := IDFromPrivateKey(priv)
+ test.AssertNilError(t, err)
+
+ rec := &PeerRecord{PeerID: id, Addrs: addrs, Seq: TimestampSeq()}
+ envelope, err := record.Seal(rec, priv)
+ test.AssertNilError(t, err)
+
+ t.Run("is unaltered after round-trip serde", func(t *testing.T) {
+ envBytes, err := envelope.Marshal()
+ test.AssertNilError(t, err)
+
+ env2, untypedRecord, err := record.ConsumeEnvelope(envBytes, PeerRecordEnvelopeDomain)
+ test.AssertNilError(t, err)
+ rec2, ok := untypedRecord.(*PeerRecord)
+ if !ok {
+ t.Error("unmarshaled record is not a *PeerRecord")
+ }
+ if !rec.Equal(rec2) {
+ t.Error("expected peer record to be unaltered after round-trip serde")
+ }
+ if !envelope.Equal(env2) {
+ t.Error("expected signed envelope to be unchanged after round-trip serde")
+ }
+ })
+}
+
+// This is pretty much guaranteed to pass on Linux no matter how we implement it, but Windows has
+// low clock precision. This makes sure we never get a duplicate.
+func TestTimestampSeq(t *testing.T) {
+ var last uint64
+ for i := 0; i < 1000; i++ {
+ next := TimestampSeq()
+ if next <= last {
+ t.Errorf("non-increasing timestamp found: %d <= %d", next, last)
+ }
+ last = next
+ }
+}
diff --git a/core/peerstore/helpers.go b/core/peerstore/helpers.go
new file mode 100644
index 0000000000..92e522d580
--- /dev/null
+++ b/core/peerstore/helpers.go
@@ -0,0 +1,14 @@
+package peerstore
+
+import (
+ "github.com/libp2p/go-libp2p/core/peer"
+)
+
+// AddrInfos returns an AddrInfo for each specified peer ID, in-order.
+func AddrInfos(ps Peerstore, peers []peer.ID) []peer.AddrInfo {
+ pi := make([]peer.AddrInfo, len(peers))
+ for i, p := range peers {
+ pi[i] = ps.PeerInfo(p)
+ }
+ return pi
+}
diff --git a/core/peerstore/peerstore.go b/core/peerstore/peerstore.go
new file mode 100644
index 0000000000..6366026c9d
--- /dev/null
+++ b/core/peerstore/peerstore.go
@@ -0,0 +1,234 @@
+// Package peerstore provides types and interfaces for local storage of address information,
+// metadata, and public key material about libp2p peers.
+package peerstore
+
+import (
+ "context"
+ "errors"
+ "io"
+ "math"
+ "time"
+
+ ic "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/protocol"
+ "github.com/libp2p/go-libp2p/core/record"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+var ErrNotFound = errors.New("item not found")
+
+var (
+ // AddressTTL is the expiration time of addresses.
+ AddressTTL = time.Hour
+
+ // TempAddrTTL is the ttl used for a short-lived address.
+ TempAddrTTL = time.Minute * 2
+
+ // RecentlyConnectedAddrTTL is used when we recently connected to a peer.
+ // It means that we are reasonably certain of the peer's address.
+ RecentlyConnectedAddrTTL = time.Minute * 15
+
+ // OwnObservedAddrTTL is used for our own external addresses observed by peers.
+ //
+ // Deprecated: observed addresses are maintained until we disconnect from the peer that provided them.
+ OwnObservedAddrTTL = time.Minute * 30
+)
+
+// Permanent TTLs (distinct so we can distinguish between them, constant as they
+// are, in fact, permanent)
+const (
+ // PermanentAddrTTL is the ttl for a "permanent address" (e.g. bootstrap nodes).
+ PermanentAddrTTL = math.MaxInt64 - iota
+
+ // ConnectedAddrTTL is the ttl used for the addresses of a peer to whom
+ // we're connected directly. This is basically permanent, as we will
+ // clear them and re-add them under a TempAddrTTL after disconnecting.
+ ConnectedAddrTTL
+)
+
+// Peerstore provides a thread-safe store of Peer related
+// information.
+type Peerstore interface {
+ io.Closer
+
+ AddrBook
+ KeyBook
+ PeerMetadata
+ Metrics
+ ProtoBook
+
+ // PeerInfo returns a peer.AddrInfo struct for a given peer.ID.
+ // This is a small slice of the information Peerstore has on
+ // that peer, useful to other services.
+ PeerInfo(peer.ID) peer.AddrInfo
+
+ // Peers returns all the peer IDs stored across all inner stores.
+ Peers() peer.IDSlice
+
+ // RemovePeer removes all the peer related information except its addresses. To remove the
+ // addresses use `AddrBook.ClearAddrs` or set the address ttls to 0.
+ RemovePeer(peer.ID)
+}
+
+// PeerMetadata can handle values of any type. Serializing values is
+// up to the implementation. Dynamic type introspection may not be
+// supported, in which case explicitly enlisting types in the
+// serializer may be required.
+//
+// Refer to the docs of the underlying implementation for more
+// information.
+type PeerMetadata interface {
+ // Get / Put is a simple registry for other peer-related key/value pairs.
+ // If we find something we use often, it should become its own set of
+ // methods. This is a last resort.
+ Get(p peer.ID, key string) (interface{}, error)
+ Put(p peer.ID, key string, val interface{}) error
+
+ // RemovePeer removes all values stored for a peer.
+ RemovePeer(peer.ID)
+}
+
+// AddrBook holds the multiaddrs of peers.
+type AddrBook interface {
+ // AddAddr calls AddAddrs(p, []ma.Multiaddr{addr}, ttl)
+ AddAddr(p peer.ID, addr ma.Multiaddr, ttl time.Duration)
+
+ // AddAddrs gives this AddrBook addresses to use, with a given ttl
+ // (time-to-live), after which the address is no longer valid.
+ // If the manager has a longer TTL, the operation is a no-op for that address
+ AddAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration)
+
+ // SetAddr calls mgr.SetAddrs(p, addr, ttl)
+ SetAddr(p peer.ID, addr ma.Multiaddr, ttl time.Duration)
+
+ // SetAddrs sets the ttl on addresses. This clears any TTL there previously.
+ // This is used when we receive the best estimate of the validity of an address.
+ SetAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration)
+
+ // UpdateAddrs updates the addresses associated with the given peer that have
+ // the given oldTTL to have the given newTTL.
+ UpdateAddrs(p peer.ID, oldTTL time.Duration, newTTL time.Duration)
+
+ // Addrs returns all known (and valid) addresses for a given peer.
+ Addrs(p peer.ID) []ma.Multiaddr
+
+ // AddrStream returns a channel that gets all addresses for a given
+ // peer sent on it. If new addresses are added after the call is made
+ // they will be sent along through the channel as well.
+ AddrStream(context.Context, peer.ID) <-chan ma.Multiaddr
+
+ // ClearAddrs removes all previously stored addresses.
+ ClearAddrs(p peer.ID)
+
+ // PeersWithAddrs returns all the peer IDs stored in the AddrBook.
+ PeersWithAddrs() peer.IDSlice
+}
+
+// CertifiedAddrBook manages signed peer records and "self-certified" addresses
+// contained within them.
+// Use this interface with an `AddrBook`.
+//
+// To test whether a given AddrBook / Peerstore implementation supports
+// certified addresses, callers should use the GetCertifiedAddrBook helper or
+// type-assert on the CertifiedAddrBook interface:
+//
+// if cab, ok := aPeerstore.(CertifiedAddrBook); ok {
+// cab.ConsumePeerRecord(signedPeerRecord, aTTL)
+// }
+type CertifiedAddrBook interface {
+ // ConsumePeerRecord stores a signed peer record and the contained addresses for
+ // ttl duration.
+ // The addresses contained in the signed peer record will expire after ttl. If any
+ // address is already present in the peer store, it will expire at the maximum of the
+ // existing TTL and the provided ttl.
+ // The signed peer record itself will be expired when all the addresses associated with the peer,
+ // self-certified or not, are removed from the AddrBook.
+ //
+ // To delete the signed peer record, use `AddrBook.UpdateAddrs`,`AddrBook.SetAddrs`, or
+ // `AddrBook.ClearAddrs` with ttl 0.
+ // Note: Future calls to ConsumePeerRecord will not expire self-certified addresses from the
+ // previous calls.
+ //
+ // The `accepted` return value indicates that the record was successfully processed. If
+ // `accepted` is false but no error is returned, it means that the record was ignored, most
+ // likely because a newer record exists for the same peer with a greater seq value.
+ //
+ // The Envelopes containing the signed peer records can be retrieved by calling
+ // GetPeerRecord(peerID).
+ ConsumePeerRecord(s *record.Envelope, ttl time.Duration) (accepted bool, err error)
+
+ // GetPeerRecord returns an Envelope containing a peer record for the
+ // peer, or nil if no record exists.
+ GetPeerRecord(p peer.ID) *record.Envelope
+}
+
+// GetCertifiedAddrBook is a helper to "upcast" an AddrBook to a
+// CertifiedAddrBook by using type assertion. If the given AddrBook
+// is also a CertifiedAddrBook, it will be returned, and the ok return
+// value will be true. Returns (nil, false) if the AddrBook is not a
+// CertifiedAddrBook.
+//
+// Note that since Peerstore embeds the AddrBook interface, you can also
+// call GetCertifiedAddrBook(myPeerstore).
+func GetCertifiedAddrBook(ab AddrBook) (cab CertifiedAddrBook, ok bool) {
+ cab, ok = ab.(CertifiedAddrBook)
+ return cab, ok
+}
+
+// KeyBook tracks the keys of Peers.
+type KeyBook interface {
+ // PubKey returns the public key of a peer.
+ PubKey(peer.ID) ic.PubKey
+
+ // AddPubKey stores the public key of a peer.
+ AddPubKey(peer.ID, ic.PubKey) error
+
+ // PrivKey returns the private key of a peer, if known. Generally this might only be our own
+ // private key, see
+ // https://discuss.libp2p.io/t/what-is-the-purpose-of-having-map-peer-id-privatekey-in-peerstore/74.
+ PrivKey(peer.ID) ic.PrivKey
+
+ // AddPrivKey stores the private key of a peer.
+ AddPrivKey(peer.ID, ic.PrivKey) error
+
+ // PeersWithKeys returns all the peer IDs stored in the KeyBook.
+ PeersWithKeys() peer.IDSlice
+
+ // RemovePeer removes all keys associated with a peer.
+ RemovePeer(peer.ID)
+}
+
+// Metrics tracks metrics across a set of peers.
+type Metrics interface {
+ // RecordLatency records a new latency measurement
+ RecordLatency(peer.ID, time.Duration)
+
+ // LatencyEWMA returns an exponentially-weighted moving avg.
+ // of all measurements of a peer's latency.
+ LatencyEWMA(peer.ID) time.Duration
+
+ // RemovePeer removes all metrics stored for a peer.
+ RemovePeer(peer.ID)
+}
+
+// ProtoBook tracks the protocols supported by peers.
+type ProtoBook interface {
+ GetProtocols(peer.ID) ([]protocol.ID, error)
+ AddProtocols(peer.ID, ...protocol.ID) error
+ SetProtocols(peer.ID, ...protocol.ID) error
+ RemoveProtocols(peer.ID, ...protocol.ID) error
+
+ // SupportsProtocols returns the set of protocols the peer supports from among the given protocols.
+ // If the returned error is not nil, the result is indeterminate.
+ SupportsProtocols(peer.ID, ...protocol.ID) ([]protocol.ID, error)
+
+ // FirstSupportedProtocol returns the first protocol that the peer supports among the given protocols.
+ // If the peer does not support any of the given protocols, this function will return an empty protocol.ID and a nil error.
+ // If the returned error is not nil, the result is indeterminate.
+ FirstSupportedProtocol(peer.ID, ...protocol.ID) (protocol.ID, error)
+
+ // RemovePeer removes all protocols associated with a peer.
+ RemovePeer(peer.ID)
+}
diff --git a/core/pnet/codec.go b/core/pnet/codec.go
new file mode 100644
index 0000000000..2ff1e7628c
--- /dev/null
+++ b/core/pnet/codec.go
@@ -0,0 +1,66 @@
+package pnet
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/base64"
+ "encoding/hex"
+ "fmt"
+ "io"
+)
+
+var (
+ pathPSKv1 = []byte("/key/swarm/psk/1.0.0/")
+ pathBin = "/bin/"
+ pathBase16 = "/base16/"
+ pathBase64 = "/base64/"
+)
+
+func readHeader(r *bufio.Reader) ([]byte, error) {
+ header, err := r.ReadBytes('\n')
+ if err != nil {
+ return nil, err
+ }
+
+ return bytes.TrimRight(header, "\r\n"), nil
+}
+
+func expectHeader(r *bufio.Reader, expected []byte) error {
+ header, err := readHeader(r)
+ if err != nil {
+ return err
+ }
+ if !bytes.Equal(header, expected) {
+ return fmt.Errorf("expected file header %s, got: %s", expected, header)
+ }
+ return nil
+}
+
+// DecodeV1PSK reads a multicodec-encoded V1 PSK.
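+//
+// The expected input layout, line by line, is (shown for the base16 variant;
+// the other supported encoding headers are /base64/ and /bin/):
+//
+//	/key/swarm/psk/1.0.0/
+//	/base16/
+//	<64 hex characters encoding the 32-byte key>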
+func DecodeV1PSK(in io.Reader) (PSK, error) {
+ reader := bufio.NewReader(in)
+ if err := expectHeader(reader, pathPSKv1); err != nil {
+ return nil, err
+ }
+ header, err := readHeader(reader)
+ if err != nil {
+ return nil, err
+ }
+
+ var decoder io.Reader
+ switch string(header) {
+ case pathBase16:
+ decoder = hex.NewDecoder(reader)
+ case pathBase64:
+ decoder = base64.NewDecoder(base64.StdEncoding, reader)
+ case pathBin:
+ decoder = reader
+ default:
+ return nil, fmt.Errorf("unknown encoding: %s", header)
+ }
+ out := make([]byte, 32)
+ if _, err = io.ReadFull(decoder, out[:]); err != nil {
+ return nil, err
+ }
+ return out, nil
+}
diff --git a/core/pnet/codec_test.go b/core/pnet/codec_test.go
new file mode 100644
index 0000000000..b4b9272d09
--- /dev/null
+++ b/core/pnet/codec_test.go
@@ -0,0 +1,122 @@
+package pnet
+
+import (
+ "bytes"
+ "encoding/base64"
+ "testing"
+)
+
+func bufWithBase(base string, windows bool) *bytes.Buffer {
+ b := &bytes.Buffer{}
+ b.Write(pathPSKv1)
+ if windows {
+ b.WriteString("\r")
+ }
+ b.WriteString("\n")
+ b.WriteString(base)
+ if windows {
+ b.WriteString("\r")
+ }
+ b.WriteString("\n")
+ return b
+}
+
+func TestDecodeHex(t *testing.T) {
+ testDecodeHex(t, true)
+ testDecodeHex(t, false)
+}
+
+func TestDecodeBad(t *testing.T) {
+ testDecodeBad(t, true)
+ testDecodeBad(t, false)
+}
+
+func testDecodeBad(t *testing.T, windows bool) {
+ b := bufWithBase("/verybadbase/", windows)
+ b.WriteString("Have fun decoding that key")
+
+ _, err := DecodeV1PSK(b)
+ if err == nil {
+ t.Fatal("expected 'unknown encoding' got nil")
+ }
+}
+
+func testDecodeHex(t *testing.T, windows bool) {
+ b := bufWithBase("/base16/", windows)
+ for i := 0; i < 32; i++ {
+ b.WriteString("FF")
+ }
+
+ psk, err := DecodeV1PSK(b)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for _, b := range psk {
+ if b != 255 {
+ t.Fatal("byte was wrong")
+ }
+ }
+}
+
+func TestDecodeB64(t *testing.T) {
+ testDecodeB64(t, true)
+ testDecodeB64(t, false)
+}
+
+func testDecodeB64(t *testing.T, windows bool) {
+ b := bufWithBase("/base64/", windows)
+ key := make([]byte, 32)
+ for i := 0; i < 32; i++ {
+ key[i] = byte(i)
+ }
+
+ e := base64.NewEncoder(base64.StdEncoding, b)
+ _, err := e.Write(key)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = e.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ psk, err := DecodeV1PSK(b)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for i, b := range psk {
+ if b != psk[i] {
+ t.Fatal("byte was wrong")
+ }
+ }
+
+}
+
+func TestDecodeBin(t *testing.T) {
+ testDecodeBin(t, true)
+ testDecodeBin(t, false)
+}
+
+func testDecodeBin(t *testing.T, windows bool) {
+ b := bufWithBase("/bin/", windows)
+ key := make([]byte, 32)
+ for i := 0; i < 32; i++ {
+ key[i] = byte(i)
+ }
+
+ b.Write(key)
+
+ psk, err := DecodeV1PSK(b)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for i, b := range psk {
+ if b != psk[i] {
+ t.Fatal("byte was wrong")
+ }
+ }
+
+}
diff --git a/core/pnet/env.go b/core/pnet/env.go
new file mode 100644
index 0000000000..c8db5e3cbd
--- /dev/null
+++ b/core/pnet/env.go
@@ -0,0 +1,19 @@
+package pnet
+
+import "os"
+
+// EnvKey defines the environment variable name for forcing usage of PNet in libp2p.
+// When the environment variable of this name is set to "1", the ForcePrivateNetwork
+// variable will be set to true.
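+//
+// For example, to enforce PNet when starting a node (the binary name is
+// hypothetical):
+//
+//	LIBP2P_FORCE_PNET=1 ./my-libp2p-app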
+const EnvKey = "LIBP2P_FORCE_PNET"
+
+// ForcePrivateNetwork is a boolean variable that forces usage of PNet in libp2p.
+// Setting this variable to true, or setting the LIBP2P_FORCE_PNET environment
+// variable to "1", makes libp2p require a private network protector.
+// If no network protector is provided and this variable is set to true, libp2p will
+// refuse to connect.
+var ForcePrivateNetwork = false
+
+func init() {
+ ForcePrivateNetwork = os.Getenv(EnvKey) == "1"
+}
diff --git a/core/pnet/error.go b/core/pnet/error.go
new file mode 100644
index 0000000000..184b71d6ac
--- /dev/null
+++ b/core/pnet/error.go
@@ -0,0 +1,34 @@
+package pnet
+
+// ErrNotInPrivateNetwork is an error that should be returned by libp2p when it
+// tries to dial with ForcePrivateNetwork set and no PNet Protector configured.
+var ErrNotInPrivateNetwork = NewError("private network was not configured but" +
+ " is enforced by the environment")
+
+// Error is an error type for ease of detecting PNet errors.
+type Error interface {
+ IsPNetError() bool
+}
+
+// NewError creates a new PNet Error with the given message.
+func NewError(err string) error {
+ return pnetErr("privnet: " + err)
+}
+
+// IsPNetError checks whether the given error is a PNet Error.
+func IsPNetError(err error) bool {
+ v, ok := err.(Error)
+ return ok && v.IsPNetError()
+}
+
+type pnetErr string
+
+var _ Error = (*pnetErr)(nil)
+
+func (p pnetErr) Error() string {
+ return string(p)
+}
+
+func (pnetErr) IsPNetError() bool {
+ return true
+}
diff --git a/core/pnet/error_test.go b/core/pnet/error_test.go
new file mode 100644
index 0000000000..e1fe462b2a
--- /dev/null
+++ b/core/pnet/error_test.go
@@ -0,0 +1,20 @@
+package pnet
+
+import (
+ "errors"
+ "testing"
+)
+
+func TestIsPnetErr(t *testing.T) {
+ err := NewError("test")
+
+ if err.Error() != "privnet: test" {
+ t.Fatalf("expected 'privnet: test' got '%s'", err.Error())
+ }
+ if !IsPNetError(err) {
+ t.Fatal("expected the pnetErr to be detected by IsPnetError")
+ }
+ if IsPNetError(errors.New("not pnet error")) {
+ t.Fatal("expected generic error not to be pnetError")
+ }
+}
diff --git a/core/pnet/protector.go b/core/pnet/protector.go
new file mode 100644
index 0000000000..9d9dce9265
--- /dev/null
+++ b/core/pnet/protector.go
@@ -0,0 +1,7 @@
+// Package pnet provides interfaces for private networking in libp2p.
+package pnet
+
+// A PSK is a 32-byte pre-shared key that enables a private network
+// implementation to be transparent in libp2p. It is used to ensure that peers
+// can only establish connections to other peers that are using the same PSK.
+type PSK []byte
diff --git a/core/protocol/id.go b/core/protocol/id.go
new file mode 100644
index 0000000000..9df3b5bcf1
--- /dev/null
+++ b/core/protocol/id.go
@@ -0,0 +1,29 @@
+package protocol
+
+// ID is an identifier used to write protocol headers in streams.
+type ID string
+
+// These are reserved protocol.IDs.
+const (
+ TestingID ID = "/p2p/_testing"
+)
+
+// ConvertFromStrings is a convenience function that takes a slice of strings and
+// converts it to a slice of protocol.ID.
+func ConvertFromStrings(ids []string) (res []ID) {
+ res = make([]ID, 0, len(ids))
+ for _, id := range ids {
+ res = append(res, ID(id))
+ }
+ return res
+}
+
+// ConvertToStrings is a convenience function that takes a slice of protocol.ID and
+// converts it to a slice of strings.
+func ConvertToStrings(ids []ID) (res []string) {
+ res = make([]string, 0, len(ids))
+ for _, id := range ids {
+ res = append(res, string(id))
+ }
+ return res
+}
diff --git a/core/protocol/switch.go b/core/protocol/switch.go
new file mode 100644
index 0000000000..683ef56fef
--- /dev/null
+++ b/core/protocol/switch.go
@@ -0,0 +1,73 @@
+// Package protocol provides core interfaces for protocol routing and negotiation in libp2p.
+package protocol
+
+import (
+ "io"
+
+ "github.com/multiformats/go-multistream"
+)
+
+// HandlerFunc is a user-provided function used by the Router to
+// handle a protocol/stream.
+//
+// Will be invoked with the protocol ID string as the first argument,
+// which may differ from the ID used for registration if the handler
+// was registered using a match function.
+type HandlerFunc = multistream.HandlerFunc[ID]
+
+// Router is an interface that allows users to add and remove protocol handlers,
+// which will be invoked when incoming stream requests for registered protocols
+// are accepted.
+//
+// Upon receiving an incoming stream request, the Router will check all registered
+// protocol handlers to determine which (if any) is capable of handling the stream.
+// The handlers are checked in order of registration; if multiple handlers are
+// eligible, only the first to be registered will be invoked.
+type Router interface {
+
+ // AddHandler registers the given handler to be invoked for
+ // an exact literal match of the given protocol ID string.
+ AddHandler(protocol ID, handler HandlerFunc)
+
+ // AddHandlerWithFunc registers the given handler to be invoked
+ // when the provided match function returns true.
+ //
+ // The match function will be invoked with an incoming protocol
+ // ID string, and should return true if the handler supports
+ // the protocol. Note that the protocol ID argument is not
+ // used for matching; if you want to match the protocol ID
+ // string exactly, you must check for it in your match function.
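+	//
+	// For example, to match any version of a protocol by prefix (a sketch;
+	// strings.HasPrefix is from the standard library):
+	//
+	//	r.AddHandlerWithFunc("/chat/1.0.0",
+	//	    func(p ID) bool { return strings.HasPrefix(string(p), "/chat/") },
+	//	    handler)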
+ AddHandlerWithFunc(protocol ID, match func(ID) bool, handler HandlerFunc)
+
+ // RemoveHandler removes the registered handler (if any) for the
+ // given protocol ID string.
+ RemoveHandler(protocol ID)
+
+ // Protocols returns a list of all registered protocol ID strings.
+ // Note that the Router may be able to handle protocol IDs not
+ // included in this list if handlers were added with match functions
+ // using AddHandlerWithFunc.
+ Protocols() []ID
+}
+
+// Negotiator is a component capable of reaching agreement over what protocols
+// to use for inbound streams of communication.
+type Negotiator interface {
+ // Negotiate will return the registered protocol handler to use for a given
+ // inbound stream, returning after the protocol has been determined and the
+ // Negotiator has finished using the stream for negotiation. Returns an
+ // error if negotiation fails.
+ Negotiate(rwc io.ReadWriteCloser) (ID, HandlerFunc, error)
+
+ // Handle calls Negotiate to determine which protocol handler to use for an
+ // inbound stream, then invokes the protocol handler function, passing it
+ // the protocol ID and the stream. Returns an error if negotiation fails.
+ Handle(rwc io.ReadWriteCloser) error
+}
+
+// Switch is the component responsible for "dispatching" incoming stream requests to
+// their corresponding stream handlers. It is both a Negotiator and a Router.
+type Switch interface {
+ Router
+ Negotiator
+}
diff --git a/core/record/envelope.go b/core/record/envelope.go
new file mode 100644
index 0000000000..413a55c9e8
--- /dev/null
+++ b/core/record/envelope.go
@@ -0,0 +1,294 @@
+package record
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "sync"
+
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/internal/catch"
+ "github.com/libp2p/go-libp2p/core/record/pb"
+
+ pool "github.com/libp2p/go-buffer-pool"
+
+ "github.com/multiformats/go-varint"
+ "google.golang.org/protobuf/proto"
+)
+
+// Envelope contains an arbitrary []byte payload, signed by a libp2p peer.
+//
+// Envelopes are signed in the context of a particular "domain", which is a
+// string specified when creating and verifying the envelope. You must know the
+// domain string used to produce the envelope in order to verify the signature
+// and access the payload.
+type Envelope struct {
+ // The public key that can be used to verify the signature and derive the peer id of the signer.
+ PublicKey crypto.PubKey
+
+ // A binary identifier that indicates what kind of data is contained in the payload.
+ // TODO(yusef): enforce multicodec prefix
+ PayloadType []byte
+
+ // The envelope payload.
+ RawPayload []byte
+
+ // The signature of the domain string :: type hint :: payload.
+ signature []byte
+
+ // the unmarshalled payload as a Record, cached on first access via the Record accessor method
+ cached Record
+ unmarshalError error
+ unmarshalOnce sync.Once
+}
+
+var ErrEmptyDomain = errors.New("envelope domain must not be empty")
+var ErrEmptyPayloadType = errors.New("payloadType must not be empty")
+var ErrInvalidSignature = errors.New("invalid signature or incorrect domain")
+
+// Seal marshals the given Record, places the marshaled bytes inside an Envelope,
+// and signs with the given private key.
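+//
+// For example (a sketch; rec is any Record implementation and priv a
+// crypto.PrivKey):
+//
+//	env, err := Seal(rec, priv)
+//	if err != nil {
+//	    // handle error
+//	}
+//	data, err := env.Marshal()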
+func Seal(rec Record, privateKey crypto.PrivKey) (*Envelope, error) {
+ payload, err := rec.MarshalRecord()
+ if err != nil {
+ return nil, fmt.Errorf("error marshaling record: %v", err)
+ }
+
+ domain := rec.Domain()
+ payloadType := rec.Codec()
+ if domain == "" {
+ return nil, ErrEmptyDomain
+ }
+
+ if len(payloadType) == 0 {
+ return nil, ErrEmptyPayloadType
+ }
+
+ unsigned, err := makeUnsigned(domain, payloadType, payload)
+ if err != nil {
+ return nil, err
+ }
+ defer pool.Put(unsigned)
+
+ sig, err := privateKey.Sign(unsigned)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Envelope{
+ PublicKey: privateKey.GetPublic(),
+ PayloadType: payloadType,
+ RawPayload: payload,
+ signature: sig,
+ }, nil
+}
+
+// ConsumeEnvelope unmarshals a serialized Envelope and validates its
+// signature using the provided 'domain' string. If unmarshalling or validation
+// fails, an error is returned along with a nil Envelope and a nil Record.
+//
+// On success, ConsumeEnvelope returns the Envelope itself, as well as the inner payload,
+// unmarshalled into a concrete Record type. The actual type of the returned Record depends
+// on what has been registered for the Envelope's PayloadType (see RegisterType for details).
+//
+// You can type assert on the returned Record to convert it to an instance of the concrete
+// Record type:
+//
+// envelope, rec, err := ConsumeEnvelope(envelopeBytes, peer.PeerRecordEnvelopeDomain)
+// if err != nil {
+//	    handleError(err) // envelope and rec are nil when an error is returned
+// return
+// }
+// peerRec, ok := rec.(*peer.PeerRecord)
+// if ok {
+// doSomethingWithPeerRecord(peerRec)
+// }
+//
+// If the Envelope signature is valid, but no Record type is registered for the Envelope's
+// PayloadType, an error wrapping ErrPayloadTypeNotRegistered will be returned, along with
+// a nil Envelope and a nil Record.
+func ConsumeEnvelope(data []byte, domain string) (envelope *Envelope, rec Record, err error) {
+ e, err := UnmarshalEnvelope(data)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed when unmarshalling the envelope: %w", err)
+ }
+
+ err = e.validate(domain)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to validate envelope: %w", err)
+ }
+
+ rec, err = e.Record()
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to unmarshal envelope payload: %w", err)
+ }
+ return e, rec, nil
+}
+
+// ConsumeTypedEnvelope unmarshals a serialized Envelope and validates its
+// signature. If validation fails, an error is returned, along with the unmarshalled
+// envelope, so it can be inspected.
+//
+// Unlike ConsumeEnvelope, ConsumeTypedEnvelope does not try to automatically determine
+// the type of Record to unmarshal the Envelope's payload into. Instead, the caller provides
+// a destination Record instance, which will unmarshal the Envelope payload. It is the caller's
+// responsibility to determine whether the given Record type is able to unmarshal the payload
+// correctly.
+//
+// rec := &MyRecordType{}
+// envelope, err := ConsumeTypedEnvelope(envelopeBytes, rec)
+// if err != nil {
+// handleError(envelope, err)
+// }
+// doSomethingWithRecord(rec)
+//
+// Important: you MUST check the error value before using the returned Envelope. In some error
+// cases, including when the envelope signature is invalid, both the Envelope and an error will
+// be returned. This allows you to inspect the unmarshalled but invalid Envelope. As a result,
+// you must not assume that any non-nil Envelope returned from this function is valid.
+func ConsumeTypedEnvelope(data []byte, destRecord Record) (envelope *Envelope, err error) {
+ e, err := UnmarshalEnvelope(data)
+ if err != nil {
+ return nil, fmt.Errorf("failed when unmarshalling the envelope: %w", err)
+ }
+
+ err = e.validate(destRecord.Domain())
+ if err != nil {
+ return e, fmt.Errorf("failed to validate envelope: %w", err)
+ }
+
+ err = destRecord.UnmarshalRecord(e.RawPayload)
+ if err != nil {
+ return e, fmt.Errorf("failed to unmarshal envelope payload: %w", err)
+ }
+ e.cached = destRecord
+ return e, nil
+}
+
+// UnmarshalEnvelope unmarshals a serialized Envelope protobuf message,
+// without validating its contents. Most users should use ConsumeEnvelope.
+func UnmarshalEnvelope(data []byte) (*Envelope, error) {
+ var e pb.Envelope
+ if err := proto.Unmarshal(data, &e); err != nil {
+ return nil, err
+ }
+
+ key, err := crypto.PublicKeyFromProto(e.PublicKey)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Envelope{
+ PublicKey: key,
+ PayloadType: e.PayloadType,
+ RawPayload: e.Payload,
+ signature: e.Signature,
+ }, nil
+}
+
+// Marshal returns a byte slice containing a serialized protobuf representation
+// of an Envelope.
+func (e *Envelope) Marshal() (res []byte, err error) {
+ defer func() { catch.HandlePanic(recover(), &err, "libp2p envelope marshal") }()
+ key, err := crypto.PublicKeyToProto(e.PublicKey)
+ if err != nil {
+ return nil, err
+ }
+
+ msg := pb.Envelope{
+ PublicKey: key,
+ PayloadType: e.PayloadType,
+ Payload: e.RawPayload,
+ Signature: e.signature,
+ }
+ return proto.Marshal(&msg)
+}
+
+// Equal returns true if the other Envelope has the same public key,
+// payload, payload type, and signature. This implies that they were also
+// created with the same domain string.
+func (e *Envelope) Equal(other *Envelope) bool {
+ if other == nil {
+ return e == nil
+ }
+ return e.PublicKey.Equals(other.PublicKey) &&
+ bytes.Equal(e.PayloadType, other.PayloadType) &&
+ bytes.Equal(e.signature, other.signature) &&
+ bytes.Equal(e.RawPayload, other.RawPayload)
+}
+
+// Record returns the Envelope's payload unmarshalled as a Record.
+// The concrete type of the returned Record depends on which Record
+// type was registered for the Envelope's PayloadType - see record.RegisterType.
+//
+// Once unmarshalled, the Record is cached for future access.
+func (e *Envelope) Record() (Record, error) {
+ e.unmarshalOnce.Do(func() {
+ if e.cached != nil {
+ return
+ }
+ e.cached, e.unmarshalError = unmarshalRecordPayload(e.PayloadType, e.RawPayload)
+ })
+ return e.cached, e.unmarshalError
+}
+
+// TypedRecord unmarshals the Envelope's payload to the given Record instance.
+// It is the caller's responsibility to ensure that the Record type is capable
+// of unmarshalling the Envelope payload. Callers can inspect the Envelope's
+// PayloadType field to determine the correct type of Record to use.
+//
+// This method will always unmarshal the Envelope payload even if a cached record
+// exists.
+func (e *Envelope) TypedRecord(dest Record) error {
+ return dest.UnmarshalRecord(e.RawPayload)
+}
+
+// validate returns nil if the envelope signature is valid for the given 'domain',
+// or an error if signature validation fails.
+func (e *Envelope) validate(domain string) error {
+ unsigned, err := makeUnsigned(domain, e.PayloadType, e.RawPayload)
+ if err != nil {
+ return err
+ }
+ defer pool.Put(unsigned)
+
+ valid, err := e.PublicKey.Verify(unsigned, e.signature)
+ if err != nil {
+ return fmt.Errorf("failed while verifying signature: %w", err)
+ }
+ if !valid {
+ return ErrInvalidSignature
+ }
+ return nil
+}
+
+// makeUnsigned is a helper function that prepares a buffer to sign or verify.
+// It returns a byte slice from a pool. The caller MUST return this slice to the
+// pool.
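+//
+// The buffer layout is each field prefixed with its length as an unsigned
+// varint:
+//
+//	varint(len(domain)) || domain ||
+//	varint(len(payloadType)) || payloadType ||
+//	varint(len(payload)) || payload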
+func makeUnsigned(domain string, payloadType []byte, payload []byte) ([]byte, error) {
+ var (
+ fields = [][]byte{[]byte(domain), payloadType, payload}
+
+ // fields are prefixed with their length as an unsigned varint. we
+ // compute the lengths before allocating the sig buffer, so we know how
+ // much space to add for the lengths
+ flen = make([][]byte, len(fields))
+ size = 0
+ )
+
+ for i, f := range fields {
+ l := len(f)
+ flen[i] = varint.ToUvarint(uint64(l))
+ size += l + len(flen[i])
+ }
+
+ b := pool.Get(size)
+
+ var s int
+ for i, f := range fields {
+ s += copy(b[s:], flen[i])
+ s += copy(b[s:], f)
+ }
+
+ return b[:s], nil
+}
diff --git a/core/record/envelope_test.go b/core/record/envelope_test.go
new file mode 100644
index 0000000000..a8f8cb5716
--- /dev/null
+++ b/core/record/envelope_test.go
@@ -0,0 +1,314 @@
+package record_test
+
+import (
+ "bytes"
+ "errors"
+ "testing"
+
+ "github.com/libp2p/go-libp2p/core/crypto"
+ . "github.com/libp2p/go-libp2p/core/record"
+ "github.com/libp2p/go-libp2p/core/record/pb"
+ "github.com/libp2p/go-libp2p/core/test"
+
+ "google.golang.org/protobuf/proto"
+)
+
+type simpleRecord struct {
+ testDomain *string
+ testCodec []byte
+ message string
+}
+
+func (r *simpleRecord) Domain() string {
+ if r.testDomain != nil {
+ return *r.testDomain
+ }
+ return "libp2p-testing"
+}
+
+func (r *simpleRecord) Codec() []byte {
+ if r.testCodec != nil {
+ return r.testCodec
+ }
+ return []byte("/libp2p/testdata")
+}
+
+func (r *simpleRecord) MarshalRecord() ([]byte, error) {
+ return []byte(r.message), nil
+}
+
+func (r *simpleRecord) UnmarshalRecord(buf []byte) error {
+ r.message = string(buf)
+ return nil
+}
+
+// Make an envelope, verify & open it, marshal & unmarshal it
+func TestEnvelopeHappyPath(t *testing.T) {
+ var (
+ rec = &simpleRecord{message: "hello world!"}
+ priv, pub, err = test.RandTestKeyPair(crypto.Ed25519, 256)
+ )
+
+ test.AssertNilError(t, err)
+
+ payload, err := rec.MarshalRecord()
+ test.AssertNilError(t, err)
+
+ envelope, err := Seal(rec, priv)
+ test.AssertNilError(t, err)
+
+ if !envelope.PublicKey.Equals(pub) {
+ t.Error("envelope has unexpected public key")
+ }
+
+ if !bytes.Equal(rec.Codec(), envelope.PayloadType) {
+ t.Error("PayloadType does not match record Codec")
+ }
+
+ serialized, err := envelope.Marshal()
+ test.AssertNilError(t, err)
+
+ RegisterType(&simpleRecord{})
+ deserialized, rec2, err := ConsumeEnvelope(serialized, rec.Domain())
+ test.AssertNilError(t, err)
+
+ if !bytes.Equal(deserialized.RawPayload, payload) {
+ t.Error("payload of envelope does not match input")
+ }
+
+ if !envelope.Equal(deserialized) {
+ t.Error("round-trip serde results in unequal envelope structures")
+ }
+
+ typedRec, ok := rec2.(*simpleRecord)
+ if !ok {
+ t.Error("expected ConsumeEnvelope to return record with type registered for payloadType")
+ }
+ if typedRec.message != "hello world!" {
+ t.Error("unexpected alteration of record")
+ }
+}
+
+func TestConsumeTypedEnvelope(t *testing.T) {
+ var (
+ rec = simpleRecord{message: "hello world!"}
+ priv, _, _ = test.RandTestKeyPair(crypto.Ed25519, 256)
+ )
+
+ envelope, err := Seal(&rec, priv)
+ test.AssertNilError(t, err)
+
+ envelopeBytes, err := envelope.Marshal()
+ test.AssertNilError(t, err)
+
+ rec2 := &simpleRecord{}
+ _, err = ConsumeTypedEnvelope(envelopeBytes, rec2)
+ test.AssertNilError(t, err)
+
+ if rec2.message != "hello world!" {
+ t.Error("unexpected alteration of record")
+ }
+}
+
+func TestMakeEnvelopeFailsWithEmptyDomain(t *testing.T) {
+ var (
+ rec = simpleRecord{message: "hello world!"}
+ domain = ""
+ priv, _, err = test.RandTestKeyPair(crypto.Ed25519, 256)
+ )
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // override domain with empty string
+ rec.testDomain = &domain
+
+ _, err = Seal(&rec, priv)
+ test.ExpectError(t, err, "making an envelope with an empty domain should fail")
+}
+
+func TestMakeEnvelopeFailsWithEmptyPayloadType(t *testing.T) {
+ var (
+ rec = simpleRecord{message: "hello world!"}
+ priv, _, err = test.RandTestKeyPair(crypto.Ed25519, 256)
+ )
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // override payload with empty slice
+ rec.testCodec = []byte{}
+
+ _, err = Seal(&rec, priv)
+ test.ExpectError(t, err, "making an envelope with an empty payloadType should fail")
+}
+
+type failingRecord struct {
+ allowMarshal bool
+ allowUnmarshal bool
+}
+
+func (r failingRecord) Domain() string {
+ return "testing"
+}
+
+func (r failingRecord) Codec() []byte {
+ return []byte("doesn't matter")
+}
+
+func (r failingRecord) MarshalRecord() ([]byte, error) {
+ if r.allowMarshal {
+ return []byte{}, nil
+ }
+ return nil, errors.New("marshal failed")
+}
+func (r failingRecord) UnmarshalRecord(_ []byte) error {
+ if r.allowUnmarshal {
+ return nil
+ }
+ return errors.New("unmarshal failed")
+}
+
+func TestSealFailsIfRecordMarshalFails(t *testing.T) {
+ var (
+ priv, _, err = test.RandTestKeyPair(crypto.Ed25519, 256)
+ )
+
+ if err != nil {
+ t.Fatal(err)
+ }
+ rec := failingRecord{}
+ _, err = Seal(rec, priv)
+ test.ExpectError(t, err, "Seal should fail if Record fails to marshal")
+}
+
+func TestConsumeEnvelopeFailsIfEnvelopeUnmarshalFails(t *testing.T) {
+ _, _, err := ConsumeEnvelope([]byte("not an Envelope protobuf"), "doesn't-matter")
+ test.ExpectError(t, err, "ConsumeEnvelope should fail if Envelope fails to unmarshal")
+}
+
+func TestConsumeEnvelopeFailsIfRecordUnmarshalFails(t *testing.T) {
+ var (
+ priv, _, err = test.RandTestKeyPair(crypto.Ed25519, 256)
+ )
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ RegisterType(failingRecord{})
+ rec := failingRecord{allowMarshal: true}
+ env, err := Seal(rec, priv)
+ test.AssertNilError(t, err)
+ envBytes, err := env.Marshal()
+ test.AssertNilError(t, err)
+
+ _, _, err = ConsumeEnvelope(envBytes, rec.Domain())
+ test.ExpectError(t, err, "ConsumeEnvelope should fail if Record fails to unmarshal")
+}
+
+func TestConsumeTypedEnvelopeFailsIfRecordUnmarshalFails(t *testing.T) {
+ var (
+ priv, _, err = test.RandTestKeyPair(crypto.Ed25519, 256)
+ )
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ RegisterType(failingRecord{})
+ rec := failingRecord{allowMarshal: true}
+ env, err := Seal(rec, priv)
+ test.AssertNilError(t, err)
+ envBytes, err := env.Marshal()
+ test.AssertNilError(t, err)
+
+ rec2 := failingRecord{}
+ _, err = ConsumeTypedEnvelope(envBytes, rec2)
+ test.ExpectError(t, err, "ConsumeTypedEnvelope should fail if Record fails to unmarshal")
+}
+
+func TestEnvelopeValidateFailsForDifferentDomain(t *testing.T) {
+ var (
+ rec = &simpleRecord{message: "hello world"}
+ priv, _, err = test.RandTestKeyPair(crypto.Ed25519, 256)
+ )
+
+ test.AssertNilError(t, err)
+
+ envelope, err := Seal(rec, priv)
+ test.AssertNilError(t, err)
+
+ serialized, err := envelope.Marshal()
+ test.AssertNilError(t, err)
+
+ // try to open our modified envelope
+ _, _, err = ConsumeEnvelope(serialized, "wrong-domain")
+ test.ExpectError(t, err, "should not be able to open envelope with incorrect domain")
+}
+
+func TestEnvelopeValidateFailsIfPayloadTypeIsAltered(t *testing.T) {
+ var (
+ rec = &simpleRecord{message: "hello world!"}
+ domain = "libp2p-testing"
+ priv, _, err = test.RandTestKeyPair(crypto.Ed25519, 256)
+ )
+
+ test.AssertNilError(t, err)
+
+ envelope, err := Seal(rec, priv)
+ test.AssertNilError(t, err)
+
+ serialized := alterMessageAndMarshal(t, envelope, func(msg *pb.Envelope) {
+ msg.PayloadType = []byte("foo")
+ })
+
+ // try to open our modified envelope
+ _, _, err = ConsumeEnvelope(serialized, domain)
+ test.ExpectError(t, err, "should not be able to open envelope with modified PayloadType")
+}
+
+func TestEnvelopeValidateFailsIfContentsAreAltered(t *testing.T) {
+ var (
+ rec = &simpleRecord{message: "hello world!"}
+ domain = "libp2p-testing"
+ priv, _, err = test.RandTestKeyPair(crypto.Ed25519, 256)
+ )
+
+ test.AssertNilError(t, err)
+
+ envelope, err := Seal(rec, priv)
+ test.AssertNilError(t, err)
+
+ serialized := alterMessageAndMarshal(t, envelope, func(msg *pb.Envelope) {
+ msg.Payload = []byte("totally legit, trust me")
+ })
+
+ // try to open our modified envelope
+ _, _, err = ConsumeEnvelope(serialized, domain)
+ test.ExpectError(t, err, "should not be able to open envelope with modified payload")
+}
+
+// Since we're outside of the crypto package (to avoid import cycles with test package),
+// we can't alter the fields in a Envelope directly. This helper marshals
+// the envelope to a protobuf and calls the alterMsg function, which should
+// alter the protobuf message.
+// Returns the serialized altered protobuf message.
+func alterMessageAndMarshal(t *testing.T, envelope *Envelope, alterMsg func(*pb.Envelope)) []byte {
+ t.Helper()
+
+ serialized, err := envelope.Marshal()
+ test.AssertNilError(t, err)
+
+ msg := pb.Envelope{}
+ err = proto.Unmarshal(serialized, &msg)
+ test.AssertNilError(t, err)
+
+ alterMsg(&msg)
+ serialized, err = proto.Marshal(&msg)
+ test.AssertNilError(t, err)
+
+ return serialized
+}
diff --git a/core/record/pb/envelope.pb.go b/core/record/pb/envelope.pb.go
new file mode 100644
index 0000000000..336a5c0407
--- /dev/null
+++ b/core/record/pb/envelope.pb.go
@@ -0,0 +1,168 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.6
+// protoc v5.29.2
+// source: core/record/pb/envelope.proto
+
+package pb
+
+import (
+ pb "github.com/libp2p/go-libp2p/core/crypto/pb"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+ unsafe "unsafe"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Envelope encloses a signed payload produced by a peer, along with the public
+// key of the keypair it was signed with so that it can be statelessly validated
+// by the receiver.
+//
+// The payload is prefixed with a byte string that determines the type, so it
+// can be deserialized deterministically. Often, this byte string is a
+// multicodec.
+type Envelope struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // public_key is the public key of the keypair the enclosed payload was
+ // signed with.
+ PublicKey *pb.PublicKey `protobuf:"bytes,1,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"`
+ // payload_type encodes the type of payload, so that it can be deserialized
+ // deterministically.
+ PayloadType []byte `protobuf:"bytes,2,opt,name=payload_type,json=payloadType,proto3" json:"payload_type,omitempty"`
+ // payload is the actual payload carried inside this envelope.
+ Payload []byte `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"`
+ // signature is the signature produced by the private key corresponding to
+ // the enclosed public key, over the payload, prefixing a domain string for
+ // additional security.
+ Signature []byte `protobuf:"bytes,5,opt,name=signature,proto3" json:"signature,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Envelope) Reset() {
+ *x = Envelope{}
+ mi := &file_core_record_pb_envelope_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Envelope) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Envelope) ProtoMessage() {}
+
+func (x *Envelope) ProtoReflect() protoreflect.Message {
+ mi := &file_core_record_pb_envelope_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Envelope.ProtoReflect.Descriptor instead.
+func (*Envelope) Descriptor() ([]byte, []int) {
+ return file_core_record_pb_envelope_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Envelope) GetPublicKey() *pb.PublicKey {
+ if x != nil {
+ return x.PublicKey
+ }
+ return nil
+}
+
+func (x *Envelope) GetPayloadType() []byte {
+ if x != nil {
+ return x.PayloadType
+ }
+ return nil
+}
+
+func (x *Envelope) GetPayload() []byte {
+ if x != nil {
+ return x.Payload
+ }
+ return nil
+}
+
+func (x *Envelope) GetSignature() []byte {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+
+var File_core_record_pb_envelope_proto protoreflect.FileDescriptor
+
+const file_core_record_pb_envelope_proto_rawDesc = "" +
+ "\n" +
+ "\x1dcore/record/pb/envelope.proto\x12\trecord.pb\x1a\x1bcore/crypto/pb/crypto.proto\"\x9a\x01\n" +
+ "\bEnvelope\x123\n" +
+ "\n" +
+ "public_key\x18\x01 \x01(\v2\x14.crypto.pb.PublicKeyR\tpublicKey\x12!\n" +
+ "\fpayload_type\x18\x02 \x01(\fR\vpayloadType\x12\x18\n" +
+ "\apayload\x18\x03 \x01(\fR\apayload\x12\x1c\n" +
+ "\tsignature\x18\x05 \x01(\fR\tsignatureB,Z*github.com/libp2p/go-libp2p/core/record/pbb\x06proto3"
+
+var (
+ file_core_record_pb_envelope_proto_rawDescOnce sync.Once
+ file_core_record_pb_envelope_proto_rawDescData []byte
+)
+
+func file_core_record_pb_envelope_proto_rawDescGZIP() []byte {
+ file_core_record_pb_envelope_proto_rawDescOnce.Do(func() {
+ file_core_record_pb_envelope_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_core_record_pb_envelope_proto_rawDesc), len(file_core_record_pb_envelope_proto_rawDesc)))
+ })
+ return file_core_record_pb_envelope_proto_rawDescData
+}
+
+var file_core_record_pb_envelope_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_core_record_pb_envelope_proto_goTypes = []any{
+ (*Envelope)(nil), // 0: record.pb.Envelope
+ (*pb.PublicKey)(nil), // 1: crypto.pb.PublicKey
+}
+var file_core_record_pb_envelope_proto_depIdxs = []int32{
+ 1, // 0: record.pb.Envelope.public_key:type_name -> crypto.pb.PublicKey
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_core_record_pb_envelope_proto_init() }
+func file_core_record_pb_envelope_proto_init() {
+ if File_core_record_pb_envelope_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_core_record_pb_envelope_proto_rawDesc), len(file_core_record_pb_envelope_proto_rawDesc)),
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_core_record_pb_envelope_proto_goTypes,
+ DependencyIndexes: file_core_record_pb_envelope_proto_depIdxs,
+ MessageInfos: file_core_record_pb_envelope_proto_msgTypes,
+ }.Build()
+ File_core_record_pb_envelope_proto = out.File
+ file_core_record_pb_envelope_proto_goTypes = nil
+ file_core_record_pb_envelope_proto_depIdxs = nil
+}
diff --git a/core/record/pb/envelope.proto b/core/record/pb/envelope.proto
new file mode 100644
index 0000000000..ff19284489
--- /dev/null
+++ b/core/record/pb/envelope.proto
@@ -0,0 +1,32 @@
+syntax = "proto3";
+
+package record.pb;
+
+import "core/crypto/pb/crypto.proto";
+
+option go_package = "github.com/libp2p/go-libp2p/core/record/pb";
+
+// Envelope encloses a signed payload produced by a peer, along with the public
+// key of the keypair it was signed with so that it can be statelessly validated
+// by the receiver.
+//
+// The payload is prefixed with a byte string that determines the type, so it
+// can be deserialized deterministically. Often, this byte string is a
+// multicodec.
+message Envelope {
+ // public_key is the public key of the keypair the enclosed payload was
+ // signed with.
+ crypto.pb.PublicKey public_key = 1;
+
+ // payload_type encodes the type of payload, so that it can be deserialized
+ // deterministically.
+ bytes payload_type = 2;
+
+ // payload is the actual payload carried inside this envelope.
+ bytes payload = 3;
+
+ // signature is the signature produced by the private key corresponding to
+ // the enclosed public key, over the payload, prefixing a domain string for
+ // additional security.
+ bytes signature = 5;
+}
diff --git a/core/record/record.go b/core/record/record.go
new file mode 100644
index 0000000000..9b98f04f52
--- /dev/null
+++ b/core/record/record.go
@@ -0,0 +1,105 @@
+package record
+
+import (
+ "errors"
+ "reflect"
+
+ "github.com/libp2p/go-libp2p/core/internal/catch"
+)
+
+var (
+ // ErrPayloadTypeNotRegistered is returned from ConsumeEnvelope when the Envelope's
+ // PayloadType does not match any registered Record types.
+ ErrPayloadTypeNotRegistered = errors.New("payload type is not registered")
+
+ payloadTypeRegistry = make(map[string]reflect.Type)
+)
+
+// Record represents a data type that can be used as the payload of an Envelope.
+// The Record interface defines the methods used to marshal and unmarshal a Record
+// type to a byte slice.
+//
+// Record types may be "registered" as the default for a given Envelope.PayloadType
+// using the RegisterType function. Once a Record type has been registered,
+// an instance of that type will be created and used to unmarshal the payload of
+// any Envelope with the registered PayloadType when the Envelope is opened using
+// the ConsumeEnvelope function.
+//
+// To use an unregistered Record type instead, use ConsumeTypedEnvelope and pass in
+// an instance of the Record type that you'd like the Envelope's payload to be
+// unmarshaled into.
+type Record interface {
+
+ // Domain is the "signature domain" used when signing and verifying a particular
+ // Record type. The Domain string should be unique to your Record type, and all
+ // instances of the Record type must have the same Domain string.
+ Domain() string
+
+ // Codec is a binary identifier for this type of record, ideally a registered multicodec
+ // (see https://github.com/multiformats/multicodec).
+ // When a Record is put into an Envelope (see record.Seal), the Codec value will be used
+ // as the Envelope's PayloadType. When the Envelope is later unsealed, the PayloadType
+ // will be used to look up the correct Record type to unmarshal the Envelope payload into.
+ Codec() []byte
+
+ // MarshalRecord converts a Record instance to a []byte, so that it can be used as an
+ // Envelope payload.
+ MarshalRecord() ([]byte, error)
+
+ // UnmarshalRecord unmarshals a []byte payload into an instance of a particular Record type.
+ UnmarshalRecord([]byte) error
+}
+
+// RegisterType associates a binary payload type identifier with a concrete
+// Record type. This is used to automatically unmarshal Record payloads from Envelopes
+// when using ConsumeEnvelope, and to automatically marshal Records and determine the
+// correct PayloadType when calling Seal.
+//
+// Callers must provide an instance of the record type to be registered, which must be
+// a pointer type. Registration should be done in the init function of the package
+// where the Record type is defined:
+//
+// package hello_record
+// import record "github.com/libp2p/go-libp2p/core/record"
+//
+// func init() {
+// record.RegisterType(&HelloRecord{})
+// }
+//
+// type HelloRecord struct { } // etc..
+func RegisterType(prototype Record) {
+ payloadTypeRegistry[string(prototype.Codec())] = getValueType(prototype)
+}
+
+func unmarshalRecordPayload(payloadType []byte, payloadBytes []byte) (_rec Record, err error) {
+ defer func() { catch.HandlePanic(recover(), &err, "libp2p envelope record unmarshal") }()
+
+ rec, err := blankRecordForPayloadType(payloadType)
+ if err != nil {
+ return nil, err
+ }
+ err = rec.UnmarshalRecord(payloadBytes)
+ if err != nil {
+ return nil, err
+ }
+ return rec, nil
+}
+
+func blankRecordForPayloadType(payloadType []byte) (Record, error) {
+ valueType, ok := payloadTypeRegistry[string(payloadType)]
+ if !ok {
+ return nil, ErrPayloadTypeNotRegistered
+ }
+
+ val := reflect.New(valueType)
+ asRecord := val.Interface().(Record)
+ return asRecord, nil
+}
+
+func getValueType(i interface{}) reflect.Type {
+ valueType := reflect.TypeOf(i)
+ if valueType.Kind() == reflect.Ptr {
+ valueType = valueType.Elem()
+ }
+ return valueType
+}
diff --git a/core/record/record_test.go b/core/record/record_test.go
new file mode 100644
index 0000000000..033ee7fed7
--- /dev/null
+++ b/core/record/record_test.go
@@ -0,0 +1,51 @@
+package record
+
+import "testing"
+
+var testPayloadType = []byte("/libp2p/test/record/payload-type")
+
+type testPayload struct {
+ unmarshalPayloadCalled bool
+}
+
+func (p *testPayload) Domain() string {
+ return "testing"
+}
+
+func (p *testPayload) Codec() []byte {
+ return testPayloadType
+}
+
+func (p *testPayload) MarshalRecord() ([]byte, error) {
+ return []byte("hello"), nil
+}
+
+func (p *testPayload) UnmarshalRecord(_ []byte) error {
+ p.unmarshalPayloadCalled = true
+ return nil
+}
+
+func TestUnmarshalPayload(t *testing.T) {
+ t.Run("fails if payload type is unregistered", func(t *testing.T) {
+ _, err := unmarshalRecordPayload([]byte("unknown type"), []byte{})
+ if err != ErrPayloadTypeNotRegistered {
+ t.Error("Expected error when unmarshalling payload with unregistered payload type")
+ }
+ })
+
+ t.Run("calls UnmarshalRecord on concrete Record type", func(t *testing.T) {
+ RegisterType(&testPayload{})
+
+ payload, err := unmarshalRecordPayload(testPayloadType, []byte{})
+ if err != nil {
+ t.Errorf("unexpected error unmarshalling registered payload type: %v", err)
+ }
+ typedPayload, ok := payload.(*testPayload)
+ if !ok {
+ t.Error("expected unmarshalled payload to be of the correct type")
+ }
+ if !typedPayload.unmarshalPayloadCalled {
+ t.Error("expected UnmarshalRecord to be called on concrete Record instance")
+ }
+ })
+}
diff --git a/core/routing/options.go b/core/routing/options.go
new file mode 100644
index 0000000000..4b235cbfc0
--- /dev/null
+++ b/core/routing/options.go
@@ -0,0 +1,50 @@
+package routing
+
+// Option is a single routing option.
+type Option func(opts *Options) error
+
+// Options is a set of routing options
+type Options struct {
+ // Allow expired values.
+ Expired bool
+ Offline bool
+ // Other (ValueStore implementation specific) options.
+ Other map[interface{}]interface{}
+}
+
+// Apply applies the given options to this Options
+func (opts *Options) Apply(options ...Option) error {
+ for _, o := range options {
+ if err := o(opts); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// ToOption converts this Options to a single Option.
+func (opts *Options) ToOption() Option {
+ return func(nopts *Options) error {
+ *nopts = *opts
+ if opts.Other != nil {
+ nopts.Other = make(map[interface{}]interface{}, len(opts.Other))
+ for k, v := range opts.Other {
+ nopts.Other[k] = v
+ }
+ }
+ return nil
+ }
+}
+
+// Expired is an option that tells the routing system to return expired records
+// when no newer records are known.
+var Expired Option = func(opts *Options) error {
+ opts.Expired = true
+ return nil
+}
+
+// Offline is an option that tells the routing system to operate offline (i.e., rely on cached/local data only).
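+//
+// Options can be combined, for example (a sketch; vs is assumed to implement
+// ValueStore):
+//
+//	val, err := vs.GetValue(ctx, key, Offline, Expired)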
+var Offline Option = func(opts *Options) error {
+ opts.Offline = true
+ return nil
+}
diff --git a/core/routing/query.go b/core/routing/query.go
new file mode 100644
index 0000000000..a99eccaef0
--- /dev/null
+++ b/core/routing/query.go
@@ -0,0 +1,111 @@
+package routing
+
+import (
+ "context"
+ "sync"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+)
+
+// QueryEventType indicates the query event's type.
+type QueryEventType int
+
+// QueryEventBufferSize is the number of events to buffer.
+var QueryEventBufferSize = 16
+
+const (
+ // Sending a query to a peer.
+ SendingQuery QueryEventType = iota
+ // Got a response from a peer.
+ PeerResponse
+ // Found a "closest" peer (not currently used).
+ FinalPeer
+ // Got an error when querying.
+ QueryError
+ // Found a provider.
+ Provider
+ // Found a value.
+ Value
+ // Adding a peer to the query.
+ AddingPeer
+ // Dialing a peer.
+ DialingPeer
+)
+
+// QueryEvent is emitted for every notable event that happens during a DHT query.
+type QueryEvent struct {
+ ID peer.ID
+ Type QueryEventType
+ Responses []*peer.AddrInfo
+ Extra string
+}
+
+type routingQueryKey struct{}
+type eventChannel struct {
+ mu sync.Mutex
+ ctx context.Context
+ ch chan<- *QueryEvent
+}
+
+// waitThenClose is spawned in a goroutine when the channel is registered. This
+// safely cleans up the channel when the context has been canceled.
+func (e *eventChannel) waitThenClose() {
+ <-e.ctx.Done()
+ e.mu.Lock()
+ close(e.ch)
+ // 1. Signals that we're done.
+ // 2. Frees memory (in case we end up hanging on to this for a while).
+ e.ch = nil
+ e.mu.Unlock()
+}
+
+// send sends an event on the event channel, aborting if either the passed or
+// the internal context expire.
+func (e *eventChannel) send(ctx context.Context, ev *QueryEvent) {
+ e.mu.Lock()
+ // Closed.
+ if e.ch == nil {
+ e.mu.Unlock()
+ return
+ }
+ // in case the passed context is unrelated, wait on both.
+ select {
+ case e.ch <- ev:
+ case <-e.ctx.Done():
+ case <-ctx.Done():
+ }
+ e.mu.Unlock()
+}
+
+// RegisterForQueryEvents registers a query event channel with the given
+// context. The returned context can be passed to DHT queries to receive query
+// events on the returned channels.
+//
+// The passed context MUST be canceled when the caller is no longer interested
+// in query events.
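+//
+// A typical pattern (a sketch; the log call is illustrative):
+//
+//	qctx, events := RegisterForQueryEvents(ctx)
+//	go func() {
+//	    for e := range events {
+//	        log.Printf("query event: type=%d peer=%s", e.Type, e.ID)
+//	    }
+//	}()
+//	// run the DHT query with qctx, then cancel ctx when done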
+func RegisterForQueryEvents(ctx context.Context) (context.Context, <-chan *QueryEvent) {
+ ch := make(chan *QueryEvent, QueryEventBufferSize)
+ ech := &eventChannel{ch: ch, ctx: ctx}
+ go ech.waitThenClose()
+ return context.WithValue(ctx, routingQueryKey{}, ech), ch
+}
+
+// PublishQueryEvent publishes a query event to the query event channel
+// associated with the given context, if any.
+func PublishQueryEvent(ctx context.Context, ev *QueryEvent) {
+ ich := ctx.Value(routingQueryKey{})
+ if ich == nil {
+ return
+ }
+
+ // We *want* to panic here.
+ ech := ich.(*eventChannel)
+ ech.send(ctx, ev)
+}
+
+// SubscribesToQueryEvents returns true if the context subscribes to query
+// events. If this function returns false, calling `PublishQueryEvent` on the
+// context will be a no-op.
+func SubscribesToQueryEvents(ctx context.Context) bool {
+ return ctx.Value(routingQueryKey{}) != nil
+}
diff --git a/core/routing/query_serde.go b/core/routing/query_serde.go
new file mode 100644
index 0000000000..6b566e0ca8
--- /dev/null
+++ b/core/routing/query_serde.go
@@ -0,0 +1,40 @@
+package routing
+
+import (
+ "encoding/json"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+)
+
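+// MarshalJSON encodes the event with the peer ID rendered as a string. For
+// example (an illustration; the peer ID is abbreviated):
+//
+//	{"Extra":"","ID":"12D3KooW...","Responses":null,"Type":0}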
+func (qe *QueryEvent) MarshalJSON() ([]byte, error) {
+ return json.Marshal(map[string]interface{}{
+ "ID": qe.ID.String(),
+ "Type": int(qe.Type),
+ "Responses": qe.Responses,
+ "Extra": qe.Extra,
+ })
+}
+
+func (qe *QueryEvent) UnmarshalJSON(b []byte) error {
+ temp := struct {
+ ID string
+ Type int
+ Responses []*peer.AddrInfo
+ Extra string
+ }{}
+ err := json.Unmarshal(b, &temp)
+ if err != nil {
+ return err
+ }
+ if len(temp.ID) > 0 {
+ pid, err := peer.Decode(temp.ID)
+ if err != nil {
+ return err
+ }
+ qe.ID = pid
+ }
+ qe.Type = QueryEventType(temp.Type)
+ qe.Responses = temp.Responses
+ qe.Extra = temp.Extra
+ return nil
+}
diff --git a/core/routing/query_test.go b/core/routing/query_test.go
new file mode 100644
index 0000000000..15b4846dbb
--- /dev/null
+++ b/core/routing/query_test.go
@@ -0,0 +1,44 @@
+package routing
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "testing"
+)
+
+func TestEventsCancel(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ ctx, events := RegisterForQueryEvents(ctx)
+ goch := make(chan struct{})
+ var wg sync.WaitGroup
+ wg.Add(2)
+ go func() {
+ defer wg.Done()
+ for i := 0; i < 100; i++ {
+ PublishQueryEvent(ctx, &QueryEvent{Extra: fmt.Sprint(i)})
+ }
+ close(goch)
+ for i := 100; i < 1000; i++ {
+ PublishQueryEvent(ctx, &QueryEvent{Extra: fmt.Sprint(i)})
+ }
+ }()
+ go func() {
+ defer wg.Done()
+ i := 0
+ for e := range events {
+ if i < 100 {
+ if e.Extra != fmt.Sprint(i) {
+ t.Errorf("expected %d, got %s", i, e.Extra)
+ }
+ }
+ i++
+ }
+ if i < 100 {
+ t.Errorf("expected at least 100 events, got %d", i)
+ }
+ }()
+ <-goch
+ cancel()
+ wg.Wait()
+}
diff --git a/core/routing/routing.go b/core/routing/routing.go
new file mode 100644
index 0000000000..bb8de71541
--- /dev/null
+++ b/core/routing/routing.go
@@ -0,0 +1,138 @@
+// Package routing provides interfaces for peer routing and content routing in libp2p.
+package routing
+
+import (
+ "context"
+ "errors"
+
+ ci "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/peer"
+
+ cid "github.com/ipfs/go-cid"
+)
+
+// ErrNotFound is returned when the router fails to find the requested record.
+var ErrNotFound = errors.New("routing: not found")
+
+// ErrNotSupported is returned when the router doesn't support the given record
+// type/operation.
+var ErrNotSupported = errors.New("routing: operation or key not supported")
+
+// ContentProviding is able to announce where to find content on the Routing
+// system.
+type ContentProviding interface {
+	// Provide adds the given cid to the content routing system. If 'true' is
+	// passed, it also announces it; otherwise, it is just kept in the local
+	// accounting of which objects are being provided.
+ Provide(context.Context, cid.Cid, bool) error
+}
+
+// ContentDiscovery is able to retrieve providers for a given CID using
+// the Routing system.
+type ContentDiscovery interface {
+ // Search for peers who are able to provide a given key
+ //
+ // When count is 0, this method will return an unbounded number of
+ // results.
+ FindProvidersAsync(context.Context, cid.Cid, int) <-chan peer.AddrInfo
+}
+
+// ContentRouting is a value provider layer of indirection. It is used to find
+// information about who has what content.
+//
+// Content is identified by CID (content identifier), which encodes a hash
+// of the identified content in a future-proof manner.
+type ContentRouting interface {
+ ContentProviding
+ ContentDiscovery
+}
+
+// PeerRouting is a way to find address information about certain peers.
+// This can be implemented by a simple lookup table, a tracking server,
+// or even a DHT.
+type PeerRouting interface {
+	// FindPeer searches for a peer with the given ID and returns a peer.AddrInfo
+	// with relevant addresses.
+ FindPeer(context.Context, peer.ID) (peer.AddrInfo, error)
+}
+
+// ValueStore is a basic Put/Get interface.
+type ValueStore interface {
+
+	// PutValue adds the value corresponding to the given key.
+	PutValue(context.Context, string, []byte, ...Option) error
+
+	// GetValue searches for the value corresponding to the given key.
+ GetValue(context.Context, string, ...Option) ([]byte, error)
+
+ // SearchValue searches for better and better values from this value
+ // store corresponding to the given Key. By default, implementations must
+ // stop the search after a good value is found. A 'good' value is a value
+ // that would be returned from GetValue.
+ //
+ // Useful when you want a result *now* but still want to hear about
+ // better/newer results.
+ //
+	// Implementations of this method won't return ErrNotFound. When a value
+	// cannot be found, the channel will be closed without passing any results.
+ SearchValue(context.Context, string, ...Option) (<-chan []byte, error)
+}
+
+// Routing is the combination of different routing types supported by libp2p.
+// It can be satisfied by a single item (such as a DHT) or multiple different
+// pieces that are more optimized to each task.
+type Routing interface {
+ ContentRouting
+ PeerRouting
+ ValueStore
+
+ // Bootstrap allows callers to hint to the routing system to get into a
+ // Bootstrapped state and remain there. It is not a synchronous call.
+ Bootstrap(context.Context) error
+
+ // TODO expose io.Closer or plain-old Close error
+}
+
+// PubKeyFetcher is an interface that should be implemented by value stores
+// that can optimize retrieval of public keys.
+//
+// TODO(steb): Consider removing, see https://github.com/libp2p/go-libp2p-routing/issues/22.
+type PubKeyFetcher interface {
+ // GetPublicKey returns the public key for the given peer.
+ GetPublicKey(context.Context, peer.ID) (ci.PubKey, error)
+}
+
+// KeyForPublicKey returns the key used to retrieve public keys
+// from a value store.
+func KeyForPublicKey(id peer.ID) string {
+ return "/pk/" + string(id)
+}
+
+// GetPublicKey retrieves the public key associated with the given peer ID from
+// the value store.
+//
+// If the ValueStore is also a PubKeyFetcher, this method will call GetPublicKey
+// (which may be better optimized) instead of GetValue.
+func GetPublicKey(r ValueStore, ctx context.Context, p peer.ID) (ci.PubKey, error) {
+ switch k, err := p.ExtractPublicKey(); err {
+ case peer.ErrNoPublicKey:
+ // check the datastore
+ case nil:
+ return k, nil
+ default:
+ return nil, err
+ }
+
+ if dht, ok := r.(PubKeyFetcher); ok {
+ // If we have a DHT as our routing system, use optimized fetcher
+ return dht.GetPublicKey(ctx, p)
+ }
+ key := KeyForPublicKey(p)
+ pkval, err := r.GetValue(ctx, key)
+ if err != nil {
+ return nil, err
+ }
+
+ // get PublicKey from node.Data
+ return ci.UnmarshalPublicKey(pkval)
+}
diff --git a/core/sec/insecure/insecure.go b/core/sec/insecure/insecure.go
new file mode 100644
index 0000000000..3377b320c2
--- /dev/null
+++ b/core/sec/insecure/insecure.go
@@ -0,0 +1,231 @@
+// Package insecure provides an insecure, unencrypted implementation of the SecureConn and SecureTransport interfaces.
+//
+// Recommended only for testing and other non-production usage.
+package insecure
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "net"
+
+ ci "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/protocol"
+ "github.com/libp2p/go-libp2p/core/sec"
+ "github.com/libp2p/go-libp2p/core/sec/insecure/pb"
+
+ "github.com/libp2p/go-msgio"
+
+ "google.golang.org/protobuf/proto"
+)
+
+// ID is the multistream-select protocol ID that should be used when identifying
+// this security transport.
+const ID = "/plaintext/2.0.0"
+
+// Transport is a no-op stream security transport. It provides no
+// security and simply mocks the security methods. Identity methods
+// return the local peer's ID and private key, and whatever the remote
+// peer presents as their ID and public key.
+// No authentication of the remote identity is performed.
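+//
+// A construction sketch for tests (priv is an assumed test private key):
+//
+//	id, _ := peer.IDFromPrivateKey(priv)
+//	tpt := NewWithIdentity(ID, id, priv)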
+type Transport struct {
+ id peer.ID
+ key ci.PrivKey
+ protocolID protocol.ID
+}
+
+var _ sec.SecureTransport = &Transport{}
+
+// NewWithIdentity constructs a new insecure transport. The public key is sent to
+// remote peers. No security is provided.
+func NewWithIdentity(protocolID protocol.ID, id peer.ID, key ci.PrivKey) *Transport {
+ return &Transport{
+ protocolID: protocolID,
+ id: id,
+ key: key,
+ }
+}
+
+// LocalPeer returns the transport's local peer ID.
+func (t *Transport) LocalPeer() peer.ID {
+ return t.id
+}
+
+// SecureInbound *pretends to secure* an inbound connection to the given peer.
+// It sends the local peer's ID and public key, and receives the same from the remote peer.
+// No validation is performed as to the authenticity or ownership of the provided public key,
+// and the key exchange provides no security.
+//
+// SecureInbound may fail if the remote peer sends an ID and public key that are inconsistent
+// with each other, or if a network error occurs during the ID exchange.
+func (t *Transport) SecureInbound(_ context.Context, insecure net.Conn, p peer.ID) (sec.SecureConn, error) {
+ conn := &Conn{
+ Conn: insecure,
+ local: t.id,
+ localPubKey: t.key.GetPublic(),
+ }
+
+ if err := conn.runHandshakeSync(); err != nil {
+ return nil, err
+ }
+
+ if p != "" && p != conn.remote {
+ return nil, fmt.Errorf("remote peer sent unexpected peer ID. expected=%s received=%s", p, conn.remote)
+ }
+
+ return conn, nil
+}
+
+// SecureOutbound *pretends to secure* an outbound connection to the given peer.
+// It sends the local peer's ID and public key, and receives the same from the remote peer.
+// No validation is performed as to the authenticity or ownership of the provided public key,
+// and the key exchange provides no security.
+//
+// SecureOutbound may fail if the remote peer sends an ID and public key that are inconsistent
+// with each other, or if the ID sent by the remote peer does not match the one dialed. It may
+// also fail if a network error occurs during the ID exchange.
+func (t *Transport) SecureOutbound(_ context.Context, insecure net.Conn, p peer.ID) (sec.SecureConn, error) {
+ conn := &Conn{
+ Conn: insecure,
+ local: t.id,
+ localPubKey: t.key.GetPublic(),
+ }
+
+ if err := conn.runHandshakeSync(); err != nil {
+ return nil, err
+ }
+
+ if p != conn.remote {
+ return nil, fmt.Errorf("remote peer sent unexpected peer ID. expected=%s received=%s",
+ p, conn.remote)
+ }
+
+ return conn, nil
+}
+
+func (t *Transport) ID() protocol.ID { return t.protocolID }
+
+// Conn is the connection type returned by the insecure transport.
+type Conn struct {
+ net.Conn
+
+ local, remote peer.ID
+ localPubKey, remotePubKey ci.PubKey
+}
+
+func makeExchangeMessage(pubkey ci.PubKey) (*pb.Exchange, error) {
+ keyMsg, err := ci.PublicKeyToProto(pubkey)
+ if err != nil {
+ return nil, err
+ }
+ id, err := peer.IDFromPublicKey(pubkey)
+ if err != nil {
+ return nil, err
+ }
+
+ return &pb.Exchange{
+ Id: []byte(id),
+ Pubkey: keyMsg,
+ }, nil
+}
+
+func (ic *Conn) runHandshakeSync() error {
+ // If we were initialized without keys, behave as in plaintext/1.0.0 (do nothing)
+ if ic.localPubKey == nil {
+ return nil
+ }
+
+ // Generate an Exchange message
+ msg, err := makeExchangeMessage(ic.localPubKey)
+ if err != nil {
+ return err
+ }
+
+ // Send our Exchange and read theirs
+ remoteMsg, err := readWriteMsg(ic.Conn, msg)
+ if err != nil {
+ return err
+ }
+
+ // Pull remote ID and public key from message
+ remotePubkey, err := ci.PublicKeyFromProto(remoteMsg.Pubkey)
+ if err != nil {
+ return err
+ }
+
+ remoteID, err := peer.IDFromBytes(remoteMsg.Id)
+ if err != nil {
+ return err
+ }
+
+ // Validate that ID matches public key
+ if !remoteID.MatchesPublicKey(remotePubkey) {
+ calculatedID, _ := peer.IDFromPublicKey(remotePubkey)
+ return fmt.Errorf("remote peer id does not match public key. id=%s calculated_id=%s",
+ remoteID, calculatedID)
+ }
+
+ // Add remote ID and key to conn state
+ ic.remotePubKey = remotePubkey
+ ic.remote = remoteID
+ return nil
+}
+
+// read and write a message at the same time.
+func readWriteMsg(rw io.ReadWriter, out *pb.Exchange) (*pb.Exchange, error) {
+ const maxMessageSize = 1 << 16
+
+ outBytes, err := proto.Marshal(out)
+ if err != nil {
+ return nil, err
+ }
+ wresult := make(chan error)
+ go func() {
+ w := msgio.NewVarintWriter(rw)
+ wresult <- w.WriteMsg(outBytes)
+ }()
+
+ r := msgio.NewVarintReaderSize(rw, maxMessageSize)
+ b, err1 := r.ReadMsg()
+
+ // Always wait for the read to finish.
+ err2 := <-wresult
+
+ if err1 != nil {
+ return nil, err1
+ }
+ if err2 != nil {
+ r.ReleaseMsg(b)
+ return nil, err2
+ }
+ inMsg := new(pb.Exchange)
+ err = proto.Unmarshal(b, inMsg)
+ return inMsg, err
+}
+
+// LocalPeer returns the local peer ID.
+func (ic *Conn) LocalPeer() peer.ID {
+ return ic.local
+}
+
+// RemotePeer returns the remote peer's ID as exchanged during the handshake,
+// or "" if the transport was initialized without keys. Note that this ID is
+// unauthenticated, because this connection isn't actually secure.
+func (ic *Conn) RemotePeer() peer.ID {
+ return ic.remote
+}
+
+// RemotePublicKey returns whatever public key was given by the remote peer.
+// Note that no verification of ownership is done, as this connection is not secure.
+func (ic *Conn) RemotePublicKey() ci.PubKey {
+ return ic.remotePubKey
+}
+
+// ConnState returns the security connection's state information.
+func (ic *Conn) ConnState() network.ConnectionState {
+ return network.ConnectionState{}
+}
+
+var _ sec.SecureTransport = (*Transport)(nil)
+var _ sec.SecureConn = (*Conn)(nil)
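As a quick illustration (not taken from the files above), two of these transports can complete the plaintext exchange over an in-memory `net.Pipe`; both sides learn the other's unauthenticated ID:

```go
package main

import (
	"context"
	"fmt"
	"net"

	ci "github.com/libp2p/go-libp2p/core/crypto"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/sec/insecure"
)

func newTransport() *insecure.Transport {
	priv, pub, _ := ci.GenerateKeyPair(ci.Ed25519, -1)
	id, _ := peer.IDFromPublicKey(pub)
	return insecure.NewWithIdentity(insecure.ID, id, priv)
}

func main() {
	clientSide, serverSide := net.Pipe()
	client, server := newTransport(), newTransport()

	done := make(chan error, 1)
	go func() {
		// An empty expected ID means connections from any peer are accepted.
		conn, err := server.SecureInbound(context.Background(), serverSide, "")
		if err == nil {
			fmt.Println("server sees:", conn.RemotePeer())
		}
		done <- err
	}()

	conn, err := client.SecureOutbound(context.Background(), clientSide, server.LocalPeer())
	if err != nil {
		panic(err)
	}
	fmt.Println("client sees:", conn.RemotePeer())
	if err := <-done; err != nil {
		panic(err)
	}
}
```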
diff --git a/core/sec/insecure/insecure_test.go b/core/sec/insecure/insecure_test.go
new file mode 100644
index 0000000000..f6ceba4124
--- /dev/null
+++ b/core/sec/insecure/insecure_test.go
@@ -0,0 +1,129 @@
+package insecure
+
+import (
+ "context"
+ "io"
+ "net"
+ "testing"
+
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/sec"
+
+ "github.com/stretchr/testify/require"
+)
+
+// Run a set of sessions through the session setup and verification.
+func TestConnections(t *testing.T) {
+ clientTpt := newTestTransport(t, crypto.RSA, 2048)
+ serverTpt := newTestTransport(t, crypto.Ed25519, 1024)
+
+ clientConn, serverConn, clientErr, serverErr := connect(t, clientTpt, serverTpt, serverTpt.LocalPeer(), "")
+ require.NoError(t, clientErr)
+ require.NoError(t, serverErr)
+ testIDs(t, clientTpt, serverTpt, clientConn, serverConn)
+ testKeys(t, clientTpt, serverTpt, clientConn, serverConn)
+ testReadWrite(t, clientConn, serverConn)
+}
+
+func TestPeerIdMatchInbound(t *testing.T) {
+ clientTpt := newTestTransport(t, crypto.RSA, 2048)
+ serverTpt := newTestTransport(t, crypto.Ed25519, 1024)
+
+ clientConn, serverConn, clientErr, serverErr := connect(t, clientTpt, serverTpt, serverTpt.LocalPeer(), clientTpt.LocalPeer())
+ require.NoError(t, clientErr)
+ require.NoError(t, serverErr)
+ testIDs(t, clientTpt, serverTpt, clientConn, serverConn)
+ testKeys(t, clientTpt, serverTpt, clientConn, serverConn)
+ testReadWrite(t, clientConn, serverConn)
+}
+
+func TestPeerIDMismatchInbound(t *testing.T) {
+ clientTpt := newTestTransport(t, crypto.RSA, 2048)
+ serverTpt := newTestTransport(t, crypto.Ed25519, 1024)
+
+ _, _, _, serverErr := connect(t, clientTpt, serverTpt, serverTpt.LocalPeer(), "a-random-peer")
+ require.Error(t, serverErr)
+ require.Contains(t, serverErr.Error(), "remote peer sent unexpected peer ID")
+}
+
+func TestPeerIDMismatchOutbound(t *testing.T) {
+ clientTpt := newTestTransport(t, crypto.RSA, 2048)
+ serverTpt := newTestTransport(t, crypto.Ed25519, 1024)
+
+ _, _, clientErr, _ := connect(t, clientTpt, serverTpt, "a random peer", "")
+ require.Error(t, clientErr)
+ require.Contains(t, clientErr.Error(), "remote peer sent unexpected peer ID")
+}
+
+func newTestTransport(t *testing.T, typ, bits int) *Transport {
+ priv, pub, err := crypto.GenerateKeyPair(typ, bits)
+ require.NoError(t, err)
+ id, err := peer.IDFromPublicKey(pub)
+ require.NoError(t, err)
+ return NewWithIdentity("/test/1.0.0", id, priv)
+}
+
+// Create a new pair of connected TCP sockets.
+func newConnPair(t *testing.T) (net.Conn, net.Conn) {
+ lstnr, err := net.Listen("tcp", "localhost:0")
+ require.NoError(t, err, "failed to listen")
+
+ var clientErr error
+ var client net.Conn
+ done := make(chan struct{})
+
+ go func() {
+ defer close(done)
+ addr := lstnr.Addr()
+ client, clientErr = net.Dial(addr.Network(), addr.String())
+ }()
+
+ server, err := lstnr.Accept()
+ require.NoError(t, err, "failed to accept")
+
+ <-done
+ lstnr.Close()
+ require.NoError(t, clientErr, "failed to connect")
+ return client, server
+}
+
+func connect(t *testing.T, clientTpt, serverTpt *Transport, clientExpectsID, serverExpectsID peer.ID) (clientConn sec.SecureConn, serverConn sec.SecureConn, clientErr, serverErr error) {
+ client, server := newConnPair(t)
+
+ done := make(chan struct{})
+ go func() {
+ defer close(done)
+ clientConn, clientErr = clientTpt.SecureOutbound(context.TODO(), client, clientExpectsID)
+ }()
+ serverConn, serverErr = serverTpt.SecureInbound(context.TODO(), server, serverExpectsID)
+ <-done
+ return
+}
+
+// Check the peer IDs
+func testIDs(t *testing.T, clientTpt, serverTpt *Transport, clientConn, serverConn sec.SecureConn) {
+ t.Helper()
+ require.Equal(t, clientConn.LocalPeer(), clientTpt.LocalPeer(), "Client Local Peer ID mismatch.")
+ require.Equal(t, clientConn.RemotePeer(), serverTpt.LocalPeer(), "Client Remote Peer ID mismatch.")
+ require.Equal(t, clientConn.LocalPeer(), serverConn.RemotePeer(), "Server Local Peer ID mismatch.")
+}
+
+// Check the keys
+func testKeys(t *testing.T, clientTpt, serverTpt *Transport, clientConn, serverConn sec.SecureConn) {
+ t.Helper()
+ require.True(t, clientConn.RemotePublicKey().Equals(serverTpt.key.GetPublic()), "client conn key mismatch")
+ require.True(t, serverConn.RemotePublicKey().Equals(clientTpt.key.GetPublic()), "server conn key mismatch")
+}
+
+// Check sending and receiving messages
+func testReadWrite(t *testing.T, clientConn, serverConn sec.SecureConn) {
+ before := []byte("hello world")
+ _, err := clientConn.Write(before)
+ require.NoError(t, err)
+
+ after := make([]byte, len(before))
+ _, err = io.ReadFull(serverConn, after)
+ require.NoError(t, err)
+ require.Equal(t, before, after, "message mismatch")
+}
diff --git a/core/sec/insecure/pb/plaintext.pb.go b/core/sec/insecure/pb/plaintext.pb.go
new file mode 100644
index 0000000000..bb3ebf5438
--- /dev/null
+++ b/core/sec/insecure/pb/plaintext.pb.go
@@ -0,0 +1,134 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.6
+// protoc v5.29.2
+// source: core/sec/insecure/pb/plaintext.proto
+
+package pb
+
+import (
+ pb "github.com/libp2p/go-libp2p/core/crypto/pb"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+ unsafe "unsafe"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type Exchange struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Id []byte `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
+ Pubkey *pb.PublicKey `protobuf:"bytes,2,opt,name=pubkey" json:"pubkey,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Exchange) Reset() {
+ *x = Exchange{}
+ mi := &file_core_sec_insecure_pb_plaintext_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Exchange) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Exchange) ProtoMessage() {}
+
+func (x *Exchange) ProtoReflect() protoreflect.Message {
+ mi := &file_core_sec_insecure_pb_plaintext_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Exchange.ProtoReflect.Descriptor instead.
+func (*Exchange) Descriptor() ([]byte, []int) {
+ return file_core_sec_insecure_pb_plaintext_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Exchange) GetId() []byte {
+ if x != nil {
+ return x.Id
+ }
+ return nil
+}
+
+func (x *Exchange) GetPubkey() *pb.PublicKey {
+ if x != nil {
+ return x.Pubkey
+ }
+ return nil
+}
+
+var File_core_sec_insecure_pb_plaintext_proto protoreflect.FileDescriptor
+
+const file_core_sec_insecure_pb_plaintext_proto_rawDesc = "" +
+ "\n" +
+ "$core/sec/insecure/pb/plaintext.proto\x12\fplaintext.pb\x1a\x1bcore/crypto/pb/crypto.proto\"H\n" +
+ "\bExchange\x12\x0e\n" +
+ "\x02id\x18\x01 \x01(\fR\x02id\x12,\n" +
+ "\x06pubkey\x18\x02 \x01(\v2\x14.crypto.pb.PublicKeyR\x06pubkeyB2Z0github.com/libp2p/go-libp2p/core/sec/insecure/pb"
+
+var (
+ file_core_sec_insecure_pb_plaintext_proto_rawDescOnce sync.Once
+ file_core_sec_insecure_pb_plaintext_proto_rawDescData []byte
+)
+
+func file_core_sec_insecure_pb_plaintext_proto_rawDescGZIP() []byte {
+ file_core_sec_insecure_pb_plaintext_proto_rawDescOnce.Do(func() {
+ file_core_sec_insecure_pb_plaintext_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_core_sec_insecure_pb_plaintext_proto_rawDesc), len(file_core_sec_insecure_pb_plaintext_proto_rawDesc)))
+ })
+ return file_core_sec_insecure_pb_plaintext_proto_rawDescData
+}
+
+var file_core_sec_insecure_pb_plaintext_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_core_sec_insecure_pb_plaintext_proto_goTypes = []any{
+ (*Exchange)(nil), // 0: plaintext.pb.Exchange
+ (*pb.PublicKey)(nil), // 1: crypto.pb.PublicKey
+}
+var file_core_sec_insecure_pb_plaintext_proto_depIdxs = []int32{
+ 1, // 0: plaintext.pb.Exchange.pubkey:type_name -> crypto.pb.PublicKey
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_core_sec_insecure_pb_plaintext_proto_init() }
+func file_core_sec_insecure_pb_plaintext_proto_init() {
+ if File_core_sec_insecure_pb_plaintext_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_core_sec_insecure_pb_plaintext_proto_rawDesc), len(file_core_sec_insecure_pb_plaintext_proto_rawDesc)),
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_core_sec_insecure_pb_plaintext_proto_goTypes,
+ DependencyIndexes: file_core_sec_insecure_pb_plaintext_proto_depIdxs,
+ MessageInfos: file_core_sec_insecure_pb_plaintext_proto_msgTypes,
+ }.Build()
+ File_core_sec_insecure_pb_plaintext_proto = out.File
+ file_core_sec_insecure_pb_plaintext_proto_goTypes = nil
+ file_core_sec_insecure_pb_plaintext_proto_depIdxs = nil
+}
diff --git a/core/sec/insecure/pb/plaintext.proto b/core/sec/insecure/pb/plaintext.proto
new file mode 100644
index 0000000000..6b14764bec
--- /dev/null
+++ b/core/sec/insecure/pb/plaintext.proto
@@ -0,0 +1,12 @@
+syntax = "proto2";
+
+package plaintext.pb;
+
+import "core/crypto/pb/crypto.proto";
+
+option go_package = "github.com/libp2p/go-libp2p/core/sec/insecure/pb";
+
+message Exchange {
+ optional bytes id = 1;
+ optional crypto.pb.PublicKey pubkey = 2;
+}
diff --git a/core/sec/security.go b/core/sec/security.go
new file mode 100644
index 0000000000..d9e9183298
--- /dev/null
+++ b/core/sec/security.go
@@ -0,0 +1,43 @@
+// Package sec provides secure connection and transport interfaces for libp2p.
+package sec
+
+import (
+ "context"
+ "fmt"
+ "net"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/protocol"
+)
+
+// SecureConn is an authenticated, encrypted connection.
+type SecureConn interface {
+ net.Conn
+ network.ConnSecurity
+}
+
+// A SecureTransport turns inbound and outbound unauthenticated,
+// plain-text, native connections into authenticated, encrypted connections.
+type SecureTransport interface {
+ // SecureInbound secures an inbound connection.
+ // If p is empty, connections from any peer are accepted.
+ SecureInbound(ctx context.Context, insecure net.Conn, p peer.ID) (SecureConn, error)
+
+ // SecureOutbound secures an outbound connection.
+ SecureOutbound(ctx context.Context, insecure net.Conn, p peer.ID) (SecureConn, error)
+
+ // ID is the protocol ID of the security protocol.
+ ID() protocol.ID
+}
+
+// ErrPeerIDMismatch is returned when the peer ID derived from the remote's
+// public key does not match the expected peer ID.
+type ErrPeerIDMismatch struct {
+ Expected peer.ID
+ Actual peer.ID
+}
+
+func (e ErrPeerIDMismatch) Error() string {
+ return fmt.Sprintf("peer id mismatch: expected %s, but remote key matches %s", e.Expected, e.Actual)
+}
+
+var _ error = (*ErrPeerIDMismatch)(nil)
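Because `ErrPeerIDMismatch` is a struct type, callers can recover the expected and actual IDs with `errors.As`. A small sketch with hypothetical peer IDs:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/libp2p/go-libp2p/core/sec"
)

func main() {
	// Simulate a wrapped handshake error (the peer IDs are illustrative).
	err := fmt.Errorf("handshake failed: %w",
		sec.ErrPeerIDMismatch{Expected: "QmExpected", Actual: "QmActual"})

	var mismatch sec.ErrPeerIDMismatch
	if errors.As(err, &mismatch) {
		fmt.Println("expected:", mismatch.Expected)
		fmt.Println("got:     ", mismatch.Actual)
	}
}
```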
diff --git a/core/test/addrs.go b/core/test/addrs.go
new file mode 100644
index 0000000000..e18849c487
--- /dev/null
+++ b/core/test/addrs.go
@@ -0,0 +1,42 @@
+package test
+
+import (
+ "fmt"
+ "testing"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+func GenerateTestAddrs(n int) []ma.Multiaddr {
+ out := make([]ma.Multiaddr, n)
+ for i := 0; i < n; i++ {
+ a, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/1.2.3.4/tcp/%d", i))
+ if err != nil {
+ continue
+ }
+ out[i] = a
+ }
+ return out
+}
+
+func AssertAddressesEqual(t *testing.T, exp, act []ma.Multiaddr) {
+ t.Helper()
+ if len(exp) != len(act) {
+ t.Fatalf("lengths not the same. expected %d, got %d\n", len(exp), len(act))
+ }
+
+ for _, a := range exp {
+ found := false
+
+ for _, b := range act {
+ if a.Equal(b) {
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ t.Fatalf("expected address %s not found", a)
+ }
+ }
+}
diff --git a/core/test/crypto.go b/core/test/crypto.go
new file mode 100644
index 0000000000..ff09cfac44
--- /dev/null
+++ b/core/test/crypto.go
@@ -0,0 +1,21 @@
+package test
+
+import (
+ "math/rand"
+ "sync/atomic"
+
+ ci "github.com/libp2p/go-libp2p/core/crypto"
+)
+
+var globalSeed atomic.Int64
+
+func RandTestKeyPair(typ, bits int) (ci.PrivKey, ci.PubKey, error) {
+	// Use a monotonic counter instead of a time-based seed: calls in quick
+	// succession could otherwise collide due to low clock resolution.
+ seed := globalSeed.Add(1)
+ return SeededTestKeyPair(typ, bits, seed)
+}
+
+func SeededTestKeyPair(typ, bits int, seed int64) (ci.PrivKey, ci.PubKey, error) {
+ r := rand.New(rand.NewSource(seed))
+ return ci.GenerateKeyPairWithReader(typ, bits, r)
+}
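The seeded variant makes key generation reproducible, which is the point of threading a `math/rand` source through `GenerateKeyPairWithReader`. A sketch (assuming this package is imported as `coretest`):

```go
package main

import (
	"fmt"

	ci "github.com/libp2p/go-libp2p/core/crypto"
	coretest "github.com/libp2p/go-libp2p/core/test"
)

func main() {
	// Same seed -> identical key material: useful for reproducible fixtures.
	priv1, _, err := coretest.SeededTestKeyPair(ci.Ed25519, -1, 42)
	if err != nil {
		panic(err)
	}
	priv2, _, _ := coretest.SeededTestKeyPair(ci.Ed25519, -1, 42)
	fmt.Println(priv1.Equals(priv2)) // true
}
```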
diff --git a/core/test/errors.go b/core/test/errors.go
new file mode 100644
index 0000000000..82a3e696f2
--- /dev/null
+++ b/core/test/errors.go
@@ -0,0 +1,19 @@
+package test
+
+import (
+ "testing"
+)
+
+func AssertNilError(t *testing.T, err error) {
+ t.Helper()
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+}
+
+func ExpectError(t *testing.T, err error, msg string) {
+ t.Helper()
+ if err == nil {
+ t.Error(msg)
+ }
+}
diff --git a/core/test/mockclock.go b/core/test/mockclock.go
new file mode 100644
index 0000000000..4916465586
--- /dev/null
+++ b/core/test/mockclock.go
@@ -0,0 +1,136 @@
+package test
+
+import (
+ "sort"
+ "sync"
+ "time"
+)
+
+type MockClock struct {
+ mu sync.Mutex
+ now time.Time
+ timers []*mockInstantTimer
+ advanceBySem chan struct{}
+}
+
+type mockInstantTimer struct {
+ c *MockClock
+ mu sync.Mutex
+ when time.Time
+ active bool
+ ch chan time.Time
+}
+
+func (t *mockInstantTimer) Ch() <-chan time.Time {
+ return t.ch
+}
+
+func (t *mockInstantTimer) Reset(d time.Time) bool {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ wasActive := t.active
+ t.active = true
+ t.when = d
+
+ // Schedule any timers that need to run. This will run this timer if t.when is before c.now
+ go t.c.AdvanceBy(0)
+
+ return wasActive
+}
+
+func (t *mockInstantTimer) Stop() bool {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ wasActive := t.active
+ t.active = false
+ return wasActive
+}
+
+func NewMockClock() *MockClock {
+ return &MockClock{now: time.Unix(0, 0), advanceBySem: make(chan struct{}, 1)}
+}
+
+// InstantTimer implements a timer that triggers at a fixed instant in time as opposed to after a
+// fixed duration from the moment of creation/reset.
+//
+// In test environments, when using a Timer which fires after a duration, there is a race between
+// the goroutine moving time forward using `clock.AdvanceBy` and the goroutine resetting the
+// timer by doing `timer.Reset(desiredInstant.Sub(time.Now()))`. The value of
+// `desiredInstant.Sub(time.Now())` is different depending on whether `clock.AdvanceBy` finishes
+// before or after the timer reset.
+func (c *MockClock) InstantTimer(when time.Time) *mockInstantTimer {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ t := &mockInstantTimer{
+ c: c,
+ when: when,
+ ch: make(chan time.Time, 1),
+ active: true,
+ }
+ c.timers = append(c.timers, t)
+ return t
+}
+
+// Since implements autorelay.ClockWithInstantTimer
+func (c *MockClock) Since(t time.Time) time.Duration {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ return c.now.Sub(t)
+}
+
+func (c *MockClock) Now() time.Time {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ return c.now
+}
+
+func (c *MockClock) AdvanceBy(dur time.Duration) {
+ c.advanceBySem <- struct{}{}
+ defer func() { <-c.advanceBySem }()
+
+ c.mu.Lock()
+ now := c.now
+ endTime := c.now.Add(dur)
+ c.mu.Unlock()
+
+ // sort timers by when
+ if len(c.timers) > 1 {
+ sort.Slice(c.timers, func(i, j int) bool {
+ c.timers[i].mu.Lock()
+ c.timers[j].mu.Lock()
+ defer c.timers[i].mu.Unlock()
+ defer c.timers[j].mu.Unlock()
+ return c.timers[i].when.Before(c.timers[j].when)
+ })
+ }
+
+ for _, t := range c.timers {
+ t.mu.Lock()
+ if !t.active {
+ t.mu.Unlock()
+ continue
+ }
+ if !t.when.After(now) {
+ t.active = false
+ t.mu.Unlock()
+			// This may block if the channel is full, but that's intended. This way our mock clock never gets too far ahead of the consumer.
+			// It also prevents us from dropping ticks because we're advancing too fast.
+ t.ch <- now
+ } else if !t.when.After(endTime) {
+ now = t.when
+ c.mu.Lock()
+ c.now = now
+ c.mu.Unlock()
+
+ t.active = false
+ t.mu.Unlock()
+ // This may block if the channel is full, but that's intended. See comment above
+ t.ch <- c.now
+ } else {
+ t.mu.Unlock()
+ }
+ }
+ c.mu.Lock()
+ c.now = endTime
+ c.mu.Unlock()
+}
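The race described in the `InstantTimer` comment is avoided by always rescheduling with an absolute deadline rather than a duration computed from `Now()`. A sketch of that pattern (assuming this package is imported as `coretest`):

```go
package main

import (
	"fmt"
	"time"

	coretest "github.com/libp2p/go-libp2p/core/test"
)

func main() {
	cl := coretest.NewMockClock()

	// Schedule at an absolute instant...
	deadline := cl.Now().Add(time.Second)
	timer := cl.InstantTimer(deadline)

	// ...and reschedule by moving the absolute deadline, not by computing
	// a duration from Now(), which is what races with AdvanceBy.
	timer.Reset(deadline.Add(time.Second))

	cl.AdvanceBy(time.Second)
	select {
	case <-timer.Ch():
		fmt.Println("fired too early")
	default:
		fmt.Println("not yet") // the deadline is now t=2s
	}

	cl.AdvanceBy(time.Second)
	fmt.Println((<-timer.Ch()).Unix()) // 2
}
```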
diff --git a/core/test/mockclock_test.go b/core/test/mockclock_test.go
new file mode 100644
index 0000000000..5fa0d888af
--- /dev/null
+++ b/core/test/mockclock_test.go
@@ -0,0 +1,44 @@
+package test
+
+import (
+ "testing"
+ "time"
+)
+
+func TestMockClock(t *testing.T) {
+ cl := NewMockClock()
+ t1 := cl.InstantTimer(cl.Now().Add(2 * time.Second))
+ t2 := cl.InstantTimer(cl.Now().Add(time.Second))
+
+ // Advance the clock by 500ms
+ cl.AdvanceBy(time.Millisecond * 500)
+
+ // No event
+ select {
+ case <-t1.Ch():
+ t.Fatal("t1 fired early")
+ case <-t2.Ch():
+ t.Fatal("t2 fired early")
+ default:
+ }
+
+ // Advance the clock by 500ms
+ cl.AdvanceBy(time.Millisecond * 500)
+
+ // t2 fires
+ select {
+ case <-t1.Ch():
+ t.Fatal("t1 fired early")
+ case <-t2.Ch():
+ }
+
+ // Advance the clock by 2s
+ cl.AdvanceBy(time.Second * 2)
+
+ // t1 fires
+ select {
+ case <-t1.Ch():
+ case <-t2.Ch():
+ t.Fatal("t2 fired again")
+ }
+}
diff --git a/core/test/peer.go b/core/test/peer.go
new file mode 100644
index 0000000000..58d7041037
--- /dev/null
+++ b/core/test/peer.go
@@ -0,0 +1,25 @@
+package test
+
+import (
+ "crypto/rand"
+ "testing"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+
+ mh "github.com/multiformats/go-multihash"
+)
+
+func RandPeerID() (peer.ID, error) {
+ buf := make([]byte, 16)
+	if _, err := rand.Read(buf); err != nil {
+		return "", err
+	}
+ h, _ := mh.Sum(buf, mh.SHA2_256, -1)
+ return peer.ID(h), nil
+}
+
+func RandPeerIDFatal(t testing.TB) peer.ID {
+ p, err := RandPeerID()
+ if err != nil {
+ t.Fatal(err)
+ }
+ return p
+}
diff --git a/core/transport/transport.go b/core/transport/transport.go
new file mode 100644
index 0000000000..e9e59b67ef
--- /dev/null
+++ b/core/transport/transport.go
@@ -0,0 +1,213 @@
+// Package transport provides the Transport interface, which represents
+// the devices and network protocols used to send and receive data.
+package transport
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
+// A CapableConn represents a connection that offers the basic
+// capabilities required by libp2p: stream multiplexing, encryption and
+// peer authentication.
+//
+// These capabilities may be natively provided by the transport, or they
+// may be shimmed via the "connection upgrade" process, which converts a
+// "raw" network connection into one that supports such capabilities by
+// layering an encryption channel and a stream multiplexer.
+//
+// CapableConn provides accessors for the local and remote multiaddrs used to
+// establish the connection and an accessor for the underlying Transport.
+type CapableConn interface {
+ network.MuxedConn
+ network.ConnSecurity
+ network.ConnMultiaddrs
+ network.ConnScoper
+
+ // Transport returns the transport to which this connection belongs.
+ Transport() Transport
+}
+
+// Transport represents any device by which you can connect to and accept
+// connections from other peers.
+//
+// The Transport interface allows you to open connections to other peers
+// by dialing them, and also lets you listen for incoming connections.
+//
+// Connections returned by Dial and passed into Listeners are of type
+// CapableConn, which means that they have been upgraded to support
+// stream multiplexing and connection security (encryption and authentication).
+//
+// If a transport implements `io.Closer` (optional), libp2p will call `Close` on
+// shutdown. NOTE: `Dial` and `Listen` may be called after or concurrently with
+// `Close`.
+//
+// In addition to the Transport interface, transports may implement
+// Resolver or SkipResolver interface. When wrapping/embedding a transport, you should
+// ensure that the Resolver/SkipResolver interface is handled correctly.
+//
+// For a conceptual overview, see https://docs.libp2p.io/concepts/transport/
+type Transport interface {
+ // Dial dials a remote peer. It should try to reuse local listener
+ // addresses if possible, but it may choose not to.
+ Dial(ctx context.Context, raddr ma.Multiaddr, p peer.ID) (CapableConn, error)
+
+ // CanDial returns true if this transport knows how to dial the given
+ // multiaddr.
+ //
+ // Returning true does not guarantee that dialing this multiaddr will
+ // succeed. This function should *only* be used to preemptively filter
+ // out addresses that we can't dial.
+ CanDial(addr ma.Multiaddr) bool
+
+ // Listen listens on the passed multiaddr.
+ Listen(laddr ma.Multiaddr) (Listener, error)
+
+	// Protocols returns the set of protocols handled by this transport.
+ //
+ // See the Network interface for an explanation of how this is used.
+ Protocols() []int
+
+ // Proxy returns true if this is a proxy transport.
+ //
+ // See the Network interface for an explanation of how this is used.
+ // TODO: Make this a part of the go-multiaddr protocol instead?
+ Proxy() bool
+}
+
+// Resolver can be optionally implemented by transports that want to resolve or transform the
+// multiaddr.
+type Resolver interface {
+ Resolve(ctx context.Context, maddr ma.Multiaddr) ([]ma.Multiaddr, error)
+}
+
+// SkipResolver can be optionally implemented by transports that don't want to
+// resolve or transform the multiaddr. Useful for transports that indirectly
+// wrap other transports (e.g. p2p-circuit). This lets the inner transport
+// specify how a multiaddr is resolved later.
+type SkipResolver interface {
+ SkipResolve(ctx context.Context, maddr ma.Multiaddr) bool
+}
+
+// Listener is an interface closely resembling the net.Listener interface. The
+// main differences are that Accept() returns CapableConns of the type in this
+// package, and that a Multiaddr method is exposed alongside the regular Addr
+// method.
+type Listener interface {
+ Accept() (CapableConn, error)
+ Close() error
+ Addr() net.Addr
+ Multiaddr() ma.Multiaddr
+}
+
+// ErrListenerClosed is returned by Listener.Accept when the listener is gracefully closed.
+var ErrListenerClosed = errors.New("listener closed")
+
+// TransportNetwork is an inet.Network with methods for managing transports.
+type TransportNetwork interface {
+ network.Network
+
+ // AddTransport adds a transport to this Network.
+ //
+ // When dialing, this Network will iterate over the protocols in the
+ // remote multiaddr and pick the first protocol registered with a proxy
+ // transport, if any. Otherwise, it'll pick the transport registered to
+ // handle the last protocol in the multiaddr.
+ //
+ // When listening, this Network will iterate over the protocols in the
+ // local multiaddr and pick the *last* protocol registered with a proxy
+ // transport, if any. Otherwise, it'll pick the transport registered to
+ // handle the last protocol in the multiaddr.
+ AddTransport(t Transport) error
+}
+
+// GatedMaListener is a listener that accepts raw (unsecured and non-multiplexed) incoming connections,
+// gates them with a `connmgr.ConnGater`, and creates a resource management scope for them.
+// It can be upgraded to a full libp2p transport listener by the Upgrader.
+//
+// Compared to manet.Listener, this listener creates the resource management scope for the accepted connection.
+type GatedMaListener interface {
+ // Accept waits for and returns the next connection to the listener.
+ Accept() (manet.Conn, network.ConnManagementScope, error)
+
+ // Close closes the listener.
+ // Any blocked Accept operations will be unblocked and return errors.
+ Close() error
+
+ // Multiaddr returns the listener's (local) Multiaddr.
+ Multiaddr() ma.Multiaddr
+
+ // Addr returns the net.Listener's network address.
+ Addr() net.Addr
+}
+
+// Upgrader is a multistream upgrader that can upgrade an underlying connection
+// to a full transport connection (secure and multiplexed).
+type Upgrader interface {
+ // UpgradeListener upgrades the passed multiaddr-net listener into a full libp2p-transport listener.
+ //
+ // Deprecated: Use UpgradeGatedMaListener(upgrader.GateMaListener(manet.Listener)) instead.
+ UpgradeListener(Transport, manet.Listener) Listener
+
+ // GateMaListener creates a GatedMaListener from a manet.Listener. It gates the accepted connection
+ // and creates a resource scope for it.
+ GateMaListener(manet.Listener) GatedMaListener
+
+ // UpgradeGatedMaListener upgrades the passed GatedMaListener into a full libp2p-transport listener.
+ UpgradeGatedMaListener(Transport, GatedMaListener) Listener
+
+ // Upgrade upgrades the multiaddr/net connection into a full libp2p-transport connection.
+ Upgrade(ctx context.Context, t Transport, maconn manet.Conn, dir network.Direction, p peer.ID, scope network.ConnManagementScope) (CapableConn, error)
+}
+
+// DialUpdater provides updates on in-progress dials.
+type DialUpdater interface {
+ // DialWithUpdates dials a remote peer and provides updates on the passed channel.
+ DialWithUpdates(context.Context, ma.Multiaddr, peer.ID, chan<- DialUpdate) (CapableConn, error)
+}
+
+// DialUpdateKind indicates the type of DialUpdate event.
+type DialUpdateKind int
+
+const (
+ // UpdateKindDialFailed indicates dial failed.
+ UpdateKindDialFailed DialUpdateKind = iota
+ // UpdateKindDialSuccessful indicates dial succeeded.
+ UpdateKindDialSuccessful
+ // UpdateKindHandshakeProgressed indicates successful completion of the TCP 3-way
+	// handshake.
+ UpdateKindHandshakeProgressed
+)
+
+func (k DialUpdateKind) String() string {
+ switch k {
+ case UpdateKindDialFailed:
+ return "DialFailed"
+ case UpdateKindDialSuccessful:
+ return "DialSuccessful"
+ case UpdateKindHandshakeProgressed:
+ return "UpdateKindHandshakeProgressed"
+ default:
+		return fmt.Sprintf("DialUpdateKind<%d>", int(k))
+ }
+}
+
+// DialUpdate is used by DialUpdater to provide dial updates.
+type DialUpdate struct {
+ // Kind is the kind of update event.
+ Kind DialUpdateKind
+ // Addr is the peer's address.
+ Addr ma.Multiaddr
+ // Conn is the resulting connection on success.
+ Conn CapableConn
+ // Err is the reason for dial failure.
+ Err error
+}
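A hypothetical helper sketching how a caller might use `DialUpdater`, falling back to a plain `Dial` when the transport doesn't provide updates. The package name is made up, and the sketch assumes the transport sends no further updates once `DialWithUpdates` returns:

```go
// Package dialutil is an illustrative sketch; its names are hypothetical.
package dialutil

import (
	"context"
	"log"

	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/transport"

	ma "github.com/multiformats/go-multiaddr"
)

// DialWithProgress logs each DialUpdate if the transport implements
// DialUpdater, and otherwise falls back to a plain Dial.
func DialWithProgress(ctx context.Context, tpt transport.Transport, addr ma.Multiaddr, p peer.ID) (transport.CapableConn, error) {
	du, ok := tpt.(transport.DialUpdater)
	if !ok {
		return tpt.Dial(ctx, addr, p)
	}

	updates := make(chan transport.DialUpdate, 8)
	go func() {
		for u := range updates {
			log.Printf("dial update: kind=%s addr=%s err=%v", u.Kind, u.Addr, u.Err)
		}
	}()

	conn, err := du.DialWithUpdates(ctx, addr, p, updates)
	// Assumption: the transport sends no further updates once the call
	// returns, so closing the channel here is safe and stops the logger.
	close(updates)
	return conn, err
}
```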
diff --git a/dashboards/README.md b/dashboards/README.md
new file mode 100644
index 0000000000..c3716859d0
--- /dev/null
+++ b/dashboards/README.md
@@ -0,0 +1,53 @@
+# Grafana Dashboards
+
+This directory contains prebuilt dashboards (provided as JSON files) for various components.
+For steps on how to import and use them, please read the [official Grafana documentation](https://grafana.com/docs/grafana/latest/dashboards/export-import/#import-dashboard).
+
+## Public dashboards
+
+You can check the following prebuilt dashboards in action:
+
+1. [AutoNAT](https://protocollabs.grafana.net/public-dashboards/fce8fdeb629742c89bd70f0ce38dfd97)
+2. [Auto Relay](https://protocollabs.grafana.net/public-dashboards/380d52aded12404e9cf6ceccb824b7f9)
+3. [Eventbus](https://protocollabs.grafana.net/public-dashboards/048029ac2d7e4a71b281ffea3535026e)
+4. [Identify](https://protocollabs.grafana.net/public-dashboards/96b70328253d47c0b352dfae06f12a1b)
+5. [Relay Service](https://protocollabs.grafana.net/public-dashboards/4a8cb5d245294893874ed65279b049be)
+6. [Swarm](https://protocollabs.grafana.net/public-dashboards/2bd3f1bee9964d40b6786fbe3eafd9fc)
+
+These metrics come from one of the public IPFS DHT [bootstrap nodes](https://docs.ipfs.tech/concepts/nodes/#bootstrap) run by Protocol Labs.
+At the time of writing (2023-08), these nodes handle many connections across various libp2p implementations, versions, and configurations (they don't handle large file transfers).
+
+## Using locally
+
+For local development and debugging, it can be useful to spin up a local Prometheus and Grafana instance.
+
+To gather metrics, we first need to expose a metrics collection endpoint. Add this to your code:
+
+```go
+import "github.com/prometheus/client_golang/prometheus/promhttp"
+
+go func() {
+ http.Handle("/debug/metrics/prometheus", promhttp.Handler())
+ log.Fatal(http.ListenAndServe(":5001", nil))
+}()
+```
+
+This exposes a metrics collection endpoint at http://localhost:5001/debug/metrics/prometheus. Note that this is the same endpoint that [Kubo](https://github.com/ipfs/kubo) uses, so if you want to gather metrics from Kubo, you can skip this step.
+
+On macOS:
+```bash
+docker compose -f docker-compose.base.yml up
+```
+On Linux:
+```bash
+docker compose -f docker-compose.base.yml -f docker-compose-linux.yml up
+```
+
+Then open Grafana at http://localhost:3000.
+
+
+### Making Dashboards usable with Provisioning
+
+The following section is only relevant for creators of dashboards.
+
+Due to a bug in Grafana, it's not possible to provision dashboards shared for external use directly. We need to apply the workaround described in https://github.com/grafana/grafana/issues/10786#issuecomment-568788499 (adding a few lines in the dashboard JSON file).
diff --git a/dashboards/autonat/autonat.json b/dashboards/autonat/autonat.json
new file mode 100644
index 0000000000..fea5b7ac62
--- /dev/null
+++ b/dashboards/autonat/autonat.json
@@ -0,0 +1,737 @@
+{
+ "__inputs": [
+ {
+ "name": "DS_PROMETHEUS",
+ "label": "Prometheus",
+ "description": "",
+ "type": "datasource",
+ "pluginId": "prometheus",
+ "pluginName": "Prometheus"
+ }
+ ],
+ "__elements": {},
+ "__requires": [
+ {
+ "type": "panel",
+ "id": "gauge",
+ "name": "Gauge",
+ "version": ""
+ },
+ {
+ "type": "grafana",
+ "id": "grafana",
+ "name": "Grafana",
+ "version": "9.3.6"
+ },
+ {
+ "type": "datasource",
+ "id": "prometheus",
+ "name": "Prometheus",
+ "version": "1.0.0"
+ },
+ {
+ "type": "panel",
+ "id": "stat",
+ "name": "Stat",
+ "version": ""
+ },
+ {
+ "type": "panel",
+ "id": "state-timeline",
+ "name": "State timeline",
+ "version": ""
+ },
+ {
+ "type": "panel",
+ "id": "timeseries",
+ "name": "Time series",
+ "version": ""
+ }
+ ],
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": {
+ "type": "grafana",
+ "uid": "-- Grafana --"
+ },
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "target": {
+ "limit": 100,
+ "matchAny": false,
+ "tags": [],
+ "type": "dashboard"
+ },
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "fiscalYearStartMonth": 0,
+ "graphTooltip": 0,
+ "id": null,
+ "links": [],
+ "liveNow": false,
+ "panels": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "continuous-GrYlRd"
+ },
+ "custom": {
+ "fillOpacity": 70,
+ "lineWidth": 0,
+ "spanNulls": true
+ },
+ "mappings": [
+ {
+ "options": {
+ "0": {
+ "color": "yellow",
+ "index": 0,
+ "text": "Unknown"
+ },
+ "1": {
+ "color": "green",
+ "index": 1,
+ "text": "Public"
+ },
+ "2": {
+ "color": "purple",
+ "index": 2,
+ "text": "Private"
+ }
+ },
+ "type": "value"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 18,
+ "x": 0,
+ "y": 0
+ },
+ "id": 2,
+ "options": {
+ "alignValue": "center",
+ "legend": {
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "mergeValues": true,
+ "rowHeight": 0.9,
+ "showValue": "auto",
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "builder",
+ "expr": "libp2p_autonat_reachability_status{instance=~\"$instance\"}",
+ "legendFormat": " ",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Reachability Status",
+ "type": "state-timeline"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "mappings": [],
+ "max": 3,
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 6,
+ "x": 18,
+ "y": 0
+ },
+ "id": 10,
+ "options": {
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "showThresholdLabels": false,
+ "showThresholdMarkers": true
+ },
+ "pluginVersion": "9.3.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "builder",
+ "expr": "libp2p_autonat_reachability_status_confidence{instance=~\"$instance\"}",
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Reachability status confidence",
+ "type": "gauge"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ },
+ "unit": "dateTimeFromNow"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 3,
+ "x": 0,
+ "y": 7
+ },
+ "id": 12,
+ "options": {
+ "colorMode": "value",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "/^Value$/",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.3.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "libp2p_autonat_next_probe_timestamp{instance=~\"$instance\"} * 1000",
+ "format": "table",
+ "instant": true,
+ "legendFormat": "__auto",
+ "range": false,
+ "refId": "A"
+ }
+ ],
+ "title": "Next Probe Time",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "dial error"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "purple",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "dial refused"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "yellow",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "ok"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 21,
+ "x": 3,
+ "y": 7
+ },
+ "id": 4,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "increase(libp2p_autonat_received_dial_response_total{instance=~\"$instance\"}[$__rate_interval])",
+ "instant": false,
+ "legendFormat": "{{response_status}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Received Dial Responses",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "Responses sent to peers that are asking us to dial them",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "dial refused"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "yellow",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "dial error"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "ok"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 0,
+ "y": 16
+ },
+ "id": 6,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "increase(libp2p_autonat_outgoing_dial_response_total{instance=~\"$instance\"}[$__rate_interval])",
+ "legendFormat": "{{response_status}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Outgoing Dial Responses",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "There are multiple reasons why we'd refuse a dial-back request from a remote node:\n* rate limiting\n* no valid addresses\n* dial blocked by policy",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "rate limited"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 12,
+ "y": 16
+ },
+ "id": 14,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "increase(libp2p_autonat_outgoing_dial_refused_total{instance=~\"$instance\"}[$__rate_interval])",
+ "legendFormat": "{{refusal_reason}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Outgoing Dial Refused",
+ "type": "timeseries"
+ }
+ ],
+ "schemaVersion": 37,
+ "style": "dark",
+ "tags": [],
+ "templating": {
+ "list": [
+ {
+ "hide": 0,
+ "label": "datasource",
+ "name": "DS_PROMETHEUS",
+ "options": [],
+ "query": "prometheus",
+ "refresh": 1,
+ "regex": "",
+ "type": "datasource"
+ },
+ {
+ "current": {},
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "definition": "label_values(up, instance)",
+ "hide": 0,
+ "includeAll": true,
+ "multi": true,
+ "name": "instance",
+ "options": [],
+ "query": {
+ "query": "label_values(up, instance)",
+ "refId": "StandardVariableQuery"
+ },
+ "refresh": 1,
+ "skipUrlSync": false,
+ "sort": 0,
+ "type": "query"
+ }
+ ]
+ },
+ "time": {
+ "from": "now-6h",
+ "to": "now"
+ },
+ "timepicker": {},
+ "timezone": "",
+ "title": "go-libp2p autonat",
+ "uid": "YNWSyiJ4k",
+ "version": 5,
+ "weekStart": ""
+}
diff --git a/dashboards/autonatv2/autonatv2.json b/dashboards/autonatv2/autonatv2.json
new file mode 100644
index 0000000000..82697edc26
--- /dev/null
+++ b/dashboards/autonatv2/autonatv2.json
@@ -0,0 +1,713 @@
+{
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": {
+ "type": "grafana",
+ "uid": "-- Grafana --"
+ },
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "target": {
+ "limit": 100,
+ "matchAny": false,
+ "tags": [],
+ "type": "dashboard"
+ },
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "fiscalYearStartMonth": 0,
+ "graphTooltip": 0,
+ "id": 5,
+ "links": [],
+ "panels": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${data_source}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 0
+ },
+ "id": 6,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "hideZeros": false,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "11.6.1",
+ "targets": [
+ {
+ "editorMode": "code",
+ "expr": "sum (increase(libp2p_autonatv2_client_requests_completed_total{instance=~\"$instance\",dial_refused=\"false\"}[5m])) by (instance, ip_or_dns_version, transport, reachability)",
+ "legendFormat": "{{instance}} {{ip_or_dns_version}} {{transport}} {{reachability}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Requests by Reachability",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${data_source}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 0
+ },
+ "id": 5,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "hideZeros": false,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "11.6.1",
+ "targets": [
+ {
+ "editorMode": "code",
+ "expr": "increase(libp2p_autonatv2_client_requests_total{instance=~\"$instance\"}[$__rate_interval])",
+ "legendFormat": "{{instance}} {{outcome}}",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${data_source}"
+ },
+ "editorMode": "code",
+ "expr": "sum (increase(libp2p_autonatv2_client_requests_completed_total{instance=~\"$instance\", dial_refused=\"true\"}[$__rate_interval])) by (instance)",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "{{instance}} refused",
+ "range": true,
+ "refId": "B"
+ }
+ ],
+ "title": "All Requests",
+ "type": "timeseries"
+ },
+ {
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 8
+ },
+ "id": 4,
+ "panels": [],
+ "title": "Server",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${data_source}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic",
+ "seriesBy": "last"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "OK"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "E_DIAL_REFUSED"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "purple",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 9
+ },
+ "id": 1,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "hideZeros": false,
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "11.6.1",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${data_source}"
+ },
+ "editorMode": "code",
+ "expr": "sum by (response_status) (increase(libp2p_autonatv2_requests_completed_total{instance=~\"$instance\", server_error=\"nil\"}[$__rate_interval]))\n",
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Dial Request by Response Status",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${data_source}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic",
+ "seriesBy": "last"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "OK"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "E_DIAL_REFUSED"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "purple",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "E_DIAL_ERROR"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "purple",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 9
+ },
+ "id": 2,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "hideZeros": false,
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "11.6.1",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${data_source}"
+ },
+ "editorMode": "code",
+ "expr": "sum by (ip_or_dns_version, transport, dial_status) (increase(libp2p_autonatv2_requests_completed_total{instance=~\"$instance\", server_error=\"nil\", response_status=\"OK\"}[$__rate_interval]))\n",
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Dial Request by Dial Status",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${data_source}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic",
+ "seriesBy": "last"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "OK"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "E_DIAL_REFUSED"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "purple",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "E_DIAL_ERROR"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "purple",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 5,
+ "y": 17
+ },
+ "id": 3,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "hideZeros": false,
+ "maxHeight": 600,
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "11.6.1",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${data_source}"
+ },
+ "editorMode": "code",
+ "expr": "sum by (server_error) (increase(libp2p_autonatv2_requests_completed_total{instance=~\"$instance\", server_error!=\"nil\"}[$__rate_interval]))\n",
+ "instant": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Dial Request Errors",
+ "type": "timeseries"
+ }
+ ],
+ "preload": false,
+ "refresh": "",
+ "schemaVersion": 41,
+ "tags": [],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "text": "All",
+ "value": [
+ "$__all"
+ ]
+ },
+ "definition": "label_values(up,instance)",
+ "includeAll": true,
+ "label": "instance",
+ "multi": true,
+ "name": "instance",
+ "options": [],
+ "query": {
+ "qryType": 1,
+ "query": "label_values(up,instance)",
+ "refId": "PrometheusVariableQueryEditor-VariableQuery"
+ },
+ "refresh": 1,
+ "regex": "",
+ "type": "query"
+ },
+ {
+ "current": {
+ "text": "prometheus",
+ "value": "${data_source}"
+ },
+ "includeAll": false,
+ "name": "data_source",
+ "options": [],
+ "query": "prometheus",
+ "refresh": 1,
+ "regex": "",
+ "type": "datasource"
+ }
+ ]
+ },
+ "time": {
+ "from": "now-1h",
+ "to": "now"
+ },
+ "timepicker": {},
+ "timezone": "browser",
+ "title": "go-libp2p autoNATv2",
+ "uid": "cdpusyp3xtfcwa",
+ "version": 9
+}
\ No newline at end of file
diff --git a/dashboards/autorelay/autorelay.json b/dashboards/autorelay/autorelay.json
new file mode 100644
index 0000000000..9b558ee6b3
--- /dev/null
+++ b/dashboards/autorelay/autorelay.json
@@ -0,0 +1,1080 @@
+{
+ "__inputs": [
+ {
+ "name": "DS_PROMETHEUS",
+ "label": "Prometheus",
+ "description": "",
+ "type": "datasource",
+ "pluginId": "prometheus",
+ "pluginName": "Prometheus"
+ }
+ ],
+ "__elements": {},
+ "__requires": [
+ {
+ "type": "panel",
+ "id": "gauge",
+ "name": "Gauge",
+ "version": ""
+ },
+ {
+ "type": "grafana",
+ "id": "grafana",
+ "name": "Grafana",
+ "version": "9.4.7"
+ },
+ {
+ "type": "panel",
+ "id": "piechart",
+ "name": "Pie chart",
+ "version": ""
+ },
+ {
+ "type": "datasource",
+ "id": "prometheus",
+ "name": "Prometheus",
+ "version": "1.0.0"
+ },
+ {
+ "type": "panel",
+ "id": "stat",
+ "name": "Stat",
+ "version": ""
+ },
+ {
+ "type": "panel",
+ "id": "state-timeline",
+ "name": "State timeline",
+ "version": ""
+ },
+ {
+ "type": "panel",
+ "id": "timeseries",
+ "name": "Time series",
+ "version": ""
+ }
+ ],
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": {
+ "type": "grafana",
+ "uid": "-- Grafana --"
+ },
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "target": {
+ "limit": 100,
+ "matchAny": false,
+ "tags": [],
+ "type": "dashboard"
+ },
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "fiscalYearStartMonth": 0,
+ "graphTooltip": 0,
+ "id": null,
+ "links": [],
+ "liveNow": false,
+ "panels": [
+ {
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "id": 16,
+ "panels": [],
+ "title": "Status",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "continuous-GrYlRd"
+ },
+ "custom": {
+ "fillOpacity": 70,
+ "lineWidth": 0,
+ "spanNulls": true
+ },
+ "mappings": [
+ {
+ "options": {
+ "0": {
+ "color": "purple",
+ "index": 1,
+ "text": "No"
+ },
+ "1": {
+ "color": "green",
+ "index": 0,
+ "text": "Yes"
+ }
+ },
+ "type": "value"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 24,
+ "x": 0,
+ "y": 1
+ },
+ "id": 2,
+ "options": {
+ "alignValue": "center",
+ "legend": {
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "mergeValues": true,
+ "rowHeight": 0.9,
+ "showValue": "auto",
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "libp2p_autorelay_status{instance=~\"$instance\"}",
+ "legendFormat": "active",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Relay Finder Status",
+ "type": "state-timeline"
+ },
+ {
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 6
+ },
+ "id": 18,
+ "panels": [],
+ "title": "Reservations",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "mappings": [],
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 6,
+ "x": 0,
+ "y": 7
+ },
+ "id": 4,
+ "options": {
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "showThresholdLabels": false,
+ "showThresholdMarkers": true
+ },
+ "pluginVersion": "9.4.7",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "libp2p_autorelay_reservations_opened_total{instance=~\"$instance\"} - libp2p_autorelay_reservations_closed_total{instance=~\"$instance\"}",
+ "instant": true,
+ "legendFormat": "current reservations",
+ "range": false,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "libp2p_autorelay_desired_reservations{instance=~\"$instance\"}",
+ "format": "time_series",
+ "hide": false,
+ "instant": true,
+ "legendFormat": "desired",
+ "range": false,
+ "refId": "config-query"
+ }
+ ],
+ "title": "Current Reservations",
+ "transformations": [
+ {
+ "id": "configFromData",
+ "options": {
+ "applyTo": {
+ "id": "byFrameRefID",
+ "options": "A"
+ },
+ "configRefId": "config-query",
+ "mappings": [
+ {
+ "fieldName": "desired",
+ "handlerKey": "max"
+ },
+ {
+ "fieldName": "Time",
+ "handlerKey": "__ignore"
+ }
+ ]
+ }
+ }
+ ],
+ "type": "gauge"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "new: success"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "refresh: success"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "super-light-green",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 18,
+ "x": 6,
+ "y": 7
+ },
+ "id": 10,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "increase(libp2p_autorelay_reservation_requests_outcome_total{instance=~\"$instance\"}[$__rate_interval])",
+ "format": "time_series",
+ "instant": false,
+ "interval": "",
+ "legendFormat": "{{request_type}}: {{outcome}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Reservation Requests Outcome",
+ "type": "timeseries"
+ },
+ {
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 15
+ },
+ "id": 20,
+ "panels": [],
+ "title": "Candidates",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "fixed"
+ },
+ "custom": {
+ "fillOpacity": 70,
+ "lineWidth": 0,
+ "spanNulls": false
+ },
+ "mappings": [
+ {
+ "options": {
+ "0": {
+ "color": "purple",
+ "index": 0,
+ "text": "peer source rate limited"
+ },
+ "1": {
+ "color": "blue",
+ "index": 1,
+ "text": "waiting on peer chan"
+ },
+ "2": {
+ "color": "green",
+ "index": 2,
+ "text": "waiting for trigger"
+ },
+ "3": {
+ "color": "light-yellow",
+ "index": 3,
+ "text": "stopped"
+ }
+ },
+ "type": "value"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 24,
+ "x": 0,
+ "y": 16
+ },
+ "id": 28,
+ "options": {
+ "alignValue": "center",
+ "legend": {
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "mergeValues": true,
+ "rowHeight": 0.9,
+ "showValue": "auto",
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "libp2p_autorelay_candidate_loop_state{instance=~\"$instance\"}",
+ "instant": false,
+ "legendFormat": "state",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Candidate Loop State",
+ "type": "state-timeline"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "blue",
+ "mode": "fixed"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 5,
+ "x": 1,
+ "y": 22
+ },
+ "id": 6,
+ "options": {
+ "colorMode": "value",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.4.7",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "libp2p_autorelay_candidates_total{type=\"added\",instance=~\"$instance\"} - ignoring(type) libp2p_autorelay_candidates_total{type=\"removed\",instance=~\"$instance\"}",
+ "legendFormat": "num candidates",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Current Candidates",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ }
+ },
+ "mappings": []
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "yes"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "blue",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "no"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "purple",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 5,
+ "x": 6,
+ "y": 22
+ },
+ "id": 12,
+ "options": {
+ "legend": {
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "pieType": "pie",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "increase(libp2p_autorelay_candidates_circuit_v2_support_total{instance=~\"$instance\"}[$__range])",
+ "legendFormat": "{{support}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Candidates Circuit V2 Support",
+ "type": "piechart"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "blue",
+ "mode": "fixed"
+ },
+ "mappings": [
+ {
+ "options": {
+ "from": -9223372036854776000,
+ "result": {
+ "index": 0,
+ "text": "-"
+ },
+ "to": -86400
+ },
+ "type": "range"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ },
+ "unit": "dateTimeFromNow"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 6,
+ "x": 11,
+ "y": 22
+ },
+ "id": 26,
+ "options": {
+ "colorMode": "value",
+ "graphMode": "none",
+ "justifyMode": "center",
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.4.7",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "libp2p_autorelay_scheduled_work_time{work_type=\"old candidate check\",instance=~\"$instance\"} * 1000\n",
+ "instant": true,
+ "legendFormat": "__auto",
+ "range": false,
+ "refId": "A"
+ }
+ ],
+ "title": "Next Old Candidate Check",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "blue",
+ "mode": "fixed"
+ },
+ "mappings": [
+ {
+ "options": {
+ "from": -86399,
+ "result": {
+ "index": 0,
+ "text": "immediately"
+ },
+ "to": 0
+ },
+ "type": "range"
+ },
+ {
+ "options": {
+ "from": -9223372036854776000,
+ "result": {
+ "index": 1,
+ "text": "-"
+ },
+ "to": -86400
+ },
+ "type": "range"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ },
+ "unit": "s"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 6,
+ "x": 17,
+ "y": 22
+ },
+ "id": 30,
+ "options": {
+ "colorMode": "value",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.4.7",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "round(libp2p_autorelay_scheduled_work_time{work_type=\"allowed peer source call\",instance=~\"$instance\"} - time()) ",
+ "instant": true,
+ "legendFormat": "__auto",
+ "range": false,
+ "refId": "A"
+ }
+ ],
+ "title": "Next Allowed Call to peer source",
+ "type": "stat"
+ },
+ {
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 30
+ },
+ "id": 22,
+ "panels": [],
+ "title": "Relay Addresses",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "yellow",
+ "mode": "fixed"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 6,
+ "x": 0,
+ "y": 31
+ },
+ "id": 8,
+ "options": {
+ "colorMode": "value",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.4.7",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "libp2p_autorelay_relay_addresses_count{instance=~\"$instance\"}",
+ "legendFormat": "num addresses",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Relay Addresses",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "update triggered"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "yellow",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 18,
+ "x": 6,
+ "y": 31
+ },
+ "id": 14,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "increase(libp2p_autorelay_relay_addresses_updated_total{instance=~\"$instance\"}[$__rate_interval])",
+ "legendFormat": "update triggered",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Relay Addresses Updated",
+ "type": "timeseries"
+ }
+ ],
+ "refresh": "",
+ "revision": 1,
+ "schemaVersion": 38,
+ "style": "dark",
+ "tags": [],
+ "templating": {
+ "list": [
+ {
+ "hide": 0,
+ "label": "datasource",
+ "name": "DS_PROMETHEUS",
+ "options": [],
+ "query": "prometheus",
+ "refresh": 1,
+ "regex": "",
+ "type": "datasource"
+ },
+ {
+ "current": {},
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "definition": "label_values(up, instance)",
+ "hide": 0,
+ "includeAll": true,
+ "multi": true,
+ "name": "instance",
+ "options": [],
+ "query": {
+ "query": "label_values(up, instance)",
+ "refId": "StandardVariableQuery"
+ },
+ "refresh": 1,
+ "skipUrlSync": false,
+ "sort": 0,
+ "type": "query"
+ }
+ ]
+ },
+ "time": {
+ "from": "now-1h",
+ "to": "now"
+ },
+ "timepicker": {},
+ "timezone": "",
+ "title": "libp2p Autorelay",
+ "uid": "deQ_uf-4k",
+ "version": 6,
+ "weekStart": ""
+}
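The autorelay panels above (candidate loop state, reservation outcomes, the peer-source call schedule) only populate once a host actually runs the relay finder. Below is a minimal sketch of wiring that up, assuming the current go-libp2p API (`EnableAutoRelayWithPeerSource`, `autorelay.WithMinInterval`); the empty peer-source body and the one-minute interval are placeholders for illustration.

```go
package main

import (
	"context"
	"time"

	"github.com/libp2p/go-libp2p"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/p2p/host/autorelay"
)

func main() {
	// The peer source feeds relay candidates to the candidate loop
	// tracked by the "Candidate Loop State" panel. Here it returns an
	// empty stream; a real node would query a DHT or a static list.
	peerSource := func(ctx context.Context, num int) <-chan peer.AddrInfo {
		out := make(chan peer.AddrInfo)
		close(out)
		return out
	}

	h, err := libp2p.New(
		libp2p.EnableAutoRelayWithPeerSource(
			peerSource,
			// Rate-limits calls to the peer source; the "Next Allowed
			// Call to Peer Source" stat reflects this schedule.
			autorelay.WithMinInterval(time.Minute),
		),
	)
	if err != nil {
		panic(err)
	}
	defer h.Close()

	select {} // run until killed so metrics accumulate
}
```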
diff --git a/dashboards/dashboard.yml b/dashboards/dashboard.yml
new file mode 100644
index 0000000000..b0a81133f3
--- /dev/null
+++ b/dashboards/dashboard.yml
@@ -0,0 +1,12 @@
+apiVersion: 1
+
+providers:
+ - name: "libp2p dashboard provider"
+ orgId: 1
+ type: file
+ disableDeletion: false
+ updateIntervalSeconds: 10
+ allowUiUpdates: false
+ options:
+ path: /var/lib/grafana/dashboards
+ foldersFromFilesStructure: true
diff --git a/dashboards/datasources.yml b/dashboards/datasources.yml
new file mode 100644
index 0000000000..ed47ec11a1
--- /dev/null
+++ b/dashboards/datasources.yml
@@ -0,0 +1,13 @@
+apiVersion: 1
+
+deleteDatasources:
+ - name: Prometheus
+ orgId: 1
+
+datasources:
+ - name: Prometheus
+ orgId: 1
+ type: prometheus
+ access: proxy
+ url: http://prometheus:9090
+ editable: false
diff --git a/dashboards/docker-compose-linux.yml b/dashboards/docker-compose-linux.yml
new file mode 100644
index 0000000000..da9293249e
--- /dev/null
+++ b/dashboards/docker-compose-linux.yml
@@ -0,0 +1,12 @@
+version: "3.7"
+services:
+ prometheus:
+ network_mode: "host"
+ extra_hosts:
+ # define a host.docker.internal alias, so we can use the same prometheus.yml on Linux and macOS
+ - "host.docker.internal:127.0.0.1"
+ grafana:
+ network_mode: "host"
+ extra_hosts:
+ # define a prometheus alias, so we can use the same datasources.yml on Linux and macOS
+ - "prometheus:127.0.0.1"
diff --git a/dashboards/docker-compose.base.yml b/dashboards/docker-compose.base.yml
new file mode 100644
index 0000000000..b46514af93
--- /dev/null
+++ b/dashboards/docker-compose.base.yml
@@ -0,0 +1,29 @@
+version: "3.7"
+services:
+ prometheus:
+ image: prom/prometheus:latest
+ ports:
+ - "9090:9090"
+ volumes:
+ - ./prometheus.yml:/etc/prometheus/prometheus.yml
+ grafana:
+ image: grafana/grafana:latest
+ depends_on:
+ - prometheus
+ ports:
+ - "3000:3000"
+ environment:
+ - GF_AUTH_DISABLE_LOGIN_FORM=true
+ - GF_AUTH_ANONYMOUS_ENABLED=true
+ - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
+ volumes:
+ - ./dashboard.yml:/etc/grafana/provisioning/dashboards/main.yml
+ - ./datasources.yml:/etc/grafana/provisioning/datasources/prom.yml
+ - ./autonat/autonat.json:/var/lib/grafana/dashboards/autonat.json
+ - ./autorelay/autorelay.json:/var/lib/grafana/dashboards/autorelay.json
+ - ./eventbus/eventbus.json:/var/lib/grafana/dashboards/eventbus.json
+ - ./holepunch/holepunch.json:/var/lib/grafana/dashboards/holepunch.json
+ - ./identify/identify.json:/var/lib/grafana/dashboards/identify.json
+ - ./relaysvc/relaysvc.json:/var/lib/grafana/dashboards/relaysvc.json
+ - ./swarm/swarm.json:/var/lib/grafana/dashboards/swarm.json
+ - ./resource-manager/resource-manager.json:/var/lib/grafana/dashboards/resource-manager.json
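For the Prometheus container above to have anything to scrape, the instrumented application must expose the default registry over HTTP. A minimal sketch follows, assuming metrics are enabled (the default in recent go-libp2p releases) and that the accompanying prometheus.yml scrapes this port via host.docker.internal; the `:5001` port is an assumption for illustration.

```go
package main

import (
	"log"
	"net/http"

	"github.com/libp2p/go-libp2p"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// go-libp2p registers its metrics with the default Prometheus
	// registry unless DisableMetrics or a custom registerer is set.
	h, err := libp2p.New()
	if err != nil {
		log.Fatal(err)
	}
	defer h.Close()

	// Serve the default registry; point prometheus.yml at this port.
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":5001", nil))
}
```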
diff --git a/dashboards/eventbus/eventbus.json b/dashboards/eventbus/eventbus.json
new file mode 100644
index 0000000000..f51549aa86
--- /dev/null
+++ b/dashboards/eventbus/eventbus.json
@@ -0,0 +1,561 @@
+{
+ "__inputs": [
+ {
+ "name": "DS_PROMETHEUS",
+ "label": "Prometheus",
+ "description": "",
+ "type": "datasource",
+ "pluginId": "prometheus",
+ "pluginName": "Prometheus"
+ }
+ ],
+ "__elements": {},
+ "__requires": [
+ {
+ "type": "panel",
+ "id": "gauge",
+ "name": "Gauge",
+ "version": ""
+ },
+ {
+ "type": "grafana",
+ "id": "grafana",
+ "name": "Grafana",
+ "version": "9.3.6"
+ },
+ {
+ "type": "datasource",
+ "id": "prometheus",
+ "name": "Prometheus",
+ "version": "1.0.0"
+ },
+ {
+ "type": "panel",
+ "id": "state-timeline",
+ "name": "State timeline",
+ "version": ""
+ },
+ {
+ "type": "panel",
+ "id": "timeseries",
+ "name": "Time series",
+ "version": ""
+ }
+ ],
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": {
+ "type": "grafana",
+ "uid": "-- Grafana --"
+ },
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "target": {
+ "limit": 100,
+ "matchAny": false,
+ "tags": [],
+ "type": "dashboard"
+ },
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "fiscalYearStartMonth": 0,
+ "graphTooltip": 0,
+ "id": null,
+ "links": [],
+ "liveNow": false,
+ "panels": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "id": 2,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "rate(libp2p_eventbus_events_emitted_total{instance=~\"$instance\"}[$__rate_interval])",
+ "format": "time_series",
+ "instant": false,
+ "interval": "",
+ "legendFormat": "{{event}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Events Types emitted",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "mappings": [],
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 24,
+ "x": 0,
+ "y": 9
+ },
+ "id": 6,
+ "options": {
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "showThresholdLabels": false,
+ "showThresholdMarkers": true,
+ "text": {
+ "titleSize": 14
+ }
+ },
+ "pluginVersion": "9.3.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "builder",
+ "expr": "libp2p_eventbus_subscribers_total{instance=~\"$instance\"}",
+ "legendFormat": "{{event}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Event Subscribers",
+ "type": "gauge"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 15
+ },
+ "id": 12,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "builder",
+ "expr": "rate(libp2p_eventbus_subscriber_event_queued{instance=~\"$instance\"}[$__rate_interval])",
+ "legendFormat": "{{subscriber_name}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Events emitted By Subscriber",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "Event Subscribers need to consume events quickly enough, otherwise they risk stalling the libp2p process.\nSubscribers use a buffered channel to catch temporary bursts. A queue length that doesn't return to 0 might be indicative of a problem.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 15
+ },
+ "id": 8,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "builder",
+ "expr": "libp2p_eventbus_subscriber_queue_length{instance=~\"$instance\"}",
+ "hide": false,
+ "legendFormat": "{{subscriber_name}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Subscriber Queue Length",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "When the subscriber event queue fills up, it blocks the libp2p process. This can be mitigated by\n1. consuming events quickly enough on the subscriber side\n2. using a large enough buffer to absorb bursts",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "continuous-GrYlRd"
+ },
+ "custom": {
+ "fillOpacity": 75,
+ "lineWidth": 0,
+ "spanNulls": 60000
+ },
+ "mappings": [
+ {
+ "options": {
+ "0": {
+ "color": "green",
+ "index": 0,
+ "text": "OK"
+ },
+ "1": {
+ "color": "red",
+ "index": 1,
+ "text": "FULL"
+ }
+ },
+ "type": "value"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "#73BF69",
+ "value": null
+ }
+ ]
+ },
+ "unit": "string"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 11,
+ "w": 23,
+ "x": 0,
+ "y": 23
+ },
+ "id": 10,
+ "options": {
+ "alignValue": "center",
+ "legend": {
+ "displayMode": "list",
+ "placement": "right",
+ "showLegend": false
+ },
+ "mergeValues": true,
+ "rowHeight": 0.94,
+ "showValue": "always",
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "builder",
+ "exemplar": false,
+ "expr": "libp2p_eventbus_subscriber_queue_full{instance=~\"$instance\"}",
+ "format": "time_series",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "{{subscriber_name}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Subscriber Queue Full",
+ "type": "state-timeline"
+ }
+ ],
+ "refresh": false,
+ "schemaVersion": 37,
+ "style": "dark",
+ "tags": [],
+ "templating": {
+ "list": [
+ {
+ "hide": 0,
+ "label": "datasource",
+ "name": "DS_PROMETHEUS",
+ "options": [],
+ "query": "prometheus",
+ "refresh": 1,
+ "regex": "",
+ "type": "datasource"
+ },
+ {
+ "current": {},
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "definition": "label_values(up, instance)",
+ "hide": 0,
+ "includeAll": true,
+ "multi": true,
+ "name": "instance",
+ "options": [],
+ "query": {
+ "query": "label_values(up, instance)",
+ "refId": "StandardVariableQuery"
+ },
+ "refresh": 1,
+ "skipUrlSync": false,
+ "sort": 0,
+ "type": "query"
+ }
+ ]
+ },
+ "time": {
+ "from": "now-15m",
+ "to": "now"
+ },
+ "timepicker": {},
+ "timezone": "",
+ "title": "go-libp2p EventBus",
+ "uid": "ZFbI6NAVn",
+ "version": 4,
+ "weekStart": ""
+}
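The queue-length and queue-full panels above reduce to one rule: subscribers must drain their channel promptly. Here is a minimal sketch of a well-behaved subscriber, assuming the current go-libp2p eventbus API; the event type and the 128-slot buffer are choices made for illustration.

```go
package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p"
	"github.com/libp2p/go-libp2p/core/event"
	"github.com/libp2p/go-libp2p/p2p/host/eventbus"
)

func main() {
	h, err := libp2p.New()
	if err != nil {
		panic(err)
	}
	defer h.Close()

	// A buffer absorbs bursts; when it fills, the emitter blocks and
	// the "Subscriber Queue Full" panel flips to FULL.
	sub, err := h.EventBus().Subscribe(
		new(event.EvtLocalProtocolsUpdated),
		eventbus.BufSize(128),
	)
	if err != nil {
		panic(err)
	}
	defer sub.Close()

	// Drain promptly: any blocking work done here is what makes the
	// "Subscriber Queue Length" panel climb.
	for e := range sub.Out() {
		evt := e.(event.EvtLocalProtocolsUpdated)
		fmt.Println("protocols updated:", evt.Added, evt.Removed)
	}
}
```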
diff --git a/dashboards/holepunch/holepunch.json b/dashboards/holepunch/holepunch.json
new file mode 100644
index 0000000000..7d986f72e6
--- /dev/null
+++ b/dashboards/holepunch/holepunch.json
@@ -0,0 +1,1136 @@
+{
+ "__inputs": [
+ {
+ "name": "DS_PROMETHEUS",
+ "label": "Prometheus",
+ "description": "",
+ "type": "datasource",
+ "pluginId": "prometheus",
+ "pluginName": "Prometheus"
+ }
+ ],
+ "__elements": {},
+ "__requires": [
+ {
+ "type": "grafana",
+ "id": "grafana",
+ "name": "Grafana",
+ "version": "9.3.6"
+ },
+ {
+ "type": "panel",
+ "id": "piechart",
+ "name": "Pie chart",
+ "version": ""
+ },
+ {
+ "type": "datasource",
+ "id": "prometheus",
+ "name": "Prometheus",
+ "version": "1.0.0"
+ },
+ {
+ "type": "panel",
+ "id": "timeseries",
+ "name": "Time series",
+ "version": ""
+ }
+ ],
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": {
+ "type": "grafana",
+ "uid": "-- Grafana --"
+ },
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "target": {
+ "limit": 100,
+ "matchAny": false,
+ "tags": [],
+ "type": "dashboard"
+ },
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "fiscalYearStartMonth": 0,
+ "graphTooltip": 0,
+ "id": null,
+ "links": [],
+ "liveNow": false,
+ "panels": [
+ {
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "id": 8,
+ "panels": [],
+ "title": "DCUtR Initiator",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ }
+ },
+ "mappings": []
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "failed"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "purple",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "success"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 1
+ },
+ "id": 19,
+ "options": {
+ "legend": {
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "pieType": "pie",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "sum by (outcome) (increase(libp2p_holepunch_address_outcomes_total {side=\"initiator\", transport=\"tcp\", outcome=~\"success|failed\"}[$__range]))",
+ "legendFormat": "{{outcome}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Hole punches: TCP",
+ "type": "piechart"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ }
+ },
+ "mappings": []
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "failed"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "purple",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "success"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 1
+ },
+ "id": 20,
+ "options": {
+ "legend": {
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "pieType": "pie",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "sum by (outcome) (increase(libp2p_holepunch_address_outcomes_total {side=\"initiator\", transport=~\"quic|quic-v1\", outcome=~\"success|failed\"}[$__range]))",
+ "legendFormat": "{{outcome}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Hole punches: QUIC",
+ "type": "piechart"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ }
+ },
+ "mappings": []
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "failed"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "purple",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "success"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "no_suitable_address"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "blue",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 9
+ },
+ "id": 6,
+ "options": {
+ "legend": {
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "pieType": "pie",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "sum by (outcome) (increase(libp2p_holepunch_outcomes_total{side=\"initiator\",instance=~\"$instance\"}[$__range]))",
+ "legendFormat": "{{outcome}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Hole punches: Total",
+ "type": "piechart"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "success"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "failed"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "purple",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 9
+ },
+ "id": 4,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "increase(libp2p_holepunch_direct_dials_total{instance=~\"$instance\"}[$__rate_interval])",
+ "legendFormat": "{{outcome}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Direct dials",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "success"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "failed"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "purple",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "no_suitable_address"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "blue",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 17
+ },
+ "id": 23,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "sum by (outcome) (libp2p_holepunch_outcomes_total{side=\"initiator\",instance=~\"$instance\"}) - (sum by (outcome)(libp2p_holepunch_outcomes_total{side=\"initiator\",instance=~\"$instance\"} offset $__interval) or vector(0))",
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Hole punches",
+ "type": "timeseries"
+ },
+ {
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 25
+ },
+ "id": 14,
+ "panels": [],
+ "title": "DCUtR Receiver",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ }
+ },
+ "mappings": []
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "failed"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "purple",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "success"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 26
+ },
+ "id": 18,
+ "options": {
+ "legend": {
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "pieType": "pie",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "sum by (outcome) (increase(libp2p_holepunch_address_outcomes_total {side=\"receiver\", transport=~\"quic|quic-v1\", outcome=~\"success|failed\"}[$__range]))",
+ "legendFormat": "{{outcome}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Hole punches: QUIC ",
+ "type": "piechart"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ }
+ },
+ "mappings": []
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "failed"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "purple",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "success"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 26
+ },
+ "id": 21,
+ "options": {
+ "legend": {
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "pieType": "pie",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "sum by (outcome) (increase(libp2p_holepunch_address_outcomes_total {side=\"receiver\", transport=\"tcp\", outcome=~\"success|failed\"}[$__range]))",
+ "legendFormat": "{{outcome}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Hole punches: TCP",
+ "type": "piechart"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ }
+ },
+ "mappings": []
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "failed"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "purple",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "success"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "no_suitable_address"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "light-blue",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 34
+ },
+ "id": 17,
+ "options": {
+ "legend": {
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "pieType": "pie",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "sum by (outcome) (increase(libp2p_holepunch_outcomes_total{side=\"receiver\",instance=~\"$instance\"}[$__range]))",
+ "legendFormat": "{{outcome}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Hole punches: Total",
+ "type": "piechart"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "success"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "failed"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "purple",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "no_suitable_address"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "blue",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 34
+ },
+ "id": 24,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "(sum by (outcome) (libp2p_holepunch_outcomes_total{side=\"receiver\",instance=~\"$instance\"})) - (sum by (outcome)(libp2p_holepunch_outcomes_total{side=\"receiver\",instance=~\"$instance\"} offset $__interval) or vector(0))",
+ "legendFormat": "{{outcome}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Hole punches",
+ "type": "timeseries"
+ }
+ ],
+ "refresh": "",
+ "revision": 1,
+ "schemaVersion": 37,
+ "style": "dark",
+ "tags": [],
+ "templating": {
+ "list": [
+ {
+ "hide": 0,
+ "label": "datasource",
+ "name": "DS_PROMETHEUS",
+ "options": [],
+ "query": "prometheus",
+ "refresh": 1,
+ "regex": "",
+ "type": "datasource"
+ },
+ {
+ "current": {},
+ "definition": "label_values(instance)",
+ "hide": 0,
+ "includeAll": true,
+ "multi": true,
+ "name": "instance",
+ "options": [],
+ "query": {
+ "query": "label_values(instance)",
+ "refId": "StandardVariableQuery"
+ },
+ "refresh": 1,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 0,
+ "type": "query"
+ }
+ ]
+ },
+ "time": {
+ "from": "now-1h",
+ "to": "now"
+ },
+ "timepicker": {},
+ "timezone": "",
+ "title": "go-libp2p Hole Punches",
+ "uid": "Ao24vOBVk",
+ "version": 6,
+ "weekStart": ""
+}
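The initiator/receiver outcome panels above come from the DCUtR service, which only runs when explicitly enabled. A minimal sketch, assuming the current go-libp2p option name; in practice the node also needs relay connectivity (on by default) to open the coordination stream.

```go
package main

import (
	"github.com/libp2p/go-libp2p"
)

func main() {
	// EnableHolePunching starts the DCUtR service that emits the
	// libp2p_holepunch_* metrics charted above.
	h, err := libp2p.New(
		libp2p.EnableHolePunching(),
	)
	if err != nil {
		panic(err)
	}
	defer h.Close()

	select {} // keep the host alive
}
```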
diff --git a/dashboards/host-addrs/host-addrs.json b/dashboards/host-addrs/host-addrs.json
new file mode 100644
index 0000000000..6a79282a01
--- /dev/null
+++ b/dashboards/host-addrs/host-addrs.json
@@ -0,0 +1,279 @@
+{
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": {
+ "type": "grafana",
+ "uid": "-- Grafana --"
+ },
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "fiscalYearStartMonth": 0,
+ "graphTooltip": 0,
+ "id": 16,
+ "links": [],
+ "panels": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "bdpgk86mw6jgga"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "id": 1,
+ "options": {
+ "colorMode": "value",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "percentChangeColorMode": "standard",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "showPercentChange": false,
+ "textMode": "auto",
+ "wideLayout": true
+ },
+ "pluginVersion": "11.6.1",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "bdpgk86mw6jgga"
+ },
+ "editorMode": "code",
+ "expr": "libp2p_host_addrs_reachable{instance=~\"$instance\"}",
+ "legendFormat": "{{instance}} {{ipv}}, {{transport}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Reachable Addrs",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "bdpgk86mw6jgga"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 24,
+ "x": 0,
+ "y": 8
+ },
+ "id": 2,
+ "options": {
+ "colorMode": "value",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "percentChangeColorMode": "standard",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "showPercentChange": false,
+ "textMode": "auto",
+ "wideLayout": true
+ },
+ "pluginVersion": "11.6.1",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "bdpgk86mw6jgga"
+ },
+ "editorMode": "code",
+ "expr": "libp2p_host_addrs_unreachable{instance=~\"$instance\"}",
+ "legendFormat": "{{instance}} {{ipv}}, {{transport}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Unreachable Addrs",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "bdpgk86mw6jgga"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 24,
+ "x": 0,
+ "y": 16
+ },
+ "id": 3,
+ "options": {
+ "colorMode": "value",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "percentChangeColorMode": "standard",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "showPercentChange": false,
+ "textMode": "auto",
+ "wideLayout": true
+ },
+ "pluginVersion": "11.6.1",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "bdpgk86mw6jgga"
+ },
+ "editorMode": "code",
+ "expr": "libp2p_host_addrs_unknown{instance=~\"$instance\"}",
+ "legendFormat": "{{instance}} {{ipv}}, {{transport}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Unknown Reachability Addrs",
+ "type": "stat"
+ }
+ ],
+ "preload": false,
+ "schemaVersion": 41,
+ "tags": [],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "text": [
+ "All"
+ ],
+ "value": [
+ "$__all"
+ ]
+ },
+ "definition": "label_values(up,instance)",
+ "includeAll": true,
+ "label": "instance",
+ "multi": true,
+ "name": "instance",
+ "options": [],
+ "query": {
+ "qryType": 1,
+ "query": "label_values(up,instance)",
+ "refId": "PrometheusVariableQueryEditor-VariableQuery"
+ },
+ "refresh": 1,
+ "regex": "",
+ "type": "query"
+ },
+ {
+ "current": {
+ "text": "prometheus",
+ "value": "bdpgk86mw6jgga"
+ },
+ "includeAll": false,
+ "name": "data_source",
+ "options": [],
+ "query": "prometheus",
+ "refresh": 1,
+ "regex": "",
+ "type": "datasource"
+ }
+ ]
+ },
+ "time": {
+ "from": "now-6h",
+ "to": "now"
+ },
+ "timepicker": {},
+ "timezone": "browser",
+ "title": "go-libp2p Host Addresses",
+ "uid": "beon8z59rh7nkf",
+ "version": 5
+}
\ No newline at end of file
diff --git a/dashboards/identify/identify.json b/dashboards/identify/identify.json
new file mode 100644
index 0000000000..4f4631cfa9
--- /dev/null
+++ b/dashboards/identify/identify.json
@@ -0,0 +1,884 @@
+{
+ "__inputs": [
+ {
+ "name": "DS_PROMETHEUS",
+ "label": "Prometheus",
+ "description": "",
+ "type": "datasource",
+ "pluginId": "prometheus",
+ "pluginName": "Prometheus"
+ }
+ ],
+ "__elements": {},
+ "__requires": [
+ {
+ "type": "grafana",
+ "id": "grafana",
+ "name": "Grafana",
+ "version": "9.3.6"
+ },
+ {
+ "type": "panel",
+ "id": "piechart",
+ "name": "Pie chart",
+ "version": ""
+ },
+ {
+ "type": "datasource",
+ "id": "prometheus",
+ "name": "Prometheus",
+ "version": "1.0.0"
+ },
+ {
+ "type": "panel",
+ "id": "stat",
+ "name": "Stat",
+ "version": ""
+ },
+ {
+ "type": "panel",
+ "id": "timeseries",
+ "name": "Time series",
+ "version": ""
+ }
+ ],
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": {
+ "type": "grafana",
+ "uid": "-- Grafana --"
+ },
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "target": {
+ "limit": 100,
+ "matchAny": false,
+ "tags": [],
+ "type": "dashboard"
+ },
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "fiscalYearStartMonth": 0,
+ "graphTooltip": 0,
+ "id": null,
+ "links": [],
+ "liveNow": false,
+ "panels": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 0,
+ "y": 0
+ },
+ "id": 2,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "increase(libp2p_identify_identify_pushes_triggered_total{instance=~\"$instance\"}[$__rate_interval])",
+ "legendFormat": "{{trigger}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Pushes triggered",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 12,
+ "y": 0
+ },
+ "id": 4,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "rate(libp2p_identify_identify_total{instance=~\"$instance\"}[$__rate_interval])",
+ "legendFormat": "{{dir}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Identify Messages",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "mappings": [],
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ },
+ "unit": "none"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "address count"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "blue",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 6,
+ "x": 0,
+ "y": 9
+ },
+ "id": 9,
+ "options": {
+ "colorMode": "value",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.3.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "libp2p_identify_addrs_count{instance=~\"$instance\"}",
+ "legendFormat": "address count",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Outgoing Address Count",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "mappings": [],
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "protocols count"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "purple",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 6,
+ "x": 6,
+ "y": 9
+ },
+ "id": 17,
+ "options": {
+ "colorMode": "value",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.3.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "libp2p_identify_protocols_count{instance=~\"$instance\"}",
+ "legendFormat": "protocols count",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Outgoing Protocols Count",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 9
+ },
+ "id": 5,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "rate(libp2p_identify_identify_push_total{instance=~\"$instance\"}[$__rate_interval])",
+ "legendFormat": "{{dir}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Identify Push Messages",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "address count"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "blue",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 17
+ },
+ "id": 11,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.5, sum(rate(libp2p_identify_addrs_received_bucket{instance=~\"$instance\"}[$__rate_interval])) by (le))",
+ "legendFormat": "50th percentile",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.90, sum(rate(libp2p_identify_addrs_received_bucket{instance=~\"$instance\"}[$__rate_interval])) by (le))",
+ "hide": false,
+ "legendFormat": "90th percentile",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.99, sum(rate(libp2p_identify_addrs_received_bucket{instance=~\"$instance\"}[$__rate_interval])) by (le))",
+ "hide": false,
+ "legendFormat": "99 percentile",
+ "range": true,
+ "refId": "C"
+ }
+ ],
+ "title": "Incoming Address Count (Avg)",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "protocols count"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "purple",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 17
+ },
+ "id": 13,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.5, sum(rate(libp2p_identify_protocols_received_bucket{instance=~\"$instance\"}[$__rate_interval])) by (le))",
+ "legendFormat": "50th percentile",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.90, sum(rate(libp2p_identify_protocols_received_bucket{instance=~\"$instance\"}[$__rate_interval])) by (le))",
+ "hide": false,
+ "legendFormat": "90th percentile",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.99, sum(rate(libp2p_identify_protocols_received_bucket{instance=~\"$instance\"}[$__rate_interval])) by (le))",
+ "hide": false,
+ "legendFormat": "99th percentile",
+ "range": true,
+ "refId": "C"
+ }
+ ],
+ "title": "Incoming Protocols Count (Avg)",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ }
+ },
+ "mappings": []
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 25
+ },
+ "id": 15,
+ "options": {
+ "legend": {
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "pieType": "pie",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "increase(libp2p_identify_conn_push_support_total{instance=~\"$instance\"}[$__rate_interval])",
+ "legendFormat": "{{support}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "New Connections: Push Support",
+ "type": "piechart"
+ }
+ ],
+ "schemaVersion": 37,
+ "style": "dark",
+ "tags": [],
+ "templating": {
+ "list": [
+ {
+ "hide": 0,
+ "label": "datasource",
+ "name": "DS_PROMETHEUS",
+ "options": [],
+ "query": "prometheus",
+ "refresh": 1,
+ "regex": "",
+ "type": "datasource"
+ },
+ {
+ "current": {},
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "definition": "label_values(up, instance)",
+ "hide": 0,
+ "includeAll": true,
+ "multi": true,
+ "name": "instance",
+ "options": [],
+ "query": {
+ "query": "label_values(up, instance)",
+ "refId": "StandardVariableQuery"
+ },
+ "refresh": 1,
+ "skipUrlSync": false,
+ "sort": 0,
+ "type": "query"
+ }
+ ]
+ },
+ "time": {
+ "from": "now-15m",
+ "to": "now"
+ },
+ "timepicker": {},
+ "timezone": "",
+ "title": "go-libp2p Identify",
+ "uid": "0NDzQQ0Vz",
+ "version": 2,
+ "weekStart": ""
+}
diff --git a/dashboards/prometheus.yml b/dashboards/prometheus.yml
new file mode 100644
index 0000000000..89534d55dd
--- /dev/null
+++ b/dashboards/prometheus.yml
@@ -0,0 +1,32 @@
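+# Example Prometheus configuration for scraping a locally running go-libp2p application (and Prometheus itself).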
+global:
+ scrape_interval: 15s
+ scrape_timeout: 10s
+ evaluation_interval: 15s
+alerting:
+ alertmanagers:
+ - scheme: http
+ timeout: 10s
+ api_version: v2
+ static_configs:
+ - targets: []
+scrape_configs:
+- job_name: prometheus
+ honor_timestamps: true
+ scrape_interval: 15s
+ scrape_timeout: 10s
+ metrics_path: /metrics
+ scheme: http
+ static_configs:
+ - targets:
+ - localhost:9090
+- job_name: libp2p
+ honor_timestamps: true
+ scrape_interval: 15s
+ scrape_timeout: 10s
+ metrics_path: /debug/metrics/prometheus
+ scheme: http
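+  # host.docker.internal resolves to the host machine when Prometheus runs in Docker (e.g., Docker Desktop); adjust the target to wherever your application serves its metrics.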
+ static_configs:
+ - targets:
+ - host.docker.internal:5001
diff --git a/dashboards/relaysvc/relaysvc.json b/dashboards/relaysvc/relaysvc.json
new file mode 100644
index 0000000000..c2a0e224cb
--- /dev/null
+++ b/dashboards/relaysvc/relaysvc.json
@@ -0,0 +1,1236 @@
+{
+ "__inputs": [
+ {
+ "name": "DS_PROMETHEUS",
+ "label": "Prometheus",
+ "description": "",
+ "type": "datasource",
+ "pluginId": "prometheus",
+ "pluginName": "Prometheus"
+ }
+ ],
+ "__elements": {},
+ "__requires": [
+ {
+ "type": "grafana",
+ "id": "grafana",
+ "name": "Grafana",
+ "version": "9.3.6"
+ },
+ {
+ "type": "datasource",
+ "id": "prometheus",
+ "name": "Prometheus",
+ "version": "1.0.0"
+ },
+ {
+ "type": "panel",
+ "id": "stat",
+ "name": "Stat",
+ "version": ""
+ },
+ {
+ "type": "panel",
+ "id": "state-timeline",
+ "name": "State timeline",
+ "version": ""
+ },
+ {
+ "type": "panel",
+ "id": "timeseries",
+ "name": "Time series",
+ "version": ""
+ }
+ ],
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": {
+ "type": "grafana",
+ "uid": "-- Grafana --"
+ },
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "target": {
+ "limit": 100,
+ "matchAny": false,
+ "tags": [],
+ "type": "dashboard"
+ },
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "fiscalYearStartMonth": 0,
+ "graphTooltip": 0,
+ "id": null,
+ "links": [],
+ "liveNow": false,
+ "panels": [
+ {
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "id": 20,
+ "panels": [],
+ "title": "Relay Service",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "continuous-GrYlRd"
+ },
+ "custom": {
+ "fillOpacity": 70,
+ "lineWidth": 0,
+ "spanNulls": false
+ },
+ "mappings": [
+ {
+ "options": {
+ "0": {
+ "color": "purple",
+ "index": 0,
+ "text": "no"
+ },
+ "1": {
+ "color": "green",
+ "index": 1,
+ "text": "yes"
+ }
+ },
+ "type": "value"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 24,
+ "x": 0,
+ "y": 1
+ },
+ "id": 2,
+ "options": {
+ "alignValue": "center",
+ "legend": {
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "mergeValues": true,
+ "rowHeight": 0.9,
+ "showValue": "auto",
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "libp2p_relaysvc_status{instance=~\"$instance\"}",
+ "legendFormat": "active",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Status",
+ "type": "state-timeline"
+ },
+ {
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 8
+ },
+ "id": 22,
+ "panels": [],
+ "title": "Reservations",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "mappings": [
+ {
+ "options": {
+ "match": "null",
+ "result": {
+ "index": 0,
+ "text": "0"
+ }
+ },
+ "type": "special"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 6,
+ "x": 0,
+ "y": 9
+ },
+ "id": 4,
+ "options": {
+ "colorMode": "value",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.3.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "libp2p_relaysvc_reservations_total{type=\"opened\",instance=~\"$instance\"} - ignoring(type) libp2p_relaysvc_reservations_total{type=\"closed\",instance=~\"$instance\"}",
+ "legendFormat": "active reservations",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Active Reservations",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "error"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "purple",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "ok"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 18,
+ "x": 6,
+ "y": 9
+ },
+ "id": 8,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "increase(libp2p_relaysvc_reservation_request_response_status_total{instance=~\"$instance\"}[$__rate_interval])",
+ "legendFormat": "{{status}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Reservation Request Response Status",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 17
+ },
+ "id": 26,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "increase(libp2p_relaysvc_reservations_total{type=\"opened\",instance=~\"$instance\"}[$__rate_interval])",
+ "legendFormat": "new",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "increase(libp2p_relaysvc_reservations_total{type=\"renewed\",instance=~\"$instance\"}[$__rate_interval])",
+ "hide": false,
+ "legendFormat": "renewed",
+ "range": true,
+ "refId": "B"
+ }
+ ],
+ "title": "Reservation Requests: New vs Renewal",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 17
+ },
+ "id": 12,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "increase(libp2p_relaysvc_reservation_rejections_total{instance=~\"$instance\"}[$__rate_interval])",
+ "legendFormat": "{{reason}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Reservation Request Rejected",
+ "type": "timeseries"
+ },
+ {
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 25
+ },
+ "id": 24,
+ "panels": [],
+ "title": "Connections",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "decbytes"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 6,
+ "x": 0,
+ "y": 26
+ },
+ "id": 28,
+ "options": {
+ "colorMode": "value",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.3.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "increase(libp2p_relaysvc_data_transferred_bytes_total{instance=~\"$instance\"}[$__range])",
+ "legendFormat": "data transferred",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Total Data Transferred",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "blue",
+ "mode": "fixed"
+ },
+ "mappings": [
+ {
+ "options": {
+ "match": "null",
+ "result": {
+ "index": 0,
+ "text": "0"
+ }
+ },
+ "type": "special"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 6,
+ "x": 6,
+ "y": 26
+ },
+ "id": 6,
+ "options": {
+ "colorMode": "value",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "9.3.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "libp2p_relaysvc_connections_total{type=\"opened\",instance=~\"$instance\"} - ignoring(type) libp2p_relaysvc_connections_total{type=\"closed\",instance=~\"$instance\"}",
+ "legendFormat": "active connections",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Active Connections",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": [
+ {
+ "__systemRef": "hideSeriesFrom",
+ "matcher": {
+ "id": "byNames",
+ "options": {
+ "mode": "exclude",
+ "names": [
+ "error"
+ ],
+ "prefix": "All except:",
+ "readOnly": true
+ }
+ },
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": false,
+ "tooltip": false,
+ "viz": true
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "error"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "purple",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "ok"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 26
+ },
+ "id": 10,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "increase(libp2p_relaysvc_connection_request_response_status_total{instance=~\"$instance\"}[$__rate_interval])",
+ "legendFormat": "{{status}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Connection Request Response Status",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ },
+ "unit": "Bps"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "bandwidth"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "blue",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "__systemRef": "hideSeriesFrom",
+ "matcher": {
+ "id": "byNames",
+ "options": {
+ "mode": "exclude",
+ "names": [
+ "bandwidth"
+ ],
+ "prefix": "All except:",
+ "readOnly": true
+ }
+ },
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": false,
+ "tooltip": false,
+ "viz": true
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 34
+ },
+ "id": 16,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "2 * rate(libp2p_relaysvc_data_transferred_bytes_total{instance=~\"$instance\"}[$__rate_interval])",
+ "legendFormat": "bandwidth",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Bandwidth Used",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 34
+ },
+ "id": 14,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "increase(libp2p_relaysvc_connection_rejections_total{instance=~\"$instance\"}[$__rate_interval])",
+ "legendFormat": "{{reason}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Connection Request Rejected",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ },
+ "unit": "s"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 42
+ },
+ "id": 18,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "rate(libp2p_relaysvc_connection_duration_seconds_sum{instance=~\"$instance\"}[$__range])/rate(libp2p_relaysvc_connection_duration_seconds_count{instance=~\"$instance\"}[$__range])\n",
+ "hide": false,
+ "legendFormat": "rolling average",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Connection Duration",
+ "type": "timeseries"
+ }
+ ],
+ "refresh": "1m",
+ "schemaVersion": 37,
+ "style": "dark",
+ "tags": [],
+ "templating": {
+ "list": [
+ {
+ "hide": 0,
+ "label": "datasource",
+ "name": "DS_PROMETHEUS",
+ "options": [],
+ "query": "prometheus",
+ "refresh": 1,
+ "regex": "",
+ "type": "datasource"
+ },
+ {
+ "current": {},
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "definition": "label_values(up, instance)",
+ "hide": 0,
+ "includeAll": true,
+ "multi": true,
+ "name": "instance",
+ "options": [],
+ "query": {
+ "query": "label_values(up, instance)",
+ "refId": "StandardVariableQuery"
+ },
+ "refresh": 1,
+ "skipUrlSync": false,
+ "sort": 0,
+ "type": "query"
+ }
+ ]
+ },
+ "time": {
+ "from": "now-1h",
+ "to": "now"
+ },
+ "timepicker": {},
+ "timezone": "",
+ "title": "go-libp2p Relay Service",
+ "uid": "C6RUfAx4z",
+ "version": 5,
+ "weekStart": ""
+}
diff --git a/dashboards/resource-manager/README.md b/dashboards/resource-manager/README.md
new file mode 100644
index 0000000000..cded715679
--- /dev/null
+++ b/dashboards/resource-manager/README.md
@@ -0,0 +1,40 @@
+# Ready-to-go Grafana Dashboards
+
+Here are some prebuilt dashboards that you can add to your Grafana instance. To
+import them, follow the Grafana docs [here](https://grafana.com/docs/grafana/latest/dashboards/export-import/#import-dashboard).
+
+## Setup
+
+Metrics are enabled by default and are registered with
+`prometheus.DefaultRegisterer`. To use a different `Registerer`, pass the
+libp2p option `libp2p.PrometheusRegisterer`.
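+
+For example, to route libp2p metrics to a dedicated registry (a minimal
+sketch; the variable names are illustrative):
+
+```go
+package main
+
+import (
+    "github.com/libp2p/go-libp2p"
+    "github.com/prometheus/client_golang/prometheus"
+)
+
+func main() {
+    // Register libp2p metrics with a dedicated registry instead of
+    // prometheus.DefaultRegisterer.
+    reg := prometheus.NewRegistry()
+    host, err := libp2p.New(libp2p.PrometheusRegisterer(reg))
+    if err != nil {
+        panic(err)
+    }
+    defer host.Close()
+}
+```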
+
+## Updating Dashboard JSON
+
+Use the share functionality on an existing dashboard, and make sure to toggle
+"Export for sharing externally". See the [Grafana
+Docs](https://grafana.com/docs/grafana/latest/dashboards/export-import/#exporting-a-dashboard)
+for more details.
diff --git a/dashboards/resource-manager/resource-manager.json b/dashboards/resource-manager/resource-manager.json
new file mode 100644
index 0000000000..9b4ff68314
--- /dev/null
+++ b/dashboards/resource-manager/resource-manager.json
@@ -0,0 +1,1828 @@
+{
+ "__inputs": [
+ {
+ "name": "DS_PROMETHEUS",
+ "label": "Prometheus",
+ "description": "",
+ "type": "datasource",
+ "pluginId": "prometheus",
+ "pluginName": "Prometheus"
+ }
+ ],
+ "__elements": {},
+ "__requires": [
+ {
+ "type": "panel",
+ "id": "bargauge",
+ "name": "Bar gauge",
+ "version": ""
+ },
+ {
+ "type": "grafana",
+ "id": "grafana",
+ "name": "Grafana",
+ "version": "9.3.6"
+ },
+ {
+ "type": "datasource",
+ "id": "prometheus",
+ "name": "Prometheus",
+ "version": "1.0.0"
+ },
+ {
+ "type": "panel",
+ "id": "text",
+ "name": "Text",
+ "version": ""
+ },
+ {
+ "type": "panel",
+ "id": "timeseries",
+ "name": "Time series",
+ "version": ""
+ }
+ ],
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": {
+ "type": "datasource",
+ "uid": "grafana"
+ },
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "target": {
+ "limit": 100,
+ "matchAny": false,
+ "tags": [],
+ "type": "dashboard"
+ },
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "fiscalYearStartMonth": 0,
+ "graphTooltip": 0,
+ "id": null,
+ "links": [],
+ "liveNow": false,
+ "panels": [
+ {
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "id": 65,
+ "panels": [],
+ "title": "Blocked Resources",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "This should be empty. If it's large you are running into your resource manager limits.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 1
+ },
+ "id": 67,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "exemplar": true,
+ "expr": "rate(libp2p_rcmgr_blocked_resources{instance=~\"$instance\"}[$__rate_interval])",
+ "interval": "",
+ "legendFormat": "",
+ "refId": "A"
+ }
+ ],
+ "title": "Number of blocked resource requests",
+ "type": "timeseries"
+ },
+ {
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 9
+ },
+ "id": 42,
+ "panels": [],
+ "title": "Streams",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 10
+ },
+ "id": 48,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "exemplar": true,
+ "expr": "libp2p_rcmgr_streams{scope=\"system\",instance=~\"$instance\"}",
+ "interval": "",
+ "legendFormat": "{{dir}} {{instance}}",
+ "refId": "A"
+ }
+ ],
+ "title": "System Streams",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 10
+ },
+ "id": 44,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "exemplar": true,
+ "expr": "libp2p_rcmgr_streams{scope=\"transient\",instance=~\"$instance\"}",
+ "interval": "",
+ "legendFormat": "{{dir}} {{instance}}",
+ "refId": "A"
+ }
+ ],
+ "title": "Transient Streams",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "How many streams does each protocol have open.\n\nA protocol is similiar to a service except there may be many protocols for a single service. A service may have \nmultiple protocols for backwards compatibility. For example, bitswap the service is a single entity, but it supports bitswap protocols v1.1 and v1.2.\n\nA service is attached to a stream manually by the protocol developer. A service may be missing if there is no `StreamScope.SetService` call.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 24,
+ "x": 0,
+ "y": 18
+ },
+ "id": 52,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "exemplar": true,
+ "expr": "libp2p_rcmgr_streams{scope=\"protocol\",instance=~\"$instance\"}",
+ "interval": "",
+ "legendFormat": "{{dir}} {{protocol}} {{instance}}",
+ "refId": "A"
+ }
+ ],
+ "title": "Streams by protocol",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 10,
+ "w": 24,
+ "x": 0,
+ "y": 27
+ },
+ "id": 35,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "exemplar": true,
+ "expr": "histogram_quantile(0.50, (libp2p_rcmgr_peer_streams_bucket{instance=~\"$instance\"} - libp2p_rcmgr_previous_peer_streams_bucket{instance=~\"$instance\"})) - 0.1",
+ "interval": "",
+ "legendFormat": "p50 {{dir}} streams per peer โย {{instance}}",
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "exemplar": true,
+ "expr": "histogram_quantile(0.90, (libp2p_rcmgr_peer_streams_bucket{instance=~\"$instance\"} - libp2p_rcmgr_previous_peer_streams_bucket{instance=~\"$instance\"})) - 0.1",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "p90 {{dir}} streams per peer โย {{instance}}",
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "exemplar": true,
+ "expr": "histogram_quantile(1, (libp2p_rcmgr_peer_streams_bucket{instance=~\"$instance\"} - libp2p_rcmgr_previous_peer_streams_bucket{instance=~\"$instance\"})) - 0.1",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "max {{dir}} streams per peer โย {{instance}}",
+ "refId": "C"
+ }
+ ],
+ "title": "Streams per peer, aggregated",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "How many peers have N streams open",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 37
+ },
+ "id": 46,
+ "options": {
+ "displayMode": "gradient",
+ "minVizHeight": 10,
+ "minVizWidth": 0,
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "last"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "showUnfilled": true
+ },
+ "pluginVersion": "9.3.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "exemplar": false,
+ "expr": "sum without (instance) (libp2p_rcmgr_peer_streams_bucket{dir=\"inbound\",instance=~\"$instance\"}-libp2p_rcmgr_previous_peer_streams_bucket{dir=\"inbound\",instance=~\"$instance\"})",
+ "format": "heatmap",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "{{le}}",
+ "refId": "A"
+ }
+ ],
+ "title": "Current inbound streams per peer histogram. Across all instances",
+ "type": "bargauge"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "How many peers have N streams open",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 37
+ },
+ "id": 47,
+ "options": {
+ "displayMode": "gradient",
+ "minVizHeight": 10,
+ "minVizWidth": 0,
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "last"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "showUnfilled": true
+ },
+ "pluginVersion": "9.3.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "exemplar": false,
+ "expr": "sum without (instance) (libp2p_rcmgr_peer_streams_bucket{dir=\"outbound\",instance=~\"$instance\"}-libp2p_rcmgr_previous_peer_streams_bucket{dir=\"outbound\",instance=~\"$instance\"})",
+ "format": "heatmap",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "{{le}}",
+ "refId": "A"
+ }
+ ],
+ "title": "Current outbound streams per peer histogram. Across all instances",
+ "type": "bargauge"
+ },
+ {
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 45
+ },
+ "id": 29,
+ "panels": [],
+ "title": "Connections",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 24,
+ "x": 0,
+ "y": 46
+ },
+ "id": 31,
+ "options": {
+ "code": {
+ "language": "plaintext",
+ "showLineNumbers": false,
+ "showMiniMap": false
+ },
+ "content": "# Libp2p Connections\n\nBroken down by [Resource Scope](https://github.com/libp2p/go-libp2p/blob/master/p2p/host/resource-manager/README.md#resource-scopes). \nScopes represent what is imposing limits on this resource. For connections, we have three main scopes:\n\n1. System. The total number of connections owned by the process. Includes both application usable connections + the number of transient connections.\n2. Transient. The total number of connections that are being upgraded into usable connections in the process.\n3. Peer. The total number of connections associated with this peer. When a connection has this scope it is usable by the application.\n\nAn example of a System connection is a connection you can open a libp2p stream on and send data.\nA transient connection is not yet usable for application data since it may be negotiating \na security handshake or a multiplexer.\n\nConnections start in the transient scope and move over to the System and Peer scopes once they are ready to be used.\n\nIt would be unusual to see a lot of transient connections. It would also be unusal to see a peer with a lot of connections.",
+ "mode": "markdown"
+ },
+ "pluginVersion": "9.3.6",
+ "title": "libp2p Connections",
+ "type": "text"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 55
+ },
+ "id": 33,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "exemplar": true,
+ "expr": "libp2p_rcmgr_connections{scope=\"system\",instance=~\"$instance\"}",
+ "interval": "",
+ "legendFormat": "{{dir}} {{instance}}",
+ "refId": "A"
+ }
+ ],
+ "title": "System Connections",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 55
+ },
+ "id": 36,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "exemplar": true,
+ "expr": "libp2p_rcmgr_connections{scope=\"transient\",instance=~\"$instance\"}",
+ "interval": "",
+ "legendFormat": "{{dir}} {{instance}}",
+ "refId": "A"
+ }
+ ],
+ "title": "Transient Connections",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "gridPos": {
+ "h": 3,
+ "w": 24,
+ "x": 0,
+ "y": 63
+ },
+ "id": 38,
+ "options": {
+ "code": {
+ "language": "plaintext",
+ "showLineNumbers": false,
+ "showMiniMap": false
+ },
+ "content": "These are aggregated stats. They are grouped by buckets. Each bucket represents how many peers have N number of connections.",
+ "mode": "markdown"
+ },
+ "pluginVersion": "9.3.6",
+ "title": "Connections per Peer",
+ "type": "text"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 10,
+ "w": 24,
+ "x": 0,
+ "y": 66
+ },
+ "id": 45,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "exemplar": true,
+ "expr": "histogram_quantile(0.50, (libp2p_rcmgr_peer_connections_bucket{instance=~\"$instance\"} - libp2p_rcmgr_previous_peer_connections_bucket{instance=~\"$instance\"})) - 0.1",
+ "interval": "",
+ "legendFormat": "p50 {{dir}} connections per peer โย {{instance}}",
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "exemplar": true,
+ "expr": "histogram_quantile(0.90, (libp2p_rcmgr_peer_connections_bucket{instance=~\"$instance\"} - libp2p_rcmgr_previous_peer_connections_bucket{instance=~\"$instance\"})) - 0.1",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "p90 {{dir}} connections per peer โย {{instance}}",
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "exemplar": true,
+ "expr": "histogram_quantile(1, (libp2p_rcmgr_peer_connections_bucket{instance=~\"$instance\"} - libp2p_rcmgr_previous_peer_connections_bucket{instance=~\"$instance\"})) - 0.1",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "max {{dir}} connections per peer โย {{instance}}",
+ "refId": "C"
+ }
+ ],
+ "title": "Connections per peer, aggregated",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "How many peers have N-0.1 connections open",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 76
+ },
+ "id": 39,
+ "options": {
+ "displayMode": "gradient",
+ "minVizHeight": 10,
+ "minVizWidth": 0,
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "last"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "showUnfilled": true
+ },
+ "pluginVersion": "9.3.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "exemplar": false,
+ "expr": "sum without (instance) (libp2p_rcmgr_peer_connections_bucket{dir=\"inbound\",instance=~\"$instance\"}-libp2p_rcmgr_previous_peer_connections_bucket{dir=\"inbound\",instance=~\"$instance\"})",
+ "format": "heatmap",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "{{le}}",
+ "refId": "A"
+ }
+ ],
+ "title": "Current inbound connections per peer histogram. Across all instances",
+ "type": "bargauge"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "How many peers have N-0.1 connections open",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 76
+ },
+ "id": 40,
+ "options": {
+ "displayMode": "gradient",
+ "minVizHeight": 10,
+ "minVizWidth": 0,
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "last"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "showUnfilled": true
+ },
+ "pluginVersion": "9.3.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "exemplar": false,
+ "expr": "sum without (instance) (libp2p_rcmgr_peer_connections_bucket{dir=\"outbound\",instance=~\"$instance\"}-libp2p_rcmgr_previous_peer_connections_bucket{dir=\"outbound\",instance=~\"$instance\"})",
+ "format": "heatmap",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "{{le}}",
+ "refId": "A"
+ }
+ ],
+ "title": "Curent outbound connections per peer histogram. Across all instances",
+ "type": "bargauge"
+ },
+ {
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 84
+ },
+ "id": 54,
+ "panels": [],
+ "title": "Memory",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "As reported to Resource Manager",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 24,
+ "x": 0,
+ "y": 85
+ },
+ "id": 56,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "exemplar": true,
+ "expr": "libp2p_rcmgr_memory{scope=\"system\",instance=~\"$instance\"}",
+ "interval": "",
+ "legendFormat": "Bytes Reserved",
+ "refId": "A"
+ }
+ ],
+ "title": "System memory reservations",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "As reported to Resource Manager",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 94
+ },
+ "id": 57,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "exemplar": true,
+ "expr": "libp2p_rcmgr_memory{scope=\"protocol\",instance=~\"$instance\"}",
+ "interval": "",
+ "legendFormat": "{{protocol}} {{instance}}",
+ "refId": "A"
+ }
+ ],
+ "title": "Memory reservations by protocol",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "As reported to Resource Manager",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 94
+ },
+ "id": 58,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "exemplar": true,
+ "expr": "libp2p_rcmgr_memory{scope=\"service\",instance=~\"$instance\"}",
+ "interval": "",
+ "legendFormat": "{{service}} {{instance}}",
+ "refId": "A"
+ }
+ ],
+ "title": "Memory reservations by service",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "As reported to the resource manager",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "bytes"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Number of peers aggregated"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "dark-purple",
+ "mode": "fixed"
+ }
+ },
+ {
+ "id": "custom.axisPlacement",
+ "value": "right"
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 10,
+ "w": 24,
+ "x": 0,
+ "y": 102
+ },
+ "id": 59,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "exemplar": true,
+ "expr": "histogram_quantile(0.50, sum by (le) (libp2p_rcmgr_peer_memory_bucket{instance=~\"$instance\"} - libp2p_rcmgr_previous_peer_memory_bucket{instance=~\"$instance\"})) - 0.1",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "p50 memory usage per peer",
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "exemplar": true,
+ "expr": "histogram_quantile(0.90, sum by (le) (libp2p_rcmgr_peer_memory_bucket{instance=~\"$instance\"} - libp2p_rcmgr_previous_peer_memory_bucket{instance=~\"$instance\"})) - 0.1",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "p90 memory usage per peer",
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "exemplar": true,
+ "expr": "histogram_quantile(1, sum by (le) (libp2p_rcmgr_peer_memory_bucket{instance=~\"$instance\"} - libp2p_rcmgr_previous_peer_memory_bucket{instance=~\"$instance\"})) - 0.1",
+ "hide": false,
+ "interval": "",
+ "legendFormat": "max memory usage per peer",
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "exemplar": true,
+ "expr": "sum(libp2p_rcmgr_peer_memory_count{instance=~\"$instance\"}-libp2p_rcmgr_previous_peer_memory_count{instance=~\"$instance\"})",
+ "hide": false,
+ "instant": false,
+ "interval": "",
+ "legendFormat": "Number of peers aggregated",
+ "range": true,
+ "refId": "D"
+ }
+ ],
+ "title": "Memory reservations per peer, aggregated across all instances",
+ "type": "timeseries"
+ },
+ {
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 112
+ },
+ "id": 62,
+ "panels": [],
+ "title": "File Descriptors",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "As reported to the resource manager",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 113
+ },
+ "id": 60,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "exemplar": true,
+ "expr": "libp2p_rcmgr_fds{instance=~\"$instance\"}",
+ "interval": "",
+ "legendFormat": "{{scope}} {{instance}}",
+ "refId": "A"
+ }
+ ],
+ "title": "FDs in use",
+ "type": "timeseries"
+ }
+ ],
+ "refresh": false,
+ "schemaVersion": 38,
+ "style": "dark",
+ "tags": [],
+ "templating": {
+ "list": [
+ {
+ "hide": 0,
+ "label": "datasource",
+ "name": "DS_PROMETHEUS",
+ "options": [],
+ "query": "prometheus",
+ "refresh": 1,
+ "regex": "",
+ "type": "datasource"
+ },
+ {
+ "current": {},
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "definition": "label_values(up, instance)",
+ "hide": 0,
+ "includeAll": true,
+ "multi": true,
+ "name": "instance",
+ "options": [],
+ "query": {
+ "query": "label_values(up, instance)",
+ "refId": "StandardVariableQuery"
+ },
+ "refresh": 1,
+ "skipUrlSync": false,
+ "sort": 0,
+ "type": "query"
+ }
+ ]
+ },
+ "time": {
+ "from": "now-5m",
+ "to": "now"
+ },
+ "timepicker": {},
+ "timezone": "",
+ "title": "go-libp2p Resource Manager",
+ "uid": "MgmGIjjnj",
+ "version": 1,
+ "weekStart": ""
+}
diff --git a/dashboards/swarm/swarm.json b/dashboards/swarm/swarm.json
new file mode 100644
index 0000000000..e8536ea8e6
--- /dev/null
+++ b/dashboards/swarm/swarm.json
@@ -0,0 +1,3528 @@
+{
+ "__inputs": [
+ {
+ "name": "DS_PROMETHEUS",
+ "label": "Prometheus",
+ "description": "",
+ "type": "datasource",
+ "pluginId": "prometheus",
+ "pluginName": "Prometheus"
+ }
+ ],
+ "__elements": {},
+ "__requires": [
+ {
+ "type": "panel",
+ "id": "gauge",
+ "name": "Gauge",
+ "version": ""
+ },
+ {
+ "type": "grafana",
+ "id": "grafana",
+ "name": "Grafana",
+ "version": "9.3.6"
+ },
+ {
+ "type": "panel",
+ "id": "piechart",
+ "name": "Pie chart",
+ "version": ""
+ },
+ {
+ "type": "datasource",
+ "id": "prometheus",
+ "name": "Prometheus",
+ "version": "1.0.0"
+ },
+ {
+ "type": "panel",
+ "id": "stat",
+ "name": "Stat",
+ "version": ""
+ },
+ {
+ "type": "panel",
+ "id": "state-timeline",
+ "name": "State timeline",
+ "version": ""
+ },
+ {
+ "type": "panel",
+ "id": "timeseries",
+ "name": "Time series",
+ "version": ""
+ }
+ ],
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": {
+ "type": "grafana",
+ "uid": "-- Grafana --"
+ },
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "target": {
+ "limit": 100,
+ "matchAny": false,
+ "tags": [],
+ "type": "dashboard"
+ },
+ "type": "dashboard"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "enable": true,
+ "iconColor": "red",
+ "name": "New annotation"
+ }
+ ]
+ },
+ "editable": true,
+ "fiscalYearStartMonth": 0,
+ "graphTooltip": 0,
+ "id": null,
+ "links": [],
+ "liveNow": false,
+ "panels": [
+ {
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "id": 21,
+ "panels": [],
+ "title": "Currently Established Connections",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "fixed"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "percentage",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 50
+ }
+ ]
+ }
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "quic"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "super-light-blue",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "quic-v1"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "dark-blue",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "webtransport"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "purple",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "tcp"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "yellow",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "websocket"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "p2p-circuit"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "orange",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 24,
+ "x": 0,
+ "y": 1
+ },
+ "id": 23,
+ "options": {
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": ["lastNotNull"],
+ "fields": "",
+ "values": false
+ },
+ "showThresholdLabels": false,
+ "showThresholdMarkers": true
+ },
+ "pluginVersion": "9.3.6",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "sum by (transport) (libp2p_swarm_connections_opened_total{instance=~\"$instance\"}) - sum by (transport) (libp2p_swarm_connections_closed_total{instance=~\"$instance\"})",
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Active Connections",
+ "type": "gauge"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "quic "
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "super-light-blue",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "quic-v1 "
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "dark-blue",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "webtransport "
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "purple",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "tcp /noise /yamux/1.0.0"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "yellow",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "tcp /tls/1.0.0 /yamux/1.0.0"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "tcp /noise /mplex/6.7.0"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "super-light-yellow",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "websocket /noise /yamux/1.0.0"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "p2p-circuit "
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "orange",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "tcp /tls/1.0.0 /mplex/6.7.0"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "super-light-red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 9
+ },
+ "id": 10,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "sum by (transport, security, muxer) (libp2p_swarm_connections_opened_total{dir=\"inbound\",instance=~\"$instance\"}) - sum by (transport, security, muxer) (libp2p_swarm_connections_closed_total{dir=\"inbound\",instance=~\"$instance\"})",
+ "legendFormat": "{{transport}} {{security}} {{muxer}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Active Connections: Inbound",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "quic "
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "super-light-blue",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "quic-v1 "
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "dark-blue",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "webtransport "
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "purple",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "tcp /noise /yamux/1.0.0"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "yellow",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "tcp /tls/1.0.0 /yamux/1.0.0"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "tcp /noise /mplex/6.7.0"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "super-light-yellow",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "websocket /noise /yamux/1.0.0"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "p2p-circuit "
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "orange",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "tcp /tls/1.0.0 /mplex/6.7.0"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "super-light-red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 9
+ },
+ "id": 11,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "sum by (transport, security, muxer)(libp2p_swarm_connections_opened_total{dir=\"outbound\",instance=~\"$instance\"}) - sum by (transport, security, muxer) (libp2p_swarm_connections_closed_total{dir=\"outbound\",instance=~\"$instance\"})",
+ "legendFormat": "{{transport}} {{security}} {{muxer}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Active Connections: Outgoing",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 17
+ },
+ "id": 29,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.50, sum by(le) (rate(libp2p_swarm_handshake_latency_seconds_bucket{transport=~\"quic|quic-v1\",instance=~\"$instance\"}[$__rate_interval])))",
+ "hide": false,
+ "legendFormat": "50th percentile",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.90, sum by(le) (rate(libp2p_swarm_handshake_latency_seconds_bucket{transport=~\"quic|quic-v1\",instance=~\"$instance\"}[$__rate_interval])))",
+ "hide": false,
+ "legendFormat": "90th percentile",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.95, sum by(le) (rate(libp2p_swarm_handshake_latency_seconds_bucket{transport=~\"quic|quic-v1\",instance=~\"$instance\"}[$__rate_interval])))",
+ "hide": false,
+ "legendFormat": "95th percentile",
+ "range": true,
+ "refId": "C"
+ }
+ ],
+ "title": "Handshake Latency (QUIC, QUIC v1)",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 17
+ },
+ "id": 30,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.50, sum by(le) (rate(libp2p_swarm_handshake_latency_seconds_bucket{transport=\"tcp\",instance=~\"$instance\"}[$__rate_interval])))",
+ "hide": false,
+ "legendFormat": "50th percentile",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.90, sum by(le) (rate(libp2p_swarm_handshake_latency_seconds_bucket{transport=\"tcp\",instance=~\"$instance\"}[$__rate_interval])))",
+ "hide": false,
+ "legendFormat": "90th percentile",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.95, sum by(le) (rate(libp2p_swarm_handshake_latency_seconds_bucket{transport=\"tcp\",instance=~\"$instance\"}[$__rate_interval])))",
+ "hide": false,
+ "legendFormat": "95th percentile",
+ "range": true,
+ "refId": "C"
+ }
+ ],
+ "title": "Handshake Latency (TCP)",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "log": 2,
+ "type": "log"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "s"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 25
+ },
+ "id": 27,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "histogram_quantile(0.5, sum(rate(libp2p_swarm_connection_duration_seconds_bucket{instance=~\"$instance\"}[$__rate_interval])) by (le))",
+ "instant": false,
+ "legendFormat": "50th percentile",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.9, sum(rate(libp2p_swarm_connection_duration_seconds_bucket{instance=~\"$instance\"}[$__rate_interval])) by (le))",
+ "hide": false,
+ "legendFormat": "90th percentile",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.95, sum(rate(libp2p_swarm_connection_duration_seconds_bucket{instance=~\"$instance\"}[$__rate_interval])) by (le))",
+ "hide": false,
+ "legendFormat": "95th percentile",
+ "range": true,
+ "refId": "C"
+ }
+ ],
+ "title": "Connection Duration",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ }
+ },
+ "mappings": []
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "regular"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "yellow",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "early muxer"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 25
+ },
+ "id": 36,
+ "options": {
+ "legend": {
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "pieType": "pie",
+ "reduceOptions": {
+ "calcs": ["lastNotNull"],
+ "fields": "",
+ "values": false
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "sum(libp2p_swarm_connections_opened_total{transport=\"tcp\",early_muxer=\"true\",instance=~\"$instance\"} - libp2p_swarm_connections_closed_total{transport=\"tcp\",early_muxer=\"true\",instance=~\"$instance\"})",
+ "legendFormat": "early muxer",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "sum(libp2p_swarm_connections_opened_total{transport=\"tcp\",early_muxer=\"false\",instance=~\"$instance\"} - libp2p_swarm_connections_closed_total{transport=\"tcp\",early_muxer=\"false\",instance=~\"$instance\"})",
+ "hide": false,
+ "legendFormat": "regular",
+ "range": true,
+ "refId": "B"
+ }
+ ],
+ "title": "Active Connections: Early Muxer Negotiation",
+ "type": "piechart"
+ },
+ {
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 33
+ },
+ "id": 19,
+ "panels": [],
+ "title": "Connection Establishment",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "quic "
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "super-light-blue",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "quic-v1 "
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "dark-blue",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "webtransport "
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "purple",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "tcp /noise /yamux/1.0.0"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "yellow",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "tcp /tls/1.0.0 /yamux/1.0.0"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "tcp /noise /mplex/6.7.0"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "super-light-yellow",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "websocket /noise /yamux/1.0.0"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "p2p-circuit "
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "orange",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "tcp /tls/1.0.0 /mplex/6.7.0"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "super-light-red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 0,
+ "y": 34
+ },
+ "id": 2,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "sum (rate(libp2p_swarm_connections_opened_total{dir=\"inbound\",instance=~\"$instance\"}[$__rate_interval])) by (transport, security, muxer)",
+ "legendFormat": "{{transport}} {{security}} {{muxer}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "New Connections: Inbound",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "quic "
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "super-light-blue",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "quic-v1 "
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "dark-blue",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "webtransport "
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "purple",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "tcp /noise /yamux/1.0.0"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "yellow",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "tcp /tls/1.0.0 /yamux/1.0.0"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "tcp /noise /mplex/6.7.0"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "super-light-yellow",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "websocket /noise /yamux/1.0.0"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "p2p-circuit "
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "orange",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "tcp /tls/1.0.0 /mplex/6.7.0"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "super-light-red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 12,
+ "x": 12,
+ "y": 34
+ },
+ "id": 5,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "sum (rate(libp2p_swarm_connections_opened_total{dir=\"outbound\",instance=~\"$instance\"}[$__rate_interval])) by (transport, security, muxer)",
+ "legendFormat": "{{transport}} {{security}} {{muxer}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "New Connections: Outgoing",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ }
+ },
+ "mappings": []
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "quic "
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "super-light-blue",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "quic-v1 "
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "dark-blue",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "webtransport "
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "purple",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "tcp /noise /yamux/1.0.0"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "yellow",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "tcp /tls/1.0.0 /yamux/1.0.0"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "tcp /noise /mplex/6.7.0"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "super-light-yellow",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "websocket /noise /yamux/1.0.0"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "p2p-circuit "
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "orange",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "tcp /tls/1.0.0 /mplex/6.7.0"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "super-light-red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 43
+ },
+ "id": 4,
+ "options": {
+ "legend": {
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "pieType": "pie",
+ "reduceOptions": {
+ "calcs": ["lastNotNull"],
+ "fields": "",
+ "values": false
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "sum (increase(libp2p_swarm_connections_opened_total{dir=\"inbound\",instance=~\"$instance\"}[$__range])) by (transport, security, muxer)",
+ "legendFormat": "{{transport}} {{security}} {{muxer}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "New Inbound Connections: Transports / Security / Muxers",
+ "type": "piechart"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ }
+ },
+ "mappings": [],
+ "unit": "none"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "quic "
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "super-light-blue",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "quic-v1 "
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "dark-blue",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "webtransport "
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "purple",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "tcp /noise /yamux/1.0.0"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "yellow",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "tcp /tls/1.0.0 /yamux/1.0.0"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "tcp /noise /mplex/6.7.0"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "super-light-yellow",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "websocket /noise /yamux/1.0.0"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "p2p-circuit "
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "orange",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "tcp /tls/1.0.0 /mplex/6.7.0"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "super-light-red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 43
+ },
+ "id": 6,
+ "options": {
+ "legend": {
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true,
+ "values": []
+ },
+ "pieType": "pie",
+ "reduceOptions": {
+ "calcs": ["lastNotNull"],
+ "fields": "",
+ "values": false
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "sum (increase(libp2p_swarm_connections_opened_total{dir=\"outbound\",instance=~\"$instance\"}[$__range])) by (transport, security, muxer)",
+ "legendFormat": "{{transport}} {{security}} {{muxer}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "New Outgoing Connections: Transports / Security / Muxers",
+ "type": "piechart"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ }
+ },
+ "mappings": []
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "ip4"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "light-blue",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "ip6"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "super-light-purple",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 51
+ },
+ "id": 32,
+ "options": {
+ "legend": {
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "pieType": "pie",
+ "reduceOptions": {
+ "calcs": ["lastNotNull"],
+ "fields": "",
+ "values": false
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "sum by (ip_version) (libp2p_swarm_connections_opened_total{dir=\"inbound\",instance=~\"$instance\"}) - sum by (ip_version) (libp2p_swarm_connections_closed_total{dir=\"inbound\",instance=~\"$instance\"})",
+ "legendFormat": "{{ip_version}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "New Inbound Connections: IP Version",
+ "type": "piechart"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ }
+ },
+ "mappings": []
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "ip6"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "super-light-purple",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "ip4"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "light-blue",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 51
+ },
+ "id": 34,
+ "options": {
+ "legend": {
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "pieType": "pie",
+ "reduceOptions": {
+ "calcs": ["lastNotNull"],
+ "fields": "",
+ "values": false
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "sum by (ip_version) (libp2p_swarm_connections_opened_total{dir=\"outbound\",instance=~\"$instance\"}) - sum by (ip_version) (libp2p_swarm_connections_closed_total{dir=\"outbound\",instance=~\"$instance\"})",
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "New Outbound Connections: IP Version",
+ "type": "piechart"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ }
+ },
+ "mappings": []
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "canceled: concurrent dial successful"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "super-light-blue",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "application canceled"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "blue",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "canceled: other"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "dark-blue",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "timeout"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "orange",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "other"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "deadline"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "super-light-orange",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "connection refused"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 59
+ },
+ "id": 15,
+ "options": {
+ "displayLabels": [],
+ "legend": {
+ "displayMode": "table",
+ "placement": "right",
+ "showLegend": true,
+ "values": ["percent"]
+ },
+ "pieType": "donut",
+ "reduceOptions": {
+ "calcs": ["lastNotNull"],
+ "fields": "",
+ "values": false
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.3.2-45365",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "sum(increase(libp2p_swarm_dial_errors_total{instance=~\"$instance\"}[$__range])) by (error)",
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Dial Errors",
+ "type": "piechart"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ },
+ "unit": "percentunit"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "quic"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "super-light-blue",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "quic-v1"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "dark-blue",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "webtransport"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "purple",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "tcp"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "yellow",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "websocket"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "p2p-circuit"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "orange",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 59
+ },
+ "id": 17,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "9.3.2-67a213dc85",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "sum by (transport) (rate(libp2p_swarm_connections_opened_total{dir=\"outbound\",instance=~\"$instance\"}[$__rate_interval])) ",
+ "hide": true,
+ "legendFormat": "{{transport}}",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "sum by(transport, error) (rate(libp2p_swarm_dial_errors_total{instance=~\"$instance\"}[$__rate_interval]))",
+ "hide": true,
+ "legendFormat": "dial error ({{error}}, {{transport}})",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "sum(rate(libp2p_swarm_connections_opened_total{dir=\"outbound\",instance=~\"$instance\"}[$__rate_interval])) by (transport) / (sum(rate(libp2p_swarm_connections_opened_total{dir=\"outbound\",instance=~\"$instance\"}[$__rate_interval])) by (transport) + (sum(rate(libp2p_swarm_dial_errors_total{instance=~\"$instance\"}[$__rate_interval])) by (transport)))",
+ "hide": false,
+ "legendFormat": "__auto",
+ "range": true,
+ "refId": "C"
+ }
+ ],
+ "title": "Dial Success Rates",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisBorderShow": false,
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "barWidthFactor": 0.6,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "insertNulls": false,
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "log": 2,
+ "type": "log"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ },
+ "unit": "s"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 67
+ },
+ "id": 50,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "histogram_quantile(0.5, sum(rate(libp2p_swarm_dial_latency_seconds_bucket{outcome=\"success\", instance=~\"$instance\"}[$__rate_interval])) by (le))",
+ "instant": false,
+ "legendFormat": "50th percentile",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.9, sum(rate(libp2p_swarm_dial_latency_seconds_bucket{outcome=\"success\", instance=~\"$instance\"}[$__rate_interval])) by (le))",
+ "hide": false,
+ "legendFormat": "90th percentile",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "histogram_quantile(0.95, sum(rate(libp2p_swarm_dial_latency_seconds_bucket{outcome=\"success\", instance=~\"$instance\"}[$__rate_interval])) by (le))",
+ "hide": false,
+ "legendFormat": "95th percentile",
+ "range": true,
+ "refId": "C"
+ }
+ ],
+ "title": "Peer Dial Latency (Seconds)",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "description": "on newly established connections",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ }
+ },
+ "mappings": []
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 67
+ },
+ "id": 25,
+ "options": {
+ "legend": {
+ "displayMode": "list",
+ "placement": "right",
+ "showLegend": true
+ },
+ "pieType": "pie",
+ "reduceOptions": {
+ "calcs": ["lastNotNull"],
+ "fields": "",
+ "values": false
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "sum by (key_type) (increase(libp2p_swarm_key_types_total{instance=~\"$instance\"}[$__range]))",
+ "legendFormat": "{{key_type}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "libp2p key types",
+ "type": "piechart"
+ },
+ {
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 75
+ },
+ "id": 40,
+ "panels": [],
+ "title": "Dial Prioritisation",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ }
+ },
+ "mappings": [],
+ "unit": "none"
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "<=300ms"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "purple",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "<=500ms"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "<=750ms"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "light-red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "<=50ms"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "dark-blue",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "<=10ms"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "blue",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 76
+ },
+ "id": 38,
+ "options": {
+ "displayLabels": ["percent"],
+ "legend": {
+ "displayMode": "table",
+ "placement": "right",
+ "showLegend": true,
+ "values": ["percent"]
+ },
+ "pieType": "donut",
+ "reduceOptions": {
+ "calcs": ["lastNotNull"],
+ "fields": "",
+ "values": false
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "sum(increase(libp2p_swarm_dial_ranking_delay_seconds_bucket{instance=~\"$instance\",le=\"0.001\"}[$__range]))",
+ "format": "time_series",
+ "instant": false,
+ "legendFormat": "No delay",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "sum(increase(libp2p_swarm_dial_ranking_delay_seconds_bucket{instance=~\"$instance\",le=\"0.01\"}[$__range])) - ignoring(le) sum(increase(libp2p_swarm_dial_ranking_delay_seconds_bucket{instance=~\"$instance\",le=\"0.001\"}[$__range]))",
+ "hide": false,
+ "instant": false,
+ "legendFormat": "<=10ms",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "sum(increase(libp2p_swarm_dial_ranking_delay_seconds_bucket{instance=~\"$instance\",le=\"0.05\"}[$__range])) - ignoring(le) sum(increase(libp2p_swarm_dial_ranking_delay_seconds_bucket{instance=~\"$instance\",le=\"0.01\"}[$__range]))",
+ "hide": false,
+ "legendFormat": "<=50ms",
+ "range": true,
+ "refId": "F"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "sum(increase(libp2p_swarm_dial_ranking_delay_seconds_bucket{instance=~\"$instance\",le=\"0.3\"}[$__range])) - ignoring(le) sum(increase(libp2p_swarm_dial_ranking_delay_seconds_bucket{instance=~\"$instance\",le=\"0.05\"}[$__range]))",
+ "hide": false,
+ "legendFormat": "<=300ms",
+ "range": true,
+ "refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "sum(increase(libp2p_swarm_dial_ranking_delay_seconds_bucket{instance=~\"$instance\",le=\"0.5\"}[$__range])) - ignoring(le) sum(increase(libp2p_swarm_dial_ranking_delay_seconds_bucket{instance=~\"$instance\",le=\"0.3\"}[$__range]))",
+ "hide": false,
+ "legendFormat": "<=500ms",
+ "range": true,
+ "refId": "E"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "sum(increase(libp2p_swarm_dial_ranking_delay_seconds_bucket{instance=~\"$instance\",le=\"0.75\"}[$__range])) - ignoring(le) sum(increase(libp2p_swarm_dial_ranking_delay_seconds_bucket{instance=~\"$instance\",le=\"0.5\"}[$__range]))",
+ "hide": false,
+ "legendFormat": "<=750ms",
+ "range": true,
+ "refId": "G"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "sum(increase(libp2p_swarm_dial_ranking_delay_seconds_bucket{instance=~\"$instance\",le=\"+Inf\"}[$__range])) - ignoring(le) sum(increase(libp2p_swarm_dial_ranking_delay_seconds_bucket{instance=~\"$instance\",le=\"0.75\"}[$__range]))",
+ "hide": false,
+ "legendFormat": ">750ms",
+ "range": true,
+ "refId": "H"
+ }
+ ],
+ "title": "Dial Ranking Delay",
+ "transformations": [],
+ "type": "piechart"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ }
+ },
+ "mappings": []
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": ">=6"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "light-red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "5"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "dark-red",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "2"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "purple",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "1"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "3"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "orange",
+ "mode": "fixed"
+ }
+ }
+ ]
+ },
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "4"
+ },
+ "properties": [
+ {
+ "id": "color",
+ "value": {
+ "fixedColor": "dark-orange",
+ "mode": "fixed"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 76
+ },
+ "id": 42,
+ "options": {
+ "displayLabels": ["percent", "name"],
+ "legend": {
+ "displayMode": "table",
+ "placement": "right",
+ "showLegend": true,
+ "values": ["percent"]
+ },
+ "pieType": "donut",
+ "reduceOptions": {
+ "calcs": ["lastNotNull"],
+ "fields": "",
+ "values": false
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "increase(libp2p_swarm_dials_per_peer_total{instance=~\"$instance\", outcome=\"success\", num_dials=\"0\"}[$__range])",
+ "legendFormat": "0",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "increase(libp2p_swarm_dials_per_peer_total{instance=~\"$instance\", outcome=\"success\", num_dials=\"1\"}[$__range])",
+ "hide": false,
+ "legendFormat": "1",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "increase(libp2p_swarm_dials_per_peer_total{instance=~\"$instance\", outcome=\"success\", num_dials=\"2\"}[$__range])",
+ "hide": false,
+ "legendFormat": "2",
+ "range": true,
+ "refId": "C"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "increase(libp2p_swarm_dials_per_peer_total{instance=~\"$instance\", outcome=\"success\", num_dials=\"3\"}[$__range])",
+ "hide": false,
+ "legendFormat": "3",
+ "range": true,
+ "refId": "D"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "increase(libp2p_swarm_dials_per_peer_total{instance=~\"$instance\", outcome=\"success\", num_dials=\"4\"}[$__range])",
+ "hide": false,
+ "legendFormat": "4",
+ "range": true,
+ "refId": "E"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "increase(libp2p_swarm_dials_per_peer_total{instance=~\"$instance\", outcome=\"success\", num_dials=\"5\"}[$__range])",
+ "hide": false,
+ "legendFormat": "5",
+ "range": true,
+ "refId": "F"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "increase(libp2p_swarm_dials_per_peer_total{instance=~\"$instance\", outcome=\"success\", num_dials=\">=6\"}[$__range])",
+ "hide": false,
+ "legendFormat": ">=6",
+ "range": true,
+ "refId": "G"
+ }
+ ],
+ "title": "Dials per connection",
+ "type": "piechart"
+ },
+ {
+ "collapsed": false,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 84
+ },
+ "id": 44,
+ "panels": [],
+ "title": "Black Hole Detection",
+ "type": "row"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "fixed"
+ },
+ "custom": {
+ "fillOpacity": 76,
+ "lineWidth": 0,
+ "spanNulls": true
+ },
+ "mappings": [
+ {
+ "options": {
+ "0": {
+ "color": "blue",
+ "index": 0,
+ "text": "Probing"
+ },
+ "1": {
+ "color": "green",
+ "index": 1,
+ "text": "Allowed"
+ },
+ "2": {
+ "color": "purple",
+ "index": 2,
+ "text": "Blocked"
+ }
+ },
+ "type": "value"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 24,
+ "x": 0,
+ "y": 85
+ },
+ "id": 46,
+ "options": {
+ "alignValue": "center",
+ "legend": {
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "mergeValues": true,
+ "rowHeight": 0.9,
+ "showValue": "always",
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "libp2p_swarm_black_hole_filter_state{instance=~\"$instance\"}",
+ "legendFormat": "{{instance}} {{name}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Black Hole Filter State",
+ "type": "state-timeline"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "purple",
+ "mode": "fixed"
+ },
+ "mappings": [
+ {
+ "options": {
+ "0": {
+ "index": 0,
+ "text": "-"
+ }
+ },
+ "type": "value"
+ }
+ ],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 91
+ },
+ "id": 49,
+ "options": {
+ "colorMode": "value",
+ "graphMode": "none",
+ "justifyMode": "auto",
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": ["lastNotNull"],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "value_and_name"
+ },
+ "pluginVersion": "10.0.1",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "expr": "libp2p_swarm_black_hole_filter_next_request_allowed_after{instance=~\"$instance\"}",
+ "legendFormat": "{{instance}}: {{name}}",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Black Hole Filter Requests Till Next Probe",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "mappings": [],
+ "max": 100,
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "purple",
+ "value": null
+ },
+ {
+ "color": "green",
+ "value": 5
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 91
+ },
+ "id": 47,
+ "options": {
+ "orientation": "vertical",
+ "reduceOptions": {
+ "calcs": ["lastNotNull"],
+ "fields": "",
+ "values": false
+ },
+ "showThresholdLabels": false,
+ "showThresholdMarkers": true
+ },
+ "pluginVersion": "10.0.1",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "editorMode": "code",
+ "exemplar": false,
+ "expr": "libp2p_swarm_black_hole_filter_success_fraction{instance=~\"$instance\"} * 100",
+ "instant": true,
+ "legendFormat": "{{instance}} {{name}}",
+ "range": false,
+ "refId": "A"
+ }
+ ],
+ "title": "Dial Success Rate",
+ "type": "gauge"
+ }
+ ],
+ "refresh": "",
+ "schemaVersion": 38,
+ "style": "dark",
+ "tags": [],
+ "templating": {
+ "list": [
+ {
+ "hide": 0,
+ "label": "datasource",
+ "name": "DS_PROMETHEUS",
+ "options": [],
+ "query": "prometheus",
+ "refresh": 1,
+ "regex": "",
+ "type": "datasource"
+ },
+ {
+ "current": {},
+ "datasource": {
+ "type": "prometheus",
+ "uid": "${DS_PROMETHEUS}"
+ },
+ "definition": "label_values(up, instance)",
+ "hide": 0,
+ "includeAll": true,
+ "multi": true,
+ "name": "instance",
+ "options": [],
+ "query": {
+ "query": "label_values(up, instance)",
+ "refId": "StandardVariableQuery"
+ },
+ "refresh": 1,
+ "skipUrlSync": false,
+ "sort": 0,
+ "type": "query"
+ }
+ ]
+ },
+ "time": {
+ "from": "now-15m",
+ "to": "now"
+ },
+ "timepicker": {},
+ "timezone": "",
+ "title": "go-libp2p Swarm",
+ "uid": "a15PyhO4z",
+ "version": 4,
+ "weekStart": ""
+}
diff --git a/defaults.go b/defaults.go
new file mode 100644
index 0000000000..31de6f025d
--- /dev/null
+++ b/defaults.go
@@ -0,0 +1,237 @@
+package libp2p
+
+// This file contains all the default configuration options.
+
+import (
+ "crypto/rand"
+
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem"
+ rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
+ "github.com/libp2p/go-libp2p/p2p/muxer/yamux"
+ "github.com/libp2p/go-libp2p/p2p/net/connmgr"
+ "github.com/libp2p/go-libp2p/p2p/net/swarm"
+ "github.com/libp2p/go-libp2p/p2p/security/noise"
+ tls "github.com/libp2p/go-libp2p/p2p/security/tls"
+ quic "github.com/libp2p/go-libp2p/p2p/transport/quic"
+ "github.com/libp2p/go-libp2p/p2p/transport/tcp"
+ libp2pwebrtc "github.com/libp2p/go-libp2p/p2p/transport/webrtc"
+ ws "github.com/libp2p/go-libp2p/p2p/transport/websocket"
+ webtransport "github.com/libp2p/go-libp2p/p2p/transport/webtransport"
+ "github.com/prometheus/client_golang/prometheus"
+
+ "github.com/multiformats/go-multiaddr"
+)
+
+// DefaultSecurity is the default security option.
+//
+// Useful when you want to extend, but not replace, the supported transport
+// security protocols.
+var DefaultSecurity = ChainOptions(
+ Security(tls.ID, tls.New),
+ Security(noise.ID, noise.New),
+)
+
+// DefaultMuxers configures libp2p to use the default stream connection multiplexers.
+//
+// Use this option when you want to *extend* the set of multiplexers used by
+// libp2p instead of replacing them.
+var DefaultMuxers = Muxer(yamux.ID, yamux.DefaultTransport)
+
+// DefaultTransports are the default libp2p transports.
+//
+// Use this option when you want to *extend* the set of transports used by
+// libp2p instead of replacing them.
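+//
+// For example, to extend the defaults with an additional transport (the
+// newCustomTransport constructor below is hypothetical):
+//
+//	libp2p.New(libp2p.DefaultTransports, libp2p.Transport(newCustomTransport))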
+var DefaultTransports = ChainOptions(
+ Transport(tcp.NewTCPTransport),
+ Transport(quic.NewTransport),
+ Transport(ws.New),
+ Transport(webtransport.New),
+ Transport(libp2pwebrtc.New),
+)
+
+// DefaultPrivateTransports are the default libp2p transports when a PSK is supplied.
+//
+// Use this option when you want to *extend* the set of transports used by
+// libp2p instead of replacing them.
+var DefaultPrivateTransports = ChainOptions(
+ Transport(tcp.NewTCPTransport),
+ Transport(ws.New),
+)
+
+// DefaultPeerstore configures libp2p to use the default peerstore.
+var DefaultPeerstore Option = func(cfg *Config) error {
+ ps, err := pstoremem.NewPeerstore()
+ if err != nil {
+ return err
+ }
+ return cfg.Apply(Peerstore(ps))
+}
+
+// RandomIdentity generates a random identity. (default behaviour)
+var RandomIdentity = func(cfg *Config) error {
+ priv, _, err := crypto.GenerateEd25519Key(rand.Reader)
+ if err != nil {
+ return err
+ }
+ return cfg.Apply(Identity(priv))
+}
+
+// DefaultListenAddrs configures libp2p to use the default listen addresses.
+var DefaultListenAddrs = func(cfg *Config) error {
+ addrs := []string{
+ "/ip4/0.0.0.0/tcp/0",
+ "/ip4/0.0.0.0/udp/0/quic-v1",
+ "/ip4/0.0.0.0/udp/0/quic-v1/webtransport",
+ "/ip4/0.0.0.0/udp/0/webrtc-direct",
+ "/ip6/::/tcp/0",
+ "/ip6/::/udp/0/quic-v1",
+ "/ip6/::/udp/0/quic-v1/webtransport",
+ "/ip6/::/udp/0/webrtc-direct",
+ }
+ listenAddrs := make([]multiaddr.Multiaddr, 0, len(addrs))
+ for _, s := range addrs {
+ addr, err := multiaddr.NewMultiaddr(s)
+ if err != nil {
+ return err
+ }
+ listenAddrs = append(listenAddrs, addr)
+ }
+ return cfg.Apply(ListenAddrs(listenAddrs...))
+}
+
+// DefaultEnableRelay enables relay dialing and listening by default.
+var DefaultEnableRelay = func(cfg *Config) error {
+ return cfg.Apply(EnableRelay())
+}
+
+// DefaultResourceManager creates a resource manager using the default limits,
+// scaled to the available system memory.
+var DefaultResourceManager = func(cfg *Config) error {
+ // Default memory limit: 1/8th of total memory, minimum 128MB, maximum 1GB
+ limits := rcmgr.DefaultLimits
+ SetDefaultServiceLimits(&limits)
+ mgr, err := rcmgr.NewResourceManager(rcmgr.NewFixedLimiter(limits.AutoScale()))
+ if err != nil {
+ return err
+ }
+
+ return cfg.Apply(ResourceManager(mgr))
+}
+
+// DefaultConnectionManager creates a default connection manager
+var DefaultConnectionManager = func(cfg *Config) error {
+ mgr, err := connmgr.NewConnManager(160, 192)
+ if err != nil {
+ return err
+ }
+
+ return cfg.Apply(ConnectionManager(mgr))
+}
+
+// DefaultPrometheusRegisterer configures libp2p to use the default registerer
+var DefaultPrometheusRegisterer = func(cfg *Config) error {
+ return cfg.Apply(PrometheusRegisterer(prometheus.DefaultRegisterer))
+}
+
+var defaultUDPBlackHoleDetector = func(cfg *Config) error {
+ // A black hole is a binary property: on a network where UDP dials are blocked, all dials
+ // will fail. So a low success rate of 5 out of 100 dials is good enough.
+ return cfg.Apply(UDPBlackHoleSuccessCounter(&swarm.BlackHoleSuccessCounter{N: 100, MinSuccesses: 5, Name: "UDP"}))
+}
+
+var defaultIPv6BlackHoleDetector = func(cfg *Config) error {
+ // A black hole is a binary property: on a network with no IPv6 connectivity, all dials
+ // will fail. So a low success rate of 5 out of 100 dials is good enough.
+ return cfg.Apply(IPv6BlackHoleSuccessCounter(&swarm.BlackHoleSuccessCounter{N: 100, MinSuccesses: 5, Name: "IPv6"}))
+}
+
+// Complete list of default options and when to fallback on them.
+//
+// Please *DON'T* specify default options any other way. Putting this all here
+// makes tracking defaults *much* easier.
+var defaults = []struct {
+ fallback func(cfg *Config) bool
+ opt Option
+}{
+ {
+ fallback: func(cfg *Config) bool { return cfg.Transports == nil && cfg.ListenAddrs == nil },
+ opt: DefaultListenAddrs,
+ },
+ {
+ fallback: func(cfg *Config) bool { return cfg.Transports == nil && cfg.PSK == nil },
+ opt: DefaultTransports,
+ },
+ {
+ fallback: func(cfg *Config) bool { return cfg.Transports == nil && cfg.PSK != nil },
+ opt: DefaultPrivateTransports,
+ },
+ {
+ fallback: func(cfg *Config) bool { return cfg.Muxers == nil },
+ opt: DefaultMuxers,
+ },
+ {
+ fallback: func(cfg *Config) bool { return !cfg.Insecure && cfg.SecurityTransports == nil },
+ opt: DefaultSecurity,
+ },
+ {
+ fallback: func(cfg *Config) bool { return cfg.PeerKey == nil },
+ opt: RandomIdentity,
+ },
+ {
+ fallback: func(cfg *Config) bool { return cfg.Peerstore == nil },
+ opt: DefaultPeerstore,
+ },
+ {
+ fallback: func(cfg *Config) bool { return !cfg.RelayCustom },
+ opt: DefaultEnableRelay,
+ },
+ {
+ fallback: func(cfg *Config) bool { return cfg.ResourceManager == nil },
+ opt: DefaultResourceManager,
+ },
+ {
+ fallback: func(cfg *Config) bool { return cfg.ConnManager == nil },
+ opt: DefaultConnectionManager,
+ },
+ {
+ fallback: func(cfg *Config) bool { return !cfg.DisableMetrics && cfg.PrometheusRegisterer == nil },
+ opt: DefaultPrometheusRegisterer,
+ },
+ {
+ fallback: func(cfg *Config) bool {
+ return !cfg.CustomUDPBlackHoleSuccessCounter && cfg.UDPBlackHoleSuccessCounter == nil
+ },
+ opt: defaultUDPBlackHoleDetector,
+ },
+ {
+ fallback: func(cfg *Config) bool {
+ return !cfg.CustomIPv6BlackHoleSuccessCounter && cfg.IPv6BlackHoleSuccessCounter == nil
+ },
+ opt: defaultIPv6BlackHoleDetector,
+ },
+}
+
+// Defaults configures libp2p to use the default options. Can be combined with
+// other options to *extend* the default options.
+var Defaults Option = func(cfg *Config) error {
+ for _, def := range defaults {
+ if err := cfg.Apply(def.opt); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// FallbackDefaults applies default options to the libp2p node if and only if no
+// other relevant options have been applied. It will be appended to the options
+// passed into New.
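+//
+// For example, since New appends FallbackDefaults automatically, these two
+// calls are effectively equivalent:
+//
+//	h1, _ := libp2p.New()
+//	h2, _ := libp2p.New(libp2p.FallbackDefaults)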
+var FallbackDefaults Option = func(cfg *Config) error {
+ for _, def := range defaults {
+ if !def.fallback(cfg) {
+ continue
+ }
+ if err := cfg.Apply(def.opt); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/docs/flaky-tests.md b/docs/flaky-tests.md
new file mode 100644
index 0000000000..667dd77a0f
--- /dev/null
+++ b/docs/flaky-tests.md
@@ -0,0 +1,43 @@
+# Debugging Flaky Tests
+
+If a test is flaky in CI, it's probably because of a timing issue. The
+test probably depends on some goroutine making progress in the background while
+the test polls to see if the expected outcome is achieved.
+
+This will pretty much always work locally because your local machine is likely
+pretty capable and there aren't too many concurrent processes running. In CI, we
+are susceptible to both slower hardware and noisier neighbors. However, we can
+mimic this environment locally with
+[cgroups](https://man7.org/linux/man-pages/man7/cgroups.7.html).
+
+# Replicating noisy neighbors
+
+We can limit the amount of CPU time relative to real time a process gets with
+cgroups. This lets us replicate the environment where many other neighboring
+processes are vying for CPU time.
+
+```bash
+ # Compile some test we want to run. We do this outside the cgroup so this is
+ # fast
+ go test -c ./p2p/host/autorelay
+
+ # Create the group
+ sudo cgcreate -g cpu:/cpulimit
+
+ # Limit CPU time to 10,000 microseconds for every 1s of real time
+ sudo cgset -r cpu.cfs_quota_us=10000 cpulimit
+ sudo cgset -r cpu.cfs_period_us=1000000 cpulimit
+
+ # Run a shell in our limited environment
+ sudo cgexec -g cpu:cpulimit bash
+
+ # In the shell, run the test
+ ./autorelay.test -test.v
+```
+
+# Flakiness with coverage profile
+
+Sometimes adding the `-coverprofile=module-coverage.txt` flag introduces flaky
+behavior, since it adds another goroutine to the mix. If you're having trouble
+reproducing a flaky test, try enabling this flag.
+
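+For example, to run the autorelay tests from above with the coverage profile enabled:
+
+```bash
+go test -coverprofile=module-coverage.txt ./p2p/host/autorelay
+```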
diff --git a/examples/README.md b/examples/README.md
index a47661a635..8e86d5e953 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -1,15 +1,23 @@
-
-# `go-libp2p` examples and tutorials
+# go-libp2p examples and tutorials
In this folder, you can find a variety of examples to help you get started in using go-libp2p. Every example has a specific purpose, and some incorporate a full tutorial that you can follow through, helping you expand your knowledge about libp2p and p2p networks in general.
-Let us know if you find any issue or if you want to contribute and add a new tutorial, feel welcome to submit a pr, thank you!
+Let us know if you find any issues. If you want to contribute and add a new tutorial, feel free to submit a PR. Thank you!
## Examples and Tutorials
- [The libp2p 'host'](./libp2p-host)
+- [The libp2p 'host' with Secure WebSockets and AutoTLS](./autotls)
- [Building an http proxy with libp2p](./http-proxy)
-- [Protocol Multiplexing with multicodecs](./protocol-multiplexing-with-multicodecs)
- [An echo host](./echo)
+- [Routed echo host](./routed-echo/)
- [Multicodecs with protobufs](./multipro)
-- [P2P chat application](./chat)
\ No newline at end of file
+- [Relay-based P2P Communication](./relay/)
+- [P2P chat application](./chat)
+- [P2P chat application w/ rendezvous peer discovery](./chat-with-rendezvous)
+- [P2P chat application with peer discovery using mdns](./chat-with-mdns)
+- [P2P chat using pubsub](./pubsub)
+- [A chapter based approach to building a libp2p application](./ipfs-camp-2019/) _Created for [IPFS Camp 2019](https://github.com/ipfs/camp/tree/master/CORE_AND_ELECTIVE_COURSES/CORE_COURSE_B)_
+- [View metrics using Prometheus and Grafana](./metrics-and-dashboards)
+
+For js-libp2p examples, check https://github.com/libp2p/js-libp2p-examples
diff --git a/examples/autotls/.gitignore b/examples/autotls/.gitignore
new file mode 100644
index 0000000000..7f968b0b0d
--- /dev/null
+++ b/examples/autotls/.gitignore
@@ -0,0 +1,3 @@
+autotls
+p2p-forge-certs/
+identity.key
diff --git a/examples/autotls/README.md b/examples/autotls/README.md
new file mode 100644
index 0000000000..f551d4704e
--- /dev/null
+++ b/examples/autotls/README.md
@@ -0,0 +1,14 @@
+# libp2p host with Secure WebSockets and AutoTLS
+
+This example builds on the [libp2p host](../libp2p-host) example and demonstrates how to use [AutoTLS](https://blog.libp2p.io/autotls/) to automatically generate a wildcard Let's Encrypt TLS certificate unique to the libp2p host (`*.<PeerID>.libp2p.direct`), and use it with the [libp2p WebSockets transport over TCP](https://github.com/libp2p/specs/blob/master/websockets/README.md), enabling browsers to connect directly to the libp2p host.
+
+For this example to work, you need to have a public IP address and be publicly reachable. AutoTLS is guarded by a connectivity check and will not request a certificate unless your libp2p node emits `event.EvtLocalReachabilityChanged` with `network.ReachabilityPublic`.
+
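+The key wiring, abridged from this example's `main.go`, is shown below: the WebSocket transport shares the TCP listener and takes its TLS configuration from the AutoTLS cert manager, while the address factory replaces the `*` in the SNI listen address with the host's actual address once detected.
+
+```go
+h, err := libp2p.New(
+	libp2p.ListenAddrStrings(
+		fmt.Sprintf("/ip4/0.0.0.0/tcp/5500/tls/sni/*.%s/ws", p2pforge.DefaultForgeDomain),
+	),
+	libp2p.Transport(tcp.NewTCPTransport),
+	libp2p.ShareTCPListener(),
+	libp2p.Transport(ws.New, ws.WithTLSConfig(certManager.TLSConfig())),
+	libp2p.AddrsFactory(certManager.AddressFactory()),
+)
+```
+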
+## Running the example
+
+From the `go-libp2p/examples` directory run the following:
+
+```sh
+cd autotls/
+go run .
+```
diff --git a/examples/autotls/identity.go b/examples/autotls/identity.go
new file mode 100644
index 0000000000..c76187e240
--- /dev/null
+++ b/examples/autotls/identity.go
@@ -0,0 +1,47 @@
+package main
+
+import (
+ "os"
+
+ "github.com/libp2p/go-libp2p/core/crypto"
+)
+
+// LoadIdentity reads a private key from the given path and, if it does not
+// exist, generates a new one.
+func LoadIdentity(keyPath string) (crypto.PrivKey, error) {
+ if _, err := os.Stat(keyPath); err == nil {
+ return ReadIdentity(keyPath)
+ } else if os.IsNotExist(err) {
+ logger.Infof("Generating peer identity in %s\n", keyPath)
+ return GenerateIdentity(keyPath)
+ } else {
+ return nil, err
+ }
+}
+
+// ReadIdentity reads a private key from the given path.
+func ReadIdentity(path string) (crypto.PrivKey, error) {
+ bytes, err := os.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+
+ return crypto.UnmarshalPrivateKey(bytes)
+}
+
+// GenerateIdentity writes a new random private key to the given path.
+func GenerateIdentity(path string) (crypto.PrivKey, error) {
+ privk, _, err := crypto.GenerateKeyPair(crypto.Ed25519, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ bytes, err := crypto.MarshalPrivateKey(privk)
+ if err != nil {
+ return nil, err
+ }
+
+ err = os.WriteFile(path, bytes, 0400)
+
+ return privk, err
+}
diff --git a/examples/autotls/main.go b/examples/autotls/main.go
new file mode 100644
index 0000000000..f3a81247c5
--- /dev/null
+++ b/examples/autotls/main.go
@@ -0,0 +1,151 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "os/signal"
+ "time"
+
+ "github.com/caddyserver/certmagic"
+ "github.com/ipfs/go-log/v2"
+
+ p2pforge "github.com/ipshipyard/p2p-forge/client"
+ "github.com/libp2p/go-libp2p"
+ dht "github.com/libp2p/go-libp2p-kad-dht"
+ "github.com/libp2p/go-libp2p/p2p/transport/tcp"
+ ws "github.com/libp2p/go-libp2p/p2p/transport/websocket"
+)
+
+var logger = log.Logger("autotls-example")
+
+const userAgent = "go-libp2p/example/autotls"
+const identityKeyFile = "identity.key"
+
+func main() {
+ // Create a background context
+ ctx := context.Background()
+
+ log.SetLogLevel("*", "error")
+ log.SetLogLevel("autotls-example", "debug") // Set the log level for the example to debug
+ log.SetLogLevel("basichost", "info") // Set the log level for the basichost package to info
+ log.SetLogLevel("autotls", "debug") // Set the log level for the autotls-example package to debug
+ log.SetLogLevel("p2p-forge", "debug") // Set the log level for the p2pforge package to debug
+ log.SetLogLevel("nat", "debug") // Set the log level for the libp2p nat package to debug
+
+ certLoaded := make(chan bool, 1) // Create a channel to signal when the cert is loaded
+
+ // use dedicated logger for autotls feature
+ rawLogger := logger.Desugar()
+
+ // p2pforge is the AutoTLS client library.
+ // The cert manager handles the creation and management of certificates
+ certManager, err := p2pforge.NewP2PForgeCertMgr(
+ // Configure CA ACME endpoint
+ // NOTE:
+ // This example uses Let's Encrypt staging CA (p2pforge.DefaultCATestEndpoint)
+ // which will not work correctly in browser, but is useful for initial testing.
+ // Production should use Let's Encrypt production CA (p2pforge.DefaultCAEndpoint).
+ p2pforge.WithCAEndpoint(p2pforge.DefaultCATestEndpoint), // test CA endpoint
+ // TODO: p2pforge.WithCAEndpoint(p2pforge.DefaultCAEndpoint), // production CA endpoint
+
+ // Configure where to store certificate
+ p2pforge.WithCertificateStorage(&certmagic.FileStorage{Path: "p2p-forge-certs"}),
+
+ // Configure logger to use
+ p2pforge.WithLogger(rawLogger.Sugar().Named("autotls")),
+
+ // User-Agent to use during DNS-01 ACME challenge
+ p2pforge.WithUserAgent(userAgent),
+
+ // Optional extra delay before the initial registration
+ p2pforge.WithRegistrationDelay(10*time.Second),
+
+ // Optional hook called once certificate is ready
+ p2pforge.WithOnCertLoaded(func() {
+ certLoaded <- true
+ }),
+ )
+
+ if err != nil {
+ panic(err)
+ }
+
+ // Start the cert manager
+ certManager.Start()
+ defer certManager.Stop()
+
+ // Load or generate a persistent peer identity key
+ privKey, err := LoadIdentity(identityKeyFile)
+ if err != nil {
+ panic(err)
+ }
+
+ opts := []libp2p.Option{
+ libp2p.Identity(privKey), // Use the loaded identity key
+ libp2p.DisableRelay(), // Disable relay, since we need a public IP address
+ libp2p.NATPortMap(), // Attempt to open ports using UPnP for NATed hosts.
+
+ libp2p.ListenAddrStrings(
+ "/ip4/0.0.0.0/tcp/5500", // regular TCP IPv4 connections
+ "/ip6/::/tcp/5500", // regular TCP IPv6 connections
+
+ // Configure Secure WebSockets listeners on the same TCP port
+ // AutoTLS will automatically generate a certificate for this host
+ // and use the forge domain (`libp2p.direct`) as the SNI hostname.
+ fmt.Sprintf("/ip4/0.0.0.0/tcp/5500/tls/sni/*.%s/ws", p2pforge.DefaultForgeDomain),
+ fmt.Sprintf("/ip6/::/tcp/5500/tls/sni/*.%s/ws", p2pforge.DefaultForgeDomain),
+ ),
+
+ // Configure the TCP transport
+ libp2p.Transport(tcp.NewTCPTransport),
+
+ // Share the same TCP listener between the TCP and WS transports
+ libp2p.ShareTCPListener(),
+
+ // Configure the WS transport with the AutoTLS cert manager
+ libp2p.Transport(ws.New, ws.WithTLSConfig(certManager.TLSConfig())),
+
+ // Configure user agent for libp2p identify protocol (https://github.com/libp2p/specs/blob/master/identify/README.md)
+ libp2p.UserAgent(userAgent),
+
+ // AddrsFactory takes the multiaddrs we're listening on and sets the multiaddrs to advertise to the network.
+ // We use the AutoTLS address factory so that the `*` in the AutoTLS address string is replaced with the
+ // actual IP address of the host once detected
+ libp2p.AddrsFactory(certManager.AddressFactory()),
+ }
+ h, err := libp2p.New(opts...)
+ if err != nil {
+ panic(err)
+ }
+
+ logger.Info("Host created with PeerID: ", h.ID())
+
+ // Bootstrap the DHT to verify our public IP address with AutoNAT
+ dhtOpts := []dht.Option{
+ dht.Mode(dht.ModeClient),
+ dht.BootstrapPeers(dht.GetDefaultBootstrapPeerAddrInfos()...),
+ }
+ dht, err := dht.New(ctx, h, dhtOpts...)
+ if err != nil {
+ panic(err)
+ }
+
+ go dht.Bootstrap(ctx)
+
+ logger.Info("Addresses: ", h.Addrs())
+
+ certManager.ProvideHost(h)
+
+ select {
+ case <-certLoaded:
+ logger.Info("TLS certificate loaded ")
+ logger.Info("Addresses: ", h.Addrs())
+ case <-ctx.Done():
+ logger.Info("Context done")
+ }
+ // Wait for interrupt signal
+ c := make(chan os.Signal, 1)
+ signal.Notify(c, os.Interrupt)
+ <-c
+}
diff --git a/examples/chat-with-mdns/.gitignore b/examples/chat-with-mdns/.gitignore
new file mode 100644
index 0000000000..81b4c92c0b
--- /dev/null
+++ b/examples/chat-with-mdns/.gitignore
@@ -0,0 +1 @@
+chat-with-mdns
diff --git a/examples/chat-with-mdns/README.md b/examples/chat-with-mdns/README.md
new file mode 100644
index 0000000000..8e467e696a
--- /dev/null
+++ b/examples/chat-with-mdns/README.md
@@ -0,0 +1,108 @@
+# p2p chat app with libp2p [with peer discovery using mdns]
+
+This program demonstrates a simple p2p chat application. You will learn how to discover a peer on the network (using mdns), connect to it, and open a chat stream. This example is heavily influenced by (and shamelessly copied from) the `chat-with-rendezvous` example.
+
+## How to build this example?
+
+From the `go-libp2p/examples` directory run the following:
+
+```
+cd chat-with-mdns/
+go build
+```
+
+## Usage
+
+Use two different terminal windows to run
+
+```
+./chat-with-mdns -port 6666
+./chat-with-mdns -port 6668
+```
+
+
+## So how does it work?
+
+1. **Configure a p2p host**
+```go
+ctx := context.Background()
+
+// libp2p.New constructs a new libp2p Host.
+// Other options can be added here.
+host, err := libp2p.New()
+```
+[libp2p.New](https://godoc.org/github.com/libp2p/go-libp2p#New) is the constructor for a libp2p node. It creates a host with the given configuration.
+
+2. **Set a default handler function for incoming connections.**
+
+This function is called on the local peer when a remote peer initiates a connection and starts a stream with the local peer.
+```go
+// Set a function as stream handler.
+host.SetStreamHandler("/chat/1.1.0", handleStream)
+```
+
+```handleStream``` is executed for each new incoming stream to the local peer. ```stream``` is used to exchange data between the local and remote peers. This example uses non-blocking functions for reading and writing from this stream.
+
+```go
+func handleStream(stream net.Stream) {
+
+ // Create a buffer stream for non blocking read and write.
+ rw := bufio.NewReadWriter(bufio.NewReader(stream), bufio.NewWriter(stream))
+
+ go readData(rw)
+ go writeData(rw)
+
+ // 'stream' will stay open until you close it (or the other side closes it).
+}
+```
+
+3. **Find peers nearby using mdns**
+
+Create a new [mdns discovery](https://godoc.org/github.com/libp2p/go-libp2p/p2p/discovery#NewMdnsService) service in the host.
+
+```go
+notifee := &discoveryNotifee{PeerChan: make(chan peer.AddrInfo)}
+ser, err := discovery.NewMdnsService(peerhost, rendezvous, notifee)
+```
+Register a [Notifee interface](https://godoc.org/github.com/libp2p/go-libp2p/p2p/discovery#Notifee) with the service so that we get notified when peers are discovered, then start the service:
+
+```go
+ ser.Start()
+```
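+
+The notifee here is a small type (see `mdns.go` in this example) whose `HandlePeerFound` method forwards every discovered peer to a channel:
+
+```go
+type discoveryNotifee struct {
+	PeerChan chan peer.AddrInfo
+}
+
+// HandlePeerFound is called by the mDNS service when a new peer is discovered.
+func (n *discoveryNotifee) HandlePeerFound(pi peer.AddrInfo) {
+	n.PeerChan <- pi
+}
+```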
+
+4. **Open streams to peers found.**
+
+Finally, we open streams to the peers as we discover them:
+
+```go
+ peer := <-peerChan // will block until we discover a peer
+ // compare peer IDs so that only one side calls `NewStream`
+ if peer.ID > host.ID() {
+ // if the other peer's ID is greater than ours, don't connect to it; just wait for it to connect to us
+ fmt.Println("Found peer:", peer, " id is greater than us, wait for it to connect to us")
+ continue
+ }
+ fmt.Println("Found peer:", peer, ", connecting")
+
+ if err := host.Connect(ctx, peer); err != nil {
+ fmt.Println("Connection failed:", err)
+ continue
+ }
+
+ // open a stream; this stream will be handled by handleStream on the other end
+ stream, err := host.NewStream(ctx, peer.ID, protocol.ID(cfg.ProtocolID))
+
+ if err != nil {
+ fmt.Println("Stream open failed", err)
+ } else {
+ rw := bufio.NewReadWriter(bufio.NewReader(stream), bufio.NewWriter(stream))
+
+ go writeData(rw)
+ go readData(rw)
+ fmt.Println("Connected to:", peer)
+ }
+```
+
+## Authors
+1. Bineesh Lazar
diff --git a/examples/chat-with-mdns/flags.go b/examples/chat-with-mdns/flags.go
new file mode 100644
index 0000000000..f535907feb
--- /dev/null
+++ b/examples/chat-with-mdns/flags.go
@@ -0,0 +1,24 @@
+package main
+
+import (
+ "flag"
+)
+
+type config struct {
+ RendezvousString string
+ ProtocolID string
+ listenHost string
+ listenPort int
+}
+
+func parseFlags() *config {
+ c := &config{}
+
+ flag.StringVar(&c.RendezvousString, "rendezvous", "meetme", "Unique string to identify group of nodes. Share this with your friends to let them connect with you")
+ flag.StringVar(&c.listenHost, "host", "0.0.0.0", "The bootstrap node host listen address\n")
+ flag.StringVar(&c.ProtocolID, "pid", "/chat/1.1.0", "Sets a protocol id for stream headers")
+ flag.IntVar(&c.listenPort, "port", 0, "node listen port (0 pick a random unused port)")
+
+ flag.Parse()
+ return c
+}
diff --git a/examples/chat-with-mdns/main.go b/examples/chat-with-mdns/main.go
new file mode 100644
index 0000000000..55c36adf40
--- /dev/null
+++ b/examples/chat-with-mdns/main.go
@@ -0,0 +1,144 @@
+package main
+
+import (
+ "bufio"
+ "context"
+ "crypto/rand"
+ "flag"
+ "fmt"
+ "os"
+
+ "github.com/libp2p/go-libp2p"
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/protocol"
+
+ "github.com/multiformats/go-multiaddr"
+)
+
+func handleStream(stream network.Stream) {
+ fmt.Println("Got a new stream!")
+
+ // Create a buffer stream for non-blocking read and write.
+ rw := bufio.NewReadWriter(bufio.NewReader(stream), bufio.NewWriter(stream))
+
+ go readData(rw)
+ go writeData(rw)
+
+ // 'stream' will stay open until you close it (or the other side closes it).
+}
+
+func readData(rw *bufio.ReadWriter) {
+ for {
+ str, err := rw.ReadString('\n')
+ if err != nil {
+ fmt.Println("Error reading from buffer")
+ panic(err)
+ }
+
+ if str == "" {
+ return
+ }
+ if str != "\n" {
+ // Green console colour: \x1b[32m
+ // Reset console colour: \x1b[0m
+ fmt.Printf("\x1b[32m%s\x1b[0m> ", str)
+ }
+
+ }
+}
+
+func writeData(rw *bufio.ReadWriter) {
+ stdReader := bufio.NewReader(os.Stdin)
+
+ for {
+ fmt.Print("> ")
+ sendData, err := stdReader.ReadString('\n')
+ if err != nil {
+ fmt.Println("Error reading from stdin")
+ panic(err)
+ }
+
+ _, err = rw.WriteString(fmt.Sprintf("%s\n", sendData))
+ if err != nil {
+ fmt.Println("Error writing to buffer")
+ panic(err)
+ }
+ err = rw.Flush()
+ if err != nil {
+ fmt.Println("Error flushing buffer")
+ panic(err)
+ }
+ }
+}
+
+func main() {
+ help := flag.Bool("help", false, "Display Help")
+ cfg := parseFlags()
+
+ if *help {
+ fmt.Printf("Simple example for peer discovery using mDNS. mDNS is great when you have multiple peers in local LAN.")
+ fmt.Printf("Usage: \n Run './chat-with-mdns'\nor Run './chat-with-mdns -host [host] -port [port] -rendezvous [string] -pid [proto ID]'\n")
+
+ os.Exit(0)
+ }
+
+ fmt.Printf("[*] Listening on: %s with port: %d\n", cfg.listenHost, cfg.listenPort)
+
+ ctx := context.Background()
+ r := rand.Reader
+
+ // Creates a new RSA key pair for this host.
+ prvKey, _, err := crypto.GenerateKeyPairWithReader(crypto.RSA, 2048, r)
+ if err != nil {
+ panic(err)
+ }
+
+ // 0.0.0.0 will listen on any interface device.
+ sourceMultiAddr, _ := multiaddr.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", cfg.listenHost, cfg.listenPort))
+
+ // libp2p.New constructs a new libp2p Host.
+ // Other options can be added here.
+ host, err := libp2p.New(
+ libp2p.ListenAddrs(sourceMultiAddr),
+ libp2p.Identity(prvKey),
+ )
+ if err != nil {
+ panic(err)
+ }
+
+ // Set a function as stream handler.
+ // This function is called when a peer initiates a connection and starts a stream with this peer.
+ host.SetStreamHandler(protocol.ID(cfg.ProtocolID), handleStream)
+
+ fmt.Printf("\n[*] Your Multiaddress Is: /ip4/%s/tcp/%v/p2p/%s\n", cfg.listenHost, cfg.listenPort, host.ID())
+
+ peerChan := initMDNS(host, cfg.RendezvousString)
+ for { // allows multiple peers to join
+ peer := <-peerChan // will block until we discover a peer
+ if peer.ID > host.ID() {
+ // if the other peer's ID is greater than ours, don't connect to it; just wait for it to connect to us
+ fmt.Println("Found peer:", peer, " id is greater than us, wait for it to connect to us")
+ continue
+ }
+ fmt.Println("Found peer:", peer, ", connecting")
+
+ if err := host.Connect(ctx, peer); err != nil {
+ fmt.Println("Connection failed:", err)
+ continue
+ }
+
+ // open a stream; this stream will be handled by handleStream on the other end
+ stream, err := host.NewStream(ctx, peer.ID, protocol.ID(cfg.ProtocolID))
+
+ if err != nil {
+ fmt.Println("Stream open failed", err)
+ } else {
+ rw := bufio.NewReadWriter(bufio.NewReader(stream), bufio.NewWriter(stream))
+
+ go writeData(rw)
+ go readData(rw)
+ fmt.Println("Connected to:", peer)
+ }
+ }
+}
diff --git a/examples/chat-with-mdns/mdns.go b/examples/chat-with-mdns/mdns.go
new file mode 100644
index 0000000000..c1f5590e6d
--- /dev/null
+++ b/examples/chat-with-mdns/mdns.go
@@ -0,0 +1,31 @@
+package main
+
+import (
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/peer"
+
+ "github.com/libp2p/go-libp2p/p2p/discovery/mdns"
+)
+
+type discoveryNotifee struct {
+ PeerChan chan peer.AddrInfo
+}
+
+// HandlePeerFound is called by the mDNS service when a new peer is found
+func (n *discoveryNotifee) HandlePeerFound(pi peer.AddrInfo) {
+ n.PeerChan <- pi
+}
+
+// Initialize the MDNS service
+func initMDNS(peerhost host.Host, rendezvous string) chan peer.AddrInfo {
+ // register with service so that we get notified about peer discovery
+ n := &discoveryNotifee{}
+ n.PeerChan = make(chan peer.AddrInfo)
+
+ // create the mDNS service, advertising under the given rendezvous tag
+ ser := mdns.NewMdnsService(peerhost, rendezvous, n)
+ if err := ser.Start(); err != nil {
+ panic(err)
+ }
+ return n.PeerChan
+}
diff --git a/examples/chat-with-rendezvous/.gitignore b/examples/chat-with-rendezvous/.gitignore
new file mode 100644
index 0000000000..b3f8f160b8
--- /dev/null
+++ b/examples/chat-with-rendezvous/.gitignore
@@ -0,0 +1 @@
+chat-with-rendezvous
diff --git a/examples/chat-with-rendezvous/README.md b/examples/chat-with-rendezvous/README.md
new file mode 100644
index 0000000000..78a2c1bd4e
--- /dev/null
+++ b/examples/chat-with-rendezvous/README.md
@@ -0,0 +1,134 @@
+# p2p chat app with libp2p [with peer discovery]
+
+This program demonstrates a simple p2p chat application. You will learn how to discover a peer in the network (using kad-dht), connect to it and open a chat stream.
+
+## Build
+
+From the `go-libp2p/examples` directory run the following:
+
+```
+> cd chat-with-rendezvous/
+> go build -o chat
+```
+
+## Usage
+
+Use two different terminal windows to run
+
+```
+./chat -listen /ip4/127.0.0.1/tcp/6666
+./chat -listen /ip4/127.0.0.1/tcp/6668
+```
+## So how does it work?
+
+1. **Configure a p2p host**
+```go
+// libp2p.New constructs a new libp2p Host.
+// Other options can be added here.
+host, err := libp2p.New()
+```
+[libp2p.New](https://pkg.go.dev/github.com/libp2p/go-libp2p#New) is the constructor for a libp2p node. It creates a host with the given configuration. Right now, all the options are default, documented [here](https://pkg.go.dev/github.com/libp2p/go-libp2p#New)
+
+2. **Set a default handler function for incoming connections.**
+
+This function is called on the local peer when a remote peer initiates a connection and starts a stream with the local peer.
+```go
+// Set a function as stream handler.
+host.SetStreamHandler("/chat/1.1.0", handleStream)
+```
+
+```handleStream``` is executed for each new incoming stream to the local peer. ```stream``` is used to exchange data between the local and remote peers. This example uses non-blocking functions for reading and writing from this stream.
+
+```go
+func handleStream(stream net.Stream) {
+
+ // Create a buffer stream for non blocking read and write.
+ rw := bufio.NewReadWriter(bufio.NewReader(stream), bufio.NewWriter(stream))
+
+ go readData(rw)
+ go writeData(rw)
+
+ // 'stream' will stay open until you close it (or the other side closes it).
+}
+```
+
+3. **Initiate a new DHT client with ```host``` as the local peer.**
+
+
+```go
+kademliaDHT, err := dht.New(ctx, host)
+```
+
+4. **Connect to IPFS bootstrap nodes.**
+
+These nodes are used to find nearby peers via the DHT.
+
+```go
+for _, peerAddr := range config.BootstrapPeers {
+
+	peerinfo, _ := peer.AddrInfoFromP2pAddr(peerAddr)
+
+	if err := host.Connect(ctx, *peerinfo); err != nil {
+		fmt.Println(err)
+	} else {
+		fmt.Println("Connection established with bootstrap node: ", *peerinfo)
+	}
+}
+```
+
+5. **Announce your presence using a rendezvous point.**
+
+[routingDiscovery.Advertise](https://pkg.go.dev/github.com/libp2p/go-libp2p/p2p/discovery/routing#RoutingDiscovery.Advertise) makes this node announce that it can provide a value for the given key, which in this case is the ```rendezvousString```. Other peers will look up the same key to find each other.
+
+```go
+routingDiscovery := drouting.NewRoutingDiscovery(kademliaDHT)
+dutil.Advertise(ctx, routingDiscovery, config.RendezvousString)
+```
+
+6. **Find nearby peers.**
+
+[routingDiscovery.FindPeers](https://pkg.go.dev/github.com/libp2p/go-libp2p/p2p/discovery/routing#RoutingDiscovery.FindPeers) will return a channel of peers who have announced their presence.
+
+```go
+peerChan, err := routingDiscovery.FindPeers(ctx, config.RendezvousString)
+```
+
+The [discovery](https://pkg.go.dev/github.com/libp2p/go-libp2p/p2p/discovery/routing) package uses the DHT internally to [provide](https://pkg.go.dev/github.com/libp2p/go-libp2p-kad-dht#IpfsDHT.Provide) and [findProviders](https://pkg.go.dev/github.com/libp2p/go-libp2p-kad-dht#IpfsDHT.FindProviders).
+
+**Note:** Although [routingDiscovery.Advertise](https://pkg.go.dev/github.com/libp2p/go-libp2p/p2p/discovery/routing#RoutingDiscovery.Advertise) and [routingDiscovery.FindPeers](https://pkg.go.dev/github.com/libp2p/go-libp2p/p2p/discovery/routing#RoutingDiscovery.FindPeers) work for rendezvous-style peer discovery, this is not the right way of doing it. libp2p is working on an actual rendezvous protocol ([libp2p/specs#56](https://github.com/libp2p/specs/pull/56)) which can be used for bootstrapping, real-time peer discovery, and application-specific routing.
+
+7. **Open streams to newly discovered peers.**
+
+Finally, we open streams to the newly discovered peers.
+
+```go
+go func() {
+ for peer := range peerChan {
+ if peer.ID == host.ID() {
+ continue
+ }
+ fmt.Println("Found peer:", peer)
+
+ fmt.Println("Connecting to:", peer)
+ stream, err := host.NewStream(ctx, peer.ID, protocol.ID(config.ProtocolID))
+
+ if err != nil {
+ fmt.Println("Connection failed:", err)
+ continue
+ } else {
+ rw := bufio.NewReadWriter(bufio.NewReader(stream), bufio.NewWriter(stream))
+
+ go writeData(rw)
+ go readData(rw)
+ }
+
+ fmt.Println("Connected to:", peer)
+ }
+ }()
+```
+
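+You can change the rendezvous string and listen addresses via the flags defined in `flags.go`, for example:
+
+```
+./chat -rendezvous "my-private-topic" -listen /ip4/127.0.0.1/tcp/6666
+```
+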
+## Authors
+1. Abhishek Upperwal
+2. Mantas Vidutis
diff --git a/examples/chat-with-rendezvous/chat.go b/examples/chat-with-rendezvous/chat.go
new file mode 100644
index 0000000000..a2ddb897b4
--- /dev/null
+++ b/examples/chat-with-rendezvous/chat.go
@@ -0,0 +1,175 @@
+package main
+
+import (
+ "bufio"
+ "context"
+ "flag"
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/libp2p/go-libp2p"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/protocol"
+ drouting "github.com/libp2p/go-libp2p/p2p/discovery/routing"
+ dutil "github.com/libp2p/go-libp2p/p2p/discovery/util"
+
+ dht "github.com/libp2p/go-libp2p-kad-dht"
+ "github.com/multiformats/go-multiaddr"
+
+ "github.com/ipfs/go-log/v2"
+)
+
+var logger = log.Logger("rendezvous")
+
+func handleStream(stream network.Stream) {
+ logger.Info("Got a new stream!")
+
+ // Create a buffer stream for non-blocking read and write.
+ rw := bufio.NewReadWriter(bufio.NewReader(stream), bufio.NewWriter(stream))
+
+ go readData(rw)
+ go writeData(rw)
+
+ // 'stream' will stay open until you close it (or the other side closes it).
+}
+
+func readData(rw *bufio.ReadWriter) {
+ for {
+ str, err := rw.ReadString('\n')
+ if err != nil {
+ fmt.Println("Error reading from buffer")
+ panic(err)
+ }
+
+ if str == "" {
+ return
+ }
+ if str != "\n" {
+ // Green console colour: \x1b[32m
+ // Reset console colour: \x1b[0m
+ fmt.Printf("\x1b[32m%s\x1b[0m> ", str)
+ }
+
+ }
+}
+
+func writeData(rw *bufio.ReadWriter) {
+ stdReader := bufio.NewReader(os.Stdin)
+
+ for {
+ fmt.Print("> ")
+ sendData, err := stdReader.ReadString('\n')
+ if err != nil {
+ fmt.Println("Error reading from stdin")
+ panic(err)
+ }
+
+ _, err = rw.WriteString(fmt.Sprintf("%s\n", sendData))
+ if err != nil {
+ fmt.Println("Error writing to buffer")
+ panic(err)
+ }
+ err = rw.Flush()
+ if err != nil {
+ fmt.Println("Error flushing buffer")
+ panic(err)
+ }
+ }
+}
+
+func main() {
+ log.SetAllLoggers(log.LevelWarn)
+ log.SetLogLevel("rendezvous", "info")
+ help := flag.Bool("h", false, "Display Help")
+ config, err := ParseFlags()
+ if err != nil {
+ panic(err)
+ }
+
+ if *help {
+ fmt.Println("This program demonstrates a simple p2p chat application using libp2p")
+ fmt.Println()
+ fmt.Println("Usage: Run './chat in two different terminals. Let them connect to the bootstrap nodes, announce themselves and connect to the peers")
+ flag.PrintDefaults()
+ return
+ }
+
+ // libp2p.New constructs a new libp2p Host. Other options can be added
+ // here.
+ host, err := libp2p.New(libp2p.ListenAddrs([]multiaddr.Multiaddr(config.ListenAddresses)...))
+ if err != nil {
+ panic(err)
+ }
+ logger.Info("Host created. We are:", host.ID())
+ logger.Info(host.Addrs())
+
+ // Set a function as stream handler. This function is called when a peer
+ // initiates a connection and starts a stream with this peer.
+ host.SetStreamHandler(protocol.ID(config.ProtocolID), handleStream)
+
+ // Start a DHT, for use in peer discovery. We can't just make a new DHT
+ // client because we want each peer to maintain its own local copy of the
+ // DHT, so that the bootstrapping node of the DHT can go down without
+ // inhibiting future peer discovery.
+ ctx := context.Background()
+ bootstrapPeers := make([]peer.AddrInfo, len(config.BootstrapPeers))
+ for i, addr := range config.BootstrapPeers {
+ peerinfo, _ := peer.AddrInfoFromP2pAddr(addr)
+ bootstrapPeers[i] = *peerinfo
+ }
+ kademliaDHT, err := dht.New(ctx, host, dht.BootstrapPeers(bootstrapPeers...))
+ if err != nil {
+ panic(err)
+ }
+
+ // Bootstrap the DHT. In the default configuration, this spawns a background
+ // goroutine that will refresh the peer table every five minutes.
+ logger.Debug("Bootstrapping the DHT")
+ if err = kademliaDHT.Bootstrap(ctx); err != nil {
+ panic(err)
+ }
+
+ // Wait a bit to let bootstrapping finish (really bootstrap should block until it's ready, but that isn't the case yet.)
+ time.Sleep(1 * time.Second)
+
+ // We use a rendezvous point "meet me here" to announce our location.
+ // This is like telling your friends to meet you at the Eiffel Tower.
+ logger.Info("Announcing ourselves...")
+ routingDiscovery := drouting.NewRoutingDiscovery(kademliaDHT)
+ dutil.Advertise(ctx, routingDiscovery, config.RendezvousString)
+ logger.Debug("Successfully announced!")
+
+ // Now, look for others who have announced
+ // This is like your friend telling you the location to meet you.
+ logger.Debug("Searching for other peers...")
+ peerChan, err := routingDiscovery.FindPeers(ctx, config.RendezvousString)
+ if err != nil {
+ panic(err)
+ }
+
+ for peer := range peerChan {
+ if peer.ID == host.ID() {
+ continue
+ }
+ logger.Debug("Found peer:", peer)
+
+ logger.Debug("Connecting to:", peer)
+ stream, err := host.NewStream(ctx, peer.ID, protocol.ID(config.ProtocolID))
+
+ if err != nil {
+ logger.Warning("Connection failed:", err)
+ continue
+ } else {
+ rw := bufio.NewReadWriter(bufio.NewReader(stream), bufio.NewWriter(stream))
+
+ go writeData(rw)
+ go readData(rw)
+ }
+
+ logger.Info("Connected to:", peer)
+ }
+
+ select {}
+}
diff --git a/examples/chat-with-rendezvous/flags.go b/examples/chat-with-rendezvous/flags.go
new file mode 100644
index 0000000000..a2e8ed1dff
--- /dev/null
+++ b/examples/chat-with-rendezvous/flags.go
@@ -0,0 +1,63 @@
+package main
+
+import (
+ "flag"
+ "strings"
+
+ dht "github.com/libp2p/go-libp2p-kad-dht"
+ maddr "github.com/multiformats/go-multiaddr"
+)
+
+// A new type we need for writing a custom flag parser
+type addrList []maddr.Multiaddr
+
+func (al *addrList) String() string {
+ strs := make([]string, len(*al))
+ for i, addr := range *al {
+ strs[i] = addr.String()
+ }
+ return strings.Join(strs, ",")
+}
+
+func (al *addrList) Set(value string) error {
+ addr, err := maddr.NewMultiaddr(value)
+ if err != nil {
+ return err
+ }
+ *al = append(*al, addr)
+ return nil
+}
+
+func StringsToAddrs(addrStrings []string) (maddrs []maddr.Multiaddr, err error) {
+ for _, addrString := range addrStrings {
+ addr, err := maddr.NewMultiaddr(addrString)
+ if err != nil {
+ return maddrs, err
+ }
+ maddrs = append(maddrs, addr)
+ }
+ return
+}
+
+type Config struct {
+ RendezvousString string
+ BootstrapPeers addrList
+ ListenAddresses addrList
+ ProtocolID string
+}
+
+func ParseFlags() (Config, error) {
+ config := Config{}
+ flag.StringVar(&config.RendezvousString, "rendezvous", "meet me here",
+ "Unique string to identify group of nodes. Share this with your friends to let them connect with you")
+ flag.Var(&config.BootstrapPeers, "peer", "Adds a peer multiaddress to the bootstrap list")
+ flag.Var(&config.ListenAddresses, "listen", "Adds a multiaddress to the listen list")
+ flag.StringVar(&config.ProtocolID, "pid", "/chat/1.1.0", "Sets a protocol id for stream headers")
+ flag.Parse()
+
+ if len(config.BootstrapPeers) == 0 {
+ config.BootstrapPeers = dht.DefaultBootstrapPeers
+ }
+
+ return config, nil
+}
diff --git a/examples/chat/.gitignore b/examples/chat/.gitignore
new file mode 100644
index 0000000000..76cadc92a3
--- /dev/null
+++ b/examples/chat/.gitignore
@@ -0,0 +1 @@
+chat
diff --git a/examples/chat/README.md b/examples/chat/README.md
index da01591db2..1d171b8ae6 100644
--- a/examples/chat/README.md
+++ b/examples/chat/README.md
@@ -1,7 +1,7 @@
# p2p chat app with libp2p
This program demonstrates a simple p2p chat application. It can work between two peers if
-1. Both have private IP address (same network).
+1. Both have a private IP address (same network).
2. At least one of them has a public IP address.
Assume 'A' and 'B' are on different networks; host 'A' may or may not have a public IP address, but host 'B' has one.
@@ -10,11 +10,11 @@ Usage: Run `./chat -sp <SOURCE_PORT>` on host 'B' where <SOURCE_PORT> can be any
## Build
-To build the example, first run `make deps` in the root directory.
+From the `go-libp2p/examples` directory run the following:
```
-> make deps
-> go build ./examples/chat
+> cd chat/
+> go build
```
## Usage
@@ -23,7 +23,7 @@ On node 'B'
```
> ./chat -sp 3001
-Run ./chat -d /ip4/127.0.0.1/tcp/3001/ipfs/QmdXGaeGiVA745XorV1jr11RHxB9z4fqykm6xCUPX1aTJo
+Run ./chat -d /ip4/127.0.0.1/tcp/3001/p2p/QmdXGaeGiVA745XorV1jr11RHxB9z4fqykm6xCUPX1aTJo
2018/02/27 01:21:32 Got a new stream!
> hi (received messages in green colour)
@@ -34,16 +34,18 @@ Run ./chat -d /ip4/127.0.0.1/tcp/3001/ipfs/QmdXGaeGiVA745XorV1jr11RHxB9z4fqykm6x
On node 'A'. Replace 127.0.0.1 with if node 'B' has one.
```
-> ./chat -d /ip4/127.0.0.1/tcp/3001/ipfs/QmdXGaeGiVA745XorV1jr11RHxB9z4fqykm6xCUPX1aTJo
-Run ./chat -d /ip4/127.0.0.1/tcp/3001/ipfs/QmdXGaeGiVA745XorV1jr11RHxB9z4fqykm6xCUPX1aTJo
+> ./chat -d /ip4/127.0.0.1/tcp/3001/p2p/QmdXGaeGiVA745XorV1jr11RHxB9z4fqykm6xCUPX1aTJo
+Run ./chat -d /ip4/127.0.0.1/tcp/3001/p2p/QmdXGaeGiVA745XorV1jr11RHxB9z4fqykm6xCUPX1aTJo
This node's multiaddress:
-/ip4/0.0.0.0/tcp/0/ipfs/QmWVx9NwsgaVWMRHNCpesq1WQAw2T3JurjGDNeVNWifPS7
+/ip4/0.0.0.0/tcp/0/p2p/QmWVx9NwsgaVWMRHNCpesq1WQAw2T3JurjGDNeVNWifPS7
> hi
> hello
```
-**NOTE: debug mode is enabled by default, debug mode will always generate same node id (on each node) on every execution. Disable debug using `--debug false` flag while running your executable.**
+**NOTE: debug mode is enabled by default, debug mode will always generate the same node id (on each node) on every execution. Disable debug using `--debug false` flag while running your executable.**
+
+**Note:** If you are looking for an implementation with peer discovery, [chat-with-rendezvous](../chat-with-rendezvous) supports peer discovery using a rendezvous point.
## Authors
-1. Abhishek Upperwal
\ No newline at end of file
+1. Abhishek Upperwal
diff --git a/examples/chat/chat.go b/examples/chat/chat.go
index 8f211ba949..1f88da4c07 100644
--- a/examples/chat/chat.go
+++ b/examples/chat/chat.go
@@ -1,31 +1,30 @@
/*
-*
-* The MIT License (MIT)
-*
-* Copyright (c) 2014 Juan Batiz-Benet
-*
-* Permission is hereby granted, free of charge, to any person obtaining a copy
-* of this software and associated documentation files (the "Software"), to deal
-* in the Software without restriction, including without limitation the rights
-* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-* copies of the Software, and to permit persons to whom the Software is
-* furnished to do so, subject to the following conditions:
-*
-* The above copyright notice and this permission notice shall be included in
-* all copies or substantial portions of the Software.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-* THE SOFTWARE.
-*
-* This program demonstrate a simple chat application using p2p communication.
-*
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2014 Juan Batiz-Benet
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * This program demonstrates a simple chat application using p2p communication.
+ *
*/
-
package main
import (
@@ -39,55 +38,20 @@ import (
mrand "math/rand"
"os"
- "github.com/libp2p/go-libp2p-crypto"
- "github.com/libp2p/go-libp2p-host"
- "github.com/libp2p/go-libp2p-net"
- "github.com/libp2p/go-libp2p-peer"
- "github.com/libp2p/go-libp2p-peerstore"
- "github.com/libp2p/go-libp2p-swarm"
- "github.com/libp2p/go-libp2p/p2p/host/basic"
+ "github.com/libp2p/go-libp2p"
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+
"github.com/multiformats/go-multiaddr"
)
-/*
-* addAddrToPeerstore parses a peer multiaddress and adds
-* it to the given host's peerstore, so it knows how to
-* contact it. It returns the peer ID of the remote peer.
-* @credit examples/http-proxy/proxy.go
- */
-func addAddrToPeerstore(h host.Host, addr string) peer.ID {
- // The following code extracts target's the peer ID from the
- // given multiaddress
- ipfsaddr, err := multiaddr.NewMultiaddr(addr)
- if err != nil {
- log.Fatalln(err)
- }
- pid, err := ipfsaddr.ValueForProtocol(multiaddr.P_IPFS)
- if err != nil {
- log.Fatalln(err)
- }
-
- peerid, err := peer.IDB58Decode(pid)
- if err != nil {
- log.Fatalln(err)
- }
-
- // Decapsulate the /ipfs/ part from the target
- // /ip4//ipfs/ becomes /ip4/
- targetPeerAddr, _ := multiaddr.NewMultiaddr(
- fmt.Sprintf("/ipfs/%s", peer.IDB58Encode(peerid)))
- targetAddr := ipfsaddr.Decapsulate(targetPeerAddr)
-
- // We have a peer ID and a targetAddr so we add
- // it to the peerstore so LibP2P knows how to contact it
- h.Peerstore().AddAddr(peerid, targetAddr, peerstore.PermanentAddrTTL)
- return peerid
-}
-
-func handleStream(s net.Stream) {
+func handleStream(s network.Stream) {
log.Println("Got a new stream!")
- // Create a buffer stream for non blocking read and write.
+ // Create a buffer stream for non-blocking read and write.
rw := bufio.NewReadWriter(bufio.NewReader(s), bufio.NewWriter(s))
go readData(rw)
@@ -95,6 +59,7 @@ func handleStream(s net.Stream) {
// stream 's' will stay open until you close it (or the other side closes it).
}
+
func readData(rw *bufio.ReadWriter) {
for {
str, _ := rw.ReadString('\n')
@@ -117,115 +82,153 @@ func writeData(rw *bufio.ReadWriter) {
for {
fmt.Print("> ")
sendData, err := stdReader.ReadString('\n')
-
if err != nil {
- panic(err)
+ log.Println(err)
+ return
}
rw.WriteString(fmt.Sprintf("%s\n", sendData))
rw.Flush()
}
-
}
func main() {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
sourcePort := flag.Int("sp", 0, "Source port number")
- dest := flag.String("d", "", "Dest MultiAddr String")
- help := flag.Bool("help", false, "Display Help")
- debug := flag.Bool("debug", true, "Debug generated same node id on every execution.")
+ dest := flag.String("d", "", "Destination multiaddr string")
+ help := flag.Bool("help", false, "Display help")
+ debug := flag.Bool("debug", false, "Debug generates the same node ID on every execution")
flag.Parse()
if *help {
fmt.Printf("This program demonstrates a simple p2p chat application using libp2p\n\n")
- fmt.Printf("Usage: Run './chat -sp ' where can be any port number. Now run './chat -d ' where is multiaddress of previous listener host.\n")
+ fmt.Println("Usage: Run './chat -sp ' where can be any port number.")
+ fmt.Println("Now run './chat -d ' where is multiaddress of previous listener host.")
os.Exit(0)
}
- // If debug is enabled used constant random source else cryptographic randomness.
+ // If debug is enabled, use a constant random source to generate the peer ID. This is only
+ // useful for debugging and is off by default. Otherwise, cryptographic randomness (rand.Reader) is used.
var r io.Reader
if *debug {
- // Constant random source. This will always generate the same host ID on multiple execution.
- // Don't do this in production code.
+ // Use the port number as the randomness source.
+ // This will always generate the same host ID on multiple executions, if the same port number is used.
+ // Never do this in production code.
r = mrand.New(mrand.NewSource(int64(*sourcePort)))
} else {
r = rand.Reader
}
- // Creates a new RSA key pair for this host
- prvKey, pubKey, err := crypto.GenerateKeyPairWithReader(crypto.RSA, 2048, r)
-
+ h, err := makeHost(*sourcePort, r)
if err != nil {
- panic(err)
+ log.Println(err)
+ return
}
- // Getting host ID from public key.
- // host ID is the hash of public key
- nodeID, _ := peer.IDFromPublicKey(pubKey)
+ if *dest == "" {
+ startPeer(ctx, h, handleStream)
+ } else {
+ rw, err := startPeerAndConnect(ctx, h, *dest)
+ if err != nil {
+ log.Println(err)
+ return
+ }
- // 0.0.0.0 will listen on any interface device
- sourceMultiAddr, _ := multiaddr.NewMultiaddr(fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", *sourcePort))
+ // Create a thread to read and write data.
+ go writeData(rw)
+ go readData(rw)
- // Adding self to the peerstore.
- ps := peerstore.NewPeerstore()
- ps.AddPrivKey(nodeID, prvKey)
- ps.AddPubKey(nodeID, pubKey)
+ }
- // Creating a new Swarm network.
- network, err := swarm.NewNetwork(context.Background(), []multiaddr.Multiaddr{sourceMultiAddr}, nodeID, ps, nil)
+ // Wait forever
+ select {}
+}
+func makeHost(port int, randomness io.Reader) (host.Host, error) {
+ // Creates a new RSA key pair for this host.
+ prvKey, _, err := crypto.GenerateKeyPairWithReader(crypto.RSA, 2048, randomness)
if err != nil {
- panic(err)
+ log.Println(err)
+ return nil, err
}
- // NewHost constructs a new *BasicHost and activates it by attaching its
- // stream and connection handlers to the given inet.Network (network).
- // Other options like NATManager can also be added here.
- // See docs: https://godoc.org/github.com/libp2p/go-libp2p/p2p/host/basic#HostOpts
- host := basichost.New(network)
+ // 0.0.0.0 will listen on any interface device.
+ sourceMultiAddr, _ := multiaddr.NewMultiaddr(fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", port))
- if *dest == "" {
- // Set a function as stream handler.
- // This function is called when a peer initiate a connection and starts a stream with this peer.
- // Only applicable on the receiving side.
- host.SetStreamHandler("/chat/1.0.0", handleStream)
+ // libp2p.New constructs a new libp2p Host.
+ // Other options can be added here.
+ return libp2p.New(
+ libp2p.ListenAddrs(sourceMultiAddr),
+ libp2p.Identity(prvKey),
+ )
+}
- fmt.Printf("Run './chat -d /ip4/127.0.0.1/tcp/%d/ipfs/%s' on another console.\n You can replace 127.0.0.1 with public IP as well.\n", *sourcePort, host.ID().Pretty())
- fmt.Printf("\nWaiting for incoming connection\n\n")
- // Hang forever
- <-make(chan struct{})
+func startPeer(_ context.Context, h host.Host, streamHandler network.StreamHandler) {
+ // Set a function as stream handler.
+ // This function is called when a peer connects, and starts a stream with this protocol.
+ // Only applies on the receiving side.
+ h.SetStreamHandler("/chat/1.0.0", streamHandler)
+
+ // Let's get the actual TCP port from our listen multiaddr, in case we're using 0 (default; random available port).
+ var port string
+ for _, la := range h.Network().ListenAddresses() {
+ if p, err := la.ValueForProtocol(multiaddr.P_TCP); err == nil {
+ port = p
+ break
+ }
+ }
- } else {
+ if port == "" {
+ log.Println("was not able to find actual local port")
+ return
+ }
- // Add destination peer multiaddress in the peerstore.
- // This will be used during connection and stream creation by libp2p.
- peerID := addAddrToPeerstore(host, *dest)
+ log.Printf("Run './chat -d /ip4/127.0.0.1/tcp/%v/p2p/%s' on another console.\n", port, h.ID())
+ log.Println("You can replace 127.0.0.1 with public IP as well.")
+ log.Println("Waiting for incoming connection")
+ log.Println()
+}
- fmt.Println("This node's multiaddress: ")
- // IP will be 0.0.0.0 (listen on any interface) and port will be 0 (choose one for me).
- // Although this node will not listen for any connection. It will just initiate a connect with
- // one of its peer and use that stream to communicate.
- fmt.Printf("%s/ipfs/%s\n", sourceMultiAddr, host.ID().Pretty())
+func startPeerAndConnect(_ context.Context, h host.Host, destination string) (*bufio.ReadWriter, error) {
+ log.Println("This node's multiaddresses:")
+ for _, la := range h.Addrs() {
+ log.Printf(" - %v\n", la)
+ }
+ log.Println()
- // Start a stream with peer with peer Id: 'peerId'.
- // Multiaddress of the destination peer is fetched from the peerstore using 'peerId'.
- s, err := host.NewStream(context.Background(), peerID, "/chat/1.0.0")
+ // Turn the destination into a multiaddr.
+ maddr, err := multiaddr.NewMultiaddr(destination)
+ if err != nil {
+ log.Println(err)
+ return nil, err
+ }
- if err != nil {
- panic(err)
- }
+ // Extract the peer ID from the multiaddr.
+ info, err := peer.AddrInfoFromP2pAddr(maddr)
+ if err != nil {
+ log.Println(err)
+ return nil, err
+ }
- // Create a buffered stream so that read and writes are non blocking.
- rw := bufio.NewReadWriter(bufio.NewReader(s), bufio.NewWriter(s))
+ // Add the destination's peer multiaddress in the peerstore.
+ // This will be used during connection and stream creation by libp2p.
+ h.Peerstore().AddAddrs(info.ID, info.Addrs, peerstore.PermanentAddrTTL)
- // Create a thread to read and write data.
- go writeData(rw)
- go readData(rw)
+ // Start a stream with the destination.
+ // The multiaddress of the destination peer is fetched from the peerstore using 'info.ID'.
+ s, err := h.NewStream(context.Background(), info.ID, "/chat/1.0.0")
+ if err != nil {
+ log.Println(err)
+ return nil, err
+ }
+ log.Println("Established connection to destination")
- // Hang forever.
- select {}
+ // Create a buffered stream so that read and writes are non-blocking.
+ rw := bufio.NewReadWriter(bufio.NewReader(s), bufio.NewWriter(s))
- }
+ return rw, nil
}
diff --git a/examples/chat/chat_test.go b/examples/chat/chat_test.go
new file mode 100644
index 0000000000..4240b477c2
--- /dev/null
+++ b/examples/chat/chat_test.go
@@ -0,0 +1,73 @@
+package main
+
+import (
+ "context"
+ "crypto/rand"
+ "fmt"
+ "log"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/network"
+
+ "github.com/libp2p/go-libp2p/examples/testutils"
+)
+
+func TestMain(t *testing.T) {
+ var h testutils.LogHarness
+ h.Expect("Waiting for incoming connection")
+ h.Expect("Established connection to destination")
+ h.Expect("Got a new stream!")
+
+ h.Run(t, func() {
+ // Create a context that will stop the hosts when the tests end
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ port1, err := testutils.FindFreePort(t, "", 5)
+ if err != nil {
+ log.Println(err)
+ return
+ }
+
+ port2, err := testutils.FindFreePort(t, "", 5)
+ if err != nil {
+ log.Println(err)
+ return
+ }
+
+ h1, err := makeHost(port1, rand.Reader)
+ if err != nil {
+ log.Println(err)
+ return
+ }
+
+ go startPeer(ctx, h1, func(network.Stream) {
+ log.Println("Got a new stream!")
+ // Sleep a bit to let h2 print the logs we're waiting for
+ time.Sleep(500 * time.Millisecond)
+ cancel() // end the test
+ })
+
+ dest := fmt.Sprintf("/ip4/127.0.0.1/tcp/%v/p2p/%s", port1, h1.ID())
+
+ h2, err := makeHost(port2, rand.Reader)
+ if err != nil {
+ log.Println(err)
+ return
+ }
+
+ go func() {
+ rw, err := startPeerAndConnect(ctx, h2, dest)
+ if err != nil {
+ log.Println(err)
+ return
+ }
+
+ rw.WriteString("test message")
+ rw.Flush()
+ }()
+
+ <-ctx.Done()
+ })
+}
diff --git a/examples/echo/.gitignore b/examples/echo/.gitignore
new file mode 100644
index 0000000000..fa11a6a9c5
--- /dev/null
+++ b/examples/echo/.gitignore
@@ -0,0 +1 @@
+echo
diff --git a/examples/echo/README.md b/examples/echo/README.md
index 2b0dc24efa..4c30609fa8 100644
--- a/examples/echo/README.md
+++ b/examples/echo/README.md
@@ -10,19 +10,19 @@ In dial mode, the node will start up, connect to the given address, open a strea
## Build
-From `go-libp2p` base folder:
+From the `go-libp2p/examples` directory run the following:
```
-> make deps
-> go build ./examples/echo
+> cd echo/
+> go build
```
## Usage
```
-> ./echo -secio -l 10000
-2017/03/15 14:11:32 I am /ip4/127.0.0.1/tcp/10000/ipfs/QmYo41GybvrXk8y8Xnm1P7pfA4YEXCpfnLyzgRPnNbG35e
-2017/03/15 14:11:32 Now run "./echo -l 10001 -d /ip4/127.0.0.1/tcp/10000/ipfs/QmYo41GybvrXk8y8Xnm1P7pfA4YEXCpfnLyzgRPnNbG35e -secio" on a different terminal
+> ./echo -l 10000
+2017/03/15 14:11:32 I am /ip4/127.0.0.1/tcp/10000/p2p/QmYo41GybvrXk8y8Xnm1P7pfA4YEXCpfnLyzgRPnNbG35e
+2017/03/15 14:11:32 Now run "./echo -l 10001 -d /ip4/127.0.0.1/tcp/10000/p2p/QmYo41GybvrXk8y8Xnm1P7pfA4YEXCpfnLyzgRPnNbG35e" on a different terminal
2017/03/15 14:11:32 listening for connections
```
@@ -31,22 +31,22 @@ The listener libp2p host will print its `Multiaddress`, which indicates how it c
Now, launch another node that talks to the listener:
```
-> ./echo -secio -l 10001 -d /ip4/127.0.0.1/tcp/10000/ipfs/QmYo41GybvrXk8y8Xnm1P7pfA4YEXCpfnLyzgRPnNbG35e
+> ./echo -l 10001 -d /ip4/127.0.0.1/tcp/10000/p2p/QmYo41GybvrXk8y8Xnm1P7pfA4YEXCpfnLyzgRPnNbG35e
```
The new node will send the message `"Hello, world!"` to the listener, which will in turn echo it over the stream and close it. The listener logs the message, and the sender logs the response.
## Details
-The `makeBasicHost()` function creates a [go-libp2p-basichost](https://godoc.org/github.com/libp2p/go-libp2p/p2p/host/basic) object. `basichost` objects wrap [go-libp2 swarms](https://godoc.org/github.com/libp2p/go-libp2p-swarm#Swarm) and should be used preferentially. A [go-libp2p-swarm Network](https://godoc.org/github.com/libp2p/go-libp2p-swarm#Network) is a `swarm` which complies to the [go-libp2p-net Network interface](https://godoc.org/github.com/libp2p/go-libp2p-net#Network) and takes care of maintaining streams, connections, multiplexing different protocols on them, handling incoming connections etc.
+The `makeBasicHost()` function creates a [go-libp2p-basichost](https://godoc.org/github.com/libp2p/go-libp2p/p2p/host/basic) object. `basichost` objects wrap [go-libp2p swarms](https://godoc.org/github.com/libp2p/go-libp2p-swarm#Swarm) and should be used preferentially. A [go-libp2p-swarm Network](https://godoc.org/github.com/libp2p/go-libp2p-swarm#Network) is a `swarm` which complies with the [go-libp2p-net Network interface](https://godoc.org/github.com/libp2p/go-libp2p-net#Network) and takes care of maintaining streams, connections, multiplexing different protocols on them, handling incoming connections, etc.
In order to create the swarm (and a `basichost`), the example needs:
-- An [ipfs-procotol ID](https://godoc.org/github.com/libp2p/go-libp2p-peer#ID) like `QmNtX1cvrm2K6mQmMEaMxAuB4rTexhd87vpYVot4sEZzxc`. The example autogenerates a key pair on every run and uses an ID extracted from the public key (the hash of the public key). When using `-secio`, it uses the key pair to encrypt communications (otherwise, it leaves the connections unencrypted).
+- An [ipfs-protocol ID](https://godoc.org/github.com/libp2p/go-libp2p-peer#ID) like `QmNtX1cvrm2K6mQmMEaMxAuB4rTexhd87vpYVot4sEZzxc`. The example autogenerates a key pair on every run and uses an ID extracted from the public key (the hash of the public key). When using `-insecure`, it leaves the connection unencrypted (otherwise, it uses the key pair to encrypt communications).
- A [Multiaddress](https://godoc.org/github.com/multiformats/go-multiaddr), which indicates how to reach this peer. There can be several of them (using different protocols or locations for example). Example: `/ip4/127.0.0.1/tcp/1234`.
-- A [go-libp2p-peerstore](https://godoc.org/github.com/libp2p/go-libp2p-peerstore), which is used as a address book which matches node IDs to the multiaddresses through which they can be contacted. This peerstore gets autopopulated when manually opening a connection (with [`Connect()`](https://godoc.org/github.com/libp2p/go-libp2p/p2p/host/basic#BasicHost.Connect). Alternatively, we can manually [`AddAddr()`](https://godoc.org/github.com/libp2p/go-libp2p-peerstore#AddrManager.AddAddr) as in the example.
+- A [go-libp2p-peerstore](https://godoc.org/github.com/libp2p/go-libp2p-peerstore), which is used as an address book which matches node IDs to the multiaddresses through which they can be contacted. This peerstore gets autopopulated when manually opening a connection (with [`Connect()`](https://godoc.org/github.com/libp2p/go-libp2p/p2p/host/basic#BasicHost.Connect)). Alternatively, we can manually [`AddAddr()`](https://godoc.org/github.com/libp2p/go-libp2p-peerstore#AddrManager.AddAddr) as in the example.
-A `basichost` can now open streams (bi-directional channel between to peers) using [NewStream](https://godoc.org/github.com/libp2p/go-libp2p/p2p/host/basic#BasicHost.NewStream) and use them to send and receive data tagged with a `Protocol.ID` (a string). The host can also listen for incoming connections for a given
+A `basichost` can now open streams (bi-directional channel between two peers) using [NewStream](https://godoc.org/github.com/libp2p/go-libp2p/p2p/host/basic#BasicHost.NewStream) and use them to send and receive data tagged with a `Protocol.ID` (a string). The host can also listen for incoming connections for a given
`Protocol` with [`SetStreamHandler()`](https://godoc.org/github.com/libp2p/go-libp2p/p2p/host/basic#BasicHost.SetStreamHandler).
The example makes use of all of this to enable communication between a listener and a sender using protocol `/echo/1.0.0` (which could be any other thing).
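+
+Putting these pieces together, here is a minimal sketch of the dialing side using the current `libp2p.New` API (the target multiaddress below is hypothetical, and error handling is abbreviated):
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/libp2p/go-libp2p"
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/libp2p/go-libp2p/core/peerstore"
+	ma "github.com/multiformats/go-multiaddr"
+)
+
+func main() {
+	// Create a host that listens on a random local TCP port.
+	h, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0"))
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer h.Close()
+
+	// Hypothetical listener address; in the example it comes from the -d flag.
+	maddr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/10000/p2p/QmYo41GybvrXk8y8Xnm1P7pfA4YEXCpfnLyzgRPnNbG35e")
+	if err != nil {
+		log.Fatal(err)
+	}
+	info, err := peer.AddrInfoFromP2pAddr(maddr)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Record the peer's addresses so the host knows how to dial it, then
+	// open a stream tagged with the /echo/1.0.0 protocol ID.
+	h.Peerstore().AddAddrs(info.ID, info.Addrs, peerstore.PermanentAddrTTL)
+	s, err := h.NewStream(context.Background(), info.ID, "/echo/1.0.0")
+	if err != nil {
+		log.Fatal(err)
+	}
+	s.Write([]byte("Hello, world!\n"))
+	s.Close()
+}
+```
+
+This mirrors what the `runSender` function in `main.go` does with the address supplied via `-d`.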
diff --git a/examples/echo/main.go b/examples/echo/main.go
index 412df18fc1..18e031a8dc 100644
--- a/examples/echo/main.go
+++ b/examples/echo/main.go
@@ -7,28 +7,58 @@ import (
"flag"
"fmt"
"io"
- "io/ioutil"
"log"
mrand "math/rand"
- golog "github.com/ipfs/go-log"
- libp2p "github.com/libp2p/go-libp2p"
- crypto "github.com/libp2p/go-libp2p-crypto"
- host "github.com/libp2p/go-libp2p-host"
- net "github.com/libp2p/go-libp2p-net"
- peer "github.com/libp2p/go-libp2p-peer"
- pstore "github.com/libp2p/go-libp2p-peerstore"
+ "github.com/libp2p/go-libp2p"
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+
+ golog "github.com/ipfs/go-log/v2"
ma "github.com/multiformats/go-multiaddr"
- gologging "github.com/whyrusleeping/go-logging"
)
-// makeBasicHost creates a LibP2P host with a random peer ID listening on the
-// given multiaddress. It will use secio if secio is true.
-func makeBasicHost(listenPort int, secio bool, randseed int64) (host.Host, error) {
+func main() {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ // LibP2P code uses golog to log messages. They log with different
+ // string IDs (i.e. "swarm"). We can control the verbosity level for
+ // all loggers with:
+ golog.SetAllLoggers(golog.LevelInfo) // Change to golog.LevelDebug for extra info
+
+ // Parse options from the command line
+ listenF := flag.Int("l", 0, "wait for incoming connections")
+ targetF := flag.String("d", "", "target peer to dial")
+ insecureF := flag.Bool("insecure", false, "use an unencrypted connection")
+ seedF := flag.Int64("seed", 0, "set random seed for id generation")
+ flag.Parse()
+
+ if *listenF == 0 {
+ log.Fatal("Please provide a port to bind on with -l")
+ }
- // If the seed is zero, use real cryptographic randomness. Otherwise, use a
- // deterministic randomness source to make generated keys stay the same
- // across multiple runs
+ // Make a host that listens on the given multiaddress
+ ha, err := makeBasicHost(*listenF, *insecureF, *seedF)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ if *targetF == "" {
+ startListener(ctx, ha, *listenF, *insecureF)
+ // Run until canceled.
+ <-ctx.Done()
+ } else {
+ runSender(ctx, ha, *targetF)
+ }
+}
+
+// makeBasicHost creates a LibP2P host with a random peer ID listening on the
+// given multiaddress. It won't encrypt the connection if insecure is true.
+func makeBasicHost(listenPort int, insecure bool, randseed int64) (host.Host, error) {
var r io.Reader
if randseed == 0 {
r = rand.Reader
@@ -46,61 +76,34 @@ func makeBasicHost(listenPort int, secio bool, randseed int64) (host.Host, error
opts := []libp2p.Option{
libp2p.ListenAddrStrings(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", listenPort)),
libp2p.Identity(priv),
+ libp2p.DisableRelay(),
}
- if !secio {
- opts = append(opts, libp2p.NoEncryption())
+ if insecure {
+ opts = append(opts, libp2p.NoSecurity)
}
- basicHost, err := libp2p.New(context.Background(), opts...)
- if err != nil {
- return nil, err
- }
+ return libp2p.New(opts...)
+}
+func getHostAddress(ha host.Host) string {
// Build host multiaddress
- hostAddr, _ := ma.NewMultiaddr(fmt.Sprintf("/ipfs/%s", basicHost.ID().Pretty()))
+ hostAddr, _ := ma.NewMultiaddr(fmt.Sprintf("/p2p/%s", ha.ID()))
// Now we can build a full multiaddress to reach this host
// by encapsulating both addresses:
- addr := basicHost.Addrs()[0]
- fullAddr := addr.Encapsulate(hostAddr)
- log.Printf("I am %s\n", fullAddr)
- if secio {
- log.Printf("Now run \"./echo -l %d -d %s -secio\" on a different terminal\n", listenPort+1, fullAddr)
- } else {
- log.Printf("Now run \"./echo -l %d -d %s\" on a different terminal\n", listenPort+1, fullAddr)
- }
-
- return basicHost, nil
+ addr := ha.Addrs()[0]
+ return addr.Encapsulate(hostAddr).String()
}
-func main() {
- // LibP2P code uses golog to log messages. They log with different
- // string IDs (i.e. "swarm"). We can control the verbosity level for
- // all loggers with:
- golog.SetAllLoggers(gologging.INFO) // Change to DEBUG for extra info
-
- // Parse options from the command line
- listenF := flag.Int("l", 0, "wait for incoming connections")
- target := flag.String("d", "", "target peer to dial")
- secio := flag.Bool("secio", false, "enable secio")
- seed := flag.Int64("seed", 0, "set random seed for id generation")
- flag.Parse()
-
- if *listenF == 0 {
- log.Fatal("Please provide a port to bind on with -l")
- }
-
- // Make a host that listens on the given multiaddress
- ha, err := makeBasicHost(*listenF, *secio, *seed)
- if err != nil {
- log.Fatal(err)
- }
+func startListener(_ context.Context, ha host.Host, listenPort int, insecure bool) {
+ fullAddr := getHostAddress(ha)
+ log.Printf("I am %s\n", fullAddr)
// Set a stream handler on host A. /echo/1.0.0 is
// a user-defined protocol name.
- ha.SetStreamHandler("/echo/1.0.0", func(s net.Stream) {
- log.Println("Got a new stream!")
+ ha.SetStreamHandler("/echo/1.0.0", func(s network.Stream) {
+ log.Println("listener received new stream")
if err := doEcho(s); err != nil {
log.Println(err)
s.Reset()
@@ -109,70 +112,72 @@ func main() {
}
})
- if *target == "" {
- log.Println("listening for connections")
- select {} // hang forever
- }
- /**** This is where the listener code ends ****/
+ log.Println("listening for connections")
- // The following code extracts target's the peer ID from the
- // given multiaddress
- ipfsaddr, err := ma.NewMultiaddr(*target)
- if err != nil {
- log.Fatalln(err)
+ if insecure {
+ log.Printf("Now run \"./echo -l %d -d %s -insecure\" on a different terminal\n", listenPort+1, fullAddr)
+ } else {
+ log.Printf("Now run \"./echo -l %d -d %s\" on a different terminal\n", listenPort+1, fullAddr)
}
+}
- pid, err := ipfsaddr.ValueForProtocol(ma.P_IPFS)
+func runSender(_ context.Context, ha host.Host, targetPeer string) {
+ fullAddr := getHostAddress(ha)
+ log.Printf("I am %s\n", fullAddr)
+
+ // Turn the targetPeer into a multiaddr.
+ maddr, err := ma.NewMultiaddr(targetPeer)
if err != nil {
- log.Fatalln(err)
+ log.Println(err)
+ return
}
- peerid, err := peer.IDB58Decode(pid)
+ // Extract the peer ID from the multiaddr.
+ info, err := peer.AddrInfoFromP2pAddr(maddr)
if err != nil {
- log.Fatalln(err)
+ log.Println(err)
+ return
}
- // Decapsulate the /ipfs/ part from the target
- // /ip4//ipfs/ becomes /ip4/
- targetPeerAddr, _ := ma.NewMultiaddr(
- fmt.Sprintf("/ipfs/%s", peer.IDB58Encode(peerid)))
- targetAddr := ipfsaddr.Decapsulate(targetPeerAddr)
-
- // We have a peer ID and a targetAddr so we add it to the peerstore
+ // We have a peer ID and a targetAddr, so we add it to the peerstore
// so LibP2P knows how to contact it
- ha.Peerstore().AddAddr(peerid, targetAddr, pstore.PermanentAddrTTL)
+ ha.Peerstore().AddAddrs(info.ID, info.Addrs, peerstore.PermanentAddrTTL)
- log.Println("opening stream")
+ log.Println("sender opening stream")
// make a new stream from host B to host A
// it should be handled on host A by the handler we set above because
// we use the same /echo/1.0.0 protocol
- s, err := ha.NewStream(context.Background(), peerid, "/echo/1.0.0")
+ s, err := ha.NewStream(context.Background(), info.ID, "/echo/1.0.0")
if err != nil {
- log.Fatalln(err)
+ log.Println(err)
+ return
}
+ log.Println("sender saying hello")
_, err = s.Write([]byte("Hello, world!\n"))
if err != nil {
- log.Fatalln(err)
+ log.Println(err)
+ return
}
- out, err := ioutil.ReadAll(s)
+ out, err := io.ReadAll(s)
if err != nil {
- log.Fatalln(err)
+ log.Println(err)
+ return
}
log.Printf("read reply: %q\n", out)
}
// doEcho reads a line of data from a stream and writes it back
-func doEcho(s net.Stream) error {
+func doEcho(s network.Stream) error {
buf := bufio.NewReader(s)
str, err := buf.ReadString('\n')
if err != nil {
return err
}
- log.Printf("read: %s\n", str)
+ log.Printf("read: %s", str)
_, err = s.Write([]byte(str))
return err
}
diff --git a/examples/echo/main_test.go b/examples/echo/main_test.go
new file mode 100644
index 0000000000..b57f094c9f
--- /dev/null
+++ b/examples/echo/main_test.go
@@ -0,0 +1,58 @@
+package main
+
+import (
+ "context"
+ "log"
+ "testing"
+
+ "github.com/libp2p/go-libp2p/examples/testutils"
+)
+
+func TestMain(t *testing.T) {
+ var h testutils.LogHarness
+ h.Expect("listening for connections")
+ h.Expect("sender opening stream")
+ h.Expect("sender saying hello")
+ h.Expect("listener received new stream")
+ h.Expect("read: Hello, world!")
+ h.Expect(`read reply: "Hello, world!\n"`)
+
+ h.Run(t, func() {
+ // Create a context that will stop the hosts when the tests end
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ // Get a tcp port for the listener
+ lport, err := testutils.FindFreePort(t, "", 5)
+ if err != nil {
+ log.Println(err)
+ return
+ }
+
+ // Get a tcp port for the sender
+ sport, err := testutils.FindFreePort(t, "", 5)
+ if err != nil {
+ log.Println(err)
+ return
+ }
+
+ // Make listener
+ lh, err := makeBasicHost(lport, true, 1)
+ if err != nil {
+ log.Println(err)
+ return
+ }
+
+ startListener(ctx, lh, lport, true)
+
+ // Make sender
+ listenAddr := getHostAddress(lh)
+ sh, err := makeBasicHost(sport, true, 2)
+ if err != nil {
+ log.Println(err)
+ return
+ }
+
+ runSender(ctx, sh, listenAddr)
+ })
+}
diff --git a/examples/go.mod b/examples/go.mod
new file mode 100644
index 0000000000..1e5e5315b3
--- /dev/null
+++ b/examples/go.mod
@@ -0,0 +1,142 @@
+module github.com/libp2p/go-libp2p/examples
+
+go 1.24
+
+require (
+ github.com/caddyserver/certmagic v0.21.6
+ github.com/gogo/protobuf v1.3.2
+ github.com/google/uuid v1.6.0
+ github.com/ipfs/go-datastore v0.6.0
+ github.com/ipfs/go-log/v2 v2.5.1
+ github.com/ipshipyard/p2p-forge v0.5.0
+ github.com/libp2p/go-libp2p v0.41.1
+ github.com/libp2p/go-libp2p-kad-dht v0.28.1
+ github.com/multiformats/go-multiaddr v0.15.0
+ github.com/prometheus/client_golang v1.21.1
+)
+
+require (
+ github.com/benbjohnson/clock v1.3.5 // indirect
+ github.com/beorn7/perks v1.0.1 // indirect
+ github.com/caddyserver/zerossl v0.1.3 // indirect
+ github.com/cespare/xxhash/v2 v2.3.0 // indirect
+ github.com/containerd/cgroups v1.1.0 // indirect
+ github.com/coreos/go-systemd/v22 v22.5.0 // indirect
+ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
+ github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
+ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
+ github.com/docker/go-units v0.5.0 // indirect
+ github.com/elastic/gosigar v0.14.3 // indirect
+ github.com/flynn/noise v1.1.0 // indirect
+ github.com/francoispqt/gojay v1.2.13 // indirect
+ github.com/go-logr/logr v1.4.2 // indirect
+ github.com/go-logr/stdr v1.2.2 // indirect
+ github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
+ github.com/godbus/dbus/v5 v5.1.0 // indirect
+ github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
+ github.com/google/gopacket v1.1.19 // indirect
+ github.com/google/pprof v0.0.0-20250208200701-d0013a598941 // indirect
+ github.com/gorilla/websocket v1.5.3 // indirect
+ github.com/hashicorp/errwrap v1.1.0 // indirect
+ github.com/hashicorp/go-multierror v1.1.1 // indirect
+ github.com/hashicorp/golang-lru v1.0.2 // indirect
+ github.com/huin/goupnp v1.3.0 // indirect
+ github.com/ipfs/boxo v0.25.0 // indirect
+ github.com/ipfs/go-cid v0.5.0 // indirect
+ github.com/ipld/go-ipld-prime v0.21.0 // indirect
+ github.com/jackpal/go-nat-pmp v1.0.2 // indirect
+ github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
+ github.com/jbenet/goprocess v0.1.4 // indirect
+ github.com/klauspost/compress v1.18.0 // indirect
+ github.com/klauspost/cpuid/v2 v2.2.10 // indirect
+ github.com/koron/go-ssdp v0.0.5 // indirect
+ github.com/libdns/libdns v0.2.2 // indirect
+ github.com/libp2p/go-buffer-pool v0.1.0 // indirect
+ github.com/libp2p/go-cidranger v1.1.0 // indirect
+ github.com/libp2p/go-flow-metrics v0.2.0 // indirect
+ github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
+ github.com/libp2p/go-libp2p-kbucket v0.6.4 // indirect
+ github.com/libp2p/go-libp2p-record v0.2.0 // indirect
+ github.com/libp2p/go-libp2p-routing-helpers v0.7.4 // indirect
+ github.com/libp2p/go-msgio v0.3.0 // indirect
+ github.com/libp2p/go-netroute v0.2.2 // indirect
+ github.com/libp2p/go-reuseport v0.4.0 // indirect
+ github.com/libp2p/go-yamux/v5 v5.0.0 // indirect
+ github.com/libp2p/zeroconf/v2 v2.2.0 // indirect
+ github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
+ github.com/mattn/go-isatty v0.0.20 // indirect
+ github.com/mholt/acmez/v3 v3.0.0 // indirect
+ github.com/miekg/dns v1.1.64 // indirect
+ github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
+ github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
+ github.com/minio/sha256-simd v1.0.1 // indirect
+ github.com/mr-tron/base58 v1.2.0 // indirect
+ github.com/multiformats/go-base32 v0.1.0 // indirect
+ github.com/multiformats/go-base36 v0.2.0 // indirect
+ github.com/multiformats/go-multiaddr-dns v0.4.1 // indirect
+ github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
+ github.com/multiformats/go-multibase v0.2.0 // indirect
+ github.com/multiformats/go-multicodec v0.9.0 // indirect
+ github.com/multiformats/go-multihash v0.2.3 // indirect
+ github.com/multiformats/go-multistream v0.6.0 // indirect
+ github.com/multiformats/go-varint v0.0.7 // indirect
+ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+ github.com/onsi/ginkgo/v2 v2.22.2 // indirect
+ github.com/opencontainers/runtime-spec v1.2.0 // indirect
+ github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
+ github.com/pion/datachannel v1.5.10 // indirect
+ github.com/pion/dtls/v2 v2.2.12 // indirect
+ github.com/pion/dtls/v3 v3.0.4 // indirect
+ github.com/pion/ice/v4 v4.0.8 // indirect
+ github.com/pion/interceptor v0.1.37 // indirect
+ github.com/pion/logging v0.2.3 // indirect
+ github.com/pion/mdns/v2 v2.0.7 // indirect
+ github.com/pion/randutil v0.1.0 // indirect
+ github.com/pion/rtcp v1.2.15 // indirect
+ github.com/pion/rtp v1.8.11 // indirect
+ github.com/pion/sctp v1.8.37 // indirect
+ github.com/pion/sdp/v3 v3.0.10 // indirect
+ github.com/pion/srtp/v3 v3.0.4 // indirect
+ github.com/pion/stun v0.6.1 // indirect
+ github.com/pion/stun/v3 v3.0.0 // indirect
+ github.com/pion/transport/v2 v2.2.10 // indirect
+ github.com/pion/transport/v3 v3.0.7 // indirect
+ github.com/pion/turn/v4 v4.0.0 // indirect
+ github.com/pion/webrtc/v4 v4.0.10 // indirect
+ github.com/pkg/errors v0.9.1 // indirect
+ github.com/polydawn/refmt v0.89.0 // indirect
+ github.com/prometheus/client_model v0.6.1 // indirect
+ github.com/prometheus/common v0.62.0 // indirect
+ github.com/prometheus/procfs v0.15.1 // indirect
+ github.com/quic-go/qpack v0.5.1 // indirect
+ github.com/quic-go/quic-go v0.50.1 // indirect
+ github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 // indirect
+ github.com/raulk/go-watchdog v1.3.0 // indirect
+ github.com/spaolacci/murmur3 v1.1.0 // indirect
+ github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect
+ github.com/wlynxg/anet v0.0.5 // indirect
+ github.com/zeebo/blake3 v0.2.4 // indirect
+ go.opencensus.io v0.24.0 // indirect
+ go.opentelemetry.io/auto/sdk v1.1.0 // indirect
+ go.opentelemetry.io/otel v1.33.0 // indirect
+ go.opentelemetry.io/otel/metric v1.33.0 // indirect
+ go.opentelemetry.io/otel/trace v1.33.0 // indirect
+ go.uber.org/dig v1.18.0 // indirect
+ go.uber.org/fx v1.23.0 // indirect
+ go.uber.org/mock v0.5.0 // indirect
+ go.uber.org/multierr v1.11.0 // indirect
+ go.uber.org/zap v1.27.0 // indirect
+ go.uber.org/zap/exp v0.3.0 // indirect
+ golang.org/x/crypto v0.35.0 // indirect
+ golang.org/x/exp v0.0.0-20250218142911-aa4b98e5adaa // indirect
+ golang.org/x/mod v0.23.0 // indirect
+ golang.org/x/net v0.35.0 // indirect
+ golang.org/x/sync v0.11.0 // indirect
+ golang.org/x/sys v0.30.0 // indirect
+ golang.org/x/text v0.22.0 // indirect
+ golang.org/x/time v0.8.0 // indirect
+ golang.org/x/tools v0.30.0 // indirect
+ gonum.org/v1/gonum v0.15.1 // indirect
+ google.golang.org/protobuf v1.36.6 // indirect
+ lukechampine.com/blake3 v1.4.0 // indirect
+)
diff --git a/examples/go.sum b/examples/go.sum
new file mode 100644
index 0000000000..057983ce62
--- /dev/null
+++ b/examples/go.sum
@@ -0,0 +1,672 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo=
+dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
+dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
+dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
+dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
+git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
+github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
+github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
+github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
+github.com/caddyserver/certmagic v0.21.6 h1:1th6GfprVfsAtFNOu4StNMF5IxK5XiaI0yZhAHlZFPE=
+github.com/caddyserver/certmagic v0.21.6/go.mod h1:n1sCo7zV1Ez2j+89wrzDxo4N/T1Ws/Vx8u5NvuBFabw=
+github.com/caddyserver/zerossl v0.1.3 h1:onS+pxp3M8HnHpN5MMbOMyNjmTheJyWRaZYwn+YTAyA=
+github.com/caddyserver/zerossl v0.1.3/go.mod h1:CxA0acn7oEGO6//4rtrRjYgEoa4MFw/XofZnrYwGqG4=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
+github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
+github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
+github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU=
+github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U=
+github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8=
+github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
+github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
+github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
+github.com/elastic/gosigar v0.14.3 h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/uo=
+github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
+github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
+github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
+github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
+github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
+github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
+github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
+github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
+github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
+github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0=
+github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
+github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
+github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
+github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
+github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
+github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20250208200701-d0013a598941 h1:43XjGa6toxLpeksjcxs1jIoIyr+vUfOqY2c6HB4bpoc=
+github.com/google/pprof v0.0.0-20250208200701-d0013a598941/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
+github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c h1:7lF+Vz0LqiRidnzC1Oq86fpX1q/iEv2KJdrCtttYjT4=
+github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
+github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
+github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
+github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
+github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
+github.com/ipfs/boxo v0.25.0 h1:FNZaKVirUDafGz3Y9sccztynAUazs9GfSapLk/5c7is=
+github.com/ipfs/boxo v0.25.0/go.mod h1:MQVkL3V8RfuIsn+aajCR0MXLl8nRlz+5uGlHMWFVyuE=
+github.com/ipfs/go-block-format v0.2.0 h1:ZqrkxBA2ICbDRbK8KJs/u0O3dlp6gmAuuXUJNiW1Ycs=
+github.com/ipfs/go-block-format v0.2.0/go.mod h1:+jpL11nFx5A/SPpsoBn6Bzkra/zaArfSmsknbPMYgzM=
+github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg=
+github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk=
+github.com/ipfs/go-datastore v0.6.0 h1:JKyz+Gvz1QEZw0LsX1IBn+JFCJQH4SJVFtM4uWU0Myk=
+github.com/ipfs/go-datastore v0.6.0/go.mod h1:rt5M3nNbSO/8q1t4LNkLyUwRs8HupMeN/8O4Vn9YAT8=
+github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk=
+github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps=
+github.com/ipfs/go-ipfs-util v0.0.3 h1:2RFdGez6bu2ZlZdI+rWfIdbQb1KudQp3VGwPtdNCmE0=
+github.com/ipfs/go-ipfs-util v0.0.3/go.mod h1:LHzG1a0Ig4G+iZ26UUOMjHd+lfM84LZCrn17xAKWBvs=
+github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY=
+github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI=
+github.com/ipfs/go-test v0.0.4 h1:DKT66T6GBB6PsDFLoO56QZPrOmzJkqU1FZH5C9ySkew=
+github.com/ipfs/go-test v0.0.4/go.mod h1:qhIM1EluEfElKKM6fnWxGn822/z9knUGM1+I/OAQNKI=
+github.com/ipld/go-ipld-prime v0.21.0 h1:n4JmcpOlPDIxBcY037SVfpd1G+Sj1nKZah0m6QH9C2E=
+github.com/ipld/go-ipld-prime v0.21.0/go.mod h1:3RLqy//ERg/y5oShXXdx5YIp50cFGOanyMctpPjsvxQ=
+github.com/ipshipyard/p2p-forge v0.5.0 h1:U1ta2RYkSOLPXNbeCWGT5iv5t5TS1GNDvE1hSupwPZA=
+github.com/ipshipyard/p2p-forge v0.5.0/go.mod h1:GNDXM2CR8KRS8mJGw7ARIRVlrG9NH8MdewgNVfIIByA=
+github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
+github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
+github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA=
+github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk=
+github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk=
+github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o=
+github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4=
+github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
+github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
+github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
+github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
+github.com/koron/go-ssdp v0.0.5 h1:E1iSMxIs4WqxTbIBLtmNBeOOC+1sCIXQeqTWVnpmwhk=
+github.com/koron/go-ssdp v0.0.5/go.mod h1:Qm59B7hpKpDqfyRNWRNr00jGwLdXjDyZh6y7rH6VS0w=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/libdns/libdns v0.2.2 h1:O6ws7bAfRPaBsgAYt8MDe2HcNBGC29hkZ9MX2eUSX3s=
+github.com/libdns/libdns v0.2.2/go.mod h1:4Bj9+5CQiNMVGf87wjX4CY3HQJypUHRuLvlsfsZqLWQ=
+github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
+github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
+github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c=
+github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic=
+github.com/libp2p/go-flow-metrics v0.2.0 h1:EIZzjmeOE6c8Dav0sNv35vhZxATIXWZg6j/C08XmmDw=
+github.com/libp2p/go-flow-metrics v0.2.0/go.mod h1:st3qqfu8+pMfh+9Mzqb2GTiwrAGjIPszEjZmtksN8Jc=
+github.com/libp2p/go-libp2p v0.41.1 h1:8ecNQVT5ev/jqALTvisSJeVNvXYJyK4NhQx1nNRXQZE=
+github.com/libp2p/go-libp2p v0.41.1/go.mod h1:DcGTovJzQl/I7HMrby5ZRjeD0kQkGiy+9w6aEkSZpRI=
+github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94=
+github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
+github.com/libp2p/go-libp2p-kad-dht v0.28.1 h1:DVTfzG8Ybn88g9RycIq47evWCRss5f0Wm8iWtpwyHso=
+github.com/libp2p/go-libp2p-kad-dht v0.28.1/go.mod h1:0wHURlSFdAC42+wF7GEmpLoARw8JuS8do2guCtc/Y/w=
+github.com/libp2p/go-libp2p-kbucket v0.6.4 h1:OjfiYxU42TKQSB8t8WYd8MKhYhMJeO2If+NiuKfb6iQ=
+github.com/libp2p/go-libp2p-kbucket v0.6.4/go.mod h1:jp6w82sczYaBsAypt5ayACcRJi0lgsba7o4TzJKEfWA=
+github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0=
+github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvNDFGKX7QdlpYUcwk=
+github.com/libp2p/go-libp2p-routing-helpers v0.7.4 h1:6LqS1Bzn5CfDJ4tzvP9uwh42IB7TJLNFJA6dEeGBv84=
+github.com/libp2p/go-libp2p-routing-helpers v0.7.4/go.mod h1:we5WDj9tbolBXOuF1hGOkR+r7Uh1408tQbAKaT5n1LE=
+github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA=
+github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg=
+github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0=
+github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM=
+github.com/libp2p/go-netroute v0.2.2 h1:Dejd8cQ47Qx2kRABg6lPwknU7+nBnFRpko45/fFPuZ8=
+github.com/libp2p/go-netroute v0.2.2/go.mod h1:Rntq6jUAH0l9Gg17w5bFGhcC9a+vk4KNXs6s7IljKYE=
+github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s=
+github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU=
+github.com/libp2p/go-yamux/v5 v5.0.0 h1:2djUh96d3Jiac/JpGkKs4TO49YhsfLopAoryfPmf+Po=
+github.com/libp2p/go-yamux/v5 v5.0.0/go.mod h1:en+3cdX51U0ZslwRdRLrvQsdayFt3TSUKvBGErzpWbU=
+github.com/libp2p/zeroconf/v2 v2.2.0 h1:Cup06Jv6u81HLhIj1KasuNM/RHHrJ8T7wOTS4+Tv53Q=
+github.com/libp2p/zeroconf/v2 v2.2.0/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs=
+github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
+github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk=
+github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
+github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/mholt/acmez/v3 v3.0.0 h1:r1NcjuWR0VaKP2BTjDK9LRFBw/WvURx3jlaEUl9Ht8E=
+github.com/mholt/acmez/v3 v3.0.0/go.mod h1:L1wOU06KKvq7tswuMDwKdcHeKpFFgkppZy/y0DFxagQ=
+github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
+github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
+github.com/miekg/dns v1.1.64 h1:wuZgD9wwCE6XMT05UU/mlSko71eRSXEAm2EbjQXLKnQ=
+github.com/miekg/dns v1.1.64/go.mod h1:Dzw9769uoKVaLuODMDZz9M6ynFU6Em65csPuoi8G0ck=
+github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
+github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms=
+github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc=
+github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU=
+github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc=
+github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s=
+github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
+github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
+github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
+github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
+github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
+github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
+github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
+github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
+github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
+github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
+github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo=
+github.com/multiformats/go-multiaddr v0.15.0 h1:zB/HeaI/apcZiTDwhY5YqMvNVl/oQYvs3XySU+qeAVo=
+github.com/multiformats/go-multiaddr v0.15.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0=
+github.com/multiformats/go-multiaddr-dns v0.4.1 h1:whi/uCLbDS3mSEUMb1MsoT4uzUeZB0N32yzufqS0i5M=
+github.com/multiformats/go-multiaddr-dns v0.4.1/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc=
+github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=
+github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo=
+github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
+github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
+github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg=
+github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k=
+github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
+github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
+github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
+github.com/multiformats/go-multistream v0.6.0 h1:ZaHKbsL404720283o4c/IHQXiS6gb8qAN5EIJ4PN5EA=
+github.com/multiformats/go-multistream v0.6.0/go.mod h1:MOyoG5otO24cHIg8kf9QW2/NozURlkP/rvi2FQJyCPg=
+github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
+github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
+github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
+github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU=
+github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk=
+github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
+github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
+github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
+github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
+github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
+github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
+github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o=
+github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oLo8Rs4Py/M=
+github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s=
+github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk=
+github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE=
+github.com/pion/dtls/v3 v3.0.4 h1:44CZekewMzfrn9pmGrj5BNnTMDCFwr+6sLH+cCuLM7U=
+github.com/pion/dtls/v3 v3.0.4/go.mod h1:R373CsjxWqNPf6MEkfdy3aSe9niZvL/JaKlGeFphtMg=
+github.com/pion/ice/v4 v4.0.8 h1:ajNx0idNG+S+v9Phu4LSn2cs8JEfTsA1/tEjkkAVpFY=
+github.com/pion/ice/v4 v4.0.8/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw=
+github.com/pion/interceptor v0.1.37 h1:aRA8Zpab/wE7/c0O3fh1PqY0AJI3fCSEM5lRWJVorwI=
+github.com/pion/interceptor v0.1.37/go.mod h1:JzxbJ4umVTlZAf+/utHzNesY8tmRkM2lVmkS82TTj8Y=
+github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms=
+github.com/pion/logging v0.2.3 h1:gHuf0zpoh1GW67Nr6Gj4cv5Z9ZscU7g/EaoC/Ke/igI=
+github.com/pion/logging v0.2.3/go.mod h1:z8YfknkquMe1csOrxK5kc+5/ZPAzMxbKLX5aXpbpC90=
+github.com/pion/mdns/v2 v2.0.7 h1:c9kM8ewCgjslaAmicYMFQIde2H9/lrZpjBkN8VwoVtM=
+github.com/pion/mdns/v2 v2.0.7/go.mod h1:vAdSYNAT0Jy3Ru0zl2YiW3Rm/fJCwIeM0nToenfOJKA=
+github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA=
+github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8=
+github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo=
+github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0=
+github.com/pion/rtp v1.8.11 h1:17xjnY5WO5hgO6SD3/NTIUPvSFw/PbLsIJyz1r1yNIk=
+github.com/pion/rtp v1.8.11/go.mod h1:8uMBJj32Pa1wwx8Fuv/AsFhn8jsgw+3rUC2PfoBZ8p4=
+github.com/pion/sctp v1.8.37 h1:ZDmGPtRPX9mKCiVXtMbTWybFw3z/hVKAZgU81wcOrqs=
+github.com/pion/sctp v1.8.37/go.mod h1:cNiLdchXra8fHQwmIoqw0MbLLMs+f7uQ+dGMG2gWebE=
+github.com/pion/sdp/v3 v3.0.10 h1:6MChLE/1xYB+CjumMw+gZ9ufp2DPApuVSnDT8t5MIgA=
+github.com/pion/sdp/v3 v3.0.10/go.mod h1:88GMahN5xnScv1hIMTqLdu/cOcUkj6a9ytbncwMCq2E=
+github.com/pion/srtp/v3 v3.0.4 h1:2Z6vDVxzrX3UHEgrUyIGM4rRouoC7v+NiF1IHtp9B5M=
+github.com/pion/srtp/v3 v3.0.4/go.mod h1:1Jx3FwDoxpRaTh1oRV8A/6G1BnFL+QI82eK4ms8EEJQ=
+github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4=
+github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8=
+github.com/pion/stun/v3 v3.0.0 h1:4h1gwhWLWuZWOJIJR9s2ferRO+W3zA/b6ijOI6mKzUw=
+github.com/pion/stun/v3 v3.0.0/go.mod h1:HvCN8txt8mwi4FBvS3EmDghW6aQJ24T+y+1TKjB5jyU=
+github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g=
+github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0=
+github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q=
+github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E=
+github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0=
+github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo=
+github.com/pion/turn/v4 v4.0.0 h1:qxplo3Rxa9Yg1xXDxxH8xaqcyGUtbHYw4QSCvmFWvhM=
+github.com/pion/turn/v4 v4.0.0/go.mod h1:MuPDkm15nYSklKpN8vWJ9W2M0PlyQZqYt1McGuxG7mA=
+github.com/pion/webrtc/v4 v4.0.10 h1:Hq/JLjhqLxi+NmCtE8lnRPDr8H4LcNvwg8OxVcdv56Q=
+github.com/pion/webrtc/v4 v4.0.10/go.mod h1:ViHLVaNpiuvaH8pdiuQxuA9awuE6KVzAXx3vVWilOck=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4=
+github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw=
+github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk=
+github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
+github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
+github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
+github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
+github.com/quic-go/quic-go v0.50.1 h1:unsgjFIUqW8a2oopkY7YNONpV1gYND6Nt9hnt1PN94Q=
+github.com/quic-go/quic-go v0.50.1/go.mod h1:Vim6OmUvlYdwBhXP9ZVrtGmCMWa3wEqhq3NgYrI8b4E=
+github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 h1:4WFk6u3sOT6pLa1kQ50ZVdm8BQFgJNA117cepZxtLIg=
+github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66/go.mod h1:Vp72IJajgeOL6ddqrAhmp7IM9zbTcgkQxD/YdxrVwMw=
+github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk=
+github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
+github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
+github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
+github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
+github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0=
+github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
+github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
+github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw=
+github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI=
+github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU=
+github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag=
+github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg=
+github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw=
+github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y=
+github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
+github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q=
+github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ=
+github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I=
+github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0=
+github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ=
+github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk=
+github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
+github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs=
+github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
+github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg=
+github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM=
+github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
+github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
+github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
+github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
+github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
+github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
+github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ=
+github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw=
+github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k=
+github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc=
+github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
+github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU=
+github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+github.com/zeebo/assert v1.1.0 h1:hU1L1vLTHsnO8x8c9KAR5GmM5QscxHg5RNU5z5qbUWY=
+github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
+github.com/zeebo/blake3 v0.2.4 h1:KYQPkhpRtcqh0ssGYcKLG1JYvddkEA8QwCM/yBqhaZI=
+github.com/zeebo/blake3 v0.2.4/go.mod h1:7eeQ6d2iXWRGF6npfaxl2CU+xy2Fjo2gxeyZGCRUjcE=
+github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo=
+github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4=
+go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
+go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
+go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
+go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
+go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw=
+go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I=
+go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ=
+go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M=
+go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s=
+go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw=
+go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
+go.uber.org/fx v1.23.0 h1:lIr/gYWQGfTwGcSXWXu4vP5Ws6iqnNEIY+F/aFzCKTg=
+go.uber.org/fx v1.23.0/go.mod h1:o/D9n+2mLP6v1EG+qsdT1O8wKopYAsqZasju97SDFCU=
+go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU=
+go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM=
+go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
+go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
+go.uber.org/zap/exp v0.3.0 h1:6JYzdifzYkGmTdRR59oYH+Ng7k49H9qVpWwNSsGJj3U=
+go.uber.org/zap/exp v0.3.0/go.mod h1:5I384qq7XGxYyByIhHm6jg5CHkGY0nsTfbDLgDDlgJQ=
+go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
+golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
+golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
+golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
+golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
+golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs=
+golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20250218142911-aa4b98e5adaa h1:t2QcU6V556bFjYgu4L6C+6VrCPyJZ+eyRsABUPs1mz4=
+golang.org/x/exp v0.0.0-20250218142911-aa4b98e5adaa/go.mod h1:BHOTPb3L19zxehTsLoJXVaTktb06DFgmdW6Wb9s8jqk=
+golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM=
+golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
+golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
+golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8=
+golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
+golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
+golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
+golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
+golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
+golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
+golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY=
+golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gonum.org/v1/gonum v0.15.1 h1:FNy7N6OUZVUaWG9pTiD+jlhdQ3lMP+/LcTpJ6+a8sQ0=
+gonum.org/v1/gonum v0.15.1/go.mod h1:eZTZuRFrzu5pcyjN5wJhcIhnUdNijYxX1T2IcrOGY0o=
+google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
+google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
+google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
+google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
+google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
+google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
+honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+lukechampine.com/blake3 v1.4.0 h1:xDbKOZCVbnZsfzM6mHSYcGRHZ3YrLDzqz8XnV4uaD5w=
+lukechampine.com/blake3 v1.4.0/go.mod h1:MQJNQCTnR+kwOP/JEZSxj3MaQjp80FOFSNMMHXcSeX0=
+sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
+sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
diff --git a/examples/http-proxy/.gitignore b/examples/http-proxy/.gitignore
new file mode 100644
index 0000000000..50aa7945cc
--- /dev/null
+++ b/examples/http-proxy/.gitignore
@@ -0,0 +1 @@
+http-proxy
diff --git a/examples/http-proxy/README.md b/examples/http-proxy/README.md
index 063cefc925..8653d191a7 100644
--- a/examples/http-proxy/README.md
+++ b/examples/http-proxy/README.md
@@ -1,6 +1,6 @@
# HTTP proxy service with libp2p
-This examples shows how to create a simple HTTP proxy service with libp2p:
+This example shows how to create a simple HTTP proxy service with libp2p:
```
XXX
@@ -18,15 +18,15 @@ This examples shows how to create a simple HTTP proxy service with libp2p:
In order to proxy an HTTP request, we create a local peer which listens on `localhost:9900`. HTTP requests performed to that address are tunneled via a libp2p stream to a remote peer, which then performs the HTTP requests and sends the response back to the local peer, which relays it to the user.
-Note that this is a very simple approach to a proxy, and does not perform any header management, nor supports HTTPS. The `proxy.go` code is thoroughly commeted, detailing what is happening in every step.
+Note that this is a very simple approach to a proxy; it does not perform any header management, nor does it support HTTPS. The `proxy.go` code is thoroughly commented, detailing what is happening in every step.
## Build
-From `go-libp2p` base folder:
+From the `go-libp2p/examples` directory run the following:
```
-> make deps
-> go build ./examples/http-proxy
+> cd http-proxy/
+> go build
```
## Usage
@@ -37,22 +37,22 @@ First run the "remote" peer as follows. It will print a local peer address. If y
> ./http-proxy
Proxy server is ready
libp2p-peer addresses:
-/ip4/127.0.0.1/tcp/12000/ipfs/QmddTrQXhA9AkCpXPTkcY7e22NK73TwkUms3a44DhTKJTD
+/ip4/127.0.0.1/tcp/12000/p2p/QmddTrQXhA9AkCpXPTkcY7e22NK73TwkUms3a44DhTKJTD
```
-The run the local peer, indicating that it will need to forward http requests to the remote peer as follows:
+Then run the local peer, indicating that it should forward HTTP requests to the remote peer, as follows:
```
-> ./http-proxy -d /ip4/127.0.0.1/tcp/12000/ipfs/QmddTrQXhA9AkCpXPTkcY7e22NK73TwkUms3a44DhTKJTD
+> ./http-proxy -d /ip4/127.0.0.1/tcp/12000/p2p/QmddTrQXhA9AkCpXPTkcY7e22NK73TwkUms3a44DhTKJTD
Proxy server is ready
libp2p-peer addresses:
-/ip4/127.0.0.1/tcp/12001/ipfs/Qmaa2AYTha1UqcFVX97p9R1UP7vbzDLY7bqWsZw1135QvN
+/ip4/127.0.0.1/tcp/12001/p2p/Qmaa2AYTha1UqcFVX97p9R1UP7vbzDLY7bqWsZw1135QvN
proxy listening on 127.0.0.1:9900
```
-As you can see, the proxy prints the listening address `127.0.0.1:9900`. You can now use this address as proxy, for example with `curl`:
+As you can see, the proxy prints the listening address `127.0.0.1:9900`. You can now use this address as a proxy, for example with `curl`:
```
> curl -x "127.0.0.1:9900" "http://ipfs.io/ipfs/QmfUX75pGRBRDnjeoMkQzuQczuCup2aYbeLxz5NzeSu9G6"
it works!
```
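The curl invocation above can also be reproduced from Go with nothing but the standard library. A minimal sketch, using the default proxy address the example prints:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// The local peer's proxy address printed by ./http-proxy above.
	proxyURL, err := url.Parse("http://127.0.0.1:9900")
	if err != nil {
		panic(err)
	}
	client := &http.Client{
		Transport: &http.Transport{Proxy: http.ProxyURL(proxyURL)},
	}
	resp, err := client.Get("http://ipfs.io/ipfs/QmfUX75pGRBRDnjeoMkQzuQczuCup2aYbeLxz5NzeSu9G6")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // prints "it works!" as in the curl run
}
```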
diff --git a/examples/http-proxy/proxy.go b/examples/http-proxy/proxy.go
index f8206a8ca1..f96d4b8b00 100644
--- a/examples/http-proxy/proxy.go
+++ b/examples/http-proxy/proxy.go
@@ -11,17 +11,14 @@ import (
"strings"
// We need to import libp2p's libraries that we use in this project.
- // In order to work, these libraries need to be rewritten by gx-go.
- crypto "github.com/libp2p/go-libp2p-crypto"
- host "github.com/libp2p/go-libp2p-host"
- inet "github.com/libp2p/go-libp2p-net"
- peer "github.com/libp2p/go-libp2p-peer"
- ps "github.com/libp2p/go-libp2p-peerstore"
- swarm "github.com/libp2p/go-libp2p-swarm"
- ma "github.com/multiformats/go-multiaddr"
- manet "github.com/multiformats/go-multiaddr-net"
+ "github.com/libp2p/go-libp2p"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
- bhost "github.com/libp2p/go-libp2p/p2p/host/basic"
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
)
// Protocol defines the libp2p protocol that we will use for the libp2p proxy
@@ -33,27 +30,11 @@ const Protocol = "/proxy-example/0.0.1"
// makeRandomHost creates a libp2p host with a randomly generated identity.
// This step is described in depth in other tutorials.
func makeRandomHost(port int) host.Host {
- priv, pub, err := crypto.GenerateKeyPair(crypto.RSA, 2048)
- if err != nil {
- log.Fatalln(err)
- }
- pid, err := peer.IDFromPublicKey(pub)
- if err != nil {
- log.Fatalln(err)
- }
- listen, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", port))
- if err != nil {
- log.Fatalln(err)
- }
- ps := ps.NewPeerstore()
- ps.AddPrivKey(pid, priv)
- ps.AddPubKey(pid, pub)
- n, err := swarm.NewNetwork(context.Background(),
- []ma.Multiaddr{listen}, pid, ps, nil)
+ host, err := libp2p.New(libp2p.ListenAddrStrings(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", port)))
if err != nil {
log.Fatalln(err)
}
- return bhost.New(n)
+ return host
}
// ProxyService provides HTTP proxying on top of libp2p by launching an
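The hunk above collapses the old gx-era key, peerstore, and swarm boilerplate into a single `libp2p.New` call, which generates a fresh random identity on every run. As an aside (a sketch, not part of this patch): if the proxy needed a stable peer ID across restarts, the constructor can be handed an explicit key. The helper name below is ours; `priv` could come from `crypto.GenerateKeyPair(crypto.Ed25519, -1)` in `github.com/libp2p/go-libp2p/core/crypto`, or from a key unmarshalled from disk.

```go
// Sketch only: a variant of makeRandomHost with a pinned identity, so the
// printed /p2p/<peer-id> address stays the same across restarts.
func makeHostWithIdentity(port int, priv crypto.PrivKey) host.Host {
	h, err := libp2p.New(
		libp2p.Identity(priv),
		libp2p.ListenAddrStrings(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", port)),
	)
	if err != nil {
		log.Fatalln(err)
	}
	return h
}
```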
@@ -84,7 +65,7 @@ func NewProxyService(h host.Host, proxyAddr ma.Multiaddr, dest peer.ID) *ProxySe
fmt.Println("Proxy server is ready")
fmt.Println("libp2p-peer addresses:")
for _, a := range h.Addrs() {
- fmt.Printf("%s/ipfs/%s\n", a, peer.IDB58Encode(h.ID()))
+	fmt.Printf("%s/p2p/%s\n", a, h.ID())
}
return &ProxyService{
@@ -98,7 +79,7 @@ func NewProxyService(h host.Host, proxyAddr ma.Multiaddr, dest peer.ID) *ProxySe
// to our protocol. The streams should contain an HTTP request which we need
// to parse, make on behalf of the original node, and then write the response
// on the stream, before closing it.
-func streamHandler(stream inet.Stream) {
+func streamHandler(stream network.Stream) {
// Remember to close the stream when we are done.
defer stream.Close()
@@ -154,7 +135,7 @@ func (p *ProxyService) Serve() {
}
// ServeHTTP implements the http.Handler interface. WARNING: This is the
-// simplest approach to a proxy. Therefore we do not do any of the things
+// simplest approach to a proxy. Therefore, we do not do any of the things
// that should be done when implementing a reverse proxy (like handling
// headers correctly). For how to do it properly, see:
// https://golang.org/src/net/http/httputil/reverseproxy.go?s=3845:3920#L121
@@ -163,7 +144,7 @@ func (p *ProxyService) Serve() {
// Streams are multiplexed over single connections so, unlike connections
// themselves, they are cheap to create and dispose of.
func (p *ProxyService) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- fmt.Printf("proxying request for %s to peer %s\n", r.URL, p.dest.Pretty())
+ fmt.Printf("proxying request for %s to peer %s\n", r.URL, p.dest)
// We need to send the request to the remote libp2p peer, so
// we open a stream to it
stream, err := p.host.NewStream(context.Background(), p.dest, Protocol)
@@ -225,20 +206,19 @@ func addAddrToPeerstore(h host.Host, addr string) peer.ID {
log.Fatalln(err)
}
- peerid, err := peer.IDB58Decode(pid)
+ peerid, err := peer.Decode(pid)
if err != nil {
log.Fatalln(err)
}
// Decapsulate the /ipfs/ part from the target
// /ip4/<a.b.c.d>/ipfs/<peer> becomes /ip4/<a.b.c.d>
- targetPeerAddr, _ := ma.NewMultiaddr(
- fmt.Sprintf("/ipfs/%s", peer.IDB58Encode(peerid)))
+ targetPeerAddr, _ := ma.NewMultiaddr(fmt.Sprintf("/ipfs/%s", peerid))
targetAddr := ipfsaddr.Decapsulate(targetPeerAddr)
- // We have a peer ID and a targetAddr so we add
+ // We have a peer ID and a targetAddr, so we add
// it to the peerstore so LibP2P knows how to contact it
- h.Peerstore().AddAddr(peerid, targetAddr, ps.PermanentAddrTTL)
+ h.Peerstore().AddAddr(peerid, targetAddr, peerstore.PermanentAddrTTL)
return peerid
}
@@ -253,8 +233,7 @@ Usage: Start remote peer first with: ./proxy
Then you can do something like: curl -x "localhost:9900" "http://ipfs.io".
This proxy sends the request through the local peer, which relays it to
-the remote peer, which makes it and sends the response back.
-`
+the remote peer, which performs it and sends the response back.`
func main() {
flag.Usage = func() {
diff --git a/examples/ipfs-camp-2019/01-Transports/.gitignore b/examples/ipfs-camp-2019/01-Transports/.gitignore
new file mode 100644
index 0000000000..aa99f7acf8
--- /dev/null
+++ b/examples/ipfs-camp-2019/01-Transports/.gitignore
@@ -0,0 +1 @@
+01-Transports
diff --git a/examples/ipfs-camp-2019/01-Transports/main.go b/examples/ipfs-camp-2019/01-Transports/main.go
new file mode 100644
index 0000000000..0b358f18c4
--- /dev/null
+++ b/examples/ipfs-camp-2019/01-Transports/main.go
@@ -0,0 +1,17 @@
+package main
+
+import (
+ "github.com/libp2p/go-libp2p"
+)
+
+func main() {
+ // TODO: add some libp2p.Transport options to this chain!
+ transports := libp2p.ChainOptions()
+
+ host, err := libp2p.New(transports)
+ if err != nil {
+ panic(err)
+ }
+
+ host.Close()
+}
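The TODO above is the exercise; the next step fills it in. For reference, a sketch of one possible answer, using the same transports that 02-Multiaddrs imports:

```go
package main

import (
	"github.com/libp2p/go-libp2p"
	"github.com/libp2p/go-libp2p/p2p/transport/tcp"
	"github.com/libp2p/go-libp2p/p2p/transport/websocket"
)

func main() {
	// One way to fill in the TODO: chain a TCP and a WebSocket transport.
	transports := libp2p.ChainOptions(
		libp2p.Transport(tcp.NewTCPTransport),
		libp2p.Transport(websocket.New),
	)

	host, err := libp2p.New(transports)
	if err != nil {
		panic(err)
	}
	host.Close()
}
```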
diff --git a/examples/ipfs-camp-2019/02-Multiaddrs/.gitignore b/examples/ipfs-camp-2019/02-Multiaddrs/.gitignore
new file mode 100644
index 0000000000..81f5015363
--- /dev/null
+++ b/examples/ipfs-camp-2019/02-Multiaddrs/.gitignore
@@ -0,0 +1 @@
+02-Multiaddrs
diff --git a/examples/ipfs-camp-2019/02-Multiaddrs/main.go b/examples/ipfs-camp-2019/02-Multiaddrs/main.go
new file mode 100644
index 0000000000..dd7f4c38b4
--- /dev/null
+++ b/examples/ipfs-camp-2019/02-Multiaddrs/main.go
@@ -0,0 +1,26 @@
+package main
+
+import (
+ "github.com/libp2p/go-libp2p"
+ "github.com/libp2p/go-libp2p/p2p/transport/tcp"
+ "github.com/libp2p/go-libp2p/p2p/transport/websocket"
+)
+
+func main() {
+ transports := libp2p.ChainOptions(
+ libp2p.Transport(tcp.NewTCPTransport),
+ libp2p.Transport(websocket.New),
+ )
+
+ // TODO: add some listen addresses with the libp2p.ListenAddrs or
+ // libp2p.ListenAddrStrings configuration options.
+
+ host, err := libp2p.New(transports)
+ if err != nil {
+ panic(err)
+ }
+
+ // TODO: with our host made, let's connect to our bootstrap peer
+
+ host.Close()
+}
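Both TODOs here are the exercise, and step 03 below contains the full solution. As a sketch, the listen-address half looks like this (port 0 lets the OS pick a free port; the `/ws` suffix binds the WebSocket transport):

```go
// Sketch of the first TODO, mirroring step 03: one listen address per transport.
listenAddrs := libp2p.ListenAddrStrings(
	"/ip4/0.0.0.0/tcp/0",    // TCP
	"/ip4/0.0.0.0/tcp/0/ws", // WebSocket
)

host, err := libp2p.New(transports, listenAddrs)
if err != nil {
	panic(err)
}
defer host.Close()
```

The second TODO (dialing the bootstrap peer) needs `peer.AddrInfoFromP2pAddr` and `host.Connect`, both of which appear in 03-Muxing-Encryption.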
diff --git a/examples/ipfs-camp-2019/03-Muxing-Encryption/.gitignore b/examples/ipfs-camp-2019/03-Muxing-Encryption/.gitignore
new file mode 100644
index 0000000000..bb599a6dfa
--- /dev/null
+++ b/examples/ipfs-camp-2019/03-Muxing-Encryption/.gitignore
@@ -0,0 +1 @@
+03-Muxing-Encryption
diff --git a/examples/ipfs-camp-2019/03-Muxing-Encryption/main.go b/examples/ipfs-camp-2019/03-Muxing-Encryption/main.go
new file mode 100644
index 0000000000..031b6da27a
--- /dev/null
+++ b/examples/ipfs-camp-2019/03-Muxing-Encryption/main.go
@@ -0,0 +1,57 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/libp2p/go-libp2p"
+ "github.com/libp2p/go-libp2p/p2p/transport/tcp"
+ "github.com/libp2p/go-libp2p/p2p/transport/websocket"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/multiformats/go-multiaddr"
+)
+
+func main() {
+ transports := libp2p.ChainOptions(
+ libp2p.Transport(tcp.NewTCPTransport),
+ libp2p.Transport(websocket.New),
+ )
+
+ // TODO: add a libp2p.Security instance and some libp2p.Muxer's
+
+ listenAddrs := libp2p.ListenAddrStrings(
+ "/ip4/0.0.0.0/tcp/0",
+ "/ip4/0.0.0.0/tcp/0/ws",
+ )
+
+ host, err := libp2p.New(transports, listenAddrs)
+ if err != nil {
+ panic(err)
+ }
+ defer host.Close()
+
+ for _, addr := range host.Addrs() {
+ fmt.Println("Listening on", addr)
+ }
+
+ targetAddr, err := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/63785/p2p/QmWjz6xb8v9K4KnYEwP5Yk75k5mMBCehzWFLCvvQpYxF3d")
+ if err != nil {
+ panic(err)
+ }
+
+ targetInfo, err := peer.AddrInfoFromP2pAddr(targetAddr)
+ if err != nil {
+ panic(err)
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+ defer cancel()
+ err = host.Connect(ctx, *targetInfo)
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Println("Connected to", targetInfo.ID)
+}
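The security and muxer options asked for in the TODO are introduced in step 05. As a sketch, the two lines that would fill it, assuming the imports step 05 uses (`p2p/muxer/yamux` and `p2p/security/tls`):

```go
// Sketch of the TODO, mirroring step 05: yamux for stream multiplexing,
// TLS for connection encryption.
muxers := libp2p.Muxer("/yamux/1.0.0", yamux.DefaultTransport)
security := libp2p.Security(tls.ID, tls.New)

// Both are plain options, passed to the constructor like the others:
host, err := libp2p.New(transports, listenAddrs, muxers, security)
```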
diff --git a/examples/ipfs-camp-2019/05-Discovery/.gitignore b/examples/ipfs-camp-2019/05-Discovery/.gitignore
new file mode 100644
index 0000000000..b8d75d3b0c
--- /dev/null
+++ b/examples/ipfs-camp-2019/05-Discovery/.gitignore
@@ -0,0 +1 @@
+05-Discovery
diff --git a/examples/ipfs-camp-2019/05-Discovery/README.md b/examples/ipfs-camp-2019/05-Discovery/README.md
new file mode 100644
index 0000000000..e19d45ac8d
--- /dev/null
+++ b/examples/ipfs-camp-2019/05-Discovery/README.md
@@ -0,0 +1,6 @@
+# 05 Discovery
+
+Be sure to check out these modules:
+
+- https://godoc.org/github.com/libp2p/go-libp2p#Routing
+- https://godoc.org/github.com/libp2p/go-libp2p-kad-dht#New
diff --git a/examples/ipfs-camp-2019/05-Discovery/main.go b/examples/ipfs-camp-2019/05-Discovery/main.go
new file mode 100644
index 0000000000..0f3446d8df
--- /dev/null
+++ b/examples/ipfs-camp-2019/05-Discovery/main.go
@@ -0,0 +1,86 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "os/signal"
+ "syscall"
+
+ "github.com/libp2p/go-libp2p"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/p2p/muxer/yamux"
+ tls "github.com/libp2p/go-libp2p/p2p/security/tls"
+ "github.com/libp2p/go-libp2p/p2p/transport/tcp"
+ "github.com/libp2p/go-libp2p/p2p/transport/websocket"
+
+ "github.com/multiformats/go-multiaddr"
+)
+
+func main() {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ transports := libp2p.ChainOptions(
+ libp2p.Transport(tcp.NewTCPTransport),
+ libp2p.Transport(websocket.New),
+ )
+
+ muxers := libp2p.Muxer("/yamux/1.0.0", yamux.DefaultTransport)
+
+ security := libp2p.Security(tls.ID, tls.New)
+
+ listenAddrs := libp2p.ListenAddrStrings(
+ "/ip4/0.0.0.0/tcp/0",
+ "/ip4/0.0.0.0/tcp/0/ws",
+ )
+
+ // TODO: Configure libp2p to use a DHT with a libp2p.Routing option
+
+ host, err := libp2p.New(
+ transports,
+ listenAddrs,
+ muxers,
+ security,
+ )
+ if err != nil {
+ panic(err)
+ }
+
+ host.SetStreamHandler(chatProtocol, chatHandler)
+
+ for _, addr := range host.Addrs() {
+ fmt.Println("Listening on", addr)
+ }
+
+ targetAddr, err := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/63785/p2p/QmWjz6xb8v9K4KnYEwP5Yk75k5mMBCehzWFLCvvQpYxF3d")
+ if err != nil {
+ panic(err)
+ }
+
+ targetInfo, err := peer.AddrInfoFromP2pAddr(targetAddr)
+ if err != nil {
+ panic(err)
+ }
+
+ err = host.Connect(ctx, *targetInfo)
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Println("Connected to", targetInfo.ID)
+
+ donec := make(chan struct{}, 1)
+ go chatInputLoop(ctx, host, donec)
+
+ stop := make(chan os.Signal, 1)
+ signal.Notify(stop, syscall.SIGINT)
+
+ select {
+ case <-stop:
+ host.Close()
+ os.Exit(0)
+ case <-donec:
+ host.Close()
+ }
+}
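The routing TODO is answered in step 06-Pubsub below; a sketch of the wiring, adapted from that step (we rename the option variable to `routingOpt` to avoid shadowing the imported core `routing` package):

```go
// Sketch, adapted from step 06: build a Kademlia DHT lazily so it can wrap
// the host that libp2p.New constructs, and register it as the peer router.
// Imports: kaddht "github.com/libp2p/go-libp2p-kad-dht" and
// "github.com/libp2p/go-libp2p/core/routing", as in step 06.
var dht *kaddht.IpfsDHT
newDHT := func(h host.Host) (routing.PeerRouting, error) {
	var err error
	dht, err = kaddht.New(ctx, h)
	return dht, err
}
routingOpt := libp2p.Routing(newDHT)

host, err := libp2p.New(transports, listenAddrs, muxers, security, routingOpt)
```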
diff --git a/examples/ipfs-camp-2019/05-Discovery/protocol.go b/examples/ipfs-camp-2019/05-Discovery/protocol.go
new file mode 100644
index 0000000000..70d22f08f2
--- /dev/null
+++ b/examples/ipfs-camp-2019/05-Discovery/protocol.go
@@ -0,0 +1,68 @@
+package main
+
+import (
+ "bufio"
+ "context"
+ "fmt"
+ "io"
+ "os"
+
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+)
+
+const chatProtocol = "/libp2p/chat/1.0.0"
+
+func chatHandler(s network.Stream) {
+	data, err := io.ReadAll(s)
+	if err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		return
+	}
+	fmt.Println("Received:", string(data))
+}
+
+func chatSend(msg string, s network.Stream) error {
+ fmt.Println("Sending:", msg)
+ w := bufio.NewWriter(s)
+ n, err := w.WriteString(msg)
+ if err != nil {
+ return err
+ }
+ if n != len(msg) {
+ return fmt.Errorf("expected to write %d bytes, wrote %d", len(msg), n)
+ }
+ if err = w.Flush(); err != nil {
+ return err
+ }
+ // Close only our write side so the remote peer sees EOF while the
+ // stream stays readable for any reply, which we drain below.
+ s.CloseWrite()
+ data, err := io.ReadAll(s)
+ if err != nil {
+ return err
+ }
+ if len(data) > 0 {
+ fmt.Println("Received:", string(data))
+ }
+ return nil
+}
+
+func chatInputLoop(ctx context.Context, h host.Host, donec chan struct{}) {
+ scanner := bufio.NewScanner(os.Stdin)
+ for scanner.Scan() {
+ msg := scanner.Text()
+ for _, peer := range h.Network().Peers() {
+ // Only message peers that advertise the chat protocol;
+ // SupportsProtocols returns the subset of the given protocols
+ // that the peer supports.
+ protos, err := h.Peerstore().SupportsProtocols(peer, chatProtocol)
+ if err != nil || len(protos) == 0 {
+ continue
+ }
+ s, err := h.NewStream(ctx, peer, chatProtocol)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ continue
+ }
+ if err := chatSend(msg, s); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ }
+ }
+ }
+ donec <- struct{}{}
+}
diff --git a/examples/ipfs-camp-2019/06-Pubsub/.gitignore b/examples/ipfs-camp-2019/06-Pubsub/.gitignore
new file mode 100644
index 0000000000..31e0693a4d
--- /dev/null
+++ b/examples/ipfs-camp-2019/06-Pubsub/.gitignore
@@ -0,0 +1 @@
+06-Pubsub
diff --git a/examples/ipfs-camp-2019/06-Pubsub/README.md b/examples/ipfs-camp-2019/06-Pubsub/README.md
new file mode 100644
index 0000000000..e6cd2b7fab
--- /dev/null
+++ b/examples/ipfs-camp-2019/06-Pubsub/README.md
@@ -0,0 +1,5 @@
+# 06 Pubsub
+
+Be sure to check out these modules:
+
+- https://godoc.org/github.com/libp2p/go-libp2p-pubsub
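+
+As a hint for the TODOs in this checkpoint, here is a minimal sketch of
+joining a GossipSub topic and printing incoming messages; the topic name is
+the one used later in this workshop, and `startChat` is a hypothetical helper
+name:
+
+```go
+package main
+
+import (
+ "context"
+ "fmt"
+ "os"
+
+ pubsub "github.com/libp2p/go-libp2p-pubsub"
+ "github.com/libp2p/go-libp2p/core/host"
+)
+
+// startChat creates a GossipSub router on the host, joins the chat topic,
+// and spawns a goroutine that prints every message received on it.
+func startChat(ctx context.Context, h host.Host) (*pubsub.Topic, error) {
+ ps, err := pubsub.NewGossipSub(ctx, h)
+ if err != nil {
+ return nil, err
+ }
+ topic, err := ps.Join("/libp2p/example/chat/1.0.0")
+ if err != nil {
+ return nil, err
+ }
+ sub, err := topic.Subscribe()
+ if err != nil {
+ return nil, err
+ }
+ go func() {
+ for {
+ msg, err := sub.Next(ctx)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ return
+ }
+ fmt.Printf("%s: %s\n", msg.GetFrom().ShortString(), msg.Data)
+ }
+ }()
+ return topic, nil
+}
+```
+
+Messages can then be published with `topic.Publish(ctx, []byte(text))`.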
diff --git a/examples/ipfs-camp-2019/06-Pubsub/main.go b/examples/ipfs-camp-2019/06-Pubsub/main.go
new file mode 100644
index 0000000000..af7cab277b
--- /dev/null
+++ b/examples/ipfs-camp-2019/06-Pubsub/main.go
@@ -0,0 +1,135 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "os/signal"
+ "syscall"
+
+ "github.com/libp2p/go-libp2p"
+ kaddht "github.com/libp2p/go-libp2p-kad-dht"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/routing"
+ "github.com/libp2p/go-libp2p/p2p/discovery/mdns"
+ drouting "github.com/libp2p/go-libp2p/p2p/discovery/routing"
+ dutil "github.com/libp2p/go-libp2p/p2p/discovery/util"
+ "github.com/libp2p/go-libp2p/p2p/muxer/yamux"
+ tls "github.com/libp2p/go-libp2p/p2p/security/tls"
+ "github.com/libp2p/go-libp2p/p2p/transport/tcp"
+ "github.com/libp2p/go-libp2p/p2p/transport/websocket"
+
+ "github.com/multiformats/go-multiaddr"
+)
+
+type discoveryNotifee struct {
+ h host.Host
+ ctx context.Context
+}
+
+func (m *discoveryNotifee) HandlePeerFound(pi peer.AddrInfo) {
+ if m.h.Network().Connectedness(pi.ID) != network.Connected {
+ fmt.Printf("Found %s!\n", pi.ID.ShortString())
+ m.h.Connect(m.ctx, pi)
+ }
+}
+
+func main() {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ transports := libp2p.ChainOptions(
+ libp2p.Transport(tcp.NewTCPTransport),
+ libp2p.Transport(websocket.New),
+ )
+
+ muxers := libp2p.Muxer("/yamux/1.0.0", yamux.DefaultTransport)
+
+ security := libp2p.Security(tls.ID, tls.New)
+
+ listenAddrs := libp2p.ListenAddrStrings(
+ "/ip4/0.0.0.0/tcp/0",
+ "/ip4/0.0.0.0/tcp/0/ws",
+ )
+
+ var dht *kaddht.IpfsDHT
+ newDHT := func(h host.Host) (routing.PeerRouting, error) {
+ var err error
+ dht, err = kaddht.New(ctx, h)
+ return dht, err
+ }
+ routing := libp2p.Routing(newDHT)
+
+ host, err := libp2p.New(
+ transports,
+ listenAddrs,
+ muxers,
+ security,
+ routing,
+ )
+ if err != nil {
+ panic(err)
+ }
+
+ // TODO: Replace our stream handler with a pubsub instance, and a handler
+ // to field incoming messages on our topic.
+ host.SetStreamHandler(chatProtocol, chatHandler)
+
+ for _, addr := range host.Addrs() {
+ fmt.Println("Listening on", addr)
+ }
+
+ targetAddr, err := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/63785/p2p/QmWjz6xb8v9K4KnYEwP5Yk75k5mMBCehzWFLCvvQpYxF3d")
+ if err != nil {
+ panic(err)
+ }
+
+ targetInfo, err := peer.AddrInfoFromP2pAddr(targetAddr)
+ if err != nil {
+ panic(err)
+ }
+
+ err = host.Connect(ctx, *targetInfo)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "connecting to bootstrap: %s\n", err)
+ } else {
+ fmt.Println("Connected to", targetInfo.ID)
+ }
+
+ notifee := &discoveryNotifee{h: host, ctx: ctx}
+ mdns := mdns.NewMdnsService(host, "", notifee)
+ if err := mdns.Start(); err != nil {
+ panic(err)
+ }
+
+ err = dht.Bootstrap(ctx)
+ if err != nil {
+ panic(err)
+ }
+
+ routingDiscovery := drouting.NewRoutingDiscovery(dht)
+ dutil.Advertise(ctx, routingDiscovery, string(chatProtocol))
+ peers, err := dutil.FindPeers(ctx, routingDiscovery, string(chatProtocol))
+ if err != nil {
+ panic(err)
+ }
+ for _, peer := range peers {
+ notifee.HandlePeerFound(peer)
+ }
+
+ donec := make(chan struct{}, 1)
+ go chatInputLoop(ctx, host, donec)
+
+ stop := make(chan os.Signal, 1)
+ signal.Notify(stop, syscall.SIGINT)
+
+ select {
+ case <-stop:
+ host.Close()
+ os.Exit(0)
+ case <-donec:
+ host.Close()
+ }
+}
diff --git a/examples/ipfs-camp-2019/06-Pubsub/protocol.go b/examples/ipfs-camp-2019/06-Pubsub/protocol.go
new file mode 100644
index 0000000000..899c62ce39
--- /dev/null
+++ b/examples/ipfs-camp-2019/06-Pubsub/protocol.go
@@ -0,0 +1,72 @@
+package main
+
+import (
+ "bufio"
+ "context"
+ "fmt"
+ "io"
+ "os"
+
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+)
+
+const chatProtocol = "/libp2p/chat/1.0.0"
+
+// TODO: Replace this handler with a function that handles messages from a
+// pubsub Subscribe channel.
+func chatHandler(s network.Stream) {
+ defer s.Close()
+ data, err := io.ReadAll(s)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ return
+ }
+ fmt.Println("Received:", string(data))
+}
+
+// TODO: Replace this with a send function that publishes the string messages
+// on our pubsub topic.
+func chatSend(msg string, s network.Stream) error {
+ fmt.Println("Sending:", msg)
+ w := bufio.NewWriter(s)
+ n, err := w.WriteString(msg)
+ if err != nil {
+ return err
+ }
+ if n != len(msg) {
+ return fmt.Errorf("expected to write %d bytes, wrote %d", len(msg), n)
+ }
+ if err = w.Flush(); err != nil {
+ return err
+ }
+ // Close only our write side so the remote peer sees EOF while the
+ // stream stays readable for any reply, which we drain below.
+ s.CloseWrite()
+ data, err := io.ReadAll(s)
+ if err != nil {
+ return err
+ }
+ if len(data) > 0 {
+ fmt.Println("Received:", string(data))
+ }
+ return nil
+}
+
+func chatInputLoop(ctx context.Context, h host.Host, donec chan struct{}) {
+ scanner := bufio.NewScanner(os.Stdin)
+ for scanner.Scan() {
+ msg := scanner.Text()
+ for _, peer := range h.Network().Peers() {
+ // Only message peers that advertise the chat protocol;
+ // SupportsProtocols returns the subset of the given protocols
+ // that the peer supports.
+ protos, err := h.Peerstore().SupportsProtocols(peer, chatProtocol)
+ if err != nil || len(protos) == 0 {
+ continue
+ }
+ s, err := h.NewStream(ctx, peer, chatProtocol)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ continue
+ }
+ if err := chatSend(msg, s); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ }
+ }
+ }
+ donec <- struct{}{}
+}
diff --git a/examples/ipfs-camp-2019/07-Messaging/.gitignore b/examples/ipfs-camp-2019/07-Messaging/.gitignore
new file mode 100644
index 0000000000..1ca6214a28
--- /dev/null
+++ b/examples/ipfs-camp-2019/07-Messaging/.gitignore
@@ -0,0 +1 @@
+07-Messaging
diff --git a/examples/ipfs-camp-2019/07-Messaging/README.md b/examples/ipfs-camp-2019/07-Messaging/README.md
new file mode 100644
index 0000000000..2aa4816d39
--- /dev/null
+++ b/examples/ipfs-camp-2019/07-Messaging/README.md
@@ -0,0 +1,5 @@
+# 07 Messaging
+
+Be sure to check out these modules:
+
+- https://godoc.org/github.com/libp2p/go-libp2p-pubsub
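+
+The goal of this checkpoint is to wrap each chat line in the `Request`
+protobuf from `chat.proto` before publishing it. Below is a minimal sketch,
+assuming the types generated in `chat.pb.go` in this folder; `publishChat` is
+a hypothetical helper name:
+
+```go
+package main
+
+import (
+ "context"
+ "crypto/rand"
+ "time"
+
+ "github.com/gogo/protobuf/proto"
+ pubsub "github.com/libp2p/go-libp2p-pubsub"
+)
+
+// publishChat wraps a chat line in a SEND_MESSAGE Request, tagging it with
+// a random id and a creation timestamp, and publishes the marshaled bytes.
+func publishChat(ctx context.Context, topic *pubsub.Topic, text string) error {
+ msgId := make([]byte, 10)
+ if _, err := rand.Read(msgId); err != nil {
+ return err
+ }
+ now := time.Now().Unix()
+ req := &Request{
+ Type: Request_SEND_MESSAGE.Enum(),
+ SendMessage: &SendMessage{
+ Id: msgId,
+ Data: []byte(text),
+ Created: &now,
+ },
+ }
+ msgBytes, err := proto.Marshal(req)
+ if err != nil {
+ return err
+ }
+ return topic.Publish(ctx, msgBytes)
+}
+```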
diff --git a/examples/ipfs-camp-2019/07-Messaging/chat.pb.go b/examples/ipfs-camp-2019/07-Messaging/chat.pb.go
new file mode 100644
index 0000000000..562290f912
--- /dev/null
+++ b/examples/ipfs-camp-2019/07-Messaging/chat.pb.go
@@ -0,0 +1,237 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: chat.proto
+
+package main
+
+import (
+ fmt "fmt"
+ math "math"
+
+ proto "github.com/gogo/protobuf/proto"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+type Request_Type int32
+
+const (
+ Request_SEND_MESSAGE Request_Type = 0
+ Request_UPDATE_PEER Request_Type = 1
+)
+
+var Request_Type_name = map[int32]string{
+ 0: "SEND_MESSAGE",
+ 1: "UPDATE_PEER",
+}
+
+var Request_Type_value = map[string]int32{
+ "SEND_MESSAGE": 0,
+ "UPDATE_PEER": 1,
+}
+
+func (x Request_Type) Enum() *Request_Type {
+ p := new(Request_Type)
+ *p = x
+ return p
+}
+
+func (x Request_Type) String() string {
+ return proto.EnumName(Request_Type_name, int32(x))
+}
+
+func (x *Request_Type) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Request_Type_value, data, "Request_Type")
+ if err != nil {
+ return err
+ }
+ *x = Request_Type(value)
+ return nil
+}
+
+func (Request_Type) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_8c585a45e2093e54, []int{0, 0}
+}
+
+type Request struct {
+ Type *Request_Type `protobuf:"varint,1,req,name=type,enum=main.Request_Type" json:"type,omitempty"`
+ SendMessage *SendMessage `protobuf:"bytes,2,opt,name=sendMessage" json:"sendMessage,omitempty"`
+ UpdatePeer *UpdatePeer `protobuf:"bytes,3,opt,name=updatePeer" json:"updatePeer,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Request) Reset() { *m = Request{} }
+func (m *Request) String() string { return proto.CompactTextString(m) }
+func (*Request) ProtoMessage() {}
+func (*Request) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8c585a45e2093e54, []int{0}
+}
+func (m *Request) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Request.Unmarshal(m, b)
+}
+func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Request.Marshal(b, m, deterministic)
+}
+func (m *Request) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Request.Merge(m, src)
+}
+func (m *Request) XXX_Size() int {
+ return xxx_messageInfo_Request.Size(m)
+}
+func (m *Request) XXX_DiscardUnknown() {
+ xxx_messageInfo_Request.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Request proto.InternalMessageInfo
+
+func (m *Request) GetType() Request_Type {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Request_SEND_MESSAGE
+}
+
+func (m *Request) GetSendMessage() *SendMessage {
+ if m != nil {
+ return m.SendMessage
+ }
+ return nil
+}
+
+func (m *Request) GetUpdatePeer() *UpdatePeer {
+ if m != nil {
+ return m.UpdatePeer
+ }
+ return nil
+}
+
+type SendMessage struct {
+ Data []byte `protobuf:"bytes,1,req,name=data" json:"data,omitempty"`
+ Created *int64 `protobuf:"varint,2,req,name=created" json:"created,omitempty"`
+ Id []byte `protobuf:"bytes,3,req,name=id" json:"id,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SendMessage) Reset() { *m = SendMessage{} }
+func (m *SendMessage) String() string { return proto.CompactTextString(m) }
+func (*SendMessage) ProtoMessage() {}
+func (*SendMessage) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8c585a45e2093e54, []int{1}
+}
+func (m *SendMessage) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SendMessage.Unmarshal(m, b)
+}
+func (m *SendMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SendMessage.Marshal(b, m, deterministic)
+}
+func (m *SendMessage) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SendMessage.Merge(m, src)
+}
+func (m *SendMessage) XXX_Size() int {
+ return xxx_messageInfo_SendMessage.Size(m)
+}
+func (m *SendMessage) XXX_DiscardUnknown() {
+ xxx_messageInfo_SendMessage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SendMessage proto.InternalMessageInfo
+
+func (m *SendMessage) GetData() []byte {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+func (m *SendMessage) GetCreated() int64 {
+ if m != nil && m.Created != nil {
+ return *m.Created
+ }
+ return 0
+}
+
+func (m *SendMessage) GetId() []byte {
+ if m != nil {
+ return m.Id
+ }
+ return nil
+}
+
+type UpdatePeer struct {
+ UserHandle []byte `protobuf:"bytes,1,opt,name=userHandle" json:"userHandle,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UpdatePeer) Reset() { *m = UpdatePeer{} }
+func (m *UpdatePeer) String() string { return proto.CompactTextString(m) }
+func (*UpdatePeer) ProtoMessage() {}
+func (*UpdatePeer) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8c585a45e2093e54, []int{2}
+}
+func (m *UpdatePeer) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_UpdatePeer.Unmarshal(m, b)
+}
+func (m *UpdatePeer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_UpdatePeer.Marshal(b, m, deterministic)
+}
+func (m *UpdatePeer) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UpdatePeer.Merge(m, src)
+}
+func (m *UpdatePeer) XXX_Size() int {
+ return xxx_messageInfo_UpdatePeer.Size(m)
+}
+func (m *UpdatePeer) XXX_DiscardUnknown() {
+ xxx_messageInfo_UpdatePeer.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UpdatePeer proto.InternalMessageInfo
+
+func (m *UpdatePeer) GetUserHandle() []byte {
+ if m != nil {
+ return m.UserHandle
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterEnum("main.Request_Type", Request_Type_name, Request_Type_value)
+ proto.RegisterType((*Request)(nil), "main.Request")
+ proto.RegisterType((*SendMessage)(nil), "main.SendMessage")
+ proto.RegisterType((*UpdatePeer)(nil), "main.UpdatePeer")
+}
+
+func init() { proto.RegisterFile("chat.proto", fileDescriptor_8c585a45e2093e54) }
+
+var fileDescriptor_8c585a45e2093e54 = []byte{
+ // 251 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x90, 0x51, 0x4b, 0xc3, 0x30,
+ 0x14, 0x85, 0x4d, 0x5a, 0x18, 0xdc, 0x96, 0x59, 0xef, 0x53, 0x9e, 0xa4, 0xf4, 0x41, 0x2a, 0x48,
+ 0x91, 0xf9, 0x0b, 0x06, 0x0b, 0x0a, 0x32, 0x29, 0xe9, 0xf6, 0x3c, 0xc2, 0x72, 0xd1, 0x82, 0x76,
+ 0xb5, 0x49, 0x1f, 0xf6, 0xfb, 0xfc, 0x63, 0xd2, 0x4c, 0xb7, 0xbc, 0x25, 0xe7, 0x7c, 0x87, 0x73,
+ 0xef, 0x05, 0xd8, 0x7f, 0x68, 0x57, 0xf5, 0xc3, 0xc1, 0x1d, 0x30, 0xfe, 0xd2, 0x6d, 0x57, 0xfc,
+ 0x30, 0x98, 0x29, 0xfa, 0x1e, 0xc9, 0x3a, 0xbc, 0x83, 0xd8, 0x1d, 0x7b, 0x12, 0x2c, 0xe7, 0xe5,
+ 0x7c, 0x81, 0xd5, 0x04, 0x54, 0x7f, 0x66, 0xb5, 0x39, 0xf6, 0xa4, 0xbc, 0x8f, 0x4f, 0x90, 0x58,
+ 0xea, 0xcc, 0x9a, 0xac, 0xd5, 0xef, 0x24, 0x78, 0xce, 0xca, 0x64, 0x71, 0x73, 0xc2, 0x9b, 0x8b,
+ 0xa1, 0x42, 0x0a, 0x1f, 0x01, 0xc6, 0xde, 0x68, 0x47, 0x35, 0xd1, 0x20, 0x22, 0x9f, 0xc9, 0x4e,
+ 0x99, 0xed, 0x59, 0x57, 0x01, 0x53, 0xdc, 0x43, 0x3c, 0x95, 0x62, 0x06, 0x69, 0x23, 0xdf, 0x56,
+ 0xbb, 0xb5, 0x6c, 0x9a, 0xe5, 0xb3, 0xcc, 0xae, 0xf0, 0x1a, 0x92, 0x6d, 0xbd, 0x5a, 0x6e, 0xe4,
+ 0xae, 0x96, 0x52, 0x65, 0xac, 0x78, 0x85, 0x24, 0x28, 0x46, 0x84, 0xd8, 0x68, 0xa7, 0xfd, 0x22,
+ 0xa9, 0xf2, 0x6f, 0x14, 0x30, 0xdb, 0x0f, 0xa4, 0x1d, 0x19, 0xc1, 0x73, 0x5e, 0x46, 0xea, 0xff,
+ 0x8b, 0x73, 0xe0, 0xad, 0x11, 0x91, 0x67, 0x79, 0x6b, 0x8a, 0x07, 0x80, 0xcb, 0x44, 0x78, 0x0b,
+ 0x30, 0x5a, 0x1a, 0x5e, 0x74, 0x67, 0x3e, 0xa7, 0xd3, 0xb0, 0x32, 0x55, 0x81, 0xf2, 0x1b, 0x00,
+ 0x00, 0xff, 0xff, 0x5c, 0xd9, 0x58, 0xd2, 0x53, 0x01, 0x00, 0x00,
+}
diff --git a/examples/ipfs-camp-2019/07-Messaging/chat.proto b/examples/ipfs-camp-2019/07-Messaging/chat.proto
new file mode 100644
index 0000000000..bdf1429156
--- /dev/null
+++ b/examples/ipfs-camp-2019/07-Messaging/chat.proto
@@ -0,0 +1,23 @@
+syntax = "proto2";
+package main;
+
+message Request {
+ enum Type {
+ SEND_MESSAGE = 0;
+ UPDATE_PEER = 1;
+ }
+
+ required Type type = 1;
+ optional SendMessage sendMessage = 2;
+ optional UpdatePeer updatePeer = 3;
+}
+
+message SendMessage {
+ required bytes data = 1;
+ required int64 created = 2;
+ required bytes id = 3;
+}
+
+message UpdatePeer {
+ optional bytes userHandle = 1;
+}
\ No newline at end of file
diff --git a/examples/ipfs-camp-2019/07-Messaging/main.go b/examples/ipfs-camp-2019/07-Messaging/main.go
new file mode 100644
index 0000000000..b758e21191
--- /dev/null
+++ b/examples/ipfs-camp-2019/07-Messaging/main.go
@@ -0,0 +1,134 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "os/signal"
+ "syscall"
+
+ "github.com/libp2p/go-libp2p"
+ "github.com/libp2p/go-libp2p/p2p/discovery/mdns"
+ "github.com/libp2p/go-libp2p/p2p/muxer/yamux"
+ tls "github.com/libp2p/go-libp2p/p2p/security/tls"
+ "github.com/libp2p/go-libp2p/p2p/transport/tcp"
+ "github.com/libp2p/go-libp2p/p2p/transport/websocket"
+
+ kaddht "github.com/libp2p/go-libp2p-kad-dht"
+ pubsub "github.com/libp2p/go-libp2p-pubsub"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/routing"
+
+ "github.com/multiformats/go-multiaddr"
+)
+
+type mdnsNotifee struct {
+ h host.Host
+ ctx context.Context
+}
+
+func (m *mdnsNotifee) HandlePeerFound(pi peer.AddrInfo) {
+ m.h.Connect(m.ctx, pi)
+}
+
+func main() {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ transports := libp2p.ChainOptions(
+ libp2p.Transport(tcp.NewTCPTransport),
+ libp2p.Transport(websocket.New),
+ )
+
+ muxers := libp2p.Muxer("/yamux/1.0.0", yamux.DefaultTransport)
+
+ security := libp2p.Security(tls.ID, tls.New)
+
+ listenAddrs := libp2p.ListenAddrStrings(
+ "/ip4/0.0.0.0/tcp/0",
+ "/ip4/0.0.0.0/tcp/0/ws",
+ )
+
+ var dht *kaddht.IpfsDHT
+ newDHT := func(h host.Host) (routing.PeerRouting, error) {
+ var err error
+ dht, err = kaddht.New(ctx, h)
+ return dht, err
+ }
+ routing := libp2p.Routing(newDHT)
+
+ host, err := libp2p.New(
+ transports,
+ listenAddrs,
+ muxers,
+ security,
+ routing,
+ )
+ if err != nil {
+ panic(err)
+ }
+
+ ps, err := pubsub.NewGossipSub(ctx, host)
+ if err != nil {
+ panic(err)
+ }
+ topic, err := ps.Join(pubsubTopic)
+ if err != nil {
+ panic(err)
+ }
+ defer topic.Close()
+ sub, err := topic.Subscribe()
+ if err != nil {
+ panic(err)
+ }
+ // TODO: Modify this handler to use the protobufs defined in this folder
+ go pubsubHandler(ctx, sub)
+
+ for _, addr := range host.Addrs() {
+ fmt.Println("Listening on", addr)
+ }
+
+ targetAddr, err := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/63785/p2p/QmWjz6xb8v9K4KnYEwP5Yk75k5mMBCehzWFLCvvQpYxF3d")
+ if err != nil {
+ panic(err)
+ }
+
+ targetInfo, err := peer.AddrInfoFromP2pAddr(targetAddr)
+ if err != nil {
+ panic(err)
+ }
+
+ err = host.Connect(ctx, *targetInfo)
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Println("Connected to", targetInfo.ID)
+
+ mdns := mdns.NewMdnsService(host, "", &mdnsNotifee{h: host, ctx: ctx})
+ if err := mdns.Start(); err != nil {
+ panic(err)
+ }
+
+ err = dht.Bootstrap(ctx)
+ if err != nil {
+ panic(err)
+ }
+
+ donec := make(chan struct{}, 1)
+ // TODO: Modify this chat input loop to use the protobufs defined in this
+ // folder.
+ go chatInputLoop(ctx, topic, donec)
+
+ stop := make(chan os.Signal, 1)
+ signal.Notify(stop, syscall.SIGINT)
+
+ select {
+ case <-stop:
+ host.Close()
+ os.Exit(0)
+ case <-donec:
+ host.Close()
+ }
+}
diff --git a/examples/ipfs-camp-2019/07-Messaging/protocol.go b/examples/ipfs-camp-2019/07-Messaging/protocol.go
new file mode 100644
index 0000000000..addfc8e98f
--- /dev/null
+++ b/examples/ipfs-camp-2019/07-Messaging/protocol.go
@@ -0,0 +1,46 @@
+package main
+
+import (
+ "bufio"
+ "context"
+ "crypto/rand"
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/gogo/protobuf/proto"
+ pubsub "github.com/libp2p/go-libp2p-pubsub"
+)
+
+func chatInputLoop(ctx context.Context, topic *pubsub.Topic, donec chan struct{}) {
+ scanner := bufio.NewScanner(os.Stdin)
+ for scanner.Scan() {
+ msg := scanner.Text()
+ msgId := make([]byte, 10)
+ _, err := rand.Read(msgId)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ continue
+ }
+ now := time.Now().Unix()
+ req := &Request{
+ Type: Request_SEND_MESSAGE.Enum(),
+ SendMessage: &SendMessage{
+ Id: msgId,
+ Data: []byte(msg),
+ Created: &now,
+ },
+ }
+ msgBytes, err := proto.Marshal(req)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ continue
+ }
+ err = topic.Publish(ctx, msgBytes)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ continue
+ }
+ }
+ donec <- struct{}{}
+}
diff --git a/examples/ipfs-camp-2019/07-Messaging/pubsub.go b/examples/ipfs-camp-2019/07-Messaging/pubsub.go
new file mode 100644
index 0000000000..8245bbbbc2
--- /dev/null
+++ b/examples/ipfs-camp-2019/07-Messaging/pubsub.go
@@ -0,0 +1,46 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "os"
+
+ "github.com/gogo/protobuf/proto"
+ pubsub "github.com/libp2p/go-libp2p-pubsub"
+ peer "github.com/libp2p/go-libp2p/core/peer"
+)
+
+const pubsubTopic = "/libp2p/example/chat/1.0.0"
+
+func pubsubMessageHandler(id peer.ID, msg *SendMessage) {
+ fmt.Printf("%s: %s\n", id.ShortString(), msg.Data)
+}
+
+func pubsubUpdateHandler(_ peer.ID, _ *UpdatePeer) {
+
+}
+
+func pubsubHandler(ctx context.Context, sub *pubsub.Subscription) {
+ defer sub.Cancel()
+ for {
+ msg, err := sub.Next(ctx)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ continue
+ }
+
+ req := &Request{}
+ err = proto.Unmarshal(msg.Data, req)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ continue
+ }
+
+ switch *req.Type {
+ case Request_SEND_MESSAGE:
+ pubsubMessageHandler(msg.GetFrom(), req.SendMessage)
+ case Request_UPDATE_PEER:
+ pubsubUpdateHandler(msg.GetFrom(), req.UpdatePeer)
+ }
+ }
+}
diff --git a/examples/ipfs-camp-2019/08-End/.gitignore b/examples/ipfs-camp-2019/08-End/.gitignore
new file mode 100644
index 0000000000..c9a4a6d8c4
--- /dev/null
+++ b/examples/ipfs-camp-2019/08-End/.gitignore
@@ -0,0 +1 @@
+08-End
diff --git a/examples/ipfs-camp-2019/08-End/chat.pb.go b/examples/ipfs-camp-2019/08-End/chat.pb.go
new file mode 100644
index 0000000000..562290f912
--- /dev/null
+++ b/examples/ipfs-camp-2019/08-End/chat.pb.go
@@ -0,0 +1,237 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: chat.proto
+
+package main
+
+import (
+ fmt "fmt"
+ math "math"
+
+ proto "github.com/gogo/protobuf/proto"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+type Request_Type int32
+
+const (
+ Request_SEND_MESSAGE Request_Type = 0
+ Request_UPDATE_PEER Request_Type = 1
+)
+
+var Request_Type_name = map[int32]string{
+ 0: "SEND_MESSAGE",
+ 1: "UPDATE_PEER",
+}
+
+var Request_Type_value = map[string]int32{
+ "SEND_MESSAGE": 0,
+ "UPDATE_PEER": 1,
+}
+
+func (x Request_Type) Enum() *Request_Type {
+ p := new(Request_Type)
+ *p = x
+ return p
+}
+
+func (x Request_Type) String() string {
+ return proto.EnumName(Request_Type_name, int32(x))
+}
+
+func (x *Request_Type) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Request_Type_value, data, "Request_Type")
+ if err != nil {
+ return err
+ }
+ *x = Request_Type(value)
+ return nil
+}
+
+func (Request_Type) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_8c585a45e2093e54, []int{0, 0}
+}
+
+type Request struct {
+ Type *Request_Type `protobuf:"varint,1,req,name=type,enum=main.Request_Type" json:"type,omitempty"`
+ SendMessage *SendMessage `protobuf:"bytes,2,opt,name=sendMessage" json:"sendMessage,omitempty"`
+ UpdatePeer *UpdatePeer `protobuf:"bytes,3,opt,name=updatePeer" json:"updatePeer,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Request) Reset() { *m = Request{} }
+func (m *Request) String() string { return proto.CompactTextString(m) }
+func (*Request) ProtoMessage() {}
+func (*Request) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8c585a45e2093e54, []int{0}
+}
+func (m *Request) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Request.Unmarshal(m, b)
+}
+func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Request.Marshal(b, m, deterministic)
+}
+func (m *Request) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Request.Merge(m, src)
+}
+func (m *Request) XXX_Size() int {
+ return xxx_messageInfo_Request.Size(m)
+}
+func (m *Request) XXX_DiscardUnknown() {
+ xxx_messageInfo_Request.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Request proto.InternalMessageInfo
+
+func (m *Request) GetType() Request_Type {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Request_SEND_MESSAGE
+}
+
+func (m *Request) GetSendMessage() *SendMessage {
+ if m != nil {
+ return m.SendMessage
+ }
+ return nil
+}
+
+func (m *Request) GetUpdatePeer() *UpdatePeer {
+ if m != nil {
+ return m.UpdatePeer
+ }
+ return nil
+}
+
+type SendMessage struct {
+ Data []byte `protobuf:"bytes,1,req,name=data" json:"data,omitempty"`
+ Created *int64 `protobuf:"varint,2,req,name=created" json:"created,omitempty"`
+ Id []byte `protobuf:"bytes,3,req,name=id" json:"id,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SendMessage) Reset() { *m = SendMessage{} }
+func (m *SendMessage) String() string { return proto.CompactTextString(m) }
+func (*SendMessage) ProtoMessage() {}
+func (*SendMessage) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8c585a45e2093e54, []int{1}
+}
+func (m *SendMessage) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SendMessage.Unmarshal(m, b)
+}
+func (m *SendMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SendMessage.Marshal(b, m, deterministic)
+}
+func (m *SendMessage) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SendMessage.Merge(m, src)
+}
+func (m *SendMessage) XXX_Size() int {
+ return xxx_messageInfo_SendMessage.Size(m)
+}
+func (m *SendMessage) XXX_DiscardUnknown() {
+ xxx_messageInfo_SendMessage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SendMessage proto.InternalMessageInfo
+
+func (m *SendMessage) GetData() []byte {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+func (m *SendMessage) GetCreated() int64 {
+ if m != nil && m.Created != nil {
+ return *m.Created
+ }
+ return 0
+}
+
+func (m *SendMessage) GetId() []byte {
+ if m != nil {
+ return m.Id
+ }
+ return nil
+}
+
+type UpdatePeer struct {
+ UserHandle []byte `protobuf:"bytes,1,opt,name=userHandle" json:"userHandle,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UpdatePeer) Reset() { *m = UpdatePeer{} }
+func (m *UpdatePeer) String() string { return proto.CompactTextString(m) }
+func (*UpdatePeer) ProtoMessage() {}
+func (*UpdatePeer) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8c585a45e2093e54, []int{2}
+}
+func (m *UpdatePeer) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_UpdatePeer.Unmarshal(m, b)
+}
+func (m *UpdatePeer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_UpdatePeer.Marshal(b, m, deterministic)
+}
+func (m *UpdatePeer) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UpdatePeer.Merge(m, src)
+}
+func (m *UpdatePeer) XXX_Size() int {
+ return xxx_messageInfo_UpdatePeer.Size(m)
+}
+func (m *UpdatePeer) XXX_DiscardUnknown() {
+ xxx_messageInfo_UpdatePeer.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UpdatePeer proto.InternalMessageInfo
+
+func (m *UpdatePeer) GetUserHandle() []byte {
+ if m != nil {
+ return m.UserHandle
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterEnum("main.Request_Type", Request_Type_name, Request_Type_value)
+ proto.RegisterType((*Request)(nil), "main.Request")
+ proto.RegisterType((*SendMessage)(nil), "main.SendMessage")
+ proto.RegisterType((*UpdatePeer)(nil), "main.UpdatePeer")
+}
+
+func init() { proto.RegisterFile("chat.proto", fileDescriptor_8c585a45e2093e54) }
+
+var fileDescriptor_8c585a45e2093e54 = []byte{
+ // 251 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x90, 0x51, 0x4b, 0xc3, 0x30,
+ 0x14, 0x85, 0x4d, 0x5a, 0x18, 0xdc, 0x96, 0x59, 0xef, 0x53, 0x9e, 0xa4, 0xf4, 0x41, 0x2a, 0x48,
+ 0x91, 0xf9, 0x0b, 0x06, 0x0b, 0x0a, 0x32, 0x29, 0xe9, 0xf6, 0x3c, 0xc2, 0x72, 0xd1, 0x82, 0x76,
+ 0xb5, 0x49, 0x1f, 0xf6, 0xfb, 0xfc, 0x63, 0xd2, 0x4c, 0xb7, 0xbc, 0x25, 0xe7, 0x7c, 0x87, 0x73,
+ 0xef, 0x05, 0xd8, 0x7f, 0x68, 0x57, 0xf5, 0xc3, 0xc1, 0x1d, 0x30, 0xfe, 0xd2, 0x6d, 0x57, 0xfc,
+ 0x30, 0x98, 0x29, 0xfa, 0x1e, 0xc9, 0x3a, 0xbc, 0x83, 0xd8, 0x1d, 0x7b, 0x12, 0x2c, 0xe7, 0xe5,
+ 0x7c, 0x81, 0xd5, 0x04, 0x54, 0x7f, 0x66, 0xb5, 0x39, 0xf6, 0xa4, 0xbc, 0x8f, 0x4f, 0x90, 0x58,
+ 0xea, 0xcc, 0x9a, 0xac, 0xd5, 0xef, 0x24, 0x78, 0xce, 0xca, 0x64, 0x71, 0x73, 0xc2, 0x9b, 0x8b,
+ 0xa1, 0x42, 0x0a, 0x1f, 0x01, 0xc6, 0xde, 0x68, 0x47, 0x35, 0xd1, 0x20, 0x22, 0x9f, 0xc9, 0x4e,
+ 0x99, 0xed, 0x59, 0x57, 0x01, 0x53, 0xdc, 0x43, 0x3c, 0x95, 0x62, 0x06, 0x69, 0x23, 0xdf, 0x56,
+ 0xbb, 0xb5, 0x6c, 0x9a, 0xe5, 0xb3, 0xcc, 0xae, 0xf0, 0x1a, 0x92, 0x6d, 0xbd, 0x5a, 0x6e, 0xe4,
+ 0xae, 0x96, 0x52, 0x65, 0xac, 0x78, 0x85, 0x24, 0x28, 0x46, 0x84, 0xd8, 0x68, 0xa7, 0xfd, 0x22,
+ 0xa9, 0xf2, 0x6f, 0x14, 0x30, 0xdb, 0x0f, 0xa4, 0x1d, 0x19, 0xc1, 0x73, 0x5e, 0x46, 0xea, 0xff,
+ 0x8b, 0x73, 0xe0, 0xad, 0x11, 0x91, 0x67, 0x79, 0x6b, 0x8a, 0x07, 0x80, 0xcb, 0x44, 0x78, 0x0b,
+ 0x30, 0x5a, 0x1a, 0x5e, 0x74, 0x67, 0x3e, 0xa7, 0xd3, 0xb0, 0x32, 0x55, 0x81, 0xf2, 0x1b, 0x00,
+ 0x00, 0xff, 0xff, 0x5c, 0xd9, 0x58, 0xd2, 0x53, 0x01, 0x00, 0x00,
+}
diff --git a/examples/ipfs-camp-2019/08-End/chat.proto b/examples/ipfs-camp-2019/08-End/chat.proto
new file mode 100644
index 0000000000..bdf1429156
--- /dev/null
+++ b/examples/ipfs-camp-2019/08-End/chat.proto
@@ -0,0 +1,23 @@
+syntax = "proto2";
+package main;
+
+message Request {
+ enum Type {
+ SEND_MESSAGE = 0;
+ UPDATE_PEER = 1;
+ }
+
+ required Type type = 1;
+ optional SendMessage sendMessage = 2;
+ optional UpdatePeer updatePeer = 3;
+}
+
+message SendMessage {
+ required bytes data = 1;
+ required int64 created = 2;
+ required bytes id = 3;
+}
+
+message UpdatePeer {
+ optional bytes userHandle = 1;
+}
\ No newline at end of file
diff --git a/examples/ipfs-camp-2019/08-End/main.go b/examples/ipfs-camp-2019/08-End/main.go
new file mode 100644
index 0000000000..e3ef953df7
--- /dev/null
+++ b/examples/ipfs-camp-2019/08-End/main.go
@@ -0,0 +1,130 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "os/signal"
+ "syscall"
+
+ "github.com/libp2p/go-libp2p"
+ kaddht "github.com/libp2p/go-libp2p-kad-dht"
+ pubsub "github.com/libp2p/go-libp2p-pubsub"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/routing"
+ "github.com/libp2p/go-libp2p/p2p/discovery/mdns"
+ "github.com/libp2p/go-libp2p/p2p/muxer/yamux"
+ tls "github.com/libp2p/go-libp2p/p2p/security/tls"
+ "github.com/libp2p/go-libp2p/p2p/transport/tcp"
+ "github.com/libp2p/go-libp2p/p2p/transport/websocket"
+
+ "github.com/multiformats/go-multiaddr"
+)
+
+type mdnsNotifee struct {
+ h host.Host
+ ctx context.Context
+}
+
+func (m *mdnsNotifee) HandlePeerFound(pi peer.AddrInfo) {
+ m.h.Connect(m.ctx, pi)
+}
+
+func main() {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ transports := libp2p.ChainOptions(
+ libp2p.Transport(tcp.NewTCPTransport),
+ libp2p.Transport(websocket.New),
+ )
+
+ muxers := libp2p.Muxer("/yamux/1.0.0", yamux.DefaultTransport)
+
+ security := libp2p.Security(tls.ID, tls.New)
+
+ listenAddrs := libp2p.ListenAddrStrings(
+ "/ip4/0.0.0.0/tcp/0",
+ "/ip4/0.0.0.0/tcp/0/ws",
+ )
+
+ var dht *kaddht.IpfsDHT
+ newDHT := func(h host.Host) (routing.PeerRouting, error) {
+ var err error
+ dht, err = kaddht.New(ctx, h)
+ return dht, err
+ }
+ routing := libp2p.Routing(newDHT)
+
+ host, err := libp2p.New(
+ transports,
+ listenAddrs,
+ muxers,
+ security,
+ routing,
+ )
+ if err != nil {
+ panic(err)
+ }
+
+ ps, err := pubsub.NewGossipSub(ctx, host)
+ if err != nil {
+ panic(err)
+ }
+ topic, err := ps.Join(pubsubTopic)
+ if err != nil {
+ panic(err)
+ }
+ defer topic.Close()
+ sub, err := topic.Subscribe()
+ if err != nil {
+ panic(err)
+ }
+ go pubsubHandler(ctx, sub)
+
+ for _, addr := range host.Addrs() {
+ fmt.Println("Listening on", addr)
+ }
+
+ targetAddr, err := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/63785/p2p/QmWjz6xb8v9K4KnYEwP5Yk75k5mMBCehzWFLCvvQpYxF3d")
+ if err != nil {
+ panic(err)
+ }
+
+ targetInfo, err := peer.AddrInfoFromP2pAddr(targetAddr)
+ if err != nil {
+ panic(err)
+ }
+
+ err = host.Connect(ctx, *targetInfo)
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Println("Connected to", targetInfo.ID)
+
+ mdns := mdns.NewMdnsService(host, "", &mdnsNotifee{h: host, ctx: ctx})
+ if err := mdns.Start(); err != nil {
+ panic(err)
+ }
+
+ err = dht.Bootstrap(ctx)
+ if err != nil {
+ panic(err)
+ }
+
+ donec := make(chan struct{}, 1)
+ go chatInputLoop(ctx, host, topic, donec)
+
+ stop := make(chan os.Signal, 1)
+ signal.Notify(stop, syscall.SIGINT)
+
+ select {
+ case <-stop:
+ host.Close()
+ os.Exit(0)
+ case <-donec:
+ host.Close()
+ }
+}
diff --git a/examples/ipfs-camp-2019/08-End/protocol.go b/examples/ipfs-camp-2019/08-End/protocol.go
new file mode 100644
index 0000000000..02e4b98bc5
--- /dev/null
+++ b/examples/ipfs-camp-2019/08-End/protocol.go
@@ -0,0 +1,86 @@
+package main
+
+import (
+ "bufio"
+ "context"
+ "crypto/rand"
+ "fmt"
+ "os"
+ "time"
+
+ "strings"
+
+ "github.com/gogo/protobuf/proto"
+ pubsub "github.com/libp2p/go-libp2p-pubsub"
+ "github.com/libp2p/go-libp2p/core/host"
+ peer "github.com/libp2p/go-libp2p/core/peer"
+)
+
+func sendMessage(ctx context.Context, topic *pubsub.Topic, msg string) {
+ // Tag each message with a random id and its creation time.
+ msgId := make([]byte, 10)
+ if _, err := rand.Read(msgId); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ return
+ }
+ now := time.Now().Unix()
+ req := &Request{
+ Type: Request_SEND_MESSAGE.Enum(),
+ SendMessage: &SendMessage{
+ Id: msgId,
+ Data: []byte(msg),
+ Created: &now,
+ },
+ }
+ msgBytes, err := proto.Marshal(req)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ return
+ }
+ if err := topic.Publish(ctx, msgBytes); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ }
+}
+
+func updatePeer(ctx context.Context, topic *pubsub.Topic, id peer.ID, handle string) {
+ oldHandle, ok := handles[id.String()]
+ if !ok {
+ oldHandle = id.ShortString()
+ }
+ handles[id.String()] = handle
+
+ req := &Request{
+ Type: Request_UPDATE_PEER.Enum(),
+ UpdatePeer: &UpdatePeer{
+ UserHandle: []byte(handle),
+ },
+ }
+ reqBytes, err := proto.Marshal(req)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ return
+ }
+ err = topic.Publish(ctx, reqBytes)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ return
+ }
+
+ fmt.Printf("%s -> %s\n", oldHandle, handle)
+}
+
+func chatInputLoop(ctx context.Context, h host.Host, topic *pubsub.Topic, donec chan struct{}) {
+ scanner := bufio.NewScanner(os.Stdin)
+ for scanner.Scan() {
+ msg := scanner.Text()
+ if strings.HasPrefix(msg, "/name ") {
+ newHandle := strings.TrimPrefix(msg, "/name ")
+ newHandle = strings.TrimSpace(newHandle)
+ updatePeer(ctx, topic, h.ID(), newHandle)
+ } else {
+ sendMessage(ctx, topic, msg)
+ }
+ }
+ donec <- struct{}{}
+}
diff --git a/examples/ipfs-camp-2019/08-End/pubsub.go b/examples/ipfs-camp-2019/08-End/pubsub.go
new file mode 100644
index 0000000000..2be6b0123e
--- /dev/null
+++ b/examples/ipfs-camp-2019/08-End/pubsub.go
@@ -0,0 +1,57 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "os"
+
+ "github.com/gogo/protobuf/proto"
+ pubsub "github.com/libp2p/go-libp2p-pubsub"
+ peer "github.com/libp2p/go-libp2p/core/peer"
+)
+
+var handles = map[string]string{}
+
+const pubsubTopic = "/libp2p/example/chat/1.0.0"
+
+func pubsubMessageHandler(id peer.ID, msg *SendMessage) {
+ handle, ok := handles[id.String()]
+ if !ok {
+ handle = id.ShortString()
+ }
+ fmt.Printf("%s: %s\n", handle, msg.Data)
+}
+
+func pubsubUpdateHandler(id peer.ID, msg *UpdatePeer) {
+ oldHandle, ok := handles[id.String()]
+ if !ok {
+ oldHandle = id.ShortString()
+ }
+ handles[id.String()] = string(msg.UserHandle)
+ fmt.Printf("%s -> %s\n", oldHandle, msg.UserHandle)
+}
+
+func pubsubHandler(ctx context.Context, sub *pubsub.Subscription) {
+ defer sub.Cancel()
+ for {
+ msg, err := sub.Next(ctx)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ continue
+ }
+
+ req := &Request{}
+ err = proto.Unmarshal(msg.Data, req)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ continue
+ }
+
+ switch *req.Type {
+ case Request_SEND_MESSAGE:
+ pubsubMessageHandler(msg.GetFrom(), req.SendMessage)
+ case Request_UPDATE_PEER:
+ pubsubUpdateHandler(msg.GetFrom(), req.UpdatePeer)
+ }
+ }
+}
diff --git a/examples/ipfs-camp-2019/README.md b/examples/ipfs-camp-2019/README.md
new file mode 100644
index 0000000000..5502b27cb5
--- /dev/null
+++ b/examples/ipfs-camp-2019/README.md
@@ -0,0 +1,22 @@
+# IPFS Camp 2019 - Course B
+
+We've included some scaffolding to help you through the workshop. The folders
+in this directory are "checkpoints" of the project as we progress through the
+workshop goals. Should you get stuck at one and find yourself eager to push
+on, feel free to save your work and move on to the next stage!
+
+## Dependencies
+- [golang 1.24+](https://golang.org) (this module's `go.mod` sets `go 1.24`)
+
+## Optional Tooling
+If you'd like a more pleasant editing experience, VS Code's golang plugin has
+fantastic support for the nascent go language server implementation. I
+recommend saibing's fork, which adds lots of useful features.
+
+- [saibing's go language server](https://github.com/saibing/tools),
+ specifically `tools/cmd/gopls`
+- [VS Code](https://code.visualstudio.com/)
+
+## Running the Examples
+All of the examples, 01-Transports through 08-End, compile as written. To run
+one, change into that example's directory and execute `go run .`.
diff --git a/examples/ipfs-camp-2019/go.mod b/examples/ipfs-camp-2019/go.mod
new file mode 100644
index 0000000000..9ad1f28b07
--- /dev/null
+++ b/examples/ipfs-camp-2019/go.mod
@@ -0,0 +1,115 @@
+module github.com/libp2p/go-libp2p/examples/ipfs-camp-2019
+
+go 1.24
+
+require (
+ github.com/gogo/protobuf v1.3.2
+ github.com/libp2p/go-libp2p v0.33.0
+ github.com/libp2p/go-libp2p-kad-dht v0.25.1
+ github.com/libp2p/go-libp2p-pubsub v0.10.0
+ github.com/multiformats/go-multiaddr v0.12.2
+)
+
+require (
+ github.com/benbjohnson/clock v1.3.5 // indirect
+ github.com/beorn7/perks v1.0.1 // indirect
+ github.com/cespare/xxhash/v2 v2.2.0 // indirect
+ github.com/containerd/cgroups v1.1.0 // indirect
+ github.com/coreos/go-systemd/v22 v22.5.0 // indirect
+ github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
+ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
+ github.com/docker/go-units v0.5.0 // indirect
+ github.com/elastic/gosigar v0.14.2 // indirect
+ github.com/flynn/noise v1.1.0 // indirect
+ github.com/francoispqt/gojay v1.2.13 // indirect
+ github.com/go-logr/logr v1.3.0 // indirect
+ github.com/go-logr/stdr v1.2.2 // indirect
+ github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
+ github.com/godbus/dbus/v5 v5.1.0 // indirect
+ github.com/google/gopacket v1.1.19 // indirect
+ github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect
+ github.com/google/uuid v1.4.0 // indirect
+ github.com/gorilla/websocket v1.5.1 // indirect
+ github.com/hashicorp/errwrap v1.1.0 // indirect
+ github.com/hashicorp/go-multierror v1.1.1 // indirect
+ github.com/hashicorp/golang-lru v0.5.4 // indirect
+ github.com/hashicorp/golang-lru/v2 v2.0.5 // indirect
+ github.com/huin/goupnp v1.3.0 // indirect
+ github.com/ipfs/boxo v0.10.0 // indirect
+ github.com/ipfs/go-cid v0.4.1 // indirect
+ github.com/ipfs/go-datastore v0.6.0 // indirect
+ github.com/ipfs/go-log v1.0.5 // indirect
+ github.com/ipfs/go-log/v2 v2.5.1 // indirect
+ github.com/ipld/go-ipld-prime v0.20.0 // indirect
+ github.com/jackpal/go-nat-pmp v1.0.2 // indirect
+ github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
+ github.com/jbenet/goprocess v0.1.4 // indirect
+ github.com/klauspost/compress v1.17.6 // indirect
+ github.com/klauspost/cpuid/v2 v2.2.7 // indirect
+ github.com/koron/go-ssdp v0.0.4 // indirect
+ github.com/libp2p/go-buffer-pool v0.1.0 // indirect
+ github.com/libp2p/go-cidranger v1.1.0 // indirect
+ github.com/libp2p/go-flow-metrics v0.1.0 // indirect
+ github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
+ github.com/libp2p/go-libp2p-kbucket v0.6.3 // indirect
+ github.com/libp2p/go-libp2p-record v0.2.0 // indirect
+ github.com/libp2p/go-libp2p-routing-helpers v0.7.2 // indirect
+ github.com/libp2p/go-msgio v0.3.0 // indirect
+ github.com/libp2p/go-nat v0.2.0 // indirect
+ github.com/libp2p/go-netroute v0.2.1 // indirect
+ github.com/libp2p/go-reuseport v0.4.0 // indirect
+ github.com/libp2p/go-yamux/v4 v4.0.1 // indirect
+ github.com/libp2p/zeroconf/v2 v2.2.0 // indirect
+ github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
+ github.com/mattn/go-isatty v0.0.20 // indirect
+ github.com/miekg/dns v1.1.58 // indirect
+ github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
+ github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
+ github.com/minio/sha256-simd v1.0.1 // indirect
+ github.com/mr-tron/base58 v1.2.0 // indirect
+ github.com/multiformats/go-base32 v0.1.0 // indirect
+ github.com/multiformats/go-base36 v0.2.0 // indirect
+ github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect
+ github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
+ github.com/multiformats/go-multibase v0.2.0 // indirect
+ github.com/multiformats/go-multicodec v0.9.0 // indirect
+ github.com/multiformats/go-multihash v0.2.3 // indirect
+ github.com/multiformats/go-multistream v0.5.0 // indirect
+ github.com/multiformats/go-varint v0.0.7 // indirect
+ github.com/onsi/ginkgo/v2 v2.15.0 // indirect
+ github.com/opencontainers/runtime-spec v1.2.0 // indirect
+ github.com/opentracing/opentracing-go v1.2.0 // indirect
+ github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
+ github.com/pkg/errors v0.9.1 // indirect
+ github.com/polydawn/refmt v0.89.0 // indirect
+ github.com/prometheus/client_golang v1.18.0 // indirect
+ github.com/prometheus/client_model v0.6.0 // indirect
+ github.com/prometheus/common v0.47.0 // indirect
+ github.com/prometheus/procfs v0.12.0 // indirect
+ github.com/quic-go/qpack v0.4.0 // indirect
+ github.com/quic-go/quic-go v0.41.0 // indirect
+ github.com/quic-go/webtransport-go v0.6.0 // indirect
+ github.com/raulk/go-watchdog v1.3.0 // indirect
+ github.com/spaolacci/murmur3 v1.1.0 // indirect
+ github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect
+ go.opencensus.io v0.24.0 // indirect
+ go.opentelemetry.io/otel v1.16.0 // indirect
+ go.opentelemetry.io/otel/metric v1.16.0 // indirect
+ go.opentelemetry.io/otel/trace v1.16.0 // indirect
+ go.uber.org/dig v1.17.1 // indirect
+ go.uber.org/fx v1.20.1 // indirect
+ go.uber.org/mock v0.4.0 // indirect
+ go.uber.org/multierr v1.11.0 // indirect
+ go.uber.org/zap v1.27.0 // indirect
+ golang.org/x/crypto v0.19.0 // indirect
+ golang.org/x/exp v0.0.0-20240213143201-ec583247a57a // indirect
+ golang.org/x/mod v0.15.0 // indirect
+ golang.org/x/net v0.21.0 // indirect
+ golang.org/x/sync v0.6.0 // indirect
+ golang.org/x/sys v0.17.0 // indirect
+ golang.org/x/text v0.14.0 // indirect
+ golang.org/x/tools v0.18.0 // indirect
+ gonum.org/v1/gonum v0.13.0 // indirect
+ google.golang.org/protobuf v1.32.0 // indirect
+ lukechampine.com/blake3 v1.2.1 // indirect
+)
diff --git a/examples/ipfs-camp-2019/go.sum b/examples/ipfs-camp-2019/go.sum
new file mode 100644
index 0000000000..fecbd95e6a
--- /dev/null
+++ b/examples/ipfs-camp-2019/go.sum
@@ -0,0 +1,590 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo=
+dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
+dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
+dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
+dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
+git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
+github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
+github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
+github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
+github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
+github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
+github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU=
+github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U=
+github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y=
+github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
+github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
+github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
+github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4=
+github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
+github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
+github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
+github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
+github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
+github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY=
+github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
+github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
+github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
+github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0=
+github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
+github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
+github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
+github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 h1:E/LAvt58di64hlYjx7AsNS6C/ysHWYo+2qPCZKTQhRo=
+github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
+github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
+github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c h1:7lF+Vz0LqiRidnzC1Oq86fpX1q/iEv2KJdrCtttYjT4=
+github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY=
+github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
+github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvHgwfx2Vm4=
+github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
+github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
+github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
+github.com/ipfs/boxo v0.10.0 h1:tdDAxq8jrsbRkYoF+5Rcqyeb91hgWe2hp7iLu7ORZLY=
+github.com/ipfs/boxo v0.10.0/go.mod h1:Fg+BnfxZ0RPzR0nOodzdIq3A7KgoWAOWsEIImrIQdBM=
+github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s=
+github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk=
+github.com/ipfs/go-datastore v0.6.0 h1:JKyz+Gvz1QEZw0LsX1IBn+JFCJQH4SJVFtM4uWU0Myk=
+github.com/ipfs/go-datastore v0.6.0/go.mod h1:rt5M3nNbSO/8q1t4LNkLyUwRs8HupMeN/8O4Vn9YAT8=
+github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk=
+github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps=
+github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8=
+github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ=
+github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8=
+github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JPtIo=
+github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g=
+github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY=
+github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI=
+github.com/ipld/go-ipld-prime v0.20.0 h1:Ud3VwE9ClxpO2LkCYP7vWPc0Fo+dYdYzgxUJZ3uRG4g=
+github.com/ipld/go-ipld-prime v0.20.0/go.mod h1:PzqZ/ZR981eKbgdr3y2DJYeD/8bgMawdGVlJDE8kK+M=
+github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
+github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
+github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA=
+github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk=
+github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk=
+github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o=
+github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4=
+github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI=
+github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
+github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
+github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
+github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0=
+github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
+github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
+github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c=
+github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic=
+github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM=
+github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro=
+github.com/libp2p/go-libp2p v0.33.0 h1:yTPSr8sJRbfeEYXyeN8VPVSlTlFjtMUwGDRniwaf/xQ=
+github.com/libp2p/go-libp2p v0.33.0/go.mod h1:RIJFRQVUBKy82dnW7J5f1homqqv6NcsDJAl3e7CRGfE=
+github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94=
+github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
+github.com/libp2p/go-libp2p-kad-dht v0.25.1 h1:ofFNrf6MMEy4vi3R1VbJ7LOcTn3Csh0cDcaWHTxtWNA=
+github.com/libp2p/go-libp2p-kad-dht v0.25.1/go.mod h1:6za56ncRHYXX4Nc2vn8z7CZK0P4QiMcrn77acKLM2Oo=
+github.com/libp2p/go-libp2p-kbucket v0.6.3 h1:p507271wWzpy2f1XxPzCQG9NiN6R6lHL9GiSErbQQo0=
+github.com/libp2p/go-libp2p-kbucket v0.6.3/go.mod h1:RCseT7AH6eJWxxk2ol03xtP9pEHetYSPXOaJnOiD8i0=
+github.com/libp2p/go-libp2p-pubsub v0.10.0 h1:wS0S5FlISavMaAbxyQn3dxMOe2eegMfswM471RuHJwA=
+github.com/libp2p/go-libp2p-pubsub v0.10.0/go.mod h1:1OxbaT/pFRO5h+Dpze8hdHQ63R0ke55XTs6b6NwLLkw=
+github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0=
+github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvNDFGKX7QdlpYUcwk=
+github.com/libp2p/go-libp2p-routing-helpers v0.7.2 h1:xJMFyhQ3Iuqnk9Q2dYE1eUTzsah7NLw3Qs2zjUV78T0=
+github.com/libp2p/go-libp2p-routing-helpers v0.7.2/go.mod h1:cN4mJAD/7zfPKXBcs9ze31JGYAZgzdABEm+q/hkswb8=
+github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA=
+github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg=
+github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0=
+github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM=
+github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk=
+github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk=
+github.com/libp2p/go-netroute v0.2.1 h1:V8kVrpD8GK0Riv15/7VN6RbUQ3URNZVosw7H2v9tksU=
+github.com/libp2p/go-netroute v0.2.1/go.mod h1:hraioZr0fhBjG0ZRXJJ6Zj2IVEVNx6tDTFQfSmcq7mQ=
+github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s=
+github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU=
+github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCypkQ=
+github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4=
+github.com/libp2p/zeroconf/v2 v2.2.0 h1:Cup06Jv6u81HLhIj1KasuNM/RHHrJ8T7wOTS4+Tv53Q=
+github.com/libp2p/zeroconf/v2 v2.2.0/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs=
+github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
+github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk=
+github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
+github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
+github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
+github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
+github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4=
+github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY=
+github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
+github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms=
+github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc=
+github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU=
+github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc=
+github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s=
+github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
+github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
+github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
+github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
+github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
+github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
+github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
+github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
+github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
+github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
+github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo=
+github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4=
+github.com/multiformats/go-multiaddr v0.12.2 h1:9G9sTY/wCYajKa9lyfWPmpZAwe6oV+Wb1zcmMS1HG24=
+github.com/multiformats/go-multiaddr v0.12.2/go.mod h1:GKyaTYjZRdcUhyOetrxTk9z0cW+jA/YrnqTOvKgi44M=
+github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A=
+github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk=
+github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=
+github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo=
+github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
+github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
+github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg=
+github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k=
+github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
+github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
+github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
+github.com/multiformats/go-multistream v0.5.0 h1:5htLSLl7lvJk3xx3qT/8Zm9J4K8vEOf/QGkvOGQAyiE=
+github.com/multiformats/go-multistream v0.5.0/go.mod h1:n6tMZiwiP2wUsR8DgfDWw1dydlEqV3l6N3/GBsX6ILA=
+github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
+github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
+github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
+github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
+github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
+github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY=
+github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM=
+github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8=
+github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
+github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
+github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
+github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
+github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
+github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
+github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4=
+github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw=
+github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk=
+github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos=
+github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8=
+github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.47.0 h1:p5Cz0FNHo7SnWOmWmoRozVcjEp0bIVU8cV7OShpjL1k=
+github.com/prometheus/common v0.47.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
+github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
+github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
+github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo=
+github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A=
+github.com/quic-go/quic-go v0.41.0 h1:aD8MmHfgqTURWNJy48IYFg2OnxwHT3JL7ahGs73lb4k=
+github.com/quic-go/quic-go v0.41.0/go.mod h1:qCkNjqczPEvgsOnxZ0eCD14lv+B2LHlFAB++CNOh9hA=
+github.com/quic-go/webtransport-go v0.6.0 h1:CvNsKqc4W2HljHJnoT+rMmbRJybShZ0YPFDD3NxaZLY=
+github.com/quic-go/webtransport-go v0.6.0/go.mod h1:9KjU4AEBqEQidGHNDkZrb8CAa1abRaosM2yGOyiikEc=
+github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk=
+github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
+github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
+github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
+github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
+github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
+github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0=
+github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
+github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
+github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw=
+github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI=
+github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU=
+github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag=
+github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg=
+github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw=
+github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y=
+github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
+github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q=
+github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ=
+github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I=
+github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0=
+github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ=
+github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk=
+github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
+github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs=
+github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
+github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg=
+github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM=
+github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
+github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
+github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
+github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
+github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
+github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
+github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ=
+github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw=
+github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k=
+github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
+go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
+go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
+go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s=
+go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4=
+go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo=
+go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4=
+go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs=
+go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0=
+go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
+go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
+go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc=
+go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
+go.uber.org/fx v1.20.1 h1:zVwVQGS8zYvhh9Xxcu4w1M6ESyeMzebzj2NbSayZ4Mk=
+go.uber.org/fx v1.20.1/go.mod h1:iSYNbHf2y55acNCwCXKx7LbWb5WG1Bnue5RDXz1OREg=
+go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU=
+go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
+go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
+go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
+go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
+go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ=
+go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
+go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
+golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
+golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
+golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20240213143201-ec583247a57a h1:HinSgX1tJRX3KsL//Gxynpw5CTOAIPhgL4W8PNiIpVE=
+golang.org/x/exp v0.0.0-20240213143201-ec583247a57a/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc=
+golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8=
+golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
+golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
+golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
+golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
+golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ=
+golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gonum.org/v1/gonum v0.13.0 h1:a0T3bh+7fhRyqeNbiC3qVHYmkiQgit3wnNan/2c0HMM=
+gonum.org/v1/gonum v0.13.0/go.mod h1:/WPYRckkfWrhWefxyYTfrTtQR0KH4iyHNuzxqXAKyAU=
+google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
+google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
+google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
+google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
+google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I=
+google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
+honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI=
+lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k=
+sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
+sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
diff --git a/examples/libp2p-host/.gitignore b/examples/libp2p-host/.gitignore
new file mode 100644
index 0000000000..c5551b38f4
--- /dev/null
+++ b/examples/libp2p-host/.gitignore
@@ -0,0 +1 @@
+libp2p-host
diff --git a/examples/libp2p-host/README.md b/examples/libp2p-host/README.md
index f0cac08374..6de534cff6 100644
--- a/examples/libp2p-host/README.md
+++ b/examples/libp2p-host/README.md
@@ -1,79 +1,14 @@
# The libp2p 'host'
-For most applications, the host is the basic building block you'll need to get started. This guide will show how to construct and use a simple host.
+For most applications, the host is the basic building block you'll need to get started. This guide will show how to construct and use a simple host, as well as a more fully-featured one.
The host is an abstraction that manages services on top of a swarm. It provides a clean interface to connect to a service on a given remote peer.
-First, you'll need an ID, and a place to store that ID. To generate an ID, you can do the following:
+If you want to create a host with a default configuration, refer to the example in `./host.go`.
-```go
-import (
- "crypto/rand"
+If you want more control over the configuration, you can specify options to the constructor. For a full list of the configuration options supported by the constructor, [see the different options in the docs](https://godoc.org/github.com/libp2p/go-libp2p).
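+
+As a quick illustration, passing options looks roughly like this (a minimal sketch using only options that appear in `./host.go`; see that file for the complete, runnable version):
+
+```go
+// Sketch only: a host with a fixed listen address and NAT port mapping.
+h, err := libp2p.New(
+	libp2p.ListenAddrStrings("/ip4/0.0.0.0/tcp/9000"),
+	libp2p.NATPortMap(),
+)
+if err != nil {
+	panic(err)
+}
+defer h.Close()
+```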
- crypto "github.com/libp2p/go-libp2p-crypto"
- peer "github.com/libp2p/go-libp2p-peer"
- pstore "github.com/libp2p/go-libp2p-peerstore"
-)
-
-// Generate an identity keypair using go's cryptographic randomness source
-priv, pub, err := crypto.GenerateEd25519Key(rand.Reader)
-if err != nil {
- panic(err)
-}
-
-// A peers ID is the hash of its public key
-pid, err := peer.IDFromPublicKey(pub)
-if err != nil {
- panic(err)
-}
-
-// We've created the identity, now we need to store it.
-// A peerstore holds information about peers, including your own
-ps := pstore.NewPeerstore()
-ps.AddPrivKey(pid, priv)
-ps.AddPubKey(pid, pub)
-```
-
-Next, you'll need at least one address that you want to listen on. You can go from a string to a multiaddr like this:
-
-```go
-import ma "github.com/multiformats/go-multiaddr"
-
-...
-
-maddr, err := ma.NewMultiaddr("/ip4/0.0.0.0/tcp/9000")
-if err != nil {
- panic(err)
-}
-```
-
-Now you know who you are, and where you live (in a manner of speaking). The next step is setting up a 'swarm network' to handle all the peers you will connect to. The swarm handles incoming connections from other peers, and handles the negotiation of new outbound connections.
-
-```go
-import (
- "context"
- swarm "github.com/libp2p/go-libp2p-swarm"
-)
-
-// Make a context to govern the lifespan of the swarm
-ctx := context.Background()
-
-// Put all this together
-netw, err := swarm.NewNetwork(ctx, []ma.Multiaddr{maddr}, pid, ps, nil)
-if err != nil {
- panic(err)
-}
-```
-
-At this point, we have everything needed to finally construct a host. That call is the simplest one so far:
-
-```go
-import bhost "github.com/libp2p/go-libp2p/p2p/host/basic"
-
-myhost := bhost.New(netw)
-```
-
-And thats it, you have a libp2p host and you're ready to start doing some awesome p2p networking!
+In `./host.go` we set a number of useful options, such as a custom identity, and enable routing. This improves the discoverability and reachability of the peer in NAT'ed environments.
In future guides we will go over ways to use hosts, configure them differently (hint: there are a huge number of ways to set these up), and interesting ways to apply this technology to various applications you might want to build.
diff --git a/examples/libp2p-host/host.go b/examples/libp2p-host/host.go
index 793cdf4fd8..8010dccaf0 100644
--- a/examples/libp2p-host/host.go
+++ b/examples/libp2p-host/host.go
@@ -2,46 +2,107 @@ package main
import (
"context"
- "crypto/rand"
- "fmt"
+ "log"
+ "time"
- libp2p "github.com/libp2p/go-libp2p"
- crypto "github.com/libp2p/go-libp2p-crypto"
+ "github.com/libp2p/go-libp2p/p2p/net/connmgr"
+
+ "github.com/libp2p/go-libp2p"
+ dht "github.com/libp2p/go-libp2p-kad-dht"
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/routing"
+ "github.com/libp2p/go-libp2p/p2p/security/noise"
+ libp2ptls "github.com/libp2p/go-libp2p/p2p/security/tls"
)
func main() {
- // The context governs the lifetime of the libp2p node
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ run()
+}
+func run() {
// To construct a simple host with all the default settings, just use `New`
- h, err := libp2p.New(ctx)
+ h, err := libp2p.New()
if err != nil {
panic(err)
}
+ defer h.Close()
- fmt.Printf("Hello World, my hosts ID is %s\n", h.ID())
+ log.Printf("Hello World, my hosts ID is %s\n", h.ID())
- // If you want more control over the configuration, you can specify some
- // options to the constructor
+	// Now, normally you do not just want a simple host, you want one
+	// that is fully configured to best support your p2p application.
+	// Let's create a second host, setting some more options.
// Set your own keypair
- priv, _, err := crypto.GenerateEd25519Key(rand.Reader)
+ priv, _, err := crypto.GenerateKeyPair(
+		crypto.Ed25519, // Select your key type. Ed25519 keys are nice and short.
+		-1,             // Select key length when possible (e.g. RSA).
+ )
if err != nil {
panic(err)
}
- h2, err := libp2p.New(ctx,
- // Use your own created keypair
- libp2p.Identity(priv),
+ var idht *dht.IpfsDHT
- // Set your own listen address
- // The config takes an array of addresses, specify as many as you want.
- libp2p.ListenAddrStrings("/ip4/0.0.0.0/tcp/9000"),
+ connmgr, err := connmgr.NewConnManager(
+		100, // LowWater
+		400, // HighWater
+ connmgr.WithGracePeriod(time.Minute),
+ )
+ if err != nil {
+ panic(err)
+ }
+ h2, err := libp2p.New(
+ // Use the keypair we generated
+ libp2p.Identity(priv),
+ // Multiple listen addresses
+ libp2p.ListenAddrStrings(
+ "/ip4/0.0.0.0/tcp/9000", // regular tcp connections
+ "/ip4/0.0.0.0/udp/9000/quic-v1", // a UDP endpoint for the QUIC transport
+ ),
+ // support TLS connections
+ libp2p.Security(libp2ptls.ID, libp2ptls.New),
+ // support noise connections
+ libp2p.Security(noise.ID, noise.New),
+ // support any other default transports (TCP)
+ libp2p.DefaultTransports,
+ // Let's prevent our peer from having too many
+ // connections by attaching a connection manager.
+ libp2p.ConnectionManager(connmgr),
+		// Attempt to open ports using UPnP for NATed hosts.
+ libp2p.NATPortMap(),
+ // Let this host use the DHT to find other hosts
+ libp2p.Routing(func(h host.Host) (routing.PeerRouting, error) {
+ idht, err = dht.New(context.Background(), h)
+ return idht, err
+ }),
+ // If you want to help other peers to figure out if they are behind
+ // NATs, you can launch the server-side of AutoNAT too (AutoRelay
+ // already runs the client)
+ //
+ // This service is highly rate-limited and should not cause any
+ // performance issues.
+ libp2p.EnableNATService(),
)
if err != nil {
panic(err)
}
+ defer h2.Close()
+
+ // The last step to get fully up and running would be to connect to
+	// bootstrap peers (or any other peers). We leave this commented out as
+	// this is an example and the peer will exit as soon as it finishes, so
+	// there is no need to put strain on the network.
- fmt.Printf("Hello World, my second hosts ID is %s\n", h2.ID())
+ /*
+ // This connects to public bootstrappers
+ for _, addr := range dht.DefaultBootstrapPeers {
+ pi, _ := peer.AddrInfoFromP2pAddr(addr)
+ // We ignore errors as some bootstrap peers may be down
+ // and that is fine.
+			h2.Connect(context.Background(), *pi)
+ }
+ */
+ log.Printf("Hello World, my second hosts ID is %s\n", h2.ID())
}
diff --git a/examples/libp2p-host/host_test.go b/examples/libp2p-host/host_test.go
new file mode 100644
index 0000000000..989014c52b
--- /dev/null
+++ b/examples/libp2p-host/host_test.go
@@ -0,0 +1,14 @@
+package main
+
+import (
+ "testing"
+
+ "github.com/libp2p/go-libp2p/examples/testutils"
+)
+
+func TestMain(t *testing.T) {
+ var h testutils.LogHarness
+ h.ExpectPrefix("Hello World, my hosts ID is ")
+ h.ExpectPrefix("Hello World, my second hosts ID is ")
+ h.Run(t, run)
+}
diff --git a/examples/metrics-and-dashboards/README.md b/examples/metrics-and-dashboards/README.md
new file mode 100644
index 0000000000..9218994d2e
--- /dev/null
+++ b/examples/metrics-and-dashboards/README.md
@@ -0,0 +1,13 @@
+# Metrics and Dashboards
+
+An example that demonstrates using Prometheus and Grafana to view go-libp2p
+metrics. It sets up a Prometheus server and a Grafana server via Docker
+Compose. A small go-libp2p dummy application is included to emit metrics.
+
+Run it with:
+
+```sh
+docker compose -f ../../dashboards/docker-compose.base.yml -f ./compose.yml up
+```
+
+Go to http://localhost:3000/dashboards to see the dashboards.
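+
+For reference, the dummy application wires up the metrics endpoint roughly like this (a minimal sketch; see `main.go` for the full program):
+
+```go
+// go-libp2p registers its metrics with the default Prometheus registry,
+// so serving promhttp.Handler() exposes them on this path.
+http.Handle("/debug/metrics/prometheus", promhttp.Handler())
+log.Fatal(http.ListenAndServe(":5001", nil))
+```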
diff --git a/examples/metrics-and-dashboards/compose.yml b/examples/metrics-and-dashboards/compose.yml
new file mode 100644
index 0000000000..fb10b4a81a
--- /dev/null
+++ b/examples/metrics-and-dashboards/compose.yml
@@ -0,0 +1,15 @@
+services:
+ prometheus:
+ image: prom/prometheus:latest
+ ports:
+ - "9090:9090"
+ volumes:
+ - ../examples/metrics-and-dashboards/prometheus.yml:/etc/prometheus/prometheus.yml
+ go-libp2p-node:
+ build:
+ context: ../examples/metrics-and-dashboards/
+ dockerfile: go-libp2p-node.Dockerfile
+ ports:
+ - 5001:5001
+ expose:
+ - 5001
diff --git a/examples/metrics-and-dashboards/go-libp2p-node.Dockerfile b/examples/metrics-and-dashboards/go-libp2p-node.Dockerfile
new file mode 100644
index 0000000000..bd044e088a
--- /dev/null
+++ b/examples/metrics-and-dashboards/go-libp2p-node.Dockerfile
@@ -0,0 +1,7 @@
+FROM golang:alpine
+WORKDIR /app
+COPY ./main.go .
+RUN go mod init example.com/m/v2
+RUN go mod tidy
+RUN go build main.go
+ENTRYPOINT [ "/app/main" ]
diff --git a/examples/metrics-and-dashboards/main.go b/examples/metrics-and-dashboards/main.go
new file mode 100644
index 0000000000..37eee4285a
--- /dev/null
+++ b/examples/metrics-and-dashboards/main.go
@@ -0,0 +1,93 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "math/rand"
+ "net/http"
+ "sync"
+ "time"
+
+ "github.com/libp2p/go-libp2p"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/p2p/protocol/ping"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promhttp"
+
+ rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
+)
+
+const ClientCount = 32
+
+func main() {
+ http.Handle("/metrics", promhttp.Handler())
+ go func() {
+ http.Handle("/debug/metrics/prometheus", promhttp.Handler())
+ log.Fatal(http.ListenAndServe(":5001", nil))
+ }()
+
+ rcmgr.MustRegisterWith(prometheus.DefaultRegisterer)
+
+ str, err := rcmgr.NewStatsTraceReporter()
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ rmgr, err := rcmgr.NewResourceManager(rcmgr.NewFixedLimiter(rcmgr.DefaultLimits.AutoScale()), rcmgr.WithTraceReporter(str))
+ if err != nil {
+ log.Fatal(err)
+ }
+ server, err := libp2p.New(libp2p.ResourceManager(rmgr))
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Make a bunch of clients that all ping the server at various times
+ wg := sync.WaitGroup{}
+ for i := 0; i < ClientCount; i++ {
+ wg.Add(1)
+ go func(i int) {
+ defer wg.Done()
+ time.Sleep(time.Duration(i%100) * 100 * time.Millisecond)
+ newClient(peer.AddrInfo{
+ ID: server.ID(),
+ Addrs: server.Addrs(),
+ }, i)
+ }(i)
+ }
+ wg.Wait()
+}
+
+func newClient(serverInfo peer.AddrInfo, pings int) {
+ // Sleep some random amount of time to spread out the clients so the graphs look more interesting
+ time.Sleep(time.Duration(rand.Intn(100)) * time.Second)
+ fmt.Println("Started client", pings)
+
+ client, err := libp2p.New(
+ // We just want metrics from the server
+ libp2p.DisableMetrics(),
+ libp2p.NoListenAddrs,
+ )
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer func() {
+		_ = client.Close()
+	}()
+
+	// Surface connection errors instead of silently dropping them.
+	if err := client.Connect(context.Background(), serverInfo); err != nil {
+		log.Fatal(err)
+	}
+
+ p := ping.Ping(context.Background(), client, serverInfo.ID)
+
+ pingSoFar := 0
+ for pingSoFar < pings {
+ res := <-p
+ pingSoFar++
+ if res.Error != nil {
+ log.Fatal(res.Error)
+ }
+ time.Sleep(5 * time.Second)
+ }
+}
diff --git a/examples/metrics-and-dashboards/prometheus.yml b/examples/metrics-and-dashboards/prometheus.yml
new file mode 100644
index 0000000000..3f86ad0c26
--- /dev/null
+++ b/examples/metrics-and-dashboards/prometheus.yml
@@ -0,0 +1,16 @@
+global:
+ scrape_interval: 1m
+
+scrape_configs:
+ - job_name: "prometheus"
+ scrape_interval: 1m
+ static_configs:
+ - targets: ["localhost:9090"]
+
+ - job_name: "node"
+ static_configs:
+ - targets: ["node-exporter:9100"]
+ - job_name: "go-libp2p"
+ metrics_path: /debug/metrics/prometheus
+ static_configs:
+ - targets: ["go-libp2p-node:5001"]
diff --git a/examples/multipro/.gitignore b/examples/multipro/.gitignore
new file mode 100644
index 0000000000..06ffbb6bc6
--- /dev/null
+++ b/examples/multipro/.gitignore
@@ -0,0 +1 @@
+multipro
diff --git a/examples/multipro/README.md b/examples/multipro/README.md
index c60da8b173..6fb0b2e967 100644
--- a/examples/multipro/README.md
+++ b/examples/multipro/README.md
@@ -1,53 +1,34 @@
-# Protocol Multiplexing using rpc-style multicodecs, protobufs with libp2p
+# Protocol Multiplexing using rpc-style protobufs with libp2p
-This examples shows how to use multicodecs (i.e. protobufs) to encode and transmit information between LibP2P hosts using LibP2P Streams.
-Multicodecs present a common interface, making it very easy to swap the codec implementation if needed.
-This example expects that you area already familiar with the [echo example](https://github.com/libp2p/go-libp2p/tree/master/examples/echo).
+This example shows how to use protobufs to encode and transmit information between libp2p hosts using libp2p Streams.
+This example expects that you are already familiar with the [echo example](https://github.com/libp2p/go-libp2p/tree/master/examples/echo).
## Build
-Install gx:
-```sh
-> go get -u github.com/whyrusleeping/gx
-
-```
-
-Run GX from the root libp2p source dir:
-```sh
->gx install
-```
-
-Build libp2p:
-```sh
-> make deps
-> make
-```
-
-Run from `multipro` directory
+From the `go-libp2p/examples` directory run the following:
```sh
+> cd multipro/
> go build
```
-
## Usage
```sh
> ./multipro
-
```
## Details
-The example creates two LibP2P Hosts supporting 2 protocols: ping and echo.
+The example creates two libp2p Hosts supporting 2 protocols: ping and echo.
-Each protocol consists RPC-style requests and responses and each request and response is a typed protobufs message (and a go data object).
+Each protocol consists of RPC-style requests and responses and each request and response is a typed protobufs message (and a go data object).
-This is a different pattern then defining a whole p2p protocol as one protobuf message with lots of optional fields (as can be observed in various p2p-lib protocols using protobufs such as dht).
+This is a different pattern than defining a whole p2p protocol as one protobuf message with lots of optional fields (as can be observed in various p2p-lib protocols using protobufs such as dht).
The example shows how to match async received responses with their requests. This is useful when processing a response requires access to the request data.
-The idea is to use lib-p2p protocol multiplexing on a per-message basis.
+The idea is to use libp2p protocol multiplexing on a per-message basis.
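+
+Concretely, each request and response message type gets its own libp2p protocol ID, following the pattern `/protocol-name/request-or-response-message/version`. From `ping.go` (the echo protocol defines its IDs the same way):
+
+```go
+const pingRequest = "/ping/pingreq/0.0.1"
+const pingResponse = "/ping/pingresp/0.0.1"
+```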
### Features
1. 2 fully implemented protocols using an RPC-like request-response pattern - Ping and Echo
@@ -58,5 +39,3 @@ The idea is to use lib-p2p protocol multiplexing on a per-message basis.
## Author
@avive
-
-
diff --git a/examples/multipro/echo.go b/examples/multipro/echo.go
index 48403d7f72..8ea67927a9 100644
--- a/examples/multipro/echo.go
+++ b/examples/multipro/echo.go
@@ -1,17 +1,16 @@
package main
import (
- "bufio"
- "context"
"fmt"
+ "io"
"log"
- inet "github.com/libp2p/go-libp2p-net"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
- "github.com/libp2p/go-libp2p-host"
+ "github.com/gogo/protobuf/proto"
+ "github.com/google/uuid"
pb "github.com/libp2p/go-libp2p/examples/multipro/pb"
- protobufCodec "github.com/multiformats/go-multicodec/protobuf"
- uuid "github.com/satori/go.uuid"
)
// pattern: /protocol-name/request-or-response-message/version
@@ -36,11 +35,20 @@ func NewEchoProtocol(node *Node, done chan bool) *EchoProtocol {
}
// remote peer requests handler
-func (e *EchoProtocol) onEchoRequest(s inet.Stream) {
+func (e *EchoProtocol) onEchoRequest(s network.Stream) {
+
// get request data
data := &pb.EchoRequest{}
- decoder := protobufCodec.Multicodec(nil).Decoder(bufio.NewReader(s))
- err := decoder.Decode(data)
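+	// one message per stream: read the request until the remote closes its
+	// write side, then close our end and unmarshal the full buffer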
+ buf, err := io.ReadAll(s)
+ if err != nil {
+ s.Reset()
+ log.Println(err)
+ return
+ }
+ s.Close()
+
+ // unmarshal it
+ err = proto.Unmarshal(buf, data)
if err != nil {
log.Println(err)
return
@@ -71,27 +79,32 @@ func (e *EchoProtocol) onEchoRequest(s inet.Stream) {
}
// add the signature to the message
- resp.MessageData.Sign = string(signature)
+ resp.MessageData.Sign = signature
- s, respErr := e.node.NewStream(context.Background(), s.Conn().RemotePeer(), echoResponse)
- if respErr != nil {
- log.Println(respErr)
- return
- }
-
- ok := e.node.sendProtoMessage(resp, s)
+ ok := e.node.sendProtoMessage(s.Conn().RemotePeer(), echoResponse, resp)
if ok {
log.Printf("%s: Echo response to %s sent.", s.Conn().LocalPeer().String(), s.Conn().RemotePeer().String())
}
+ e.done <- true
}
// remote echo response handler
-func (e *EchoProtocol) onEchoResponse(s inet.Stream) {
+func (e *EchoProtocol) onEchoResponse(s network.Stream) {
+
data := &pb.EchoResponse{}
- decoder := protobufCodec.Multicodec(nil).Decoder(bufio.NewReader(s))
- err := decoder.Decode(data)
+ buf, err := io.ReadAll(s)
if err != nil {
+ s.Reset()
+ log.Println(err)
+ return
+ }
+ s.Close()
+
+ // unmarshal it
+ err = proto.Unmarshal(buf, data)
+ if err != nil {
+ log.Println(err)
return
}
@@ -126,7 +139,7 @@ func (e *EchoProtocol) Echo(host host.Host) bool {
// create message data
req := &pb.EchoRequest{
- MessageData: e.node.NewMessageData(uuid.Must(uuid.NewV4()).String(), false),
+ MessageData: e.node.NewMessageData(uuid.New().String(), false),
Message: fmt.Sprintf("Echo from %s", e.node.ID())}
signature, err := e.node.signProtoMessage(req)
@@ -136,15 +149,9 @@ func (e *EchoProtocol) Echo(host host.Host) bool {
}
// add the signature to the message
- req.MessageData.Sign = string(signature)
-
- s, err := e.node.NewStream(context.Background(), host.ID(), echoRequest)
- if err != nil {
- log.Println(err)
- return false
- }
+ req.MessageData.Sign = signature
- ok := e.node.sendProtoMessage(req, s)
+ ok := e.node.sendProtoMessage(host.ID(), echoRequest, req)
if !ok {
return false
diff --git a/examples/multipro/main.go b/examples/multipro/main.go
index 4f9e144c2a..f7b1ede5ff 100644
--- a/examples/multipro/main.go
+++ b/examples/multipro/main.go
@@ -1,39 +1,21 @@
package main
import (
- "context"
"fmt"
"log"
"math/rand"
- crypto "github.com/libp2p/go-libp2p-crypto"
- peer "github.com/libp2p/go-libp2p-peer"
- ps "github.com/libp2p/go-libp2p-peerstore"
- swarm "github.com/libp2p/go-libp2p-swarm"
- bhost "github.com/libp2p/go-libp2p/p2p/host/basic"
+ "github.com/libp2p/go-libp2p"
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+
ma "github.com/multiformats/go-multiaddr"
)
-// helper method - create a lib-p2p host to listen on a port
-func makeRandomNode(port int, done chan bool) *Node {
- // Ignoring most errors for brevity
- // See echo example for more details and better implementation
- priv, pub, _ := crypto.GenerateKeyPair(crypto.Secp256k1, 256)
- pid, _ := peer.IDFromPublicKey(pub)
- listen, _ := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", port))
- peerStore := ps.NewPeerstore()
- peerStore.AddPrivKey(pid, priv)
- peerStore.AddPubKey(pid, pub)
- n, _ := swarm.NewNetwork(context.Background(), []ma.Multiaddr{listen}, pid, peerStore, nil)
- host := bhost.New(n)
-
- return NewNode(host, done)
-}
-
func main() {
+ rnd := rand.New(rand.NewSource(666))
// Choose random ports between 10000-10100
- rand.Seed(666)
- port1 := rand.Intn(100) + 10000
+ port1 := rnd.Intn(100) + 10000
port2 := port1 + 1
done := make(chan bool, 1)
@@ -41,11 +23,31 @@ func main() {
// Make 2 hosts
h1 := makeRandomNode(port1, done)
h2 := makeRandomNode(port2, done)
- h1.Peerstore().AddAddrs(h2.ID(), h2.Addrs(), ps.PermanentAddrTTL)
- h2.Peerstore().AddAddrs(h1.ID(), h1.Addrs(), ps.PermanentAddrTTL)
log.Printf("This is a conversation between %s and %s\n", h1.ID(), h2.ID())
+ run(h1, h2, done)
+}
+
+// helper method - create a libp2p host to listen on a port
+func makeRandomNode(port int, done chan bool) *Node {
+ // Ignoring most errors for brevity
+ // See echo example for more details and better implementation
+ priv, _, _ := crypto.GenerateKeyPair(crypto.Secp256k1, 256)
+ listen, _ := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", port))
+ host, _ := libp2p.New(
+ libp2p.ListenAddrs(listen),
+ libp2p.Identity(priv),
+ )
+
+ return NewNode(host, done)
+}
+
+func run(h1, h2 *Node, done <-chan bool) {
+ // connect peers
+ h1.Peerstore().AddAddrs(h2.ID(), h2.Addrs(), peerstore.PermanentAddrTTL)
+ h2.Peerstore().AddAddrs(h1.ID(), h1.Addrs(), peerstore.PermanentAddrTTL)
+
// send messages using the protocols
h1.Ping(h2.Host)
h2.Ping(h1.Host)
@@ -53,7 +55,7 @@ func main() {
h2.Echo(h1.Host)
// block until all responses have been processed
- for i := 0; i < 4; i++ {
+ for i := 0; i < 8; i++ {
<-done
}
}
diff --git a/examples/multipro/main_test.go b/examples/multipro/main_test.go
new file mode 100644
index 0000000000..545fcf294c
--- /dev/null
+++ b/examples/multipro/main_test.go
@@ -0,0 +1,55 @@
+package main
+
+import (
+ "fmt"
+ "log"
+ "testing"
+
+ "github.com/libp2p/go-libp2p/examples/testutils"
+)
+
+func TestMain(t *testing.T) {
+ port1, err := testutils.FindFreePort(t, "", 5)
+ if err != nil {
+ log.Println(err)
+ return
+ }
+
+ port2, err := testutils.FindFreePort(t, "", 5)
+ if err != nil {
+ log.Println(err)
+ return
+ }
+
+ done := make(chan bool, 1)
+ h1 := makeRandomNode(port1, done)
+ h2 := makeRandomNode(port2, done)
+
+ var h testutils.LogHarness
+
+ // Sequence of log messages when h1 pings h2
+ pingh1h2 := h.NewSequence("ping h1->h2")
+ pingh1h2.ExpectPrefix(fmt.Sprintf("%s: Sending ping to: %s", h1.ID(), h2.ID()))
+ pingh1h2.ExpectPrefix(fmt.Sprintf("%s: Received ping request from %s", h2.ID(), h1.ID()))
+ pingh1h2.ExpectPrefix(fmt.Sprintf("%s: Received ping response from %s", h1.ID(), h2.ID()))
+
+ // Sequence of log messages when h2 pings h1
+ pingh2h1 := h.NewSequence("ping h2->h1")
+ pingh2h1.ExpectPrefix(fmt.Sprintf("%s: Sending ping to: %s", h2.ID(), h1.ID()))
+ pingh2h1.ExpectPrefix(fmt.Sprintf("%s: Received ping request from %s", h1.ID(), h2.ID()))
+ pingh2h1.ExpectPrefix(fmt.Sprintf("%s: Received ping response from %s", h2.ID(), h1.ID()))
+
+ // Sequence of log messages when h1 sends echo to h2
+ echoh1h2 := h.NewSequence("echo h1->h2")
+ echoh1h2.ExpectPrefix(fmt.Sprintf("%s: Sending echo to: %s", h1.ID(), h2.ID()))
+ echoh1h2.ExpectPrefix(fmt.Sprintf("%s: Echo response to %s", h2.ID(), h1.ID()))
+
+	// Sequence of log messages when h2 sends echo to h1
+ echoh2h1 := h.NewSequence("echo h2->h1")
+ echoh2h1.ExpectPrefix(fmt.Sprintf("%s: Sending echo to: %s", h2.ID(), h1.ID()))
+ echoh2h1.ExpectPrefix(fmt.Sprintf("%s: Echo response to %s", h1.ID(), h2.ID()))
+
+ h.Run(t, func() {
+ run(h1, h2, done)
+ })
+}
diff --git a/examples/multipro/node.go b/examples/multipro/node.go
index 58b42330e1..55b63928a6 100644
--- a/examples/multipro/node.go
+++ b/examples/multipro/node.go
@@ -1,17 +1,18 @@
package main
import (
- "bufio"
+ "context"
"log"
"time"
- "github.com/gogo/protobuf/proto"
- crypto "github.com/libp2p/go-libp2p-crypto"
- host "github.com/libp2p/go-libp2p-host"
- inet "github.com/libp2p/go-libp2p-net"
- peer "github.com/libp2p/go-libp2p-peer"
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/protocol"
p2p "github.com/libp2p/go-libp2p/examples/multipro/pb"
- protobufCodec "github.com/multiformats/go-multicodec/protobuf"
+
+ ggio "github.com/gogo/protobuf/io"
+ "github.com/gogo/protobuf/proto"
)
// node client version
@@ -34,13 +35,13 @@ func NewNode(host host.Host, done chan bool) *Node {
}
// Authenticate incoming p2p message
-// message: a protobufs go data object
+// message: a protobuf go data object
// data: common p2p message data
func (n *Node) authenticateMessage(message proto.Message, data *p2p.MessageData) bool {
// store a temp ref to signature and remove it from message data
// sign is a string to allow easy reset to zero-value (empty string)
sign := data.Sign
- data.Sign = ""
+ data.Sign = nil
// marshall data without the signature to protobufs3 binary format
bin, err := proto.Marshal(message)
@@ -53,7 +54,7 @@ func (n *Node) authenticateMessage(message proto.Message, data *p2p.MessageData)
data.Sign = sign
// restore peer id binary format from base58 encoded node id data
- peerId, err := peer.IDB58Decode(data.NodeId)
+ peerId, err := peer.Decode(data.NodeId)
if err != nil {
log.Println(err, "Failed to decode node id from base58")
return false
@@ -118,16 +119,16 @@ func (n *Node) verifyData(data []byte, signature []byte, peerId peer.ID, pubKeyD
// helper method - generate message data shared between all node's p2p protocols
// messageId: unique for requests, copied from request for responses
func (n *Node) NewMessageData(messageId string, gossip bool) *p2p.MessageData {
- // Add protobufs bin data for message author public key
+ // Add protobuf bin data for message author public key
// this is useful for authenticating messages forwarded by a node authored by another node
- nodePubKey, err := n.Peerstore().PubKey(n.ID()).Bytes()
+ nodePubKey, err := crypto.MarshalPublicKey(n.Peerstore().PubKey(n.ID()))
if err != nil {
panic("Failed to get public key for sender from local peer store.")
}
return &p2p.MessageData{ClientVersion: clientVersion,
- NodeId: peer.IDB58Encode(n.ID()),
+ NodeId: n.ID().String(),
NodePubKey: nodePubKey,
Timestamp: time.Now().Unix(),
Id: messageId,
@@ -137,14 +138,20 @@ func (n *Node) NewMessageData(messageId string, gossip bool) *p2p.MessageData {
-// helper method - writes a protobuf go data object to a network stream
-// data: reference of protobuf go data object to send (not the object itself)
-// s: network stream to write the data to
+// helper method - opens a stream to a peer and writes a protobuf go data object to it
+// id: the peer to send the message to
+// p: the protocol ID to open the stream with
+// data: reference of protobuf go data object to send (not the object itself)
-func (n *Node) sendProtoMessage(data proto.Message, s inet.Stream) bool {
- writer := bufio.NewWriter(s)
- enc := protobufCodec.Multicodec(nil).Encoder(writer)
- err := enc.Encode(data)
+func (n *Node) sendProtoMessage(id peer.ID, p protocol.ID, data proto.Message) bool {
+ s, err := n.NewStream(context.Background(), id, p)
+ if err != nil {
+ log.Println(err)
+ return false
+ }
+ defer s.Close()
+
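+	// FullWriter writes the message without a length prefix; the receiving
+	// handlers read the stream to EOF (io.ReadAll) before unmarshaling.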
+ writer := ggio.NewFullWriter(s)
+ err = writer.WriteMsg(data)
if err != nil {
log.Println(err)
+ s.Reset()
return false
}
- writer.Flush()
return true
}
diff --git a/examples/multipro/pb/p2p.pb.go b/examples/multipro/pb/p2p.pb.go
index 091ef4afec..2399383cd4 100644
--- a/examples/multipro/pb/p2p.pb.go
+++ b/examples/multipro/pb/p2p.pb.go
@@ -1,57 +1,148 @@
-// Code generated by protoc-gen-gogo.
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: p2p.proto
-// DO NOT EDIT!
-/*
-Package protocols_p2p is a generated protocol buffer package.
+package protocols_p2p
-It is generated from these files:
- p2p.proto
+import (
+ fmt "fmt"
-It has these top-level messages:
- MessageData
- PingRequest
- PingResponse
- EchoRequest
- EchoResponse
-*/
-package protocols_p2p
+ proto "github.com/gogo/protobuf/proto"
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
+ math "math"
+)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
// designed to be shared between all app protocols
type MessageData struct {
// shared between all requests
- ClientVersion string `protobuf:"bytes,1,opt,name=clientVersion,proto3" json:"clientVersion,omitempty"`
- Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
- Id string `protobuf:"bytes,3,opt,name=id,proto3" json:"id,omitempty"`
- Gossip bool `protobuf:"varint,4,opt,name=gossip,proto3" json:"gossip,omitempty"`
- NodeId string `protobuf:"bytes,5,opt,name=nodeId,proto3" json:"nodeId,omitempty"`
- NodePubKey []byte `protobuf:"bytes,6,opt,name=nodePubKey,proto3" json:"nodePubKey,omitempty"`
- Sign string `protobuf:"bytes,7,opt,name=sign,proto3" json:"sign,omitempty"`
+ ClientVersion string `protobuf:"bytes,1,opt,name=clientVersion,proto3" json:"clientVersion,omitempty"`
+ Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+ Id string `protobuf:"bytes,3,opt,name=id,proto3" json:"id,omitempty"`
+ Gossip bool `protobuf:"varint,4,opt,name=gossip,proto3" json:"gossip,omitempty"`
+ NodeId string `protobuf:"bytes,5,opt,name=nodeId,proto3" json:"nodeId,omitempty"`
+ NodePubKey []byte `protobuf:"bytes,6,opt,name=nodePubKey,proto3" json:"nodePubKey,omitempty"`
+ Sign []byte `protobuf:"bytes,7,opt,name=sign,proto3" json:"sign,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
func (m *MessageData) Reset() { *m = MessageData{} }
func (m *MessageData) String() string { return proto.CompactTextString(m) }
func (*MessageData) ProtoMessage() {}
+func (*MessageData) Descriptor() ([]byte, []int) {
+ return fileDescriptor_p2p_c8fd4e6dd1b6d221, []int{0}
+}
+func (m *MessageData) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_MessageData.Unmarshal(m, b)
+}
+func (m *MessageData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_MessageData.Marshal(b, m, deterministic)
+}
+func (dst *MessageData) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MessageData.Merge(dst, src)
+}
+func (m *MessageData) XXX_Size() int {
+ return xxx_messageInfo_MessageData.Size(m)
+}
+func (m *MessageData) XXX_DiscardUnknown() {
+ xxx_messageInfo_MessageData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MessageData proto.InternalMessageInfo
+
+func (m *MessageData) GetClientVersion() string {
+ if m != nil {
+ return m.ClientVersion
+ }
+ return ""
+}
+
+func (m *MessageData) GetTimestamp() int64 {
+ if m != nil {
+ return m.Timestamp
+ }
+ return 0
+}
+
+func (m *MessageData) GetId() string {
+ if m != nil {
+ return m.Id
+ }
+ return ""
+}
-// a protocol define a set of reuqest and responses
+func (m *MessageData) GetGossip() bool {
+ if m != nil {
+ return m.Gossip
+ }
+ return false
+}
+
+func (m *MessageData) GetNodeId() string {
+ if m != nil {
+ return m.NodeId
+ }
+ return ""
+}
+
+func (m *MessageData) GetNodePubKey() []byte {
+ if m != nil {
+ return m.NodePubKey
+ }
+ return nil
+}
+
+func (m *MessageData) GetSign() []byte {
+ if m != nil {
+ return m.Sign
+ }
+ return nil
+}
+
+// A protocol defines a set of requests and responses.
type PingRequest struct {
MessageData *MessageData `protobuf:"bytes,1,opt,name=messageData" json:"messageData,omitempty"`
// method specific data
- Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+ Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
func (m *PingRequest) Reset() { *m = PingRequest{} }
func (m *PingRequest) String() string { return proto.CompactTextString(m) }
func (*PingRequest) ProtoMessage() {}
+func (*PingRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_p2p_c8fd4e6dd1b6d221, []int{1}
+}
+func (m *PingRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_PingRequest.Unmarshal(m, b)
+}
+func (m *PingRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_PingRequest.Marshal(b, m, deterministic)
+}
+func (dst *PingRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PingRequest.Merge(dst, src)
+}
+func (m *PingRequest) XXX_Size() int {
+ return xxx_messageInfo_PingRequest.Size(m)
+}
+func (m *PingRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_PingRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PingRequest proto.InternalMessageInfo
func (m *PingRequest) GetMessageData() *MessageData {
if m != nil {
@@ -60,15 +151,45 @@ func (m *PingRequest) GetMessageData() *MessageData {
return nil
}
+func (m *PingRequest) GetMessage() string {
+ if m != nil {
+ return m.Message
+ }
+ return ""
+}
+
type PingResponse struct {
MessageData *MessageData `protobuf:"bytes,1,opt,name=messageData" json:"messageData,omitempty"`
// response specific data
- Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+ Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
func (m *PingResponse) Reset() { *m = PingResponse{} }
func (m *PingResponse) String() string { return proto.CompactTextString(m) }
func (*PingResponse) ProtoMessage() {}
+func (*PingResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_p2p_c8fd4e6dd1b6d221, []int{2}
+}
+func (m *PingResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_PingResponse.Unmarshal(m, b)
+}
+func (m *PingResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_PingResponse.Marshal(b, m, deterministic)
+}
+func (dst *PingResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PingResponse.Merge(dst, src)
+}
+func (m *PingResponse) XXX_Size() int {
+ return xxx_messageInfo_PingResponse.Size(m)
+}
+func (m *PingResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_PingResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PingResponse proto.InternalMessageInfo
func (m *PingResponse) GetMessageData() *MessageData {
if m != nil {
@@ -77,16 +198,46 @@ func (m *PingResponse) GetMessageData() *MessageData {
return nil
}
-// a protocol define a set of reuqest and responses
+func (m *PingResponse) GetMessage() string {
+ if m != nil {
+ return m.Message
+ }
+ return ""
+}
+
+// A protocol defines a set of requests and responses.
type EchoRequest struct {
MessageData *MessageData `protobuf:"bytes,1,opt,name=messageData" json:"messageData,omitempty"`
// method specific data
- Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+ Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
func (m *EchoRequest) Reset() { *m = EchoRequest{} }
func (m *EchoRequest) String() string { return proto.CompactTextString(m) }
func (*EchoRequest) ProtoMessage() {}
+func (*EchoRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_p2p_c8fd4e6dd1b6d221, []int{3}
+}
+func (m *EchoRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_EchoRequest.Unmarshal(m, b)
+}
+func (m *EchoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_EchoRequest.Marshal(b, m, deterministic)
+}
+func (dst *EchoRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EchoRequest.Merge(dst, src)
+}
+func (m *EchoRequest) XXX_Size() int {
+ return xxx_messageInfo_EchoRequest.Size(m)
+}
+func (m *EchoRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_EchoRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EchoRequest proto.InternalMessageInfo
func (m *EchoRequest) GetMessageData() *MessageData {
if m != nil {
@@ -95,15 +246,45 @@ func (m *EchoRequest) GetMessageData() *MessageData {
return nil
}
+func (m *EchoRequest) GetMessage() string {
+ if m != nil {
+ return m.Message
+ }
+ return ""
+}
+
type EchoResponse struct {
MessageData *MessageData `protobuf:"bytes,1,opt,name=messageData" json:"messageData,omitempty"`
// response specific data
- Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+ Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
func (m *EchoResponse) Reset() { *m = EchoResponse{} }
func (m *EchoResponse) String() string { return proto.CompactTextString(m) }
func (*EchoResponse) ProtoMessage() {}
+func (*EchoResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_p2p_c8fd4e6dd1b6d221, []int{4}
+}
+func (m *EchoResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_EchoResponse.Unmarshal(m, b)
+}
+func (m *EchoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_EchoResponse.Marshal(b, m, deterministic)
+}
+func (dst *EchoResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EchoResponse.Merge(dst, src)
+}
+func (m *EchoResponse) XXX_Size() int {
+ return xxx_messageInfo_EchoResponse.Size(m)
+}
+func (m *EchoResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_EchoResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EchoResponse proto.InternalMessageInfo
func (m *EchoResponse) GetMessageData() *MessageData {
if m != nil {
@@ -112,6 +293,13 @@ func (m *EchoResponse) GetMessageData() *MessageData {
return nil
}
+func (m *EchoResponse) GetMessage() string {
+ if m != nil {
+ return m.Message
+ }
+ return ""
+}
+
func init() {
proto.RegisterType((*MessageData)(nil), "protocols.p2p.MessageData")
proto.RegisterType((*PingRequest)(nil), "protocols.p2p.PingRequest")
@@ -119,3 +307,26 @@ func init() {
proto.RegisterType((*EchoRequest)(nil), "protocols.p2p.EchoRequest")
proto.RegisterType((*EchoResponse)(nil), "protocols.p2p.EchoResponse")
}
+
+func init() { proto.RegisterFile("p2p.proto", fileDescriptor_p2p_c8fd4e6dd1b6d221) }
+
+var fileDescriptor_p2p_c8fd4e6dd1b6d221 = []byte{
+ // 261 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x8f, 0xb1, 0x4e, 0xc3, 0x30,
+ 0x10, 0x86, 0xe5, 0xb6, 0xa4, 0xe4, 0xdc, 0x32, 0xdc, 0x80, 0x2c, 0x84, 0x50, 0x14, 0x31, 0x64,
+ 0xca, 0x10, 0x56, 0x46, 0x18, 0x10, 0x42, 0xaa, 0x3c, 0xb0, 0xa7, 0xc9, 0x11, 0x2c, 0x35, 0xb6,
+ 0xe9, 0xb9, 0x03, 0x0f, 0xc8, 0x7b, 0xa1, 0xba, 0x41, 0x4d, 0x1f, 0xa0, 0x4c, 0xbe, 0xff, 0xf3,
+ 0xd9, 0xbf, 0x3e, 0x48, 0x7d, 0xe5, 0x4b, 0xbf, 0x75, 0xc1, 0xe1, 0x32, 0x1e, 0x8d, 0xdb, 0x70,
+ 0xe9, 0x2b, 0x9f, 0xff, 0x08, 0x90, 0x6f, 0xc4, 0x5c, 0x77, 0xf4, 0x54, 0x87, 0x1a, 0xef, 0x61,
+ 0xd9, 0x6c, 0x0c, 0xd9, 0xf0, 0x4e, 0x5b, 0x36, 0xce, 0x2a, 0x91, 0x89, 0x22, 0xd5, 0xa7, 0x10,
+ 0x6f, 0x21, 0x0d, 0xa6, 0x27, 0x0e, 0x75, 0xef, 0xd5, 0x24, 0x13, 0xc5, 0x54, 0x1f, 0x01, 0x5e,
+ 0xc1, 0xc4, 0xb4, 0x6a, 0x1a, 0x1f, 0x4e, 0x4c, 0x8b, 0xd7, 0x90, 0x74, 0x8e, 0xd9, 0x78, 0x35,
+ 0xcb, 0x44, 0x71, 0xa9, 0x87, 0xb4, 0xe7, 0xd6, 0xb5, 0xf4, 0xd2, 0xaa, 0x8b, 0xb8, 0x3b, 0x24,
+ 0xbc, 0x03, 0xd8, 0x4f, 0xab, 0xdd, 0xfa, 0x95, 0xbe, 0x55, 0x92, 0x89, 0x62, 0xa1, 0x47, 0x04,
+ 0x11, 0x66, 0x6c, 0x3a, 0xab, 0xe6, 0xf1, 0x26, 0xce, 0x39, 0x81, 0x5c, 0x19, 0xdb, 0x69, 0xfa,
+ 0xda, 0x11, 0x07, 0x7c, 0x04, 0xd9, 0x1f, 0xad, 0xa2, 0x84, 0xac, 0x6e, 0xca, 0x13, 0xf7, 0x72,
+ 0xe4, 0xad, 0xc7, 0xeb, 0xa8, 0x60, 0x3e, 0xc4, 0x28, 0x97, 0xea, 0xbf, 0x98, 0x7f, 0xc0, 0xe2,
+ 0x50, 0xc3, 0xde, 0x59, 0xa6, 0xb3, 0xf5, 0x10, 0xc8, 0xe7, 0xe6, 0xd3, 0xfd, 0x83, 0xce, 0xa1,
+ 0xe6, 0xbc, 0x3a, 0xeb, 0x24, 0xfe, 0xf0, 0xf0, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xf4, 0x47, 0x02,
+ 0x5e, 0x88, 0x02, 0x00, 0x00,
+}
diff --git a/examples/multipro/pb/p2p.proto b/examples/multipro/pb/p2p.proto
index 53e652154b..a360b24635 100644
--- a/examples/multipro/pb/p2p.proto
+++ b/examples/multipro/pb/p2p.proto
@@ -9,14 +9,14 @@ message MessageData {
int64 timestamp = 2; // unix time
string id = 3; // allows requesters to use request data when processing a response
bool gossip = 4; // true to have receiver peer gossip the message to neighbors
- string nodeId = 5; // id of node that created the message (not the peer that may have sent it). =base58(mh(sha256(nodePubKey)))
+ string nodeId = 5; // id of node that created the message (not the peer that may have sent it). =base58(multihash(nodePubKey))
-	bytes nodePubKey = 6; // Authoring node Secp256k1 public key (32bytes) - protobufs serielized
+	bytes nodePubKey = 6; // Authoring node Secp256k1 public key (32bytes) - protobuf serialized
- string sign = 7; // signature of message data + method specific data by message authoring node. format: string([]bytes)
+ bytes sign = 7; // signature of message data + method specific data by message authoring node.
}
//// ping protocol
-// a protocol define a set of reuqest and responses
+// A protocol defines a set of requests and responses.
message PingRequest {
MessageData messageData = 1;
@@ -36,7 +36,7 @@ message PingResponse {
//// echo protocol
-// a protocol define a set of reuqest and responses
+// A protocol defines a set of requests and responses.
message EchoRequest {
MessageData messageData = 1;
diff --git a/examples/multipro/pb/readme.md b/examples/multipro/pb/readme.md
deleted file mode 100644
index ec7114f6ad..0000000000
--- a/examples/multipro/pb/readme.md
+++ /dev/null
@@ -1,4 +0,0 @@
-# building p2p.pb.go:
-protoc --gogo_out=. --proto_path=../../../../../../:/usr/local/opt/protobuf/include:. *.proto
-
-
diff --git a/examples/multipro/ping.go b/examples/multipro/ping.go
index bcc63dc1cf..c274f5bf4f 100644
--- a/examples/multipro/ping.go
+++ b/examples/multipro/ping.go
@@ -1,16 +1,17 @@
package main
import (
- "bufio"
- "context"
"fmt"
+ "io"
"log"
+ "sync"
- "github.com/libp2p/go-libp2p-host"
- inet "github.com/libp2p/go-libp2p-net"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+
+ proto "github.com/gogo/protobuf/proto"
+ uuid "github.com/google/uuid"
p2p "github.com/libp2p/go-libp2p/examples/multipro/pb"
- protobufCodec "github.com/multiformats/go-multicodec/protobuf"
- uuid "github.com/satori/go.uuid"
)
// pattern: /protocol-name/request-or-response-message/version
@@ -19,8 +20,9 @@ const pingResponse = "/ping/pingresp/0.0.1"
// PingProtocol type
type PingProtocol struct {
- node *Node // local host
- requests map[string]*p2p.PingRequest // used to access request data from response handlers
+ node *Node // local host
+ mu sync.Mutex
+ requests map[string]*p2p.PingRequest // used to access request data from response handlers. Protected by mu
done chan bool // only for demo purposes to stop main from terminating
}
@@ -32,12 +34,20 @@ func NewPingProtocol(node *Node, done chan bool) *PingProtocol {
}
// remote peer requests handler
-func (p *PingProtocol) onPingRequest(s inet.Stream) {
+func (p *PingProtocol) onPingRequest(s network.Stream) {
// get request data
data := &p2p.PingRequest{}
- decoder := protobufCodec.Multicodec(nil).Decoder(bufio.NewReader(s))
- err := decoder.Decode(data)
+ buf, err := io.ReadAll(s)
+ if err != nil {
+ s.Reset()
+ log.Println(err)
+ return
+ }
+ s.Close()
+
+ // unmarshal it
+ err = proto.Unmarshal(buf, data)
if err != nil {
log.Println(err)
return
@@ -66,28 +76,32 @@ func (p *PingProtocol) onPingRequest(s inet.Stream) {
}
// add the signature to the message
- resp.MessageData.Sign = string(signature)
+ resp.MessageData.Sign = signature
// send the response
- s, respErr := p.node.NewStream(context.Background(), s.Conn().RemotePeer(), pingResponse)
- if respErr != nil {
- log.Println(respErr)
- return
- }
-
- ok := p.node.sendProtoMessage(resp, s)
+ ok := p.node.sendProtoMessage(s.Conn().RemotePeer(), pingResponse, resp)
if ok {
log.Printf("%s: Ping response to %s sent.", s.Conn().LocalPeer().String(), s.Conn().RemotePeer().String())
}
+ p.done <- true
}
// remote ping response handler
-func (p *PingProtocol) onPingResponse(s inet.Stream) {
+func (p *PingProtocol) onPingResponse(s network.Stream) {
data := &p2p.PingResponse{}
- decoder := protobufCodec.Multicodec(nil).Decoder(bufio.NewReader(s))
- err := decoder.Decode(data)
+ buf, err := io.ReadAll(s)
if err != nil {
+ s.Reset()
+ log.Println(err)
+ return
+ }
+ s.Close()
+
+ // unmarshal it
+ err = proto.Unmarshal(buf, data)
+ if err != nil {
+ log.Println(err)
return
}
@@ -99,14 +113,17 @@ func (p *PingProtocol) onPingResponse(s inet.Stream) {
}
// locate request data and remove it if found
+ p.mu.Lock()
_, ok := p.requests[data.MessageData.Id]
if ok {
// remove request from map as we have processed it here
delete(p.requests, data.MessageData.Id)
} else {
log.Println("Failed to locate request data boject for response")
+ p.mu.Unlock()
return
}
+ p.mu.Unlock()
log.Printf("%s: Received ping response from %s. Message id:%s. Message: %s.", s.Conn().LocalPeer(), s.Conn().RemotePeer(), data.MessageData.Id, data.Message)
p.done <- true
@@ -116,7 +133,7 @@ func (p *PingProtocol) Ping(host host.Host) bool {
log.Printf("%s: Sending ping to: %s....", p.node.ID(), host.ID())
// create message data
- req := &p2p.PingRequest{MessageData: p.node.NewMessageData(uuid.Must(uuid.NewV4()).String(), false),
+ req := &p2p.PingRequest{MessageData: p.node.NewMessageData(uuid.New().String(), false),
Message: fmt.Sprintf("Ping from %s", p.node.ID())}
// sign the data
@@ -127,22 +144,18 @@ func (p *PingProtocol) Ping(host host.Host) bool {
}
// add the signature to the message
- req.MessageData.Sign = string(signature)
-
- s, err := p.node.NewStream(context.Background(), host.ID(), pingRequest)
- if err != nil {
- log.Println(err)
- return false
- }
+ req.MessageData.Sign = signature
- ok := p.node.sendProtoMessage(req, s)
+ // store ref request so response handler has access to it
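+	// (stored before sending, so a fast response cannot race the map insert)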
+ p.mu.Lock()
+ p.requests[req.MessageData.Id] = req
+ p.mu.Unlock()
+ ok := p.node.sendProtoMessage(host.ID(), pingRequest, req)
if !ok {
return false
}
- // store ref request so response handler has access to it
- p.requests[req.MessageData.Id] = req
log.Printf("%s: Ping to: %s was sent. Message Id: %s, Message: %s", p.node.ID(), host.ID(), req.MessageData.Id, req.Message)
return true
}
diff --git a/examples/protocol-multiplexing-with-multicodecs/README.md b/examples/protocol-multiplexing-with-multicodecs/README.md
deleted file mode 100644
index 0b873f51a6..0000000000
--- a/examples/protocol-multiplexing-with-multicodecs/README.md
+++ /dev/null
@@ -1,41 +0,0 @@
-
-
-# Protocol Multiplexing using multicodecs with libp2p
-
-This examples shows how to use multicodecs (i.e. json) to encode and transmit information between LibP2P hosts using LibP2P Streams.
-
-Multicodecs present a common interface, making it very easy to swap the codec implementation if needed.
-
-This example expects that you area already familiar with the [echo example](https://github.com/libp2p/go-libp2p/tree/master/examples/echo).
-
-## Build
-
-From `go-libp2p` base folder:
-
-```
-> make deps-protocol-muxing
-> go build -o multicodecs ./examples/protocol-multiplexing-with-multicodecs
-```
-
-## Usage
-
-```
-> ./multicodecs
-
-```
-
-## Details
-
-The example creates two LibP2P Hosts. Host1 opens a stream to Host2. Host2 has an `StreamHandler` to deal with the incoming stream. This is covered in the `echo` example.
-
-Both hosts simulate a conversation. But rather than sending raw messages on the stream, each message in the conversation is encoded under a `json` object (using the `json` multicodec). For example:
-
-```
-{
- "Msg": "This is the message",
- "Index": 3,
- "HangUp": false
-}
-```
-
-The stream lasts until one of the sides closes it when the HangUp field is `true`.
diff --git a/examples/protocol-multiplexing-with-multicodecs/main.go b/examples/protocol-multiplexing-with-multicodecs/main.go
deleted file mode 100644
index 86c069b29c..0000000000
--- a/examples/protocol-multiplexing-with-multicodecs/main.go
+++ /dev/null
@@ -1,181 +0,0 @@
-package main
-
-import (
- "bufio"
- "context"
- "fmt"
- "log"
- "math/rand"
- "time"
-
- crypto "github.com/libp2p/go-libp2p-crypto"
- host "github.com/libp2p/go-libp2p-host"
- inet "github.com/libp2p/go-libp2p-net"
- peer "github.com/libp2p/go-libp2p-peer"
- ps "github.com/libp2p/go-libp2p-peerstore"
- swarm "github.com/libp2p/go-libp2p-swarm"
- ma "github.com/multiformats/go-multiaddr"
-
- bhost "github.com/libp2p/go-libp2p/p2p/host/basic"
- multicodec "github.com/multiformats/go-multicodec"
- json "github.com/multiformats/go-multicodec/json"
-)
-
-const proto = "/example/1.0.0"
-
-// Message is a serializable/encodable object that we will send
-// on a Stream.
-type Message struct {
- Msg string
- Index int
- HangUp bool
-}
-
-// streamWrap wraps a libp2p stream. We encode/decode whenever we
-// write/read from a stream, so we can just carry the encoders
-// and bufios with us
-type WrappedStream struct {
- stream inet.Stream
- enc multicodec.Encoder
- dec multicodec.Decoder
- w *bufio.Writer
- r *bufio.Reader
-}
-
-// wrapStream takes a stream and complements it with r/w bufios and
-// decoder/encoder. In order to write raw data to the stream we can use
-// wrap.w.Write(). To encode something into it we can wrap.enc.Encode().
-// Finally, we should wrap.w.Flush() to actually send the data. Handling
-// incoming data works similarly with wrap.r.Read() for raw-reading and
-// wrap.dec.Decode() to decode.
-func WrapStream(s inet.Stream) *WrappedStream {
- reader := bufio.NewReader(s)
- writer := bufio.NewWriter(s)
- // This is where we pick our specific multicodec. In order to change the
- // codec, we only need to change this place.
- // See https://godoc.org/github.com/multiformats/go-multicodec/json
- dec := json.Multicodec(false).Decoder(reader)
- enc := json.Multicodec(false).Encoder(writer)
- return &WrappedStream{
- stream: s,
- r: reader,
- w: writer,
- enc: enc,
- dec: dec,
- }
-}
-
-// messages that will be sent between the hosts.
-var conversationMsgs = []string{
- "Hello!",
- "Hey!",
- "How are you doing?",
- "Very good! It is great that you can send data on a stream to me!",
- "Not only that, the data is encoded in a JSON object.",
- "Yeah, and we are using the multicodecs interface to encode and decode.",
- "This way we could swap it easily for, say, cbor, or msgpack!",
- "Let's leave that as an excercise for the reader...",
- "Agreed, our last message should activate the HangUp flag",
- "Yes, and the example code will close streams. So sad :(. Bye!",
-}
-
-func makeRandomHost(port int) host.Host {
- // Ignoring most errors for brevity
- // See echo example for more details and better implementation
- priv, pub, _ := crypto.GenerateKeyPair(crypto.RSA, 2048)
- pid, _ := peer.IDFromPublicKey(pub)
- listen, _ := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", port))
- ps := ps.NewPeerstore()
- ps.AddPrivKey(pid, priv)
- ps.AddPubKey(pid, pub)
- n, _ := swarm.NewNetwork(context.Background(),
- []ma.Multiaddr{listen}, pid, ps, nil)
- return bhost.New(n)
-}
-
-func main() {
- // Choose random ports between 10000-10100
- rand.Seed(666)
- port1 := rand.Intn(100) + 10000
- port2 := port1 + 1
-
- // Make 2 hosts
- h1 := makeRandomHost(port1)
- h2 := makeRandomHost(port2)
- h1.Peerstore().AddAddrs(h2.ID(), h2.Addrs(), ps.PermanentAddrTTL)
- h2.Peerstore().AddAddrs(h1.ID(), h1.Addrs(), ps.PermanentAddrTTL)
-
- log.Printf("This is a conversation between %s and %s\n", h1.ID(), h2.ID())
-
- // Define a stream handler for host number 2
- h2.SetStreamHandler(proto, func(stream inet.Stream) {
- log.Printf("%s: Received a stream", h2.ID())
- wrappedStream := WrapStream(stream)
- defer stream.Close()
- handleStream(wrappedStream)
- })
-
- // Create new stream from h1 to h2 and start the conversation
- stream, err := h1.NewStream(context.Background(), h2.ID(), proto)
- if err != nil {
- log.Fatal(err)
- }
- wrappedStream := WrapStream(stream)
- // This sends the first message
- sendMessage(0, wrappedStream)
- // We keep the conversation on the created stream so we launch
- // this to handle any responses
- handleStream(wrappedStream)
- // When we are done, close the stream on our side and exit.
- stream.Close()
-}
-
-// receiveMessage reads and decodes a message from the stream
-func receiveMessage(ws *WrappedStream) (*Message, error) {
- var msg Message
- err := ws.dec.Decode(&msg)
- if err != nil {
- return nil, err
- }
- return &msg, nil
-}
-
-// sendMessage encodes and writes a message to the stream
-func sendMessage(index int, ws *WrappedStream) error {
- msg := &Message{
- Msg: conversationMsgs[index],
- Index: index,
- HangUp: index >= len(conversationMsgs)-1,
- }
-
- err := ws.enc.Encode(msg)
- // Because output is buffered with bufio, we need to flush!
- ws.w.Flush()
- return err
-}
-
-// handleStream is a for loop which receives and then sends a message
-// an artificial delay of 500ms happens in-between.
-// When Message.HangUp is true, it exists. This will close the stream
-// on one of the sides. The other side's receiveMessage() will error
-// with EOF, thus also breaking out from the loop.
-func handleStream(ws *WrappedStream) {
- for {
- // Read
- msg, err := receiveMessage(ws)
- if err != nil {
- break
- }
- pid := ws.stream.Conn().LocalPeer()
- log.Printf("%s says: %s\n", pid, msg.Msg)
- time.Sleep(500 * time.Millisecond)
- if msg.HangUp {
- break
- }
- // Send response
- err = sendMessage(msg.Index+1, ws)
- if err != nil {
- break
- }
- }
-}
diff --git a/examples/pubsub/README.md b/examples/pubsub/README.md
new file mode 100644
index 0000000000..a41419ac31
--- /dev/null
+++ b/examples/pubsub/README.md
@@ -0,0 +1,7 @@
+# go-libp2p-pubsub examples
+
+This directory contains example projects that use [go-libp2p-pubsub](https://github.com/libp2p/go-libp2p-pubsub),
+the Go implementation of libp2p's [Publish / Subscribe system](https://docs.libp2p.io/concepts/publish-subscribe).
+
+- The [chat room example](./chat) covers the basics of using the PubSub API to build a peer-to-peer chat application.
+- The [pubsub rendezvous example](./basic-chat-with-rendezvous) lets multiple peers chat with each other over go-libp2p-pubsub, discovering one another via rendezvous names.
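+
+As a minimal sketch of the go-libp2p-pubsub API both examples build on
+(error handling omitted; `ctx` and the libp2p host `h` are assumed to exist):
+
+```go
+ps, _ := pubsub.NewGossipSub(ctx, h)           // create a GossipSub message router
+topic, _ := ps.Join("example-topic")           // join (or create) a topic
+sub, _ := topic.Subscribe()                    // read incoming messages with sub.Next(ctx)
+_ = topic.Publish(ctx, []byte("hello world"))  // publish to all subscribers of the topic
+```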
diff --git a/examples/pubsub/basic-chat-with-rendezvous/.gitignore b/examples/pubsub/basic-chat-with-rendezvous/.gitignore
new file mode 100644
index 0000000000..76cadc92a3
--- /dev/null
+++ b/examples/pubsub/basic-chat-with-rendezvous/.gitignore
@@ -0,0 +1 @@
+chat
diff --git a/examples/pubsub/basic-chat-with-rendezvous/README.md b/examples/pubsub/basic-chat-with-rendezvous/README.md
new file mode 100644
index 0000000000..d8008fef47
--- /dev/null
+++ b/examples/pubsub/basic-chat-with-rendezvous/README.md
@@ -0,0 +1,36 @@
+# go-libp2p-pubsub chat with rendezvous example
+
+This example project allows multiple peers to chat among each other using go-libp2p-pubsub.
+
+Peers are discovered using a DHT, so no prior information (other than the rendezvous name) is required for each peer.
+
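+A rough sketch of that discovery flow, under the assumption that `ctx` and the
+libp2p host `h` are already set up (error handling omitted; see the example
+code for the real flow):
+
+```go
+kadDHT, _ := dht.New(ctx, h)                        // github.com/libp2p/go-libp2p-kad-dht
+disc := routing.NewRoutingDiscovery(kadDHT)         // p2p/discovery/routing
+util.Advertise(ctx, disc, "rendezvous-name")        // advertise ourselves under the rendezvous name
+peerCh, _ := disc.FindPeers(ctx, "rendezvous-name") // channel of peer.AddrInfo to connect to
+```
+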
+## Running
+
+Clone this repo, then `cd` into the `examples/pubsub/basic-chat-with-rendezvous` directory:
+
+```shell
+git clone https://github.com/libp2p/go-libp2p
+cd go-libp2p/examples/pubsub/basic-chat-with-rendezvous
+```
+
+Now you can either run with `go run`, or build and run the binary:
+
+```shell
+go run .
+
+# or, build and run separately
+go build .
+./chat
+```
+
+To change the topic name, use the `-topicName` flag:
+
+```shell
+go run . -topicName=adifferenttopic
+```
+
+Try opening several terminals, each running the app. When you type a message and hit enter in one, it
+should appear in all others that are connected to the same topic.
+
+To quit, hit `Ctrl-C`.
+
diff --git a/examples/pubsub/basic-chat-with-rendezvous/go.mod b/examples/pubsub/basic-chat-with-rendezvous/go.mod
new file mode 100644
index 0000000000..dd82c6f526
--- /dev/null
+++ b/examples/pubsub/basic-chat-with-rendezvous/go.mod
@@ -0,0 +1,114 @@
+module github.com/libp2p/go-libp2p/examples/pubsub/chat
+
+go 1.24
+
+require (
+ github.com/libp2p/go-libp2p v0.33.0
+ github.com/libp2p/go-libp2p-kad-dht v0.25.1
+ github.com/libp2p/go-libp2p-pubsub v0.10.0
+)
+
+require (
+ github.com/benbjohnson/clock v1.3.5 // indirect
+ github.com/beorn7/perks v1.0.1 // indirect
+ github.com/cespare/xxhash/v2 v2.2.0 // indirect
+ github.com/containerd/cgroups v1.1.0 // indirect
+ github.com/coreos/go-systemd/v22 v22.5.0 // indirect
+ github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
+ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
+ github.com/docker/go-units v0.5.0 // indirect
+ github.com/elastic/gosigar v0.14.2 // indirect
+ github.com/flynn/noise v1.1.0 // indirect
+ github.com/francoispqt/gojay v1.2.13 // indirect
+ github.com/go-logr/logr v1.3.0 // indirect
+ github.com/go-logr/stdr v1.2.2 // indirect
+ github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
+ github.com/godbus/dbus/v5 v5.1.0 // indirect
+ github.com/gogo/protobuf v1.3.2 // indirect
+ github.com/google/gopacket v1.1.19 // indirect
+ github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect
+ github.com/google/uuid v1.4.0 // indirect
+ github.com/gorilla/websocket v1.5.1 // indirect
+ github.com/hashicorp/errwrap v1.1.0 // indirect
+ github.com/hashicorp/go-multierror v1.1.1 // indirect
+ github.com/hashicorp/golang-lru v0.5.4 // indirect
+ github.com/hashicorp/golang-lru/v2 v2.0.5 // indirect
+ github.com/huin/goupnp v1.3.0 // indirect
+ github.com/ipfs/boxo v0.10.0 // indirect
+ github.com/ipfs/go-cid v0.4.1 // indirect
+ github.com/ipfs/go-datastore v0.6.0 // indirect
+ github.com/ipfs/go-log v1.0.5 // indirect
+ github.com/ipfs/go-log/v2 v2.5.1 // indirect
+ github.com/ipld/go-ipld-prime v0.20.0 // indirect
+ github.com/jackpal/go-nat-pmp v1.0.2 // indirect
+ github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
+ github.com/jbenet/goprocess v0.1.4 // indirect
+ github.com/klauspost/compress v1.17.6 // indirect
+ github.com/klauspost/cpuid/v2 v2.2.7 // indirect
+ github.com/koron/go-ssdp v0.0.4 // indirect
+ github.com/libp2p/go-buffer-pool v0.1.0 // indirect
+ github.com/libp2p/go-cidranger v1.1.0 // indirect
+ github.com/libp2p/go-flow-metrics v0.1.0 // indirect
+ github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
+ github.com/libp2p/go-libp2p-kbucket v0.6.3 // indirect
+ github.com/libp2p/go-libp2p-record v0.2.0 // indirect
+ github.com/libp2p/go-libp2p-routing-helpers v0.7.2 // indirect
+ github.com/libp2p/go-msgio v0.3.0 // indirect
+ github.com/libp2p/go-nat v0.2.0 // indirect
+ github.com/libp2p/go-netroute v0.2.1 // indirect
+ github.com/libp2p/go-reuseport v0.4.0 // indirect
+ github.com/libp2p/go-yamux/v4 v4.0.1 // indirect
+ github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
+ github.com/mattn/go-isatty v0.0.20 // indirect
+ github.com/miekg/dns v1.1.58 // indirect
+ github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
+ github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
+ github.com/minio/sha256-simd v1.0.1 // indirect
+ github.com/mr-tron/base58 v1.2.0 // indirect
+ github.com/multiformats/go-base32 v0.1.0 // indirect
+ github.com/multiformats/go-base36 v0.2.0 // indirect
+ github.com/multiformats/go-multiaddr v0.12.2 // indirect
+ github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect
+ github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
+ github.com/multiformats/go-multibase v0.2.0 // indirect
+ github.com/multiformats/go-multicodec v0.9.0 // indirect
+ github.com/multiformats/go-multihash v0.2.3 // indirect
+ github.com/multiformats/go-multistream v0.5.0 // indirect
+ github.com/multiformats/go-varint v0.0.7 // indirect
+ github.com/onsi/ginkgo/v2 v2.15.0 // indirect
+ github.com/opencontainers/runtime-spec v1.2.0 // indirect
+ github.com/opentracing/opentracing-go v1.2.0 // indirect
+ github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
+ github.com/pkg/errors v0.9.1 // indirect
+ github.com/polydawn/refmt v0.89.0 // indirect
+ github.com/prometheus/client_golang v1.18.0 // indirect
+ github.com/prometheus/client_model v0.6.0 // indirect
+ github.com/prometheus/common v0.47.0 // indirect
+ github.com/prometheus/procfs v0.12.0 // indirect
+ github.com/quic-go/qpack v0.4.0 // indirect
+ github.com/quic-go/quic-go v0.41.0 // indirect
+ github.com/quic-go/webtransport-go v0.6.0 // indirect
+ github.com/raulk/go-watchdog v1.3.0 // indirect
+ github.com/spaolacci/murmur3 v1.1.0 // indirect
+ github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect
+ go.opencensus.io v0.24.0 // indirect
+ go.opentelemetry.io/otel v1.16.0 // indirect
+ go.opentelemetry.io/otel/metric v1.16.0 // indirect
+ go.opentelemetry.io/otel/trace v1.16.0 // indirect
+ go.uber.org/dig v1.17.1 // indirect
+ go.uber.org/fx v1.20.1 // indirect
+ go.uber.org/mock v0.4.0 // indirect
+ go.uber.org/multierr v1.11.0 // indirect
+ go.uber.org/zap v1.27.0 // indirect
+ golang.org/x/crypto v0.19.0 // indirect
+ golang.org/x/exp v0.0.0-20240213143201-ec583247a57a // indirect
+ golang.org/x/mod v0.15.0 // indirect
+ golang.org/x/net v0.21.0 // indirect
+ golang.org/x/sync v0.6.0 // indirect
+ golang.org/x/sys v0.17.0 // indirect
+ golang.org/x/text v0.14.0 // indirect
+ golang.org/x/tools v0.18.0 // indirect
+ gonum.org/v1/gonum v0.13.0 // indirect
+ google.golang.org/protobuf v1.32.0 // indirect
+ lukechampine.com/blake3 v1.2.1 // indirect
+)
diff --git a/examples/pubsub/basic-chat-with-rendezvous/go.sum b/examples/pubsub/basic-chat-with-rendezvous/go.sum
new file mode 100644
index 0000000000..826d9e2dd8
--- /dev/null
+++ b/examples/pubsub/basic-chat-with-rendezvous/go.sum
@@ -0,0 +1,583 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo=
+dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
+dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
+dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
+dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
+git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
+github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
+github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
+github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
+github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
+github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
+github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU=
+github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U=
+github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y=
+github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
+github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
+github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
+github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4=
+github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
+github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
+github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
+github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
+github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
+github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY=
+github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
+github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
+github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
+github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0=
+github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
+github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
+github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
+github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 h1:E/LAvt58di64hlYjx7AsNS6C/ysHWYo+2qPCZKTQhRo=
+github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
+github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
+github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c h1:7lF+Vz0LqiRidnzC1Oq86fpX1q/iEv2KJdrCtttYjT4=
+github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY=
+github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
+github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvHgwfx2Vm4=
+github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
+github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
+github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
+github.com/ipfs/boxo v0.10.0 h1:tdDAxq8jrsbRkYoF+5Rcqyeb91hgWe2hp7iLu7ORZLY=
+github.com/ipfs/boxo v0.10.0/go.mod h1:Fg+BnfxZ0RPzR0nOodzdIq3A7KgoWAOWsEIImrIQdBM=
+github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s=
+github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk=
+github.com/ipfs/go-datastore v0.6.0 h1:JKyz+Gvz1QEZw0LsX1IBn+JFCJQH4SJVFtM4uWU0Myk=
+github.com/ipfs/go-datastore v0.6.0/go.mod h1:rt5M3nNbSO/8q1t4LNkLyUwRs8HupMeN/8O4Vn9YAT8=
+github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk=
+github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps=
+github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8=
+github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ=
+github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8=
+github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JPtIo=
+github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g=
+github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY=
+github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI=
+github.com/ipld/go-ipld-prime v0.20.0 h1:Ud3VwE9ClxpO2LkCYP7vWPc0Fo+dYdYzgxUJZ3uRG4g=
+github.com/ipld/go-ipld-prime v0.20.0/go.mod h1:PzqZ/ZR981eKbgdr3y2DJYeD/8bgMawdGVlJDE8kK+M=
+github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
+github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
+github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA=
+github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk=
+github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk=
+github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o=
+github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4=
+github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI=
+github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
+github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
+github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
+github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0=
+github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
+github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
+github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c=
+github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic=
+github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM=
+github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro=
+github.com/libp2p/go-libp2p v0.33.0 h1:yTPSr8sJRbfeEYXyeN8VPVSlTlFjtMUwGDRniwaf/xQ=
+github.com/libp2p/go-libp2p v0.33.0/go.mod h1:RIJFRQVUBKy82dnW7J5f1homqqv6NcsDJAl3e7CRGfE=
+github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94=
+github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
+github.com/libp2p/go-libp2p-kad-dht v0.25.1 h1:ofFNrf6MMEy4vi3R1VbJ7LOcTn3Csh0cDcaWHTxtWNA=
+github.com/libp2p/go-libp2p-kad-dht v0.25.1/go.mod h1:6za56ncRHYXX4Nc2vn8z7CZK0P4QiMcrn77acKLM2Oo=
+github.com/libp2p/go-libp2p-kbucket v0.6.3 h1:p507271wWzpy2f1XxPzCQG9NiN6R6lHL9GiSErbQQo0=
+github.com/libp2p/go-libp2p-kbucket v0.6.3/go.mod h1:RCseT7AH6eJWxxk2ol03xtP9pEHetYSPXOaJnOiD8i0=
+github.com/libp2p/go-libp2p-pubsub v0.10.0 h1:wS0S5FlISavMaAbxyQn3dxMOe2eegMfswM471RuHJwA=
+github.com/libp2p/go-libp2p-pubsub v0.10.0/go.mod h1:1OxbaT/pFRO5h+Dpze8hdHQ63R0ke55XTs6b6NwLLkw=
+github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0=
+github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvNDFGKX7QdlpYUcwk=
+github.com/libp2p/go-libp2p-routing-helpers v0.7.2 h1:xJMFyhQ3Iuqnk9Q2dYE1eUTzsah7NLw3Qs2zjUV78T0=
+github.com/libp2p/go-libp2p-routing-helpers v0.7.2/go.mod h1:cN4mJAD/7zfPKXBcs9ze31JGYAZgzdABEm+q/hkswb8=
+github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA=
+github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg=
+github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0=
+github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM=
+github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk=
+github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk=
+github.com/libp2p/go-netroute v0.2.1 h1:V8kVrpD8GK0Riv15/7VN6RbUQ3URNZVosw7H2v9tksU=
+github.com/libp2p/go-netroute v0.2.1/go.mod h1:hraioZr0fhBjG0ZRXJJ6Zj2IVEVNx6tDTFQfSmcq7mQ=
+github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s=
+github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU=
+github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCypkQ=
+github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4=
+github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
+github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk=
+github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
+github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
+github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
+github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4=
+github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY=
+github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
+github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms=
+github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc=
+github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU=
+github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc=
+github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s=
+github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
+github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
+github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
+github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
+github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
+github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
+github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
+github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
+github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
+github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
+github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo=
+github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4=
+github.com/multiformats/go-multiaddr v0.12.2 h1:9G9sTY/wCYajKa9lyfWPmpZAwe6oV+Wb1zcmMS1HG24=
+github.com/multiformats/go-multiaddr v0.12.2/go.mod h1:GKyaTYjZRdcUhyOetrxTk9z0cW+jA/YrnqTOvKgi44M=
+github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A=
+github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk=
+github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=
+github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo=
+github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
+github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
+github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg=
+github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k=
+github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
+github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
+github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
+github.com/multiformats/go-multistream v0.5.0 h1:5htLSLl7lvJk3xx3qT/8Zm9J4K8vEOf/QGkvOGQAyiE=
+github.com/multiformats/go-multistream v0.5.0/go.mod h1:n6tMZiwiP2wUsR8DgfDWw1dydlEqV3l6N3/GBsX6ILA=
+github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
+github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
+github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
+github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
+github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
+github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY=
+github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM=
+github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8=
+github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
+github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
+github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
+github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
+github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
+github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
+github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4=
+github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw=
+github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk=
+github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos=
+github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8=
+github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.47.0 h1:p5Cz0FNHo7SnWOmWmoRozVcjEp0bIVU8cV7OShpjL1k=
+github.com/prometheus/common v0.47.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
+github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
+github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
+github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo=
+github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A=
+github.com/quic-go/quic-go v0.41.0 h1:aD8MmHfgqTURWNJy48IYFg2OnxwHT3JL7ahGs73lb4k=
+github.com/quic-go/quic-go v0.41.0/go.mod h1:qCkNjqczPEvgsOnxZ0eCD14lv+B2LHlFAB++CNOh9hA=
+github.com/quic-go/webtransport-go v0.6.0 h1:CvNsKqc4W2HljHJnoT+rMmbRJybShZ0YPFDD3NxaZLY=
+github.com/quic-go/webtransport-go v0.6.0/go.mod h1:9KjU4AEBqEQidGHNDkZrb8CAa1abRaosM2yGOyiikEc=
+github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk=
+github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
+github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
+github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
+github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
+github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
+github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0=
+github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
+github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
+github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw=
+github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI=
+github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU=
+github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag=
+github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg=
+github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw=
+github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y=
+github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
+github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q=
+github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ=
+github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I=
+github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0=
+github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ=
+github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk=
+github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
+github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs=
+github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
+github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg=
+github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM=
+github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
+github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
+github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
+github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
+github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
+github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
+github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ=
+github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw=
+github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k=
+github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
+go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
+go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
+go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s=
+go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4=
+go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo=
+go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4=
+go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs=
+go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0=
+go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
+go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
+go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc=
+go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
+go.uber.org/fx v1.20.1 h1:zVwVQGS8zYvhh9Xxcu4w1M6ESyeMzebzj2NbSayZ4Mk=
+go.uber.org/fx v1.20.1/go.mod h1:iSYNbHf2y55acNCwCXKx7LbWb5WG1Bnue5RDXz1OREg=
+go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU=
+go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
+go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
+go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
+go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
+go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ=
+go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
+go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
+golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
+golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
+golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20240213143201-ec583247a57a h1:HinSgX1tJRX3KsL//Gxynpw5CTOAIPhgL4W8PNiIpVE=
+golang.org/x/exp v0.0.0-20240213143201-ec583247a57a/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc=
+golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8=
+golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
+golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
+golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
+golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ=
+golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gonum.org/v1/gonum v0.13.0 h1:a0T3bh+7fhRyqeNbiC3qVHYmkiQgit3wnNan/2c0HMM=
+gonum.org/v1/gonum v0.13.0/go.mod h1:/WPYRckkfWrhWefxyYTfrTtQR0KH4iyHNuzxqXAKyAU=
+google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
+google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
+google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
+google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
+google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I=
+google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
+honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI=
+lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k=
+sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
+sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
diff --git a/examples/pubsub/basic-chat-with-rendezvous/main.go b/examples/pubsub/basic-chat-with-rendezvous/main.go
new file mode 100644
index 0000000000..46d8ca093e
--- /dev/null
+++ b/examples/pubsub/basic-chat-with-rendezvous/main.go
@@ -0,0 +1,129 @@
+package main
+
+import (
+ "bufio"
+ "context"
+ "flag"
+ "fmt"
+ "os"
+ "sync"
+
+ "github.com/libp2p/go-libp2p"
+ dht "github.com/libp2p/go-libp2p-kad-dht"
+ pubsub "github.com/libp2p/go-libp2p-pubsub"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/peer"
+ drouting "github.com/libp2p/go-libp2p/p2p/discovery/routing"
+ dutil "github.com/libp2p/go-libp2p/p2p/discovery/util"
+)
+
+var (
+ topicNameFlag = flag.String("topicName", "applesauce", "name of topic to join")
+)
+
+func main() {
+ flag.Parse()
+ ctx := context.Background()
+
+ h, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/0.0.0.0/tcp/0"))
+ if err != nil {
+ panic(err)
+ }
+ go discoverPeers(ctx, h)
+
+ ps, err := pubsub.NewGossipSub(ctx, h)
+ if err != nil {
+ panic(err)
+ }
+ topic, err := ps.Join(*topicNameFlag)
+ if err != nil {
+ panic(err)
+ }
+ go streamConsoleTo(ctx, topic)
+
+ sub, err := topic.Subscribe()
+ if err != nil {
+ panic(err)
+ }
+ printMessagesFrom(ctx, sub)
+}
+
+func initDHT(ctx context.Context, h host.Host) *dht.IpfsDHT {
+ // Start a DHT, for use in peer discovery. We can't just make a new DHT
+ // client because we want each peer to maintain its own local copy of the
+ // DHT, so that the bootstrapping node of the DHT can go down without
+ // inhibiting future peer discovery.
+ kademliaDHT, err := dht.New(ctx, h)
+ if err != nil {
+ panic(err)
+ }
+ if err = kademliaDHT.Bootstrap(ctx); err != nil {
+ panic(err)
+ }
+ var wg sync.WaitGroup
+ for _, peerAddr := range dht.DefaultBootstrapPeers {
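+		// dht.DefaultBootstrapPeers are known-good multiaddrs, so it's safe to ignore the parse error here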
+ peerinfo, _ := peer.AddrInfoFromP2pAddr(peerAddr)
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ if err := h.Connect(ctx, *peerinfo); err != nil {
+ fmt.Println("Bootstrap warning:", err)
+ }
+ }()
+ }
+ wg.Wait()
+
+ return kademliaDHT
+}
+
+func discoverPeers(ctx context.Context, h host.Host) {
+ kademliaDHT := initDHT(ctx, h)
+ routingDiscovery := drouting.NewRoutingDiscovery(kademliaDHT)
+ dutil.Advertise(ctx, routingDiscovery, *topicNameFlag)
+
+	// Look for peers that have announced themselves under the same topic name and attempt to connect to them
+ anyConnected := false
+ for !anyConnected {
+ fmt.Println("Searching for peers...")
+ peerChan, err := routingDiscovery.FindPeers(ctx, *topicNameFlag)
+ if err != nil {
+ panic(err)
+ }
+ for peer := range peerChan {
+ if peer.ID == h.ID() {
+ continue // No self connection
+ }
+ err := h.Connect(ctx, peer)
+ if err != nil {
+ fmt.Printf("Failed connecting to %s, error: %s\n", peer.ID, err)
+ } else {
+ fmt.Println("Connected to:", peer.ID)
+ anyConnected = true
+ }
+ }
+ }
+ fmt.Println("Peer discovery complete")
+}
+
+func streamConsoleTo(ctx context.Context, topic *pubsub.Topic) {
+ reader := bufio.NewReader(os.Stdin)
+ for {
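+		// read one line at a time; note that the trailing newline stays in s and is published as part of the message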
+ s, err := reader.ReadString('\n')
+ if err != nil {
+ panic(err)
+ }
+ if err := topic.Publish(ctx, []byte(s)); err != nil {
+ fmt.Println("### Publish error:", err)
+ }
+ }
+}
+
+func printMessagesFrom(ctx context.Context, sub *pubsub.Subscription) {
+ for {
+ m, err := sub.Next(ctx)
+ if err != nil {
+ panic(err)
+ }
+ fmt.Println(m.ReceivedFrom, ": ", string(m.Message.Data))
+ }
+}
diff --git a/examples/pubsub/chat/.gitignore b/examples/pubsub/chat/.gitignore
new file mode 100644
index 0000000000..76cadc92a3
--- /dev/null
+++ b/examples/pubsub/chat/.gitignore
@@ -0,0 +1 @@
+chat
diff --git a/examples/pubsub/chat/README.md b/examples/pubsub/chat/README.md
new file mode 100644
index 0000000000..a37af70543
--- /dev/null
+++ b/examples/pubsub/chat/README.md
@@ -0,0 +1,220 @@
+# go-libp2p-pubsub chat example
+
+This example project builds a chat room application using go-libp2p-pubsub. The app runs in the terminal
+and uses a text UI to show messages from other peers:
+
+![chat example demo](./chat-example.gif)
+
+The goal of this example is to demonstrate the basic usage of the `PubSub` API, without getting into
+the details of configuration.
+
+## Running
+
+Clone this repo, then `cd` into the `examples/pubsub/chat` directory:
+
+```shell
+git clone https://github.com/libp2p/go-libp2p
+cd go-libp2p/examples/pubsub/chat
+```
+
+Now you can either run with `go run`, or build and run the binary:
+
+```shell
+go run .
+
+# or, build and run separately
+go build .
+./chat
+```
+
+To set a nickname, use the `-nick` flag:
+
+```shell
+go run . -nick=zoidberg
+```
+
+You can join a specific chat room with the `-room` flag:
+
+```shell
+go run . -room=planet-express
+```
+
+It's usually more fun to chat with others, so open a new terminal and run the app again.
+If you set a custom chat room name with the `-room` flag, make sure you use the same one
+for both apps. Once the new instance starts, the two chat apps should discover each other
+automatically using mDNS, and typing a message into one app will send it to any others that are open.
+
+To quit, hit `Ctrl-C`, or type `/quit` into the input field.
+
+## Code Overview
+
+In [`main.go`](./main.go), we create a new libp2p `Host` and then create a new `PubSub` service
+using the GossipSub router:
+
+```go
+func main() {
+ // (omitted) parse flags, etc...
+
+ // create a new libp2p Host that listens on a random TCP port
+ h, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/0.0.0.0/tcp/0"))
+ if err != nil {
+ panic(err)
+ }
+
+ // create a new PubSub service using the GossipSub router
+ ps, err := pubsub.NewGossipSub(ctx, h)
+ if err != nil {
+ panic(err)
+ }
+
+ // (omitted) setup mDNS discovery...
+
+}
+```
+
+We configure the host to use local mDNS discovery so that we can find other peers to chat with
+on the local network. We also parse a few command-line flags so that we can set a friendly nickname
+or choose a chat room by name.
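+
+The mDNS setup itself is omitted from the snippet above; a minimal sketch of what it can look like is
+shown below. Here `mdns` is the `github.com/libp2p/go-libp2p/p2p/discovery/mdns` package, and
+`DiscoveryServiceTag` and `discoveryNotifee` stand in for the service-tag constant and notifee type
+that the example defines for itself:
+
+```go
+// discoveryNotifee gets notified when we find a new peer via mDNS discovery.
+type discoveryNotifee struct {
+	h host.Host
+}
+
+// HandlePeerFound connects to peers discovered via mDNS. Once they're connected,
+// the PubSub system will automatically start interacting with them.
+func (n *discoveryNotifee) HandlePeerFound(pi peer.AddrInfo) {
+	fmt.Printf("discovered new peer %s\n", pi.ID)
+	if err := n.h.Connect(context.Background(), pi); err != nil {
+		fmt.Printf("error connecting to peer %s: %s\n", pi.ID, err)
+	}
+}
+
+// setupDiscovery creates an mDNS discovery service and attaches it to the libp2p Host.
+// This lets us automatically discover peers on the same LAN and connect to them.
+func setupDiscovery(h host.Host) error {
+	s := mdns.NewMdnsService(h, DiscoveryServiceTag, &discoveryNotifee{h: h})
+	return s.Start()
+}
+```
+
+In the example, `setupDiscovery(h)` is called from `main` right after the `PubSub` service is created.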
+
+Once we have a `Host` with an attached `PubSub` service, we join a `ChatRoom`:
+
+```go
+ // still in the main func
+ cr, err := JoinChatRoom(ctx, ps, h.ID(), nick, room)
+ if err != nil {
+ panic(err)
+ }
+```
+
+`ChatRoom` is a custom struct defined in [`chatroom.go`](./chatroom.go):
+
+```go
+// ChatRoom represents a subscription to a single PubSub topic. Messages
+// can be published to the topic with ChatRoom.Publish, and received
+// messages are pushed to the Messages channel.
+type ChatRoom struct {
+ // Messages is a channel of messages received from other peers in the chat room
+ Messages chan *ChatMessage
+
+ ctx context.Context
+ ps *pubsub.PubSub
+ topic *pubsub.Topic
+ sub *pubsub.Subscription
+
+ roomName string
+ self peer.ID
+ nick string
+}
+```
+
+A `ChatRoom` subscribes to a PubSub `Topic` and reads messages from the `Subscription`. We send our messages
+wrapped inside a `ChatMessage` struct:
+
+```go
+type ChatMessage struct {
+ Message string
+ SenderID string
+ SenderNick string
+}
+```
+
+This lets us attach friendly nicknames to the messages for display. A real app might want to make sure that
+nicks are unique, but we just let anyone claim whatever nick they want and send it along with their messages.
+
+The `ChatMessage`s are encoded to JSON and published to the PubSub topic, in the `Data` field of a `pubsub.Message`.
+We could have used any encoding, as long as everyone in the topic agrees on the format, but JSON is simple and good
+enough for our purposes.
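+As an illustration, a published payload might look like
+`{"Message":"hello","SenderID":"<peer id>","SenderNick":"zoidberg"}` on the wire.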
+
+To send messages, we have a `Publish` method, which wraps messages in `ChatMessage` structs, encodes them, and publishes
+to the `pubsub.Topic`:
+
+```go
+func (cr *ChatRoom) Publish(message string) error {
+ m := ChatMessage{
+ Message: message,
+ SenderID: cr.self.String(),
+ SenderNick: cr.nick,
+ }
+ msgBytes, err := json.Marshal(m)
+ if err != nil {
+ return err
+ }
+ return cr.topic.Publish(cr.ctx, msgBytes)
+}
+```
+
+In the background, the `ChatRoom` runs a `readLoop` goroutine, which reads messages from the `pubsub.Subscription`,
+decodes the `ChatMessage` JSON, and sends the `ChatMessage`s on a channel:
+
+```go
+func (cr *ChatRoom) readLoop() {
+ for {
+ msg, err := cr.sub.Next(cr.ctx)
+ if err != nil {
+ close(cr.Messages)
+ return
+ }
+ // only forward messages delivered by others
+ if msg.ReceivedFrom == cr.self {
+ continue
+ }
+ cm := new(ChatMessage)
+ err = json.Unmarshal(msg.Data, cm)
+ if err != nil {
+ continue
+ }
+ // send valid messages onto the Messages channel
+ cr.Messages <- cm
+ }
+}
+```
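+
+Note that the `Subscription` also delivers the messages we publish ourselves, which is why
+`readLoop` skips anything whose `ReceivedFrom` matches our own peer ID; the UI already echoes
+our own messages locally.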
+
+There's also a `ListPeers` method, which just wraps the method of the same name in the `PubSub` service:
+
+```go
+func (cr *ChatRoom) ListPeers() []peer.ID {
+ return cr.ps.ListPeers(topicName(cr.roomName))
+}
+```
+
+That's pretty much it for the `ChatRoom`!
+
+Back in `main.go`, once we've created our `ChatRoom`, we pass it
+to `NewChatUI`, which constructs a three-panel text UI for entering and viewing chat messages, because UIs
+are fun.
+
+The `ChatUI` is defined in [`ui.go`](./ui.go), and the interesting bit is in the `handleEvents` event loop
+method:
+
+```go
+func (ui *ChatUI) handleEvents() {
+ peerRefreshTicker := time.NewTicker(time.Second)
+ defer peerRefreshTicker.Stop()
+
+ for {
+ select {
+ case input := <-ui.inputCh:
+ // when the user types in a line, publish it to the chat room and print to the message window
+ err := ui.cr.Publish(input)
+ if err != nil {
+ printErr("publish error: %s", err)
+ }
+ ui.displaySelfMessage(input)
+
+ case m := <-ui.cr.Messages:
+ // when we receive a message from the chat room, print it to the message window
+ ui.displayChatMessage(m)
+
+ case <-peerRefreshTicker.C:
+ // refresh the list of peers in the chat room periodically
+ ui.refreshPeers()
+
+ case <-ui.cr.ctx.Done():
+ return
+
+ case <-ui.doneCh:
+ return
+ }
+ }
+}
+```
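+
+Because user input, incoming chat messages, the peer-refresh ticker, and shutdown all flow
+through this single `select` loop, events are handled one at a time, keeping the UI logic
+free of explicit locking.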
diff --git a/examples/pubsub/chat/chat-example.gif b/examples/pubsub/chat/chat-example.gif
new file mode 100644
index 0000000000..4a6f56b049
Binary files /dev/null and b/examples/pubsub/chat/chat-example.gif differ
diff --git a/examples/pubsub/chat/chatroom.go b/examples/pubsub/chat/chatroom.go
new file mode 100644
index 0000000000..dc323d5018
--- /dev/null
+++ b/examples/pubsub/chat/chatroom.go
@@ -0,0 +1,112 @@
+package main
+
+import (
+ "context"
+ "encoding/json"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+
+ pubsub "github.com/libp2p/go-libp2p-pubsub"
+)
+
+// ChatRoomBufSize is the number of incoming messages to buffer for each topic.
+const ChatRoomBufSize = 128
+
+// ChatRoom represents a subscription to a single PubSub topic. Messages
+// can be published to the topic with ChatRoom.Publish, and received
+// messages are pushed to the Messages channel.
+type ChatRoom struct {
+ // Messages is a channel of messages received from other peers in the chat room
+ Messages chan *ChatMessage
+
+ ctx context.Context
+ ps *pubsub.PubSub
+ topic *pubsub.Topic
+ sub *pubsub.Subscription
+
+ roomName string
+ self peer.ID
+ nick string
+}
+
+// ChatMessage gets converted to/from JSON and sent in the body of pubsub messages.
+type ChatMessage struct {
+ Message string
+ SenderID string
+ SenderNick string
+}
+
+// JoinChatRoom tries to subscribe to the PubSub topic for the room name, returning
+// a ChatRoom on success.
+func JoinChatRoom(ctx context.Context, ps *pubsub.PubSub, selfID peer.ID, nickname string, roomName string) (*ChatRoom, error) {
+ // join the pubsub topic
+ topic, err := ps.Join(topicName(roomName))
+ if err != nil {
+ return nil, err
+ }
+
+ // and subscribe to it
+ sub, err := topic.Subscribe()
+ if err != nil {
+ return nil, err
+ }
+
+ cr := &ChatRoom{
+ ctx: ctx,
+ ps: ps,
+ topic: topic,
+ sub: sub,
+ self: selfID,
+ nick: nickname,
+ roomName: roomName,
+ Messages: make(chan *ChatMessage, ChatRoomBufSize),
+ }
+
+ // start reading messages from the subscription in a loop
+ go cr.readLoop()
+ return cr, nil
+}
+
+// Publish sends a message to the pubsub topic.
+func (cr *ChatRoom) Publish(message string) error {
+ m := ChatMessage{
+ Message: message,
+ SenderID: cr.self.String(),
+ SenderNick: cr.nick,
+ }
+ msgBytes, err := json.Marshal(m)
+ if err != nil {
+ return err
+ }
+ return cr.topic.Publish(cr.ctx, msgBytes)
+}
+
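+// ListPeers returns a list of peers currently subscribed to the room's topic.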
+func (cr *ChatRoom) ListPeers() []peer.ID {
+ return cr.ps.ListPeers(topicName(cr.roomName))
+}
+
+// readLoop pulls messages from the pubsub topic and pushes them onto the Messages channel.
+func (cr *ChatRoom) readLoop() {
+ for {
+ msg, err := cr.sub.Next(cr.ctx)
+ if err != nil {
+ close(cr.Messages)
+ return
+ }
+ // only forward messages delivered by others
+ if msg.ReceivedFrom == cr.self {
+ continue
+ }
+ cm := new(ChatMessage)
+ err = json.Unmarshal(msg.Data, cm)
+ if err != nil {
+ continue
+ }
+ // send valid messages onto the Messages channel
+ cr.Messages <- cm
+ }
+}
+
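+// topicName derives the pubsub topic name for a given chat room name.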
+func topicName(roomName string) string {
+ return "chat-room:" + roomName
+}
diff --git a/examples/pubsub/chat/go.mod b/examples/pubsub/chat/go.mod
new file mode 100644
index 0000000000..e0905c483e
--- /dev/null
+++ b/examples/pubsub/chat/go.mod
@@ -0,0 +1,98 @@
+module github.com/libp2p/go-libp2p/examples/pubsub/chat
+
+go 1.24
+
+require (
+ github.com/gdamore/tcell/v2 v2.5.2
+ github.com/libp2p/go-libp2p v0.33.0
+ github.com/libp2p/go-libp2p-pubsub v0.10.0
+ github.com/rivo/tview v0.0.0-20220307222120-9994674d60a8
+)
+
+require (
+ github.com/benbjohnson/clock v1.3.5 // indirect
+ github.com/beorn7/perks v1.0.1 // indirect
+ github.com/cespare/xxhash/v2 v2.2.0 // indirect
+ github.com/containerd/cgroups v1.1.0 // indirect
+ github.com/coreos/go-systemd/v22 v22.5.0 // indirect
+ github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
+ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
+ github.com/docker/go-units v0.5.0 // indirect
+ github.com/elastic/gosigar v0.14.2 // indirect
+ github.com/flynn/noise v1.1.0 // indirect
+ github.com/francoispqt/gojay v1.2.13 // indirect
+ github.com/gdamore/encoding v1.0.0 // indirect
+ github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
+ github.com/godbus/dbus/v5 v5.1.0 // indirect
+ github.com/gogo/protobuf v1.3.2 // indirect
+ github.com/google/gopacket v1.1.19 // indirect
+ github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect
+ github.com/gorilla/websocket v1.5.1 // indirect
+ github.com/hashicorp/golang-lru/v2 v2.0.5 // indirect
+ github.com/huin/goupnp v1.3.0 // indirect
+ github.com/ipfs/go-cid v0.4.1 // indirect
+ github.com/ipfs/go-log/v2 v2.5.1 // indirect
+ github.com/jackpal/go-nat-pmp v1.0.2 // indirect
+ github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
+ github.com/klauspost/compress v1.17.6 // indirect
+ github.com/klauspost/cpuid/v2 v2.2.7 // indirect
+ github.com/koron/go-ssdp v0.0.4 // indirect
+ github.com/libp2p/go-buffer-pool v0.1.0 // indirect
+ github.com/libp2p/go-flow-metrics v0.1.0 // indirect
+ github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
+ github.com/libp2p/go-msgio v0.3.0 // indirect
+ github.com/libp2p/go-nat v0.2.0 // indirect
+ github.com/libp2p/go-netroute v0.2.1 // indirect
+ github.com/libp2p/go-reuseport v0.4.0 // indirect
+ github.com/libp2p/go-yamux/v4 v4.0.1 // indirect
+ github.com/libp2p/zeroconf/v2 v2.2.0 // indirect
+ github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
+ github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
+ github.com/mattn/go-isatty v0.0.20 // indirect
+ github.com/mattn/go-runewidth v0.0.13 // indirect
+ github.com/miekg/dns v1.1.58 // indirect
+ github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
+ github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
+ github.com/minio/sha256-simd v1.0.1 // indirect
+ github.com/mr-tron/base58 v1.2.0 // indirect
+ github.com/multiformats/go-base32 v0.1.0 // indirect
+ github.com/multiformats/go-base36 v0.2.0 // indirect
+ github.com/multiformats/go-multiaddr v0.12.2 // indirect
+ github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect
+ github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
+ github.com/multiformats/go-multibase v0.2.0 // indirect
+ github.com/multiformats/go-multicodec v0.9.0 // indirect
+ github.com/multiformats/go-multihash v0.2.3 // indirect
+ github.com/multiformats/go-multistream v0.5.0 // indirect
+ github.com/multiformats/go-varint v0.0.7 // indirect
+ github.com/onsi/ginkgo/v2 v2.15.0 // indirect
+ github.com/opencontainers/runtime-spec v1.2.0 // indirect
+ github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
+ github.com/pkg/errors v0.9.1 // indirect
+ github.com/prometheus/client_golang v1.18.0 // indirect
+ github.com/prometheus/client_model v0.6.0 // indirect
+ github.com/prometheus/common v0.47.0 // indirect
+ github.com/prometheus/procfs v0.12.0 // indirect
+ github.com/quic-go/qpack v0.4.0 // indirect
+ github.com/quic-go/quic-go v0.41.0 // indirect
+ github.com/quic-go/webtransport-go v0.6.0 // indirect
+ github.com/raulk/go-watchdog v1.3.0 // indirect
+ github.com/rivo/uniseg v0.2.0 // indirect
+ github.com/spaolacci/murmur3 v1.1.0 // indirect
+ go.uber.org/dig v1.17.1 // indirect
+ go.uber.org/fx v1.20.1 // indirect
+ go.uber.org/mock v0.4.0 // indirect
+ go.uber.org/multierr v1.11.0 // indirect
+ go.uber.org/zap v1.27.0 // indirect
+ golang.org/x/crypto v0.19.0 // indirect
+ golang.org/x/exp v0.0.0-20240213143201-ec583247a57a // indirect
+ golang.org/x/mod v0.15.0 // indirect
+ golang.org/x/net v0.21.0 // indirect
+ golang.org/x/sync v0.6.0 // indirect
+ golang.org/x/sys v0.17.0 // indirect
+ golang.org/x/term v0.17.0 // indirect
+ golang.org/x/text v0.14.0 // indirect
+ golang.org/x/tools v0.18.0 // indirect
+ google.golang.org/protobuf v1.32.0 // indirect
+ lukechampine.com/blake3 v1.2.1 // indirect
+)
diff --git a/examples/pubsub/chat/go.sum b/examples/pubsub/chat/go.sum
new file mode 100644
index 0000000000..fa89b78b86
--- /dev/null
+++ b/examples/pubsub/chat/go.sum
@@ -0,0 +1,478 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo=
+dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
+dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
+dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
+dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
+git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
+github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
+github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
+github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
+github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
+github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
+github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU=
+github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U=
+github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y=
+github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
+github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
+github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
+github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4=
+github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
+github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
+github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
+github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
+github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
+github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/gdamore/encoding v1.0.0 h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdko=
+github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg=
+github.com/gdamore/tcell/v2 v2.4.1-0.20210905002822-f057f0a857a1/go.mod h1:Az6Jt+M5idSED2YPGtwnfJV0kXohgdCBPmHGSYc1r04=
+github.com/gdamore/tcell/v2 v2.5.2 h1:tKzG29kO9p2V++3oBY2W9zUjYu7IK1MENFeY/BzJSVY=
+github.com/gdamore/tcell/v2 v2.5.2/go.mod h1:wSkrPaXoiIWZqW/g7Px4xc79di6FTcpB8tvaKJ6uGBo=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
+github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
+github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
+github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
+github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
+github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
+github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
+github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 h1:E/LAvt58di64hlYjx7AsNS6C/ysHWYo+2qPCZKTQhRo=
+github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
+github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
+github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY=
+github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
+github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvHgwfx2Vm4=
+github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
+github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
+github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
+github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s=
+github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk=
+github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY=
+github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI=
+github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
+github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
+github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk=
+github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk=
+github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI=
+github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
+github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
+github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
+github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0=
+github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
+github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
+github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM=
+github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro=
+github.com/libp2p/go-libp2p v0.33.0 h1:yTPSr8sJRbfeEYXyeN8VPVSlTlFjtMUwGDRniwaf/xQ=
+github.com/libp2p/go-libp2p v0.33.0/go.mod h1:RIJFRQVUBKy82dnW7J5f1homqqv6NcsDJAl3e7CRGfE=
+github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94=
+github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
+github.com/libp2p/go-libp2p-pubsub v0.10.0 h1:wS0S5FlISavMaAbxyQn3dxMOe2eegMfswM471RuHJwA=
+github.com/libp2p/go-libp2p-pubsub v0.10.0/go.mod h1:1OxbaT/pFRO5h+Dpze8hdHQ63R0ke55XTs6b6NwLLkw=
+github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA=
+github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg=
+github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0=
+github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM=
+github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk=
+github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk=
+github.com/libp2p/go-netroute v0.2.1 h1:V8kVrpD8GK0Riv15/7VN6RbUQ3URNZVosw7H2v9tksU=
+github.com/libp2p/go-netroute v0.2.1/go.mod h1:hraioZr0fhBjG0ZRXJJ6Zj2IVEVNx6tDTFQfSmcq7mQ=
+github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s=
+github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU=
+github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCypkQ=
+github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4=
+github.com/libp2p/zeroconf/v2 v2.2.0 h1:Cup06Jv6u81HLhIj1KasuNM/RHHrJ8T7wOTS4+Tv53Q=
+github.com/libp2p/zeroconf/v2 v2.2.0/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs=
+github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
+github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
+github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
+github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk=
+github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
+github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU=
+github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
+github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
+github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
+github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4=
+github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY=
+github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
+github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms=
+github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc=
+github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU=
+github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc=
+github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s=
+github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
+github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
+github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
+github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
+github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
+github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
+github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
+github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
+github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
+github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
+github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo=
+github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4=
+github.com/multiformats/go-multiaddr v0.12.2 h1:9G9sTY/wCYajKa9lyfWPmpZAwe6oV+Wb1zcmMS1HG24=
+github.com/multiformats/go-multiaddr v0.12.2/go.mod h1:GKyaTYjZRdcUhyOetrxTk9z0cW+jA/YrnqTOvKgi44M=
+github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A=
+github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk=
+github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=
+github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo=
+github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
+github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
+github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg=
+github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k=
+github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
+github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
+github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
+github.com/multiformats/go-multistream v0.5.0 h1:5htLSLl7lvJk3xx3qT/8Zm9J4K8vEOf/QGkvOGQAyiE=
+github.com/multiformats/go-multistream v0.5.0/go.mod h1:n6tMZiwiP2wUsR8DgfDWw1dydlEqV3l6N3/GBsX6ILA=
+github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
+github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
+github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
+github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
+github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
+github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY=
+github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM=
+github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8=
+github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
+github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
+github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
+github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
+github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk=
+github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos=
+github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8=
+github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.47.0 h1:p5Cz0FNHo7SnWOmWmoRozVcjEp0bIVU8cV7OShpjL1k=
+github.com/prometheus/common v0.47.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
+github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
+github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
+github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo=
+github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A=
+github.com/quic-go/quic-go v0.41.0 h1:aD8MmHfgqTURWNJy48IYFg2OnxwHT3JL7ahGs73lb4k=
+github.com/quic-go/quic-go v0.41.0/go.mod h1:qCkNjqczPEvgsOnxZ0eCD14lv+B2LHlFAB++CNOh9hA=
+github.com/quic-go/webtransport-go v0.6.0 h1:CvNsKqc4W2HljHJnoT+rMmbRJybShZ0YPFDD3NxaZLY=
+github.com/quic-go/webtransport-go v0.6.0/go.mod h1:9KjU4AEBqEQidGHNDkZrb8CAa1abRaosM2yGOyiikEc=
+github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk=
+github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU=
+github.com/rivo/tview v0.0.0-20220307222120-9994674d60a8 h1:xe+mmCnDN82KhC010l3NfYlA8ZbOuzbXAzSYBa6wbMc=
+github.com/rivo/tview v0.0.0-20220307222120-9994674d60a8/go.mod h1:WIfMkQNY+oq/mWwtsjOYHIZBuwthioY2srOmljJkTnk=
+github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
+github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
+github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
+github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
+github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
+github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
+github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
+github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0=
+github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
+github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
+github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw=
+github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI=
+github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU=
+github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag=
+github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg=
+github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw=
+github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y=
+github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
+github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q=
+github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ=
+github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I=
+github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0=
+github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ=
+github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk=
+github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
+github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
+github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
+github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
+github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
+github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
+github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
+go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
+go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc=
+go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
+go.uber.org/fx v1.20.1 h1:zVwVQGS8zYvhh9Xxcu4w1M6ESyeMzebzj2NbSayZ4Mk=
+go.uber.org/fx v1.20.1/go.mod h1:iSYNbHf2y55acNCwCXKx7LbWb5WG1Bnue5RDXz1OREg=
+go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU=
+go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
+go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
+go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
+go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
+golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
+golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
+golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20240213143201-ec583247a57a h1:HinSgX1tJRX3KsL//Gxynpw5CTOAIPhgL4W8PNiIpVE=
+golang.org/x/exp v0.0.0-20240213143201-ec583247a57a/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc=
+golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8=
+golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
+golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
+golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
+golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220318055525-2edf467146b5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
+golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U=
+golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ=
+golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
+google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
+google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
+google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
+google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I=
+google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
+honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI=
+lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k=
+sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
+sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
diff --git a/examples/pubsub/chat/main.go b/examples/pubsub/chat/main.go
new file mode 100644
index 0000000000..593e41e245
--- /dev/null
+++ b/examples/pubsub/chat/main.go
@@ -0,0 +1,109 @@
+package main
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/libp2p/go-libp2p"
+ pubsub "github.com/libp2p/go-libp2p-pubsub"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/p2p/discovery/mdns"
+)
+
+// DiscoveryInterval is how often we re-publish our mDNS records.
+const DiscoveryInterval = time.Hour
+
+// DiscoveryServiceTag is used in our mDNS advertisements to discover other chat peers.
+const DiscoveryServiceTag = "pubsub-chat-example"
+
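+// To try it, run two instances on the same LAN (hypothetical invocation; the
+// -nick and -room flags are parsed in main below):
+//
+//	go run . -nick=alice -room=planning
+//	go run . -nick=bob -room=planning
+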
+func main() {
+ // parse some flags to set our nickname and the room to join
+ nickFlag := flag.String("nick", "", "nickname to use in chat. will be generated if empty")
+ roomFlag := flag.String("room", "awesome-chat-room", "name of chat room to join")
+ flag.Parse()
+
+ ctx := context.Background()
+
+ // create a new libp2p Host that listens on a random TCP port
+ h, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/0.0.0.0/tcp/0"))
+ if err != nil {
+ panic(err)
+ }
+
+ // create a new PubSub service using the GossipSub router
+ ps, err := pubsub.NewGossipSub(ctx, h)
+ if err != nil {
+ panic(err)
+ }
+
+ // setup local mDNS discovery
+ if err := setupDiscovery(h); err != nil {
+ panic(err)
+ }
+
+ // use the nickname from the cli flag, or a default if blank
+ nick := *nickFlag
+ if len(nick) == 0 {
+ nick = defaultNick(h.ID())
+ }
+
+ // join the room from the cli flag, or the flag default
+ room := *roomFlag
+
+ // join the chat room
+ cr, err := JoinChatRoom(ctx, ps, h.ID(), nick, room)
+ if err != nil {
+ panic(err)
+ }
+
+ // draw the UI
+ ui := NewChatUI(cr)
+ if err = ui.Run(); err != nil {
+		printErr("error running text UI: %s\n", err)
+ }
+}
+
+// printErr is like fmt.Printf, but writes to stderr.
+func printErr(m string, args ...interface{}) {
+ fmt.Fprintf(os.Stderr, m, args...)
+}
+
+// defaultNick generates a nickname based on the $USER environment variable and
+// the last 8 chars of a peer ID.
+func defaultNick(p peer.ID) string {
+ return fmt.Sprintf("%s-%s", os.Getenv("USER"), shortID(p))
+}
+
+// shortID returns the last 8 chars of a base58-encoded peer id.
+func shortID(p peer.ID) string {
+ pretty := p.String()
+ return pretty[len(pretty)-8:]
+}
+
+// discoveryNotifee gets notified when we find a new peer via mDNS discovery
+type discoveryNotifee struct {
+ h host.Host
+}
+
+// HandlePeerFound connects to peers discovered via mDNS. Once they're connected,
+// the PubSub system will automatically start interacting with them if they also
+// support PubSub.
+func (n *discoveryNotifee) HandlePeerFound(pi peer.AddrInfo) {
+ fmt.Printf("discovered new peer %s\n", pi.ID)
+ err := n.h.Connect(context.Background(), pi)
+ if err != nil {
+ fmt.Printf("error connecting to peer %s: %s\n", pi.ID, err)
+ }
+}
+
+// setupDiscovery creates an mDNS discovery service and attaches it to the libp2p Host.
+// This lets us automatically discover peers on the same LAN and connect to them.
+func setupDiscovery(h host.Host) error {
+ // setup mDNS discovery to find local peers
+ s := mdns.NewMdnsService(h, DiscoveryServiceTag, &discoveryNotifee{h: h})
+ return s.Start()
+}
diff --git a/examples/pubsub/chat/ui.go b/examples/pubsub/chat/ui.go
new file mode 100644
index 0000000000..413b6d3989
--- /dev/null
+++ b/examples/pubsub/chat/ui.go
@@ -0,0 +1,185 @@
+package main
+
+import (
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/gdamore/tcell/v2"
+ "github.com/rivo/tview"
+)
+
+// ChatUI is a Text User Interface (TUI) for a ChatRoom.
+// The Run method will draw the UI to the terminal in "fullscreen"
+// mode. You can quit with Ctrl-C, or by typing "/quit" into the
+// chat prompt.
+type ChatUI struct {
+ cr *ChatRoom
+ app *tview.Application
+ peersList *tview.TextView
+
+ msgW io.Writer
+ inputCh chan string
+ doneCh chan struct{}
+}
+
+// NewChatUI returns a new ChatUI struct that controls the text UI.
+// It won't actually do anything until you call Run().
+func NewChatUI(cr *ChatRoom) *ChatUI {
+ app := tview.NewApplication()
+
+ // make a text view to contain our chat messages
+ msgBox := tview.NewTextView()
+ msgBox.SetDynamicColors(true)
+ msgBox.SetBorder(true)
+ msgBox.SetTitle(fmt.Sprintf("Room: %s", cr.roomName))
+
+ // text views are io.Writers, but they don't automatically refresh.
+ // this sets a change handler to force the app to redraw when we get
+ // new messages to display.
+ msgBox.SetChangedFunc(func() {
+ app.Draw()
+ })
+
+ // an input field for typing messages into
+ inputCh := make(chan string, 32)
+ input := tview.NewInputField().
+ SetLabel(cr.nick + " > ").
+ SetFieldWidth(0).
+ SetFieldBackgroundColor(tcell.ColorBlack)
+
+ // the done func is called when the user hits enter, or tabs out of the field
+ input.SetDoneFunc(func(key tcell.Key) {
+ if key != tcell.KeyEnter {
+ // we don't want to do anything if they just tabbed away
+ return
+ }
+ line := input.GetText()
+ if len(line) == 0 {
+ // ignore blank lines
+ return
+ }
+
+ // bail if requested
+ if line == "/quit" {
+ app.Stop()
+ return
+ }
+
+ // send the line onto the input chan and reset the field text
+ inputCh <- line
+ input.SetText("")
+ })
+
+ // make a text view to hold the list of peers in the room, updated by ui.refreshPeers()
+ peersList := tview.NewTextView()
+ peersList.SetBorder(true)
+ peersList.SetTitle("Peers")
+ peersList.SetChangedFunc(func() { app.Draw() })
+
+ // chatPanel is a horizontal box with messages on the left and peers on the right
+ // the peers list takes 20 columns, and the messages take the remaining space
+ chatPanel := tview.NewFlex().
+ AddItem(msgBox, 0, 1, false).
+ AddItem(peersList, 20, 1, false)
+
+ // flex is a vertical box with the chatPanel on top and the input field at the bottom.
+ flex := tview.NewFlex().
+ SetDirection(tview.FlexRow).
+ AddItem(chatPanel, 0, 1, false).
+ AddItem(input, 1, 1, true)
+
+ app.SetRoot(flex, true)
+
+ return &ChatUI{
+ cr: cr,
+ app: app,
+ peersList: peersList,
+ msgW: msgBox,
+ inputCh: inputCh,
+ doneCh: make(chan struct{}, 1),
+ }
+}
+
+// Run starts the chat event loop in the background, then starts
+// the event loop for the text UI.
+func (ui *ChatUI) Run() error {
+ go ui.handleEvents()
+ defer ui.end()
+
+ return ui.app.Run()
+}
+
+// end signals the event loop to exit gracefully
+func (ui *ChatUI) end() {
+ ui.doneCh <- struct{}{}
+}
+
+// refreshPeers pulls the list of peers currently in the chat room and
+// displays the last 8 chars of their peer id in the Peers panel in the ui.
+func (ui *ChatUI) refreshPeers() {
+ peers := ui.cr.ListPeers()
+
+ // clear is thread-safe
+ ui.peersList.Clear()
+
+ for _, p := range peers {
+ fmt.Fprintln(ui.peersList, shortID(p))
+ }
+
+ ui.app.Draw()
+}
+
+// displayChatMessage writes a ChatMessage from the room to the message window,
+// with the sender's nick highlighted in green.
+func (ui *ChatUI) displayChatMessage(cm *ChatMessage) {
+ prompt := withColor("green", fmt.Sprintf("<%s>:", cm.SenderNick))
+ fmt.Fprintf(ui.msgW, "%s %s\n", prompt, cm.Message)
+}
+
+// displaySelfMessage writes a message from ourselves to the message window,
+// with our nick highlighted in yellow.
+func (ui *ChatUI) displaySelfMessage(msg string) {
+ prompt := withColor("yellow", fmt.Sprintf("<%s>:", ui.cr.nick))
+ fmt.Fprintf(ui.msgW, "%s %s\n", prompt, msg)
+}
+
+// handleEvents runs an event loop that sends user input to the chat room
+// and displays messages received from the chat room. It also periodically
+// refreshes the list of peers in the UI.
+func (ui *ChatUI) handleEvents() {
+ peerRefreshTicker := time.NewTicker(time.Second)
+ defer peerRefreshTicker.Stop()
+
+ for {
+ select {
+ case input := <-ui.inputCh:
+ // when the user types in a line, publish it to the chat room and print to the message window
+ err := ui.cr.Publish(input)
+ if err != nil {
+				printErr("publish error: %s\n", err)
+ }
+ ui.displaySelfMessage(input)
+
+ case m := <-ui.cr.Messages:
+ // when we receive a message from the chat room, print it to the message window
+ ui.displayChatMessage(m)
+
+ case <-peerRefreshTicker.C:
+ // refresh the list of peers in the chat room periodically
+ ui.refreshPeers()
+
+ case <-ui.cr.ctx.Done():
+ return
+
+ case <-ui.doneCh:
+ return
+ }
+ }
+}
+
+// withColor wraps a string with color tags for display in the messages text box.
+func withColor(color, msg string) string {
+ return fmt.Sprintf("[%s]%s[-]", color, msg)
+}
diff --git a/examples/relay/.gitignore b/examples/relay/.gitignore
new file mode 100644
index 0000000000..32e541af93
--- /dev/null
+++ b/examples/relay/.gitignore
@@ -0,0 +1 @@
+relay
diff --git a/examples/relay/README.md b/examples/relay/README.md
new file mode 100644
index 0000000000..a4bd5c5044
--- /dev/null
+++ b/examples/relay/README.md
@@ -0,0 +1,15 @@
+# Relay-based P2P Communication Example
+
+## Overview
+This example demonstrates relay-based peer-to-peer communication using the libp2p library in Go. It creates two "unreachable" libp2p hosts and facilitates their communication through a relay node.
+
+## Features
+- Creation of two "unreachable" libp2p hosts.
+- Setup of a relay node to enable communication between these hosts.
+
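+## How it works
+
+The key ingredient is a circuit relay multiaddr: the unreachable host is dialed through the relay by addressing it as `/p2p/<relay-id>/p2p-circuit/p2p/<destination-id>`. Below is a condensed sketch of how `main.go` composes and dials such an address (error handling omitted for brevity):
+
+```go
+// Compose a relayed address for unreachable2 via relay1.
+relayaddr, _ := ma.NewMultiaddr("/p2p/" + relay1info.ID.String() + "/p2p-circuit/p2p/" + unreachable2.ID().String())
+
+// Dial unreachable2 through the relay.
+err := unreachable1.Connect(context.Background(), peer.AddrInfo{
+	ID:    unreachable2.ID(),
+	Addrs: []ma.Multiaddr{relayaddr},
+})
+```
+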
+## Usage
+
+Run the program:
+
+```bash
+go run .
+```
\ No newline at end of file
diff --git a/examples/relay/main.go b/examples/relay/main.go
new file mode 100644
index 0000000000..a92ab96812
--- /dev/null
+++ b/examples/relay/main.go
@@ -0,0 +1,160 @@
+package main
+
+import (
+ "context"
+ "log"
+
+ "github.com/libp2p/go-libp2p"
+ "github.com/libp2p/go-libp2p/p2p/net/swarm"
+ "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client"
+ "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+func main() {
+ run()
+}
+
+func run() {
+ // Create two "unreachable" libp2p hosts that want to communicate.
+ // We are configuring them with no listen addresses to mimic hosts
+ // that cannot be directly dialed due to problematic firewall/NAT
+ // configurations.
+ unreachable1, err := libp2p.New(
+ libp2p.NoListenAddrs,
+ // Usually EnableRelay() is not required as it is enabled by default
+ // but NoListenAddrs overrides this, so we're adding it in explicitly again.
+ libp2p.EnableRelay(),
+ )
+ if err != nil {
+ log.Printf("Failed to create unreachable1: %v", err)
+ return
+ }
+
+ unreachable2, err := libp2p.New(
+ libp2p.NoListenAddrs,
+ libp2p.EnableRelay(),
+ )
+ if err != nil {
+ log.Printf("Failed to create unreachable2: %v", err)
+ return
+ }
+
+ log.Println("First let's attempt to directly connect")
+
+ // Attempt to connect the unreachable hosts directly
+ unreachable2info := peer.AddrInfo{
+ ID: unreachable2.ID(),
+ Addrs: unreachable2.Addrs(),
+ }
+
+ err = unreachable1.Connect(context.Background(), unreachable2info)
+ if err == nil {
+ log.Printf("This actually should have failed.")
+ return
+ }
+
+ log.Println("As suspected we cannot directly dial between the unreachable hosts")
+
+ // Create a host to act as a middleman to relay messages on our behalf
+ relay1, err := libp2p.New()
+ if err != nil {
+ log.Printf("Failed to create relay1: %v", err)
+ return
+ }
+
+ // Configure the host to offer the circuit relay service.
+ // Any host that is directly dialable in the network (or on the internet)
+ // can offer a circuit relay service, this isn't just the job of
+ // "dedicated" relay services.
+ // In circuit relay v2 (which we're using here!) it is rate limited so that
+ // any node can offer this service safely
+ _, err = relay.New(relay1)
+ if err != nil {
+ log.Printf("Failed to instantiate the relay: %v", err)
+ return
+ }
+
+ relay1info := peer.AddrInfo{
+ ID: relay1.ID(),
+ Addrs: relay1.Addrs(),
+ }
+
+ // Connect both unreachable1 and unreachable2 to relay1
+ if err := unreachable1.Connect(context.Background(), relay1info); err != nil {
+ log.Printf("Failed to connect unreachable1 and relay1: %v", err)
+ return
+ }
+
+ if err := unreachable2.Connect(context.Background(), relay1info); err != nil {
+ log.Printf("Failed to connect unreachable2 and relay1: %v", err)
+ return
+ }
+
+ // Now, to test the communication, let's set up a protocol handler on unreachable2
+ unreachable2.SetStreamHandler("/customprotocol", func(s network.Stream) {
+ log.Println("Awesome! We're now communicating via the relay!")
+
+ // End the example
+ s.Close()
+ })
+
+ // Hosts that want to have messages relayed on their behalf need to reserve a slot
+ // with the circuit relay service host
+ // As we will open a stream to unreachable2, unreachable2 needs to make the
+ // reservation
+ _, err = client.Reserve(context.Background(), unreachable2, relay1info)
+ if err != nil {
+ log.Printf("unreachable2 failed to receive a relay reservation from relay1. %v", err)
+ return
+ }
+
+ // Now create a new address for unreachable2 that specifies to communicate via
+ // relay1 using a circuit relay
+ relayaddr, err := ma.NewMultiaddr("/p2p/" + relay1info.ID.String() + "/p2p-circuit/p2p/" + unreachable2.ID().String())
+ if err != nil {
+ log.Println(err)
+ return
+ }
+
+	// Since we just tried and failed to dial, the dialer system will, by default,
+ // prevent us from redialing again so quickly. Since we know what we're doing, we
+ // can use this ugly hack (it's on our TODO list to make it a little cleaner)
+	// to tell the dialer "no, it's okay, let's try this again"
+ unreachable1.Network().(*swarm.Swarm).Backoff().Clear(unreachable2.ID())
+
+ log.Println("Now let's attempt to connect the hosts via the relay node")
+
+ // Open a connection to the previously unreachable host via the relay address
+ unreachable2relayinfo := peer.AddrInfo{
+ ID: unreachable2.ID(),
+ Addrs: []ma.Multiaddr{relayaddr},
+ }
+ if err := unreachable1.Connect(context.Background(), unreachable2relayinfo); err != nil {
+ log.Printf("Unexpected error here. Failed to connect unreachable1 and unreachable2: %v", err)
+ return
+ }
+
+ log.Println("Yep, that worked!")
+
+ // Woohoo! we're connected!
+ // Let's start talking!
+
+ // Because we don't have a direct connection to the destination node - we have a relayed connection -
+	// the connection is marked as limited. Since the relay limits the amount of data that can be
+	// exchanged over the relayed connection, the application needs to explicitly opt in to using a
+ // relayed connection. In general, we should only do this if we have low bandwidth requirements,
+ // and we're happy for the connection to be killed when the relayed connection is replaced with a
+ // direct (holepunched) connection.
+ s, err := unreachable1.NewStream(network.WithAllowLimitedConn(context.Background(), "customprotocol"), unreachable2.ID(), "/customprotocol")
+ if err != nil {
+ log.Println("Whoops, this should have worked...: ", err)
+ return
+ }
+
+ s.Read(make([]byte, 1)) // block until the handler closes the stream
+}
diff --git a/examples/relay/main_test.go b/examples/relay/main_test.go
new file mode 100644
index 0000000000..d42c576663
--- /dev/null
+++ b/examples/relay/main_test.go
@@ -0,0 +1,18 @@
+package main
+
+import (
+ "os"
+ "testing"
+
+ "github.com/libp2p/go-libp2p/examples/testutils"
+)
+
+func TestMain(t *testing.T) {
+ if os.Getenv("CI") != "" {
+ t.Skip("This test is flaky on CI, see https://github.com/libp2p/go-libp2p/issues/1158.")
+ }
+ var h testutils.LogHarness
+ h.ExpectPrefix("As suspected we cannot directly dial between the unreachable hosts")
+ h.ExpectPrefix("Awesome! We're now communicating via the relay!")
+ h.Run(t, run)
+}
diff --git a/examples/routed-echo/.gitignore b/examples/routed-echo/.gitignore
new file mode 100644
index 0000000000..da2e2c8644
--- /dev/null
+++ b/examples/routed-echo/.gitignore
@@ -0,0 +1 @@
+routed-echo
diff --git a/examples/routed-echo/README.md b/examples/routed-echo/README.md
new file mode 100644
index 0000000000..961a499879
--- /dev/null
+++ b/examples/routed-echo/README.md
@@ -0,0 +1,52 @@
+# Routed Host: echo client/server
+
+This example is intended to follow up the basic host and echo examples by adding the use of the ipfs distributed hash table to look up peers.
+
+Functionally, this example works like the echo example; however, setting up the host includes wrapping it with a Kademlia distributed hash table, so it can find peers using only their IDs.
+
+We'll also enable NAT port mapping to illustrate the setup, although it isn't guaranteed to actually be used to make the connections. Additionally, this example uses the newer `libp2p.New` constructor.
+
+## Build
+
+From `go-libp2p/examples` base folder:
+
+```
+> cd routed-echo/
+> go build
+```
+
+## Usage
+
+```
+> ./routed-echo -l 10000
+2018/02/19 12:22:32 I can be reached at:
+2018/02/19 12:22:32 /ip4/127.0.0.1/tcp/10000/p2p/QmfRY4vuKpU2tApACrbmYFn9xoeNzMQhLXg7nKnyvnzHeL
+2018/02/19 12:22:32 /ip4/192.168.1.203/tcp/10000/p2p/QmfRY4vuKpU2tApACrbmYFn9xoeNzMQhLXg7nKnyvnzHeL
+2018/02/19 12:22:32 Now run "./routed-echo -l 10001 -d QmfRY4vuKpU2tApACrbmYFn9xoeNzMQhLXg7nKnyvnzHeL" on a different terminal
+2018/02/19 12:22:32 listening for connections
+```
+
+The listener libp2p host will print its randomly generated Base58-encoded ID string which, combined with the ipfs DHT, can be used to reach the host despite lacking other connection details. By default, this example will bootstrap off your local IPFS peer (assuming one is running). If you'd rather bootstrap off the same peers go-ipfs uses, pass the `-global` flag in both terminals, as shown below.
+
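+For example, to use the global bootstrap peers:
+
+```
+> ./routed-echo -l 10000 -global
+```
+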
+Now, launch another node that talks to the listener:
+
+```
+> ./routed-echo -l 10001 -d QmfRY4vuKpU2tApACrbmYFn9xoeNzMQhLXg7nKnyvnzHeL
+```
+
+As in other examples, the new node will send the message `"Hello, world!"` to the listener, which will in turn echo it over the stream and close it. The listener logs the message, and the sender logs the response.
+
+## Details
+
+The `makeRoutedHost()` function creates a [go-libp2p routedhost](https://godoc.org/github.com/libp2p/go-libp2p/p2p/host/routed) object. `routedhost` objects wrap [go-libp2p basichost](https://godoc.org/github.com/libp2p/go-libp2p/p2p/host/basic) and add the ability to look up a peer's address using the ipfs distributed hash table as implemented by [go-libp2p-kad-dht](https://godoc.org/github.com/libp2p/go-libp2p-kad-dht).
+
+In order to create the routed host, the example needs:
+
+- A [go-libp2p basichost](https://godoc.org/github.com/libp2p/go-libp2p/p2p/host/basic) as in other examples.
+- A [go-libp2p-kad-dht](https://godoc.org/github.com/libp2p/go-libp2p-kad-dht) which provides the ability to look up peers by ID. Wrapping takes place via `routedHost := rhost.Wrap(basicHost, dht)`, as sketched below.
+
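+A condensed sketch of that wiring, taken from the example's `makeRoutedHost()` (error handling omitted):
+
+```go
+// A simple, in-memory, thread-safe datastore for the DHT.
+dstore := dsync.MutexWrap(ds.NewMapDatastore())
+
+// Make the DHT and wrap the basic host with it.
+dht := dht.NewDHT(ctx, basicHost, dstore)
+routedHost := rhost.Wrap(basicHost, dht)
+```
+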
+A `routedhost` can now open streams (a bi-directional channel between two peers) using [NewStream](https://godoc.org/github.com/libp2p/go-libp2p/p2p/host/basic#BasicHost.NewStream) and use them to send and receive data tagged with a `Protocol.ID` (a string). The host can also listen for incoming connections for a given
+`Protocol` with [`SetStreamHandler()`](https://godoc.org/github.com/libp2p/go-libp2p/p2p/host/basic#BasicHost.SetStreamHandler). The advantage of the routed host is that only the Peer ID is required to make the connection, not the underlying address details, since they are provided by the DHT.
+
+The example makes use of all of this to enable communication between a listener and a sender using protocol `/echo/1.0.0` (which could be any other protocol name).
diff --git a/examples/routed-echo/bootstrap.go b/examples/routed-echo/bootstrap.go
new file mode 100644
index 0000000000..b09228e878
--- /dev/null
+++ b/examples/routed-echo/bootstrap.go
@@ -0,0 +1,130 @@
+package main
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "net/http"
+ "sync"
+
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+var (
+ IPFS_PEERS = convertPeers([]string{
+ "/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ",
+ "/ip4/104.236.179.241/tcp/4001/p2p/QmSoLPppuBtQSGwKDZT2M73ULpjvfd3aZ6ha4oFGL1KrGM",
+ "/ip4/128.199.219.111/tcp/4001/p2p/QmSoLSafTMBsPKadTEgaXctDQVcqN88CNLHXMkTNwMKPnu",
+ "/ip4/104.236.76.40/tcp/4001/p2p/QmSoLV4Bbm51jM9C4gDYZQ9Cy3U6aXMJDAbzgu2fzaDs64",
+ "/ip4/178.62.158.247/tcp/4001/p2p/QmSoLer265NRgSp2LA3dPaeykiS1J6DifTC88f5uVQKNAd",
+ "/ip6/2604:a880:1:20::203:d001/tcp/4001/p2p/QmSoLPppuBtQSGwKDZT2M73ULpjvfd3aZ6ha4oFGL1KrGM",
+ "/ip6/2400:6180:0:d0::151:6001/tcp/4001/p2p/QmSoLSafTMBsPKadTEgaXctDQVcqN88CNLHXMkTNwMKPnu",
+ "/ip6/2604:a880:800:10::4a:5001/tcp/4001/p2p/QmSoLV4Bbm51jM9C4gDYZQ9Cy3U6aXMJDAbzgu2fzaDs64",
+ "/ip6/2a03:b0c0:0:1010::23:1001/tcp/4001/p2p/QmSoLer265NRgSp2LA3dPaeykiS1J6DifTC88f5uVQKNAd",
+ })
+ LOCAL_PEER_ENDPOINT = "http://localhost:5001/api/v0/id"
+)
+
+// Borrowed from ipfs code to parse the results of the command `ipfs id`
+type IdOutput struct {
+ ID string
+ PublicKey string
+ Addresses []string
+ AgentVersion string
+ ProtocolVersion string
+}
+
+// quick and dirty function to get the local ipfs daemon's address for bootstrapping
+func getLocalPeerInfo() []peer.AddrInfo {
+ resp, err := http.PostForm(LOCAL_PEER_ENDPOINT, nil)
+ if err != nil {
+ log.Fatalln(err)
+ }
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ log.Fatalln(err)
+ }
+ var js IdOutput
+ err = json.Unmarshal(body, &js)
+ if err != nil {
+ log.Fatalln(err)
+ }
+ for _, addr := range js.Addresses {
+ // For some reason, possibly NAT traversal, we need to grab the loopback ip address
+ if addr[0:8] == "/ip4/127" {
+ return convertPeers([]string{addr})
+ }
+ }
+	log.Fatalln("no loopback address found for the local ipfs daemon")
+ return make([]peer.AddrInfo, 1) // not reachable, but keeps the compiler happy
+}
+
+func convertPeers(peers []string) []peer.AddrInfo {
+ pinfos := make([]peer.AddrInfo, len(peers))
+ for i, addr := range peers {
+ maddr := ma.StringCast(addr)
+ p, err := peer.AddrInfoFromP2pAddr(maddr)
+ if err != nil {
+ log.Fatalln(err)
+ }
+ pinfos[i] = *p
+ }
+ return pinfos
+}
+
+// This code is borrowed from the go-ipfs bootstrap process
+func bootstrapConnect(ctx context.Context, ph host.Host, peers []peer.AddrInfo) error {
+ if len(peers) < 1 {
+ return errors.New("not enough bootstrap peers")
+ }
+
+ errs := make(chan error, len(peers))
+ var wg sync.WaitGroup
+ for _, p := range peers {
+
+ // performed asynchronously because when performed synchronously, if
+ // one `Connect` call hangs, subsequent calls are more likely to
+ // fail/abort due to an expiring context.
+ // Also, performed asynchronously for dial speed.
+
+ wg.Add(1)
+ go func(p peer.AddrInfo) {
+ defer wg.Done()
+			defer log.Println("bootstrapDial", ph.ID(), p.ID)
+ log.Printf("%s bootstrapping to %s", ph.ID(), p.ID)
+
+ ph.Peerstore().AddAddrs(p.ID, p.Addrs, peerstore.PermanentAddrTTL)
+ if err := ph.Connect(ctx, p); err != nil {
+				log.Println("bootstrapDialFailed", p.ID)
+ log.Printf("failed to bootstrap with %v: %s", p.ID, err)
+ errs <- err
+ return
+ }
+			log.Println("bootstrapDialSuccess", p.ID)
+ log.Printf("bootstrapped with %v", p.ID)
+ }(p)
+ }
+ wg.Wait()
+
+ // our failure condition is when no connection attempt succeeded.
+ // So drain the errs channel, counting the results.
+ close(errs)
+ count := 0
+ var err error
+ for err = range errs {
+ if err != nil {
+ count++
+ }
+ }
+ if count == len(peers) {
+		return fmt.Errorf("failed to bootstrap: %w", err)
+ }
+ return nil
+}
diff --git a/examples/routed-echo/main.go b/examples/routed-echo/main.go
new file mode 100644
index 0000000000..342661b619
--- /dev/null
+++ b/examples/routed-echo/main.go
@@ -0,0 +1,194 @@
+package main
+
+import (
+ "bufio"
+ "context"
+ "crypto/rand"
+ "flag"
+ "fmt"
+ "io"
+ "log"
+ mrand "math/rand"
+
+ "github.com/libp2p/go-libp2p"
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+
+ ds "github.com/ipfs/go-datastore"
+ dsync "github.com/ipfs/go-datastore/sync"
+ golog "github.com/ipfs/go-log/v2"
+
+ dht "github.com/libp2p/go-libp2p-kad-dht"
+ rhost "github.com/libp2p/go-libp2p/p2p/host/routed"
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+// makeRoutedHost creates a LibP2P host with a random peer ID listening on the
+// given multiaddress. It will bootstrap using the provided PeerInfo.
+func makeRoutedHost(listenPort int, randseed int64, bootstrapPeers []peer.AddrInfo, globalFlag string) (host.Host, error) {
+ // If the seed is zero, use real cryptographic randomness. Otherwise, use a
+ // deterministic randomness source to make generated keys stay the same
+ // across multiple runs
+ var r io.Reader
+ if randseed == 0 {
+ r = rand.Reader
+ } else {
+ r = mrand.New(mrand.NewSource(randseed))
+ }
+
+ // Generate a key pair for this host. We will use it at least
+ // to obtain a valid host ID.
+ priv, _, err := crypto.GenerateKeyPairWithReader(crypto.RSA, 2048, r)
+ if err != nil {
+ return nil, err
+ }
+
+ opts := []libp2p.Option{
+ libp2p.ListenAddrStrings(fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", listenPort)),
+ libp2p.Identity(priv),
+ libp2p.DefaultTransports,
+ libp2p.DefaultMuxers,
+ libp2p.DefaultSecurity,
+ libp2p.NATPortMap(),
+ }
+
+ ctx := context.Background()
+
+ basicHost, err := libp2p.New(opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ // Construct a datastore (needed by the DHT). This is just a simple, in-memory thread-safe datastore.
+ dstore := dsync.MutexWrap(ds.NewMapDatastore())
+
+ // Make the DHT
+ dht := dht.NewDHT(ctx, basicHost, dstore)
+
+ // Make the routed host
+ routedHost := rhost.Wrap(basicHost, dht)
+
+ // connect to the chosen ipfs nodes
+ err = bootstrapConnect(ctx, routedHost, bootstrapPeers)
+ if err != nil {
+ return nil, err
+ }
+
+ // Bootstrap the host
+ err = dht.Bootstrap(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ // Build host multiaddress
+ hostAddr, _ := ma.NewMultiaddr(fmt.Sprintf("/ipfs/%s", routedHost.ID()))
+
+	// Now we can build a full multiaddress to reach this host
+	// by encapsulating both addresses.
+ addrs := routedHost.Addrs()
+ log.Println("I can be reached at:")
+ for _, addr := range addrs {
+ log.Println(addr.Encapsulate(hostAddr))
+ }
+
+ log.Printf("Now run \"./routed-echo -l %d -d %s%s\" on a different terminal\n", listenPort+1, routedHost.ID(), globalFlag)
+
+ return routedHost, nil
+}
+
+func main() {
+ // LibP2P code uses golog to log messages. They log with different
+ // string IDs (i.e. "swarm"). We can control the verbosity level for
+ // all loggers with:
+	golog.SetAllLoggers(golog.LevelInfo) // Change to golog.LevelDebug for extra debug info
+
+ // Parse options from the command line
+ listenF := flag.Int("l", 0, "wait for incoming connections")
+ target := flag.String("d", "", "target peer to dial")
+ seed := flag.Int64("seed", 0, "set random seed for id generation")
+ global := flag.Bool("global", false, "use global ipfs peers for bootstrapping")
+ flag.Parse()
+
+ if *listenF == 0 {
+ log.Fatal("Please provide a port to bind on with -l")
+ }
+
+ // Make a host that listens on the given multiaddress
+ var bootstrapPeers []peer.AddrInfo
+ var globalFlag string
+ if *global {
+ log.Println("using global bootstrap")
+ bootstrapPeers = IPFS_PEERS
+ globalFlag = " -global"
+ } else {
+ log.Println("using local bootstrap")
+ bootstrapPeers = getLocalPeerInfo()
+ globalFlag = ""
+ }
+ ha, err := makeRoutedHost(*listenF, *seed, bootstrapPeers, globalFlag)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Set a stream handler on host A. /echo/1.0.0 is
+ // a user-defined protocol name.
+ ha.SetStreamHandler("/echo/1.0.0", func(s network.Stream) {
+ log.Println("Got a new stream!")
+ if err := doEcho(s); err != nil {
+ log.Println(err)
+ s.Reset()
+ } else {
+ s.Close()
+ }
+ })
+
+ if *target == "" {
+ log.Println("listening for connections")
+ select {} // hang forever
+ }
+ /**** This is where the listener code ends ****/
+
+ peerid, err := peer.Decode(*target)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ log.Println("opening stream")
+ // make a new stream from host B to host A
+ // it should be handled on host A by the handler we set above because
+ // we use the same /echo/1.0.0 protocol
+ s, err := ha.NewStream(context.Background(), peerid, "/echo/1.0.0")
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ _, err = s.Write([]byte("Hello, world!\n"))
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ out, err := io.ReadAll(s)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ log.Printf("read reply: %q\n", out)
+}
+
+// doEcho reads a line of data from a stream and writes it back
+func doEcho(s network.Stream) error {
+ buf := bufio.NewReader(s)
+ str, err := buf.ReadString('\n')
+ if err != nil {
+ return err
+ }
+
+ log.Printf("read: %s\n", str)
+ _, err = s.Write([]byte(str))
+ return err
+}
diff --git a/examples/testutils/logharness.go b/examples/testutils/logharness.go
new file mode 100644
index 0000000000..2f075645eb
--- /dev/null
+++ b/examples/testutils/logharness.go
@@ -0,0 +1,120 @@
+package testutils
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "log"
+ "os"
+ "strings"
+ "testing"
+)
+
+// A LogHarness runs sets of assertions against the log output of a function. Assertions are grouped
+// into sequences of messages that are expected to be found in the log output. Calling one of the Expect
+// methods on the harness adds an expectation to the default sequence of messages. Additional sequences
+// can be created by calling NewSequence.
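+//
+// A minimal usage sketch (hypothetical test; assumes the code under test logs
+// via the standard library's log package):
+//
+//	var h LogHarness
+//	h.ExpectPrefix("listening on")
+//	h.Run(t, func() { log.Println("listening on :9000") })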
+type LogHarness struct {
+ buf bytes.Buffer
+ sequences []*Sequence
+}
+
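+// An Expectation matches a single line of log output.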
+type Expectation interface {
+ IsMatch(line string) bool
+ String() string
+}
+
+// Run executes the function f and captures any output written using Go's standard log. Each sequence
+// of expected messages is then asserted against the captured log output.
+func (h *LogHarness) Run(t *testing.T, f func()) {
+ // Capture raw log output
+ fl := log.Flags()
+ log.SetFlags(0)
+ log.SetOutput(&h.buf)
+ f()
+ log.SetFlags(fl)
+ log.SetOutput(os.Stderr)
+
+ for _, seq := range h.sequences {
+ seq.Assert(t, bufio.NewScanner(bytes.NewReader(h.buf.Bytes())))
+ }
+}
+
+// Expect adds an expectation to the default sequence that the log contains a line equal to s
+func (h *LogHarness) Expect(s string) {
+ if len(h.sequences) == 0 {
+ h.sequences = append(h.sequences, &Sequence{name: ""})
+ }
+ h.sequences[0].Expect(s)
+}
+
+// ExpectPrefix adds an expectation to the default sequence that the log contains a line starting with s
+func (h *LogHarness) ExpectPrefix(s string) {
+ if len(h.sequences) == 0 {
+ h.sequences = append(h.sequences, &Sequence{name: ""})
+ }
+ h.sequences[0].ExpectPrefix(s)
+}
+
+// NewSequence creates a new sequence of expected log messages
+func (h *LogHarness) NewSequence(name string) *Sequence {
+ seq := &Sequence{name: name}
+ h.sequences = append(h.sequences, seq)
+ return seq
+}
+
+type prefix string
+
+func (p prefix) IsMatch(line string) bool {
+ return strings.HasPrefix(line, string(p))
+}
+
+func (p prefix) String() string {
+ return fmt.Sprintf("prefix %q", string(p))
+}
+
+type text string
+
+func (t text) IsMatch(line string) bool {
+ return line == string(t)
+}
+
+func (t text) String() string {
+ return fmt.Sprintf("text %q", string(t))
+}
+
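+// A Sequence is a named, ordered list of log expectations that are matched in
+// order against the captured output.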
+type Sequence struct {
+ name string
+ exp []Expectation
+}
+
+func (seq *Sequence) Assert(t *testing.T, s *bufio.Scanner) {
+ var tag string
+ if seq.name != "" {
+ tag = fmt.Sprintf("[%s] ", seq.name)
+ }
+ // Match raw log lines against expectations
+exploop:
+ for _, e := range seq.exp {
+ for s.Scan() {
+ if e.IsMatch(s.Text()) {
+ t.Logf("%ssaw: %s", tag, s.Text())
+ continue exploop
+ }
+ }
+ if s.Err() == nil {
+ t.Errorf("%sdid not see expected %s", tag, e.String())
+ return
+ }
+ }
+}
+
+// Expect adds an expectation that the log contains a line equal to s
+func (seq *Sequence) Expect(s string) {
+ seq.exp = append(seq.exp, text(s))
+}
+
+// ExpectPrefix adds an expectation that the log contains a line starting with s
+func (seq *Sequence) ExpectPrefix(s string) {
+ seq.exp = append(seq.exp, prefix(s))
+}
diff --git a/examples/testutils/net.go b/examples/testutils/net.go
new file mode 100644
index 0000000000..d33bd4a11d
--- /dev/null
+++ b/examples/testutils/net.go
@@ -0,0 +1,37 @@
+package testutils
+
+import (
+ "fmt"
+ "net"
+ "testing"
+)
+
+// FindFreePort attempts to find an unused tcp port
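+//
+// Typical use from a test (hypothetical values):
+//
+//	port, err := testutils.FindFreePort(t, "localhost", 10)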
+func FindFreePort(t *testing.T, host string, maxAttempts int) (int, error) {
+ t.Helper()
+
+ if host == "" {
+ host = "localhost"
+ }
+
+ for i := 0; i < maxAttempts; i++ {
+ addr, err := net.ResolveTCPAddr("tcp", net.JoinHostPort(host, "0"))
+ if err != nil {
+ t.Logf("unable to resolve tcp addr: %v", err)
+ continue
+ }
+ l, err := net.ListenTCP("tcp", addr)
+ if err != nil {
+ t.Logf("unable to listen on addr %q: %v", addr, err)
+ continue
+ }
+
+ port := l.Addr().(*net.TCPAddr).Port
+ l.Close()
+ return port, nil
+ }
+
+ return 0, fmt.Errorf("no free port found")
+}
diff --git a/fx_options_test.go b/fx_options_test.go
new file mode 100644
index 0000000000..48ac79b53d
--- /dev/null
+++ b/fx_options_test.go
@@ -0,0 +1,60 @@
+package libp2p
+
+import (
+ "testing"
+
+ "github.com/libp2p/go-libp2p/core/event"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/p2p/protocol/identify"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/fx"
+)
+
+func TestGetPeerID(t *testing.T) {
+ var id peer.ID
+ host, err := New(
+ WithFxOption(fx.Populate(&id)),
+ )
+ require.NoError(t, err)
+ defer host.Close()
+
+ require.Equal(t, host.ID(), id)
+}
+
+func TestGetEventBus(t *testing.T) {
+ var eb event.Bus
+ host, err := New(
+ NoTransports,
+ WithFxOption(fx.Populate(&eb)),
+ )
+ require.NoError(t, err)
+ defer host.Close()
+
+ require.NotNil(t, eb)
+}
+
+func TestGetHost(t *testing.T) {
+ var h host.Host
+ host, err := New(
+ NoTransports,
+ WithFxOption(fx.Populate(&h)),
+ )
+ require.NoError(t, err)
+ defer host.Close()
+
+ require.NotNil(t, h)
+}
+
+func TestGetIDService(t *testing.T) {
+ var id identify.IDService
+ host, err := New(
+ NoTransports,
+ WithFxOption(fx.Populate(&id)),
+ )
+ require.NoError(t, err)
+ defer host.Close()
+
+ require.NotNil(t, id)
+}
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000000..ecfb024612
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,110 @@
+module github.com/libp2p/go-libp2p
+
+go 1.24.6
+
+retract v0.26.1 // Tag was applied incorrectly due to a bug in the release workflow.
+
+retract v0.36.0 // Accidentally modified the tag.
+
+require (
+ github.com/benbjohnson/clock v1.3.5
+ github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c
+ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0
+ github.com/flynn/noise v1.1.0
+ github.com/google/gopacket v1.1.19
+ github.com/gorilla/websocket v1.5.3
+ github.com/hashicorp/golang-lru/arc/v2 v2.0.7
+ github.com/hashicorp/golang-lru/v2 v2.0.7
+ github.com/huin/goupnp v1.3.0
+ github.com/ipfs/go-cid v0.5.0
+ github.com/ipfs/go-datastore v0.8.2
+ github.com/jackpal/go-nat-pmp v1.0.2
+ github.com/jbenet/go-temp-err-catcher v0.1.0
+ github.com/klauspost/compress v1.18.0
+ github.com/koron/go-ssdp v0.0.6
+ github.com/libp2p/go-buffer-pool v0.1.0
+ github.com/libp2p/go-flow-metrics v0.2.0
+ github.com/libp2p/go-libp2p-asn-util v0.4.1
+ github.com/libp2p/go-libp2p-testing v0.12.0
+ github.com/libp2p/go-msgio v0.3.0
+ github.com/libp2p/go-netroute v0.2.2
+ github.com/libp2p/go-reuseport v0.4.0
+ github.com/libp2p/go-yamux/v5 v5.0.1
+ github.com/libp2p/zeroconf/v2 v2.2.0
+ github.com/marcopolo/simnet v0.0.1
+ github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd
+ github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b
+ github.com/mr-tron/base58 v1.2.0
+ github.com/multiformats/go-base32 v0.1.0
+ github.com/multiformats/go-multiaddr v0.16.0
+ github.com/multiformats/go-multiaddr-dns v0.4.1
+ github.com/multiformats/go-multiaddr-fmt v0.1.0
+ github.com/multiformats/go-multibase v0.2.0
+ github.com/multiformats/go-multicodec v0.9.1
+ github.com/multiformats/go-multihash v0.2.3
+ github.com/multiformats/go-multistream v0.6.1
+ github.com/multiformats/go-varint v0.0.7
+ github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58
+ github.com/pion/datachannel v1.5.10
+ github.com/pion/ice/v4 v4.0.10
+ github.com/pion/logging v0.2.3
+ github.com/pion/sctp v1.8.39
+ github.com/pion/stun v0.6.1
+ github.com/pion/webrtc/v4 v4.1.2
+ github.com/prometheus/client_golang v1.22.0
+ github.com/prometheus/client_model v0.6.2
+ github.com/quic-go/quic-go v0.54.0
+ github.com/quic-go/webtransport-go v0.9.0
+ github.com/stretchr/testify v1.10.0
+ go.uber.org/fx v1.24.0
+ go.uber.org/goleak v1.3.0
+ go.uber.org/mock v0.5.2
+ golang.org/x/crypto v0.39.0
+ golang.org/x/sync v0.15.0
+ golang.org/x/sys v0.33.0
+ golang.org/x/time v0.12.0
+ golang.org/x/tools v0.34.0
+ google.golang.org/protobuf v1.36.6
+)
+
+require (
+ github.com/beorn7/perks v1.0.1 // indirect
+ github.com/cespare/xxhash/v2 v2.3.0 // indirect
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/francoispqt/gojay v1.2.13 // indirect
+ github.com/google/uuid v1.6.0 // indirect
+ github.com/klauspost/cpuid/v2 v2.2.10 // indirect
+ github.com/miekg/dns v1.1.66 // indirect
+ github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
+ github.com/minio/sha256-simd v1.0.1 // indirect
+ github.com/multiformats/go-base36 v0.2.0 // indirect
+ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+ github.com/pion/dtls/v2 v2.2.12 // indirect
+ github.com/pion/dtls/v3 v3.0.6 // indirect
+ github.com/pion/interceptor v0.1.40 // indirect
+ github.com/pion/mdns/v2 v2.0.7 // indirect
+ github.com/pion/randutil v0.1.0 // indirect
+ github.com/pion/rtcp v1.2.15 // indirect
+ github.com/pion/rtp v1.8.19 // indirect
+ github.com/pion/sdp/v3 v3.0.13 // indirect
+ github.com/pion/srtp/v3 v3.0.6 // indirect
+ github.com/pion/stun/v3 v3.0.0 // indirect
+ github.com/pion/transport/v2 v2.2.10 // indirect
+ github.com/pion/transport/v3 v3.0.7 // indirect
+ github.com/pion/turn/v4 v4.0.2 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/prometheus/common v0.64.0 // indirect
+ github.com/prometheus/procfs v0.16.1 // indirect
+ github.com/quic-go/qpack v0.5.1 // indirect
+ github.com/spaolacci/murmur3 v1.1.0 // indirect
+ github.com/wlynxg/anet v0.0.5 // indirect
+ go.uber.org/dig v1.19.0 // indirect
+ go.uber.org/multierr v1.11.0 // indirect
+ go.uber.org/zap v1.27.0 // indirect
+ golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 // indirect
+ golang.org/x/mod v0.25.0 // indirect
+ golang.org/x/net v0.41.0 // indirect
+ golang.org/x/text v0.26.0 // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
+ lukechampine.com/blake3 v1.4.1 // indirect
+)
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000000..41f001038d
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,452 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo=
+dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
+dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
+dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
+dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
+git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
+github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
+github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
+github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU=
+github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U=
+github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8=
+github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
+github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
+github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
+github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
+github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
+github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
+github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
+github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
+github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
+github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
+github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
+github.com/hashicorp/golang-lru/arc/v2 v2.0.7 h1:QxkVTxwColcduO+LP7eJO56r2hFiG8zEbfAAzRv52KQ=
+github.com/hashicorp/golang-lru/arc/v2 v2.0.7/go.mod h1:Pe7gBlGdc8clY5LJ0LpJXMt5AmgmWNH1g+oFFVUHOEc=
+github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
+github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
+github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
+github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
+github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg=
+github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk=
+github.com/ipfs/go-datastore v0.8.2 h1:Jy3wjqQR6sg/LhyY0NIePZC3Vux19nLtg7dx0TVqr6U=
+github.com/ipfs/go-datastore v0.8.2/go.mod h1:W+pI1NsUsz3tcsAACMtfC+IZdnQTnC/7VfPoJBQuts0=
+github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk=
+github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps=
+github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
+github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
+github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk=
+github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk=
+github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
+github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
+github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
+github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
+github.com/koron/go-ssdp v0.0.6 h1:Jb0h04599eq/CY7rB5YEqPS83HmRfHP2azkxMN2rFtU=
+github.com/koron/go-ssdp v0.0.6/go.mod h1:0R9LfRJGek1zWTjN3JUNlm5INCDYGpRDfAptnct63fI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
+github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
+github.com/libp2p/go-flow-metrics v0.2.0 h1:EIZzjmeOE6c8Dav0sNv35vhZxATIXWZg6j/C08XmmDw=
+github.com/libp2p/go-flow-metrics v0.2.0/go.mod h1:st3qqfu8+pMfh+9Mzqb2GTiwrAGjIPszEjZmtksN8Jc=
+github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94=
+github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
+github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA=
+github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg=
+github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0=
+github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM=
+github.com/libp2p/go-netroute v0.2.2 h1:Dejd8cQ47Qx2kRABg6lPwknU7+nBnFRpko45/fFPuZ8=
+github.com/libp2p/go-netroute v0.2.2/go.mod h1:Rntq6jUAH0l9Gg17w5bFGhcC9a+vk4KNXs6s7IljKYE=
+github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s=
+github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU=
+github.com/libp2p/go-yamux/v5 v5.0.1 h1:f0WoX/bEF2E8SbE4c/k1Mo+/9z0O4oC/hWEA+nfYRSg=
+github.com/libp2p/go-yamux/v5 v5.0.1/go.mod h1:en+3cdX51U0ZslwRdRLrvQsdayFt3TSUKvBGErzpWbU=
+github.com/libp2p/zeroconf/v2 v2.2.0 h1:Cup06Jv6u81HLhIj1KasuNM/RHHrJ8T7wOTS4+Tv53Q=
+github.com/libp2p/zeroconf/v2 v2.2.0/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs=
+github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
+github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/marcopolo/simnet v0.0.1 h1:rSMslhPz6q9IvJeFWDoMGxMIrlsbXau3NkuIXHGJxfg=
+github.com/marcopolo/simnet v0.0.1/go.mod h1:WDaQkgLAjqDUEBAOXz22+1j6wXKfGlC5sD5XWt3ddOs=
+github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk=
+github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
+github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
+github.com/miekg/dns v1.1.66 h1:FeZXOS3VCVsKnEAd+wBkjMC3D2K+ww66Cq3VnCINuJE=
+github.com/miekg/dns v1.1.66/go.mod h1:jGFzBsSNbJw6z1HYut1RKBKHA9PBdxeHrZG8J+gC2WE=
+github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
+github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms=
+github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc=
+github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU=
+github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc=
+github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s=
+github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
+github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
+github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
+github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
+github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
+github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
+github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
+github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
+github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
+github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
+github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo=
+github.com/multiformats/go-multiaddr v0.16.0 h1:oGWEVKioVQcdIOBlYM8BH1rZDWOGJSqr9/BKl6zQ4qc=
+github.com/multiformats/go-multiaddr v0.16.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0=
+github.com/multiformats/go-multiaddr-dns v0.4.1 h1:whi/uCLbDS3mSEUMb1MsoT4uzUeZB0N32yzufqS0i5M=
+github.com/multiformats/go-multiaddr-dns v0.4.1/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc=
+github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=
+github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo=
+github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
+github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
+github.com/multiformats/go-multicodec v0.9.1 h1:x/Fuxr7ZuR4jJV4Os5g444F7xC4XmyUaT/FWtE+9Zjo=
+github.com/multiformats/go-multicodec v0.9.1/go.mod h1:LLWNMtyV5ithSBUo3vFIMaeDy+h3EbkMTek1m+Fybbo=
+github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
+github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
+github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
+github.com/multiformats/go-multistream v0.6.1 h1:4aoX5v6T+yWmc2raBHsTvzmFhOI8WVOer28DeBBEYdQ=
+github.com/multiformats/go-multistream v0.6.1/go.mod h1:ksQf6kqHAb6zIsyw7Zm+gAuVo57Qbq84E27YlYqavqw=
+github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
+github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
+github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
+github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
+github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
+github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
+github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o=
+github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oLo8Rs4Py/M=
+github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s=
+github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk=
+github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE=
+github.com/pion/dtls/v3 v3.0.6 h1:7Hkd8WhAJNbRgq9RgdNh1aaWlZlGpYTzdqjy9x9sK2E=
+github.com/pion/dtls/v3 v3.0.6/go.mod h1:iJxNQ3Uhn1NZWOMWlLxEEHAN5yX7GyPvvKw04v9bzYU=
+github.com/pion/ice/v4 v4.0.10 h1:P59w1iauC/wPk9PdY8Vjl4fOFL5B+USq1+xbDcN6gT4=
+github.com/pion/ice/v4 v4.0.10/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw=
+github.com/pion/interceptor v0.1.40 h1:e0BjnPcGpr2CFQgKhrQisBU7V3GXK6wrfYrGYaU6Jq4=
+github.com/pion/interceptor v0.1.40/go.mod h1:Z6kqH7M/FYirg3frjGJ21VLSRJGBXB/KqaTIrdqnOic=
+github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms=
+github.com/pion/logging v0.2.3 h1:gHuf0zpoh1GW67Nr6Gj4cv5Z9ZscU7g/EaoC/Ke/igI=
+github.com/pion/logging v0.2.3/go.mod h1:z8YfknkquMe1csOrxK5kc+5/ZPAzMxbKLX5aXpbpC90=
+github.com/pion/mdns/v2 v2.0.7 h1:c9kM8ewCgjslaAmicYMFQIde2H9/lrZpjBkN8VwoVtM=
+github.com/pion/mdns/v2 v2.0.7/go.mod h1:vAdSYNAT0Jy3Ru0zl2YiW3Rm/fJCwIeM0nToenfOJKA=
+github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA=
+github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8=
+github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo=
+github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0=
+github.com/pion/rtp v1.8.19 h1:jhdO/3XhL/aKm/wARFVmvTfq0lC/CvN1xwYKmduly3c=
+github.com/pion/rtp v1.8.19/go.mod h1:bAu2UFKScgzyFqvUKmbvzSdPr+NGbZtv6UB2hesqXBk=
+github.com/pion/sctp v1.8.39 h1:PJma40vRHa3UTO3C4MyeJDQ+KIobVYRZQZ0Nt7SjQnE=
+github.com/pion/sctp v1.8.39/go.mod h1:cNiLdchXra8fHQwmIoqw0MbLLMs+f7uQ+dGMG2gWebE=
+github.com/pion/sdp/v3 v3.0.13 h1:uN3SS2b+QDZnWXgdr69SM8KB4EbcnPnPf2Laxhty/l4=
+github.com/pion/sdp/v3 v3.0.13/go.mod h1:88GMahN5xnScv1hIMTqLdu/cOcUkj6a9ytbncwMCq2E=
+github.com/pion/srtp/v3 v3.0.6 h1:E2gyj1f5X10sB/qILUGIkL4C2CqK269Xq167PbGCc/4=
+github.com/pion/srtp/v3 v3.0.6/go.mod h1:BxvziG3v/armJHAaJ87euvkhHqWe9I7iiOy50K2QkhY=
+github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4=
+github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8=
+github.com/pion/stun/v3 v3.0.0 h1:4h1gwhWLWuZWOJIJR9s2ferRO+W3zA/b6ijOI6mKzUw=
+github.com/pion/stun/v3 v3.0.0/go.mod h1:HvCN8txt8mwi4FBvS3EmDghW6aQJ24T+y+1TKjB5jyU=
+github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g=
+github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0=
+github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q=
+github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E=
+github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0=
+github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo=
+github.com/pion/turn/v4 v4.0.2 h1:ZqgQ3+MjP32ug30xAbD6Mn+/K4Sxi3SdNOTFf+7mpps=
+github.com/pion/turn/v4 v4.0.2/go.mod h1:pMMKP/ieNAG/fN5cZiN4SDuyKsXtNTr0ccN7IToA1zs=
+github.com/pion/webrtc/v4 v4.1.2 h1:mpuUo/EJ1zMNKGE79fAdYNFZBX790KE7kQQpLMjjR54=
+github.com/pion/webrtc/v4 v4.1.2/go.mod h1:xsCXiNAmMEjIdFxAYU0MbB3RwRieJsegSB2JZsGN+8U=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
+github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
+github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4=
+github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
+github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
+github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
+github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
+github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
+github.com/quic-go/quic-go v0.54.0 h1:6s1YB9QotYI6Ospeiguknbp2Znb/jZYjZLRXn9kMQBg=
+github.com/quic-go/quic-go v0.54.0/go.mod h1:e68ZEaCdyviluZmy44P6Iey98v/Wfz6HCjQEm+l8zTY=
+github.com/quic-go/webtransport-go v0.9.0 h1:jgys+7/wm6JarGDrW+lD/r9BGqBAmqY/ssklE09bA70=
+github.com/quic-go/webtransport-go v0.9.0/go.mod h1:4FUYIiUc75XSsF6HShcLeXXYZJ9AGwo/xh3L8M/P1ao=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
+github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
+github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
+github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
+github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0=
+github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
+github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
+github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw=
+github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI=
+github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU=
+github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag=
+github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg=
+github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw=
+github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y=
+github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
+github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q=
+github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ=
+github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I=
+github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0=
+github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ=
+github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk=
+github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
+github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
+github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
+github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
+github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
+github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
+github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
+github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
+github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
+github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU=
+github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
+go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4=
+go.uber.org/dig v1.19.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
+go.uber.org/fx v1.24.0 h1:wE8mruvpg2kiiL1Vqd0CC+tr0/24XIB10Iwp2lLWzkg=
+go.uber.org/fx v1.24.0/go.mod h1:AmDeGyS+ZARGKM4tlH4FY2Jr63VjbEDJHtqXTGP5hbo=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko=
+go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
+go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
+golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
+golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
+golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
+golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
+golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
+golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 h1:bsqhLWFR6G6xiQcb+JoGqdKdRU6WzPWmK8E0jxTjzo4=
+golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8=
+golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w=
+golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
+golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
+golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
+golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
+golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
+golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
+golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
+golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
+golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
+golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
+golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo=
+golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
+google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
+google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
+google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
+google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
+google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
+honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg=
+lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo=
+sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
+sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
diff --git a/gologshim/gologshim.go b/gologshim/gologshim.go
new file mode 100644
index 0000000000..f41800eafb
--- /dev/null
+++ b/gologshim/gologshim.go
@@ -0,0 +1,146 @@
+package gologshim
+
+import (
+ "fmt"
+ "log/slog"
+ "os"
+ "strings"
+ "sync"
+)
+
+var lvlToLower = map[slog.Level]slog.Value{
+ slog.LevelDebug: slog.StringValue("debug"),
+ slog.LevelInfo: slog.StringValue("info"),
+ slog.LevelWarn: slog.StringValue("warn"),
+ slog.LevelError: slog.StringValue("error"),
+}
+
+// Logger returns a *slog.Logger whose logging level is defined by the
+// GOLOG_LOG_LEVEL env var. Different levels may be set for different systems, e.g.
+// GOLOG_LOG_LEVEL=foo=info,bar=debug,warn
+// sets the foo system to level info, the bar system to level debug, and the
+// fallback level to warn.
+//
+// Prefer a parameterized logger over a global logger.
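+//
+// A minimal usage sketch (the "relay" system name is only an example):
+//
+//	log := gologshim.Logger("relay")
+//	log.Info("listener started", "port", 4001)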
+func Logger(system string) *slog.Logger {
+ var h slog.Handler
+ c := ConfigFromEnv()
+ handlerOpts := &slog.HandlerOptions{
+ Level: c.LevelForSystem(system),
+ AddSource: c.addSource,
+ ReplaceAttr: func(_ []string, a slog.Attr) slog.Attr {
+ if a.Key == slog.TimeKey {
+ // ipfs go-log uses "ts" for time
+ a.Key = "ts"
+ } else if a.Key == slog.LevelKey {
+ // ipfs go-log uses lowercase level names
+ if lvl, ok := a.Value.Any().(slog.Level); ok {
+ if s, ok := lvlToLower[lvl]; ok {
+ a.Value = s
+ }
+ }
+ }
+ return a
+ },
+ }
+ if c.format == logFormatText {
+ h = slog.NewTextHandler(os.Stderr, handlerOpts)
+ } else {
+ h = slog.NewJSONHandler(os.Stderr, handlerOpts)
+ }
+ // Reserve capacity only; appending to a length-(1+len(labels)) slice would
+ // prepend that many zero-valued attributes before the real ones.
+ attrs := make([]slog.Attr, 0, 1+len(c.labels))
+ attrs = append(attrs, slog.String("logger", system))
+ attrs = append(attrs, c.labels...)
+ h = h.WithAttrs(attrs)
+ return slog.New(h)
+}
+
+type logFormat int
+
+const (
+ logFormatText logFormat = iota
+ logFormatJSON
+)
+
+// Config holds the logging configuration, normally derived from the GOLOG_*
+// environment variables via ConfigFromEnv.
+type Config struct {
+ fallbackLvl slog.Level // level used when no per-system level matches
+ systemToLevel map[string]slog.Level // per-system level overrides
+ format logFormat // text (default) or JSON output
+ addSource bool // whether to include source file/line in records
+ labels []slog.Attr // extra attributes attached to every record
+}
+
+func (c *Config) LevelForSystem(system string) slog.Level {
+ if lvl, ok := c.systemToLevel[system]; ok {
+ return lvl
+ }
+ return c.fallbackLvl
+}
+
+var ConfigFromEnv func() *Config = sync.OnceValue(func() *Config {
+ fallback, systemToLevel, err := parseIPFSGoLogEnv(os.Getenv("GOLOG_LOG_LEVEL"))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to parse GOLOG_LOG_LEVEL: %v", err)
+ fallback = slog.LevelInfo
+ }
+ c := &Config{
+ fallbackLvl: fallback,
+ systemToLevel: systemToLevel,
+ addSource: true,
+ }
+
+ logFmt := os.Getenv("GOLOG_LOG_FORMAT")
+ if logFmt == "" {
+ logFmt = os.Getenv("GOLOG_LOG_FMT")
+ }
+ if logFmt == "json" {
+ c.format = logFormatJSON
+ }
+
+ logFmt = os.Getenv("GOLOG_LOG_ADD_SOURCE")
+ if logFmt == "0" || logFmt == "false" {
+ c.addSource = false
+ }
+
+ labels := os.Getenv("GOLOG_LOG_LABELS")
+ if labels != "" {
+ for _, label := range strings.Split(labels, ",") {
+ kv := strings.SplitN(label, "=", 2)
+ if len(kv) == 2 {
+ c.labels = append(c.labels, slog.String(kv[0], kv[1]))
+ } else {
+ fmt.Fprintf(os.Stderr, "Invalid label format: %s\n", label)
+ }
+ }
+ }
+
+ return c
+})
+
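+// parseIPFSGoLogEnv parses a GOLOG_LOG_LEVEL-style string. A bare level such
+// as "error" sets the fallback level, while a "system=level" pair sets a
+// per-system override; e.g. "foo=info,bar=debug,warn" yields per-system
+// levels for foo and bar and a fallback level of warn.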
+func parseIPFSGoLogEnv(loggingLevelEnvStr string) (slog.Level, map[string]slog.Level, error) {
+ fallbackLvl := slog.LevelError
+ var systemToLevel map[string]slog.Level
+ if loggingLevelEnvStr != "" {
+ for _, kvs := range strings.Split(loggingLevelEnvStr, ",") {
+ kv := strings.SplitN(kvs, "=", 2)
+ var lvl slog.Level
+ err := lvl.UnmarshalText([]byte(kv[len(kv)-1]))
+ if err != nil {
+ return lvl, nil, err
+ }
+ switch len(kv) {
+ case 1:
+ fallbackLvl = lvl
+ case 2:
+ if systemToLevel == nil {
+ systemToLevel = make(map[string]slog.Level)
+ }
+ systemToLevel[kv[0]] = lvl
+ }
+ }
+ }
+ return fallbackLvl, systemToLevel, nil
+}
diff --git a/leaky_tests/README.md b/leaky_tests/README.md
new file mode 100644
index 0000000000..398a91a8e7
--- /dev/null
+++ b/leaky_tests/README.md
@@ -0,0 +1 @@
+Tests that leak goroutines for various reasons, mostly because the libp2p node shutdown logic doesn't run if node construction fails.
diff --git a/leaky_tests/leaky_test.go b/leaky_tests/leaky_test.go
new file mode 100644
index 0000000000..172b656149
--- /dev/null
+++ b/leaky_tests/leaky_test.go
@@ -0,0 +1,19 @@
+package leaky_test
+
+import (
+ "strings"
+ "testing"
+
+ "github.com/libp2p/go-libp2p"
+)
+
+func TestBadTransportConstructor(t *testing.T) {
+ h, err := libp2p.New(libp2p.Transport(func() {}))
+ if err == nil {
+ h.Close()
+ t.Fatal("expected an error")
+ }
+ if !strings.Contains(err.Error(), "_test.go") {
+ t.Error("expected error to contain debugging info")
+ }
+}
diff --git a/libp2p.go b/libp2p.go
index bfb0c18193..1440648db3 100644
--- a/libp2p.go
+++ b/libp2p.go
@@ -1,241 +1,68 @@
package libp2p
import (
- "context"
- "crypto/rand"
- "fmt"
-
- crypto "github.com/libp2p/go-libp2p-crypto"
- host "github.com/libp2p/go-libp2p-host"
- pnet "github.com/libp2p/go-libp2p-interface-pnet"
- metrics "github.com/libp2p/go-libp2p-metrics"
- peer "github.com/libp2p/go-libp2p-peer"
- pstore "github.com/libp2p/go-libp2p-peerstore"
- swarm "github.com/libp2p/go-libp2p-swarm"
- transport "github.com/libp2p/go-libp2p-transport"
- bhost "github.com/libp2p/go-libp2p/p2p/host/basic"
- mux "github.com/libp2p/go-stream-muxer"
- ma "github.com/multiformats/go-multiaddr"
- mplex "github.com/whyrusleeping/go-smux-multiplex"
- msmux "github.com/whyrusleeping/go-smux-multistream"
- yamux "github.com/whyrusleeping/go-smux-yamux"
+ "github.com/libp2p/go-libp2p/config"
+ "github.com/libp2p/go-libp2p/core/host"
)
-// Config describes a set of settings for a libp2p node
-type Config struct {
- Transports []transport.Transport
- Muxer mux.Transport
- ListenAddrs []ma.Multiaddr
- PeerKey crypto.PrivKey
- Peerstore pstore.Peerstore
- Protector pnet.Protector
- Reporter metrics.Reporter
- DisableSecio bool
- EnableNAT bool
-}
-
-type Option func(cfg *Config) error
+// Config describes a set of settings for a libp2p node.
+type Config = config.Config
-func Transports(tpts ...transport.Transport) Option {
- return func(cfg *Config) error {
- cfg.Transports = append(cfg.Transports, tpts...)
- return nil
- }
-}
+// Option is a libp2p config option that can be given to the libp2p constructor
+// (`libp2p.New`).
+type Option = config.Option
-func ListenAddrStrings(s ...string) Option {
+// ChainOptions chains multiple options into a single option.
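+//
+// A minimal usage sketch (tcpOnly is a hypothetical name, not part of the
+// API):
+//
+//	tcpOnly := libp2p.ChainOptions(
+//		libp2p.Transport(tcp.NewTCPTransport),
+//		libp2p.ListenAddrStrings("/ip4/0.0.0.0/tcp/0"),
+//	)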
+func ChainOptions(opts ...Option) Option {
return func(cfg *Config) error {
- for _, addrstr := range s {
- a, err := ma.NewMultiaddr(addrstr)
- if err != nil {
+ for _, opt := range opts {
+ if opt == nil {
+ continue
+ }
+ if err := opt(cfg); err != nil {
return err
}
- cfg.ListenAddrs = append(cfg.ListenAddrs, a)
- }
- return nil
- }
-}
-
-func ListenAddrs(addrs ...ma.Multiaddr) Option {
- return func(cfg *Config) error {
- cfg.ListenAddrs = append(cfg.ListenAddrs, addrs...)
- return nil
- }
-}
-
-type transportEncOpt int
-
-const (
- EncPlaintext = transportEncOpt(0)
- EncSecio = transportEncOpt(1)
-)
-
-func TransportEncryption(tenc ...transportEncOpt) Option {
- return func(cfg *Config) error {
- if len(tenc) != 1 {
- return fmt.Errorf("can only specify a single transport encryption option right now")
- }
-
- // TODO: actually make this pluggable, otherwise tls will get tricky
- switch tenc[0] {
- case EncPlaintext:
- cfg.DisableSecio = true
- case EncSecio:
- // noop
- default:
- return fmt.Errorf("unrecognized transport encryption option: %d", tenc[0])
- }
- return nil
- }
-}
-
-func NoEncryption() Option {
- return TransportEncryption(EncPlaintext)
-}
-
-func NATPortMap() Option {
- return func(cfg *Config) error {
- cfg.EnableNAT = true
- return nil
- }
-}
-
-func Muxer(m mux.Transport) Option {
- return func(cfg *Config) error {
- if cfg.Muxer != nil {
- return fmt.Errorf("cannot specify multiple muxer options")
- }
-
- cfg.Muxer = m
- return nil
- }
-}
-
-func Peerstore(ps pstore.Peerstore) Option {
- return func(cfg *Config) error {
- if cfg.Peerstore != nil {
- return fmt.Errorf("cannot specify multiple peerstore options")
- }
-
- cfg.Peerstore = ps
- return nil
- }
-}
-
-func PrivateNetwork(prot pnet.Protector) Option {
- return func(cfg *Config) error {
- if cfg.Protector != nil {
- return fmt.Errorf("cannot specify multiple private network options")
- }
-
- cfg.Protector = prot
- return nil
- }
-}
-
-func BandwidthReporter(rep metrics.Reporter) Option {
- return func(cfg *Config) error {
- if cfg.Reporter != nil {
- return fmt.Errorf("cannot specify multiple bandwidth reporter options")
- }
-
- cfg.Reporter = rep
- return nil
- }
-}
-
-func Identity(sk crypto.PrivKey) Option {
- return func(cfg *Config) error {
- if cfg.PeerKey != nil {
- return fmt.Errorf("cannot specify multiple identities")
}
-
- cfg.PeerKey = sk
return nil
}
}
-func New(ctx context.Context, opts ...Option) (host.Host, error) {
+// New constructs a new libp2p node with the given options, falling back on
+// reasonable defaults. The defaults are:
+//
+// - If no transport and listen addresses are provided, the node listens on
+// the multiaddresses "/ip4/0.0.0.0/tcp/0" and "/ip6/::/tcp/0";
+//
+// - If no transport options are provided, the node uses the TCP, WebSocket and
+// QUIC transports;
+//
+// - If no multiplexer configuration is provided, the node is configured by
+// default to use yamux;
+//
+// - If no security transport is provided, the host uses go-libp2p's Noise
+// and/or TLS encrypted transports to encrypt all traffic;
+//
+// - If no peer identity is provided, it generates a random Ed25519 key pair
+// and derives a new identity from it;
+//
+// - If no peerstore is provided, the host is initialized with an empty
+// peerstore.
+//
+// To stop the returned libp2p node, call `Close` on the returned Host.
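+//
+// A minimal usage sketch:
+//
+//	h, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0"))
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer h.Close()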
+func New(opts ...Option) (host.Host, error) {
+ return NewWithoutDefaults(append(opts, FallbackDefaults)...)
+}
+
+// NewWithoutDefaults constructs a new libp2p node with the given options but
+// *without* falling back on reasonable defaults.
+//
+// Warning: This function should not be considered a stable interface. We may
+// choose to add required services at any time and, by using this function, you
+// opt-out of any defaults we may provide.
+func NewWithoutDefaults(opts ...Option) (host.Host, error) {
var cfg Config
- for _, opt := range opts {
- if err := opt(&cfg); err != nil {
- return nil, err
- }
- }
-
- return newWithCfg(ctx, &cfg)
-}
-
-func newWithCfg(ctx context.Context, cfg *Config) (host.Host, error) {
- // If no key was given, generate a random 2048 bit RSA key
- if cfg.PeerKey == nil {
- priv, _, err := crypto.GenerateKeyPairWithReader(crypto.RSA, 2048, rand.Reader)
- if err != nil {
- return nil, err
- }
- cfg.PeerKey = priv
- }
-
- // Obtain Peer ID from public key
- pid, err := peer.IDFromPublicKey(cfg.PeerKey.GetPublic())
- if err != nil {
- return nil, err
- }
-
- // Create a new blank peerstore if none was passed in
- ps := cfg.Peerstore
- if ps == nil {
- ps = pstore.NewPeerstore()
- }
-
- // Set default muxer if none was passed in
- muxer := cfg.Muxer
- if muxer == nil {
- muxer = DefaultMuxer()
- }
-
- // If secio is disabled, don't add our private key to the peerstore
- if !cfg.DisableSecio {
- ps.AddPrivKey(pid, cfg.PeerKey)
- ps.AddPubKey(pid, cfg.PeerKey.GetPublic())
- }
-
- swrm, err := swarm.NewSwarmWithProtector(ctx, cfg.ListenAddrs, pid, ps, cfg.Protector, muxer, cfg.Reporter)
- if err != nil {
+ if err := cfg.Apply(opts...); err != nil {
return nil, err
}
-
- netw := (*swarm.Network)(swrm)
-
- hostOpts := &bhost.HostOpts{}
-
- if cfg.EnableNAT {
- hostOpts.NATManager = bhost.NewNATManager(netw)
- }
-
- return bhost.NewHost(ctx, netw, hostOpts)
-}
-
-func DefaultMuxer() mux.Transport {
- // Set up stream multiplexer
- tpt := msmux.NewBlankTransport()
-
- // By default, support yamux and multiplex
- tpt.AddTransport("/yamux/1.0.0", yamux.DefaultTransport)
- tpt.AddTransport("/mplex/6.3.0", mplex.DefaultTransport)
-
- return tpt
-}
-
-func Defaults(cfg *Config) error {
- // Create a multiaddress that listens on a random port on all interfaces
- addr, err := ma.NewMultiaddr("/ip4/0.0.0.0/tcp/0")
- if err != nil {
- return err
- }
-
- cfg.ListenAddrs = []ma.Multiaddr{addr}
- cfg.Peerstore = pstore.NewPeerstore()
- cfg.Muxer = DefaultMuxer()
- return nil
+ return cfg.NewNode()
}
diff --git a/libp2p_test.go b/libp2p_test.go
index d2ab62e4d1..df1c793d53 100644
--- a/libp2p_test.go
+++ b/libp2p_test.go
@@ -2,33 +2,843 @@ package libp2p
import (
"context"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/tls"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "errors"
"fmt"
+ "io"
+ "math/big"
+ "net"
+ "net/netip"
+ "regexp"
+ "strconv"
+ "strings"
"testing"
+ "time"
- crypto "github.com/libp2p/go-libp2p-crypto"
- host "github.com/libp2p/go-libp2p-host"
+ "github.com/libp2p/go-libp2p/core/connmgr"
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/libp2p/go-libp2p/core/pnet"
+ "github.com/libp2p/go-libp2p/core/routing"
+ "github.com/libp2p/go-libp2p/core/transport"
+ rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
+ "github.com/libp2p/go-libp2p/p2p/net/swarm"
+ "github.com/libp2p/go-libp2p/p2p/protocol/ping"
+ "github.com/libp2p/go-libp2p/p2p/security/noise"
+ sectls "github.com/libp2p/go-libp2p/p2p/security/tls"
+ quic "github.com/libp2p/go-libp2p/p2p/transport/quic"
+ "github.com/libp2p/go-libp2p/p2p/transport/quicreuse"
+ "github.com/libp2p/go-libp2p/p2p/transport/tcp"
+ libp2pwebrtc "github.com/libp2p/go-libp2p/p2p/transport/webrtc"
+ "github.com/libp2p/go-libp2p/p2p/transport/websocket"
+ webtransport "github.com/libp2p/go-libp2p/p2p/transport/webtransport"
+ "go.uber.org/goleak"
+
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/stretchr/testify/require"
)
func TestNewHost(t *testing.T) {
- _, err := makeRandomHost(t, 9000)
+ h, err := makeRandomHost(t, 9000)
if err != nil {
t.Fatal(err)
}
+ h.Close()
}
-func makeRandomHost(t *testing.T, port int) (host.Host, error) {
+func TestTransportConstructor(t *testing.T) {
+ ctor := func(
+ _ host.Host,
+ _ connmgr.ConnectionGater,
+ upgrader transport.Upgrader,
+ ) transport.Transport {
+ tpt, err := tcp.NewTCPTransport(upgrader, nil, nil)
+ require.NoError(t, err)
+ return tpt
+ }
+ h, err := New(Transport(ctor))
+ require.NoError(t, err)
+ h.Close()
+}
+
+func TestNoListenAddrs(t *testing.T) {
+ h, err := New(NoListenAddrs)
+ require.NoError(t, err)
+ defer h.Close()
+ if len(h.Addrs()) != 0 {
+ t.Fatal("expected no addresses")
+ }
+}
+
+func TestNoTransports(t *testing.T) {
ctx := context.Background()
- priv, _, err := crypto.GenerateKeyPair(crypto.RSA, 2048)
- if err != nil {
- t.Fatal(err)
+ a, err := New(NoTransports)
+ require.NoError(t, err)
+ defer a.Close()
+
+ b, err := New(ListenAddrStrings("/ip4/127.0.0.1/tcp/0"))
+ require.NoError(t, err)
+ defer b.Close()
+
+ err = a.Connect(ctx, peer.AddrInfo{
+ ID: b.ID(),
+ Addrs: b.Addrs(),
+ })
+ if err == nil {
+ t.Error("dial should have failed as no transports have been configured")
+ }
+}
+
+func TestInsecure(t *testing.T) {
+ h, err := New(NoSecurity)
+ require.NoError(t, err)
+ h.Close()
+}
+
+func TestDefaultListenAddrs(t *testing.T) {
+ reTCP := regexp.MustCompile("/ip[46]/((0.0.0.0)|(::))/tcp/")
+ reQUIC := regexp.MustCompile("/ip[46]/((0.0.0.0)|(::))/udp/([0-9]*)/quic-v1")
+ reWebRTC := regexp.MustCompile("/ip[46]/((0.0.0.0)|(::))/udp/([0-9]*)/webrtc-direct/certhash/(.*)")
+ reCircuit := regexp.MustCompile("/p2p-circuit")
+
+ // Test 1: Setting the correct listen addresses if userDefined.Transport == nil && userDefined.ListenAddrs == nil
+ h, err := New()
+ require.NoError(t, err)
+ for _, addr := range h.Network().ListenAddresses() {
+ if reTCP.FindStringSubmatchIndex(addr.String()) == nil &&
+ reQUIC.FindStringSubmatchIndex(addr.String()) == nil &&
+ reWebRTC.FindStringSubmatchIndex(addr.String()) == nil &&
+ reCircuit.FindStringSubmatchIndex(addr.String()) == nil {
+ t.Error("expected ip4 or ip6 or relay interface")
+ }
}
- opts := []Option{
+ h.Close()
+
+ // Test 2: Listen addr only include relay if user defined transport is passed.
+ h, err = New(Transport(tcp.NewTCPTransport))
+ require.NoError(t, err)
+
+ if len(h.Network().ListenAddresses()) != 1 {
+ t.Error("expected one listen addr with user defined transport")
+ }
+ if reCircuit.FindStringSubmatchIndex(h.Network().ListenAddresses()[0].String()) == nil {
+ t.Error("expected relay address")
+ }
+ h.Close()
+}
+
+func makeRandomHost(t *testing.T, port int) (host.Host, error) {
+ priv, _, err := crypto.GenerateKeyPair(crypto.RSA, 2048)
+ require.NoError(t, err)
+
+ return New([]Option{
ListenAddrStrings(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", port)),
Identity(priv),
- Muxer(DefaultMuxer()),
+ DefaultTransports,
+ DefaultMuxers,
+ DefaultSecurity,
NATPortMap(),
+ }...)
+}
+
+func TestChainOptions(t *testing.T) {
+ var cfg Config
+ var optsRun []int
+ optcount := 0
+ newOpt := func() Option {
+ index := optcount
+ optcount++
+ return func(_ *Config) error {
+ optsRun = append(optsRun, index)
+ return nil
+ }
+ }
+
+ if err := cfg.Apply(newOpt(), nil, ChainOptions(newOpt(), newOpt(), ChainOptions(), ChainOptions(nil, newOpt()))); err != nil {
+ t.Fatal(err)
+ }
+
+ // Make sure we ran all options.
+ if len(optsRun) != optcount {
+ t.Errorf("expected to have run %d options, ran %d", optcount, len(optsRun))
+ }
+
+ // Make sure we ran the options in-order.
+ for i, x := range optsRun {
+ if i != x {
+ t.Errorf("expected opt %d, got opt %d", i, x)
+ }
+ }
+}
+
+func TestTransportConstructorTCP(t *testing.T) {
+ h, err := New(
+ Transport(tcp.NewTCPTransport, tcp.DisableReuseport()),
+ DisableRelay(),
+ )
+ require.NoError(t, err)
+ defer h.Close()
+ require.NoError(t, h.Network().Listen(ma.StringCast("/ip4/127.0.0.1/tcp/0")))
+ err = h.Network().Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1"))
+ require.Error(t, err)
+ require.Contains(t, err.Error(), swarm.ErrNoTransport.Error())
+}
+
+func TestTransportConstructorQUIC(t *testing.T) {
+ h, err := New(
+ Transport(quic.NewTransport),
+ DisableRelay(),
+ )
+ require.NoError(t, err)
+ defer h.Close()
+ require.NoError(t, h.Network().Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1")))
+ err = h.Network().Listen(ma.StringCast("/ip4/127.0.0.1/tcp/0"))
+ require.Error(t, err)
+ require.Contains(t, err.Error(), swarm.ErrNoTransport.Error())
+}
+
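+// mockTransport is a minimal transport stub used to verify that transport
+// constructors are invoked; every method except Protocols panics.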
+type mockTransport struct{}
+
+func (m mockTransport) Dial(context.Context, ma.Multiaddr, peer.ID) (transport.CapableConn, error) {
+ panic("implement me")
+}
+
+func (m mockTransport) CanDial(ma.Multiaddr) bool { panic("implement me") }
+func (m mockTransport) Listen(ma.Multiaddr) (transport.Listener, error) { panic("implement me") }
+func (m mockTransport) Protocols() []int { return []int{1337} }
+func (m mockTransport) Proxy() bool { panic("implement me") }
+
+var _ transport.Transport = &mockTransport{}
+
+func TestTransportConstructorWithoutOpts(t *testing.T) {
+ t.Run("successful", func(t *testing.T) {
+ var called bool
+ constructor := func() transport.Transport {
+ called = true
+ return &mockTransport{}
+ }
+
+ h, err := New(
+ Transport(constructor),
+ DisableRelay(),
+ )
+ require.NoError(t, err)
+ require.True(t, called, "expected constructor to be called")
+ defer h.Close()
+ })
+
+ t.Run("with options", func(t *testing.T) {
+ var called bool
+ constructor := func() transport.Transport {
+ called = true
+ return &mockTransport{}
+ }
+
+ _, err := New(
+ Transport(constructor, tcp.DisableReuseport()),
+ DisableRelay(),
+ )
+ require.EqualError(t, err, "transport constructor doesn't take any options")
+ require.False(t, called, "didn't expected constructor to be called")
+ })
+}
+
+func TestTransportConstructorWithWrongOpts(t *testing.T) {
+ _, err := New(
+ Transport(quic.NewTransport, tcp.DisableReuseport()),
+ DisableRelay(),
+ )
+ require.EqualError(t, err, "transport constructor doesn't take any options")
+}
+
+func TestSecurityConstructor(t *testing.T) {
+ h, err := New(
+ Transport(tcp.NewTCPTransport),
+ Security("/noisy", noise.New),
+ Security("/tls", sectls.New),
+ DefaultListenAddrs,
+ DisableRelay(),
+ )
+ require.NoError(t, err)
+ defer h.Close()
+
+ h1, err := New(
+ NoListenAddrs,
+ Transport(tcp.NewTCPTransport),
+ Security("/noise", noise.New), // different name
+ DisableRelay(),
+ )
+ require.NoError(t, err)
+ defer h1.Close()
+
+ h2, err := New(
+ NoListenAddrs,
+ Transport(tcp.NewTCPTransport),
+ Security("/noisy", noise.New),
+ DisableRelay(),
+ )
+ require.NoError(t, err)
+ defer h2.Close()
+
+ ai := peer.AddrInfo{
+ ID: h.ID(),
+ Addrs: h.Addrs(),
}
+ err = h1.Connect(context.Background(), ai)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "failed to negotiate security protocol")
+ require.NoError(t, h2.Connect(context.Background(), ai))
+}
+
+func TestTransportConstructorWebTransport(t *testing.T) {
+ h, err := New(
+ Transport(webtransport.New),
+ DisableRelay(),
+ )
+ require.NoError(t, err)
+ defer h.Close()
+ require.NoError(t, h.Network().Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")))
+ err = h.Network().Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/"))
+ require.Error(t, err)
+ require.Contains(t, err.Error(), swarm.ErrNoTransport.Error())
+}
+
+func TestTransportCustomAddressWebTransport(t *testing.T) {
+ customAddr, err := ma.NewMultiaddr("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")
+ if err != nil {
+ t.Fatal(err)
+ }
+ h, err := New(
+ Transport(webtransport.New),
+ ListenAddrs(customAddr),
+ DisableRelay(),
+ AddrsFactory(func(_ []ma.Multiaddr) []ma.Multiaddr {
+ return []ma.Multiaddr{customAddr}
+ }),
+ )
+ require.NoError(t, err)
+ defer h.Close()
+ require.NoError(t, h.Network().Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")))
+ addrs := h.Addrs()
+ require.Len(t, addrs, 1)
+ require.NotEqual(t, addrs[0], customAddr)
+ restOfAddr, lastComp := ma.SplitLast(addrs[0])
+ restOfAddr, secondToLastComp := ma.SplitLast(restOfAddr)
+ require.Equal(t, ma.P_CERTHASH, lastComp.Protocol().Code)
+ require.Equal(t, ma.P_CERTHASH, secondToLastComp.Protocol().Code)
+ require.True(t, restOfAddr.Equal(customAddr))
+}
- return New(ctx, opts...)
+// TestTransportCustomAddressWebTransportDoesNotStall tests that if the user
+// manually returns a webtransport address from AddrsFactory but we aren't
+// listening on a webtransport address, we don't stall.
+func TestTransportCustomAddressWebTransportDoesNotStall(t *testing.T) {
+ customAddr, err := ma.NewMultiaddr("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")
+ if err != nil {
+ t.Fatal(err)
+ }
+ h, err := New(
+ Transport(webtransport.New),
+ // Purposely not listening on the custom address so that we make sure the node doesn't stall if it fails to add a certhash to the multiaddr
+ // ListenAddrs(customAddr),
+ DisableRelay(),
+ AddrsFactory(func(_ []ma.Multiaddr) []ma.Multiaddr {
+ return []ma.Multiaddr{customAddr}
+ }),
+ )
+ require.NoError(t, err)
+ defer h.Close()
+ addrs := h.Addrs()
+ require.Len(t, addrs, 1)
+ _, lastComp := ma.SplitLast(addrs[0])
+ require.NotEqual(t, ma.P_CERTHASH, lastComp.Protocol().Code)
+ // We did not add the certhash to the multiaddr
+ require.Equal(t, addrs[0], customAddr)
+}
+
+type mockPeerRouting struct {
+ queried []peer.ID
+}
+
+func (r *mockPeerRouting) FindPeer(_ context.Context, id peer.ID) (peer.AddrInfo, error) {
+ r.queried = append(r.queried, id)
+ return peer.AddrInfo{}, errors.New("mock peer routing error")
+}
+
+func TestRoutedHost(t *testing.T) {
+ mockRouter := &mockPeerRouting{}
+ h, err := New(
+ NoListenAddrs,
+ Routing(func(host.Host) (routing.PeerRouting, error) { return mockRouter, nil }),
+ DisableRelay(),
+ )
+ require.NoError(t, err)
+ defer h.Close()
+
+ priv, _, err := crypto.GenerateEd25519Key(rand.Reader)
+ require.NoError(t, err)
+ id, err := peer.IDFromPrivateKey(priv)
+ require.NoError(t, err)
+ require.EqualError(t, h.Connect(context.Background(), peer.AddrInfo{ID: id}), "mock peer routing error")
+ require.Equal(t, []peer.ID{id}, mockRouter.queried)
+}
+
+func TestAutoNATService(t *testing.T) {
+ h, err := New(EnableNATService())
+ require.NoError(t, err)
+ h.Close()
+}
+
+func TestInsecureConstructor(t *testing.T) {
+ h, err := New(
+ EnableNATService(),
+ NoSecurity,
+ )
+ require.NoError(t, err)
+ h.Close()
+
+ h, err = New(
+ NoSecurity,
+ )
+ require.NoError(t, err)
+ h.Close()
+}
+
+func TestAutoNATv2Service(t *testing.T) {
+ h, err := New(EnableAutoNATv2())
+ require.NoError(t, err)
+ h.Close()
+}
+
+func TestDisableIdentifyAddressDiscovery(t *testing.T) {
+ h, err := New(DisableIdentifyAddressDiscovery())
+ require.NoError(t, err)
+ h.Close()
+}
+
+func TestMain(m *testing.M) {
+ goleak.VerifyTestMain(
+ m,
+ // This will return eventually (5s timeout) but doesn't take a context.
+ goleak.IgnoreAnyFunction("github.com/koron/go-ssdp.Search"),
+ goleak.IgnoreAnyFunction("github.com/pion/sctp.(*Stream).SetReadDeadline.func1"),
+ // Stats
+ goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"),
+ // nat-pmp
+ goleak.IgnoreAnyFunction("github.com/jackpal/go-nat-pmp.(*Client).GetExternalAddress"),
+ )
+}
+
+func TestDialCircuitAddrWithWrappedResourceManager(t *testing.T) {
+ relay, err := New(EnableRelayService(), ForceReachabilityPublic())
+ require.NoError(t, err)
+ defer relay.Close()
+
+ peerBehindRelay, err := New(
+ EnableAutoRelayWithStaticRelays([]peer.AddrInfo{{ID: relay.ID(), Addrs: relay.Addrs()}}),
+ ForceReachabilityPrivate())
+ require.NoError(t, err)
+ defer peerBehindRelay.Close()
+
+ // Use a wrapped resource manager to test that the circuit dialing works
+ // with it. Look at the PR introducing this test for context
+ type wrappedRcmgr struct{ network.ResourceManager }
+ mgr, err := rcmgr.NewResourceManager(rcmgr.NewFixedLimiter(rcmgr.DefaultLimits.AutoScale()))
+ require.NoError(t, err)
+ wmgr := wrappedRcmgr{mgr}
+ h, err := New(ResourceManager(wmgr))
+ require.NoError(t, err)
+ defer h.Close()
+
+ h.Peerstore().AddAddrs(relay.ID(), relay.Addrs(), 10*time.Minute)
+ h.Peerstore().AddAddr(peerBehindRelay.ID(),
+ ma.StringCast(
+ fmt.Sprintf("/p2p/%s/p2p-circuit", relay.ID()),
+ ),
+ peerstore.TempAddrTTL,
+ )
+
+ require.Eventually(t, func() bool {
+ ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
+ defer cancel()
+ res := <-ping.Ping(ctx, h, peerBehindRelay.ID())
+ return res.Error == nil
+ }, 5*time.Second, 50*time.Millisecond)
+}
+
+func TestHostAddrsFactoryAddsCerthashes(t *testing.T) {
+ addr := ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1/webtransport")
+ h, err := New(
+ AddrsFactory(func(_ []ma.Multiaddr) []ma.Multiaddr {
+ return []ma.Multiaddr{addr}
+ }),
+ )
+ require.NoError(t, err)
+ require.Eventually(t, func() bool {
+ addrs := h.Addrs()
+ for _, a := range addrs {
+ first, last := ma.SplitFunc(a, func(c ma.Component) bool {
+ return c.Protocol().Code == ma.P_CERTHASH
+ })
+ if addr.Equal(first) && last != nil {
+ return true
+ }
+ }
+ return false
+ }, 5*time.Second, 50*time.Millisecond)
+ h.Close()
+}
+
+func newRandomPort(t *testing.T) string {
+ t.Helper()
+ // Find a free UDP port by binding to port 0 and immediately releasing it.
+ // Note: another process could grab the port between Close and reuse.
+ c, err := net.ListenUDP("udp4", &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0})
+ require.NoError(t, err)
+ ipPort := netip.MustParseAddrPort(c.LocalAddr().String())
+ port := strconv.Itoa(int(ipPort.Port()))
+ require.NoError(t, c.Close())
+ return port
+}
+
+func TestWebRTCReuseAddrWithQUIC(t *testing.T) {
+ port := newRandomPort(t)
+ order := [][]string{
+ {"/ip4/127.0.0.1/udp/" + port + "/quic-v1", "/ip4/127.0.0.1/udp/" + port + "/webrtc-direct"},
+ {"/ip4/127.0.0.1/udp/" + port + "/webrtc-direct", "/ip4/127.0.0.1/udp/" + port + "/quic-v1"},
+ // We do not support WebRTC automatically reusing QUIC addresses if port is not specified, yet.
+ // {"/ip4/127.0.0.1/udp/0/webrtc-direct", "/ip4/127.0.0.1/udp/0/quic-v1"},
+ }
+ for i, addrs := range order {
+ t.Run("Order "+strconv.Itoa(i), func(t *testing.T) {
+ h1, err := New(ListenAddrStrings(addrs...), Transport(quic.NewTransport), Transport(libp2pwebrtc.New))
+ require.NoError(t, err)
+ defer h1.Close()
+
+ seenPorts := make(map[string]struct{})
+ for _, addr := range h1.Addrs() {
+ s, err := addr.ValueForProtocol(ma.P_UDP)
+ require.NoError(t, err)
+ seenPorts[s] = struct{}{}
+ }
+ require.Len(t, seenPorts, 1)
+
+ quicClient, err := New(NoListenAddrs, Transport(quic.NewTransport))
+ require.NoError(t, err)
+ defer quicClient.Close()
+
+ webrtcClient, err := New(NoListenAddrs, Transport(libp2pwebrtc.New))
+ require.NoError(t, err)
+ defer webrtcClient.Close()
+
+ for _, client := range []host.Host{quicClient, webrtcClient} {
+ err := client.Connect(context.Background(), peer.AddrInfo{ID: h1.ID(), Addrs: h1.Addrs()})
+ require.NoError(t, err)
+ }
+
+ t.Run("quic client can connect", func(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ p := ping.NewPingService(quicClient)
+ resCh := p.Ping(ctx, h1.ID())
+ res := <-resCh
+ require.NoError(t, res.Error)
+ })
+
+ t.Run("webrtc client can connect", func(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ p := ping.NewPingService(webrtcClient)
+ resCh := p.Ping(ctx, h1.ID())
+ res := <-resCh
+ require.NoError(t, res.Error)
+ })
+ })
+ }
+
+ swapPort := func(addrStrs []string, oldPort, newPort string) []string {
+ out := make([]string, 0, len(addrStrs))
+ for _, addrStr := range addrStrs {
+ out = append(out, strings.Replace(addrStr, oldPort, newPort, 1))
+ }
+ return out
+ }
+
+ t.Run("setup with no reuseport. Should fail", func(t *testing.T) {
+ oldPort := port
+ newPort := newRandomPort(t)
+ h1, err := New(ListenAddrStrings(swapPort(order[0], oldPort, newPort)...), Transport(quic.NewTransport), Transport(libp2pwebrtc.New), QUICReuse(quicreuse.NewConnManager, quicreuse.DisableReuseport()))
+ require.NoError(t, err) // It's a bug/feature that swarm.Listen does not error if at least one transport succeeds in listening.
+ defer h1.Close()
+ // Check that webrtc did fail to listen
+ require.Equal(t, 1, len(h1.Addrs()))
+ require.Contains(t, h1.Addrs()[0].String(), "quic-v1")
+ })
+
+ t.Run("setup with autonat", func(t *testing.T) {
+ oldPort := port
+ newPort := newRandomPort(t)
+ h1, err := New(EnableAutoNATv2(), ListenAddrStrings(swapPort(order[0], oldPort, newPort)...), Transport(quic.NewTransport), Transport(libp2pwebrtc.New), QUICReuse(quicreuse.NewConnManager, quicreuse.DisableReuseport()))
+ require.NoError(t, err) // It's a bug/feature that swarm.Listen does not error if at least one transport succeeds in listening.
+ defer h1.Close()
+ // Check that webrtc did fail to listen
+ require.Equal(t, 1, len(h1.Addrs()))
+ require.Contains(t, h1.Addrs()[0].String(), "quic-v1")
+ })
+}
+
+func TestUseCorrectTransportForDialOut(t *testing.T) {
+ listAddrOrder := [][]string{
+ {"/ip4/127.0.0.1/udp/0/quic-v1", "/ip4/127.0.0.1/udp/0/quic-v1/webtransport"},
+ {"/ip4/127.0.0.1/udp/0/quic-v1/webtransport", "/ip4/127.0.0.1/udp/0/quic-v1"},
+ {"/ip4/0.0.0.0/udp/0/quic-v1", "/ip4/0.0.0.0/udp/0/quic-v1/webtransport"},
+ {"/ip4/0.0.0.0/udp/0/quic-v1/webtransport", "/ip4/0.0.0.0/udp/0/quic-v1"},
+ }
+ for _, order := range listAddrOrder {
+ h1, err := New(ListenAddrStrings(order...), Transport(quic.NewTransport), Transport(webtransport.New))
+ require.NoError(t, err)
+ t.Cleanup(func() {
+ h1.Close()
+ })
+
+ go func() {
+ h1.SetStreamHandler("/echo-port", func(s network.Stream) {
+ m := s.Conn().RemoteMultiaddr()
+ v, err := m.ValueForProtocol(ma.P_UDP)
+ if err != nil {
+ s.Reset()
+ return
+ }
+ s.Write([]byte(v))
+ s.Close()
+ })
+ }()
+
+ for _, addr := range h1.Addrs() {
+ t.Run("order "+strings.Join(order, ",")+" Dial to "+addr.String(), func(t *testing.T) {
+ h2, err := New(ListenAddrStrings(
+ "/ip4/0.0.0.0/udp/0/quic-v1",
+ "/ip4/0.0.0.0/udp/0/quic-v1/webtransport",
+ ), Transport(quic.NewTransport), Transport(webtransport.New))
+ require.NoError(t, err)
+ defer h2.Close()
+ t.Log("H2 Addrs", h2.Addrs())
+ var myExpectedDialOutAddr ma.Multiaddr
+ addrIsWT, _ := webtransport.IsWebtransportMultiaddr(addr)
+ isLocal := func(a ma.Multiaddr) bool {
+ return strings.Contains(a.String(), "127.0.0.1")
+ }
+ addrIsLocal := isLocal(addr)
+ for _, a := range h2.Addrs() {
+ aIsWT, _ := webtransport.IsWebtransportMultiaddr(a)
+ if addrIsWT == aIsWT && isLocal(a) == addrIsLocal {
+ myExpectedDialOutAddr = a
+ break
+ }
+ }
+
+ err = h2.Connect(context.Background(), peer.AddrInfo{ID: h1.ID(), Addrs: []ma.Multiaddr{addr}})
+ require.NoError(t, err)
+
+ s, err := h2.NewStream(context.Background(), h1.ID(), "/echo-port")
+ require.NoError(t, err)
+
+ port, err := io.ReadAll(s)
+ require.NoError(t, err)
+
+ myExpectedPort, err := myExpectedDialOutAddr.ValueForProtocol(ma.P_UDP)
+ require.NoError(t, err)
+ require.Equal(t, myExpectedPort, string(port))
+ })
+ }
+ }
+}
+
+func TestCircuitBehindWSS(t *testing.T) {
+ relayTLSConf := getTLSConf(t, net.IPv4(127, 0, 0, 1), time.Now(), time.Now().Add(time.Hour))
+ serverNameChan := make(chan string, 2) // Channel recording the server names specified in incoming client hellos
+ relayTLSConf.GetConfigForClient = func(chi *tls.ClientHelloInfo) (*tls.Config, error) {
+ serverNameChan <- chi.ServerName
+ return relayTLSConf, nil
+ }
+
+ relay, err := New(
+ EnableRelayService(),
+ ForceReachabilityPublic(),
+ Transport(websocket.New, websocket.WithTLSConfig(relayTLSConf)),
+ ListenAddrStrings("/ip4/127.0.0.1/tcp/0/wss"),
+ )
+ require.NoError(t, err)
+ defer relay.Close()
+
+ relayAddrPort, _ := relay.Addrs()[0].ValueForProtocol(ma.P_TCP)
+ relayAddrWithSNIString := fmt.Sprintf(
+ "/dns4/localhost/tcp/%s/wss", relayAddrPort,
+ )
+ relayAddrWithSNI := []ma.Multiaddr{ma.StringCast(relayAddrWithSNIString)}
+
+ h, err := New(
+ NoListenAddrs,
+ EnableRelay(),
+ Transport(websocket.New, websocket.WithTLSClientConfig(&tls.Config{InsecureSkipVerify: true})),
+ ForceReachabilityPrivate())
+ require.NoError(t, err)
+ defer h.Close()
+
+ peerBehindRelay, err := New(
+ NoListenAddrs,
+ Transport(websocket.New, websocket.WithTLSClientConfig(&tls.Config{InsecureSkipVerify: true})),
+ EnableRelay(),
+ EnableAutoRelayWithStaticRelays([]peer.AddrInfo{{ID: relay.ID(), Addrs: relayAddrWithSNI}}),
+ ForceReachabilityPrivate())
+ require.NoError(t, err)
+ defer peerBehindRelay.Close()
+
+ require.Equal(t,
+ "localhost",
+ <-serverNameChan, // The server connects to the relay
+ )
+
+ // Connect to the peer behind the relay
+ err = h.Connect(context.Background(), peer.AddrInfo{
+ ID: peerBehindRelay.ID(),
+ Addrs: []ma.Multiaddr{ma.StringCast(
+ fmt.Sprintf("%s/p2p/%s/p2p-circuit", relayAddrWithSNIString, relay.ID()),
+ )},
+ })
+ require.NoError(t, err)
+
+ require.Equal(t,
+ "localhost",
+ <-serverNameChan, // The client connects to the relay and sends the SNI
+ )
+}
+
+// getTLSConf is a helper to generate a self-signed TLS config
+func getTLSConf(t *testing.T, ip net.IP, start, end time.Time) *tls.Config {
+ t.Helper()
+ certTempl := &x509.Certificate{
+ SerialNumber: big.NewInt(1234),
+ Subject: pkix.Name{Organization: []string{"websocket"}},
+ NotBefore: start,
+ NotAfter: end,
+ IsCA: true,
+ ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},
+ KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
+ BasicConstraintsValid: true,
+ IPAddresses: []net.IP{ip},
+ }
+ priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ require.NoError(t, err)
+ caBytes, err := x509.CreateCertificate(rand.Reader, certTempl, certTempl, &priv.PublicKey, priv)
+ require.NoError(t, err)
+ cert, err := x509.ParseCertificate(caBytes)
+ require.NoError(t, err)
+ return &tls.Config{
+ Certificates: []tls.Certificate{{
+ Certificate: [][]byte{cert.Raw},
+ PrivateKey: priv,
+ Leaf: cert,
+ }},
+ }
+}
+
+func TestSharedTCPAddr(t *testing.T) {
+ h, err := New(
+ ShareTCPListener(),
+ Transport(tcp.NewTCPTransport),
+ Transport(websocket.New),
+ ListenAddrStrings("/ip4/0.0.0.0/tcp/8888"),
+ ListenAddrStrings("/ip4/0.0.0.0/tcp/8888/ws"),
+ )
+ require.NoError(t, err)
+ defer h.Close()
+ sawTCP := false
+ sawWS := false
+ for _, addr := range h.Addrs() {
+ if strings.HasSuffix(addr.String(), "/tcp/8888") {
+ sawTCP = true
+ }
+ if strings.HasSuffix(addr.String(), "/tcp/8888/ws") {
+ sawWS = true
+ }
+ }
+ require.True(t, sawTCP)
+ require.True(t, sawWS)
+
+ _, err = New(
+ ShareTCPListener(),
+ Transport(tcp.NewTCPTransport),
+ Transport(websocket.New),
+ PrivateNetwork(pnet.PSK([]byte{1, 2, 3})),
+ )
+ require.ErrorContains(t, err, "cannot use shared TCP listener with PSK")
+}
+
+func TestCustomTCPDialer(t *testing.T) {
+ expectedErr := errors.New("custom dialer called, but not implemented")
+ customDialer := func(_ ma.Multiaddr) (tcp.ContextDialer, error) {
+ // Normally a user would implement this by returning a custom dialer
+ // Here, we just test that this is called.
+ return nil, expectedErr
+ }
+
+ h, err := New(
+ Transport(tcp.NewTCPTransport, tcp.WithDialerForAddr(customDialer)),
+ )
+ require.NoError(t, err)
+ defer h.Close()
+
+ var randID peer.ID
+ priv, _, err := crypto.GenerateKeyPair(crypto.Ed25519, 256)
+ require.NoError(t, err)
+ randID, err = peer.IDFromPrivateKey(priv)
+ require.NoError(t, err)
+
+ err = h.Connect(context.Background(), peer.AddrInfo{
+ ID: randID,
+ // This won't actually be dialed since we return an error above
+ Addrs: []ma.Multiaddr{ma.StringCast("/ip4/1.2.3.4/tcp/4")},
+ })
+ require.ErrorContains(t, err, expectedErr.Error())
+}
+
+func TestBasicHostInterfaceAssertion(t *testing.T) {
+ mockRouter := &mockPeerRouting{}
+ h, err := New(
+ NoListenAddrs,
+ Routing(func(host.Host) (routing.PeerRouting, error) { return mockRouter, nil }),
+ DisableRelay(),
+ )
+ require.NoError(t, err)
+ defer h.Close()
+
+ require.NotNil(t, h)
+ require.NotEmpty(t, h.ID())
+
+ _, ok := h.(interface{ AllAddrs() []ma.Multiaddr })
+ require.True(t, ok)
+}
+
+func BenchmarkAllAddrs(b *testing.B) {
+ h, err := New()
+ require.NoError(b, err)
+ defer h.Close()
+
+ addrsHost := h.(interface{ AllAddrs() []ma.Multiaddr })
+
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ addrsHost.AllAddrs()
+ }
+}
diff --git a/limits.go b/limits.go
new file mode 100644
index 0000000000..5871577e51
--- /dev/null
+++ b/limits.go
@@ -0,0 +1,113 @@
+package libp2p
+
+import (
+ "github.com/libp2p/go-libp2p/core/protocol"
+ "github.com/libp2p/go-libp2p/p2p/host/autonat"
+ rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
+ circuit "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/proto"
+ relayv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay"
+ "github.com/libp2p/go-libp2p/p2p/protocol/holepunch"
+ "github.com/libp2p/go-libp2p/p2p/protocol/identify"
+ "github.com/libp2p/go-libp2p/p2p/protocol/ping"
+)
+
+// SetDefaultServiceLimits sets the default limits for bundled libp2p services
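+//
+// A minimal wiring sketch, assuming the helpers exported by
+// p2p/host/resource-manager (DefaultLimits, AutoScale, NewFixedLimiter and
+// NewResourceManager):
+//
+//    limits := rcmgr.DefaultLimits
+//    SetDefaultServiceLimits(&limits)
+//    limiter := rcmgr.NewFixedLimiter(limits.AutoScale())
+//    mgr, err := rcmgr.NewResourceManager(limiter)
+//    // then pass the manager to the host: New(ResourceManager(mgr))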
+func SetDefaultServiceLimits(config *rcmgr.ScalingLimitConfig) {
+ // identify
+ config.AddServiceLimit(
+ identify.ServiceName,
+ rcmgr.BaseLimit{StreamsInbound: 64, StreamsOutbound: 64, Streams: 128, Memory: 4 << 20},
+ rcmgr.BaseLimitIncrease{StreamsInbound: 64, StreamsOutbound: 64, Streams: 128, Memory: 4 << 20},
+ )
+ config.AddServicePeerLimit(
+ identify.ServiceName,
+ rcmgr.BaseLimit{StreamsInbound: 16, StreamsOutbound: 16, Streams: 32, Memory: 1 << 20},
+ rcmgr.BaseLimitIncrease{},
+ )
+ for _, id := range [...]protocol.ID{identify.ID, identify.IDPush} {
+ config.AddProtocolLimit(
+ id,
+ rcmgr.BaseLimit{StreamsInbound: 64, StreamsOutbound: 64, Streams: 128, Memory: 4 << 20},
+ rcmgr.BaseLimitIncrease{StreamsInbound: 64, StreamsOutbound: 64, Streams: 128, Memory: 4 << 20},
+ )
+ config.AddProtocolPeerLimit(
+ id,
+ rcmgr.BaseLimit{StreamsInbound: 16, StreamsOutbound: 16, Streams: 32, Memory: 32 * (256<<20 + 16<<10)},
+ rcmgr.BaseLimitIncrease{},
+ )
+ }
+
+ // ping
+ addServiceAndProtocolLimit(config,
+ ping.ServiceName, ping.ID,
+ rcmgr.BaseLimit{StreamsInbound: 64, StreamsOutbound: 64, Streams: 64, Memory: 4 << 20},
+ rcmgr.BaseLimitIncrease{StreamsInbound: 64, StreamsOutbound: 64, Streams: 64, Memory: 4 << 20},
+ )
+ addServicePeerAndProtocolPeerLimit(
+ config,
+ ping.ServiceName, ping.ID,
+ rcmgr.BaseLimit{StreamsInbound: 2, StreamsOutbound: 3, Streams: 4, Memory: 32 * (256<<20 + 16<<10)},
+ rcmgr.BaseLimitIncrease{},
+ )
+
+ // autonat
+ addServiceAndProtocolLimit(config,
+ autonat.ServiceName, autonat.AutoNATProto,
+ rcmgr.BaseLimit{StreamsInbound: 64, StreamsOutbound: 64, Streams: 64, Memory: 4 << 20},
+ rcmgr.BaseLimitIncrease{StreamsInbound: 4, StreamsOutbound: 4, Streams: 4, Memory: 2 << 20},
+ )
+ addServicePeerAndProtocolPeerLimit(
+ config,
+ autonat.ServiceName, autonat.AutoNATProto,
+ rcmgr.BaseLimit{StreamsInbound: 2, StreamsOutbound: 2, Streams: 2, Memory: 1 << 20},
+ rcmgr.BaseLimitIncrease{},
+ )
+
+ // holepunch
+ addServiceAndProtocolLimit(config,
+ holepunch.ServiceName, holepunch.Protocol,
+ rcmgr.BaseLimit{StreamsInbound: 32, StreamsOutbound: 32, Streams: 64, Memory: 4 << 20},
+ rcmgr.BaseLimitIncrease{StreamsInbound: 8, StreamsOutbound: 8, Streams: 16, Memory: 4 << 20},
+ )
+ addServicePeerAndProtocolPeerLimit(config,
+ holepunch.ServiceName, holepunch.Protocol,
+ rcmgr.BaseLimit{StreamsInbound: 2, StreamsOutbound: 2, Streams: 2, Memory: 1 << 20},
+ rcmgr.BaseLimitIncrease{},
+ )
+
+ // relay/v2
+ config.AddServiceLimit(
+ relayv2.ServiceName,
+ rcmgr.BaseLimit{StreamsInbound: 256, StreamsOutbound: 256, Streams: 256, Memory: 16 << 20},
+ rcmgr.BaseLimitIncrease{StreamsInbound: 256, StreamsOutbound: 256, Streams: 256, Memory: 16 << 20},
+ )
+ config.AddServicePeerLimit(
+ relayv2.ServiceName,
+ rcmgr.BaseLimit{StreamsInbound: 64, StreamsOutbound: 64, Streams: 64, Memory: 1 << 20},
+ rcmgr.BaseLimitIncrease{},
+ )
+
+ // circuit protocols, both client and service
+ for _, proto := range [...]protocol.ID{circuit.ProtoIDv2Hop, circuit.ProtoIDv2Stop} {
+ config.AddProtocolLimit(
+ proto,
+ rcmgr.BaseLimit{StreamsInbound: 640, StreamsOutbound: 640, Streams: 640, Memory: 16 << 20},
+ rcmgr.BaseLimitIncrease{StreamsInbound: 640, StreamsOutbound: 640, Streams: 640, Memory: 16 << 20},
+ )
+ config.AddProtocolPeerLimit(
+ proto,
+ rcmgr.BaseLimit{StreamsInbound: 128, StreamsOutbound: 128, Streams: 128, Memory: 32 << 20},
+ rcmgr.BaseLimitIncrease{},
+ )
+ }
+}
+
+func addServiceAndProtocolLimit(config *rcmgr.ScalingLimitConfig, service string, proto protocol.ID, limit rcmgr.BaseLimit, increase rcmgr.BaseLimitIncrease) {
+ config.AddServiceLimit(service, limit, increase)
+ config.AddProtocolLimit(proto, limit, increase)
+}
+
+func addServicePeerAndProtocolPeerLimit(config *rcmgr.ScalingLimitConfig, service string, proto protocol.ID, limit rcmgr.BaseLimit, increase rcmgr.BaseLimitIncrease) {
+ config.AddServicePeerLimit(service, limit, increase)
+ config.AddProtocolPeerLimit(proto, limit, increase)
+}
diff --git a/options.go b/options.go
new file mode 100644
index 0000000000..0329b7e60b
--- /dev/null
+++ b/options.go
@@ -0,0 +1,657 @@
+package libp2p
+
+// This file contains all libp2p configuration options (except the defaults,
+// those are in defaults.go).
+
+import (
+ "crypto/rand"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "reflect"
+ "time"
+
+ "github.com/libp2p/go-libp2p/config"
+ "github.com/libp2p/go-libp2p/core/connmgr"
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/metrics"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/libp2p/go-libp2p/core/pnet"
+ "github.com/libp2p/go-libp2p/core/protocol"
+ "github.com/libp2p/go-libp2p/core/transport"
+ "github.com/libp2p/go-libp2p/p2p/host/autorelay"
+ bhost "github.com/libp2p/go-libp2p/p2p/host/basic"
+ "github.com/libp2p/go-libp2p/p2p/net/swarm"
+ tptu "github.com/libp2p/go-libp2p/p2p/net/upgrader"
+ relayv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay"
+ "github.com/libp2p/go-libp2p/p2p/protocol/holepunch"
+ "github.com/libp2p/go-libp2p/p2p/transport/quicreuse"
+ "github.com/prometheus/client_golang/prometheus"
+
+ ma "github.com/multiformats/go-multiaddr"
+ "go.uber.org/fx"
+)
+
+// ListenAddrStrings configures libp2p to listen on the given (unparsed)
+// addresses.
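+//
+// For example (the addresses are illustrative):
+//
+//    h, err := New(ListenAddrStrings(
+//        "/ip4/0.0.0.0/tcp/9000",
+//        "/ip4/0.0.0.0/udp/9000/quic-v1",
+//    ))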
+func ListenAddrStrings(s ...string) Option {
+ return func(cfg *Config) error {
+ for _, addrstr := range s {
+ a, err := ma.NewMultiaddr(addrstr)
+ if err != nil {
+ return err
+ }
+ cfg.ListenAddrs = append(cfg.ListenAddrs, a)
+ }
+ return nil
+ }
+}
+
+// ListenAddrs configures libp2p to listen on the given addresses.
+func ListenAddrs(addrs ...ma.Multiaddr) Option {
+ return func(cfg *Config) error {
+ cfg.ListenAddrs = append(cfg.ListenAddrs, addrs...)
+ return nil
+ }
+}
+
+// Security configures libp2p to use the given security transport (or transport
+// constructor).
+//
+// Name is the protocol name.
+//
+// The transport can be a constructed security.Transport or a function taking
+// any subset of this libp2p node's:
+// * Public key
+// * Private key
+// * Peer ID
+// * Host
+// * Network
+// * Peerstore
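+//
+// A minimal sketch, assuming the bundled noise package
+// (github.com/libp2p/go-libp2p/p2p/security/noise):
+//
+//    h, err := New(Security(noise.ID, noise.New))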
+func Security(name string, constructor interface{}) Option {
+ return func(cfg *Config) error {
+ if cfg.Insecure {
+ return fmt.Errorf("cannot use security transports with an insecure libp2p configuration")
+ }
+ cfg.SecurityTransports = append(cfg.SecurityTransports, config.Security{ID: protocol.ID(name), Constructor: constructor})
+ return nil
+ }
+}
+
+// NoSecurity is an option that completely disables all transport security.
+// It's incompatible with all other transport security protocols.
+var NoSecurity Option = func(cfg *Config) error {
+ if len(cfg.SecurityTransports) > 0 {
+ return fmt.Errorf("cannot use security transports with an insecure libp2p configuration")
+ }
+ cfg.Insecure = true
+ return nil
+}
+
+// Muxer configures libp2p to use the given stream multiplexer.
+// name is the protocol name.
+func Muxer(name string, muxer network.Multiplexer) Option {
+ return func(cfg *Config) error {
+ cfg.Muxers = append(cfg.Muxers, tptu.StreamMuxer{Muxer: muxer, ID: protocol.ID(name)})
+ return nil
+ }
+}
+
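+// QUICReuse configures libp2p to use the given QUIC connection manager
+// constructor, together with any quicreuse options, e.g.
+// QUICReuse(quicreuse.NewConnManager, quicreuse.DisableReuseport()) as used
+// in the tests above.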
+func QUICReuse(constructor interface{}, opts ...quicreuse.Option) Option {
+ return func(cfg *Config) error {
+ tag := `group:"quicreuseopts"`
+ typ := reflect.ValueOf(constructor).Type()
+ numParams := typ.NumIn()
+ isVariadic := typ.IsVariadic()
+
+ if !isVariadic && len(opts) > 0 {
+ return errors.New("QUICReuse constructor doesn't take any options")
+ }
+
+ var params []string
+ if isVariadic && len(opts) > 0 {
+ // If there are options, apply the tag.
+ // Since options are variadic, they have to be the last argument of the constructor.
+ params = make([]string, numParams)
+ params[len(params)-1] = tag
+ }
+
+ cfg.QUICReuse = append(cfg.QUICReuse, fx.Provide(fx.Annotate(constructor, fx.ParamTags(params...))))
+ for _, opt := range opts {
+ cfg.QUICReuse = append(cfg.QUICReuse, fx.Supply(fx.Annotate(opt, fx.ResultTags(tag))))
+ }
+ return nil
+ }
+}
+
+// Transport configures libp2p to use the given transport (or transport
+// constructor).
+//
+// The transport can be a constructed transport.Transport or a function taking
+// any subset of this libp2p node's:
+// * Transport Upgrader (*tptu.Upgrader)
+// * Host
+// * Stream muxer (muxer.Transport)
+// * Security transport (security.Transport)
+// * Private network protector (pnet.Protector)
+// * Peer ID
+// * Private Key
+// * Public Key
+// * Address filter (filter.Filter)
+// * Peerstore
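+//
+// A minimal sketch of both forms, using constructors that appear elsewhere in
+// this change (tlsConf is a placeholder *tls.Config supplied by the caller):
+//
+//    h, err := New(
+//        Transport(quic.NewTransport),                               // no transport options
+//        Transport(websocket.New, websocket.WithTLSConfig(tlsConf)), // variadic transport options
+//    )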
+func Transport(constructor interface{}, opts ...interface{}) Option {
+ return func(cfg *Config) error {
+ // generate a random identifier, so that fx can associate the constructor with its options
+ b := make([]byte, 8)
+ rand.Read(b)
+ id := binary.BigEndian.Uint64(b)
+
+ tag := fmt.Sprintf(`group:"transportopt_%d"`, id)
+
+ typ := reflect.ValueOf(constructor).Type()
+ numParams := typ.NumIn()
+ isVariadic := typ.IsVariadic()
+
+ if !isVariadic && len(opts) > 0 {
+ return errors.New("transport constructor doesn't take any options")
+ }
+ if isVariadic && numParams >= 1 {
+ paramType := typ.In(numParams - 1).Elem()
+ for _, opt := range opts {
+ if typ := reflect.TypeOf(opt); !typ.AssignableTo(paramType) {
+ return fmt.Errorf("transport option of type %s not assignable to %s", typ, paramType)
+ }
+ }
+ }
+
+ var params []string
+ if isVariadic && len(opts) > 0 {
+ // If there are transport options, apply the tag.
+ // Since options are variadic, they have to be the last argument of the constructor.
+ params = make([]string, numParams)
+ params[len(params)-1] = tag
+ }
+
+ cfg.Transports = append(cfg.Transports, fx.Provide(
+ fx.Annotate(
+ constructor,
+ fx.ParamTags(params...),
+ fx.As(new(transport.Transport)),
+ fx.ResultTags(`group:"transport"`),
+ ),
+ ))
+ for _, opt := range opts {
+ cfg.Transports = append(cfg.Transports, fx.Supply(
+ fx.Annotate(
+ opt,
+ fx.ResultTags(tag),
+ ),
+ ))
+ }
+ return nil
+ }
+}
+
+// Peerstore configures libp2p to use the given peerstore.
+func Peerstore(ps peerstore.Peerstore) Option {
+ return func(cfg *Config) error {
+ if cfg.Peerstore != nil {
+ return fmt.Errorf("cannot specify multiple peerstore options")
+ }
+
+ cfg.Peerstore = ps
+ return nil
+ }
+}
+
+// PrivateNetwork configures libp2p to use the given private network protector.
+func PrivateNetwork(psk pnet.PSK) Option {
+ return func(cfg *Config) error {
+ if cfg.PSK != nil {
+ return fmt.Errorf("cannot specify multiple private network options")
+ }
+
+ cfg.PSK = psk
+ return nil
+ }
+}
+
+// BandwidthReporter configures libp2p to use the given bandwidth reporter.
+func BandwidthReporter(rep metrics.Reporter) Option {
+ return func(cfg *Config) error {
+ if cfg.Reporter != nil {
+ return fmt.Errorf("cannot specify multiple bandwidth reporter options")
+ }
+
+ cfg.Reporter = rep
+ return nil
+ }
+}
+
+// Identity configures libp2p to use the given private key to identify itself.
+func Identity(sk crypto.PrivKey) Option {
+ return func(cfg *Config) error {
+ if cfg.PeerKey != nil {
+ return fmt.Errorf("cannot specify multiple identities")
+ }
+
+ cfg.PeerKey = sk
+ return nil
+ }
+}
+
+// ConnectionManager configures libp2p to use the given connection manager.
+//
+// The current "standard" connection manager lives in github.com/libp2p/go-libp2p-connmgr. See
+// https://pkg.go.dev/github.com/libp2p/go-libp2p-connmgr?utm_source=godoc#NewConnManager.
+func ConnectionManager(connman connmgr.ConnManager) Option {
+ return func(cfg *Config) error {
+ if cfg.ConnManager != nil {
+ return fmt.Errorf("cannot specify multiple connection managers")
+ }
+ cfg.ConnManager = connman
+ return nil
+ }
+}
+
+// AddrsFactory configures libp2p to use the given address factory.
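+// For example (as in the tests above), a factory can pin the advertised
+// addresses to a fixed multiaddr:
+//
+//    addr := ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1/webtransport")
+//    h, err := New(AddrsFactory(func(_ []ma.Multiaddr) []ma.Multiaddr {
+//        return []ma.Multiaddr{addr}
+//    }))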
+func AddrsFactory(factory config.AddrsFactory) Option {
+ return func(cfg *Config) error {
+ if cfg.AddrsFactory != nil {
+ return fmt.Errorf("cannot specify multiple address factories")
+ }
+ cfg.AddrsFactory = factory
+ return nil
+ }
+}
+
+// EnableRelay configures libp2p to enable the relay transport.
+// This option only configures libp2p to accept inbound connections from relays
+// and make outbound connections through relays when requested by the remote peer.
+// This option supports both circuit v1 and v2 connections.
+// (default: enabled)
+func EnableRelay() Option {
+ return func(cfg *Config) error {
+ cfg.RelayCustom = true
+ cfg.Relay = true
+ return nil
+ }
+}
+
+// DisableRelay configures libp2p to disable the relay transport.
+func DisableRelay() Option {
+ return func(cfg *Config) error {
+ cfg.RelayCustom = true
+ cfg.Relay = false
+ return nil
+ }
+}
+
+// EnableRelayService configures libp2p to run a circuit v2 relay,
+// if we detect that we're publicly reachable.
+func EnableRelayService(opts ...relayv2.Option) Option {
+ return func(cfg *Config) error {
+ cfg.EnableRelayService = true
+ cfg.RelayServiceOpts = opts
+ return nil
+ }
+}
+
+// EnableAutoRelay configures libp2p to enable the AutoRelay subsystem.
+//
+// Dependencies:
+// - Relay (enabled by default)
+// - Either:
+// 1. A list of static relays
+// 2. A PeerSource function that provides a chan of relays. See `autorelay.WithPeerSource`
+//
+// This subsystem performs automatic address rewriting to advertise relay addresses when it
+// detects that the node is publicly unreachable (e.g. behind a NAT).
+//
+// Deprecated: Use EnableAutoRelayWithStaticRelays or EnableAutoRelayWithPeerSource
+func EnableAutoRelay(opts ...autorelay.Option) Option {
+ return func(cfg *Config) error {
+ cfg.EnableAutoRelay = true
+ cfg.AutoRelayOpts = opts
+ return nil
+ }
+}
+
+// EnableAutoRelayWithStaticRelays configures libp2p to enable the AutoRelay subsystem using
+// the provided relays as relay candidates.
+// This subsystem performs automatic address rewriting to advertise relay addresses when it
+// detects that the node is publicly unreachable (e.g. behind a NAT).
+func EnableAutoRelayWithStaticRelays(static []peer.AddrInfo, opts ...autorelay.Option) Option {
+ return func(cfg *Config) error {
+ cfg.EnableAutoRelay = true
+ cfg.AutoRelayOpts = append([]autorelay.Option{autorelay.WithStaticRelays(static)}, opts...)
+ return nil
+ }
+}
+
+// EnableAutoRelayWithPeerSource configures libp2p to enable the AutoRelay
+// subsystem using the provided PeerSource callback to get more relay
+// candidates. This subsystem performs automatic address rewriting to advertise
+// relay addresses when it detects that the node is publicly unreachable (e.g.
+// behind a NAT).
+func EnableAutoRelayWithPeerSource(peerSource autorelay.PeerSource, opts ...autorelay.Option) Option {
+ return func(cfg *Config) error {
+ cfg.EnableAutoRelay = true
+ cfg.AutoRelayOpts = append([]autorelay.Option{autorelay.WithPeerSource(peerSource)}, opts...)
+ return nil
+ }
+}
+
+// ForceReachabilityPublic overrides automatic reachability detection in the AutoNAT subsystem,
+// forcing the local node to believe it is reachable externally.
+func ForceReachabilityPublic() Option {
+ return func(cfg *Config) error {
+ public := network.ReachabilityPublic
+ cfg.AutoNATConfig.ForceReachability = &public
+ return nil
+ }
+}
+
+// ForceReachabilityPrivate overrides automatic reachability detection in the AutoNAT subsystem,
+// forcing the local node to believe it is behind a NAT and not reachable externally.
+func ForceReachabilityPrivate() Option {
+ return func(cfg *Config) error {
+ private := network.ReachabilityPrivate
+ cfg.AutoNATConfig.ForceReachability = &private
+ return nil
+ }
+}
+
+// EnableNATService configures libp2p to provide a service to peers for determining
+// their reachability status. When enabled, the host will attempt to dial back
+// to peers, and then tell them if it was successful in making such connections.
+func EnableNATService() Option {
+ return func(cfg *Config) error {
+ cfg.AutoNATConfig.EnableService = true
+ return nil
+ }
+}
+
+// AutoNATServiceRateLimit changes the default rate limiting used when helping
+// other peers determine their reachability status. When set, the host will limit
+// the number of requests it responds to in each interval to the given
+// numbers. A value of '0' disables throttling.
+func AutoNATServiceRateLimit(global, perPeer int, interval time.Duration) Option {
+ return func(cfg *Config) error {
+ cfg.AutoNATConfig.ThrottleGlobalLimit = global
+ cfg.AutoNATConfig.ThrottlePeerLimit = perPeer
+ cfg.AutoNATConfig.ThrottleInterval = interval
+ return nil
+ }
+}
+
+// ConnectionGater configures libp2p to use the given ConnectionGater
+// to actively reject inbound/outbound connections based on the lifecycle stage
+// of the connection.
+//
+// For more information, refer to go-libp2p/core.ConnectionGater.
+func ConnectionGater(cg connmgr.ConnectionGater) Option {
+ return func(cfg *Config) error {
+ if cfg.ConnectionGater != nil {
+ return errors.New("cannot configure multiple connection gaters, or cannot configure both Filters and ConnectionGater")
+ }
+ cfg.ConnectionGater = cg
+ return nil
+ }
+}
+
+// ResourceManager configures libp2p to use the given ResourceManager.
+// When using the p2p/host/resource-manager implementation of the ResourceManager interface,
+// it is recommended to set limits for the libp2p protocols by calling SetDefaultServiceLimits.
+func ResourceManager(rcmgr network.ResourceManager) Option {
+ return func(cfg *Config) error {
+ if cfg.ResourceManager != nil {
+ return errors.New("cannot configure multiple resource managers")
+ }
+ cfg.ResourceManager = rcmgr
+ return nil
+ }
+}
+
+// NATPortMap configures libp2p to use the default NATManager. The default
+// NATManager will attempt to open a port in your network's firewall using UPnP.
+func NATPortMap() Option {
+ return NATManager(bhost.NewNATManager)
+}
+
+// NATManager will configure libp2p to use the requested NATManager. This
+// function should be passed a NATManager *constructor* that takes a libp2p Network.
+func NATManager(nm config.NATManagerC) Option {
+ return func(cfg *Config) error {
+ if cfg.NATManager != nil {
+ return fmt.Errorf("cannot specify multiple NATManagers")
+ }
+ cfg.NATManager = nm
+ return nil
+ }
+}
+
+// Ping will configure libp2p to support the ping service; enabled by default.
+func Ping(enable bool) Option {
+ return func(cfg *Config) error {
+ cfg.DisablePing = !enable
+ return nil
+ }
+}
+
+// Routing will configure libp2p to use routing.
+func Routing(rt config.RoutingC) Option {
+ return func(cfg *Config) error {
+ if cfg.Routing != nil {
+ return fmt.Errorf("cannot specify multiple routing options")
+ }
+ cfg.Routing = rt
+ return nil
+ }
+}
+
+// NoListenAddrs will configure libp2p to not listen by default.
+//
+// This will both clear any configured listen addrs and prevent libp2p from
+// applying the default listen address option. It also disables relay, unless the
+// user explicitly opts in with an option, since the relay transport creates an implicit
+// listen address that would make the node dialable through any relay it was connected to.
+var NoListenAddrs = func(cfg *Config) error {
+ cfg.ListenAddrs = []ma.Multiaddr{}
+ if !cfg.RelayCustom {
+ cfg.RelayCustom = true
+ cfg.Relay = false
+ }
+ return nil
+}
+
+// NoTransports will configure libp2p to not enable any transports.
+//
+// This will both clear any configured transports (specified in prior libp2p
+// options) and prevent libp2p from applying the default transports.
+var NoTransports = func(cfg *Config) error {
+ cfg.Transports = []fx.Option{}
+ return nil
+}
+
+// ProtocolVersion sets the protocolVersion string required by the
+// libp2p Identify protocol.
+func ProtocolVersion(s string) Option {
+ return func(cfg *Config) error {
+ cfg.ProtocolVersion = s
+ return nil
+ }
+}
+
+// UserAgent sets the libp2p user-agent sent along with the identify protocol
+func UserAgent(userAgent string) Option {
+ return func(cfg *Config) error {
+ cfg.UserAgent = userAgent
+ return nil
+ }
+}
+
+// MultiaddrResolver sets the libp2p DNS resolver
+func MultiaddrResolver(rslv network.MultiaddrDNSResolver) Option {
+ return func(cfg *Config) error {
+ cfg.MultiaddrResolver = rslv
+ return nil
+ }
+}
+
+// EnableHolePunching enables NAT traversal by enabling NATT'd peers to both initiate and respond to hole punching attempts
+// to create direct/NAT-traversed connections with other peers. (default: disabled)
+//
+// Experimental.
+//
+// Dependencies:
+// - Relay (enabled by default)
+//
+// This subsystem performs two functions:
+//
+// 1. On receiving an inbound Relay connection, it attempts to create a direct connection with the remote peer
+// by initiating and co-ordinating a hole punch over the Relayed connection.
+// 2. If a peer sees a request to co-ordinate a hole punch on an outbound Relay connection,
+// it will participate in the hole-punch to create a direct connection with the remote peer.
+//
+// If the hole punch is successful, all new streams will thereafter be created on the hole-punched connection.
+// The Relayed connection will eventually be closed after a grace period.
+//
+// All existing indefinite long-lived streams on the Relayed connection will have to be re-opened on the hole-punched connection by the user.
+// Users can make use of the `Connected`/`Disconnected` notifications emitted by the Network for this purpose.
+//
+// It is not mandatory, but recommended, to also enable the `AutoRelay` option (see `EnableAutoRelay`)
+// so the peer can discover and connect to Relay servers if it discovers that it is NATT'd and has private reachability via AutoNAT.
+// This will then enable it to advertise Relay addresses which can be used to accept inbound Relay connections to then co-ordinate
+// a hole punch.
+//
+// If `EnableAutoRelay` is configured and the user is confident that the peer has private reachability/is NATT'd,
+// the `ForceReachabilityPrivate` option can be configured to short-circuit reachability discovery via AutoNAT
+// so the peer can immediately start connecting to Relay servers.
+//
+// If `EnableAutoRelay` is configured, the `StaticRelays` option can be used to configure a static set of Relay servers
+// for `AutoRelay` to connect to so that it does not need to discover Relay servers via Routing.
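+//
+// A minimal sketch of the combination described above (staticRelays is a
+// placeholder []peer.AddrInfo supplied by the caller):
+//
+//    h, err := New(
+//        EnableHolePunching(),
+//        EnableAutoRelayWithStaticRelays(staticRelays),
+//    )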
+func EnableHolePunching(opts ...holepunch.Option) Option {
+ return func(cfg *Config) error {
+ cfg.EnableHolePunching = true
+ cfg.HolePunchingOptions = opts
+ return nil
+ }
+}
+
+// WithDialTimeout sets the timeout used when dialing peers.
+func WithDialTimeout(t time.Duration) Option {
+ return func(cfg *Config) error {
+ if t <= 0 {
+ return errors.New("dial timeout must be positive")
+ }
+ cfg.DialTimeout = t
+ return nil
+ }
+}
+
+// DisableMetrics configures libp2p to disable Prometheus metrics
+func DisableMetrics() Option {
+ return func(cfg *Config) error {
+ cfg.DisableMetrics = true
+ return nil
+ }
+}
+
+// PrometheusRegisterer configures libp2p to use reg as the Registerer for all metrics subsystems
+func PrometheusRegisterer(reg prometheus.Registerer) Option {
+ return func(cfg *Config) error {
+ if cfg.DisableMetrics {
+ return errors.New("cannot set registerer when metrics are disabled")
+ }
+ if cfg.PrometheusRegisterer != nil {
+ return errors.New("registerer already set")
+ }
+ if reg == nil {
+ return errors.New("registerer cannot be nil")
+ }
+ cfg.PrometheusRegisterer = reg
+ return nil
+ }
+}
+
+// DialRanker configures libp2p to use d as the dial ranker. To enable smart
+// dialing use `swarm.DefaultDialRanker`. Use `swarm.NoDelayDialRanker` to
+// disable smart dialing.
+//
+// Deprecated: use SwarmOpts(swarm.WithDialRanker(d)) instead
+func DialRanker(d network.DialRanker) Option {
+ return func(cfg *Config) error {
+ if cfg.DialRanker != nil {
+ return errors.New("dial ranker already configured")
+ }
+ cfg.DialRanker = d
+ return nil
+ }
+}
+
+// SwarmOpts configures libp2p to use swarm with opts
+func SwarmOpts(opts ...swarm.Option) Option {
+ return func(cfg *Config) error {
+ cfg.SwarmOpts = opts
+ return nil
+ }
+}
+
+// DisableIdentifyAddressDiscovery disables address discovery using peer provided observed addresses
+// in identify. If you know your public addresses upfront, the recommended way is to use
+// AddrsFactory to provide the external address to the host and use this option to disable
+// discovery from identify.
+func DisableIdentifyAddressDiscovery() Option {
+ return func(cfg *Config) error {
+ cfg.DisableIdentifyAddressDiscovery = true
+ return nil
+ }
+}
+
+// EnableAutoNATv2 enables autonat v2
+func EnableAutoNATv2() Option {
+ return func(cfg *Config) error {
+ cfg.EnableAutoNATv2 = true
+ return nil
+ }
+}
+
+// UDPBlackHoleSuccessCounter configures libp2p to use f as the black hole filter for UDP addrs
+func UDPBlackHoleSuccessCounter(f *swarm.BlackHoleSuccessCounter) Option {
+ return func(cfg *Config) error {
+ cfg.UDPBlackHoleSuccessCounter = f
+ cfg.CustomUDPBlackHoleSuccessCounter = true
+ return nil
+ }
+}
+
+// IPv6BlackHoleSuccessCounter configures libp2p to use f as the black hole filter for IPv6 addrs
+func IPv6BlackHoleSuccessCounter(f *swarm.BlackHoleSuccessCounter) Option {
+ return func(cfg *Config) error {
+ cfg.IPv6BlackHoleSuccessCounter = f
+ cfg.CustomIPv6BlackHoleSuccessCounter = true
+ return nil
+ }
+}
+
+// WithFxOption adds a user provided fx.Option to the libp2p constructor.
+// Experimental: This option is subject to change or removal.
+func WithFxOption(opts ...fx.Option) Option {
+ return func(cfg *Config) error {
+ cfg.UserFxOptions = append(cfg.UserFxOptions, opts...)
+ return nil
+ }
+}
+
+// ShareTCPListener shares the same listen address between TCP and Websocket
+// transports. This lets both transports use the same TCP port.
+//
+// Currently this behavior is opt-in. In a future release this will be the
+// default, and this option will be removed.
+func ShareTCPListener() Option {
+ return func(cfg *Config) error {
+ cfg.ShareTCPListener = true
+ return nil
+ }
+}
diff --git a/options_filter.go b/options_filter.go
new file mode 100644
index 0000000000..fa8df7a2f7
--- /dev/null
+++ b/options_filter.go
@@ -0,0 +1,36 @@
+package libp2p
+
+import (
+ "github.com/libp2p/go-libp2p/core/connmgr"
+ "github.com/libp2p/go-libp2p/core/control"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+// filtersConnectionGater is an adapter that turns multiaddr.Filter into a
+// connmgr.ConnectionGater.
+type filtersConnectionGater ma.Filters
+
+var _ connmgr.ConnectionGater = (*filtersConnectionGater)(nil)
+
+func (f *filtersConnectionGater) InterceptAddrDial(_ peer.ID, addr ma.Multiaddr) (allow bool) {
+ return !(*ma.Filters)(f).AddrBlocked(addr)
+}
+
+func (f *filtersConnectionGater) InterceptPeerDial(_ peer.ID) (allow bool) {
+ return true
+}
+
+func (f *filtersConnectionGater) InterceptAccept(connAddr network.ConnMultiaddrs) (allow bool) {
+ return !(*ma.Filters)(f).AddrBlocked(connAddr.RemoteMultiaddr())
+}
+
+func (f *filtersConnectionGater) InterceptSecured(_ network.Direction, _ peer.ID, connAddr network.ConnMultiaddrs) (allow bool) {
+ return !(*ma.Filters)(f).AddrBlocked(connAddr.RemoteMultiaddr())
+}
+
+func (f *filtersConnectionGater) InterceptUpgraded(_ network.Conn) (allow bool, reason control.DisconnectReason) {
+ return true, 0
+}
diff --git a/p2p/discovery/backoff/backoff.go b/p2p/discovery/backoff/backoff.go
new file mode 100644
index 0000000000..3fb3cfc624
--- /dev/null
+++ b/p2p/discovery/backoff/backoff.go
@@ -0,0 +1,232 @@
+package backoff
+
+import (
+ "math"
+ "math/rand"
+ "sync"
+ "time"
+
+ logging "github.com/libp2p/go-libp2p/gologshim"
+)
+
+var log = logging.Logger("discovery-backoff")
+
+type BackoffFactory func() BackoffStrategy
+
+// BackoffStrategy describes how backoff will be implemented. BackoffStrategies are stateful.
+type BackoffStrategy interface {
+ // Delay calculates how long the next backoff duration should be, given the prior calls to Delay
+ Delay() time.Duration
+ // Reset clears the internal state of the BackoffStrategy
+ Reset()
+}
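+
+// A typical consumer loop, sketched (try is a placeholder for the fallible
+// operation being retried):
+//
+//    b := factory()
+//    for {
+//        if err := try(); err == nil {
+//            b.Reset() // success: start the next failure streak from scratch
+//            break
+//        }
+//        time.Sleep(b.Delay())
+//    }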
+
+// Jitter implementations taken roughly from https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
+
+// Jitter must return a duration between min and max. Min must be lower than, or equal to, max.
+type Jitter func(duration, min, max time.Duration, rng *rand.Rand) time.Duration
+
+// FullJitter returns a random number, uniformly chosen from the range [min, boundedDur].
+// boundedDur is the duration bounded between min and max.
+func FullJitter(duration, min, max time.Duration, rng *rand.Rand) time.Duration {
+ if duration <= min {
+ return min
+ }
+
+ normalizedDur := boundedDuration(duration, min, max) - min
+
+ return boundedDuration(time.Duration(rng.Int63n(int64(normalizedDur)))+min, min, max)
+}
+
+// NoJitter returns the duration bounded between min and max
+func NoJitter(duration, min, max time.Duration, _ *rand.Rand) time.Duration {
+ return boundedDuration(duration, min, max)
+}
+
+type randomizedBackoff struct {
+ min time.Duration
+ max time.Duration
+ rng *rand.Rand
+}
+
+func (b *randomizedBackoff) BoundedDelay(duration time.Duration) time.Duration {
+ return boundedDuration(duration, b.min, b.max)
+}
+
+func boundedDuration(d, min, max time.Duration) time.Duration {
+ if d < min {
+ return min
+ }
+ if d > max {
+ return max
+ }
+ return d
+}
+
+type attemptBackoff struct {
+ attempt int
+ jitter Jitter
+ randomizedBackoff
+}
+
+func (b *attemptBackoff) Reset() {
+ b.attempt = 0
+}
+
+// NewFixedBackoff creates a BackoffFactory with a constant backoff duration
+func NewFixedBackoff(delay time.Duration) BackoffFactory {
+ return func() BackoffStrategy {
+ return &fixedBackoff{delay: delay}
+ }
+}
+
+type fixedBackoff struct {
+ delay time.Duration
+}
+
+func (b *fixedBackoff) Delay() time.Duration {
+ return b.delay
+}
+
+func (b *fixedBackoff) Reset() {}
+
+// NewPolynomialBackoff creates a BackoffFactory with backoff of the form c0*x^0, c1*x^1, ...cn*x^n, where x is the attempt number.
+// jitter is the function for adding randomness around the backoff.
+// timeUnits are the units of time the polynomial is evaluated in.
+// polyCoefs is the array of polynomial coefficients [c0, c1, ... cn].
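+//
+// For example, with coefficients [0.5, 2, 3], timeUnits of one second and
+// NoJitter (the parameters used in the tests below), attempt x yields
+// 0.5 + 2x + 3x^2 seconds: 0.5s (clamped up to the configured min), then
+// 5.5s, 16.5s, and 33.5s (clamped down to the configured max).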
+func NewPolynomialBackoff(min, max time.Duration, jitter Jitter,
+ timeUnits time.Duration, polyCoefs []float64, rngSrc rand.Source) BackoffFactory {
+ rng := rand.New(&lockedSource{src: rngSrc})
+ return func() BackoffStrategy {
+ return &polynomialBackoff{
+ attemptBackoff: attemptBackoff{
+ randomizedBackoff: randomizedBackoff{
+ min: min,
+ max: max,
+ rng: rng,
+ },
+ jitter: jitter,
+ },
+ timeUnits: timeUnits,
+ poly: polyCoefs,
+ }
+ }
+}
+
+type polynomialBackoff struct {
+ attemptBackoff
+ timeUnits time.Duration
+ poly []float64
+}
+
+func (b *polynomialBackoff) Delay() time.Duration {
+ var polySum float64
+ switch len(b.poly) {
+ case 0:
+ return 0
+ case 1:
+ polySum = b.poly[0]
+ default:
+ polySum = b.poly[0]
+ exp := 1
+ attempt := b.attempt
+ b.attempt++
+
+ for _, c := range b.poly[1:] {
+ exp *= attempt
+ polySum += float64(exp) * c
+ }
+ }
+ return b.jitter(time.Duration(float64(b.timeUnits)*polySum), b.min, b.max, b.rng)
+}
+
+// NewExponentialBackoff creates a BackoffFactory with backoff of the form base^x + offset, where x is the attempt number.
+// jitter is the function for adding randomness around the backoff.
+// timeUnits are the units of time base^x is evaluated in.
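+//
+// For example, with min=650ms, max=7s, NoJitter, timeUnits of one second,
+// base=1.5 and offset=-400ms (the parameters used in the tests below), the
+// delay for attempt x is 1.5^x seconds minus 400ms: 650ms (600ms clamped up
+// to min), 1.1s, 1.85s, 2.975s, 4.6625s, then capped at the 7s max.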
+func NewExponentialBackoff(min, max time.Duration, jitter Jitter,
+ timeUnits time.Duration, base float64, offset time.Duration, rngSrc rand.Source) BackoffFactory {
+ rng := rand.New(&lockedSource{src: rngSrc})
+ return func() BackoffStrategy {
+ return &exponentialBackoff{
+ attemptBackoff: attemptBackoff{
+ randomizedBackoff: randomizedBackoff{
+ min: min,
+ max: max,
+ rng: rng,
+ },
+ jitter: jitter,
+ },
+ timeUnits: timeUnits,
+ base: base,
+ offset: offset,
+ }
+ }
+}
+
+type exponentialBackoff struct {
+ attemptBackoff
+ timeUnits time.Duration
+ base float64
+ offset time.Duration
+}
+
+func (b *exponentialBackoff) Delay() time.Duration {
+ attempt := b.attempt
+ b.attempt++
+ return b.jitter(
+ time.Duration(math.Pow(b.base, float64(attempt))*float64(b.timeUnits))+b.offset, b.min, b.max, b.rng)
+}
+
+// NewExponentialDecorrelatedJitter creates a BackoffFactory with backoff roughly of the form base^x, where x is the attempt number.
+// Delays start at the minimum duration and after each attempt delay = rand(min, delay * base), bounded by the max
+// See https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/ for more information
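+//
+// For example, with min=100ms and base=3, the first delay is 100ms and each
+// subsequent delay is drawn uniformly from [100ms, 3*lastDelay], clamped to max.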
+func NewExponentialDecorrelatedJitter(min, max time.Duration, base float64, rngSrc rand.Source) BackoffFactory {
+ rng := rand.New(&lockedSource{src: rngSrc})
+ return func() BackoffStrategy {
+ return &exponentialDecorrelatedJitter{
+ randomizedBackoff: randomizedBackoff{
+ min: min,
+ max: max,
+ rng: rng,
+ },
+ base: base,
+ }
+ }
+}
+
+type exponentialDecorrelatedJitter struct {
+ randomizedBackoff
+ base float64
+ lastDelay time.Duration
+}
+
+func (b *exponentialDecorrelatedJitter) Delay() time.Duration {
+ if b.lastDelay < b.min {
+ b.lastDelay = b.min
+ return b.lastDelay
+ }
+
+ nextMax := int64(float64(b.lastDelay) * b.base)
+ b.lastDelay = boundedDuration(time.Duration(b.rng.Int63n(nextMax-int64(b.min)))+b.min, b.min, b.max)
+ return b.lastDelay
+}
+
+func (b *exponentialDecorrelatedJitter) Reset() { b.lastDelay = 0 }
+
+type lockedSource struct {
+ lk sync.Mutex
+ src rand.Source
+}
+
+func (r *lockedSource) Int63() (n int64) {
+ r.lk.Lock()
+ n = r.src.Int63()
+ r.lk.Unlock()
+ return
+}
+
+func (r *lockedSource) Seed(seed int64) {
+ r.lk.Lock()
+ r.src.Seed(seed)
+ r.lk.Unlock()
+}
diff --git a/p2p/discovery/backoff/backoff_test.go b/p2p/discovery/backoff/backoff_test.go
new file mode 100644
index 0000000000..e31ea1c809
--- /dev/null
+++ b/p2p/discovery/backoff/backoff_test.go
@@ -0,0 +1,190 @@
+package backoff
+
+import (
+ "fmt"
+ "math/rand"
+ "testing"
+ "time"
+
+ "golang.org/x/sync/errgroup"
+)
+
+func checkDelay(bkf BackoffStrategy, expected time.Duration, t *testing.T) {
+ t.Helper()
+ if calculated := bkf.Delay(); calculated != expected {
+ t.Fatalf("expected %v, got %v", expected, calculated)
+ }
+}
+
+func TestFixedBackoff(t *testing.T) {
+ startDelay := time.Second
+ delay := startDelay
+
+ bkf := NewFixedBackoff(delay)
+ b1 := bkf()
+ b2 := bkf()
+
+ if b1.Delay() != startDelay || b2.Delay() != startDelay {
+ t.Fatal("incorrect delay time")
+ }
+
+ if b1.Delay() != startDelay {
+ t.Fatal("backoff is stateful")
+ }
+
+ if b1.Reset(); b1.Delay() != startDelay {
+ t.Fatalf("Reset does something")
+ }
+}
+
+func TestPolynomialBackoff(t *testing.T) {
+ bkf := NewPolynomialBackoff(time.Second, time.Second*33, NoJitter, time.Second, []float64{0.5, 2, 3}, rand.NewSource(0))
+ b1 := bkf()
+ b2 := bkf()
+
+ if b1.Delay() != time.Second || b2.Delay() != time.Second {
+ t.Fatal("incorrect delay time")
+ }
+
+ checkDelay(b1, time.Millisecond*5500, t)
+ checkDelay(b1, time.Millisecond*16500, t)
+ checkDelay(b1, time.Millisecond*33000, t)
+ checkDelay(b2, time.Millisecond*5500, t)
+
+ b1.Reset()
+ b1.Delay()
+ checkDelay(b1, time.Millisecond*5500, t)
+}
+
+func TestExponentialBackoff(t *testing.T) {
+ bkf := NewExponentialBackoff(time.Millisecond*650, time.Second*7, NoJitter, time.Second, 1.5, -time.Millisecond*400, rand.NewSource(0))
+ b1 := bkf()
+ b2 := bkf()
+
+ if b1.Delay() != time.Millisecond*650 || b2.Delay() != time.Millisecond*650 {
+ t.Fatal("incorrect delay time")
+ }
+
+ checkDelay(b1, time.Millisecond*1100, t)
+ checkDelay(b1, time.Millisecond*1850, t)
+ checkDelay(b1, time.Millisecond*2975, t)
+ checkDelay(b1, time.Microsecond*4662500, t)
+ checkDelay(b1, time.Second*7, t)
+ checkDelay(b2, time.Millisecond*1100, t)
+
+ b1.Reset()
+ b1.Delay()
+ checkDelay(b1, time.Millisecond*1100, t)
+}
+
+func minMaxJitterTest(jitter Jitter, t *testing.T) {
+ rng := rand.New(rand.NewSource(0))
+ if jitter(time.Nanosecond, time.Hour*10, time.Hour*20, rng) < time.Hour*10 {
+ t.Fatal("Min not working")
+ }
+ if jitter(time.Hour, time.Nanosecond, time.Nanosecond*10, rng) > time.Nanosecond*10 {
+ t.Fatal("Max not working")
+ }
+}
+
+func TestNoJitter(t *testing.T) {
+ minMaxJitterTest(NoJitter, t)
+ for i := 0; i < 10; i++ {
+ expected := time.Second * time.Duration(i)
+ if calculated := NoJitter(expected, time.Duration(0), time.Second*100, nil); calculated != expected {
+ t.Fatalf("expected %v, got %v", expected, calculated)
+ }
+ }
+}
+
+func TestFullJitter(t *testing.T) {
+ rng := rand.New(rand.NewSource(0))
+ minMaxJitterTest(FullJitter, t)
+ const numBuckets = 51
+ const multiplier = 10
+ const threshold = 20
+
+ histogram := make([]int, numBuckets)
+
+ for i := 0; i < (numBuckets-1)*multiplier; i++ {
+ started := time.Nanosecond * 50
+ calculated := FullJitter(started, 0, 100, rng)
+ histogram[calculated]++
+ }
+
+ for _, count := range histogram {
+ if count > threshold {
+ t.Fatal("jitter is not close to evenly spread")
+ }
+ }
+
+ if histogram[numBuckets-1] > 0 {
+ t.Fatal("jitter increased overall time")
+ }
+}
+
+func TestManyBackoffFactory(t *testing.T) {
+ rngSource := rand.NewSource(0)
+ concurrent := 10
+
+ t.Run("Exponential", func(_ *testing.T) {
+ testManyBackoffFactoryHelper(concurrent,
+ NewExponentialBackoff(time.Millisecond*650, time.Second*7, FullJitter, time.Second, 1.5, -time.Millisecond*400, rngSource),
+ )
+ })
+ t.Run("Polynomial", func(_ *testing.T) {
+ testManyBackoffFactoryHelper(concurrent,
+ NewPolynomialBackoff(time.Second, time.Second*33, NoJitter, time.Second, []float64{0.5, 2, 3}, rngSource),
+ )
+ })
+ t.Run("Fixed", func(_ *testing.T) {
+ testManyBackoffFactoryHelper(concurrent,
+ NewFixedBackoff(time.Second),
+ )
+ })
+}
+
+func testManyBackoffFactoryHelper(concurrent int, bkf BackoffFactory) {
+ backoffCh := make(chan BackoffStrategy, concurrent)
+
+ errGrp := errgroup.Group{}
+ for i := 0; i < concurrent; i++ {
+ errGrp.Go(func() (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ err = fmt.Errorf("panic %v", r)
+ }
+ }()
+ backoffCh <- bkf()
+ return
+ })
+ }
+ if err := errGrp.Wait(); err != nil {
+ panic(err)
+ }
+ close(backoffCh)
+
+ errGrp = errgroup.Group{}
+ for b := range backoffCh {
+ backoff := b
+ errGrp.Go(func() (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ err = fmt.Errorf("panic %v", r)
+ }
+ }()
+
+ for i := 0; i < 5; i++ {
+ for j := 0; j < 10; j++ {
+ backoff.Delay()
+ }
+ backoff.Reset()
+ }
+ return
+ })
+ }
+
+ if err := errGrp.Wait(); err != nil {
+ panic(err)
+ }
+}
diff --git a/p2p/discovery/backoff/backoffcache.go b/p2p/discovery/backoff/backoffcache.go
new file mode 100644
index 0000000000..c8f11802e7
--- /dev/null
+++ b/p2p/discovery/backoff/backoffcache.go
@@ -0,0 +1,329 @@
+package backoff
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/discovery"
+ "github.com/libp2p/go-libp2p/core/peer"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+// BackoffDiscovery is an implementation of discovery that caches peer data and attenuates repeated queries
+type BackoffDiscovery struct {
+ disc discovery.Discovery
+ stratFactory BackoffFactory
+ peerCache map[string]*backoffCache
+ peerCacheMux sync.RWMutex
+
+ parallelBufSz int
+ returnedBufSz int
+
+ clock clock
+}
+
+type BackoffDiscoveryOption func(*BackoffDiscovery) error
+
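+// NewBackoffDiscovery wraps disc so that FindPeers results are cached per
+// namespace and repeat queries are throttled by strategies produced by
+// stratFactory. A minimal sketch, assuming an underlying discovery d:
+//
+//    strat := NewExponentialBackoff(time.Second, time.Minute, FullJitter,
+//        time.Second, 2, 0, rand.NewSource(0))
+//    cached, err := NewBackoffDiscovery(d, strat)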
+func NewBackoffDiscovery(disc discovery.Discovery, stratFactory BackoffFactory, opts ...BackoffDiscoveryOption) (discovery.Discovery, error) {
+ b := &BackoffDiscovery{
+ disc: disc,
+ stratFactory: stratFactory,
+ peerCache: make(map[string]*backoffCache),
+
+ parallelBufSz: 32,
+ returnedBufSz: 32,
+
+ clock: realClock{},
+ }
+
+ for _, opt := range opts {
+ if err := opt(b); err != nil {
+ return nil, err
+ }
+ }
+
+ return b, nil
+}
+
+// WithBackoffDiscoverySimultaneousQueryBufferSize sets the buffer size for the channels between the main FindPeers query
+// for a given namespace and all simultaneous FindPeers queries for the namespace
+func WithBackoffDiscoverySimultaneousQueryBufferSize(size int) BackoffDiscoveryOption {
+ return func(b *BackoffDiscovery) error {
+ if size < 0 {
+ return fmt.Errorf("cannot set size to be smaller than 0")
+ }
+ b.parallelBufSz = size
+ return nil
+ }
+}
+
+// WithBackoffDiscoveryReturnedChannelSize sets the size of the buffer to be used during a FindPeer query.
+// Note: This does not apply if the query occurs during the backoff time
+func WithBackoffDiscoveryReturnedChannelSize(size int) BackoffDiscoveryOption {
+ return func(b *BackoffDiscovery) error {
+ if size < 0 {
+ return fmt.Errorf("cannot set size to be smaller than 0")
+ }
+ b.returnedBufSz = size
+ return nil
+ }
+}
+
+type clock interface {
+ Now() time.Time
+}
+
+type realClock struct{}
+
+func (c realClock) Now() time.Time {
+ return time.Now()
+}
+
+type backoffCache struct {
+ // strat is assigned on creation and not written to
+ strat BackoffStrategy
+
+ mux sync.Mutex // guards writes to all following fields
+ nextDiscover time.Time
+ prevPeers map[peer.ID]peer.AddrInfo
+ peers map[peer.ID]peer.AddrInfo
+ sendingChs map[chan peer.AddrInfo]int
+ ongoing bool
+
+ clock clock
+}
+
+func (d *BackoffDiscovery) Advertise(ctx context.Context, ns string, opts ...discovery.Option) (time.Duration, error) {
+ return d.disc.Advertise(ctx, ns, opts...)
+}
+
+func (d *BackoffDiscovery) FindPeers(ctx context.Context, ns string, opts ...discovery.Option) (<-chan peer.AddrInfo, error) {
+ // Get options
+ var options discovery.Options
+ err := options.Apply(opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ // Get cached peers
+ d.peerCacheMux.RLock()
+ c, ok := d.peerCache[ns]
+ d.peerCacheMux.RUnlock()
+
+ /*
+ Overall plan:
+ If it's time to look for peers, look for peers, then return them.
+ If it's not time yet, return the cached peers.
+ If it's time to look for peers but a search is already in progress, attach to the ongoing request.
+ */
+
+ // Setup cache if we don't have one yet
+ if !ok {
+ pc := &backoffCache{
+ nextDiscover: time.Time{},
+ prevPeers: make(map[peer.ID]peer.AddrInfo),
+ peers: make(map[peer.ID]peer.AddrInfo),
+ sendingChs: make(map[chan peer.AddrInfo]int),
+ strat: d.stratFactory(),
+ clock: d.clock,
+ }
+
+ d.peerCacheMux.Lock()
+ c, ok = d.peerCache[ns]
+
+ if !ok {
+ d.peerCache[ns] = pc
+ c = pc
+ }
+
+ d.peerCacheMux.Unlock()
+ }
+
+ c.mux.Lock()
+ defer c.mux.Unlock()
+
+ timeExpired := d.clock.Now().After(c.nextDiscover)
+
+ // If it's not yet time to search again and no searches are in progress then return cached peers
+ if !(timeExpired || c.ongoing) {
+ chLen := options.Limit
+
+ if chLen == 0 {
+ chLen = len(c.prevPeers)
+ } else if chLen > len(c.prevPeers) {
+ chLen = len(c.prevPeers)
+ }
+ pch := make(chan peer.AddrInfo, chLen)
+ for _, ai := range c.prevPeers {
+ select {
+ case pch <- ai:
+ default:
+ // skip if we have asked for a lower limit than the number of peers known
+ }
+ }
+ close(pch)
+ return pch, nil
+ }
+
+ // If a request is not already in progress setup a dispatcher channel for dispatching incoming peers
+ if !c.ongoing {
+ pch, err := d.disc.FindPeers(ctx, ns, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ c.ongoing = true
+ go findPeerDispatcher(ctx, c, pch)
+ }
+
+ // Setup receiver channel for receiving peers from ongoing requests
+ evtCh := make(chan peer.AddrInfo, d.parallelBufSz)
+ pch := make(chan peer.AddrInfo, d.returnedBufSz)
+ rcvPeers := make([]peer.AddrInfo, 0, 32)
+ for _, ai := range c.peers {
+ rcvPeers = append(rcvPeers, ai)
+ }
+ c.sendingChs[evtCh] = options.Limit
+
+ go findPeerReceiver(ctx, pch, evtCh, rcvPeers)
+
+ return pch, nil
+}
+
+func findPeerDispatcher(ctx context.Context, c *backoffCache, pch <-chan peer.AddrInfo) {
+ defer func() {
+ c.mux.Lock()
+
+ // If the peer addresses have changed reset the backoff
+ if checkUpdates(c.prevPeers, c.peers) {
+ c.strat.Reset()
+ c.prevPeers = c.peers
+ }
+ c.nextDiscover = c.clock.Now().Add(c.strat.Delay())
+
+ c.ongoing = false
+ c.peers = make(map[peer.ID]peer.AddrInfo)
+
+ for ch := range c.sendingChs {
+ close(ch)
+ }
+ c.sendingChs = make(map[chan peer.AddrInfo]int)
+ c.mux.Unlock()
+ }()
+
+ for {
+ select {
+ case ai, ok := <-pch:
+ if !ok {
+ return
+ }
+ c.mux.Lock()
+
+ // If we receive the same peer multiple times return the address union
+ var sendAi peer.AddrInfo
+ if prevAi, ok := c.peers[ai.ID]; ok {
+ if combinedAi := mergeAddrInfos(prevAi, ai); combinedAi != nil {
+ sendAi = *combinedAi
+ } else {
+ c.mux.Unlock()
+ continue
+ }
+ } else {
+ sendAi = ai
+ }
+
+ c.peers[ai.ID] = sendAi
+
+ for ch, rem := range c.sendingChs {
+ if rem > 0 {
+ ch <- sendAi
+ c.sendingChs[ch] = rem - 1
+ }
+ }
+
+ c.mux.Unlock()
+ case <-ctx.Done():
+ return
+ }
+ }
+}
+
+func findPeerReceiver(ctx context.Context, pch, evtCh chan peer.AddrInfo, rcvPeers []peer.AddrInfo) {
+ defer close(pch)
+
+ for {
+ select {
+ case ai, ok := <-evtCh:
+ if ok {
+ rcvPeers = append(rcvPeers, ai)
+
+ sentAll := true
+ sendPeers:
+ for i, p := range rcvPeers {
+ select {
+ case pch <- p:
+ default:
+ rcvPeers = rcvPeers[i:]
+ sentAll = false
+ break sendPeers
+ }
+ }
+ if sentAll {
+ rcvPeers = []peer.AddrInfo{}
+ }
+ } else {
+ for _, p := range rcvPeers {
+ select {
+ case pch <- p:
+ case <-ctx.Done():
+ return
+ }
+ }
+ return
+ }
+ case <-ctx.Done():
+ return
+ }
+ }
+}
+
+func mergeAddrInfos(prevAi, newAi peer.AddrInfo) *peer.AddrInfo {
+ seen := make(map[string]struct{}, len(prevAi.Addrs))
+ combinedAddrs := make([]ma.Multiaddr, 0, len(prevAi.Addrs))
+ addAddrs := func(addrs []ma.Multiaddr) {
+ for _, addr := range addrs {
+ if _, ok := seen[addr.String()]; ok {
+ continue
+ }
+ seen[addr.String()] = struct{}{}
+ combinedAddrs = append(combinedAddrs, addr)
+ }
+ }
+ addAddrs(prevAi.Addrs)
+ addAddrs(newAi.Addrs)
+
+ if len(combinedAddrs) > len(prevAi.Addrs) {
+ combinedAi := &peer.AddrInfo{ID: prevAi.ID, Addrs: combinedAddrs}
+ return combinedAi
+ }
+ return nil
+}
+
+func checkUpdates(orig, update map[peer.ID]peer.AddrInfo) bool {
+ if len(orig) != len(update) {
+ return true
+ }
+ for p, ai := range update {
+ if prevAi, ok := orig[p]; ok {
+ if combinedAi := mergeAddrInfos(prevAi, ai); combinedAi != nil {
+ return true
+ }
+ } else {
+ return true
+ }
+ }
+ return false
+}
diff --git a/p2p/discovery/backoff/backoffcache_test.go b/p2p/discovery/backoff/backoffcache_test.go
new file mode 100644
index 0000000000..6679415a7f
--- /dev/null
+++ b/p2p/discovery/backoff/backoffcache_test.go
@@ -0,0 +1,315 @@
+package backoff
+
+import (
+ "context"
+ "math/rand"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/p2p/discovery/mocks"
+ bhost "github.com/libp2p/go-libp2p/p2p/host/blank"
+ swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
+
+ "github.com/libp2p/go-libp2p/core/discovery"
+ "github.com/libp2p/go-libp2p/core/peer"
+
+ mockClock "github.com/benbjohnson/clock"
+)
+
+type delayedDiscovery struct {
+ disc discovery.Discovery
+ delay time.Duration
+ clock *mockClock.Mock
+}
+
+func (d *delayedDiscovery) Advertise(ctx context.Context, ns string, opts ...discovery.Option) (time.Duration, error) {
+ return d.disc.Advertise(ctx, ns, opts...)
+}
+
+func (d *delayedDiscovery) FindPeers(ctx context.Context, ns string, opts ...discovery.Option) (<-chan peer.AddrInfo, error) {
+ dch, err := d.disc.FindPeers(ctx, ns, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ ch := make(chan peer.AddrInfo, 32)
+ doneCh := make(chan struct{})
+ go func() {
+ defer close(ch)
+ defer close(doneCh)
+ for ai := range dch {
+ ch <- ai
+ d.clock.Sleep(d.delay)
+ }
+ }()
+
+ // Tick the clock forward to advance the sleep above
+ go func() {
+ for {
+ select {
+ case <-doneCh:
+ return
+ default:
+ d.clock.Add(d.delay)
+ }
+ }
+ }()
+
+ return ch, nil
+}
+
+func assertNumPeers(t *testing.T, ctx context.Context, d discovery.Discovery, ns string, count int) {
+ t.Helper()
+ assertNumPeersWithLimit(t, ctx, d, ns, 10, count)
+}
+
+func assertNumPeersWithLimit(t *testing.T, ctx context.Context, d discovery.Discovery, ns string, limit int, count int) {
+ t.Helper()
+ peerCh, err := d.FindPeers(ctx, ns, discovery.Limit(limit))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ peerset := make(map[peer.ID]struct{})
+ for p := range peerCh {
+ peerset[p.ID] = struct{}{}
+ }
+
+ if len(peerset) != count {
+ t.Fatalf("Was supposed to find %d, found %d instead", count, len(peerset))
+ }
+}
+
+// withClock lets you override the default time.Now() call. Useful for tests.
+func withClock(c clock) BackoffDiscoveryOption {
+ return func(b *BackoffDiscovery) error {
+ b.clock = c
+ return nil
+ }
+}
+
+func TestBackoffDiscoverySingleBackoff(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ clock := mockClock.NewMock()
+ discServer := mocks.NewDiscoveryServer(clock)
+
+ h1 := bhost.NewBlankHost(swarmt.GenSwarm(t))
+ defer h1.Close()
+ h2 := bhost.NewBlankHost(swarmt.GenSwarm(t))
+ defer h2.Close()
+ d1 := mocks.NewDiscoveryClient(h1, discServer)
+ d2 := mocks.NewDiscoveryClient(h2, discServer)
+
+ bkf := NewExponentialBackoff(
+ time.Millisecond*100,
+ time.Second*10,
+ NoJitter,
+ time.Millisecond*100,
+ 2.5,
+ 0,
+ rand.NewSource(0),
+ )
+ dCache, err := NewBackoffDiscovery(d1, bkf, withClock(clock))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ const ns = "test"
+
+ // try adding a peer then find it
+ d1.Advertise(ctx, ns, discovery.TTL(time.Hour))
+ // Advance clock by one step
+ clock.Add(1)
+ assertNumPeers(t, ctx, dCache, ns, 1)
+
+ // add a new peer and make sure it is still hidden by the caching layer
+ d2.Advertise(ctx, ns, discovery.TTL(time.Hour))
+ // Advance clock by one step
+ clock.Add(1)
+ assertNumPeers(t, ctx, dCache, ns, 1)
+
+ // wait for cache to expire and check for the new peer
+ clock.Add(time.Millisecond * 110)
+ assertNumPeers(t, ctx, dCache, ns, 2)
+}
+
+func TestBackoffDiscoveryMultipleBackoff(t *testing.T) {
+ clock := mockClock.NewMock()
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ discServer := mocks.NewDiscoveryServer(clock)
+
+ h1 := bhost.NewBlankHost(swarmt.GenSwarm(t))
+ defer h1.Close()
+ h2 := bhost.NewBlankHost(swarmt.GenSwarm(t))
+ defer h2.Close()
+ d1 := mocks.NewDiscoveryClient(h1, discServer)
+ d2 := mocks.NewDiscoveryClient(h2, discServer)
+
+ // Startup delay is 0ms. First backoff after finding data is 100ms, second backoff is 250ms.
+ bkf := NewExponentialBackoff(
+ time.Millisecond*100,
+ time.Second*10,
+ NoJitter,
+ time.Millisecond*100,
+ 2.5,
+ 0,
+ rand.NewSource(0),
+ )
+ dCache, err := NewBackoffDiscovery(d1, bkf, withClock(clock))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ const ns = "test"
+
+ // try adding a peer then find it
+ d1.Advertise(ctx, ns, discovery.TTL(time.Hour))
+ // Advance clock by one step
+ clock.Add(1)
+ assertNumPeers(t, ctx, dCache, ns, 1)
+
+ // wait a little to make sure the extra request doesn't modify the backoff
+ clock.Add(time.Millisecond * 50) // 50 < 100
+ assertNumPeers(t, ctx, dCache, ns, 1)
+
+ // wait for backoff to expire and check if we increase it
+ clock.Add(time.Millisecond * 60) // 50+60 > 100
+ assertNumPeers(t, ctx, dCache, ns, 1)
+
+ d2.Advertise(ctx, ns, discovery.TTL(time.Millisecond*400))
+
+ clock.Add(time.Millisecond * 150) // 150 < 250
+ assertNumPeers(t, ctx, dCache, ns, 1)
+
+ clock.Add(time.Millisecond * 150) // 150 + 150 > 250
+ assertNumPeers(t, ctx, dCache, ns, 2)
+
+ // check that the backoff has been reset
+ // also checks that we can decrease our peer count (i.e. not just growing a set)
+ clock.Add(time.Millisecond * 110) // 110 > 100, also 150+150+110>400
+ assertNumPeers(t, ctx, dCache, ns, 1)
+}
+
+func TestBackoffDiscoverySimultaneousQuery(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ clock := mockClock.NewMock()
+ discServer := mocks.NewDiscoveryServer(clock)
+
+ // Testing with n larger than most internal buffer sizes (32)
+ n := 40
+ advertisers := make([]discovery.Discovery, n)
+
+ for i := 0; i < n; i++ {
+ h := bhost.NewBlankHost(swarmt.GenSwarm(t))
+ defer h.Close()
+ advertisers[i] = mocks.NewDiscoveryClient(h, discServer)
+ }
+
+ d1 := &delayedDiscovery{advertisers[0], time.Millisecond * 10, clock}
+
+ bkf := NewFixedBackoff(time.Millisecond * 200)
+ dCache, err := NewBackoffDiscovery(d1, bkf, withClock(clock))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ const ns = "test"
+
+ for _, a := range advertisers {
+ if _, err := a.Advertise(ctx, ns, discovery.TTL(time.Hour)); err != nil {
+ t.Fatal(err)
+ }
+ }
+ // Advance clock by one step
+ clock.Add(1)
+
+ ch1, err := dCache.FindPeers(ctx, ns)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ <-ch1
+ ch2, err := dCache.FindPeers(ctx, ns)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ szCh2 := 0
+ for range ch2 {
+ szCh2++
+ }
+
+ szCh1 := 1
+ for range ch1 {
+ szCh1++
+ }
+
+ if szCh1 != n || szCh2 != n {
+ t.Fatalf("Channels returned %d, %d elements instead of %d", szCh1, szCh2, n)
+ }
+}
+
+func TestBackoffDiscoveryCacheCapacity(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ clock := mockClock.NewMock()
+ discServer := mocks.NewDiscoveryServer(clock)
+
+ // Testing with n larger than most internal buffer sizes (32)
+ n := 40
+ advertisers := make([]discovery.Discovery, n)
+
+ for i := 0; i < n; i++ {
+ h := bhost.NewBlankHost(swarmt.GenSwarm(t))
+ defer h.Close()
+ advertisers[i] = mocks.NewDiscoveryClient(h, discServer)
+ }
+
+ h1 := bhost.NewBlankHost(swarmt.GenSwarm(t))
+ d1 := mocks.NewDiscoveryClient(h1, discServer)
+
+ discoveryInterval := time.Millisecond * 10
+
+ bkf := NewFixedBackoff(discoveryInterval)
+ dCache, err := NewBackoffDiscovery(d1, bkf, withClock(clock))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ const ns = "test"
+
+ // add peers
+ for i := 0; i < n; i++ {
+ advertisers[i].Advertise(ctx, ns, discovery.TTL(time.Hour))
+ }
+ // Advance clock by one step
+ clock.Add(1)
+
+ // Request all peers, all will be present
+ assertNumPeersWithLimit(t, ctx, dCache, ns, n, n)
+
+ // Request peers with a lower limit
+ assertNumPeersWithLimit(t, ctx, dCache, ns, n-1, n-1)
+
+ // Wait a little time but don't allow cache to expire
+ clock.Add(discoveryInterval / 10)
+
+ // Request peers with a lower limit this time using cache
+ // Here we are testing that the cache logic does not block when there are more peers known than the limit requested
+ // See https://github.com/libp2p/go-libp2p-discovery/issues/67
+ assertNumPeersWithLimit(t, ctx, dCache, ns, n-1, n-1)
+
+ // Wait for next discovery so next request will bypass cache
+ clock.Add(time.Millisecond * 100)
+
+ // Ask for all peers again
+ assertNumPeersWithLimit(t, ctx, dCache, ns, n, n)
+}
diff --git a/p2p/discovery/backoff/backoffconnector.go b/p2p/discovery/backoff/backoffconnector.go
new file mode 100644
index 0000000000..4e4c34b19f
--- /dev/null
+++ b/p2p/discovery/backoff/backoffconnector.go
@@ -0,0 +1,94 @@
+package backoff
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/peer"
+
+ lru "github.com/hashicorp/golang-lru/v2"
+)
+
+// BackoffConnector is a utility to connect to peers, but only if we have not tried connecting to them recently.
+type BackoffConnector struct {
+ cache *lru.TwoQueueCache[peer.ID, *connCacheData]
+ host host.Host
+ connTryDur time.Duration
+ backoff BackoffFactory
+ mux sync.Mutex
+}
+
+// NewBackoffConnector creates a utility to connect to peers, but only if we have not tried connecting to them recently.
+// cacheSize is the size of the TwoQueueCache tracking recent connection attempts.
+// connectionTryDuration is how long we attempt to connect to a peer before giving up.
+// backoff describes the strategy used to decide how long to back off after previously attempting to connect to a peer.
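+//
+// A minimal usage sketch, assuming a host h and a channel of discovered peers
+// peerCh (the cache size and durations here are illustrative, not recommended values):
+//
+//  connector, err := NewBackoffConnector(h, 1000, time.Minute, NewFixedBackoff(time.Minute))
+//  if err != nil { ... }
+//  go connector.Connect(ctx, peerCh)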
+func NewBackoffConnector(h host.Host, cacheSize int, connectionTryDuration time.Duration, backoff BackoffFactory) (*BackoffConnector, error) {
+ cache, err := lru.New2Q[peer.ID, *connCacheData](cacheSize)
+ if err != nil {
+ return nil, err
+ }
+
+ return &BackoffConnector{
+ cache: cache,
+ host: h,
+ connTryDur: connectionTryDuration,
+ backoff: backoff,
+ }, nil
+}
+
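+// connCacheData tracks, per peer, when the next connection attempt is allowed and
+// the backoff strategy used to push that time further out on each attempt.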
+type connCacheData struct {
+ nextTry time.Time
+ strat BackoffStrategy
+}
+
+// Connect attempts to connect to the peers passed in by peerCh. Will not connect to peers if they are within the backoff period.
+// As Connect will attempt to dial peers as soon as it learns about them, the caller should try to keep the number,
+// and rate, of inbound peers manageable.
+func (c *BackoffConnector) Connect(ctx context.Context, peerCh <-chan peer.AddrInfo) {
+ for {
+ select {
+ case pi, ok := <-peerCh:
+ if !ok {
+ return
+ }
+
+ if pi.ID == c.host.ID() || pi.ID == "" {
+ continue
+ }
+
+ c.mux.Lock()
+ var cachedPeer *connCacheData
+ if tv, ok := c.cache.Get(pi.ID); ok {
+ now := time.Now()
+ if now.Before(tv.nextTry) {
+ c.mux.Unlock()
+ continue
+ }
+
+ tv.nextTry = now.Add(tv.strat.Delay())
+ } else {
+ cachedPeer = &connCacheData{strat: c.backoff()}
+ cachedPeer.nextTry = time.Now().Add(cachedPeer.strat.Delay())
+ c.cache.Add(pi.ID, cachedPeer)
+ }
+ c.mux.Unlock()
+
+ go func(pi peer.AddrInfo) {
+ ctx, cancel := context.WithTimeout(ctx, c.connTryDur)
+ defer cancel()
+
+ err := c.host.Connect(ctx, pi)
+ if err != nil {
+ log.Debug("Error connecting to pubsub peer", "peer", pi.ID, "err", err)
+ return
+ }
+ }(pi)
+
+ case <-ctx.Done():
+ log.Info("discovery: backoff connector context error", "err", ctx.Err())
+ return
+ }
+ }
+}
diff --git a/p2p/discovery/backoff/backoffconnector_test.go b/p2p/discovery/backoff/backoffconnector_test.go
new file mode 100644
index 0000000000..e95796c3b6
--- /dev/null
+++ b/p2p/discovery/backoff/backoffconnector_test.go
@@ -0,0 +1,96 @@
+package backoff
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/peer"
+ bhost "github.com/libp2p/go-libp2p/p2p/host/blank"
+ swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+type maxDialHost struct {
+ host.Host
+
+ mux sync.Mutex
+ timesDialed map[peer.ID]int
+ maxTimesToDial map[peer.ID]int
+}
+
+func (h *maxDialHost) Connect(ctx context.Context, ai peer.AddrInfo) error {
+ pid := ai.ID
+
+ h.mux.Lock()
+ defer h.mux.Unlock()
+ numDials := h.timesDialed[pid]
+ numDials += 1
+ h.timesDialed[pid] = numDials
+
+ if maxDials, ok := h.maxTimesToDial[pid]; ok && numDials > maxDials {
+ return fmt.Errorf("should not be dialing peer %s", pid.String())
+ }
+
+ return h.Host.Connect(ctx, ai)
+}
+
+func getNetHosts(t *testing.T, n int) []host.Host {
+ var out []host.Host
+
+ for i := 0; i < n; i++ {
+ netw := swarmt.GenSwarm(t)
+ h := bhost.NewBlankHost(netw)
+ t.Cleanup(func() { h.Close() })
+ out = append(out, h)
+ }
+
+ return out
+}
+
+func loadCh(peers []host.Host) <-chan peer.AddrInfo {
+ ch := make(chan peer.AddrInfo, len(peers))
+ for _, p := range peers {
+ ch <- p.Peerstore().PeerInfo(p.ID())
+ }
+ close(ch)
+ return ch
+}
+
+func TestBackoffConnector(t *testing.T) {
+ hosts := getNetHosts(t, 5)
+ primary := &maxDialHost{
+ Host: hosts[0],
+ timesDialed: make(map[peer.ID]int),
+ maxTimesToDial: map[peer.ID]int{
+ hosts[1].ID(): 1,
+ hosts[2].ID(): 2,
+ },
+ }
+
+ bc, err := NewBackoffConnector(primary, 10, time.Minute, NewFixedBackoff(250*time.Millisecond))
+ require.NoError(t, err)
+
+ bc.Connect(context.Background(), loadCh(hosts))
+ require.Eventually(t, func() bool { return len(primary.Network().Peers()) == len(hosts)-1 }, 3*time.Second, 10*time.Millisecond)
+
+ time.Sleep(100 * time.Millisecond) // give connection attempts time to complete (relevant when using multiple transports)
+ for _, c := range primary.Network().Conns() {
+ c.Close()
+ }
+ require.Eventually(t, func() bool { return len(primary.Network().Peers()) == 0 }, 3*time.Second, 10*time.Millisecond)
+
+ bc.Connect(context.Background(), loadCh(hosts))
+ require.Empty(t, primary.Network().Peers(), "shouldn't be connected to any peers")
+
+ time.Sleep(time.Millisecond * 500)
+ bc.Connect(context.Background(), loadCh(hosts))
+ require.Eventually(t, func() bool { return len(primary.Network().Peers()) == len(hosts)-2 }, 3*time.Second, 10*time.Millisecond)
+ // make sure we actually don't connect to host 1 any more
+ time.Sleep(100 * time.Millisecond)
+ require.Len(t, primary.Network().Peers(), len(hosts)-2, "wrong number of connections")
+}
diff --git a/p2p/discovery/mdns.go b/p2p/discovery/mdns.go
deleted file mode 100644
index a7ec72ab12..0000000000
--- a/p2p/discovery/mdns.go
+++ /dev/null
@@ -1,207 +0,0 @@
-package discovery
-
-import (
- "context"
- "errors"
- "io"
- "io/ioutil"
- golog "log"
- "net"
- "sync"
- "time"
-
- logging "github.com/ipfs/go-log"
- "github.com/libp2p/go-libp2p-host"
- "github.com/libp2p/go-libp2p-peer"
- pstore "github.com/libp2p/go-libp2p-peerstore"
- ma "github.com/multiformats/go-multiaddr"
- manet "github.com/multiformats/go-multiaddr-net"
- "github.com/whyrusleeping/mdns"
-)
-
-var log = logging.Logger("mdns")
-
-const ServiceTag = "_ipfs-discovery._udp"
-
-type Service interface {
- io.Closer
- RegisterNotifee(Notifee)
- UnregisterNotifee(Notifee)
-}
-
-type Notifee interface {
- HandlePeerFound(pstore.PeerInfo)
-}
-
-type mdnsService struct {
- server *mdns.Server
- service *mdns.MDNSService
- host host.Host
- tag string
-
- lk sync.Mutex
- notifees []Notifee
- interval time.Duration
-}
-
-func getDialableListenAddrs(ph host.Host) ([]*net.TCPAddr, error) {
- var out []*net.TCPAddr
- for _, addr := range ph.Addrs() {
- na, err := manet.ToNetAddr(addr)
- if err != nil {
- continue
- }
- tcp, ok := na.(*net.TCPAddr)
- if ok {
- out = append(out, tcp)
- }
- }
- if len(out) == 0 {
- return nil, errors.New("failed to find good external addr from peerhost")
- }
- return out, nil
-}
-
-func NewMdnsService(ctx context.Context, peerhost host.Host, interval time.Duration, serviceTag string) (Service, error) {
-
- // TODO: dont let mdns use logging...
- golog.SetOutput(ioutil.Discard)
-
- var ipaddrs []net.IP
- port := 4001
-
- addrs, err := getDialableListenAddrs(peerhost)
- if err != nil {
- log.Warning(err)
- } else {
- port = addrs[0].Port
- for _, a := range addrs {
- ipaddrs = append(ipaddrs, a.IP)
- }
- }
-
- myid := peerhost.ID().Pretty()
-
- info := []string{myid}
- if serviceTag == "" {
- serviceTag = ServiceTag
- }
- service, err := mdns.NewMDNSService(myid, serviceTag, "", "", port, ipaddrs, info)
- if err != nil {
- return nil, err
- }
-
- // Create the mDNS server, defer shutdown
- server, err := mdns.NewServer(&mdns.Config{Zone: service})
- if err != nil {
- return nil, err
- }
-
- s := &mdnsService{
- server: server,
- service: service,
- host: peerhost,
- interval: interval,
- tag: serviceTag,
- }
-
- go s.pollForEntries(ctx)
-
- return s, nil
-}
-
-func (m *mdnsService) Close() error {
- return m.server.Shutdown()
-}
-
-func (m *mdnsService) pollForEntries(ctx context.Context) {
-
- ticker := time.NewTicker(m.interval)
- for {
- //execute mdns query right away at method call and then with every tick
- entriesCh := make(chan *mdns.ServiceEntry, 16)
- go func() {
- for entry := range entriesCh {
- m.handleEntry(entry)
- }
- }()
-
- log.Debug("starting mdns query")
- qp := &mdns.QueryParam{
- Domain: "local",
- Entries: entriesCh,
- Service: m.tag,
- Timeout: time.Second * 5,
- }
-
- err := mdns.Query(qp)
- if err != nil {
- log.Error("mdns lookup error: ", err)
- }
- close(entriesCh)
- log.Debug("mdns query complete")
-
- select {
- case <-ticker.C:
- continue
- case <-ctx.Done():
- log.Debug("mdns service halting")
- return
- }
- }
-}
-
-func (m *mdnsService) handleEntry(e *mdns.ServiceEntry) {
- log.Debugf("Handling MDNS entry: %s:%d %s", e.AddrV4, e.Port, e.Info)
- mpeer, err := peer.IDB58Decode(e.Info)
- if err != nil {
- log.Warning("Error parsing peer ID from mdns entry: ", err)
- return
- }
-
- if mpeer == m.host.ID() {
- log.Debug("got our own mdns entry, skipping")
- return
- }
-
- maddr, err := manet.FromNetAddr(&net.TCPAddr{
- IP: e.AddrV4,
- Port: e.Port,
- })
- if err != nil {
- log.Warning("Error parsing multiaddr from mdns entry: ", err)
- return
- }
-
- pi := pstore.PeerInfo{
- ID: mpeer,
- Addrs: []ma.Multiaddr{maddr},
- }
-
- m.lk.Lock()
- for _, n := range m.notifees {
- go n.HandlePeerFound(pi)
- }
- m.lk.Unlock()
-}
-
-func (m *mdnsService) RegisterNotifee(n Notifee) {
- m.lk.Lock()
- m.notifees = append(m.notifees, n)
- m.lk.Unlock()
-}
-
-func (m *mdnsService) UnregisterNotifee(n Notifee) {
- m.lk.Lock()
- found := -1
- for i, notif := range m.notifees {
- if notif == n {
- found = i
- break
- }
- }
- if found != -1 {
- m.notifees = append(m.notifees[:found], m.notifees[found+1:]...)
- }
- m.lk.Unlock()
-}
diff --git a/p2p/discovery/mdns/mdns.go b/p2p/discovery/mdns/mdns.go
new file mode 100644
index 0000000000..3a5e7a0ed0
--- /dev/null
+++ b/p2p/discovery/mdns/mdns.go
@@ -0,0 +1,202 @@
+package mdns
+
+import (
+ "context"
+ "errors"
+ "io"
+ "math/rand"
+ "strings"
+ "sync"
+
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/peer"
+
+ "github.com/libp2p/zeroconf/v2"
+
+ logging "github.com/libp2p/go-libp2p/gologshim"
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
+const (
+ ServiceName = "_p2p._udp"
+ mdnsDomain = "local"
+ dnsaddrPrefix = "dnsaddr="
+)
+
+var log = logging.Logger("mdns")
+
+type Service interface {
+ Start() error
+ io.Closer
+}
+
+type Notifee interface {
+ HandlePeerFound(peer.AddrInfo)
+}
+
+type mdnsService struct {
+ host host.Host
+ serviceName string
+ peerName string
+
+ // The context is canceled when Close() is called.
+ ctx context.Context
+ ctxCancel context.CancelFunc
+
+ resolverWG sync.WaitGroup
+ server *zeroconf.Server
+
+ notifee Notifee
+}
+
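+// NewMdnsService creates an mDNS discovery service that announces the host's
+// addresses and notifies notifee of peers discovered on the local network.
+// An empty serviceName defaults to ServiceName.
+//
+// A minimal usage sketch, assuming a host h and a Notifee implementation n
+// (the same pattern used by the tests below):
+//
+//  s := NewMdnsService(h, "", n)
+//  if err := s.Start(); err != nil { ... }
+//  defer s.Close()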
+func NewMdnsService(host host.Host, serviceName string, notifee Notifee) *mdnsService {
+ if serviceName == "" {
+ serviceName = ServiceName
+ }
+ s := &mdnsService{
+ host: host,
+ serviceName: serviceName,
+ peerName: randomString(32 + rand.Intn(32)), // generate a random string between 32 and 63 characters long
+ notifee: notifee,
+ }
+ s.ctx, s.ctxCancel = context.WithCancel(context.Background())
+ return s
+}
+
+func (s *mdnsService) Start() error {
+ if err := s.startServer(); err != nil {
+ return err
+ }
+ s.startResolver(s.ctx)
+ return nil
+}
+
+func (s *mdnsService) Close() error {
+ s.ctxCancel()
+ if s.server != nil {
+ s.server.Shutdown()
+ }
+ s.resolverWG.Wait()
+ return nil
+}
+
+// We don't really care about the IP addresses, but the spec (and various routers / firewalls) require us
+// to send A and AAAA records.
+func (s *mdnsService) getIPs(addrs []ma.Multiaddr) ([]string, error) {
+ var ip4, ip6 string
+ for _, addr := range addrs {
+ first, _ := ma.SplitFirst(addr)
+ if first == nil {
+ continue
+ }
+ if ip4 == "" && first.Protocol().Code == ma.P_IP4 {
+ ip4 = first.Value()
+ } else if ip6 == "" && first.Protocol().Code == ma.P_IP6 {
+ ip6 = first.Value()
+ }
+ }
+ ips := make([]string, 0, 2)
+ if ip4 != "" {
+ ips = append(ips, ip4)
+ }
+ if ip6 != "" {
+ ips = append(ips, ip6)
+ }
+ if len(ips) == 0 {
+ return nil, errors.New("didn't find any IP addresses")
+ }
+ return ips, nil
+}
+
+func (s *mdnsService) startServer() error {
+ interfaceAddrs, err := s.host.Network().InterfaceListenAddresses()
+ if err != nil {
+ return err
+ }
+ addrs, err := peer.AddrInfoToP2pAddrs(&peer.AddrInfo{
+ ID: s.host.ID(),
+ Addrs: interfaceAddrs,
+ })
+ if err != nil {
+ return err
+ }
+ var txts []string
+ for _, addr := range addrs {
+ if manet.IsThinWaist(addr) { // don't announce circuit addresses
+ txts = append(txts, dnsaddrPrefix+addr.String())
+ }
+ }
+
+ ips, err := s.getIPs(addrs)
+ if err != nil {
+ return err
+ }
+
+ server, err := zeroconf.RegisterProxy(
+ s.peerName,
+ s.serviceName,
+ mdnsDomain,
+ 4001, // we have to pass in a port number here, but libp2p only uses the TXT records
+ s.peerName,
+ ips,
+ txts,
+ nil,
+ )
+ if err != nil {
+ return err
+ }
+ s.server = server
+ return nil
+}
+
+func (s *mdnsService) startResolver(ctx context.Context) {
+ s.resolverWG.Add(2)
+ entryChan := make(chan *zeroconf.ServiceEntry, 1000)
+ go func() {
+ defer s.resolverWG.Done()
+ for entry := range entryChan {
+ // We only care about the TXT records.
+ // Ignore A, AAAA and PTR.
+ addrs := make([]ma.Multiaddr, 0, len(entry.Text)) // assume that all TXT records are dnsaddrs
+ for _, s := range entry.Text {
+ if !strings.HasPrefix(s, dnsaddrPrefix) {
+ log.Debug("missing dnsaddr prefix")
+ continue
+ }
+ addr, err := ma.NewMultiaddr(s[len(dnsaddrPrefix):])
+ if err != nil {
+ log.Debug("failed to parse multiaddr", "err", err)
+ continue
+ }
+ addrs = append(addrs, addr)
+ }
+ infos, err := peer.AddrInfosFromP2pAddrs(addrs...)
+ if err != nil {
+ log.Debug("failed to get peer info", "err", err)
+ continue
+ }
+ for _, info := range infos {
+ if info.ID == s.host.ID() {
+ continue
+ }
+ go s.notifee.HandlePeerFound(info)
+ }
+ }
+ }()
+ go func() {
+ defer s.resolverWG.Done()
+ if err := zeroconf.Browse(ctx, s.serviceName, mdnsDomain, entryChan); err != nil {
+ log.Debug("zeroconf browsing failed", "err", err)
+ }
+ }()
+}
+
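+// randomString returns a random string of length l drawn from lowercase letters and digits.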
+func randomString(l int) string {
+ const alphabet = "abcdefghijklmnopqrstuvwxyz0123456789"
+ s := make([]byte, 0, l)
+ for i := 0; i < l; i++ {
+ s = append(s, alphabet[rand.Intn(len(alphabet))])
+ }
+ return string(s)
+}
diff --git a/p2p/discovery/mdns/mdns_test.go b/p2p/discovery/mdns/mdns_test.go
new file mode 100644
index 0000000000..a2462790d3
--- /dev/null
+++ b/p2p/discovery/mdns/mdns_test.go
@@ -0,0 +1,98 @@
+package mdns
+
+import (
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p"
+ "github.com/libp2p/go-libp2p/core/peer"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func setupMDNS(t *testing.T, notifee Notifee) peer.ID {
+ t.Helper()
+ host, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0"))
+ require.NoError(t, err)
+ s := NewMdnsService(host, "", notifee)
+ require.NoError(t, s.Start())
+ t.Cleanup(func() {
+ host.Close()
+ s.Close()
+ })
+ return host.ID()
+}
+
+type notif struct {
+ mutex sync.Mutex
+ infos []peer.AddrInfo
+}
+
+var _ Notifee = &notif{}
+
+func (n *notif) HandlePeerFound(info peer.AddrInfo) {
+ n.mutex.Lock()
+ n.infos = append(n.infos, info)
+ n.mutex.Unlock()
+}
+
+func (n *notif) GetPeers() []peer.AddrInfo {
+ n.mutex.Lock()
+ defer n.mutex.Unlock()
+ infos := make([]peer.AddrInfo, 0, len(n.infos))
+ infos = append(infos, n.infos...)
+ return infos
+}
+
+func TestOtherDiscovery(t *testing.T) {
+ const n = 4
+
+ notifs := make([]*notif, n)
+ hostIDs := make([]peer.ID, n)
+ for i := 0; i < n; i++ {
+ notif := &notif{}
+ notifs[i] = notif
+ hostIDs[i] = setupMDNS(t, notif)
+ }
+
+ containsAllHostIDs := func(ids []peer.ID, currentHostID peer.ID) bool {
+ for _, id := range hostIDs {
+ var found bool
+ if currentHostID == id {
+ continue
+ }
+ for _, i := range ids {
+ if id == i {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return false
+ }
+ }
+ return true
+ }
+
+ assert.Eventuallyf(
+ t,
+ func() bool {
+ for i, notif := range notifs {
+ infos := notif.GetPeers()
+ ids := make([]peer.ID, 0, len(infos))
+ for _, info := range infos {
+ ids = append(ids, info.ID)
+ }
+ if !containsAllHostIDs(ids, hostIDs[i]) {
+ return false
+ }
+ }
+ return true
+ },
+ 25*time.Second,
+ 5*time.Millisecond,
+ "expected peers to find each other",
+ )
+}
diff --git a/p2p/discovery/mdns_test.go b/p2p/discovery/mdns_test.go
deleted file mode 100644
index 579fc5bf69..0000000000
--- a/p2p/discovery/mdns_test.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package discovery
-
-import (
- "context"
- "testing"
- "time"
-
- bhost "github.com/libp2p/go-libp2p/p2p/host/basic"
-
- host "github.com/libp2p/go-libp2p-host"
- netutil "github.com/libp2p/go-libp2p-netutil"
-
- pstore "github.com/libp2p/go-libp2p-peerstore"
-)
-
-type DiscoveryNotifee struct {
- h host.Host
-}
-
-func (n *DiscoveryNotifee) HandlePeerFound(pi pstore.PeerInfo) {
- n.h.Connect(context.Background(), pi)
-}
-
-func TestMdnsDiscovery(t *testing.T) {
- //TODO: re-enable when the new lib will get integrated
- t.Skip("TestMdnsDiscovery fails randomly with current lib")
-
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- a := bhost.New(netutil.GenSwarmNetwork(t, ctx))
- b := bhost.New(netutil.GenSwarmNetwork(t, ctx))
-
- sa, err := NewMdnsService(ctx, a, time.Second, "someTag")
- if err != nil {
- t.Fatal(err)
- }
-
- sb, err := NewMdnsService(ctx, b, time.Second, "someTag")
- if err != nil {
- t.Fatal(err)
- }
-
- _ = sb
-
- n := &DiscoveryNotifee{a}
-
- sa.RegisterNotifee(n)
-
- time.Sleep(time.Second * 2)
-
- err = a.Connect(ctx, pstore.PeerInfo{ID: b.ID()})
- if err != nil {
- t.Fatal(err)
- }
-}
diff --git a/p2p/discovery/mocks/mocks.go b/p2p/discovery/mocks/mocks.go
new file mode 100644
index 0000000000..3014538a75
--- /dev/null
+++ b/p2p/discovery/mocks/mocks.go
@@ -0,0 +1,114 @@
+package mocks
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/discovery"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/peer"
+)
+
+type clock interface {
+ Now() time.Time
+}
+
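+// MockDiscoveryServer is an in-memory advertisement registry, keyed by namespace,
+// shared by the mock discovery clients in tests.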
+type MockDiscoveryServer struct {
+ mx sync.Mutex
+ db map[string]map[peer.ID]*discoveryRegistration
+ clock clock
+}
+
+type discoveryRegistration struct {
+ info peer.AddrInfo
+ expiration time.Time
+}
+
+func NewDiscoveryServer(clock clock) *MockDiscoveryServer {
+ return &MockDiscoveryServer{
+ db: make(map[string]map[peer.ID]*discoveryRegistration),
+ clock: clock,
+ }
+}
+
+func (s *MockDiscoveryServer) Advertise(ns string, info peer.AddrInfo, ttl time.Duration) (time.Duration, error) {
+ s.mx.Lock()
+ defer s.mx.Unlock()
+
+ peers, ok := s.db[ns]
+ if !ok {
+ peers = make(map[peer.ID]*discoveryRegistration)
+ s.db[ns] = peers
+ }
+ peers[info.ID] = &discoveryRegistration{info, s.clock.Now().Add(ttl)}
+ return ttl, nil
+}
+
+func (s *MockDiscoveryServer) FindPeers(ns string, limit int) (<-chan peer.AddrInfo, error) {
+ s.mx.Lock()
+ defer s.mx.Unlock()
+
+ peers, ok := s.db[ns]
+ if !ok || len(peers) == 0 {
+ emptyCh := make(chan peer.AddrInfo)
+ close(emptyCh)
+ return emptyCh, nil
+ }
+
+ count := len(peers)
+ if limit != 0 && count > limit {
+ count = limit
+ }
+
+ iterTime := s.clock.Now()
+ ch := make(chan peer.AddrInfo, count)
+ numSent := 0
+ for p, reg := range peers {
+ if numSent == count {
+ break
+ }
+ if iterTime.After(reg.expiration) {
+ delete(peers, p)
+ continue
+ }
+
+ numSent++
+ ch <- reg.info
+ }
+ close(ch)
+
+ return ch, nil
+}
+
+type MockDiscoveryClient struct {
+ host host.Host
+ server *MockDiscoveryServer
+}
+
+func NewDiscoveryClient(h host.Host, server *MockDiscoveryServer) *MockDiscoveryClient {
+ return &MockDiscoveryClient{
+ host: h,
+ server: server,
+ }
+}
+
+func (d *MockDiscoveryClient) Advertise(_ context.Context, ns string, opts ...discovery.Option) (time.Duration, error) {
+ var options discovery.Options
+ err := options.Apply(opts...)
+ if err != nil {
+ return 0, err
+ }
+
+ return d.server.Advertise(ns, *host.InfoFromHost(d.host), options.Ttl)
+}
+
+func (d *MockDiscoveryClient) FindPeers(_ context.Context, ns string, opts ...discovery.Option) (<-chan peer.AddrInfo, error) {
+ var options discovery.Options
+ err := options.Apply(opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ return d.server.FindPeers(ns, options.Limit)
+}
diff --git a/p2p/discovery/routing/routing.go b/p2p/discovery/routing/routing.go
new file mode 100644
index 0000000000..e550bc304f
--- /dev/null
+++ b/p2p/discovery/routing/routing.go
@@ -0,0 +1,109 @@
+package routing
+
+import (
+ "context"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/discovery"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/routing"
+
+ "github.com/ipfs/go-cid"
+ mh "github.com/multiformats/go-multihash"
+)
+
+// RoutingDiscovery is an implementation of discovery using ContentRouting.
+// Namespaces are translated to Cids using the SHA256 hash.
+type RoutingDiscovery struct {
+ routing.ContentRouting
+}
+
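+// NewRoutingDiscovery wraps a ContentRouting implementation (e.g. a DHT) for use
+// as a Discovery.
+//
+// A minimal usage sketch, assuming a router r ("my-namespace" is an arbitrary example):
+//
+//  disc := NewRoutingDiscovery(r)
+//  ttl, err := disc.Advertise(ctx, "my-namespace")
+//  peerCh, err := disc.FindPeers(ctx, "my-namespace")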
+func NewRoutingDiscovery(router routing.ContentRouting) *RoutingDiscovery {
+ return &RoutingDiscovery{router}
+}
+
+func (d *RoutingDiscovery) Advertise(ctx context.Context, ns string, opts ...discovery.Option) (time.Duration, error) {
+ var options discovery.Options
+ err := options.Apply(opts...)
+ if err != nil {
+ return 0, err
+ }
+
+ ttl := options.Ttl
+ if ttl == 0 || ttl > 3*time.Hour {
+ // the DHT provider record validity is 24hrs, but it is recommended to republish at least every 6hrs
+ // we go one step further and republish every 3hrs
+ ttl = 3 * time.Hour
+ }
+
+ cid, err := nsToCid(ns)
+ if err != nil {
+ return 0, err
+ }
+
+ // this context requires a timeout; it determines how long the DHT looks for
+ // closest peers to the key/CID before it goes on to provide the record to them.
+ // Not setting a timeout here will make the DHT wander forever.
+ pctx, cancel := context.WithTimeout(ctx, 60*time.Second)
+ defer cancel()
+
+ err = d.Provide(pctx, cid, true)
+ if err != nil {
+ return 0, err
+ }
+
+ return ttl, nil
+}
+
+func (d *RoutingDiscovery) FindPeers(ctx context.Context, ns string, opts ...discovery.Option) (<-chan peer.AddrInfo, error) {
+ options := discovery.Options{
+ Limit: 100, // default limit if not specified in options
+ }
+ err := options.Apply(opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ cid, err := nsToCid(ns)
+ if err != nil {
+ return nil, err
+ }
+
+ return d.FindProvidersAsync(ctx, cid, options.Limit), nil
+}
+
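+// nsToCid maps a namespace to a CID by hashing it with SHA2-256 and wrapping the
+// digest in a raw CIDv1.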
+func nsToCid(ns string) (cid.Cid, error) {
+ h, err := mh.Sum([]byte(ns), mh.SHA2_256, -1)
+ if err != nil {
+ return cid.Undef, err
+ }
+
+ return cid.NewCidV1(cid.Raw, h), nil
+}
+
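+// NewDiscoveryRouting wraps a Discovery so it can be used as a ContentRouting:
+// Provide advertises under a namespace derived from the CID, and
+// FindProvidersAsync searches that namespace for peers.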
+func NewDiscoveryRouting(disc discovery.Discovery, opts ...discovery.Option) *DiscoveryRouting {
+ return &DiscoveryRouting{disc, opts}
+}
+
+type DiscoveryRouting struct {
+ discovery.Discovery
+ opts []discovery.Option
+}
+
+func (r *DiscoveryRouting) Provide(ctx context.Context, c cid.Cid, bcast bool) error {
+ if !bcast {
+ return nil
+ }
+
+ _, err := r.Advertise(ctx, cidToNs(c), r.opts...)
+ return err
+}
+
+func (r *DiscoveryRouting) FindProvidersAsync(ctx context.Context, c cid.Cid, limit int) <-chan peer.AddrInfo {
+ ch, _ := r.FindPeers(ctx, cidToNs(c), append([]discovery.Option{discovery.Limit(limit)}, r.opts...)...)
+ return ch
+}
+
+func cidToNs(c cid.Cid) string {
+ return "/provider/" + c.String()
+}
diff --git a/p2p/discovery/routing/routing_test.go b/p2p/discovery/routing/routing_test.go
new file mode 100644
index 0000000000..d6dce1dea3
--- /dev/null
+++ b/p2p/discovery/routing/routing_test.go
@@ -0,0 +1,151 @@
+package routing
+
+import (
+ "context"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/benbjohnson/clock"
+ "github.com/libp2p/go-libp2p/p2p/discovery/mocks"
+ "github.com/libp2p/go-libp2p/p2p/discovery/util"
+ bhost "github.com/libp2p/go-libp2p/p2p/host/blank"
+ swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
+
+ "github.com/ipfs/go-cid"
+ "github.com/libp2p/go-libp2p/core/discovery"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/peer"
+)
+
+type mockRoutingTable struct {
+ mx sync.Mutex
+ providers map[string]map[peer.ID]peer.AddrInfo
+}
+
+type mockRouting struct {
+ h host.Host
+ tab *mockRoutingTable
+}
+
+func NewMockRoutingTable() *mockRoutingTable {
+ return &mockRoutingTable{providers: make(map[string]map[peer.ID]peer.AddrInfo)}
+}
+
+func NewMockRouting(h host.Host, tab *mockRoutingTable) *mockRouting {
+ return &mockRouting{h: h, tab: tab}
+}
+
+func (m *mockRouting) Provide(_ context.Context, cid cid.Cid, _ bool) error {
+ m.tab.mx.Lock()
+ defer m.tab.mx.Unlock()
+
+ pmap, ok := m.tab.providers[cid.String()]
+ if !ok {
+ pmap = make(map[peer.ID]peer.AddrInfo)
+ m.tab.providers[cid.String()] = pmap
+ }
+
+ pmap[m.h.ID()] = peer.AddrInfo{ID: m.h.ID(), Addrs: m.h.Addrs()}
+
+ return nil
+}
+
+func (m *mockRouting) FindProvidersAsync(ctx context.Context, cid cid.Cid, _ int) <-chan peer.AddrInfo {
+ ch := make(chan peer.AddrInfo)
+ go func() {
+ defer close(ch)
+ m.tab.mx.Lock()
+ defer m.tab.mx.Unlock()
+
+ pmap, ok := m.tab.providers[cid.String()]
+ if !ok {
+ return
+ }
+
+ for _, pi := range pmap {
+ select {
+ case ch <- pi:
+ case <-ctx.Done():
+ return
+ }
+ }
+ }()
+
+ return ch
+}
+
+func TestRoutingDiscovery(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ h1 := bhost.NewBlankHost(swarmt.GenSwarm(t))
+ h2 := bhost.NewBlankHost(swarmt.GenSwarm(t))
+
+ mtab := NewMockRoutingTable()
+ mr1 := NewMockRouting(h1, mtab)
+ mr2 := NewMockRouting(h2, mtab)
+
+ d1 := NewRoutingDiscovery(mr1)
+ d2 := NewRoutingDiscovery(mr2)
+
+ _, err := d1.Advertise(ctx, "/test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ pis, err := util.FindPeers(ctx, d2, "/test", discovery.Limit(20))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(pis) != 1 {
+ t.Fatalf("Expected 1 peer, got %d", len(pis))
+ }
+
+ pi := pis[0]
+ if pi.ID != h1.ID() {
+ t.Fatalf("Unexpected peer: %s", pi.ID)
+ }
+}
+
+func TestDiscoveryRouting(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ h1 := bhost.NewBlankHost(swarmt.GenSwarm(t))
+ h2 := bhost.NewBlankHost(swarmt.GenSwarm(t))
+
+ clock := clock.NewMock()
+ dserver := mocks.NewDiscoveryServer(clock)
+ d1 := mocks.NewDiscoveryClient(h1, dserver)
+ d2 := mocks.NewDiscoveryClient(h2, dserver)
+
+ r1 := NewDiscoveryRouting(d1, discovery.TTL(time.Hour))
+ r2 := NewDiscoveryRouting(d2, discovery.TTL(time.Hour))
+
+ c, err := nsToCid("/test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := r1.Provide(ctx, c, true); err != nil {
+ t.Fatal(err)
+ }
+
+ pch := r2.FindProvidersAsync(ctx, c, 20)
+
+ allAIs := make([]peer.AddrInfo, 0, len(pch))
+ for ai := range pch {
+ allAIs = append(allAIs, ai)
+ }
+
+ if len(allAIs) != 1 {
+ t.Fatalf("Expected 1 peer, got %d", len(allAIs))
+ }
+
+ ai := allAIs[0]
+ if ai.ID != h1.ID() {
+ t.Fatalf("Unexpected peer: %s", ai.ID)
+ }
+}
diff --git a/p2p/discovery/util/util.go b/p2p/discovery/util/util.go
new file mode 100644
index 0000000000..19181df618
--- /dev/null
+++ b/p2p/discovery/util/util.go
@@ -0,0 +1,58 @@
+package util
+
+import (
+ "context"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/discovery"
+ "github.com/libp2p/go-libp2p/core/peer"
+
+ logging "github.com/libp2p/go-libp2p/gologshim"
+)
+
+var log = logging.Logger("discovery-util")
+
+// FindPeers is a utility function that synchronously collects peers from a Discoverer.
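+//
+// A minimal usage sketch, assuming a Discoverer d ("my-namespace" is an arbitrary example):
+//
+//  peers, err := FindPeers(ctx, d, "my-namespace", discovery.Limit(10))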
+func FindPeers(ctx context.Context, d discovery.Discoverer, ns string, opts ...discovery.Option) ([]peer.AddrInfo, error) {
+
+ ch, err := d.FindPeers(ctx, ns, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ res := make([]peer.AddrInfo, 0, len(ch))
+ for pi := range ch {
+ res = append(res, pi)
+ }
+
+ return res, nil
+}
+
+// Advertise is a utility function that persistently advertises a service through an Advertiser.
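+// It re-advertises at 7/8 of the TTL returned by the Advertiser, retries every
+// two minutes after an error, and stops when ctx is canceled.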
+func Advertise(ctx context.Context, a discovery.Advertiser, ns string, opts ...discovery.Option) {
+ go func() {
+ for {
+ ttl, err := a.Advertise(ctx, ns, opts...)
+ if err != nil {
+ log.Debug("Error advertising", "namespace", ns, "err", err)
+ if ctx.Err() != nil {
+ return
+ }
+
+ select {
+ case <-time.After(2 * time.Minute):
+ continue
+ case <-ctx.Done():
+ return
+ }
+ }
+
+ wait := 7 * ttl / 8
+ select {
+ case <-time.After(wait):
+ case <-ctx.Done():
+ return
+ }
+ }
+ }()
+}
diff --git a/p2p/host/autonat/autonat.go b/p2p/host/autonat/autonat.go
new file mode 100644
index 0000000000..f9711f76c4
--- /dev/null
+++ b/p2p/host/autonat/autonat.go
@@ -0,0 +1,455 @@
+package autonat
+
+import (
+ "context"
+ "fmt"
+ "math/rand"
+ "slices"
+ "sync/atomic"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/event"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/p2p/host/eventbus"
+
+ logging "github.com/libp2p/go-libp2p/gologshim"
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
+var log = logging.Logger("autonat")
+
+const maxConfidence = 3
+
+// AmbientAutoNAT is the implementation of ambient NAT autodiscovery
+type AmbientAutoNAT struct {
+ host host.Host
+
+ *config
+
+ ctx context.Context
+ ctxCancel context.CancelFunc // cancels ctx when Close is called
+ backgroundRunning chan struct{} // is closed when the background goroutine exits
+
+ inboundConn chan network.Conn
+ dialResponses chan error
+ // Used when testing the autonat service
+ observations chan network.Reachability
+ // status reflects the currently observed reachability status.
+ status atomic.Pointer[network.Reachability]
+ // Reflects the confidence in the NATStatus being private, as a single
+ // dialback may fail for reasons unrelated to NAT.
+ // If it is <3, then multiple autoNAT peers may be contacted for dialback.
+ // If only a single autoNAT peer is known, then the confidence increases
+ // for each failure until it reaches 3.
+ confidence int
+ lastInbound time.Time
+ lastProbe time.Time
+ recentProbes map[peer.ID]time.Time
+ pendingProbes int
+ ourAddrs map[string]struct{}
+
+ service *autoNATService
+
+ emitReachabilityChanged event.Emitter
+ subscriber event.Subscription
+}
+
+// StaticAutoNAT is a simple AutoNAT implementation when a single NAT status is desired.
+type StaticAutoNAT struct {
+ host host.Host
+ reachability network.Reachability
+ service *autoNATService
+}
+
+// New creates a new NAT autodiscovery system attached to a host
+func New(h host.Host, options ...Option) (AutoNAT, error) {
+ var err error
+ conf := new(config)
+ conf.host = h
+ conf.dialPolicy.host = h
+
+ if err = defaults(conf); err != nil {
+ return nil, err
+ }
+ if conf.addressFunc == nil {
+ if aa, ok := h.(interface{ AllAddrs() []ma.Multiaddr }); ok {
+ conf.addressFunc = aa.AllAddrs
+ } else {
+ conf.addressFunc = h.Addrs
+ }
+ }
+
+ for _, o := range options {
+ if err = o(conf); err != nil {
+ return nil, err
+ }
+ }
+ emitReachabilityChanged, _ := h.EventBus().Emitter(new(event.EvtLocalReachabilityChanged), eventbus.Stateful)
+
+ var service *autoNATService
+ if (!conf.forceReachability || conf.reachability == network.ReachabilityPublic) && conf.dialer != nil {
+ service, err = newAutoNATService(conf)
+ if err != nil {
+ return nil, err
+ }
+ service.Enable()
+ }
+
+ if conf.forceReachability {
+ emitReachabilityChanged.Emit(event.EvtLocalReachabilityChanged{Reachability: conf.reachability})
+
+ return &StaticAutoNAT{
+ host: h,
+ reachability: conf.reachability,
+ service: service,
+ }, nil
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+ as := &AmbientAutoNAT{
+ ctx: ctx,
+ ctxCancel: cancel,
+ backgroundRunning: make(chan struct{}),
+ host: h,
+ config: conf,
+ inboundConn: make(chan network.Conn, 5),
+ dialResponses: make(chan error, 1),
+ observations: make(chan network.Reachability, 1),
+
+ emitReachabilityChanged: emitReachabilityChanged,
+ service: service,
+ recentProbes: make(map[peer.ID]time.Time),
+ ourAddrs: make(map[string]struct{}),
+ }
+ reachability := network.ReachabilityUnknown
+ as.status.Store(&reachability)
+
+ subscriber, err := as.host.EventBus().Subscribe(
+ []any{new(event.EvtLocalAddressesUpdated), new(event.EvtPeerIdentificationCompleted)},
+ eventbus.Name("autonat"),
+ )
+ if err != nil {
+ return nil, err
+ }
+ as.subscriber = subscriber
+
+ go as.background()
+
+ return as, nil
+}
+
+// Status returns the AutoNAT observed reachability status.
+func (as *AmbientAutoNAT) Status() network.Reachability {
+ s := as.status.Load()
+ return *s
+}
+
+func (as *AmbientAutoNAT) emitStatus() {
+ status := *as.status.Load()
+ as.emitReachabilityChanged.Emit(event.EvtLocalReachabilityChanged{Reachability: status})
+ if as.metricsTracer != nil {
+ as.metricsTracer.ReachabilityStatus(status)
+ }
+}
+
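+// ipInList reports whether candidate's IP address matches the IP of any multiaddr in list.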
+func ipInList(candidate ma.Multiaddr, list []ma.Multiaddr) bool {
+ candidateIP, _ := manet.ToIP(candidate)
+ for _, i := range list {
+ if ip, err := manet.ToIP(i); err == nil && ip.Equal(candidateIP) {
+ return true
+ }
+ }
+ return false
+}
+
+func (as *AmbientAutoNAT) background() {
+ defer close(as.backgroundRunning)
+ // wait a bit for the node to come online and establish some connections
+ // before starting autodetection
+ delay := as.config.bootDelay
+
+ subChan := as.subscriber.Out()
+ defer as.subscriber.Close()
+ defer as.emitReachabilityChanged.Close()
+
+ // Fallback timer to update address in case EvtLocalAddressesUpdated is not emitted.
+ // TODO: The event not emitting properly is a bug. This is a workaround.
+ addrChangeTicker := time.NewTicker(30 * time.Minute)
+ defer addrChangeTicker.Stop()
+
+ timer := time.NewTimer(delay)
+ defer timer.Stop()
+ timerRunning := true
+ forceProbe := false
+ for {
+ select {
+ case conn := <-as.inboundConn:
+ localAddrs := as.host.Addrs()
+ if manet.IsPublicAddr(conn.RemoteMultiaddr()) &&
+ !ipInList(conn.RemoteMultiaddr(), localAddrs) {
+ as.lastInbound = time.Now()
+ }
+ case <-addrChangeTicker.C:
+ // schedule a new probe if addresses have changed
+ case e := <-subChan:
+ switch e := e.(type) {
+ case event.EvtPeerIdentificationCompleted:
+ if proto, err := as.host.Peerstore().SupportsProtocols(e.Peer, AutoNATProto); err == nil && len(proto) > 0 {
+ forceProbe = true
+ }
+ case event.EvtLocalAddressesUpdated:
+ // schedule a new probe if addresses have changed
+ default:
+ log.Error("unknown event type", "event_type", fmt.Sprintf("%T", e))
+ }
+ case obs := <-as.observations:
+ as.recordObservation(obs)
+ continue
+ case err, ok := <-as.dialResponses:
+ if !ok {
+ return
+ }
+ as.pendingProbes--
+ if IsDialRefused(err) {
+ forceProbe = true
+ } else {
+ as.handleDialResponse(err)
+ }
+ case <-timer.C:
+ timerRunning = false
+ forceProbe = false
+ // Update the last probe time. We use it to ensure
+ // that we don't spam the peerstore.
+ as.lastProbe = time.Now()
+ peer := as.getPeerToProbe()
+ as.tryProbe(peer)
+ case <-as.ctx.Done():
+ return
+ }
+ // On address update, reduce confidence from maximum so that we schedule
+ // the next probe sooner
+ hasNewAddr := as.checkAddrs()
+ if hasNewAddr && as.confidence == maxConfidence {
+ as.confidence--
+ }
+
+ if timerRunning && !timer.Stop() {
+ <-timer.C
+ }
+ timer.Reset(as.scheduleProbe(forceProbe))
+ timerRunning = true
+ }
+}
+
+func (as *AmbientAutoNAT) checkAddrs() (hasNewAddr bool) {
+ currentAddrs := as.addressFunc()
+ hasNewAddr = slices.ContainsFunc(currentAddrs, func(a ma.Multiaddr) bool {
+ _, ok := as.ourAddrs[string(a.Bytes())]
+ return !ok
+ })
+ clear(as.ourAddrs)
+ for _, a := range currentAddrs {
+ if !manet.IsPublicAddr(a) {
+ continue
+ }
+ as.ourAddrs[string(a.Bytes())] = struct{}{}
+ }
+ return hasNewAddr
+}
+
+// scheduleProbe calculates when the next probe should be scheduled for.
+func (as *AmbientAutoNAT) scheduleProbe(forceProbe bool) time.Duration {
+ now := time.Now()
+ currentStatus := *as.status.Load()
+ nextProbeAfter := as.config.refreshInterval
+ receivedInbound := as.lastInbound.After(as.lastProbe)
+ switch {
+ case forceProbe && currentStatus == network.ReachabilityUnknown:
+ // Retry very quickly if forceProbe is true *and* we don't know our reachability;
+ // this also limits how often we fetch candidate peers from the peerstore.
+ nextProbeAfter = 2 * time.Second
+ case currentStatus == network.ReachabilityUnknown,
+ as.confidence < maxConfidence,
+ currentStatus != network.ReachabilityPublic && receivedInbound:
+ // Retry quickly in case:
+ // 1. Our reachability is Unknown
+ // 2. We don't have enough confidence in our reachability.
+ // 3. We're private but we received an inbound connection.
+ nextProbeAfter = as.config.retryInterval
+ case currentStatus == network.ReachabilityPublic && receivedInbound:
+ // We are public and we received an inbound connection recently,
+ // wait a little longer
+ nextProbeAfter *= 2
+ nextProbeAfter = min(nextProbeAfter, maxRefreshInterval)
+ }
+ nextProbeTime := as.lastProbe.Add(nextProbeAfter)
+ if nextProbeTime.Before(now) {
+ nextProbeTime = now
+ }
+ if as.metricsTracer != nil {
+ as.metricsTracer.NextProbeTime(nextProbeTime)
+ }
+
+ return nextProbeTime.Sub(now)
+}
+
+// handleDialResponse updates the current status based on dial response.
+func (as *AmbientAutoNAT) handleDialResponse(dialErr error) {
+ var observation network.Reachability
+ switch {
+ case dialErr == nil:
+ observation = network.ReachabilityPublic
+ case IsDialError(dialErr):
+ observation = network.ReachabilityPrivate
+ default:
+ observation = network.ReachabilityUnknown
+ }
+
+ as.recordObservation(observation)
+}
+
+// recordObservation updates NAT status and confidence
+func (as *AmbientAutoNAT) recordObservation(observation network.Reachability) {
+
+ currentStatus := *as.status.Load()
+
+ if observation == network.ReachabilityPublic {
+ changed := false
+ if currentStatus != network.ReachabilityPublic {
+ // Aggressively switch to public from other states ignoring confidence
+ log.Debug("NAT status is public")
+
+ // we are flipping our NATStatus, so confidence drops to 0
+ as.confidence = 0
+ if as.service != nil {
+ as.service.Enable()
+ }
+ changed = true
+ } else if as.confidence < maxConfidence {
+ as.confidence++
+ }
+ as.status.Store(&observation)
+ if changed {
+ as.emitStatus()
+ }
+ } else if observation == network.ReachabilityPrivate {
+ if currentStatus != network.ReachabilityPrivate {
+ if as.confidence > 0 {
+ as.confidence--
+ } else {
+ log.Debug("NAT status is private")
+
+ // we are flipping our NATStatus, so confidence drops to 0
+ as.confidence = 0
+ as.status.Store(&observation)
+ if as.service != nil {
+ as.service.Disable()
+ }
+ as.emitStatus()
+ }
+ } else if as.confidence < maxConfidence {
+ as.confidence++
+ as.status.Store(&observation)
+ }
+ } else if as.confidence > 0 {
+ // don't just flip to unknown, reduce confidence first
+ as.confidence--
+ } else {
+ log.Debug("NAT status is unknown")
+ as.status.Store(&observation)
+ if currentStatus != network.ReachabilityUnknown {
+ if as.service != nil {
+ as.service.Enable()
+ }
+ as.emitStatus()
+ }
+ }
+ if as.metricsTracer != nil {
+ as.metricsTracer.ReachabilityStatusConfidence(as.confidence)
+ }
+}
+
+func (as *AmbientAutoNAT) tryProbe(p peer.ID) {
+ if p == "" || as.pendingProbes > 5 {
+ return
+ }
+ info := as.host.Peerstore().PeerInfo(p)
+ as.recentProbes[p] = time.Now()
+ as.pendingProbes++
+ go as.probe(&info)
+}
+
+func (as *AmbientAutoNAT) probe(pi *peer.AddrInfo) {
+ cli := NewAutoNATClient(as.host, as.config.addressFunc, as.metricsTracer)
+ ctx, cancel := context.WithTimeout(as.ctx, as.config.requestTimeout)
+ defer cancel()
+
+ err := cli.DialBack(ctx, pi.ID)
+ log.Debug("Dialback through peer completed", "peer", pi.ID, "err", err)
+
+ select {
+ case as.dialResponses <- err:
+ case <-as.ctx.Done():
+ return
+ }
+}
+
+func (as *AmbientAutoNAT) getPeerToProbe() peer.ID {
+ peers := as.host.Network().Peers()
+ if len(peers) == 0 {
+ return ""
+ }
+
+ // clean old probes
+ fixedNow := time.Now()
+ for k, v := range as.recentProbes {
+ if fixedNow.Sub(v) > as.throttlePeerPeriod {
+ delete(as.recentProbes, k)
+ }
+ }
+
+ // Shuffle peers
+ for n := len(peers); n > 0; n-- {
+ randIndex := rand.Intn(n)
+ peers[n-1], peers[randIndex] = peers[randIndex], peers[n-1]
+ }
+
+ for _, p := range peers {
+ info := as.host.Peerstore().PeerInfo(p)
+ // Exclude peers which don't support the autonat protocol.
+ if proto, err := as.host.Peerstore().SupportsProtocols(p, AutoNATProto); len(proto) == 0 || err != nil {
+ continue
+ }
+
+ if as.config.dialPolicy.skipPeer(info.Addrs) {
+ continue
+ }
+ return p
+ }
+
+ return ""
+}
+
+func (as *AmbientAutoNAT) Close() error {
+ as.ctxCancel()
+ // Wait for the background goroutine to exit before tearing down the service.
+ <-as.backgroundRunning
+ if as.service != nil {
+ return as.service.Close()
+ }
+ return nil
+}
+
+// Status returns the AutoNAT observed reachability status.
+func (s *StaticAutoNAT) Status() network.Reachability {
+ return s.reachability
+}
+
+func (s *StaticAutoNAT) Close() error {
+ if s.service != nil {
+ return s.service.Close()
+ }
+ return nil
+}
diff --git a/p2p/host/autonat/autonat_test.go b/p2p/host/autonat/autonat_test.go
new file mode 100644
index 0000000000..6a5768cd5a
--- /dev/null
+++ b/p2p/host/autonat/autonat_test.go
@@ -0,0 +1,330 @@
+package autonat
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/event"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/p2p/host/autonat/pb"
+ bhost "github.com/libp2p/go-libp2p/p2p/host/blank"
+ swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
+
+ "github.com/libp2p/go-msgio/pbio"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// these are mock service implementations for testing
+func makeAutoNATServicePrivate(t *testing.T) host.Host {
+ h := bhost.NewBlankHost(swarmt.GenSwarm(t))
+ h.SetStreamHandler(AutoNATProto, sayPrivateStreamHandler(t))
+ return h
+}
+
+func sayPrivateStreamHandler(t *testing.T) network.StreamHandler {
+ return func(s network.Stream) {
+ defer s.Close()
+ r := pbio.NewDelimitedReader(s, network.MessageSizeMax)
+ if err := r.ReadMsg(&pb.Message{}); err != nil {
+ t.Error(err)
+ return
+ }
+ w := pbio.NewDelimitedWriter(s)
+ res := pb.Message{
+ Type: pb.Message_DIAL_RESPONSE.Enum(),
+ DialResponse: newDialResponseError(pb.Message_E_DIAL_ERROR, "dial failed"),
+ }
+ w.WriteMsg(&res)
+ }
+}
+
+func makeAutoNATRefuseDialRequest(t *testing.T, done chan struct{}) host.Host {
+ h := bhost.NewBlankHost(swarmt.GenSwarm(t))
+ h.SetStreamHandler(AutoNATProto, sayRefusedStreamHandler(t, done))
+ return h
+}
+
+func sayRefusedStreamHandler(t *testing.T, done chan struct{}) network.StreamHandler {
+ return func(s network.Stream) {
+ defer s.Close()
+ r := pbio.NewDelimitedReader(s, network.MessageSizeMax)
+ if err := r.ReadMsg(&pb.Message{}); err != nil {
+ // ignore error if the test has completed
+ select {
+ case _, ok := <-done:
+ if !ok {
+ return
+ }
+ default:
+ }
+ t.Error(err)
+ return
+ }
+ w := pbio.NewDelimitedWriter(s)
+ res := pb.Message{
+ Type: pb.Message_DIAL_RESPONSE.Enum(),
+ DialResponse: newDialResponseError(pb.Message_E_DIAL_REFUSED, "dial refused"),
+ }
+ w.WriteMsg(&res)
+ }
+}
+
+func makeAutoNATServicePublic(t *testing.T) host.Host {
+ h := bhost.NewBlankHost(swarmt.GenSwarm(t))
+ h.SetStreamHandler(AutoNATProto, func(s network.Stream) {
+ defer s.Close()
+ r := pbio.NewDelimitedReader(s, network.MessageSizeMax)
+ if err := r.ReadMsg(&pb.Message{}); err != nil {
+ t.Error(err)
+ return
+ }
+ w := pbio.NewDelimitedWriter(s)
+ res := pb.Message{
+ Type: pb.Message_DIAL_RESPONSE.Enum(),
+ DialResponse: newDialResponseOK(s.Conn().RemoteMultiaddr()),
+ }
+ w.WriteMsg(&res)
+ })
+ return h
+}
+
+func makeAutoNAT(t *testing.T, ash host.Host) (host.Host, AutoNAT) {
+ h := bhost.NewBlankHost(swarmt.GenSwarm(t))
+ h.Peerstore().AddAddrs(ash.ID(), ash.Addrs(), time.Minute)
+ h.Peerstore().AddProtocols(ash.ID(), AutoNATProto)
+ a, _ := New(h, WithSchedule(100*time.Millisecond, time.Second), WithoutStartupDelay())
+ a.(*AmbientAutoNAT).config.dialPolicy.allowSelfDials = true
+ a.(*AmbientAutoNAT).config.throttlePeerPeriod = 100 * time.Millisecond
+ return h, a
+}
+
+func identifyAsServer(server, recip host.Host) {
+ recip.Peerstore().AddAddrs(server.ID(), server.Addrs(), time.Minute)
+ recip.Peerstore().AddProtocols(server.ID(), AutoNATProto)
+}
+
+func connect(t *testing.T, a, b host.Host) {
+ pinfo := peer.AddrInfo{ID: a.ID(), Addrs: a.Addrs()}
+ err := b.Connect(context.Background(), pinfo)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func expectEvent(t *testing.T, s event.Subscription, expected network.Reachability, timeout time.Duration) {
+ t.Helper()
+ select {
+ case e := <-s.Out():
+ ev, ok := e.(event.EvtLocalReachabilityChanged)
+ if !ok || ev.Reachability != expected {
+ t.Fatal("got wrong event type from the bus")
+ }
+
+ case <-time.After(timeout):
+ t.Fatal("failed to get the reachability event from the bus")
+ }
+}
+
+// tests
+func TestAutoNATPrivate(t *testing.T) {
+ hs := makeAutoNATServicePrivate(t)
+ defer hs.Close()
+ hc, an := makeAutoNAT(t, hs)
+ defer hc.Close()
+ defer an.Close()
+
+ // subscribe to AutoNat events
+ s, err := hc.EventBus().Subscribe(&event.EvtLocalReachabilityChanged{})
+ if err != nil {
+ t.Fatalf("failed to subscribe to event EvtLocalReachabilityChanged, err=%s", err)
+ }
+
+ status := an.Status()
+ if status != network.ReachabilityUnknown {
+ t.Fatalf("unexpected NAT status: %d", status)
+ }
+
+ connect(t, hs, hc)
+ expectEvent(t, s, network.ReachabilityPrivate, 3*time.Second)
+}
+
+func TestAutoNATPublic(t *testing.T) {
+ hs := makeAutoNATServicePublic(t)
+ defer hs.Close()
+ hc, an := makeAutoNAT(t, hs)
+ defer hc.Close()
+ defer an.Close()
+
+ // subscribe to AutoNat events
+ s, err := hc.EventBus().Subscribe(&event.EvtLocalReachabilityChanged{})
+ if err != nil {
+ t.Fatalf("failed to subscribe to event EvtLocalReachabilityChanged, err=%s", err)
+ }
+
+ status := an.Status()
+ if status != network.ReachabilityUnknown {
+ t.Fatalf("unexpected NAT status: %d", status)
+ }
+
+ connect(t, hs, hc)
+ expectEvent(t, s, network.ReachabilityPublic, 3*time.Second)
+}
+
+func TestAutoNATPublictoPrivate(t *testing.T) {
+ hs := makeAutoNATServicePublic(t)
+ defer hs.Close()
+ hc, an := makeAutoNAT(t, hs)
+ defer hc.Close()
+ defer an.Close()
+
+ // subscribe to AutoNat events
+ s, err := hc.EventBus().Subscribe(&event.EvtLocalReachabilityChanged{})
+ if err != nil {
+ t.Fatalf("failed to subscribe to event EvtLocalReachabilityChanged, err=%s", err)
+ }
+
+ if status := an.Status(); status != network.ReachabilityUnknown {
+ t.Fatalf("unexpected NAT status: %d", status)
+ }
+
+ connect(t, hs, hc)
+ expectEvent(t, s, network.ReachabilityPublic, 3*time.Second)
+
+ hs.SetStreamHandler(AutoNATProto, sayPrivateStreamHandler(t))
+ hps := makeAutoNATServicePrivate(t)
+ connect(t, hps, hc)
+ identifyAsServer(hps, hc)
+
+ expectEvent(t, s, network.ReachabilityPrivate, 3*time.Second)
+}
+
+func TestAutoNATIncomingEvents(t *testing.T) {
+ hs := makeAutoNATServicePrivate(t)
+ defer hs.Close()
+ hc, ani := makeAutoNAT(t, hs)
+ defer hc.Close()
+ defer ani.Close()
+ an := ani.(*AmbientAutoNAT)
+
+ status := an.Status()
+ if status != network.ReachabilityUnknown {
+ t.Fatalf("unexpected NAT status: %d", status)
+ }
+
+ connect(t, hs, hc)
+
+ em, _ := hc.EventBus().Emitter(&event.EvtPeerIdentificationCompleted{})
+ em.Emit(event.EvtPeerIdentificationCompleted{Peer: hs.ID()})
+
+ require.Eventually(t, func() bool {
+ return an.Status() != network.ReachabilityUnknown
+ }, 5*time.Second, 100*time.Millisecond, "Expected probe due to identification of autonat service")
+}
+
+func TestAutoNATDialRefused(t *testing.T) {
+ hs := makeAutoNATServicePublic(t)
+ defer hs.Close()
+ hc, an := makeAutoNAT(t, hs)
+ defer hc.Close()
+ defer an.Close()
+
+ // subscribe to AutoNat events
+ s, err := hc.EventBus().Subscribe(&event.EvtLocalReachabilityChanged{})
+ if err != nil {
+ t.Fatalf("failed to subscribe to event EvtLocalReachabilityChanged, err=%s", err)
+ }
+
+ if status := an.Status(); status != network.ReachabilityUnknown {
+ t.Fatalf("unexpected NAT status: %d", status)
+ }
+
+ connect(t, hs, hc)
+ expectEvent(t, s, network.ReachabilityPublic, 10*time.Second)
+
+ done := make(chan struct{})
+ hs.SetStreamHandler(AutoNATProto, sayRefusedStreamHandler(t, done))
+ hps := makeAutoNATRefuseDialRequest(t, done)
+ connect(t, hps, hc)
+ identifyAsServer(hps, hc)
+
+ require.Never(t, func() bool {
+ return an.Status() != network.ReachabilityPublic
+ }, 3*time.Second, 1*time.Second, "Expected probe to not change reachability from public")
+ close(done)
+}
+
+func recordObservation(an *AmbientAutoNAT, status network.Reachability) {
+ an.observations <- status
+}
+
+func TestAutoNATObservationRecording(t *testing.T) {
+ hs := makeAutoNATServicePublic(t)
+ defer hs.Close()
+ hc, ani := makeAutoNAT(t, hs)
+ defer hc.Close()
+ defer ani.Close()
+ an := ani.(*AmbientAutoNAT)
+
+ s, err := hc.EventBus().Subscribe(&event.EvtLocalReachabilityChanged{})
+ if err != nil {
+ t.Fatalf("failed to subscribe to event EvtLocalRoutabilityPublic, err=%s", err)
+ }
+
+ expectStatus := func(expected network.Reachability, msg string, args ...any) {
+ require.EventuallyWithTf(t, func(collect *assert.CollectT) {
+ assert.Equal(collect, expected, an.Status())
+ }, 2*time.Second, 100*time.Millisecond, msg, args...)
+ }
+
+ recordObservation(an, network.ReachabilityPublic)
+ expectStatus(network.ReachabilityPublic, "failed to transition to public.")
+ expectEvent(t, s, network.ReachabilityPublic, 3*time.Second)
+
+ // a single recording should have confidence still at 0, and transition to private quickly.
+ recordObservation(an, network.ReachabilityPrivate)
+ expectStatus(network.ReachabilityPrivate, "failed to transition to private.")
+
+ expectEvent(t, s, network.ReachabilityPrivate, 3*time.Second)
+
+ // stronger public confidence should be harder to undo.
+ recordObservation(an, network.ReachabilityPublic)
+ recordObservation(an, network.ReachabilityPublic)
+ expectStatus(network.ReachabilityPublic, "failed to transition to public.")
+ expectEvent(t, s, network.ReachabilityPublic, 3*time.Second)
+
+ recordObservation(an, network.ReachabilityPrivate)
+ expectStatus(network.ReachabilityPublic, "too-extreme private transition.")
+
+ // Don't emit events if reachability hasn't changed
+ recordObservation(an, network.ReachabilityPublic)
+ expectStatus(network.ReachabilityPublic, "reachability should stay public")
+ select {
+ case <-s.Out():
+ t.Fatal("received event without state transition")
+ case <-time.After(300 * time.Millisecond):
+ }
+}
+
+func TestStaticNat(t *testing.T) {
+ _, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ h := bhost.NewBlankHost(swarmt.GenSwarm(t))
+ defer h.Close()
+ s, _ := h.EventBus().Subscribe(&event.EvtLocalReachabilityChanged{})
+
+ nat, err := New(h, WithReachability(network.ReachabilityPrivate))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if nat.Status() != network.ReachabilityPrivate {
+ t.Fatalf("should be private")
+ }
+ expectEvent(t, s, network.ReachabilityPrivate, 3*time.Second)
+}
diff --git a/p2p/host/autonat/client.go b/p2p/host/autonat/client.go
new file mode 100644
index 0000000000..3118cd5812
--- /dev/null
+++ b/p2p/host/autonat/client.go
@@ -0,0 +1,129 @@
+package autonat
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/p2p/host/autonat/pb"
+
+ "github.com/libp2p/go-msgio/pbio"
+)
+
+// NewAutoNATClient creates a fresh instance of an AutoNATClient
+// If addrFunc is nil, h.Addrs will be used
+func NewAutoNATClient(h host.Host, addrFunc AddrFunc, mt MetricsTracer) Client {
+ if addrFunc == nil {
+ addrFunc = h.Addrs
+ }
+ return &client{h: h, addrFunc: addrFunc, mt: mt}
+}
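+
+// Illustrative usage (a hedged sketch, not part of the original source),
+// assuming `h` is an already-connected host.Host and `srv` is the peer ID of
+// a known AutoNAT server:
+//
+//	cli := NewAutoNATClient(h, nil, nil)
+//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//	defer cancel()
+//	switch err := cli.DialBack(ctx, srv); {
+//	case err == nil:
+//		// the server dialed us back; we are reachable on at least one address
+//	case IsDialRefused(err):
+//		// the server declined to attempt a dial (e.g. rate limiting)
+//	case IsDialError(err):
+//		// the server tried to dial back and failed
+//	}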
+
+type client struct {
+ h host.Host
+ addrFunc AddrFunc
+ mt MetricsTracer
+}
+
+// DialBack asks peer p to dial us back on all addresses returned by the addrFunc.
+// It blocks until we've received a response from the peer.
+//
+// Note: A returned error Message_E_DIAL_ERROR does not imply that the server
+// actually performed a dial attempt. Servers that run a version < v0.20.0 also
+// return Message_E_DIAL_ERROR if the dial was skipped due to the dialPolicy.
+func (c *client) DialBack(ctx context.Context, p peer.ID) error {
+ s, err := c.h.NewStream(ctx, p, AutoNATProto)
+ if err != nil {
+ return err
+ }
+
+ if err := s.Scope().SetService(ServiceName); err != nil {
+ log.Debug("error attaching stream to autonat service", "err", err)
+ s.Reset()
+ return err
+ }
+
+ if err := s.Scope().ReserveMemory(maxMsgSize, network.ReservationPriorityAlways); err != nil {
+ log.Debug("error reserving memory for autonat stream", "err", err)
+ s.Reset()
+ return err
+ }
+ defer s.Scope().ReleaseMemory(maxMsgSize)
+
+ deadline := time.Now().Add(streamTimeout)
+ if ctxDeadline, ok := ctx.Deadline(); ok {
+ if ctxDeadline.Before(deadline) {
+ deadline = ctxDeadline
+ }
+ }
+
+ s.SetDeadline(deadline)
+ // Close the stream when we're done; the error paths below reset it
+ // explicitly, so a graceful close here is sufficient.
+ defer s.Close()
+
+ r := pbio.NewDelimitedReader(s, maxMsgSize)
+ w := pbio.NewDelimitedWriter(s)
+
+ req := newDialMessage(peer.AddrInfo{ID: c.h.ID(), Addrs: c.addrFunc()})
+ if err := w.WriteMsg(req); err != nil {
+ s.Reset()
+ return err
+ }
+
+ var res pb.Message
+ if err := r.ReadMsg(&res); err != nil {
+ s.Reset()
+ return err
+ }
+ if res.GetType() != pb.Message_DIAL_RESPONSE {
+ s.Reset()
+ return fmt.Errorf("unexpected response: %s", res.GetType().String())
+ }
+
+ status := res.GetDialResponse().GetStatus()
+ if c.mt != nil {
+ c.mt.ReceivedDialResponse(status)
+ }
+ switch status {
+ case pb.Message_OK:
+ return nil
+ default:
+ return Error{Status: status, Text: res.GetDialResponse().GetStatusText()}
+ }
+}
+
+// Error wraps errors signalled by AutoNAT services
+type Error struct {
+ Status pb.Message_ResponseStatus
+ Text string
+}
+
+func (e Error) Error() string {
+ return fmt.Sprintf("AutoNAT error: %s (%s)", e.Text, e.Status.String())
+}
+
+// IsDialError returns true if the error was due to a dial back failure
+func (e Error) IsDialError() bool {
+ return e.Status == pb.Message_E_DIAL_ERROR
+}
+
+// IsDialRefused returns true if the error was due to a refusal to dial back
+func (e Error) IsDialRefused() bool {
+ return e.Status == pb.Message_E_DIAL_REFUSED
+}
+
+// IsDialError returns true if the AutoNAT peer signalled an error dialing back
+func IsDialError(e error) bool {
+ ae, ok := e.(Error)
+ return ok && ae.IsDialError()
+}
+
+// IsDialRefused returns true if the AutoNAT peer signalled refusal to dial back
+func IsDialRefused(e error) bool {
+ ae, ok := e.(Error)
+ return ok && ae.IsDialRefused()
+}
diff --git a/p2p/host/autonat/dialpolicy.go b/p2p/host/autonat/dialpolicy.go
new file mode 100644
index 0000000000..9615229559
--- /dev/null
+++ b/p2p/host/autonat/dialpolicy.go
@@ -0,0 +1,95 @@
+package autonat
+
+import (
+ "net"
+
+ "github.com/libp2p/go-libp2p/core/host"
+
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
+type dialPolicy struct {
+ allowSelfDials bool
+ host host.Host
+}
+
+// skipDial reports whether a multiaddress isn't worth attempting to dial.
+// The same logic is used when the autonat client is deciding whether a
+// remote peer is worth using as a server, and when the server is deciding
+// whether a requesting client is worth dialing back.
+func (d *dialPolicy) skipDial(addr ma.Multiaddr) bool {
+ // skip relay addresses
+ _, err := addr.ValueForProtocol(ma.P_CIRCUIT)
+ if err == nil {
+ return true
+ }
+
+ if d.allowSelfDials {
+ return false
+ }
+
+ // skip private network (unroutable) addresses
+ if !manet.IsPublicAddr(addr) {
+ return true
+ }
+ candidateIP, err := manet.ToIP(addr)
+ if err != nil {
+ return true
+ }
+
+ // Skip dialing addresses we believe are the local node's
+ for _, localAddr := range d.host.Addrs() {
+ localIP, err := manet.ToIP(localAddr)
+ if err != nil {
+ continue
+ }
+ if localIP.Equal(candidateIP) {
+ return true
+ }
+ }
+
+ return false
+}
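+
+// For illustration (a sketch, assuming allowSelfDials is false):
+//
+//	d.skipDial(ma.StringCast("/ip4/1.2.3.4/tcp/4001/p2p-circuit")) // true: relay address
+//	d.skipDial(ma.StringCast("/ip4/192.168.0.1/tcp/4001"))         // true: private address
+//	d.skipDial(ma.StringCast("/ip4/8.8.8.8/tcp/4001"))             // false, unless 8.8.8.8 is one of our own addresses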
+
+// skipPeer reports whether the collection of multiaddresses representing a
+// peer isn't worth attempting to dial. If one of the addresses matches an
+// address we believe is ours, we exclude the peer, even if there are other
+// valid public addresses in the list.
+func (d *dialPolicy) skipPeer(addrs []ma.Multiaddr) bool {
+ localAddrs := d.host.Addrs()
+ localHosts := make([]net.IP, 0)
+ for _, lAddr := range localAddrs {
+ if _, err := lAddr.ValueForProtocol(ma.P_CIRCUIT); err != nil && manet.IsPublicAddr(lAddr) {
+ lIP, err := manet.ToIP(lAddr)
+ if err != nil {
+ continue
+ }
+ localHosts = append(localHosts, lIP)
+ }
+ }
+
+ // if a public IP of the peer is one of ours: skip the peer.
+ goodPublic := false
+ for _, addr := range addrs {
+ if _, err := addr.ValueForProtocol(ma.P_CIRCUIT); err != nil && manet.IsPublicAddr(addr) {
+ aIP, err := manet.ToIP(addr)
+ if err != nil {
+ continue
+ }
+
+ for _, lIP := range localHosts {
+ if lIP.Equal(aIP) {
+ return true
+ }
+ }
+ goodPublic = true
+ }
+ }
+
+ if d.allowSelfDials {
+ return false
+ }
+
+ return !goodPublic
+}
diff --git a/p2p/host/autonat/dialpolicy_test.go b/p2p/host/autonat/dialpolicy_test.go
new file mode 100644
index 0000000000..6166083218
--- /dev/null
+++ b/p2p/host/autonat/dialpolicy_test.go
@@ -0,0 +1,143 @@
+package autonat
+
+import (
+ "context"
+ "errors"
+ "net"
+ "testing"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/transport"
+ blankhost "github.com/libp2p/go-libp2p/p2p/host/blank"
+ swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
+
+ "github.com/multiformats/go-multiaddr"
+)
+
+func makeMA(a string) multiaddr.Multiaddr {
+ addr, err := multiaddr.NewMultiaddr(a)
+ if err != nil {
+ panic(err)
+ }
+ return addr
+}
+
+type mockT struct {
+ ctx context.Context
+ cancel context.CancelFunc
+ addr multiaddr.Multiaddr
+}
+
+func (m *mockT) Dial(_ context.Context, _ multiaddr.Multiaddr, _ peer.ID) (transport.CapableConn, error) {
+ return nil, nil
+}
+func (m *mockT) CanDial(_ multiaddr.Multiaddr) bool { return true }
+func (m *mockT) Listen(_ multiaddr.Multiaddr) (transport.Listener, error) {
+ return &mockL{m.ctx, m.cancel, m.addr}, nil
+}
+func (m *mockT) Protocols() []int { return []int{multiaddr.P_IP4} }
+func (m *mockT) Proxy() bool { return false }
+func (m *mockT) String() string { return "mock-tcp-ipv4" }
+
+type mockL struct {
+ ctx context.Context
+ cancel context.CancelFunc
+ addr multiaddr.Multiaddr
+}
+
+func (l *mockL) Accept() (transport.CapableConn, error) {
+ <-l.ctx.Done()
+ return nil, errors.New("expected in mocked test")
+}
+func (l *mockL) Close() error { l.cancel(); return nil }
+func (l *mockL) Addr() net.Addr { return nil }
+func (l *mockL) Multiaddr() multiaddr.Multiaddr { return l.addr }
+
+func TestSkipDial(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ s := swarmt.GenSwarm(t)
+ defer s.Close()
+ d := dialPolicy{host: blankhost.NewBlankHost(s)}
+ if d.skipDial(makeMA("/ip4/8.8.8.8")) != false {
+ t.Fatal("failed dialing a valid public addr")
+ }
+
+ if d.skipDial(makeMA("/ip6/2607:f8b0:400a::1")) != false {
+ t.Fatal("failed dialing a valid public addr")
+ }
+
+ if d.skipDial(makeMA("/ip4/192.168.0.1")) != true {
+ t.Fatal("didn't skip dialing an internal addr")
+ }
+
+ s.AddTransport(&mockT{ctx, cancel, makeMA("/ip4/8.8.8.8")})
+ err := s.AddListenAddr(makeMA("/ip4/8.8.8.8"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if d.skipDial(makeMA("/ip4/8.8.8.8")) != true {
+ t.Fatal("failed dialing a valid host address")
+ }
+}
+
+func TestSkipPeer(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ s := swarmt.GenSwarm(t)
+ defer s.Close()
+
+ d := dialPolicy{host: blankhost.NewBlankHost(s)}
+ if d.skipPeer([]multiaddr.Multiaddr{makeMA("/ip4/8.8.8.8")}) != false {
+ t.Fatal("failed dialing a valid public addr")
+ }
+ if d.skipPeer([]multiaddr.Multiaddr{makeMA("/ip4/8.8.8.8"), makeMA("/ip4/192.168.0.1")}) != false {
+ t.Fatal("failed dialing a valid public addr")
+ }
+ if d.skipPeer([]multiaddr.Multiaddr{makeMA("/ip4/192.168.0.1")}) != true {
+ t.Fatal("succeeded with no public addr")
+ }
+
+ s.AddTransport(&mockT{ctx, cancel, makeMA("/ip4/8.8.8.8")})
+ err := s.AddListenAddr(makeMA("/ip4/8.8.8.8"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if d.skipPeer([]multiaddr.Multiaddr{makeMA("/ip4/8.8.8.8"), makeMA("/ip4/192.168.0.1")}) != true {
+ t.Fatal("succeeded dialing host address")
+ }
+ if d.skipPeer([]multiaddr.Multiaddr{makeMA("/ip4/8.8.8.8"), makeMA("/ip4/9.9.9.9")}) != true {
+ t.Fatal("succeeded dialing host address when other public")
+ }
+ if d.skipPeer([]multiaddr.Multiaddr{makeMA("/ip4/9.9.9.9")}) != false {
+ t.Fatal("succeeded dialing host address when other public")
+ }
+}
+
+func TestSkipLocalPeer(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ s := swarmt.GenSwarm(t)
+ defer s.Close()
+
+ d := dialPolicy{host: blankhost.NewBlankHost(s)}
+ s.AddTransport(&mockT{ctx, cancel, makeMA("/ip4/192.168.0.1")})
+ err := s.AddListenAddr(makeMA("/ip4/192.168.0.1"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if d.skipPeer([]multiaddr.Multiaddr{makeMA("/ip4/8.8.8.8")}) != false {
+ t.Fatal("failed dialing a valid public addr")
+ }
+ if d.skipPeer([]multiaddr.Multiaddr{makeMA("/ip4/8.8.8.8"), makeMA("/ip4/192.168.0.1")}) != false {
+ t.Fatal("failed dialing a valid public addr")
+ }
+ if d.skipPeer([]multiaddr.Multiaddr{makeMA("/ip4/192.168.0.1")}) != true {
+ t.Fatal("succeeded with no public addr")
+ }
+}
diff --git a/p2p/host/autonat/interface.go b/p2p/host/autonat/interface.go
new file mode 100644
index 0000000000..9bf3bfe524
--- /dev/null
+++ b/p2p/host/autonat/interface.go
@@ -0,0 +1,31 @@
+package autonat
+
+import (
+ "context"
+ "io"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+// AutoNAT is the interface for NAT autodiscovery
+type AutoNAT interface {
+ // Status returns the current NAT status
+ Status() network.Reachability
+ io.Closer
+}
+
+// Client is a stateless client interface to AutoNAT peers
+type Client interface {
+ // DialBack requests that a peer providing AutoNAT services test dialing back
+ // and report the address it reached us on upon a successful connection.
+ DialBack(ctx context.Context, p peer.ID) error
+}
+
+// AddrFunc is a function returning the candidate addresses for the local host.
+type AddrFunc func() []ma.Multiaddr
+
+// Option is an Autonat option for configuration
+type Option func(*config) error
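+
+// Illustrative sketch (not part of the original source): an AddrFunc that pins
+// the client to a fixed, externally-forwarded address, combined with the
+// UsingAddresses option from options.go. The address is a made-up example:
+//
+//	addr := ma.StringCast("/ip4/1.2.3.4/tcp/4001")
+//	an, err := New(h, UsingAddresses(func() []ma.Multiaddr {
+//		return []ma.Multiaddr{addr}
+//	}))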
diff --git a/p2p/host/autonat/metrics.go b/p2p/host/autonat/metrics.go
new file mode 100644
index 0000000000..4207d4e7d5
--- /dev/null
+++ b/p2p/host/autonat/metrics.go
@@ -0,0 +1,162 @@
+package autonat
+
+import (
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/p2p/host/autonat/pb"
+ "github.com/libp2p/go-libp2p/p2p/metricshelper"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+const metricNamespace = "libp2p_autonat"
+
+var (
+ reachabilityStatus = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "reachability_status",
+ Help: "Current node reachability",
+ },
+ )
+ reachabilityStatusConfidence = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "reachability_status_confidence",
+ Help: "Node reachability status confidence",
+ },
+ )
+ receivedDialResponseTotal = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "received_dial_response_total",
+ Help: "Count of dial responses for client",
+ },
+ []string{"response_status"},
+ )
+ outgoingDialResponseTotal = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "outgoing_dial_response_total",
+ Help: "Count of dial responses for server",
+ },
+ []string{"response_status"},
+ )
+ outgoingDialRefusedTotal = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "outgoing_dial_refused_total",
+ Help: "Count of dial requests refused by server",
+ },
+ []string{"refusal_reason"},
+ )
+ nextProbeTimestamp = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "next_probe_timestamp",
+ Help: "Time of next probe",
+ },
+ )
+ collectors = []prometheus.Collector{
+ reachabilityStatus,
+ reachabilityStatusConfidence,
+ receivedDialResponseTotal,
+ outgoingDialResponseTotal,
+ outgoingDialRefusedTotal,
+ nextProbeTimestamp,
+ }
+)
+
+type MetricsTracer interface {
+ ReachabilityStatus(status network.Reachability)
+ ReachabilityStatusConfidence(confidence int)
+ ReceivedDialResponse(status pb.Message_ResponseStatus)
+ OutgoingDialResponse(status pb.Message_ResponseStatus)
+ OutgoingDialRefused(reason string)
+ NextProbeTime(t time.Time)
+}
+
+func getResponseStatus(status pb.Message_ResponseStatus) string {
+ var s string
+ switch status {
+ case pb.Message_OK:
+ s = "ok"
+ case pb.Message_E_DIAL_ERROR:
+ s = "dial error"
+ case pb.Message_E_DIAL_REFUSED:
+ s = "dial refused"
+ case pb.Message_E_BAD_REQUEST:
+ s = "bad request"
+ case pb.Message_E_INTERNAL_ERROR:
+ s = "internal error"
+ default:
+ s = "unknown"
+ }
+ return s
+}
+
+const (
+ rate_limited = "rate limited"
+ dial_blocked = "dial blocked"
+ no_valid_address = "no valid address"
+)
+
+type metricsTracer struct{}
+
+var _ MetricsTracer = &metricsTracer{}
+
+type metricsTracerSetting struct {
+ reg prometheus.Registerer
+}
+
+type MetricsTracerOption func(*metricsTracerSetting)
+
+func WithRegisterer(reg prometheus.Registerer) MetricsTracerOption {
+ return func(s *metricsTracerSetting) {
+ if reg != nil {
+ s.reg = reg
+ }
+ }
+}
+
+func NewMetricsTracer(opts ...MetricsTracerOption) MetricsTracer {
+ setting := &metricsTracerSetting{reg: prometheus.DefaultRegisterer}
+ for _, opt := range opts {
+ opt(setting)
+ }
+ metricshelper.RegisterCollectors(setting.reg, collectors...)
+ return &metricsTracer{}
+}
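+
+// Illustrative usage (a sketch, not part of the original source): register the
+// autonat collectors with a private registry instead of the default one:
+//
+//	reg := prometheus.NewRegistry()
+//	mt := NewMetricsTracer(WithRegisterer(reg))
+//	an, err := New(h, WithMetricsTracer(mt))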
+
+func (mt *metricsTracer) ReachabilityStatus(status network.Reachability) {
+ reachabilityStatus.Set(float64(status))
+}
+
+func (mt *metricsTracer) ReachabilityStatusConfidence(confidence int) {
+ reachabilityStatusConfidence.Set(float64(confidence))
+}
+
+func (mt *metricsTracer) ReceivedDialResponse(status pb.Message_ResponseStatus) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+ *tags = append(*tags, getResponseStatus(status))
+ receivedDialResponseTotal.WithLabelValues(*tags...).Inc()
+}
+
+func (mt *metricsTracer) OutgoingDialResponse(status pb.Message_ResponseStatus) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+ *tags = append(*tags, getResponseStatus(status))
+ outgoingDialResponseTotal.WithLabelValues(*tags...).Inc()
+}
+
+func (mt *metricsTracer) OutgoingDialRefused(reason string) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+ *tags = append(*tags, reason)
+ outgoingDialRefusedTotal.WithLabelValues(*tags...).Inc()
+}
+
+func (mt *metricsTracer) NextProbeTime(t time.Time) {
+ nextProbeTimestamp.Set(float64(t.Unix()))
+}
diff --git a/p2p/host/autonat/metrics_test.go b/p2p/host/autonat/metrics_test.go
new file mode 100644
index 0000000000..1e5c8759e7
--- /dev/null
+++ b/p2p/host/autonat/metrics_test.go
@@ -0,0 +1,82 @@
+//go:build nocover
+
+package autonat
+
+import (
+ "math/rand"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/p2p/host/autonat/pb"
+)
+
+func BenchmarkReachabilityStatus(b *testing.B) {
+ b.ReportAllocs()
+ mt := NewMetricsTracer()
+ for i := 0; i < b.N; i++ {
+ mt.ReachabilityStatus(network.Reachability(i % 3))
+ }
+}
+
+func BenchmarkClientDialResponse(b *testing.B) {
+ b.ReportAllocs()
+ mt := NewMetricsTracer()
+ statuses := []pb.Message_ResponseStatus{
+ pb.Message_OK, pb.Message_E_DIAL_ERROR, pb.Message_E_DIAL_REFUSED, pb.Message_E_BAD_REQUEST}
+ for i := 0; i < b.N; i++ {
+ mt.ReceivedDialResponse(statuses[i%len(statuses)])
+ }
+}
+
+func BenchmarkServerDialResponse(b *testing.B) {
+ b.ReportAllocs()
+ mt := NewMetricsTracer()
+ statuses := []pb.Message_ResponseStatus{
+ pb.Message_OK, pb.Message_E_DIAL_ERROR, pb.Message_E_DIAL_REFUSED, pb.Message_E_BAD_REQUEST}
+ for i := 0; i < b.N; i++ {
+ mt.OutgoingDialResponse(statuses[i%len(statuses)])
+ }
+}
+
+func BenchmarkServerDialRefused(b *testing.B) {
+ b.ReportAllocs()
+ mt := NewMetricsTracer()
+ for i := 0; i < b.N; i++ {
+ mt.OutgoingDialRefused(rate_limited)
+ }
+}
+
+func TestMetricsNoAllocNoCover(t *testing.T) {
+ mt := NewMetricsTracer()
+ statuses := []network.Reachability{
+ network.ReachabilityPublic,
+ network.ReachabilityPrivate,
+ network.ReachabilityUnknown,
+ }
+ respStatuses := []pb.Message_ResponseStatus{
+ pb.Message_OK,
+ pb.Message_E_BAD_REQUEST,
+ pb.Message_E_DIAL_REFUSED,
+ pb.Message_E_INTERNAL_ERROR,
+ }
+ reasons := []string{
+ rate_limited,
+ "bad request",
+ "no valid address",
+ }
+ tests := map[string]func(){
+ "ReachabilityStatus": func() { mt.ReachabilityStatus(statuses[rand.Intn(len(statuses))]) },
+ "ReachabilityStatusConfidence": func() { mt.ReachabilityStatusConfidence(rand.Intn(4)) },
+ "ReceivedDialResponse": func() { mt.ReceivedDialResponse(respStatuses[rand.Intn(len(respStatuses))]) },
+ "OutgoingDialResponse": func() { mt.OutgoingDialResponse(respStatuses[rand.Intn(len(respStatuses))]) },
+ "OutgoingDialRefused": func() { mt.OutgoingDialRefused(reasons[rand.Intn(len(reasons))]) },
+ "NextProbeTime": func() { mt.NextProbeTime(time.Now()) },
+ }
+ for method, f := range tests {
+ allocs := testing.AllocsPerRun(1000, f)
+ if allocs > 0 {
+ t.Fatalf("%s alloc test failed expected 0 received %0.2f", method, allocs)
+ }
+ }
+}
diff --git a/p2p/host/autonat/notify.go b/p2p/host/autonat/notify.go
new file mode 100644
index 0000000000..6ae4326211
--- /dev/null
+++ b/p2p/host/autonat/notify.go
@@ -0,0 +1,30 @@
+package autonat
+
+import (
+ "github.com/libp2p/go-libp2p/core/network"
+
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
+var _ network.Notifiee = (*AmbientAutoNAT)(nil)
+
+// Listen is part of the network.Notifiee interface
+func (as *AmbientAutoNAT) Listen(_ network.Network, _ ma.Multiaddr) {}
+
+// ListenClose is part of the network.Notifiee interface
+func (as *AmbientAutoNAT) ListenClose(_ network.Network, _ ma.Multiaddr) {}
+
+// Connected is part of the network.Notifiee interface
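+// An inbound connection from a public address hints that we may be publicly
+// reachable, so it is forwarded (best-effort, without blocking the notifier)
+// to the AmbientAutoNAT worker as a cue to schedule a probe.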
+func (as *AmbientAutoNAT) Connected(_ network.Network, c network.Conn) {
+ if c.Stat().Direction == network.DirInbound &&
+ manet.IsPublicAddr(c.RemoteMultiaddr()) {
+ select {
+ case as.inboundConn <- c:
+ default:
+ }
+ }
+}
+
+// Disconnected is part of the network.Notifiee interface
+func (as *AmbientAutoNAT) Disconnected(_ network.Network, _ network.Conn) {}
diff --git a/p2p/host/autonat/options.go b/p2p/host/autonat/options.go
new file mode 100644
index 0000000000..b378da348d
--- /dev/null
+++ b/p2p/host/autonat/options.go
@@ -0,0 +1,155 @@
+package autonat
+
+import (
+ "errors"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+)
+
+// config holds configurable options for the autonat subsystem.
+type config struct {
+ host host.Host
+
+ addressFunc AddrFunc
+ dialPolicy dialPolicy
+ dialer network.Network
+ forceReachability bool
+ reachability network.Reachability
+ metricsTracer MetricsTracer
+
+ // client
+ bootDelay time.Duration
+ retryInterval time.Duration
+ refreshInterval time.Duration
+ requestTimeout time.Duration
+ throttlePeerPeriod time.Duration
+
+ // server
+ dialTimeout time.Duration
+ maxPeerAddresses int
+ throttleGlobalMax int
+ throttlePeerMax int
+ throttleResetPeriod time.Duration
+ throttleResetJitter time.Duration
+}
+
+var defaults = func(c *config) error {
+ c.bootDelay = 15 * time.Second
+ c.retryInterval = 90 * time.Second
+ c.refreshInterval = 15 * time.Minute
+ c.requestTimeout = 30 * time.Second
+ c.throttlePeerPeriod = 90 * time.Second
+
+ c.dialTimeout = 15 * time.Second
+ c.maxPeerAddresses = 16
+ c.throttleGlobalMax = 30
+ c.throttlePeerMax = 3
+ c.throttleResetPeriod = 1 * time.Minute
+ c.throttleResetJitter = 15 * time.Second
+ return nil
+}
+
+const maxRefreshInterval = 24 * time.Hour
+
+// EnableService specifies that AutoNAT should be allowed to run a NAT service to help
+// other peers determine their own NAT status. The provided Network should not be the
+// default network/dialer of the host passed to `New`: the NAT system needs to
+// make parallel connections, and as such will modify the associated peerstore
+// and terminate connections of this dialer. The provided dialer should,
+// however, be compatible (TCP/UDP) with the transports of the libp2p network.
+func EnableService(dialer network.Network) Option {
+ return func(c *config) error {
+ if dialer == c.host.Network() || dialer.Peerstore() == c.host.Peerstore() {
+ return errors.New("dialer should not be that of the host")
+ }
+ c.dialer = dialer
+ return nil
+ }
+}
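+
+// Illustrative wiring (a sketch, not part of the original source): the
+// dialback dialer is typically the network of a second, independently
+// constructed host, so its peerstore and connections can be mutated freely
+// without disturbing the main host:
+//
+//	dialbackHost, err := libp2p.New(libp2p.NoListenAddrs)
+//	// handle err ...
+//	an, err := autonat.New(h, autonat.EnableService(dialbackHost.Network()))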
+
+// WithReachability overrides autonat to simply report a fixed, caller-supplied
+// reachability status.
+func WithReachability(reachability network.Reachability) Option {
+ return func(c *config) error {
+ c.forceReachability = true
+ c.reachability = reachability
+ return nil
+ }
+}
+
+// UsingAddresses allows overriding which addresses the AutoNAT client believes
+// are "its own". Useful for testing, or for more exotic port-forwarding
+// scenarios where the host may be listening on different ports than it wants
+// to externally advertise or verify connectivity on.
+func UsingAddresses(addrFunc AddrFunc) Option {
+ return func(c *config) error {
+ if addrFunc == nil {
+ return errors.New("invalid address function supplied")
+ }
+ c.addressFunc = addrFunc
+ return nil
+ }
+}
+
+// WithSchedule configures how aggressively probes will be made to verify the
+// address of the host. retryInterval indicates how often probes should be made
+// when the host lacks confidence about its address, while refreshInterval
+// is the schedule of periodic probes when the host believes it knows its
+// steady-state reachability.
+func WithSchedule(retryInterval, refreshInterval time.Duration) Option {
+ return func(c *config) error {
+ c.retryInterval = retryInterval
+ c.refreshInterval = refreshInterval
+ return nil
+ }
+}
+
+// WithoutStartupDelay removes the initial delay the NAT subsystem typically
+// uses as a buffer for ensuring that connectivity and guesses as to the host's
+// local interfaces have settled down during startup.
+func WithoutStartupDelay() Option {
+ return func(c *config) error {
+ c.bootDelay = 1
+ return nil
+ }
+}
+
+// WithoutThrottling indicates that this autonat service should not place
+// restrictions on how many peers it is willing to help when acting as
+// a server.
+func WithoutThrottling() Option {
+ return func(c *config) error {
+ c.throttleGlobalMax = 0
+ return nil
+ }
+}
+
+// WithThrottling specifies how many peers (`amount`) the node is willing to
+// help every `interval` when acting as a server.
+func WithThrottling(amount int, interval time.Duration) Option {
+ return func(c *config) error {
+ c.throttleGlobalMax = amount
+ c.throttleResetPeriod = interval
+ c.throttleResetJitter = interval / 4
+ return nil
+ }
+}
+
+// WithPeerThrottling specifies a limit for the maximum number of IP checks
+// this node will provide to an individual peer in each `interval`.
+func WithPeerThrottling(amount int) Option {
+ return func(c *config) error {
+ c.throttlePeerMax = amount
+ return nil
+ }
+}
+
+// WithMetricsTracer uses mt to track autonat metrics
+func WithMetricsTracer(mt MetricsTracer) Option {
+ return func(c *config) error {
+ c.metricsTracer = mt
+ return nil
+ }
+}
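+
+// Illustrative sketch (not part of the original source) combining several of
+// the options above; the intervals are arbitrary example values:
+//
+//	an, err := autonat.New(h,
+//		autonat.WithSchedule(30*time.Second, 10*time.Minute),
+//		autonat.WithThrottling(10, time.Minute),
+//		autonat.WithPeerThrottling(2),
+//	)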
diff --git a/p2p/host/autonat/pb/autonat.pb.go b/p2p/host/autonat/pb/autonat.pb.go
new file mode 100644
index 0000000000..c03f446fd5
--- /dev/null
+++ b/p2p/host/autonat/pb/autonat.pb.go
@@ -0,0 +1,450 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.6
+// protoc v5.29.2
+// source: p2p/host/autonat/pb/autonat.proto
+
+package pb
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+ unsafe "unsafe"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type Message_MessageType int32
+
+const (
+ Message_DIAL Message_MessageType = 0
+ Message_DIAL_RESPONSE Message_MessageType = 1
+)
+
+// Enum value maps for Message_MessageType.
+var (
+ Message_MessageType_name = map[int32]string{
+ 0: "DIAL",
+ 1: "DIAL_RESPONSE",
+ }
+ Message_MessageType_value = map[string]int32{
+ "DIAL": 0,
+ "DIAL_RESPONSE": 1,
+ }
+)
+
+func (x Message_MessageType) Enum() *Message_MessageType {
+ p := new(Message_MessageType)
+ *p = x
+ return p
+}
+
+func (x Message_MessageType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Message_MessageType) Descriptor() protoreflect.EnumDescriptor {
+ return file_p2p_host_autonat_pb_autonat_proto_enumTypes[0].Descriptor()
+}
+
+func (Message_MessageType) Type() protoreflect.EnumType {
+ return &file_p2p_host_autonat_pb_autonat_proto_enumTypes[0]
+}
+
+func (x Message_MessageType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *Message_MessageType) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+ if err != nil {
+ return err
+ }
+ *x = Message_MessageType(num)
+ return nil
+}
+
+// Deprecated: Use Message_MessageType.Descriptor instead.
+func (Message_MessageType) EnumDescriptor() ([]byte, []int) {
+ return file_p2p_host_autonat_pb_autonat_proto_rawDescGZIP(), []int{0, 0}
+}
+
+type Message_ResponseStatus int32
+
+const (
+ Message_OK Message_ResponseStatus = 0
+ Message_E_DIAL_ERROR Message_ResponseStatus = 100
+ Message_E_DIAL_REFUSED Message_ResponseStatus = 101
+ Message_E_BAD_REQUEST Message_ResponseStatus = 200
+ Message_E_INTERNAL_ERROR Message_ResponseStatus = 300
+)
+
+// Enum value maps for Message_ResponseStatus.
+var (
+ Message_ResponseStatus_name = map[int32]string{
+ 0: "OK",
+ 100: "E_DIAL_ERROR",
+ 101: "E_DIAL_REFUSED",
+ 200: "E_BAD_REQUEST",
+ 300: "E_INTERNAL_ERROR",
+ }
+ Message_ResponseStatus_value = map[string]int32{
+ "OK": 0,
+ "E_DIAL_ERROR": 100,
+ "E_DIAL_REFUSED": 101,
+ "E_BAD_REQUEST": 200,
+ "E_INTERNAL_ERROR": 300,
+ }
+)
+
+func (x Message_ResponseStatus) Enum() *Message_ResponseStatus {
+ p := new(Message_ResponseStatus)
+ *p = x
+ return p
+}
+
+func (x Message_ResponseStatus) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Message_ResponseStatus) Descriptor() protoreflect.EnumDescriptor {
+ return file_p2p_host_autonat_pb_autonat_proto_enumTypes[1].Descriptor()
+}
+
+func (Message_ResponseStatus) Type() protoreflect.EnumType {
+ return &file_p2p_host_autonat_pb_autonat_proto_enumTypes[1]
+}
+
+func (x Message_ResponseStatus) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *Message_ResponseStatus) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+ if err != nil {
+ return err
+ }
+ *x = Message_ResponseStatus(num)
+ return nil
+}
+
+// Deprecated: Use Message_ResponseStatus.Descriptor instead.
+func (Message_ResponseStatus) EnumDescriptor() ([]byte, []int) {
+ return file_p2p_host_autonat_pb_autonat_proto_rawDescGZIP(), []int{0, 1}
+}
+
+type Message struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Type *Message_MessageType `protobuf:"varint,1,opt,name=type,enum=autonat.pb.Message_MessageType" json:"type,omitempty"`
+ Dial *Message_Dial `protobuf:"bytes,2,opt,name=dial" json:"dial,omitempty"`
+ DialResponse *Message_DialResponse `protobuf:"bytes,3,opt,name=dialResponse" json:"dialResponse,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Message) Reset() {
+ *x = Message{}
+ mi := &file_p2p_host_autonat_pb_autonat_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Message) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Message) ProtoMessage() {}
+
+func (x *Message) ProtoReflect() protoreflect.Message {
+ mi := &file_p2p_host_autonat_pb_autonat_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Message.ProtoReflect.Descriptor instead.
+func (*Message) Descriptor() ([]byte, []int) {
+ return file_p2p_host_autonat_pb_autonat_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Message) GetType() Message_MessageType {
+ if x != nil && x.Type != nil {
+ return *x.Type
+ }
+ return Message_DIAL
+}
+
+func (x *Message) GetDial() *Message_Dial {
+ if x != nil {
+ return x.Dial
+ }
+ return nil
+}
+
+func (x *Message) GetDialResponse() *Message_DialResponse {
+ if x != nil {
+ return x.DialResponse
+ }
+ return nil
+}
+
+type Message_PeerInfo struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Id []byte `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
+ Addrs [][]byte `protobuf:"bytes,2,rep,name=addrs" json:"addrs,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Message_PeerInfo) Reset() {
+ *x = Message_PeerInfo{}
+ mi := &file_p2p_host_autonat_pb_autonat_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Message_PeerInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Message_PeerInfo) ProtoMessage() {}
+
+func (x *Message_PeerInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_p2p_host_autonat_pb_autonat_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Message_PeerInfo.ProtoReflect.Descriptor instead.
+func (*Message_PeerInfo) Descriptor() ([]byte, []int) {
+ return file_p2p_host_autonat_pb_autonat_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *Message_PeerInfo) GetId() []byte {
+ if x != nil {
+ return x.Id
+ }
+ return nil
+}
+
+func (x *Message_PeerInfo) GetAddrs() [][]byte {
+ if x != nil {
+ return x.Addrs
+ }
+ return nil
+}
+
+type Message_Dial struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Peer *Message_PeerInfo `protobuf:"bytes,1,opt,name=peer" json:"peer,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Message_Dial) Reset() {
+ *x = Message_Dial{}
+ mi := &file_p2p_host_autonat_pb_autonat_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Message_Dial) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Message_Dial) ProtoMessage() {}
+
+func (x *Message_Dial) ProtoReflect() protoreflect.Message {
+ mi := &file_p2p_host_autonat_pb_autonat_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Message_Dial.ProtoReflect.Descriptor instead.
+func (*Message_Dial) Descriptor() ([]byte, []int) {
+ return file_p2p_host_autonat_pb_autonat_proto_rawDescGZIP(), []int{0, 1}
+}
+
+func (x *Message_Dial) GetPeer() *Message_PeerInfo {
+ if x != nil {
+ return x.Peer
+ }
+ return nil
+}
+
+type Message_DialResponse struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Status *Message_ResponseStatus `protobuf:"varint,1,opt,name=status,enum=autonat.pb.Message_ResponseStatus" json:"status,omitempty"`
+ StatusText *string `protobuf:"bytes,2,opt,name=statusText" json:"statusText,omitempty"`
+ Addr []byte `protobuf:"bytes,3,opt,name=addr" json:"addr,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Message_DialResponse) Reset() {
+ *x = Message_DialResponse{}
+ mi := &file_p2p_host_autonat_pb_autonat_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Message_DialResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Message_DialResponse) ProtoMessage() {}
+
+func (x *Message_DialResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_p2p_host_autonat_pb_autonat_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Message_DialResponse.ProtoReflect.Descriptor instead.
+func (*Message_DialResponse) Descriptor() ([]byte, []int) {
+ return file_p2p_host_autonat_pb_autonat_proto_rawDescGZIP(), []int{0, 2}
+}
+
+func (x *Message_DialResponse) GetStatus() Message_ResponseStatus {
+ if x != nil && x.Status != nil {
+ return *x.Status
+ }
+ return Message_OK
+}
+
+func (x *Message_DialResponse) GetStatusText() string {
+ if x != nil && x.StatusText != nil {
+ return *x.StatusText
+ }
+ return ""
+}
+
+func (x *Message_DialResponse) GetAddr() []byte {
+ if x != nil {
+ return x.Addr
+ }
+ return nil
+}
+
+var File_p2p_host_autonat_pb_autonat_proto protoreflect.FileDescriptor
+
+const file_p2p_host_autonat_pb_autonat_proto_rawDesc = "" +
+ "\n" +
+ "!p2p/host/autonat/pb/autonat.proto\x12\n" +
+ "autonat.pb\"\xb5\x04\n" +
+ "\aMessage\x123\n" +
+ "\x04type\x18\x01 \x01(\x0e2\x1f.autonat.pb.Message.MessageTypeR\x04type\x12,\n" +
+ "\x04dial\x18\x02 \x01(\v2\x18.autonat.pb.Message.DialR\x04dial\x12D\n" +
+ "\fdialResponse\x18\x03 \x01(\v2 .autonat.pb.Message.DialResponseR\fdialResponse\x1a0\n" +
+ "\bPeerInfo\x12\x0e\n" +
+ "\x02id\x18\x01 \x01(\fR\x02id\x12\x14\n" +
+ "\x05addrs\x18\x02 \x03(\fR\x05addrs\x1a8\n" +
+ "\x04Dial\x120\n" +
+ "\x04peer\x18\x01 \x01(\v2\x1c.autonat.pb.Message.PeerInfoR\x04peer\x1a~\n" +
+ "\fDialResponse\x12:\n" +
+ "\x06status\x18\x01 \x01(\x0e2\".autonat.pb.Message.ResponseStatusR\x06status\x12\x1e\n" +
+ "\n" +
+ "statusText\x18\x02 \x01(\tR\n" +
+ "statusText\x12\x12\n" +
+ "\x04addr\x18\x03 \x01(\fR\x04addr\"*\n" +
+ "\vMessageType\x12\b\n" +
+ "\x04DIAL\x10\x00\x12\x11\n" +
+ "\rDIAL_RESPONSE\x10\x01\"i\n" +
+ "\x0eResponseStatus\x12\x06\n" +
+ "\x02OK\x10\x00\x12\x10\n" +
+ "\fE_DIAL_ERROR\x10d\x12\x12\n" +
+ "\x0eE_DIAL_REFUSED\x10e\x12\x12\n" +
+ "\rE_BAD_REQUEST\x10\xc8\x01\x12\x15\n" +
+ "\x10E_INTERNAL_ERROR\x10\xac\x02B1Z/github.com/libp2p/go-libp2p/p2p/host/autonat/pb"
+
+var (
+ file_p2p_host_autonat_pb_autonat_proto_rawDescOnce sync.Once
+ file_p2p_host_autonat_pb_autonat_proto_rawDescData []byte
+)
+
+func file_p2p_host_autonat_pb_autonat_proto_rawDescGZIP() []byte {
+ file_p2p_host_autonat_pb_autonat_proto_rawDescOnce.Do(func() {
+ file_p2p_host_autonat_pb_autonat_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_p2p_host_autonat_pb_autonat_proto_rawDesc), len(file_p2p_host_autonat_pb_autonat_proto_rawDesc)))
+ })
+ return file_p2p_host_autonat_pb_autonat_proto_rawDescData
+}
+
+var file_p2p_host_autonat_pb_autonat_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
+var file_p2p_host_autonat_pb_autonat_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
+var file_p2p_host_autonat_pb_autonat_proto_goTypes = []any{
+ (Message_MessageType)(0), // 0: autonat.pb.Message.MessageType
+ (Message_ResponseStatus)(0), // 1: autonat.pb.Message.ResponseStatus
+ (*Message)(nil), // 2: autonat.pb.Message
+ (*Message_PeerInfo)(nil), // 3: autonat.pb.Message.PeerInfo
+ (*Message_Dial)(nil), // 4: autonat.pb.Message.Dial
+ (*Message_DialResponse)(nil), // 5: autonat.pb.Message.DialResponse
+}
+var file_p2p_host_autonat_pb_autonat_proto_depIdxs = []int32{
+ 0, // 0: autonat.pb.Message.type:type_name -> autonat.pb.Message.MessageType
+ 4, // 1: autonat.pb.Message.dial:type_name -> autonat.pb.Message.Dial
+ 5, // 2: autonat.pb.Message.dialResponse:type_name -> autonat.pb.Message.DialResponse
+ 3, // 3: autonat.pb.Message.Dial.peer:type_name -> autonat.pb.Message.PeerInfo
+ 1, // 4: autonat.pb.Message.DialResponse.status:type_name -> autonat.pb.Message.ResponseStatus
+ 5, // [5:5] is the sub-list for method output_type
+ 5, // [5:5] is the sub-list for method input_type
+ 5, // [5:5] is the sub-list for extension type_name
+ 5, // [5:5] is the sub-list for extension extendee
+ 0, // [0:5] is the sub-list for field type_name
+}
+
+func init() { file_p2p_host_autonat_pb_autonat_proto_init() }
+func file_p2p_host_autonat_pb_autonat_proto_init() {
+ if File_p2p_host_autonat_pb_autonat_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_p2p_host_autonat_pb_autonat_proto_rawDesc), len(file_p2p_host_autonat_pb_autonat_proto_rawDesc)),
+ NumEnums: 2,
+ NumMessages: 4,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_p2p_host_autonat_pb_autonat_proto_goTypes,
+ DependencyIndexes: file_p2p_host_autonat_pb_autonat_proto_depIdxs,
+ EnumInfos: file_p2p_host_autonat_pb_autonat_proto_enumTypes,
+ MessageInfos: file_p2p_host_autonat_pb_autonat_proto_msgTypes,
+ }.Build()
+ File_p2p_host_autonat_pb_autonat_proto = out.File
+ file_p2p_host_autonat_pb_autonat_proto_goTypes = nil
+ file_p2p_host_autonat_pb_autonat_proto_depIdxs = nil
+}
diff --git a/p2p/host/autonat/pb/autonat.proto b/p2p/host/autonat/pb/autonat.proto
new file mode 100644
index 0000000000..60ab15e8e1
--- /dev/null
+++ b/p2p/host/autonat/pb/autonat.proto
@@ -0,0 +1,39 @@
+syntax = "proto2";
+
+package autonat.pb;
+
+option go_package = "github.com/libp2p/go-libp2p/p2p/host/autonat/pb";
+
+message Message {
+ enum MessageType {
+ DIAL = 0;
+ DIAL_RESPONSE = 1;
+ }
+
+ enum ResponseStatus {
+ OK = 0;
+ E_DIAL_ERROR = 100;
+ E_DIAL_REFUSED = 101;
+ E_BAD_REQUEST = 200;
+ E_INTERNAL_ERROR = 300;
+ }
+
+ message PeerInfo {
+ optional bytes id = 1;
+ repeated bytes addrs = 2;
+ }
+
+ message Dial {
+ optional PeerInfo peer = 1;
+ }
+
+ message DialResponse {
+ optional ResponseStatus status = 1;
+ optional string statusText = 2;
+ optional bytes addr = 3;
+ }
+
+ optional MessageType type = 1;
+ optional Dial dial = 2;
+ optional DialResponse dialResponse = 3;
+}
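+
+// A typical exchange, for illustration: the client sends a Message of type
+// DIAL carrying its PeerInfo (peer ID plus candidate addresses); the server
+// replies with a DIAL_RESPONSE whose status is either OK (together with the
+// address it successfully dialed) or one of the E_* codes above.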
diff --git a/p2p/host/autonat/proto.go b/p2p/host/autonat/proto.go
new file mode 100644
index 0000000000..e8f97c53a4
--- /dev/null
+++ b/p2p/host/autonat/proto.go
@@ -0,0 +1,39 @@
+package autonat
+
+import (
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/p2p/host/autonat/pb"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+// AutoNATProto identifies the autonat service protocol
+const AutoNATProto = "/libp2p/autonat/1.0.0"
+
+func newDialMessage(pi peer.AddrInfo) *pb.Message {
+ msg := new(pb.Message)
+ msg.Type = pb.Message_DIAL.Enum()
+ msg.Dial = new(pb.Message_Dial)
+ msg.Dial.Peer = new(pb.Message_PeerInfo)
+ msg.Dial.Peer.Id = []byte(pi.ID)
+ msg.Dial.Peer.Addrs = make([][]byte, len(pi.Addrs))
+ for i, addr := range pi.Addrs {
+ msg.Dial.Peer.Addrs[i] = addr.Bytes()
+ }
+
+ return msg
+}
+
+func newDialResponseOK(addr ma.Multiaddr) *pb.Message_DialResponse {
+ dr := new(pb.Message_DialResponse)
+ dr.Status = pb.Message_OK.Enum()
+ dr.Addr = addr.Bytes()
+ return dr
+}
+
+func newDialResponseError(status pb.Message_ResponseStatus, text string) *pb.Message_DialResponse {
+ dr := new(pb.Message_DialResponse)
+ dr.Status = status.Enum()
+ dr.StatusText = &text
+ return dr
+}
diff --git a/p2p/host/autonat/svc.go b/p2p/host/autonat/svc.go
new file mode 100644
index 0000000000..ed6c282690
--- /dev/null
+++ b/p2p/host/autonat/svc.go
@@ -0,0 +1,300 @@
+package autonat
+
+import (
+ "context"
+ "errors"
+ "math/rand"
+ "sync"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/libp2p/go-libp2p/p2p/host/autonat/pb"
+
+ "github.com/libp2p/go-msgio/pbio"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+var streamTimeout = 60 * time.Second
+
+const (
+ ServiceName = "libp2p.autonat"
+
+ maxMsgSize = 4096
+)
+
+// autoNATService provides NAT autodetection services to other peers.
+type autoNATService struct {
+ instanceLock sync.Mutex
+ instance context.CancelFunc
+ backgroundRunning chan struct{} // closed when background exits
+
+ config *config
+
+ // rate limiter
+ mx sync.Mutex
+ reqs map[peer.ID]int
+ globalReqs int
+}
+
+// newAutoNATService creates a new autoNATService instance attached to a host.
+func newAutoNATService(c *config) (*autoNATService, error) {
+ if c.dialer == nil {
+ return nil, errors.New("cannot create NAT service without a network")
+ }
+ return &autoNATService{
+ config: c,
+ reqs: make(map[peer.ID]int),
+ }, nil
+}
+
+func (as *autoNATService) handleStream(s network.Stream) {
+ if err := s.Scope().SetService(ServiceName); err != nil {
+ log.Debug("error attaching stream to autonat service", "err", err)
+ s.Reset()
+ return
+ }
+
+ if err := s.Scope().ReserveMemory(maxMsgSize, network.ReservationPriorityAlways); err != nil {
+ log.Debug("error reserving memory for autonat stream", "err", err)
+ s.Reset()
+ return
+ }
+ defer s.Scope().ReleaseMemory(maxMsgSize)
+
+ s.SetDeadline(time.Now().Add(streamTimeout))
+ defer s.Close()
+
+ pid := s.Conn().RemotePeer()
+ log.Debug("New stream from peer", "peer", pid)
+
+ r := pbio.NewDelimitedReader(s, maxMsgSize)
+ w := pbio.NewDelimitedWriter(s)
+
+ var req pb.Message
+ var res pb.Message
+
+ err := r.ReadMsg(&req)
+ if err != nil {
+ log.Debug("Error reading message", "peer", pid, "err", err)
+ s.Reset()
+ return
+ }
+
+ t := req.GetType()
+ if t != pb.Message_DIAL {
+ log.Debug("Unexpected message", "peer", pid, "message_type", t.String(), "expected_type", pb.Message_DIAL.String())
+ s.Reset()
+ return
+ }
+
+ dr := as.handleDial(pid, s.Conn().RemoteMultiaddr(), req.GetDial().GetPeer())
+ res.Type = pb.Message_DIAL_RESPONSE.Enum()
+ res.DialResponse = dr
+
+ err = w.WriteMsg(&res)
+ if err != nil {
+ log.Debug("Error writing response", "peer", pid, "err", err)
+ s.Reset()
+ return
+ }
+ if as.config.metricsTracer != nil {
+ as.config.metricsTracer.OutgoingDialResponse(res.GetDialResponse().GetStatus())
+ }
+}
+
+func (as *autoNATService) handleDial(p peer.ID, obsaddr ma.Multiaddr, mpi *pb.Message_PeerInfo) *pb.Message_DialResponse {
+ if mpi == nil {
+ return newDialResponseError(pb.Message_E_BAD_REQUEST, "missing peer info")
+ }
+
+ mpid := mpi.GetId()
+ if mpid != nil {
+ mp, err := peer.IDFromBytes(mpid)
+ if err != nil {
+ return newDialResponseError(pb.Message_E_BAD_REQUEST, "bad peer id")
+ }
+
+ if mp != p {
+ return newDialResponseError(pb.Message_E_BAD_REQUEST, "peer id mismatch")
+ }
+ }
+
+ addrs := make([]ma.Multiaddr, 0, as.config.maxPeerAddresses)
+ seen := make(map[string]struct{})
+
+ // Don't even try to dial peers with blocked remote addresses. In order to dial a peer, we
+ // need to know their public IP address, and it needs to be different from our public IP
+ // address.
+ if as.config.dialPolicy.skipDial(obsaddr) {
+ if as.config.metricsTracer != nil {
+ as.config.metricsTracer.OutgoingDialRefused(dial_blocked)
+ }
+ // Note: versions < v0.20.0 return Message_E_DIAL_ERROR here, so we cannot rely on this error code.
+ return newDialResponseError(pb.Message_E_DIAL_REFUSED, "refusing to dial peer with blocked observed address")
+ }
+
+ // Determine the peer's IP address.
+ hostIP, _ := ma.SplitFirst(obsaddr)
+ switch hostIP.Protocol().Code {
+ case ma.P_IP4, ma.P_IP6:
+ default:
+ // This shouldn't be possible as we should skip all addresses that don't include
+ // public IP addresses.
+ return newDialResponseError(pb.Message_E_INTERNAL_ERROR, "expected an IP address")
+ }
+
+ // add observed addr to the list of addresses to dial
+ addrs = append(addrs, obsaddr)
+ seen[obsaddr.String()] = struct{}{}
+
+ for _, maddr := range mpi.GetAddrs() {
+ addr, err := ma.NewMultiaddrBytes(maddr)
+ if err != nil {
+ log.Debug("Error parsing multiaddr", "err", err)
+ continue
+ }
+
+ // For security reasons, we _only_ dial the observed IP address.
+ // Replace other IP addresses with the observed one so we can still try the
+ // requested ports/transports.
+ if ip, rest := ma.SplitFirst(addr); !ip.Equal(hostIP) {
+ // Make sure it's an IP address
+ switch ip.Protocol().Code {
+ case ma.P_IP4, ma.P_IP6:
+ default:
+ continue
+ }
+ addr = hostIP.Multiaddr()
+ if len(rest) > 0 {
+ addr = addr.Encapsulate(rest)
+ }
+ }
+
+ // Make sure we're willing to dial the rest of the address (e.g., not a circuit
+ // address).
+ if as.config.dialPolicy.skipDial(addr) {
+ continue
+ }
+
+ str := addr.String()
+ _, ok := seen[str]
+ if ok {
+ continue
+ }
+
+ addrs = append(addrs, addr)
+ seen[str] = struct{}{}
+
+ if len(addrs) >= as.config.maxPeerAddresses {
+ break
+ }
+ }
+
+ if len(addrs) == 0 {
+ if as.config.metricsTracer != nil {
+ as.config.metricsTracer.OutgoingDialRefused(no_valid_address)
+ }
+ // Note: versions < v0.20.0 return Message_E_DIAL_ERROR here, so we cannot rely on this error code.
+ return newDialResponseError(pb.Message_E_DIAL_REFUSED, "no dialable addresses")
+ }
+
+ return as.doDial(peer.AddrInfo{ID: p, Addrs: addrs})
+}
+
+func (as *autoNATService) doDial(pi peer.AddrInfo) *pb.Message_DialResponse {
+ // rate limit check
+ as.mx.Lock()
+ count := as.reqs[pi.ID]
+ if count >= as.config.throttlePeerMax || (as.config.throttleGlobalMax > 0 &&
+ as.globalReqs >= as.config.throttleGlobalMax) {
+ as.mx.Unlock()
+ if as.config.metricsTracer != nil {
+ as.config.metricsTracer.OutgoingDialRefused(rate_limited)
+ }
+ return newDialResponseError(pb.Message_E_DIAL_REFUSED, "too many dials")
+ }
+ as.reqs[pi.ID] = count + 1
+ as.globalReqs++
+ as.mx.Unlock()
+
+ ctx, cancel := context.WithTimeout(context.Background(), as.config.dialTimeout)
+ defer cancel()
+
+ as.config.dialer.Peerstore().ClearAddrs(pi.ID)
+
+ as.config.dialer.Peerstore().AddAddrs(pi.ID, pi.Addrs, peerstore.TempAddrTTL)
+
+ defer func() {
+ as.config.dialer.Peerstore().ClearAddrs(pi.ID)
+ as.config.dialer.Peerstore().RemovePeer(pi.ID)
+ }()
+
+ conn, err := as.config.dialer.DialPeer(ctx, pi.ID)
+ if err != nil {
+ log.Debug("error dialing peer", "peer", pi.ID, "err", err)
+ // wait for the context to timeout to avoid leaking timing information
+ // this renders the service ineffective as a port scanner
+ <-ctx.Done()
+ return newDialResponseError(pb.Message_E_DIAL_ERROR, "dial failed")
+ }
+
+ ra := conn.RemoteMultiaddr()
+ as.config.dialer.ClosePeer(pi.ID)
+ return newDialResponseOK(ra)
+}
+
+// Enable starts the autoNAT service if it is not already running.
+func (as *autoNATService) Enable() {
+ as.instanceLock.Lock()
+ defer as.instanceLock.Unlock()
+ if as.instance != nil {
+ return
+ }
+ ctx, cancel := context.WithCancel(context.Background())
+ as.instance = cancel
+ as.backgroundRunning = make(chan struct{})
+ as.config.host.SetStreamHandler(AutoNATProto, as.handleStream)
+
+ go as.background(ctx)
+}
+
+// Disable stops the autoNAT service if it is running.
+func (as *autoNATService) Disable() {
+ as.instanceLock.Lock()
+ defer as.instanceLock.Unlock()
+ if as.instance != nil {
+ as.config.host.RemoveStreamHandler(AutoNATProto)
+ as.instance()
+ as.instance = nil
+ <-as.backgroundRunning
+ }
+}
+
+func (as *autoNATService) Close() error {
+ as.Disable()
+ return as.config.dialer.Close()
+}
+
+func (as *autoNATService) background(ctx context.Context) {
+ defer close(as.backgroundRunning)
+
+ timer := time.NewTimer(as.config.throttleResetPeriod)
+ defer timer.Stop()
+
+ for {
+ select {
+ case <-timer.C:
+ as.mx.Lock()
+ as.reqs = make(map[peer.ID]int)
+ as.globalReqs = 0
+ as.mx.Unlock()
+ jitter := rand.Float32() * float32(as.config.throttleResetJitter)
+ timer.Reset(as.config.throttleResetPeriod + time.Duration(int64(jitter)))
+ case <-ctx.Done():
+ return
+ }
+ }
+}
diff --git a/p2p/host/autonat/svc_test.go b/p2p/host/autonat/svc_test.go
new file mode 100644
index 0000000000..d58a9b75e9
--- /dev/null
+++ b/p2p/host/autonat/svc_test.go
@@ -0,0 +1,228 @@
+package autonat
+
+import (
+ "context"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/event"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ bhost "github.com/libp2p/go-libp2p/p2p/host/blank"
+ swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func makeAutoNATConfig(t *testing.T) *config {
+ h := bhost.NewBlankHost(swarmt.GenSwarm(t))
+ dh := bhost.NewBlankHost(swarmt.GenSwarm(t))
+ c := config{host: h, dialer: dh.Network()}
+ _ = defaults(&c)
+ c.forceReachability = true
+ c.dialPolicy.allowSelfDials = true
+ return &c
+}
+
+func makeAutoNATService(t *testing.T, c *config) *autoNATService {
+ as, err := newAutoNATService(c)
+ if err != nil {
+ t.Fatal(err)
+ }
+ as.Enable()
+
+ return as
+}
+
+func makeAutoNATClient(t *testing.T) (host.Host, Client) {
+ h := bhost.NewBlankHost(swarmt.GenSwarm(t))
+ cli := NewAutoNATClient(h, nil, nil)
+ return h, cli
+}
+
+// Note: these tests assume that the host has only private network addresses!
+func TestAutoNATServiceDialRefused(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ c := makeAutoNATConfig(t)
+ defer c.host.Close()
+ defer c.dialer.Close()
+
+ c.dialTimeout = 1 * time.Second
+ c.dialPolicy.allowSelfDials = false
+ _ = makeAutoNATService(t, c)
+ hc, ac := makeAutoNATClient(t)
+ defer hc.Close()
+ connect(t, c.host, hc)
+
+ err := ac.DialBack(ctx, c.host.ID())
+ if err == nil {
+ t.Fatal("Dial back succeeded unexpectedly!")
+ }
+
+ if !IsDialRefused(err) {
+ t.Fatal(err)
+ }
+}
+
+func TestAutoNATServiceDialSuccess(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ c := makeAutoNATConfig(t)
+ defer c.host.Close()
+ defer c.dialer.Close()
+
+ _ = makeAutoNATService(t, c)
+
+ hc, ac := makeAutoNATClient(t)
+ defer hc.Close()
+ connect(t, c.host, hc)
+
+ err := ac.DialBack(ctx, c.host.ID())
+ if err != nil {
+ t.Fatalf("Dial back failed: %s", err.Error())
+ }
+}
+
+func TestAutoNATServiceDialRateLimiter(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ c := makeAutoNATConfig(t)
+ defer c.host.Close()
+ defer c.dialer.Close()
+
+ c.dialTimeout = 200 * time.Millisecond
+ c.throttleResetPeriod = 200 * time.Millisecond
+ c.throttleResetJitter = 0
+ c.throttlePeerMax = 1
+ _ = makeAutoNATService(t, c)
+
+ hc, ac := makeAutoNATClient(t)
+ defer hc.Close()
+ connect(t, c.host, hc)
+
+ err := ac.DialBack(ctx, c.host.ID())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = ac.DialBack(ctx, c.host.ID())
+ if err == nil {
+ t.Fatal("Dial back succeeded unexpectedly!")
+ }
+
+ if !IsDialRefused(err) {
+ t.Fatal(err)
+ }
+
+ time.Sleep(400 * time.Millisecond)
+
+ err = ac.DialBack(ctx, c.host.ID())
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestAutoNATServiceGlobalLimiter(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ c := makeAutoNATConfig(t)
+ defer c.host.Close()
+ defer c.dialer.Close()
+
+ c.dialTimeout = time.Second
+ c.throttleResetPeriod = 10 * time.Second
+ c.throttleResetJitter = 0
+ c.throttlePeerMax = 1
+ c.throttleGlobalMax = 5
+ _ = makeAutoNATService(t, c)
+
+ hs := c.host
+
+ for i := 0; i < 5; i++ {
+ hc, ac := makeAutoNATClient(t)
+ connect(t, hs, hc)
+
+ err := ac.DialBack(ctx, hs.ID())
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ hc, ac := makeAutoNATClient(t)
+ defer hc.Close()
+ connect(t, hs, hc)
+ err := ac.DialBack(ctx, hs.ID())
+ if err == nil {
+ t.Fatal("Dial back succeeded unexpectedly!")
+ }
+
+ if !IsDialRefused(err) {
+ t.Fatal(err)
+ }
+}
+
+func TestAutoNATServiceRateLimitJitter(t *testing.T) {
+ c := makeAutoNATConfig(t)
+ defer c.host.Close()
+ defer c.dialer.Close()
+
+ dur := 100 * time.Millisecond
+ if os.Getenv("CI") != "" {
+ dur = 200 * time.Millisecond
+ }
+
+ c.throttleResetPeriod = dur
+ c.throttleResetJitter = dur
+ c.throttleGlobalMax = 1
+ svc := makeAutoNATService(t, c)
+ svc.mx.Lock()
+ svc.globalReqs = 1
+ svc.mx.Unlock()
+
+ require.Eventually(t, func() bool {
+ svc.mx.Lock()
+ defer svc.mx.Unlock()
+ return svc.globalReqs == 0
+ }, dur*5/2, 10*time.Millisecond, "reset of rate limiter occurred slower than expected")
+}
+
+func TestAutoNATServiceStartup(t *testing.T) {
+ h := bhost.NewBlankHost(swarmt.GenSwarm(t))
+ defer h.Close()
+ dh := bhost.NewBlankHost(swarmt.GenSwarm(t))
+ defer dh.Close()
+ an, err := New(h, EnableService(dh.Network()))
+ if err != nil {
+ t.Fatal(err)
+ }
+ an.(*AmbientAutoNAT).config.dialPolicy.allowSelfDials = true
+
+ hc, ac := makeAutoNATClient(t)
+ connect(t, h, hc)
+
+ err = ac.DialBack(context.Background(), h.ID())
+ if err != nil {
+ t.Fatal("autonat service be active in unknown mode.")
+ }
+
+ sub, _ := h.EventBus().Subscribe(new(event.EvtLocalReachabilityChanged))
+
+ anc := an.(*AmbientAutoNAT)
+ anc.recordObservation(network.ReachabilityPublic)
+
+ <-sub.Out()
+
+ err = ac.DialBack(context.Background(), h.ID())
+ if err != nil {
+ t.Fatalf("autonat should be active, was %v", err)
+ }
+ if an.Status() != network.ReachabilityPublic {
+ t.Fatalf("autonat should report public, but didn't")
+ }
+}
diff --git a/p2p/host/autonat/test/autonat_test.go b/p2p/host/autonat/test/autonat_test.go
new file mode 100644
index 0000000000..5bc893930c
--- /dev/null
+++ b/p2p/host/autonat/test/autonat_test.go
@@ -0,0 +1,47 @@
+// This separate testing package helps avoid a potential circular dependency
+// between libp2p and libp2p-autonat.
+package autonattest
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p"
+ "github.com/libp2p/go-libp2p/core/event"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/p2p/host/autonat"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestAutonatRoundtrip(t *testing.T) {
+ t.Skip("this test doesn't work")
+
+ // 3 hosts are used: [client] and [service + dialback dialer]
+ client, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0"), libp2p.EnableNATService())
+ require.NoError(t, err)
+ service, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0"))
+ require.NoError(t, err)
+ dialback, err := libp2p.New(libp2p.NoListenAddrs)
+ require.NoError(t, err)
+ if _, err := autonat.New(service, autonat.EnableService(dialback.Network())); err != nil {
+ t.Fatal(err)
+ }
+
+ client.Peerstore().AddAddrs(service.ID(), service.Addrs(), time.Hour)
+ require.NoError(t, client.Connect(context.Background(), service.Peerstore().PeerInfo(service.ID())))
+
+ cSub, err := client.EventBus().Subscribe(new(event.EvtLocalReachabilityChanged))
+ require.NoError(t, err)
+ defer cSub.Close()
+
+ select {
+ case stat := <-cSub.Out():
+  if stat.(event.EvtLocalReachabilityChanged).Reachability == network.ReachabilityUnknown {
+ t.Fatalf("After status update, client did not know its status")
+ }
+ case <-time.After(30 * time.Second):
+ t.Fatal("sub timed out.")
+ }
+}
diff --git a/p2p/host/autonat/test/dummy.go b/p2p/host/autonat/test/dummy.go
new file mode 100644
index 0000000000..c3597f8160
--- /dev/null
+++ b/p2p/host/autonat/test/dummy.go
@@ -0,0 +1,3 @@
+package autonattest
+
+// needed so that go test ./... doesn't error
diff --git a/p2p/host/autorelay/addrsplosion.go b/p2p/host/autorelay/addrsplosion.go
new file mode 100644
index 0000000000..13e6274b71
--- /dev/null
+++ b/p2p/host/autorelay/addrsplosion.go
@@ -0,0 +1,158 @@
+package autorelay
+
+import (
+ "encoding/binary"
+
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
+// This function cleans up a relay's address set to remove private addresses and curtail
+// addrsplosion.
+// TODO: Remove this, we don't need this. The current method tries to select the
+// best address for the relay. Instead we should rely on the addresses provided by the
+// relay in response to the reservation request.
+func cleanupAddressSet(addrs []ma.Multiaddr) []ma.Multiaddr {
+ var public, private []ma.Multiaddr
+
+ for _, a := range addrs {
+ if isRelayAddr(a) {
+ continue
+ }
+
+ if manet.IsPublicAddr(a) {
+ public = append(public, a)
+ continue
+ }
+
+  // keep private addrs; they can help us infer non-standard ports below.
+  // Addrs that are neither public nor private are unroutable and dropped.
+ if manet.IsPrivateAddr(a) {
+ private = append(private, a)
+ }
+ }
+
+ if !hasAddrsplosion(public) {
+ return public
+ }
+
+ return sanitizeAddrsplodedSet(public, private)
+}
+
+func isRelayAddr(a ma.Multiaddr) bool {
+ isRelay := false
+
+ ma.ForEach(a, func(c ma.Component) bool {
+ switch c.Protocol().Code {
+ case ma.P_CIRCUIT:
+ isRelay = true
+ return false
+ default:
+ return true
+ }
+ })
+
+ return isRelay
+}
+
+// we have addrsplosion if for some protocol we advertise multiple ports on
+// the same base address.
+func hasAddrsplosion(addrs []ma.Multiaddr) bool {
+ aset := make(map[string]int)
+
+ for _, a := range addrs {
+ key, port := addrKeyAndPort(a)
+ xport, ok := aset[key]
+ if ok && port != xport {
+ return true
+ }
+ aset[key] = port
+ }
+
+ return false
+}
+
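+// addrKeyAndPort splits a multiaddr into a port-less base key and its TCP/UDP
+// port. For example (illustrative): /ip4/1.2.3.4/tcp/4001 yields
+// ("/1.2.3.4/tcp", 4001).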
+func addrKeyAndPort(a ma.Multiaddr) (string, int) {
+ var (
+ key string
+ port int
+ )
+
+ ma.ForEach(a, func(c ma.Component) bool {
+ switch c.Protocol().Code {
+ case ma.P_TCP, ma.P_UDP:
+ port = int(binary.BigEndian.Uint16(c.RawValue()))
+ key += "/" + c.Protocol().Name
+ default:
+ val := c.Value()
+ if val == "" {
+ val = c.Protocol().Name
+ }
+ key += "/" + val
+ }
+ return true
+ })
+
+ return key, port
+}
+
+// clean up addrsplosion
+// the following heuristic is used:
+// - for each base address/protocol combination, if there are multiple ports advertised then
+// only accept the default port if present.
+// - If the default port is not present, we check for non-standard ports by tracking
+// private port bindings if present.
+// - If there is no default or private port binding, we can't infer the correct
+//   port, so we give up and return all addrs (for that base address).
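+//
+// For example (illustrative): given public addrs /ip4/1.2.3.4/tcp/4001,
+// /ip4/1.2.3.4/tcp/33333 and /ip4/1.2.3.4/tcp/33335, only /ip4/1.2.3.4/tcp/4001
+// (the default port) is kept. If a private addr /ip4/192.168.0.10/tcp/33333 is
+// also present, /ip4/1.2.3.4/tcp/33333 is kept as well, since it matches a
+// privately bound port.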
+func sanitizeAddrsplodedSet(public, private []ma.Multiaddr) []ma.Multiaddr {
+ type portAndAddr struct {
+ addr ma.Multiaddr
+ port int
+ }
+
+ privports := make(map[int]struct{})
+ pubaddrs := make(map[string][]portAndAddr)
+
+ for _, a := range private {
+ _, port := addrKeyAndPort(a)
+ privports[port] = struct{}{}
+ }
+
+ for _, a := range public {
+ key, port := addrKeyAndPort(a)
+ pubaddrs[key] = append(pubaddrs[key], portAndAddr{addr: a, port: port})
+ }
+
+ var result []ma.Multiaddr
+ for _, pas := range pubaddrs {
+ if len(pas) == 1 {
+ // it's not addrsploded
+ result = append(result, pas[0].addr)
+ continue
+ }
+
+ haveAddr := false
+ for _, pa := range pas {
+ if _, ok := privports[pa.port]; ok {
+ // it matches a privately bound port, use it
+ result = append(result, pa.addr)
+ haveAddr = true
+ continue
+ }
+
+ if pa.port == 4001 || pa.port == 4002 {
+ // it's a default port, use it
+ result = append(result, pa.addr)
+ haveAddr = true
+ }
+ }
+
+ if !haveAddr {
+ // we weren't able to select a port; bite the bullet and use them all
+ for _, pa := range pas {
+ result = append(result, pa.addr)
+ }
+ }
+ }
+
+ return result
+}
diff --git a/p2p/host/autorelay/addrsplosion_test.go b/p2p/host/autorelay/addrsplosion_test.go
new file mode 100644
index 0000000000..19425c555e
--- /dev/null
+++ b/p2p/host/autorelay/addrsplosion_test.go
@@ -0,0 +1,92 @@
+package autorelay
+
+import (
+ "testing"
+
+ ma "github.com/multiformats/go-multiaddr"
+ matest "github.com/multiformats/go-multiaddr/matest"
+)
+
+func TestCleanupAddrs(t *testing.T) {
+ t.Run("with no addrplosion", func(t *testing.T) {
+ addrs := makeAddrList(
+ "/ip4/127.0.0.1/tcp/4001",
+ "/ip4/127.0.0.1/udp/4002/quic-v1",
+ "/ip4/1.2.3.4/tcp/4001",
+ "/ip4/1.2.3.4/udp/4002/quic-v1",
+ "/dnsaddr/somedomain.com/tcp/4002/ws",
+ )
+ clean := makeAddrList(
+ "/ip4/1.2.3.4/tcp/4001",
+ "/ip4/1.2.3.4/udp/4002/quic-v1",
+ "/dnsaddr/somedomain.com/tcp/4002/ws",
+ )
+ matest.AssertMultiaddrsMatch(t, clean, cleanupAddressSet(addrs))
+ })
+
+ t.Run("with default port", func(t *testing.T) {
+  // test with default port addrsplosion
+ addrs := makeAddrList(
+ "/ip4/127.0.0.1/tcp/4001",
+ "/ip4/1.2.3.4/tcp/4001",
+ "/ip4/1.2.3.4/tcp/33333",
+ "/ip4/1.2.3.4/tcp/33334",
+ "/ip4/1.2.3.4/tcp/33335",
+ "/ip4/1.2.3.4/udp/4002/quic-v1",
+ )
+ clean := makeAddrList(
+ "/ip4/1.2.3.4/tcp/4001",
+ "/ip4/1.2.3.4/udp/4002/quic-v1",
+ )
+ matest.AssertMultiaddrsMatch(t, clean, cleanupAddressSet(addrs))
+ })
+
+ t.Run("with default port, but no private addrs", func(t *testing.T) {
+ // test with default port addrsplosion but no private addrs
+ addrs := makeAddrList(
+ "/ip4/1.2.3.4/tcp/4001",
+ "/ip4/1.2.3.4/tcp/33333",
+ "/ip4/1.2.3.4/tcp/33334",
+ "/ip4/1.2.3.4/tcp/33335",
+ "/ip4/1.2.3.4/udp/4002/quic-v1",
+ )
+ clean := makeAddrList(
+ "/ip4/1.2.3.4/tcp/4001",
+ "/ip4/1.2.3.4/udp/4002/quic-v1",
+ )
+ matest.AssertMultiaddrsMatch(t, clean, cleanupAddressSet(addrs))
+ })
+
+ t.Run("with non-standard port", func(t *testing.T) {
+ addrs := makeAddrList(
+ "/ip4/127.0.0.1/tcp/12345",
+ "/ip4/1.2.3.4/tcp/12345",
+ "/ip4/1.2.3.4/tcp/33333",
+ "/ip4/1.2.3.4/tcp/33334",
+ "/ip4/1.2.3.4/tcp/33335",
+ )
+ clean := makeAddrList(
+ "/ip4/1.2.3.4/tcp/12345",
+ )
+ if !matest.AssertEqualMultiaddrs(t, clean, cleanupAddressSet(addrs)) {
+ t.Log("cleaned up set doesn't match expected")
+ }
+ })
+
+ t.Run("with a clean address set", func(t *testing.T) {
+ // test with a squeaky clean address set
+ addrs := makeAddrList(
+ "/ip4/1.2.3.4/tcp/4001",
+ "/ip4/1.2.3.4/udp/4001/quic-v1",
+ )
+ matest.AssertMultiaddrsMatch(t, addrs, cleanupAddressSet(addrs))
+ })
+}
+
+func makeAddrList(strs ...string) []ma.Multiaddr {
+ result := make([]ma.Multiaddr, 0, len(strs))
+ for _, s := range strs {
+ result = append(result, ma.StringCast(s))
+ }
+ return result
+}
diff --git a/p2p/host/autorelay/autorelay.go b/p2p/host/autorelay/autorelay.go
new file mode 100644
index 0000000000..fe977366eb
--- /dev/null
+++ b/p2p/host/autorelay/autorelay.go
@@ -0,0 +1,107 @@
+package autorelay
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+
+ "github.com/libp2p/go-libp2p/core/event"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/p2p/host/eventbus"
+
+ logging "github.com/libp2p/go-libp2p/gologshim"
+)
+
+var log = logging.Logger("autorelay")
+
+type AutoRelay struct {
+ refCount sync.WaitGroup
+ ctx context.Context
+ ctxCancel context.CancelFunc
+
+ mx sync.Mutex
+ status network.Reachability
+
+ relayFinder *relayFinder
+
+ host host.Host
+
+ metricsTracer MetricsTracer
+}
+
+func NewAutoRelay(host host.Host, opts ...Option) (*AutoRelay, error) {
+ r := &AutoRelay{
+ host: host,
+ status: network.ReachabilityUnknown,
+ }
+ conf := defaultConfig
+ for _, opt := range opts {
+ if err := opt(&conf); err != nil {
+ return nil, err
+ }
+ }
+ r.ctx, r.ctxCancel = context.WithCancel(context.Background())
+ rf, err := newRelayFinder(host, &conf)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create autorelay: %w", err)
+ }
+ r.relayFinder = rf
+ r.metricsTracer = &wrappedMetricsTracer{conf.metricsTracer}
+
+ return r, nil
+}
+
+func (r *AutoRelay) Start() {
+ r.refCount.Add(1)
+ go func() {
+ defer r.refCount.Done()
+ r.background()
+ }()
+}
+
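+// background starts the relay finder while the host's reachability is private
+// or unknown, and stops it when the host becomes publicly reachable.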
+func (r *AutoRelay) background() {
+ subReachability, err := r.host.EventBus().Subscribe(new(event.EvtLocalReachabilityChanged), eventbus.Name("autorelay (background)"))
+ if err != nil {
+  log.Debug("failed to subscribe to EvtLocalReachabilityChanged")
+ return
+ }
+ defer subReachability.Close()
+
+ for {
+ select {
+ case <-r.ctx.Done():
+ return
+ case ev, ok := <-subReachability.Out():
+ if !ok {
+ return
+ }
+ evt := ev.(event.EvtLocalReachabilityChanged)
+ switch evt.Reachability {
+ case network.ReachabilityPrivate, network.ReachabilityUnknown:
+ err := r.relayFinder.Start()
+ if errors.Is(err, errAlreadyRunning) {
+ log.Debug("tried to start already running relay finder")
+ } else if err != nil {
+ log.Error("failed to start relay finder", "err", err)
+ } else {
+ r.metricsTracer.RelayFinderStatus(true)
+ }
+ case network.ReachabilityPublic:
+ r.relayFinder.Stop()
+ r.metricsTracer.RelayFinderStatus(false)
+ }
+ r.mx.Lock()
+ r.status = evt.Reachability
+ r.mx.Unlock()
+ }
+ }
+}
+
+func (r *AutoRelay) Close() error {
+ r.ctxCancel()
+ err := r.relayFinder.Stop()
+ r.refCount.Wait()
+ return err
+}
diff --git a/p2p/host/autorelay/autorelay_test.go b/p2p/host/autorelay/autorelay_test.go
new file mode 100644
index 0000000000..a2186b5183
--- /dev/null
+++ b/p2p/host/autorelay/autorelay_test.go
@@ -0,0 +1,603 @@
+package autorelay_test
+
+import (
+ "context"
+ "fmt"
+ "slices"
+ "strings"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p"
+ "github.com/libp2p/go-libp2p/core/event"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/test"
+ "github.com/libp2p/go-libp2p/p2p/host/autorelay"
+ circuitv2_proto "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/proto"
+
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+const protoIDv2 = circuitv2_proto.ProtoIDv2Hop
+
+type mockClock struct {
+ *test.MockClock
+}
+
+func (c mockClock) InstantTimer(when time.Time) autorelay.InstantTimer {
+ return c.MockClock.InstantTimer(when)
+}
+
+func newMockClock() mockClock {
+ return mockClock{MockClock: test.NewMockClock()}
+}
+
+var _ autorelay.ClockWithInstantTimer = mockClock{}
+
+func numRelays(h host.Host) int {
+ return len(usedRelays(h))
+}
+
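+// usedRelays returns the distinct relay peer IDs that appear in the host's
+// /p2p-circuit addresses.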
+func usedRelays(h host.Host) []peer.ID {
+ m := make(map[peer.ID]struct{})
+ for _, addr := range h.Addrs() {
+ addr, comp := ma.SplitLast(addr)
+ if comp.Protocol().Code != ma.P_CIRCUIT { // not a relay addr
+ continue
+ }
+ _, comp = ma.SplitLast(addr)
+ if comp.Protocol().Code != ma.P_P2P {
+ panic("expected p2p component")
+ }
+ id, err := peer.Decode(comp.Value())
+ if err != nil {
+ panic(err)
+ }
+ m[id] = struct{}{}
+ }
+ peers := make([]peer.ID, 0, len(m))
+ for id := range m {
+ peers = append(peers, id)
+ }
+ return peers
+}
+
+func newPrivateNode(t *testing.T, peerSource func(context.Context, int) <-chan peer.AddrInfo,
+ opts ...autorelay.Option) host.Host {
+ t.Helper()
+ h, err := libp2p.New(
+ libp2p.ForceReachabilityPrivate(),
+ libp2p.EnableAutoRelayWithPeerSource(peerSource, opts...),
+ )
+ require.NoError(t, err)
+ return h
+}
+
+func newPrivateNodeWithStaticRelays(t *testing.T, static []peer.AddrInfo, opts ...autorelay.Option) host.Host {
+ t.Helper()
+ h, err := libp2p.New(
+ libp2p.ForceReachabilityPrivate(),
+ libp2p.EnableAutoRelayWithStaticRelays(static, opts...),
+ )
+ require.NoError(t, err)
+ return h
+}
+
+func newRelay(t *testing.T) host.Host {
+ t.Helper()
+ h, err := libp2p.New(
+ libp2p.DisableRelay(),
+ libp2p.EnableRelayService(),
+ libp2p.ForceReachabilityPublic(),
+ libp2p.AddrsFactory(func(addrs []ma.Multiaddr) []ma.Multiaddr {
+ for i, addr := range addrs {
+ saddr := addr.String()
+ if strings.HasPrefix(saddr, "/ip4/127.0.0.1/") {
+ addrNoIP := strings.TrimPrefix(saddr, "/ip4/127.0.0.1")
+     // .internal is classified as a public address, as users
+     // are free to map this DNS name to a public IP address
+     // for use within a LAN
+ addrs[i] = ma.StringCast("/dns/libp2p.internal" + addrNoIP)
+ }
+ }
+ return addrs
+ }),
+ )
+ require.NoError(t, err)
+ require.Eventually(t, func() bool {
+ for _, p := range h.Mux().Protocols() {
+ if p == protoIDv2 {
+ return true
+ }
+ }
+ return false
+ }, time.Second, 10*time.Millisecond)
+ return h
+}
+
+func TestSingleCandidate(t *testing.T) {
+ var counter int
+ h := newPrivateNode(t,
+ func(_ context.Context, num int) <-chan peer.AddrInfo {
+ counter++
+ require.Equal(t, 1, num)
+ peerChan := make(chan peer.AddrInfo, num)
+ defer close(peerChan)
+ r := newRelay(t)
+ t.Cleanup(func() { r.Close() })
+ peerChan <- peer.AddrInfo{ID: r.ID(), Addrs: r.Addrs()}
+ return peerChan
+ },
+ autorelay.WithMaxCandidates(1),
+ autorelay.WithNumRelays(99999),
+ autorelay.WithBootDelay(0),
+ autorelay.WithMinInterval(time.Hour),
+ )
+ defer h.Close()
+
+ require.Eventually(t, func() bool { return numRelays(h) > 0 }, 10*time.Second, 100*time.Millisecond)
+ // test that we don't add any more relays
+ require.Never(t, func() bool { return numRelays(h) > 1 }, 200*time.Millisecond, 50*time.Millisecond)
+ require.Equal(t, 1, counter, "expected the peer source callback to only have been called once")
+}
+
+func TestSingleRelay(t *testing.T) {
+ const numCandidates = 3
+ var called bool
+ peerChan := make(chan peer.AddrInfo, numCandidates)
+ for i := 0; i < numCandidates; i++ {
+ r := newRelay(t)
+ t.Cleanup(func() { r.Close() })
+ peerChan <- peer.AddrInfo{ID: r.ID(), Addrs: r.Addrs()}
+ }
+ close(peerChan)
+
+ h := newPrivateNode(t,
+ func(_ context.Context, num int) <-chan peer.AddrInfo {
+ require.False(t, called, "expected the peer source callback to only have been called once")
+ called = true
+ require.Equal(t, numCandidates, num)
+ return peerChan
+ },
+ autorelay.WithMaxCandidates(numCandidates),
+ autorelay.WithNumRelays(1),
+ autorelay.WithBootDelay(0),
+ autorelay.WithMinInterval(time.Hour),
+ )
+ defer h.Close()
+
+ require.Eventually(t, func() bool { return numRelays(h) > 0 }, 5*time.Second, 100*time.Millisecond)
+ // test that we don't add any more relays
+ require.Never(t, func() bool { return numRelays(h) > 1 }, 200*time.Millisecond, 50*time.Millisecond)
+}
+
+func TestWaitForCandidates(t *testing.T) {
+ peerChan := make(chan peer.AddrInfo)
+ h := newPrivateNode(t,
+ func(context.Context, int) <-chan peer.AddrInfo { return peerChan },
+ autorelay.WithMinCandidates(2),
+ autorelay.WithNumRelays(1),
+ autorelay.WithBootDelay(time.Hour),
+ autorelay.WithMinInterval(time.Hour),
+ )
+ defer h.Close()
+
+ r1 := newRelay(t)
+ t.Cleanup(func() { r1.Close() })
+ peerChan <- peer.AddrInfo{ID: r1.ID(), Addrs: r1.Addrs()}
+
+ // make sure we don't add any relays yet
+ // We need to wait until we have at least 2 candidates before we connect.
+ require.Never(t, func() bool { return numRelays(h) > 0 }, 200*time.Millisecond, 50*time.Millisecond)
+
+ r2 := newRelay(t)
+ t.Cleanup(func() { r2.Close() })
+ peerChan <- peer.AddrInfo{ID: r2.ID(), Addrs: r2.Addrs()}
+ require.Eventually(t, func() bool { return numRelays(h) > 0 }, 10*time.Second, 100*time.Millisecond)
+}
+
+func TestBackoff(t *testing.T) {
+ const backoff = 20 * time.Second
+ cl := newMockClock()
+ r, err := libp2p.New(
+ libp2p.DisableRelay(),
+ libp2p.ForceReachabilityPublic(),
+ libp2p.AddrsFactory(func(addrs []ma.Multiaddr) []ma.Multiaddr {
+ for i, addr := range addrs {
+ saddr := addr.String()
+ if strings.HasPrefix(saddr, "/ip4/127.0.0.1/") {
+ addrNoIP := strings.TrimPrefix(saddr, "/ip4/127.0.0.1")
+ addrs[i] = ma.StringCast("/dns4/localhost" + addrNoIP)
+ }
+ }
+ return addrs
+ }),
+ )
+ require.NoError(t, err)
+ defer r.Close()
+ var reservations atomic.Int32
+ r.SetStreamHandler(protoIDv2, func(str network.Stream) {
+ defer reservations.Add(1)
+ str.Close()
+ })
+
+ var counter atomic.Int32
+ h := newPrivateNode(t,
+ func(context.Context, int) <-chan peer.AddrInfo {
+ // always return the same node, and make sure we don't try to connect to it too frequently
+ counter.Add(1)
+ peerChan := make(chan peer.AddrInfo, 1)
+ peerChan <- peer.AddrInfo{ID: r.ID(), Addrs: r.Addrs()}
+ close(peerChan)
+ return peerChan
+ },
+ autorelay.WithNumRelays(1),
+ autorelay.WithBootDelay(0),
+ autorelay.WithBackoff(backoff),
+ autorelay.WithMinCandidates(1),
+ autorelay.WithMaxCandidateAge(1),
+ autorelay.WithClock(cl),
+ autorelay.WithMinInterval(0),
+ )
+ defer h.Close()
+
+ require.Eventually(t, func() bool {
+ return reservations.Load() == 1
+ }, 3*time.Second, 20*time.Millisecond, "reservations load should be 1")
+
+ cl.AdvanceBy(1) // Increment the time a little so we can make another peer source call
+ require.Eventually(t, func() bool {
+ // The reservation will fail, and autorelay will ask the peer source for
+ // more candidates. Wait until it does so, this way we know that client
+ // knows the relay connection has failed before we advance the time.
+ return counter.Load() > 1
+ }, 2*time.Second, 100*time.Millisecond, "counter load should be 2")
+
+ // make sure we don't add any relays yet
+ for i := 0; i < 2; i++ {
+ cl.AdvanceBy(backoff / 3)
+ require.Equal(t, 1, int(reservations.Load()))
+ }
+ cl.AdvanceBy(backoff)
+ require.Eventually(t, func() bool {
+ return reservations.Load() == 2
+ }, 3*time.Second, 100*time.Millisecond, "reservations load should be 2")
+ require.Less(t, int(counter.Load()), 10) // just make sure we're not busy-looping
+ require.Equal(t, 2, int(reservations.Load()))
+}
+
+func TestStaticRelays(t *testing.T) {
+ const numStaticRelays = 3
+ var staticRelays []peer.AddrInfo
+ for i := 0; i < numStaticRelays; i++ {
+ r := newRelay(t)
+ t.Cleanup(func() { r.Close() })
+ staticRelays = append(staticRelays, peer.AddrInfo{ID: r.ID(), Addrs: r.Addrs()})
+ }
+
+ h := newPrivateNodeWithStaticRelays(t,
+ staticRelays,
+ autorelay.WithNumRelays(1),
+ )
+ defer h.Close()
+
+ require.Eventually(t, func() bool { return numRelays(h) > 0 }, 10*time.Second, 50*time.Millisecond)
+}
+
+func TestConnectOnDisconnect(t *testing.T) {
+ const num = 3
+ peerChan := make(chan peer.AddrInfo, num)
+ relays := make([]host.Host, 0, num)
+ for i := 0; i < 3; i++ {
+ r := newRelay(t)
+ t.Cleanup(func() { r.Close() })
+ peerChan <- peer.AddrInfo{ID: r.ID(), Addrs: r.Addrs()}
+ relays = append(relays, r)
+ }
+ h := newPrivateNode(t,
+ func(context.Context, int) <-chan peer.AddrInfo { return peerChan },
+ autorelay.WithMinCandidates(1),
+ autorelay.WithMaxCandidates(num),
+ autorelay.WithNumRelays(1),
+ autorelay.WithBootDelay(0),
+ autorelay.WithMinInterval(time.Hour),
+ )
+ defer h.Close()
+
+ require.Eventually(t, func() bool { return numRelays(h) > 0 }, 10*time.Second, 100*time.Millisecond)
+ relaysInUse := usedRelays(h)
+ require.Len(t, relaysInUse, 1)
+ oldRelay := relaysInUse[0]
+
+ for _, r := range relays {
+ if r.ID() == oldRelay {
+ r.Close()
+ }
+ }
+
+ require.Eventually(t, func() bool { return numRelays(h) > 0 }, 10*time.Second, 100*time.Millisecond)
+ relaysInUse = usedRelays(h)
+ require.Len(t, relaysInUse, 1)
+ require.NotEqualf(t, oldRelay, relaysInUse[0], "old relay should not be used again")
+}
+
+func TestMaxAge(t *testing.T) {
+ cl := newMockClock()
+
+ const num = 4
+ peerChan1 := make(chan peer.AddrInfo, num)
+ peerChan2 := make(chan peer.AddrInfo, num)
+ relays1 := make([]host.Host, 0, num)
+ relays2 := make([]host.Host, 0, num)
+ for i := 0; i < num; i++ {
+ r1 := newRelay(t)
+ t.Cleanup(func() { r1.Close() })
+ peerChan1 <- peer.AddrInfo{ID: r1.ID(), Addrs: r1.Addrs()}
+ relays1 = append(relays1, r1)
+ r2 := newRelay(t)
+ t.Cleanup(func() { r2.Close() })
+ relays2 = append(relays2, r2)
+ }
+ close(peerChan1)
+ peerChans := make(chan chan peer.AddrInfo, 2)
+ peerChans <- peerChan1
+ peerChans <- peerChan2
+ close(peerChans)
+
+ h := newPrivateNode(t,
+ func(context.Context, int) <-chan peer.AddrInfo {
+ c, ok := <-peerChans
+ if !ok {
+ t.Fatal("unexpected call to PeerSource")
+ }
+ return c
+ },
+ autorelay.WithNumRelays(1),
+ autorelay.WithMaxCandidates(100),
+ autorelay.WithBootDelay(0),
+ autorelay.WithMaxCandidateAge(20*time.Minute),
+ autorelay.WithClock(cl),
+ autorelay.WithMinInterval(30*time.Second),
+ )
+ defer h.Close()
+
+ require.Eventually(t, func() bool {
+ return numRelays(h) > 0
+ }, 10*time.Second, 100*time.Millisecond)
+ relays := usedRelays(h)
+ require.Len(t, relays, 1)
+
+ cl.AdvanceBy(time.Minute)
+ require.Eventually(t, func() bool {
+ return len(peerChans) == 0
+ }, 10*time.Second, 100*time.Millisecond)
+
+ cl.AdvanceBy(10 * time.Minute)
+ for _, r := range relays2 {
+ peerChan2 <- peer.AddrInfo{ID: r.ID(), Addrs: r.Addrs()}
+ }
+ cl.AdvanceBy(11 * time.Minute)
+
+ require.Eventually(t, func() bool {
+ relays = usedRelays(h)
+ return len(relays) == 1
+ }, 10*time.Second, 100*time.Millisecond)
+
+ // By now the 3 unused relay candidates should have been garbage collected,
+ // and we should only be using a single relay. Let's close it.
+ var oldRelay peer.ID
+ for _, r := range relays1 {
+ if r.ID() == relays[0] {
+ oldRelay = r.ID()
+ r.Close()
+ }
+ }
+ require.NotEmpty(t, oldRelay)
+
+ require.Eventually(t, func() bool {
+ relays = usedRelays(h)
+ if len(relays) != 1 {
+ return false
+ }
+ return relays[0] != oldRelay
+ }, 10*time.Second, 100*time.Millisecond)
+
+ require.Len(t, relays, 1)
+ ids := make([]peer.ID, 0, len(relays2))
+ for _, r := range relays2 {
+ ids = append(ids, r.ID())
+ }
+
+ require.Eventually(t, func() bool {
+ for _, id := range ids {
+ if id == relays[0] {
+ return true
+ }
+ }
+ fmt.Println("waiting for", ids, "to contain", relays[0])
+ return false
+ }, 3*time.Second, 100*time.Millisecond)
+ require.Contains(t, ids, relays[0])
+}
+
+func TestReconnectToStaticRelays(t *testing.T) {
+ cl := newMockClock()
+ var staticRelays []peer.AddrInfo
+ const numStaticRelays = 1
+ relays := make([]host.Host, 0, numStaticRelays)
+ for i := 0; i < numStaticRelays; i++ {
+ r := newRelay(t)
+ t.Cleanup(func() { r.Close() })
+ relays = append(relays, r)
+ staticRelays = append(staticRelays, peer.AddrInfo{ID: r.ID(), Addrs: r.Addrs()})
+ }
+
+ h := newPrivateNodeWithStaticRelays(t,
+ staticRelays,
+ autorelay.WithClock(cl),
+ autorelay.WithBackoff(30*time.Minute),
+ )
+ defer h.Close()
+
+ cl.AdvanceBy(time.Minute)
+ require.Eventually(t, func() bool {
+ return numRelays(h) == 1
+ }, 10*time.Second, 100*time.Millisecond)
+
+ relaysInUse := usedRelays(h)
+ oldRelay := relaysInUse[0]
+ for _, r := range relays {
+ if r.ID() == oldRelay {
+ r.Network().ClosePeer(h.ID())
+ }
+ }
+ require.Eventually(t, func() bool {
+ return numRelays(h) == 0
+ }, 10*time.Second, 100*time.Millisecond)
+
+ cl.AdvanceBy(time.Hour)
+ require.Eventually(t, func() bool {
+ return numRelays(h) == 1
+ }, 10*time.Second, 100*time.Millisecond)
+}
+
+func TestMinInterval(t *testing.T) {
+ cl := newMockClock()
+ h := newPrivateNode(t,
+ func(context.Context, int) <-chan peer.AddrInfo {
+ peerChan := make(chan peer.AddrInfo, 1)
+ defer close(peerChan)
+ r1 := newRelay(t)
+ t.Cleanup(func() { r1.Close() })
+ peerChan <- peer.AddrInfo{ID: r1.ID(), Addrs: r1.Addrs()}
+ return peerChan
+ },
+ autorelay.WithClock(cl),
+ autorelay.WithMinCandidates(2),
+ autorelay.WithNumRelays(1),
+ autorelay.WithBootDelay(time.Hour),
+ autorelay.WithMinInterval(500*time.Millisecond),
+ )
+ defer h.Close()
+
+ cl.AdvanceBy(400 * time.Millisecond)
+ // The second call to peerSource is only allowed once minInterval (500ms) has elapsed on the mock clock
+ require.Never(t, func() bool { return numRelays(h) > 0 }, 500*time.Millisecond, 100*time.Millisecond)
+ cl.AdvanceBy(600 * time.Millisecond)
+ require.Eventually(t, func() bool { return numRelays(h) > 0 }, 3*time.Second, 100*time.Millisecond)
+}
+
+func TestNoBusyLoop0MinInterval(t *testing.T) {
+ var calledTimes uint64
+ cl := newMockClock()
+ h := newPrivateNode(t,
+ func(context.Context, int) <-chan peer.AddrInfo {
+ atomic.AddUint64(&calledTimes, 1)
+ peerChan := make(chan peer.AddrInfo, 1)
+ defer close(peerChan)
+ r1 := newRelay(t)
+ t.Cleanup(func() { r1.Close() })
+ peerChan <- peer.AddrInfo{ID: r1.ID(), Addrs: r1.Addrs()}
+ return peerChan
+ },
+ autorelay.WithClock(cl),
+ autorelay.WithMinCandidates(1),
+ autorelay.WithMaxCandidates(1),
+ autorelay.WithNumRelays(0),
+ autorelay.WithBootDelay(time.Hour),
+ autorelay.WithMinInterval(time.Millisecond),
+ )
+ defer h.Close()
+
+ require.Never(t, func() bool {
+ cl.AdvanceBy(time.Second)
+ val := atomic.LoadUint64(&calledTimes)
+ return val >= 2
+ }, 500*time.Millisecond, 100*time.Millisecond)
+ val := atomic.LoadUint64(&calledTimes)
+ require.Less(t, val, uint64(2))
+}
+
+func TestAutoRelayAddrsEvent(t *testing.T) {
+ cl := newMockClock()
+ relays := []host.Host{newRelay(t), newRelay(t), newRelay(t), newRelay(t), newRelay(t)}
+ t.Cleanup(func() {
+ for _, r := range relays {
+ r.Close()
+ }
+ })
+
+ relayIDFromP2PAddr := func(a ma.Multiaddr) peer.ID {
+ r, c := ma.SplitLast(a)
+ if c.Protocol().Code != ma.P_CIRCUIT {
+ return ""
+ }
+ if id, err := peer.IDFromP2PAddr(r); err == nil {
+ return id
+ }
+ return ""
+ }
+
+ checkAddrsContainsPeersAsRelay := func(addrs []ma.Multiaddr, peers ...peer.ID) bool {
+ for _, p := range peers {
+ if !slices.ContainsFunc(addrs, func(a ma.Multiaddr) bool { return relayIDFromP2PAddr(a) == p }) {
+ return false
+ }
+ }
+ return true
+ }
+ peerChan := make(chan peer.AddrInfo, 5)
+ h := newPrivateNode(t,
+ func(context.Context, int) <-chan peer.AddrInfo {
+ return peerChan
+ },
+ autorelay.WithClock(cl),
+ autorelay.WithMinCandidates(1),
+ autorelay.WithMaxCandidates(10),
+ autorelay.WithNumRelays(5),
+ autorelay.WithBootDelay(1*time.Second),
+ autorelay.WithMinInterval(time.Hour),
+ )
+ defer h.Close()
+
+ sub, err := h.EventBus().Subscribe(new(event.EvtAutoRelayAddrsUpdated))
+ require.NoError(t, err)
+
+ peerChan <- peer.AddrInfo{ID: relays[0].ID(), Addrs: relays[0].Addrs()}
+ cl.AdvanceBy(time.Second)
+
+ require.EventuallyWithT(t, func(collect *assert.CollectT) {
+ e := <-sub.Out()
+ evt := e.(event.EvtAutoRelayAddrsUpdated)
+ if !checkAddrsContainsPeersAsRelay(evt.RelayAddrs, relays[0].ID()) {
+ collect.Errorf("expected %s to be in %v", relays[0].ID(), evt.RelayAddrs)
+ }
+ if checkAddrsContainsPeersAsRelay(evt.RelayAddrs, relays[1].ID()) {
+ collect.Errorf("expected %s to not be in %v", relays[1].ID(), evt.RelayAddrs)
+ }
+ }, 5*time.Second, 50*time.Millisecond)
+ for _, r := range relays[1:] {
+ peerChan <- peer.AddrInfo{ID: r.ID(), Addrs: r.Addrs()}
+ }
+ require.EventuallyWithT(t, func(c *assert.CollectT) {
+ e := <-sub.Out()
+ evt := e.(event.EvtAutoRelayAddrsUpdated)
+ relayIds := []peer.ID{}
+ for _, r := range relays[1:] {
+ relayIds = append(relayIds, r.ID())
+ }
+ if !checkAddrsContainsPeersAsRelay(evt.RelayAddrs, relayIds...) {
+ c.Errorf("expected %s to be in %v", relayIds, evt.RelayAddrs)
+ }
+ }, 5*time.Second, 50*time.Millisecond)
+ select {
+ case e := <-sub.Out():
+ t.Fatal("expected no more events after all reservations obtained; got: ", e.(event.EvtAutoRelayAddrsUpdated))
+ case <-time.After(1 * time.Second):
+ }
+}
diff --git a/p2p/host/autorelay/metrics.go b/p2p/host/autorelay/metrics.go
new file mode 100644
index 0000000000..af4b53ed01
--- /dev/null
+++ b/p2p/host/autorelay/metrics.go
@@ -0,0 +1,373 @@
+package autorelay
+
+import (
+ "errors"
+
+ "github.com/libp2p/go-libp2p/p2p/metricshelper"
+ "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client"
+ pbv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+const metricNamespace = "libp2p_autorelay"
+
+var (
+ status = prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "status",
+ Help: "relay finder active",
+ })
+ reservationsOpenedTotal = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "reservations_opened_total",
+ Help: "Reservations Opened",
+ },
+ )
+ reservationsClosedTotal = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "reservations_closed_total",
+ Help: "Reservations Closed",
+ },
+ )
+ reservationRequestsOutcomeTotal = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "reservation_requests_outcome_total",
+ Help: "Reservation Request Outcome",
+ },
+ []string{"request_type", "outcome"},
+ )
+
+ relayAddressesUpdatedTotal = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "relay_addresses_updated_total",
+ Help: "Relay Addresses Updated Count",
+ },
+ )
+ relayAddressesCount = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "relay_addresses_count",
+ Help: "Relay Addresses Count",
+ },
+ )
+
+ candidatesCircuitV2SupportTotal = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "candidates_circuit_v2_support_total",
+ Help: "Candidates supporting circuit v2",
+ },
+ []string{"support"},
+ )
+ candidatesTotal = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "candidates_total",
+ Help: "Candidates Total",
+ },
+ []string{"type"},
+ )
+ candLoopState = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "candidate_loop_state",
+ Help: "Candidate Loop State",
+ },
+ )
+
+ scheduledWorkTime = prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "scheduled_work_time",
+ Help: "Scheduled Work Times",
+ },
+ []string{"work_type"},
+ )
+
+ desiredReservations = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "desired_reservations",
+ Help: "Desired Reservations",
+ },
+ )
+
+ collectors = []prometheus.Collector{
+ status,
+ reservationsOpenedTotal,
+ reservationsClosedTotal,
+ reservationRequestsOutcomeTotal,
+ relayAddressesUpdatedTotal,
+ relayAddressesCount,
+ candidatesCircuitV2SupportTotal,
+ candidatesTotal,
+ candLoopState,
+ scheduledWorkTime,
+ desiredReservations,
+ }
+)
+
+type candidateLoopState int
+
+const (
+ peerSourceRateLimited candidateLoopState = iota
+ waitingOnPeerChan
+ waitingForTrigger
+ stopped
+)
+
+// MetricsTracer is the interface for tracking metrics for autorelay
+type MetricsTracer interface {
+ RelayFinderStatus(isActive bool)
+
+ ReservationEnded(cnt int)
+ ReservationOpened(cnt int)
+ ReservationRequestFinished(isRefresh bool, err error)
+
+ RelayAddressCount(int)
+ RelayAddressUpdated()
+
+ CandidateChecked(supportsCircuitV2 bool)
+ CandidateAdded(cnt int)
+ CandidateRemoved(cnt int)
+ CandidateLoopState(state candidateLoopState)
+
+ ScheduledWorkUpdated(scheduledWork *scheduledWorkTimes)
+
+ DesiredReservations(int)
+}
+
+type metricsTracer struct{}
+
+var _ MetricsTracer = &metricsTracer{}
+
+type metricsTracerSetting struct {
+ reg prometheus.Registerer
+}
+
+type MetricsTracerOption func(*metricsTracerSetting)
+
+func WithRegisterer(reg prometheus.Registerer) MetricsTracerOption {
+ return func(s *metricsTracerSetting) {
+ if reg != nil {
+ s.reg = reg
+ }
+ }
+}
+
+func NewMetricsTracer(opts ...MetricsTracerOption) MetricsTracer {
+ setting := &metricsTracerSetting{reg: prometheus.DefaultRegisterer}
+ for _, opt := range opts {
+ opt(setting)
+ }
+ metricshelper.RegisterCollectors(setting.reg, collectors...)
+
+ // Initialise these counters to 0, otherwise the first reservation requests
+ // aren't handled correctly when using the PromQL increase function
+ reservationRequestsOutcomeTotal.WithLabelValues("refresh", "success")
+ reservationRequestsOutcomeTotal.WithLabelValues("new", "success")
+ candidatesCircuitV2SupportTotal.WithLabelValues("yes")
+ candidatesCircuitV2SupportTotal.WithLabelValues("no")
+ return &metricsTracer{}
+}
+
+func (mt *metricsTracer) RelayFinderStatus(isActive bool) {
+ if isActive {
+ status.Set(1)
+ } else {
+ status.Set(0)
+ }
+}
+
+func (mt *metricsTracer) ReservationEnded(cnt int) {
+ reservationsClosedTotal.Add(float64(cnt))
+}
+
+func (mt *metricsTracer) ReservationOpened(cnt int) {
+ reservationsOpenedTotal.Add(float64(cnt))
+}
+
+func (mt *metricsTracer) ReservationRequestFinished(isRefresh bool, err error) {
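+ // Build the label values in a pooled slice to keep this hot path
+ // allocation-free (see metrics_noalloc_test.go).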
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ if isRefresh {
+ *tags = append(*tags, "refresh")
+ } else {
+ *tags = append(*tags, "new")
+ }
+ *tags = append(*tags, getReservationRequestStatus(err))
+ reservationRequestsOutcomeTotal.WithLabelValues(*tags...).Inc()
+
+ if !isRefresh && err == nil {
+ reservationsOpenedTotal.Inc()
+ }
+}
+
+func (mt *metricsTracer) RelayAddressUpdated() {
+ relayAddressesUpdatedTotal.Inc()
+}
+
+func (mt *metricsTracer) RelayAddressCount(cnt int) {
+ relayAddressesCount.Set(float64(cnt))
+}
+
+func (mt *metricsTracer) CandidateChecked(supportsCircuitV2 bool) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+ if supportsCircuitV2 {
+ *tags = append(*tags, "yes")
+ } else {
+ *tags = append(*tags, "no")
+ }
+ candidatesCircuitV2SupportTotal.WithLabelValues(*tags...).Inc()
+}
+
+func (mt *metricsTracer) CandidateAdded(cnt int) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+ *tags = append(*tags, "added")
+ candidatesTotal.WithLabelValues(*tags...).Add(float64(cnt))
+}
+
+func (mt *metricsTracer) CandidateRemoved(cnt int) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+ *tags = append(*tags, "removed")
+ candidatesTotal.WithLabelValues(*tags...).Add(float64(cnt))
+}
+
+func (mt *metricsTracer) CandidateLoopState(state candidateLoopState) {
+ candLoopState.Set(float64(state))
+}
+
+func (mt *metricsTracer) ScheduledWorkUpdated(scheduledWork *scheduledWorkTimes) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ *tags = append(*tags, "allowed peer source call")
+ scheduledWorkTime.WithLabelValues(*tags...).Set(float64(scheduledWork.nextAllowedCallToPeerSource.Unix()))
+ *tags = (*tags)[:0]
+
+ *tags = append(*tags, "reservation refresh")
+ scheduledWorkTime.WithLabelValues(*tags...).Set(float64(scheduledWork.nextRefresh.Unix()))
+ *tags = (*tags)[:0]
+
+ *tags = append(*tags, "clear backoff")
+ scheduledWorkTime.WithLabelValues(*tags...).Set(float64(scheduledWork.nextBackoff.Unix()))
+ *tags = (*tags)[:0]
+
+ *tags = append(*tags, "old candidate check")
+ scheduledWorkTime.WithLabelValues(*tags...).Set(float64(scheduledWork.nextOldCandidateCheck.Unix()))
+}
+
+func (mt *metricsTracer) DesiredReservations(cnt int) {
+ desiredReservations.Set(float64(cnt))
+}
+
+func getReservationRequestStatus(err error) string {
+ if err == nil {
+ return "success"
+ }
+
+ status := "err other"
+ var re client.ReservationError
+ if errors.As(err, &re) {
+ switch re.Status {
+ case pbv2.Status_CONNECTION_FAILED:
+ return "connection failed"
+ case pbv2.Status_MALFORMED_MESSAGE:
+ return "malformed message"
+ case pbv2.Status_RESERVATION_REFUSED:
+ return "reservation refused"
+ case pbv2.Status_PERMISSION_DENIED:
+ return "permission denied"
+ case pbv2.Status_RESOURCE_LIMIT_EXCEEDED:
+ return "resource limit exceeded"
+ }
+ }
+ return status
+}
+
+// wrappedMetricsTracer wraps MetricsTracer and ignores all calls when mt is nil
+type wrappedMetricsTracer struct {
+ mt MetricsTracer
+}
+
+var _ MetricsTracer = &wrappedMetricsTracer{}
+
+func (mt *wrappedMetricsTracer) RelayFinderStatus(isActive bool) {
+ if mt.mt != nil {
+ mt.mt.RelayFinderStatus(isActive)
+ }
+}
+
+func (mt *wrappedMetricsTracer) ReservationEnded(cnt int) {
+ if mt.mt != nil {
+ mt.mt.ReservationEnded(cnt)
+ }
+}
+
+func (mt *wrappedMetricsTracer) ReservationOpened(cnt int) {
+ if mt.mt != nil {
+ mt.mt.ReservationOpened(cnt)
+ }
+}
+
+func (mt *wrappedMetricsTracer) ReservationRequestFinished(isRefresh bool, err error) {
+ if mt.mt != nil {
+ mt.mt.ReservationRequestFinished(isRefresh, err)
+ }
+}
+
+func (mt *wrappedMetricsTracer) RelayAddressUpdated() {
+ if mt.mt != nil {
+ mt.mt.RelayAddressUpdated()
+ }
+}
+
+func (mt *wrappedMetricsTracer) RelayAddressCount(cnt int) {
+ if mt.mt != nil {
+ mt.mt.RelayAddressCount(cnt)
+ }
+}
+
+func (mt *wrappedMetricsTracer) CandidateChecked(supportsCircuitV2 bool) {
+ if mt.mt != nil {
+ mt.mt.CandidateChecked(supportsCircuitV2)
+ }
+}
+
+func (mt *wrappedMetricsTracer) CandidateAdded(cnt int) {
+ if mt.mt != nil {
+ mt.mt.CandidateAdded(cnt)
+ }
+}
+
+func (mt *wrappedMetricsTracer) CandidateRemoved(cnt int) {
+ if mt.mt != nil {
+ mt.mt.CandidateRemoved(cnt)
+ }
+}
+
+func (mt *wrappedMetricsTracer) ScheduledWorkUpdated(scheduledWork *scheduledWorkTimes) {
+ if mt.mt != nil {
+ mt.mt.ScheduledWorkUpdated(scheduledWork)
+ }
+}
+
+func (mt *wrappedMetricsTracer) DesiredReservations(cnt int) {
+ if mt.mt != nil {
+ mt.mt.DesiredReservations(cnt)
+ }
+}
+
+func (mt *wrappedMetricsTracer) CandidateLoopState(state candidateLoopState) {
+ if mt.mt != nil {
+ mt.mt.CandidateLoopState(state)
+ }
+}
diff --git a/p2p/host/autorelay/metrics_noalloc_test.go b/p2p/host/autorelay/metrics_noalloc_test.go
new file mode 100644
index 0000000000..a44cb6e459
--- /dev/null
+++ b/p2p/host/autorelay/metrics_noalloc_test.go
@@ -0,0 +1,59 @@
+//go:build nocover
+
+package autorelay
+
+import (
+ "math/rand"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client"
+ pbv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb"
+)
+
+func getRandScheduledWork() scheduledWorkTimes {
+ randTime := func() time.Time {
+ return time.Now().Add(time.Duration(rand.Intn(10)) * time.Second)
+ }
+ return scheduledWorkTimes{
+ leastFrequentInterval: 0,
+ nextRefresh: randTime(),
+ nextBackoff: randTime(),
+ nextOldCandidateCheck: randTime(),
+ nextAllowedCallToPeerSource: randTime(),
+ }
+}
+
+func TestMetricsNoAllocNoCover(t *testing.T) {
+ scheduledWork := []scheduledWorkTimes{}
+ for i := 0; i < 10; i++ {
+ scheduledWork = append(scheduledWork, getRandScheduledWork())
+ }
+ errs := []error{
+ client.ReservationError{Status: pbv2.Status_MALFORMED_MESSAGE},
+ client.ReservationError{Status: pbv2.Status_MALFORMED_MESSAGE},
+ nil,
+ }
+ tr := NewMetricsTracer()
+ tests := map[string]func(){
+ "RelayFinderStatus": func() { tr.RelayFinderStatus(rand.Intn(2) == 1) },
+ "ReservationEnded": func() { tr.ReservationEnded(rand.Intn(10)) },
+ "ReservationRequestFinished": func() { tr.ReservationRequestFinished(rand.Intn(2) == 1, errs[rand.Intn(len(errs))]) },
+ "RelayAddressCount": func() { tr.RelayAddressCount(rand.Intn(10)) },
+ "RelayAddressUpdated": func() { tr.RelayAddressUpdated() },
+ "ReservationOpened": func() { tr.ReservationOpened(rand.Intn(10)) },
+ "CandidateChecked": func() { tr.CandidateChecked(rand.Intn(2) == 1) },
+ "CandidateAdded": func() { tr.CandidateAdded(rand.Intn(10)) },
+ "CandidateRemoved": func() { tr.CandidateRemoved(rand.Intn(10)) },
+ "ScheduledWorkUpdated": func() { tr.ScheduledWorkUpdated(&scheduledWork[rand.Intn(len(scheduledWork))]) },
+ "DesiredReservations": func() { tr.DesiredReservations(rand.Intn(10)) },
+ "CandidateLoopState": func() { tr.CandidateLoopState(candidateLoopState(rand.Intn(10))) },
+ }
+ for method, f := range tests {
+ allocs := testing.AllocsPerRun(1000, f)
+
+ if allocs > 0 {
+ t.Fatalf("Alloc Test: %s, got: %0.2f, expected: 0 allocs", method, allocs)
+ }
+ }
+}
diff --git a/p2p/host/autorelay/options.go b/p2p/host/autorelay/options.go
new file mode 100644
index 0000000000..7207c0f66c
--- /dev/null
+++ b/p2p/host/autorelay/options.go
@@ -0,0 +1,233 @@
+package autorelay
+
+import (
+ "context"
+ "errors"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+)
+
+// AutoRelay will call this function when it needs new candidates, because it
+// is not connected to the desired number of relays or it got disconnected from
+// one of the relays. Implementations must send *at most* numPeers, and close
+// the channel when they don't intend to provide any more peers. AutoRelay will
+// not call the callback again until the channel is closed. Implementations
+// should send new peers, but may also resend peers they sent before; AutoRelay
+// implements a per-peer backoff (see WithBackoff). See WithMinInterval for
+// setting the minimum interval between calls to the callback. The
+// context.Context passed may be canceled when AutoRelay feels satisfied; it
+// will also be canceled when the node is shutting down. If the context is
+// canceled you MUST close the output channel at some point.
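+//
+// A minimal illustrative sketch (a hypothetical helper, not part of this
+// package) of a conforming implementation backed by a static slice:
+//
+//	func staticSource(peers []peer.AddrInfo) PeerSource {
+//		return func(ctx context.Context, num int) <-chan peer.AddrInfo {
+//			out := make(chan peer.AddrInfo, num)
+//			go func() {
+//				defer close(out) // always close, even when ctx is canceled
+//				for i := 0; i < num && i < len(peers); i++ {
+//					select {
+//					case out <- peers[i]:
+//					case <-ctx.Done():
+//						return
+//					}
+//				}
+//			}()
+//			return out
+//		}
+//	}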
+type PeerSource func(ctx context.Context, num int) <-chan peer.AddrInfo
+
+type config struct {
+ clock ClockWithInstantTimer
+ peerSource PeerSource
+ // minimum interval used to call the peerSource callback
+ minInterval time.Duration
+ // see WithMinCandidates
+ minCandidates int
+ // see WithMaxCandidates
+ maxCandidates int
+ // Delay until we obtain reservations with relays, if we have less than minCandidates candidates.
+ // See WithBootDelay.
+ bootDelay time.Duration
+ // backoff is the time we wait after failing to obtain a reservation with a candidate
+ backoff time.Duration
+ // Number of relays we strive to obtain a reservation with.
+ desiredRelays int
+ // see WithMaxCandidateAge
+ maxCandidateAge time.Duration
+ setMinCandidates bool
+ // see WithMetricsTracer
+ metricsTracer MetricsTracer
+}
+
+var defaultConfig = config{
+ clock: RealClock{},
+ minCandidates: 4,
+ maxCandidates: 20,
+ bootDelay: 3 * time.Minute,
+ backoff: time.Hour,
+ desiredRelays: 2,
+ maxCandidateAge: 30 * time.Minute,
+ minInterval: 30 * time.Second,
+}
+
+var (
+ errAlreadyHavePeerSource = errors.New("can only use a single WithPeerSource or WithStaticRelays")
+)
+
+type Option func(*config) error
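+
+// For illustration (a sketch, not part of this diff): options are applied at
+// construction time, e.g.
+//
+//	ar, err := NewAutoRelay(host, WithNumRelays(2), WithBackoff(time.Hour))
+//	if err != nil {
+//		// handle error
+//	}
+//	ar.Start()
+//	defer ar.Close()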
+
+func WithStaticRelays(static []peer.AddrInfo) Option {
+ return func(c *config) error {
+ if c.peerSource != nil {
+ return errAlreadyHavePeerSource
+ }
+
+ WithPeerSource(func(_ context.Context, numPeers int) <-chan peer.AddrInfo {
+ if len(static) < numPeers {
+ numPeers = len(static)
+ }
+ c := make(chan peer.AddrInfo, numPeers)
+ defer close(c)
+
+ for i := 0; i < numPeers; i++ {
+ c <- static[i]
+ }
+ return c
+ })(c)
+ WithMinCandidates(len(static))(c)
+ WithMaxCandidates(len(static))(c)
+ WithNumRelays(len(static))(c)
+
+ return nil
+ }
+}
+
+// WithPeerSource defines a callback for AutoRelay to query for more relay candidates.
+func WithPeerSource(f PeerSource) Option {
+ return func(c *config) error {
+ if c.peerSource != nil {
+ return errAlreadyHavePeerSource
+ }
+ c.peerSource = f
+ return nil
+ }
+}
+
+// WithNumRelays sets the number of relays we strive to obtain reservations with.
+func WithNumRelays(n int) Option {
+ return func(c *config) error {
+ c.desiredRelays = n
+ return nil
+ }
+}
+
+// WithMaxCandidates sets the number of relay candidates that we buffer.
+func WithMaxCandidates(n int) Option {
+ return func(c *config) error {
+ c.maxCandidates = n
+ if c.minCandidates > n {
+ c.minCandidates = n
+ }
+ return nil
+ }
+}
+
+// WithMinCandidates sets the minimum number of relay candidates we collect before trying to get a reservation
+// with any of them (unless we've been running for longer than the boot delay).
+// This is to make sure that we don't just randomly connect to the first candidate that we discover.
+func WithMinCandidates(n int) Option {
+ return func(c *config) error {
+ if n > c.maxCandidates {
+ n = c.maxCandidates
+ }
+ c.minCandidates = n
+ c.setMinCandidates = true
+ return nil
+ }
+}
+
+// WithBootDelay sets the boot delay for finding relays.
+// We won't attempt any reservation if we have fewer than the minimum number of candidates.
+// This prevents us from connecting to the "first best" relay, and allows us to select the relay carefully.
+// However, in case we haven't found enough relays after the boot delay, we use what we have.
+func WithBootDelay(d time.Duration) Option {
+ return func(c *config) error {
+ c.bootDelay = d
+ return nil
+ }
+}
+
+// WithBackoff sets the time we wait after failing to obtain a reservation with a candidate.
+func WithBackoff(d time.Duration) Option {
+ return func(c *config) error {
+ c.backoff = d
+ return nil
+ }
+}
+
+// WithMaxCandidateAge sets the maximum age of a candidate.
+// When we are connected to the desired number of relays, we don't ask the peer source for new candidates.
+// This can lead to AutoRelay's candidate list becoming outdated: if all the
+// candidates have become stale, we won't be able to quickly establish a new
+// relay connection when an existing one breaks.
+func WithMaxCandidateAge(d time.Duration) Option {
+ return func(c *config) error {
+ c.maxCandidateAge = d
+ return nil
+ }
+}
+
+// InstantTimer is a timer that triggers at some instant rather than some duration
+type InstantTimer interface {
+ Reset(d time.Time) bool
+ Stop() bool
+ Ch() <-chan time.Time
+}
+
+// ClockWithInstantTimer is a clock that can create timers that trigger at some
+// instant rather than some duration
+type ClockWithInstantTimer interface {
+ Now() time.Time
+ Since(t time.Time) time.Duration
+ InstantTimer(when time.Time) InstantTimer
+}
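+
+// Timers that fire at absolute instants (rather than after durations) let the
+// relay finder's scheduling be driven deterministically by a mock clock in
+// tests; see mockClock in autorelay_test.go.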
+
+type RealTimer struct{ t *time.Timer }
+
+var _ InstantTimer = (*RealTimer)(nil)
+
+func (t RealTimer) Ch() <-chan time.Time {
+ return t.t.C
+}
+
+func (t RealTimer) Reset(d time.Time) bool {
+ return t.t.Reset(time.Until(d))
+}
+
+func (t RealTimer) Stop() bool {
+ return t.t.Stop()
+}
+
+type RealClock struct{}
+
+var _ ClockWithInstantTimer = RealClock{}
+
+func (RealClock) Now() time.Time {
+ return time.Now()
+}
+func (RealClock) Since(t time.Time) time.Duration {
+ return time.Since(t)
+}
+func (RealClock) InstantTimer(when time.Time) InstantTimer {
+ t := time.NewTimer(time.Until(when))
+ return &RealTimer{t}
+}
+
+func WithClock(cl ClockWithInstantTimer) Option {
+ return func(c *config) error {
+ c.clock = cl
+ return nil
+ }
+}
+
+// WithMinInterval sets the minimum interval between calls to the peerSource
+// callback, even if AutoRelay is in need of new candidates.
+func WithMinInterval(interval time.Duration) Option {
+ return func(c *config) error {
+ c.minInterval = interval
+ return nil
+ }
+}
+
+// WithMetricsTracer configures autorelay to use mt to track metrics
+func WithMetricsTracer(mt MetricsTracer) Option {
+ return func(c *config) error {
+ c.metricsTracer = mt
+ return nil
+ }
+}
diff --git a/p2p/host/autorelay/relay_finder.go b/p2p/host/autorelay/relay_finder.go
new file mode 100644
index 0000000000..35ac928b7a
--- /dev/null
+++ b/p2p/host/autorelay/relay_finder.go
@@ -0,0 +1,839 @@
+package autorelay
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "math/rand"
+ "slices"
+ "sync"
+ "time"
+
+ "golang.org/x/sync/errgroup"
+
+ "github.com/libp2p/go-libp2p/core/event"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/p2p/host/eventbus"
+ circuitv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client"
+ circuitv2_proto "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/proto"
+ "github.com/libp2p/go-libp2p/p2p/protocol/identify"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+const protoIDv2 = circuitv2_proto.ProtoIDv2Hop
+
+// Terminology:
+// Candidate: Once we connect to a node and it supports the relay protocol,
+// we call it a candidate, and consider using it as a relay.
+//
+// Relay: Out of the list of candidates, the ones we have a reservation with.
+// Currently, we just randomly select a candidate, but we could employ more
+// sophisticated selection strategies here (e.g. by factoring in the RTT).
+
+const (
+ rsvpRefreshInterval = time.Minute
+ rsvpExpirationSlack = 2 * time.Minute
+
+ autorelayTag = "autorelay"
+ maxRelayAddrs = 100
+)
+
+type candidate struct {
+ added time.Time
+ supportsRelayV2 bool
+ ai peer.AddrInfo
+}
+
+// relayFinder is a Host that uses relays for connectivity when a NAT is detected.
+type relayFinder struct {
+ bootTime time.Time
+ host host.Host
+
+ conf *config
+
+ refCount sync.WaitGroup
+
+ ctxCancel context.CancelFunc
+ ctxCancelMx sync.Mutex
+
+ peerSource PeerSource
+
+ candidateFound chan struct{} // receives every time we find a new relay candidate
+ candidateMx sync.Mutex
+ candidates map[peer.ID]*candidate
+ backoff map[peer.ID]time.Time
+ maybeConnectToRelayTrigger chan struct{} // cap: 1
+ // Any time _something_ happens that might cause us to need new candidates.
+ // This could be
+ // * the disconnection of a relay
+ // * the failed attempt to obtain a reservation with a current candidate
+ // * a candidate is deleted due to its age
+ maybeRequestNewCandidates chan struct{} // cap: 1.
+
+ relayReservationUpdated chan struct{}
+
+ relayMx sync.Mutex
+ relays map[peer.ID]*circuitv2.Reservation
+
+ circuitAddrs []ma.Multiaddr
+
+ // A channel that triggers a run of `runScheduledWork`.
+ triggerRunScheduledWork chan struct{}
+ metricsTracer MetricsTracer
+
+ emitter event.Emitter
+}
+
+var errAlreadyRunning = errors.New("relayFinder already running")
+
+func newRelayFinder(host host.Host, conf *config) (*relayFinder, error) {
+ if conf.peerSource == nil {
+  panic("cannot create a new relayFinder. Need a Peer Source fn or a list of static relays. Refer to the documentation around `libp2p.EnableAutoRelay`")
+ }
+
+ emitter, err := host.EventBus().Emitter(new(event.EvtAutoRelayAddrsUpdated), eventbus.Stateful)
+ if err != nil {
+ return nil, err
+ }
+
+ return &relayFinder{
+ bootTime: conf.clock.Now(),
+ host: host,
+ conf: conf,
+ peerSource: conf.peerSource,
+ candidates: make(map[peer.ID]*candidate),
+ backoff: make(map[peer.ID]time.Time),
+ candidateFound: make(chan struct{}, 1),
+ maybeConnectToRelayTrigger: make(chan struct{}, 1),
+ maybeRequestNewCandidates: make(chan struct{}, 1),
+ triggerRunScheduledWork: make(chan struct{}, 1),
+ relays: make(map[peer.ID]*circuitv2.Reservation),
+ relayReservationUpdated: make(chan struct{}, 1),
+ metricsTracer: &wrappedMetricsTracer{conf.metricsTracer},
+ emitter: emitter,
+ }, nil
+}
+
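+// scheduledWorkTimes tracks when each piece of the relay finder's periodic
+// work (reservation refresh, backoff expiry, old-candidate checks, peer source
+// calls) is next due.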
+type scheduledWorkTimes struct {
+ leastFrequentInterval time.Duration
+ nextRefresh time.Time
+ nextBackoff time.Time
+ nextOldCandidateCheck time.Time
+ nextAllowedCallToPeerSource time.Time
+}
+
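+// cleanupDisconnectedPeers watches connectedness events and removes relays we
+// got disconnected from, triggering an address update and a search for
+// replacement candidates.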
+func (rf *relayFinder) cleanupDisconnectedPeers(ctx context.Context) {
+ subConnectedness, err := rf.host.EventBus().Subscribe(new(event.EvtPeerConnectednessChanged), eventbus.Name("autorelay (relay finder)"), eventbus.BufSize(32))
+ if err != nil {
+  log.Error("failed to subscribe to EvtPeerConnectednessChanged")
+ return
+ }
+ defer subConnectedness.Close()
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case ev, ok := <-subConnectedness.Out():
+ if !ok {
+ return
+ }
+ evt := ev.(event.EvtPeerConnectednessChanged)
+ if evt.Connectedness != network.NotConnected {
+ continue
+ }
+ push := false
+
+ rf.relayMx.Lock()
+ if rf.usingRelay(evt.Peer) { // we were disconnected from a relay
+ log.Debug("disconnected from relay", "peer", evt.Peer)
+ delete(rf.relays, evt.Peer)
+ rf.notifyMaybeConnectToRelay()
+ rf.notifyMaybeNeedNewCandidates()
+ push = true
+ }
+ rf.relayMx.Unlock()
+
+ if push {
+ rf.notifyRelayReservationUpdated()
+ rf.metricsTracer.ReservationEnded(1)
+ }
+ }
+ }
+}
+
+func (rf *relayFinder) background(ctx context.Context) {
+ peerSourceRateLimiter := make(chan struct{}, 1)
+ rf.refCount.Add(1)
+ go func() {
+ defer rf.refCount.Done()
+ rf.findNodes(ctx, peerSourceRateLimiter)
+ }()
+
+ rf.refCount.Add(1)
+ go func() {
+ defer rf.refCount.Done()
+ rf.handleNewCandidates(ctx)
+ }()
+
+ now := rf.conf.clock.Now()
+ bootDelayTimer := rf.conf.clock.InstantTimer(now.Add(rf.conf.bootDelay))
+ defer bootDelayTimer.Stop()
+
+ // This is the least frequent event. It's our fallback timer if we don't have any other work to do.
+ leastFrequentInterval := rf.conf.minInterval
+ // Check if leastFrequentInterval is 0 to avoid busy looping
+ if rf.conf.backoff > leastFrequentInterval || leastFrequentInterval == 0 {
+ leastFrequentInterval = rf.conf.backoff
+ }
+ if rf.conf.maxCandidateAge > leastFrequentInterval || leastFrequentInterval == 0 {
+ leastFrequentInterval = rf.conf.maxCandidateAge
+ }
+ if rsvpRefreshInterval > leastFrequentInterval || leastFrequentInterval == 0 {
+ leastFrequentInterval = rsvpRefreshInterval
+ }
+
+ scheduledWork := &scheduledWorkTimes{
+ leastFrequentInterval: leastFrequentInterval,
+ nextRefresh: now.Add(rsvpRefreshInterval),
+ nextBackoff: now.Add(rf.conf.backoff),
+ nextOldCandidateCheck: now.Add(rf.conf.maxCandidateAge),
+ nextAllowedCallToPeerSource: now.Add(-time.Second), // allow immediately
+ }
+
+ workTimer := rf.conf.clock.InstantTimer(rf.runScheduledWork(ctx, now, scheduledWork, peerSourceRateLimiter))
+ defer workTimer.Stop()
+
+ go rf.cleanupDisconnectedPeers(ctx)
+
+ // update addrs on starting the relay finder.
+ rf.updateAddrs()
+ for {
+ select {
+ case <-rf.candidateFound:
+ rf.notifyMaybeConnectToRelay()
+ case <-bootDelayTimer.Ch():
+ rf.notifyMaybeConnectToRelay()
+ case <-rf.relayReservationUpdated:
+ rf.updateAddrs()
+ case now := <-workTimer.Ch():
+ // Note: `now` is not guaranteed to be the current time. It's the time
+ // that the timer was fired. This is okay because we'll schedule
+ // future work at a specific time.
+ nextTime := rf.runScheduledWork(ctx, now, scheduledWork, peerSourceRateLimiter)
+ workTimer.Reset(nextTime)
+ case <-rf.triggerRunScheduledWork:
+ // Ignore the next time because we aren't scheduling any future work here
+ _ = rf.runScheduledWork(ctx, rf.conf.clock.Now(), scheduledWork, peerSourceRateLimiter)
+ case <-ctx.Done():
+ return
+ }
+ }
+}
+
+func (rf *relayFinder) updateAddrs() {
+ oldAddrs := rf.circuitAddrs
+ rf.circuitAddrs = rf.getCircuitAddrs()
+
+ if areSortedAddrsDifferent(rf.circuitAddrs, oldAddrs) {
+ log.Debug("relay addresses updated", "addrs", rf.circuitAddrs)
+ rf.metricsTracer.RelayAddressUpdated()
+ rf.metricsTracer.RelayAddressCount(len(rf.circuitAddrs))
+ if err := rf.emitter.Emit(event.EvtAutoRelayAddrsUpdated{RelayAddrs: slices.Clone(rf.circuitAddrs)}); err != nil {
+   log.Error("failed to emit event.EvtAutoRelayAddrsUpdated with RelayAddrs", "addrs", rf.circuitAddrs, "err", err)
+ }
+ }
+}
+
+// getCircuitAddrs returns the p2p-circuit addrs for the host.
+// The returned addresses are of the form <relay-addr>/p2p/<relay-id>/p2p-circuit.
+func (rf *relayFinder) getCircuitAddrs() []ma.Multiaddr {
+ rf.relayMx.Lock()
+ defer rf.relayMx.Unlock()
+
+ raddrs := make([]ma.Multiaddr, 0, 4*len(rf.relays)+4)
+ for p := range rf.relays {
+ addrs := cleanupAddressSet(rf.host.Peerstore().Addrs(p))
+ circuit := ma.StringCast(fmt.Sprintf("/p2p/%s/p2p-circuit", p))
+ for _, addr := range addrs {
+ pub := addr.Encapsulate(circuit)
+ raddrs = append(raddrs, pub)
+ }
+ }
+
+ // Sort the addresses. We depend on this order for checking diffs to send address update events.
+ slices.SortStableFunc(raddrs, func(a, b ma.Multiaddr) int { return bytes.Compare(a.Bytes(), b.Bytes()) })
+ if len(raddrs) > maxRelayAddrs {
+ raddrs = raddrs[:maxRelayAddrs]
+ }
+ return raddrs
+}
+
+func (rf *relayFinder) runScheduledWork(ctx context.Context, now time.Time, scheduledWork *scheduledWorkTimes, peerSourceRateLimiter chan<- struct{}) time.Time {
+ nextTime := now.Add(scheduledWork.leastFrequentInterval)
+
+ if now.After(scheduledWork.nextRefresh) {
+ scheduledWork.nextRefresh = now.Add(rsvpRefreshInterval)
+ if rf.refreshReservations(ctx, now) {
+ rf.notifyRelayReservationUpdated()
+ }
+ }
+
+ if now.After(scheduledWork.nextBackoff) {
+ scheduledWork.nextBackoff = rf.clearBackoff(now)
+ }
+
+ if now.After(scheduledWork.nextOldCandidateCheck) {
+ scheduledWork.nextOldCandidateCheck = rf.clearOldCandidates(now)
+ }
+
+ if now.After(scheduledWork.nextAllowedCallToPeerSource) {
+ select {
+ case peerSourceRateLimiter <- struct{}{}:
+ scheduledWork.nextAllowedCallToPeerSource = now.Add(rf.conf.minInterval)
+ if scheduledWork.nextAllowedCallToPeerSource.Before(nextTime) {
+ nextTime = scheduledWork.nextAllowedCallToPeerSource
+ }
+ default:
+ }
+ } else {
+ // We still need to schedule this work if it's sooner than nextTime
+ if scheduledWork.nextAllowedCallToPeerSource.Before(nextTime) {
+ nextTime = scheduledWork.nextAllowedCallToPeerSource
+ }
+ }
+
+ // Find the next time we need to run scheduled work.
+ if scheduledWork.nextRefresh.Before(nextTime) {
+ nextTime = scheduledWork.nextRefresh
+ }
+ if scheduledWork.nextBackoff.Before(nextTime) {
+ nextTime = scheduledWork.nextBackoff
+ }
+ if scheduledWork.nextOldCandidateCheck.Before(nextTime) {
+ nextTime = scheduledWork.nextOldCandidateCheck
+ }
+ if nextTime.Equal(now) {
+ // Only happens in CI with a mock clock
+ nextTime = nextTime.Add(1) // avoids an infinite loop
+ }
+
+ rf.metricsTracer.ScheduledWorkUpdated(scheduledWork)
+
+ return nextTime
+}
+
+// clearOldCandidates clears old candidates from the map. Returns the next time
+// to run this function.
+func (rf *relayFinder) clearOldCandidates(now time.Time) time.Time {
+ // If we don't have any candidates, we should run this again in rf.conf.maxCandidateAge.
+ nextTime := now.Add(rf.conf.maxCandidateAge)
+
+ var deleted bool
+ rf.candidateMx.Lock()
+ defer rf.candidateMx.Unlock()
+ for id, cand := range rf.candidates {
+ expiry := cand.added.Add(rf.conf.maxCandidateAge)
+ if expiry.After(now) {
+ if expiry.Before(nextTime) {
+ nextTime = expiry
+ }
+ } else {
+ log.Debug("deleting candidate due to age", "peer", id)
+ deleted = true
+ rf.removeCandidate(id)
+ }
+ }
+ if deleted {
+ rf.notifyMaybeNeedNewCandidates()
+ }
+
+ return nextTime
+}
+
+// clearBackoff clears old backoff entries from the map. Returns the next time
+// to run this function.
+func (rf *relayFinder) clearBackoff(now time.Time) time.Time {
+ nextTime := now.Add(rf.conf.backoff)
+
+ rf.candidateMx.Lock()
+ defer rf.candidateMx.Unlock()
+ for id, t := range rf.backoff {
+ expiry := t.Add(rf.conf.backoff)
+ if expiry.After(now) {
+ if expiry.Before(nextTime) {
+ nextTime = expiry
+ }
+ } else {
+ log.Debug("removing backoff for node", "peer", id)
+ delete(rf.backoff, id)
+ }
+ }
+
+ return nextTime
+}
+
+// findNodes accepts nodes from the channel and tests if they support relaying.
+// It is run on both public and private nodes.
+// It garbage collects old entries so that the candidate set doesn't overflow.
+// This makes sure that as soon as we need to find relay candidates, we have them available.
+// peerSourceRateLimiter is used to limit how often we call the peer source.
+func (rf *relayFinder) findNodes(ctx context.Context, peerSourceRateLimiter <-chan struct{}) {
+ var peerChan <-chan peer.AddrInfo
+ var wg sync.WaitGroup
+ for {
+ rf.candidateMx.Lock()
+ numCandidates := len(rf.candidates)
+ rf.candidateMx.Unlock()
+
+ if peerChan == nil && numCandidates < rf.conf.minCandidates {
+ rf.metricsTracer.CandidateLoopState(peerSourceRateLimited)
+
+ select {
+ case <-peerSourceRateLimiter:
+ peerChan = rf.peerSource(ctx, rf.conf.maxCandidates)
+ select {
+ case rf.triggerRunScheduledWork <- struct{}{}:
+ default:
+ }
+ case <-ctx.Done():
+ return
+ }
+ }
+
+ if peerChan == nil {
+ rf.metricsTracer.CandidateLoopState(waitingForTrigger)
+ } else {
+ rf.metricsTracer.CandidateLoopState(waitingOnPeerChan)
+ }
+
+ select {
+ case <-rf.maybeRequestNewCandidates:
+ continue
+ case pi, ok := <-peerChan:
+ if !ok {
+ wg.Wait()
+ peerChan = nil
+ continue
+ }
+ log.Debug("found node", "peer", pi.ID)
+ rf.candidateMx.Lock()
+ numCandidates := len(rf.candidates)
+ backoffStart, isOnBackoff := rf.backoff[pi.ID]
+ rf.candidateMx.Unlock()
+ if isOnBackoff {
+ log.Debug("skipping node that we recently failed to obtain a reservation with", "peer", pi.ID, "last_attempt", rf.conf.clock.Since(backoffStart))
+ continue
+ }
+ if numCandidates >= rf.conf.maxCandidates {
+ log.Debug("skipping node. Already have enough candidates", "peer", pi.ID, "candidate_count", numCandidates, "max_candidates", rf.conf.maxCandidates)
+ continue
+ }
+ rf.refCount.Add(1)
+ wg.Add(1)
+ go func() {
+ defer rf.refCount.Done()
+ defer wg.Done()
+ if added := rf.handleNewNode(ctx, pi); added {
+ rf.notifyNewCandidate()
+ }
+ }()
+ case <-ctx.Done():
+ rf.metricsTracer.CandidateLoopState(stopped)
+ return
+ }
+ }
+}
+
+func (rf *relayFinder) notifyMaybeConnectToRelay() {
+ select {
+ case rf.maybeConnectToRelayTrigger <- struct{}{}:
+ default:
+ }
+}
+
+func (rf *relayFinder) notifyMaybeNeedNewCandidates() {
+ select {
+ case rf.maybeRequestNewCandidates <- struct{}{}:
+ default:
+ }
+}
+
+func (rf *relayFinder) notifyNewCandidate() {
+ select {
+ case rf.candidateFound <- struct{}{}:
+ default:
+ }
+}
+
+func (rf *relayFinder) notifyRelayReservationUpdated() {
+ select {
+ case rf.relayReservationUpdated <- struct{}{}:
+ default:
+ }
+}
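+
+// Each notify function above does a non-blocking send on a channel with a
+// buffer of one. A minimal sketch of the pattern (names here are hypothetical):
+//
+//	trigger := make(chan struct{}, 1)
+//	notify := func() {
+//		select {
+//		case trigger <- struct{}{}: // buffer one pending wakeup
+//		default: // a wakeup is already pending; coalesce
+//		}
+//	}
+//
+// Any burst of notifications collapses into at most one pending wakeup for the
+// receiving loop, which is why dropped sends here are harmless.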
+
+// handleNewNode tests if a peer supports circuit v2. If it does, the peer is
+// added to the candidates map. This method is only run on private nodes.
+// Note that just supporting the protocol doesn't guarantee that we can also obtain a reservation.
+func (rf *relayFinder) handleNewNode(ctx context.Context, pi peer.AddrInfo) (added bool) {
+ rf.relayMx.Lock()
+ relayInUse := rf.usingRelay(pi.ID)
+ rf.relayMx.Unlock()
+ if relayInUse {
+ return false
+ }
+
+ ctx, cancel := context.WithTimeout(ctx, 20*time.Second)
+ defer cancel()
+ supportsV2, err := rf.tryNode(ctx, pi)
+ if err != nil {
+ log.Debug("node not accepted as a candidate", "peer", pi.ID, "err", err)
+ if err == errProtocolNotSupported {
+ rf.metricsTracer.CandidateChecked(false)
+ }
+ return false
+ }
+ rf.metricsTracer.CandidateChecked(true)
+
+ rf.candidateMx.Lock()
+ if len(rf.candidates) > rf.conf.maxCandidates {
+ rf.candidateMx.Unlock()
+ return false
+ }
+ log.Debug("node supports relay protocol", "peer", pi.ID, "supports_circuit_v2", supportsV2)
+ rf.addCandidate(&candidate{
+ added: rf.conf.clock.Now(),
+ ai: pi,
+ supportsRelayV2: supportsV2,
+ })
+ rf.candidateMx.Unlock()
+ return true
+}
+
+var errProtocolNotSupported = errors.New("doesn't speak circuit v2")
+
+// tryNode checks if a peer actually supports circuit v2.
+// It does not modify any internal state.
+func (rf *relayFinder) tryNode(ctx context.Context, pi peer.AddrInfo) (supportsRelayV2 bool, err error) {
+ if err := rf.host.Connect(ctx, pi); err != nil {
+ return false, fmt.Errorf("error connecting to relay %s: %w", pi.ID, err)
+ }
+
+ conns := rf.host.Network().ConnsToPeer(pi.ID)
+ for _, conn := range conns {
+ if isRelayAddr(conn.RemoteMultiaddr()) {
+ return false, errors.New("not a public node")
+ }
+ }
+
+ // wait for identify to complete in at least one conn so that we can check the supported protocols
+ hi, ok := rf.host.(interface{ IDService() identify.IDService })
+ if !ok {
+ // if we don't have identify, assume the peer supports relay.
+ return true, nil
+ }
+ ready := make(chan struct{}, 1)
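+ // ready is buffered so that whichever connection completes identify first can
+ // signal without blocking; the remaining goroutines fall through the default
+ // case and exit.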
+ for _, conn := range conns {
+ go func(conn network.Conn) {
+ select {
+ case <-hi.IDService().IdentifyWait(conn):
+ select {
+ case ready <- struct{}{}:
+ default:
+ }
+ case <-ctx.Done():
+ }
+ }(conn)
+ }
+
+ select {
+ case <-ready:
+ case <-ctx.Done():
+ return false, ctx.Err()
+ }
+
+ protos, err := rf.host.Peerstore().SupportsProtocols(pi.ID, protoIDv2)
+ if err != nil {
+ return false, fmt.Errorf("error checking relay protocol support for peer %s: %w", pi.ID, err)
+ }
+ if len(protos) == 0 {
+ return false, errProtocolNotSupported
+ }
+ return true, nil
+}
+
+// When a new node that could be a relay is found, we receive a notification on the maybeConnectToRelayTrigger chan.
+// This function makes sure that we only run one instance of maybeConnectToRelay at once, and buffers
+// exactly one more trigger event to run maybeConnectToRelay.
+func (rf *relayFinder) handleNewCandidates(ctx context.Context) {
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-rf.maybeConnectToRelayTrigger:
+ rf.maybeConnectToRelay(ctx)
+ }
+ }
+}
+
+func (rf *relayFinder) maybeConnectToRelay(ctx context.Context) {
+ rf.relayMx.Lock()
+ numRelays := len(rf.relays)
+ rf.relayMx.Unlock()
+ // We're already connected to our desired number of relays. Nothing to do here.
+ if numRelays == rf.conf.desiredRelays {
+ return
+ }
+
+ rf.candidateMx.Lock()
+ if len(rf.relays) == 0 && len(rf.candidates) < rf.conf.minCandidates && rf.conf.clock.Since(rf.bootTime) < rf.conf.bootDelay {
+ // During the startup phase, we don't want to connect to the first candidate that we find.
+ // Instead, we wait until we've found at least minCandidates, and then select the best of those.
+ // However, if that takes too long (longer than bootDelay), we still go ahead.
+ rf.candidateMx.Unlock()
+ return
+ }
+ if len(rf.candidates) == 0 {
+ rf.candidateMx.Unlock()
+ return
+ }
+ candidates := rf.selectCandidates()
+ rf.candidateMx.Unlock()
+
+ // We now iterate over the candidates, attempting (sequentially) to get reservations with them, until
+ // we reach the desired number of relays.
+ for _, cand := range candidates {
+ id := cand.ai.ID
+ rf.relayMx.Lock()
+ usingRelay := rf.usingRelay(id)
+ rf.relayMx.Unlock()
+ if usingRelay {
+ rf.candidateMx.Lock()
+ rf.removeCandidate(id)
+ rf.candidateMx.Unlock()
+ rf.notifyMaybeNeedNewCandidates()
+ continue
+ }
+ rsvp, err := rf.connectToRelay(ctx, cand)
+ if err != nil {
+ log.Debug("failed to connect to relay", "relay_peer", id, "err", err)
+ rf.notifyMaybeNeedNewCandidates()
+ rf.metricsTracer.ReservationRequestFinished(false, err)
+ continue
+ }
+ log.Debug("adding new relay", "relay_peer", id)
+ rf.relayMx.Lock()
+ rf.relays[id] = rsvp
+ numRelays := len(rf.relays)
+ rf.relayMx.Unlock()
+ rf.notifyMaybeNeedNewCandidates()
+
+ rf.host.ConnManager().Protect(id, autorelayTag) // protect the connection
+
+ rf.notifyRelayReservationUpdated()
+
+ rf.metricsTracer.ReservationRequestFinished(false, nil)
+
+ if numRelays >= rf.conf.desiredRelays {
+ break
+ }
+ }
+}
+
+func (rf *relayFinder) connectToRelay(ctx context.Context, cand *candidate) (*circuitv2.Reservation, error) {
+ id := cand.ai.ID
+
+ ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
+ defer cancel()
+
+ var rsvp *circuitv2.Reservation
+
+ // make sure we're still connected.
+ if rf.host.Network().Connectedness(id) != network.Connected {
+ if err := rf.host.Connect(ctx, cand.ai); err != nil {
+ rf.candidateMx.Lock()
+ rf.removeCandidate(cand.ai.ID)
+ rf.candidateMx.Unlock()
+ return nil, fmt.Errorf("failed to connect: %w", err)
+ }
+ }
+
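+ // Record the attempt time unconditionally: if the reservation below fails, the
+ // peer stays on backoff; if it succeeds, the peer moves to rf.relays and the
+ // backoff entry is irrelevant until it expires.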
+ rf.candidateMx.Lock()
+ rf.backoff[id] = rf.conf.clock.Now()
+ rf.candidateMx.Unlock()
+ var err error
+ if cand.supportsRelayV2 {
+ rsvp, err = circuitv2.Reserve(ctx, rf.host, cand.ai)
+ if err != nil {
+ err = fmt.Errorf("failed to reserve slot: %w", err)
+ }
+ }
+ rf.candidateMx.Lock()
+ rf.removeCandidate(id)
+ rf.candidateMx.Unlock()
+ return rsvp, err
+}
+
+func (rf *relayFinder) refreshReservations(ctx context.Context, now time.Time) bool {
+ rf.relayMx.Lock()
+
+ // find reservations about to expire and refresh them in parallel
+ g := new(errgroup.Group)
+ for p, rsvp := range rf.relays {
+ if now.Add(rsvpExpirationSlack).Before(rsvp.Expiration) {
+ continue
+ }
+
+ p := p
+ g.Go(func() error {
+ err := rf.refreshRelayReservation(ctx, p)
+ rf.metricsTracer.ReservationRequestFinished(true, err)
+ return err
+ })
+ }
+ rf.relayMx.Unlock()
+
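+ // Wait for the refreshes outside the lock: refreshRelayReservation re-acquires
+ // relayMx to update rf.relays, so holding it across g.Wait would deadlock.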
+ err := g.Wait()
+ return err != nil
+}
+
+func (rf *relayFinder) refreshRelayReservation(ctx context.Context, p peer.ID) error {
+ rsvp, err := circuitv2.Reserve(ctx, rf.host, peer.AddrInfo{ID: p})
+
+ rf.relayMx.Lock()
+ if err != nil {
+ log.Debug("failed to refresh relay slot reservation", "relay_peer", p, "err", err)
+ _, exists := rf.relays[p]
+ delete(rf.relays, p)
+ // unprotect the connection
+ rf.host.ConnManager().Unprotect(p, autorelayTag)
+ rf.relayMx.Unlock()
+ if exists {
+ rf.metricsTracer.ReservationEnded(1)
+ }
+ return err
+ }
+
+ log.Debug("refreshed relay slot reservation", "relay_peer", p)
+ rf.relays[p] = rsvp
+ rf.relayMx.Unlock()
+ return nil
+}
+
+// usingRelay returns if we're currently using the given relay.
+func (rf *relayFinder) usingRelay(p peer.ID) bool {
+ _, ok := rf.relays[p]
+ return ok
+}
+
+// addCandidate adds a candidate to the candidates set. The caller must hold the candidateMx mutex.
+func (rf *relayFinder) addCandidate(cand *candidate) {
+ _, exists := rf.candidates[cand.ai.ID]
+ rf.candidates[cand.ai.ID] = cand
+ if !exists {
+ rf.metricsTracer.CandidateAdded(1)
+ }
+}
+
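+// removeCandidate removes a candidate from the candidates set. The caller must
+// hold the candidateMx mutex.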
+func (rf *relayFinder) removeCandidate(id peer.ID) {
+ _, exists := rf.candidates[id]
+ if exists {
+ delete(rf.candidates, id)
+ rf.metricsTracer.CandidateRemoved(1)
+ }
+}
+
+// selectCandidates returns an ordered slice of relay candidates.
+// Callers should attempt to obtain reservations with the candidates in this order.
+func (rf *relayFinder) selectCandidates() []*candidate {
+ now := rf.conf.clock.Now()
+ candidates := make([]*candidate, 0, len(rf.candidates))
+ for _, cand := range rf.candidates {
+ if cand.added.Add(rf.conf.maxCandidateAge).After(now) {
+ candidates = append(candidates, cand)
+ }
+ }
+
+ // TODO: better relay selection strategy; this just selects random relays,
+ // but we should probably use ping latency as the selection metric
+ rand.Shuffle(len(candidates), func(i, j int) {
+ candidates[i], candidates[j] = candidates[j], candidates[i]
+ })
+ return candidates
+}
+
+func (rf *relayFinder) Start() error {
+ rf.ctxCancelMx.Lock()
+ defer rf.ctxCancelMx.Unlock()
+ if rf.ctxCancel != nil {
+ return errAlreadyRunning
+ }
+ log.Debug("starting relay finder")
+
+ rf.initMetrics()
+
+ ctx, cancel := context.WithCancel(context.Background())
+ rf.ctxCancel = cancel
+ rf.refCount.Add(1)
+ go func() {
+ defer rf.refCount.Done()
+ rf.background(ctx)
+ }()
+ return nil
+}
+
+func (rf *relayFinder) Stop() error {
+ rf.ctxCancelMx.Lock()
+ defer rf.ctxCancelMx.Unlock()
+ log.Debug("stopping relay finder")
+ if rf.ctxCancel != nil {
+ rf.ctxCancel()
+ }
+ rf.refCount.Wait()
+ rf.ctxCancel = nil
+
+ rf.resetMetrics()
+ return nil
+}
+
+func (rf *relayFinder) initMetrics() {
+ rf.metricsTracer.DesiredReservations(rf.conf.desiredRelays)
+
+ rf.relayMx.Lock()
+ rf.metricsTracer.ReservationOpened(len(rf.relays))
+ rf.relayMx.Unlock()
+
+ rf.candidateMx.Lock()
+ rf.metricsTracer.CandidateAdded(len(rf.candidates))
+ rf.candidateMx.Unlock()
+}
+
+func (rf *relayFinder) resetMetrics() {
+ rf.relayMx.Lock()
+ rf.metricsTracer.ReservationEnded(len(rf.relays))
+ rf.relayMx.Unlock()
+
+ rf.candidateMx.Lock()
+ rf.metricsTracer.CandidateRemoved(len(rf.candidates))
+ rf.candidateMx.Unlock()
+
+ rf.metricsTracer.RelayAddressCount(0)
+ rf.metricsTracer.ScheduledWorkUpdated(&scheduledWorkTimes{})
+}
+
+func areSortedAddrsDifferent(a, b []ma.Multiaddr) bool {
+ if len(a) != len(b) {
+ return true
+ }
+ for i, aa := range a {
+ if !aa.Equal(b[i]) {
+ return true
+ }
+ }
+ return false
+}
diff --git a/p2p/host/basic/addrs_manager.go b/p2p/host/basic/addrs_manager.go
new file mode 100644
index 0000000000..f561877434
--- /dev/null
+++ b/p2p/host/basic/addrs_manager.go
@@ -0,0 +1,676 @@
+package basichost
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "slices"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/event"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/p2p/host/basic/internal/backoff"
+ "github.com/libp2p/go-libp2p/p2p/host/eventbus"
+ "github.com/libp2p/go-netroute"
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+const maxObservedAddrsPerListenAddr = 3
+
+// addrChangeTickrInterval is the interval to recompute host addrs.
+var addrChangeTickrInterval = 5 * time.Second
+
+// ObservedAddrsManager maps our local listen addrs to externally observed addrs.
+type ObservedAddrsManager interface {
+ Addrs(minObservers int) []ma.Multiaddr
+ AddrsFor(local ma.Multiaddr) []ma.Multiaddr
+}
+
+type hostAddrs struct {
+ addrs []ma.Multiaddr
+ localAddrs []ma.Multiaddr
+ reachableAddrs []ma.Multiaddr
+ unreachableAddrs []ma.Multiaddr
+ unknownAddrs []ma.Multiaddr
+ relayAddrs []ma.Multiaddr
+}
+
+type addrsManager struct {
+ bus event.Bus
+ natManager NATManager
+ addrsFactory AddrsFactory
+ listenAddrs func() []ma.Multiaddr
+ addCertHashes func([]ma.Multiaddr) []ma.Multiaddr
+ observedAddrsManager ObservedAddrsManager
+ interfaceAddrs *interfaceAddrsCache
+ addrsReachabilityTracker *addrsReachabilityTracker
+
+ // addrsUpdatedChan is notified when addrs change. This is provided by the caller.
+ addrsUpdatedChan chan struct{}
+
+ // triggerAddrsUpdateChan is used to trigger an addresses update.
+ triggerAddrsUpdateChan chan chan struct{}
+ // started is used to check whether the addrsManager has started.
+ started atomic.Bool
+ // triggerReachabilityUpdate is notified when reachable addrs are updated.
+ triggerReachabilityUpdate chan struct{}
+
+ hostReachability atomic.Pointer[network.Reachability]
+
+ addrsMx sync.RWMutex
+ currentAddrs hostAddrs
+
+ wg sync.WaitGroup
+ ctx context.Context
+ ctxCancel context.CancelFunc
+}
+
+func newAddrsManager(
+ bus event.Bus,
+ natmgr NATManager,
+ addrsFactory AddrsFactory,
+ listenAddrs func() []ma.Multiaddr,
+ addCertHashes func([]ma.Multiaddr) []ma.Multiaddr,
+ observedAddrsManager ObservedAddrsManager,
+ addrsUpdatedChan chan struct{},
+ client autonatv2Client,
+ enableMetrics bool,
+ registerer prometheus.Registerer,
+) (*addrsManager, error) {
+ ctx, cancel := context.WithCancel(context.Background())
+ as := &addrsManager{
+ bus: bus,
+ listenAddrs: listenAddrs,
+ addCertHashes: addCertHashes,
+ observedAddrsManager: observedAddrsManager,
+ natManager: natmgr,
+ addrsFactory: addrsFactory,
+ triggerAddrsUpdateChan: make(chan chan struct{}, 1),
+ triggerReachabilityUpdate: make(chan struct{}, 1),
+ addrsUpdatedChan: addrsUpdatedChan,
+ interfaceAddrs: &interfaceAddrsCache{},
+ ctx: ctx,
+ ctxCancel: cancel,
+ }
+ unknownReachability := network.ReachabilityUnknown
+ as.hostReachability.Store(&unknownReachability)
+
+ if client != nil {
+ var metricsTracker MetricsTracker
+ if enableMetrics {
+ metricsTracker = newMetricsTracker(withRegisterer(registerer))
+ }
+ as.addrsReachabilityTracker = newAddrsReachabilityTracker(client, as.triggerReachabilityUpdate, nil, metricsTracker)
+ }
+ return as, nil
+}
+
+func (a *addrsManager) Start() error {
+ if a.addrsReachabilityTracker != nil {
+ err := a.addrsReachabilityTracker.Start()
+ if err != nil {
+ return fmt.Errorf("error starting addrs reachability tracker: %s", err)
+ }
+ }
+ return a.startBackgroundWorker()
+}
+
+func (a *addrsManager) Close() {
+ a.ctxCancel()
+ if a.natManager != nil {
+ err := a.natManager.Close()
+ if err != nil {
+ log.Warn("error closing natmgr", "err", err)
+ }
+ }
+ if a.addrsReachabilityTracker != nil {
+ err := a.addrsReachabilityTracker.Close()
+ if err != nil {
+ log.Warn("error closing addrs reachability tracker", "err", err)
+ }
+ }
+ a.wg.Wait()
+}
+
+func (a *addrsManager) NetNotifee() network.Notifiee {
+ return &network.NotifyBundle{
+ ListenF: func(network.Network, ma.Multiaddr) { a.updateAddrsSync() },
+ ListenCloseF: func(network.Network, ma.Multiaddr) { a.updateAddrsSync() },
+ }
+}
+
+func (a *addrsManager) updateAddrsSync() {
+ // Ignore updates before the manager has started: the background loop that
+ // drains triggerAddrsUpdateChan isn't running yet, so sending would deadlock.
+ if !a.started.Load() {
+ return
+ }
+ ch := make(chan struct{})
+ select {
+ case a.triggerAddrsUpdateChan <- ch:
+ select {
+ case <-ch:
+ case <-a.ctx.Done():
+ }
+ case <-a.ctx.Done():
+ }
+}
+
+func (a *addrsManager) startBackgroundWorker() (retErr error) {
+ autoRelayAddrsSub, err := a.bus.Subscribe(new(event.EvtAutoRelayAddrsUpdated), eventbus.Name("addrs-manager autorelay sub"))
+ if err != nil {
+ return fmt.Errorf("error subscribing to auto relay addrs: %s", err)
+ }
+ mc := multiCloser{autoRelayAddrsSub}
+ autonatReachabilitySub, err := a.bus.Subscribe(new(event.EvtLocalReachabilityChanged), eventbus.Name("addrs-manager autonatv1 sub"))
+ if err != nil {
+ return errors.Join(
+ fmt.Errorf("error subscribing to autonat reachability: %s", err),
+ mc.Close(),
+ )
+ }
+ mc = append(mc, autonatReachabilitySub)
+
+ emitter, err := a.bus.Emitter(new(event.EvtHostReachableAddrsChanged), eventbus.Stateful)
+ if err != nil {
+ return errors.Join(
+ fmt.Errorf("error creating reachability subscriber: %s", err),
+ mc.Close(),
+ )
+ }
+
+ var relayAddrs []ma.Multiaddr
+ // update relay addrs in case we're private
+ select {
+ case e := <-autoRelayAddrsSub.Out():
+ if evt, ok := e.(event.EvtAutoRelayAddrsUpdated); ok {
+ relayAddrs = slices.Clone(evt.RelayAddrs)
+ }
+ default:
+ }
+
+ select {
+ case e := <-autonatReachabilitySub.Out():
+ if evt, ok := e.(event.EvtLocalReachabilityChanged); ok {
+ a.hostReachability.Store(&evt.Reachability)
+ }
+ default:
+ }
+ // this ensures that listens concurrent with Start are reflected correctly after Start exits.
+ a.started.Store(true)
+ // update addresses before starting the worker loop. This ensures that any address updates
+ // before calling addrsManager.Start are correctly reported after Start returns.
+ a.updateAddrs(relayAddrs)
+
+ a.wg.Add(1)
+ go a.background(autoRelayAddrsSub, autonatReachabilitySub, emitter, relayAddrs)
+ return nil
+}
+
+func (a *addrsManager) background(autoRelayAddrsSub, autonatReachabilitySub event.Subscription,
+ emitter event.Emitter, relayAddrs []ma.Multiaddr,
+) {
+ defer a.wg.Done()
+ defer func() {
+ err := autoRelayAddrsSub.Close()
+ if err != nil {
+ log.Warn("error closing auto relay addrs sub", "err", err)
+ }
+ err = autonatReachabilitySub.Close()
+ if err != nil {
+ log.Warn("error closing autonat reachability sub", "err", err)
+ }
+ err = emitter.Close()
+ if err != nil {
+ log.Warn("error closing host reachability emitter", "err", err)
+ }
+ }()
+
+ ticker := time.NewTicker(addrChangeTickrInterval)
+ defer ticker.Stop()
+ var previousAddrs hostAddrs
+ var notifCh chan struct{}
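+ // notifCh is the ack channel handed over by updateAddrsSync; it is closed once
+ // the triggered update completes so that the caller can unblock.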
+ for {
+ currAddrs := a.updateAddrs(relayAddrs)
+ if notifCh != nil {
+ close(notifCh)
+ notifCh = nil
+ }
+ a.notifyAddrsChanged(emitter, previousAddrs, currAddrs)
+ previousAddrs = currAddrs
+ select {
+ case <-ticker.C:
+ case notifCh = <-a.triggerAddrsUpdateChan:
+ case <-a.triggerReachabilityUpdate:
+ case e := <-autoRelayAddrsSub.Out():
+ if evt, ok := e.(event.EvtAutoRelayAddrsUpdated); ok {
+ relayAddrs = slices.Clone(evt.RelayAddrs)
+ }
+ case e := <-autonatReachabilitySub.Out():
+ if evt, ok := e.(event.EvtLocalReachabilityChanged); ok {
+ a.hostReachability.Store(&evt.Reachability)
+ }
+ case <-a.ctx.Done():
+ return
+ }
+ }
+}
+
+// updateAddrs updates the addresses of the host and returns the new updated
+// addrs. This must only be called from the background goroutine or from the Start method;
+// otherwise we may end up with stale addrs.
+func (a *addrsManager) updateAddrs(relayAddrs []ma.Multiaddr) hostAddrs {
+ localAddrs := a.getLocalAddrs()
+ var currReachableAddrs, currUnreachableAddrs, currUnknownAddrs []ma.Multiaddr
+ if a.addrsReachabilityTracker != nil {
+ currReachableAddrs, currUnreachableAddrs, currUnknownAddrs = a.getConfirmedAddrs(localAddrs)
+ }
+ relayAddrs = slices.Clone(relayAddrs)
+ currAddrs := a.getAddrs(slices.Clone(localAddrs), relayAddrs)
+
+ a.addrsMx.Lock()
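+ // Reuse the backing arrays of the previous slices to avoid re-allocating on
+ // every update; accessors hand out clones, so the stored slices are never
+ // shared with callers.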
+ a.currentAddrs = hostAddrs{
+ addrs: append(a.currentAddrs.addrs[:0], currAddrs...),
+ localAddrs: append(a.currentAddrs.localAddrs[:0], localAddrs...),
+ reachableAddrs: append(a.currentAddrs.reachableAddrs[:0], currReachableAddrs...),
+ unreachableAddrs: append(a.currentAddrs.unreachableAddrs[:0], currUnreachableAddrs...),
+ unknownAddrs: append(a.currentAddrs.unknownAddrs[:0], currUnknownAddrs...),
+ relayAddrs: append(a.currentAddrs.relayAddrs[:0], relayAddrs...),
+ }
+ a.addrsMx.Unlock()
+
+ return hostAddrs{
+ localAddrs: localAddrs,
+ addrs: currAddrs,
+ reachableAddrs: currReachableAddrs,
+ unreachableAddrs: currUnreachableAddrs,
+ unknownAddrs: currUnknownAddrs,
+ relayAddrs: relayAddrs,
+ }
+}
+
+func (a *addrsManager) notifyAddrsChanged(emitter event.Emitter, previous, current hostAddrs) {
+ if areAddrsDifferent(previous.localAddrs, current.localAddrs) {
+ log.Debug("host local addresses updated", "addrs", current.localAddrs)
+ if a.addrsReachabilityTracker != nil {
+ a.addrsReachabilityTracker.UpdateAddrs(current.localAddrs)
+ }
+ }
+ if areAddrsDifferent(previous.addrs, current.addrs) {
+ log.Debug("host addresses updated", "addrs", current.localAddrs)
+ select {
+ case a.addrsUpdatedChan <- struct{}{}:
+ default:
+ }
+ }
+
+ // We *must* send both reachability changed and addrs changed events from the
+ // same goroutine to ensure correct ordering
+ // Consider the events:
+ // - addr x discovered
+ // - addr x is reachable
+ // - addr x removed
+ // We must send these events in the same order. It'll be confusing for consumers
+ // if the reachable event is received after the addr removed event.
+ if areAddrsDifferent(previous.reachableAddrs, current.reachableAddrs) ||
+ areAddrsDifferent(previous.unreachableAddrs, current.unreachableAddrs) ||
+ areAddrsDifferent(previous.unknownAddrs, current.unknownAddrs) {
+ log.Debug("host reachable addrs updated",
+ "reachable", current.reachableAddrs,
+ "unreachable", current.unreachableAddrs,
+ "unknown", current.unknownAddrs)
+ if err := emitter.Emit(event.EvtHostReachableAddrsChanged{
+ Reachable: slices.Clone(current.reachableAddrs),
+ Unreachable: slices.Clone(current.unreachableAddrs),
+ Unknown: slices.Clone(current.unknownAddrs),
+ }); err != nil {
+ log.Error("error sending host reachable addrs changed event", "err", err)
+ }
+ }
+}
+
+// Addrs returns the node's dialable addresses, both public and private.
+// If autorelay is enabled and node reachability is private, it returns
+// the node's relay addresses and private network addresses.
+func (a *addrsManager) Addrs() []ma.Multiaddr {
+ a.addrsMx.RLock()
+ directAddrs := slices.Clone(a.currentAddrs.localAddrs)
+ relayAddrs := slices.Clone(a.currentAddrs.relayAddrs)
+ a.addrsMx.RUnlock()
+ return a.getAddrs(directAddrs, relayAddrs)
+}
+
+// getAddrs returns the node's dialable addresses. Mutates localAddrs.
+func (a *addrsManager) getAddrs(localAddrs []ma.Multiaddr, relayAddrs []ma.Multiaddr) []ma.Multiaddr {
+ addrs := localAddrs
+ rch := a.hostReachability.Load()
+ if rch != nil && *rch == network.ReachabilityPrivate {
+ // Delete public addresses if the node's reachability is private, and we have relay addresses
+ if len(relayAddrs) > 0 {
+ addrs = slices.DeleteFunc(addrs, manet.IsPublicAddr)
+ addrs = append(addrs, relayAddrs...)
+ }
+ }
+ // Make a copy. Consumers can modify the slice elements
+ addrs = slices.Clone(a.addrsFactory(addrs))
+ // Add certhashes for the addresses provided by the user via address factory.
+ addrs = a.addCertHashes(ma.Unique(addrs))
+ slices.SortFunc(addrs, func(a, b ma.Multiaddr) int { return a.Compare(b) })
+ return addrs
+}
+
+// HolePunchAddrs returns all the host's direct public addresses, reachable or unreachable,
+// suitable for hole punching.
+func (a *addrsManager) HolePunchAddrs() []ma.Multiaddr {
+ addrs := a.DirectAddrs()
+ addrs = slices.Clone(a.addrsFactory(addrs))
+ // AllAddrs may ignore observed addresses in favour of NAT mappings.
+ // Use both for hole punching.
+ if a.observedAddrsManager != nil {
+ // For holepunching, include all the best addresses we know even ones with only 1 observer.
+ addrs = append(addrs, a.observedAddrsManager.Addrs(1)...)
+ }
+ addrs = ma.Unique(addrs)
+ return slices.DeleteFunc(addrs, func(a ma.Multiaddr) bool { return !manet.IsPublicAddr(a) })
+}
+
+// DirectAddrs returns all the addresses the host is listening on except circuit addresses.
+func (a *addrsManager) DirectAddrs() []ma.Multiaddr {
+ a.addrsMx.RLock()
+ defer a.addrsMx.RUnlock()
+ return slices.Clone(a.currentAddrs.localAddrs)
+}
+
+// ConfirmedAddrs returns the host's addresses partitioned by confirmed reachability: reachable, unreachable, and unknown.
+func (a *addrsManager) ConfirmedAddrs() (reachable []ma.Multiaddr, unreachable []ma.Multiaddr, unknown []ma.Multiaddr) {
+ a.addrsMx.RLock()
+ defer a.addrsMx.RUnlock()
+ return slices.Clone(a.currentAddrs.reachableAddrs), slices.Clone(a.currentAddrs.unreachableAddrs), slices.Clone(a.currentAddrs.unknownAddrs)
+}
+
+func (a *addrsManager) getConfirmedAddrs(localAddrs []ma.Multiaddr) (reachableAddrs, unreachableAddrs, unknownAddrs []ma.Multiaddr) {
+ reachableAddrs, unreachableAddrs, unknownAddrs = a.addrsReachabilityTracker.ConfirmedAddrs()
+ return removeNotInSource(reachableAddrs, localAddrs), removeNotInSource(unreachableAddrs, localAddrs), removeNotInSource(unknownAddrs, localAddrs)
+}
+
+var p2pCircuitAddr = ma.StringCast("/p2p-circuit")
+
+func (a *addrsManager) getLocalAddrs() []ma.Multiaddr {
+ listenAddrs := a.listenAddrs()
+ if len(listenAddrs) == 0 {
+ return nil
+ }
+
+ finalAddrs := make([]ma.Multiaddr, 0, 8)
+ finalAddrs = a.appendPrimaryInterfaceAddrs(finalAddrs, listenAddrs)
+ if a.natManager != nil {
+ finalAddrs = a.appendNATAddrs(finalAddrs, listenAddrs)
+ }
+ if a.observedAddrsManager != nil {
+ finalAddrs = a.appendObservedAddrs(finalAddrs, listenAddrs, a.interfaceAddrs.All())
+ }
+
+ // Remove "/p2p-circuit" addresses from the list.
+ // The p2p-circuit listener reports its address as just /p2p-circuit. This is
+ // useless for dialing. Users need to manage their circuit addresses themselves,
+ // or use AutoRelay.
+ finalAddrs = slices.DeleteFunc(finalAddrs, func(a ma.Multiaddr) bool {
+ return a.Equal(p2pCircuitAddr)
+ })
+
+ // Remove any unspecified address from the list
+ finalAddrs = slices.DeleteFunc(finalAddrs, func(a ma.Multiaddr) bool {
+ return manet.IsIPUnspecified(a)
+ })
+
+ // Add certhashes for /webrtc-direct, /webtransport, etc addresses discovered
+ // using identify.
+ finalAddrs = a.addCertHashes(finalAddrs)
+ finalAddrs = ma.Unique(finalAddrs)
+ slices.SortFunc(finalAddrs, func(a, b ma.Multiaddr) int { return a.Compare(b) })
+ return finalAddrs
+}
+
+// appendPrimaryInterfaceAddrs appends the primary interface addresses to `dst`.
+func (a *addrsManager) appendPrimaryInterfaceAddrs(dst []ma.Multiaddr, listenAddrs []ma.Multiaddr) []ma.Multiaddr {
+ // Resolve any unspecified listen addresses using only the primary
+ // interface addresses to avoid advertising too many addresses.
+ if resolved, err := manet.ResolveUnspecifiedAddresses(listenAddrs, a.interfaceAddrs.Filtered()); err != nil {
+ log.Warn("failed to resolve listen addrs", "err", err)
+ } else {
+ dst = append(dst, resolved...)
+ }
+ return dst
+}
+
+// appendNATAddrs appends the NAT-mapped addrs for the listenAddrs. For unspecified listen addrs it
+// appends the public address for all the interfaces.
+// Inferring WebTransport addrs from QUIC addrs is left to the observed address manager.
+func (a *addrsManager) appendNATAddrs(dst []ma.Multiaddr, listenAddrs []ma.Multiaddr) []ma.Multiaddr {
+ for _, listenAddr := range listenAddrs {
+ natAddr := a.natManager.GetMapping(listenAddr)
+ if natAddr != nil {
+ dst = append(dst, natAddr)
+ }
+ }
+ return dst
+}
+
+func (a *addrsManager) appendObservedAddrs(dst []ma.Multiaddr, listenAddrs, ifaceAddrs []ma.Multiaddr) []ma.Multiaddr {
+ // Add the observed addrs for all the listen addrs first.
+ // A listen addr may be unspecified. That's okay, as connections on UDP transports
+ // will have the unspecified address as their local address.
+ for _, la := range listenAddrs {
+ obsAddrs := a.observedAddrsManager.AddrsFor(la)
+ if len(obsAddrs) > maxObservedAddrsPerListenAddr {
+ obsAddrs = obsAddrs[:maxObservedAddrsPerListenAddr]
+ }
+ dst = append(dst, obsAddrs...)
+ }
+
+ // if it can be resolved into more addresses, add them too
+ resolved, err := manet.ResolveUnspecifiedAddresses(listenAddrs, ifaceAddrs)
+ if err != nil {
+ log.Warn("failed to resolve listen addr", "listen_addr", listenAddrs, "iface_addrs", ifaceAddrs, "err", err)
+ return dst
+ }
+ for _, addr := range resolved {
+ obsAddrs := a.observedAddrsManager.AddrsFor(addr)
+ if len(obsAddrs) > maxObservedAddrsPerListenAddr {
+ obsAddrs = obsAddrs[:maxObservedAddrsPerListenAddr]
+ }
+ dst = append(dst, obsAddrs...)
+ }
+ return dst
+}
+
+func areAddrsDifferent(prev, current []ma.Multiaddr) bool {
+ // TODO: make the sorted nature of ma.Unique a guarantee in multiaddrs
+ prev = ma.Unique(prev)
+ current = ma.Unique(current)
+ if len(prev) != len(current) {
+ return true
+ }
+ slices.SortFunc(prev, func(a, b ma.Multiaddr) int { return a.Compare(b) })
+ slices.SortFunc(current, func(a, b ma.Multiaddr) int { return a.Compare(b) })
+ for i := range prev {
+ if !prev[i].Equal(current[i]) {
+ return true
+ }
+ }
+ return false
+}
+
+const interfaceAddrsCacheTTL = time.Minute
+
+type interfaceAddrsCache struct {
+ mx sync.RWMutex
+ filtered []ma.Multiaddr
+ all []ma.Multiaddr
+ updateLocalIPv4Backoff backoff.ExpBackoff
+ updateLocalIPv6Backoff backoff.ExpBackoff
+ lastUpdated time.Time
+}
+
+func (i *interfaceAddrsCache) Filtered() []ma.Multiaddr {
+ i.mx.RLock()
+ if time.Now().After(i.lastUpdated.Add(interfaceAddrsCacheTTL)) {
+ i.mx.RUnlock()
+ return i.update(true)
+ }
+ defer i.mx.RUnlock()
+ return i.filtered
+}
+
+func (i *interfaceAddrsCache) All() []ma.Multiaddr {
+ i.mx.RLock()
+ if time.Now().After(i.lastUpdated.Add(interfaceAddrsCacheTTL)) {
+ i.mx.RUnlock()
+ return i.update(false)
+ }
+ defer i.mx.RUnlock()
+ return i.all
+}
+
+func (i *interfaceAddrsCache) update(filtered bool) []ma.Multiaddr {
+ i.mx.Lock()
+ defer i.mx.Unlock()
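+ // Re-check the TTL after taking the write lock: another goroutine may have
+ // refreshed the cache between our read-lock release and this lock acquisition
+ // (classic double-checked locking).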
+ if !time.Now().After(i.lastUpdated.Add(interfaceAddrsCacheTTL)) {
+ if filtered {
+ return i.filtered
+ }
+ return i.all
+ }
+ i.updateUnlocked()
+ i.lastUpdated = time.Now()
+ if filtered {
+ return i.filtered
+ }
+ return i.all
+}
+
+func (i *interfaceAddrsCache) updateUnlocked() {
+ i.filtered = nil
+ i.all = nil
+
+ // Try to use the default ipv4/6 addresses.
+ // TODO: Remove this. We should advertise all interface addresses.
+ if r, err := netroute.New(); err != nil {
+ log.Debug("failed to build Router for kernel's routing table", "err", err)
+ } else {
+ var localIPv4 net.IP
+ var ran bool
+ err, ran = i.updateLocalIPv4Backoff.Run(func() error {
+ _, _, localIPv4, err = r.Route(net.IPv4zero)
+ return err
+ })
+
+ if ran && err != nil {
+ log.Debug("failed to fetch local IPv4 address", "err", err)
+ } else if ran && localIPv4.IsGlobalUnicast() {
+ maddr, err := manet.FromIP(localIPv4)
+ if err == nil {
+ i.filtered = append(i.filtered, maddr)
+ }
+ }
+
+ var localIPv6 net.IP
+ err, ran = i.updateLocalIPv6Backoff.Run(func() error {
+ _, _, localIPv6, err = r.Route(net.IPv6unspecified)
+ return err
+ })
+
+ if ran && err != nil {
+ log.Debug("failed to fetch local IPv6 address", "err", err)
+ } else if ran && localIPv6.IsGlobalUnicast() {
+ maddr, err := manet.FromIP(localIPv6)
+ if err == nil {
+ i.filtered = append(i.filtered, maddr)
+ }
+ }
+ }
+
+ // Resolve the interface addresses
+ ifaceAddrs, err := manet.InterfaceMultiaddrs()
+ if err != nil {
+ // This usually shouldn't happen, but we could be in some kind
+ // of funky restricted environment.
+ log.Error("failed to resolve local interface addresses", "err", err)
+
+ // Add the loopback addresses to the filtered addrs and use them as the non-filtered addrs.
+ // Then bail. There's nothing else we can do here.
+ i.filtered = append(i.filtered, manet.IP4Loopback, manet.IP6Loopback)
+ i.all = i.filtered
+ return
+ }
+
+ // remove link local ipv6 addresses
+ i.all = slices.DeleteFunc(ifaceAddrs, manet.IsIP6LinkLocal)
+
+ // If netroute failed to get us any interface addresses, use all of
+ // them.
+ if len(i.filtered) == 0 {
+ // Add all addresses.
+ i.filtered = i.all
+ } else {
+ // Only add loopback addresses. Filter these because we might
+ // not _have_ an IPv6 loopback address.
+ for _, addr := range i.all {
+ if manet.IsIPLoopback(addr) {
+ i.filtered = append(i.filtered, addr)
+ }
+ }
+ }
+}
+
+// removeNotInSource removes items from addrs that are not present in source.
+// Modifies the addrs slice in place
+// addrs and source must be sorted using multiaddr.Compare.
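+// For example (hypothetical, already-sorted inputs):
+//	addrs  = [A B D], source = [B C D]  ->  returns [B D]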
+func removeNotInSource(addrs, source []ma.Multiaddr) []ma.Multiaddr {
+ j := 0
+ // mark entries not in source as nil
+ for i, a := range addrs {
+ // move right as long as a > source[j]
+ for j < len(source) && a.Compare(source[j]) > 0 {
+ j++
+ }
+ // a is not in source if we've reached the end of source, or if a is smaller
+ if j == len(source) || a.Compare(source[j]) < 0 {
+ addrs[i] = nil
+ }
+ // a is in source, nothing to do
+ }
+ // compact: move the non-nil elements to the front; i is the next write index
+ i := 0
+ for j := range len(addrs) {
+ if addrs[j] != nil {
+ addrs[i], addrs[j] = addrs[j], addrs[i]
+ i++
+ }
+ }
+ return addrs[:i]
+}
+
+type multiCloser []io.Closer
+
+func (mc *multiCloser) Close() error {
+ var errs []error
+ for _, closer := range *mc {
+ if err := closer.Close(); err != nil {
+ var closerName string
+ if named, ok := closer.(interface{ Name() string }); ok {
+ closerName = named.Name()
+ } else {
+ closerName = fmt.Sprintf("%T", closer)
+ }
+ errs = append(errs, fmt.Errorf("error closing %s: %w", closerName, err))
+ }
+ }
+ return errors.Join(errs...)
+}
diff --git a/p2p/host/basic/addrs_manager_test.go b/p2p/host/basic/addrs_manager_test.go
new file mode 100644
index 0000000000..442979dc16
--- /dev/null
+++ b/p2p/host/basic/addrs_manager_test.go
@@ -0,0 +1,471 @@
+package basichost
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "slices"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/event"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/p2p/host/eventbus"
+ "github.com/libp2p/go-libp2p/p2p/protocol/autonatv2"
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/multiformats/go-multiaddr/matest"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+type mockNatManager struct {
+ GetMappingFunc func(addr ma.Multiaddr) ma.Multiaddr
+}
+
+func (*mockNatManager) Close() error {
+ return nil
+}
+
+func (m *mockNatManager) GetMapping(addr ma.Multiaddr) ma.Multiaddr {
+ if m.GetMappingFunc == nil {
+ return nil
+ }
+ return m.GetMappingFunc(addr)
+}
+
+func (*mockNatManager) HasDiscoveredNAT() bool {
+ return true
+}
+
+var _ NATManager = &mockNatManager{}
+
+type mockObservedAddrs struct {
+ AddrsFunc func() []ma.Multiaddr
+ AddrsForFunc func(ma.Multiaddr) []ma.Multiaddr
+}
+
+func (m *mockObservedAddrs) Addrs(int) []ma.Multiaddr { return m.AddrsFunc() }
+
+func (m *mockObservedAddrs) AddrsFor(local ma.Multiaddr) []ma.Multiaddr { return m.AddrsForFunc(local) }
+
+var _ ObservedAddrsManager = &mockObservedAddrs{}
+
+type addrsManagerArgs struct {
+ NATManager NATManager
+ AddrsFactory AddrsFactory
+ ObservedAddrsManager ObservedAddrsManager
+ ListenAddrs func() []ma.Multiaddr
+ AddCertHashes func([]ma.Multiaddr) []ma.Multiaddr
+ AutoNATClient autonatv2Client
+ Bus event.Bus
+}
+
+type addrsManagerTestCase struct {
+ *addrsManager
+ PushRelay func(relayAddrs []ma.Multiaddr)
+ PushReachability func(rch network.Reachability)
+}
+
+func newAddrsManagerTestCase(tb testing.TB, args addrsManagerArgs) addrsManagerTestCase {
+ eb := args.Bus
+ if eb == nil {
+ eb = eventbus.NewBus()
+ }
+ if args.AddrsFactory == nil {
+ args.AddrsFactory = func(addrs []ma.Multiaddr) []ma.Multiaddr { return addrs }
+ }
+ addrsUpdatedChan := make(chan struct{}, 1)
+
+ addCertHashes := func(addrs []ma.Multiaddr) []ma.Multiaddr {
+ return addrs
+ }
+ if args.AddCertHashes != nil {
+ addCertHashes = args.AddCertHashes
+ }
+ am, err := newAddrsManager(
+ eb,
+ args.NATManager,
+ args.AddrsFactory,
+ args.ListenAddrs,
+ addCertHashes,
+ args.ObservedAddrsManager,
+ addrsUpdatedChan,
+ args.AutoNATClient,
+ true,
+ prometheus.DefaultRegisterer,
+ )
+ require.NoError(tb, err)
+
+ require.NoError(tb, am.Start())
+ raEm, err := eb.Emitter(new(event.EvtAutoRelayAddrsUpdated), eventbus.Stateful)
+ require.NoError(tb, err)
+
+ rchEm, err := eb.Emitter(new(event.EvtLocalReachabilityChanged), eventbus.Stateful)
+ require.NoError(tb, err)
+
+ tb.Cleanup(am.Close)
+ return addrsManagerTestCase{
+ addrsManager: am,
+ PushRelay: func(relayAddrs []ma.Multiaddr) {
+ err := raEm.Emit(event.EvtAutoRelayAddrsUpdated{RelayAddrs: relayAddrs})
+ require.NoError(tb, err)
+ },
+ PushReachability: func(rch network.Reachability) {
+ err := rchEm.Emit(event.EvtLocalReachabilityChanged{Reachability: rch})
+ require.NoError(tb, err)
+ },
+ }
+}
+
+func TestAddrsManager(t *testing.T) {
+ lhquic := ma.StringCast("/ip4/127.0.0.1/udp/1/quic-v1")
+ lhtcp := ma.StringCast("/ip4/127.0.0.1/tcp/1")
+
+ publicQUIC := ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1")
+ publicQUIC2 := ma.StringCast("/ip4/1.2.3.4/udp/2/quic-v1")
+ publicTCP := ma.StringCast("/ip4/1.2.3.4/tcp/1")
+ privQUIC := ma.StringCast("/ip4/100.100.100.101/udp/1/quic-v1")
+
+ t.Run("only nat", func(t *testing.T) {
+ am := newAddrsManagerTestCase(t, addrsManagerArgs{
+ NATManager: &mockNatManager{
+ GetMappingFunc: func(addr ma.Multiaddr) ma.Multiaddr {
+ if _, err := addr.ValueForProtocol(ma.P_UDP); err == nil {
+ return publicQUIC
+ }
+ return nil
+ },
+ },
+ ListenAddrs: func() []ma.Multiaddr { return []ma.Multiaddr{lhquic, lhtcp} },
+ })
+ am.updateAddrsSync()
+ require.EventuallyWithT(t, func(collect *assert.CollectT) {
+ expected := []ma.Multiaddr{publicQUIC, lhquic, lhtcp}
+ assert.ElementsMatch(collect, am.Addrs(), expected, "%s\n%s", am.Addrs(), expected)
+ }, 5*time.Second, 50*time.Millisecond)
+ })
+
+ t.Run("nat and observed addrs", func(t *testing.T) {
+ // nat mapping for udp, observed addrs for tcp
+ am := newAddrsManagerTestCase(t, addrsManagerArgs{
+ NATManager: &mockNatManager{
+ GetMappingFunc: func(addr ma.Multiaddr) ma.Multiaddr {
+ if _, err := addr.ValueForProtocol(ma.P_UDP); err == nil {
+ return privQUIC
+ }
+ return nil
+ },
+ },
+ ObservedAddrsManager: &mockObservedAddrs{
+ AddrsForFunc: func(addr ma.Multiaddr) []ma.Multiaddr {
+ if _, err := addr.ValueForProtocol(ma.P_TCP); err == nil {
+ return []ma.Multiaddr{publicTCP}
+ }
+ if _, err := addr.ValueForProtocol(ma.P_UDP); err == nil {
+ return []ma.Multiaddr{publicQUIC2}
+ }
+ return nil
+ },
+ },
+ ListenAddrs: func() []ma.Multiaddr { return []ma.Multiaddr{lhquic, lhtcp} },
+ })
+ require.EventuallyWithT(t, func(collect *assert.CollectT) {
+ expected := []ma.Multiaddr{lhquic, lhtcp, privQUIC, publicTCP, publicQUIC2}
+ assert.ElementsMatch(collect, am.Addrs(), expected, "%s\n%s", am.Addrs(), expected)
+ }, 5*time.Second, 50*time.Millisecond)
+ })
+ t.Run("nat returns unspecified addr", func(t *testing.T) {
+ quicPort1 := ma.StringCast("/ip4/3.3.3.3/udp/1/quic-v1")
+ // port from nat, IP from observed addr
+ am := newAddrsManagerTestCase(t, addrsManagerArgs{
+ NATManager: &mockNatManager{
+ GetMappingFunc: func(addr ma.Multiaddr) ma.Multiaddr {
+ if addr.Equal(lhquic) {
+ return ma.StringCast("/ip4/0.0.0.0/udp/2/quic-v1")
+ }
+ return nil
+ },
+ },
+ ObservedAddrsManager: &mockObservedAddrs{
+ AddrsForFunc: func(addr ma.Multiaddr) []ma.Multiaddr {
+ if addr.Equal(lhquic) {
+ return []ma.Multiaddr{quicPort1}
+ }
+ return nil
+ },
+ },
+ ListenAddrs: func() []ma.Multiaddr { return []ma.Multiaddr{lhquic} },
+ })
+ expected := []ma.Multiaddr{lhquic, quicPort1}
+ require.EventuallyWithT(t, func(collect *assert.CollectT) {
+ assert.ElementsMatch(collect, am.Addrs(), expected, "%s\n%s", am.Addrs(), expected)
+ }, 5*time.Second, 50*time.Millisecond)
+ })
+ t.Run("only observed addrs", func(t *testing.T) {
+ am := newAddrsManagerTestCase(t, addrsManagerArgs{
+ ObservedAddrsManager: &mockObservedAddrs{
+ AddrsForFunc: func(addr ma.Multiaddr) []ma.Multiaddr {
+ if addr.Equal(lhtcp) {
+ return []ma.Multiaddr{publicTCP}
+ }
+ if addr.Equal(lhquic) {
+ return []ma.Multiaddr{publicQUIC}
+ }
+ return nil
+ },
+ },
+ ListenAddrs: func() []ma.Multiaddr { return []ma.Multiaddr{lhquic, lhtcp} },
+ })
+ am.updateAddrsSync()
+ expected := []ma.Multiaddr{lhquic, lhtcp, publicTCP, publicQUIC}
+ require.EventuallyWithT(t, func(collect *assert.CollectT) {
+ assert.ElementsMatch(collect, am.Addrs(), expected, "%s\n%s", am.Addrs(), expected)
+ }, 5*time.Second, 50*time.Millisecond)
+ })
+
+ t.Run("observed addrs limit", func(t *testing.T) {
+ quicAddrs := []ma.Multiaddr{
+ ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1"),
+ ma.StringCast("/ip4/1.2.3.4/udp/2/quic-v1"),
+ ma.StringCast("/ip4/1.2.3.4/udp/3/quic-v1"),
+ ma.StringCast("/ip4/1.2.3.4/udp/4/quic-v1"),
+ ma.StringCast("/ip4/1.2.3.4/udp/5/quic-v1"),
+ ma.StringCast("/ip4/1.2.3.4/udp/6/quic-v1"),
+ ma.StringCast("/ip4/1.2.3.4/udp/7/quic-v1"),
+ ma.StringCast("/ip4/1.2.3.4/udp/8/quic-v1"),
+ ma.StringCast("/ip4/1.2.3.4/udp/9/quic-v1"),
+ ma.StringCast("/ip4/1.2.3.4/udp/10/quic-v1"),
+ }
+ am := newAddrsManagerTestCase(t, addrsManagerArgs{
+ ObservedAddrsManager: &mockObservedAddrs{
+ AddrsForFunc: func(_ ma.Multiaddr) []ma.Multiaddr {
+ return quicAddrs
+ },
+ },
+ ListenAddrs: func() []ma.Multiaddr { return []ma.Multiaddr{lhquic} },
+ })
+ am.updateAddrsSync()
+ expected := []ma.Multiaddr{lhquic}
+ expected = append(expected, quicAddrs[:maxObservedAddrsPerListenAddr]...)
+ require.EventuallyWithT(t, func(collect *assert.CollectT) {
+ matest.AssertMultiaddrsMatch(collect, expected, am.Addrs())
+ }, 2*time.Second, 50*time.Millisecond)
+ })
+ t.Run("public addrs removed when private", func(t *testing.T) {
+ am := newAddrsManagerTestCase(t, addrsManagerArgs{
+ ObservedAddrsManager: &mockObservedAddrs{
+ AddrsForFunc: func(_ ma.Multiaddr) []ma.Multiaddr {
+ return []ma.Multiaddr{publicQUIC}
+ },
+ },
+ ListenAddrs: func() []ma.Multiaddr { return []ma.Multiaddr{lhquic, lhtcp} },
+ })
+
+ // remove public addrs
+ am.PushReachability(network.ReachabilityPrivate)
+ relayAddr := ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1/p2p/QmdXGaeGiVA745XorV1jr11RHxB9z4fqykm6xCUPX1aTJo/p2p-circuit")
+ am.PushRelay([]ma.Multiaddr{relayAddr})
+
+ expectedAddrs := []ma.Multiaddr{relayAddr, lhquic, lhtcp}
+ expectedAllAddrs := []ma.Multiaddr{publicQUIC, lhquic, lhtcp}
+ require.EventuallyWithT(t, func(collect *assert.CollectT) {
+ assert.ElementsMatch(collect, am.Addrs(), expectedAddrs, "%s\n%s", am.Addrs(), expectedAddrs)
+ assert.ElementsMatch(collect, am.DirectAddrs(), expectedAllAddrs, "%s\n%s", am.DirectAddrs(), expectedAllAddrs)
+ }, 5*time.Second, 50*time.Millisecond)
+
+ // add public addrs
+ am.PushReachability(network.ReachabilityPublic)
+
+ expectedAddrs = expectedAllAddrs
+ require.EventuallyWithT(t, func(collect *assert.CollectT) {
+ assert.ElementsMatch(collect, am.Addrs(), expectedAddrs, "%s\n%s", am.Addrs(), expectedAddrs)
+ assert.ElementsMatch(collect, am.DirectAddrs(), expectedAllAddrs, "%s\n%s", am.DirectAddrs(), expectedAllAddrs)
+ }, 5*time.Second, 50*time.Millisecond)
+ })
+
+ t.Run("addrs factory gets relay addrs", func(t *testing.T) {
+ relayAddr := ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1/p2p/QmdXGaeGiVA745XorV1jr11RHxB9z4fqykm6xCUPX1aTJo/p2p-circuit")
+ publicQUIC2 := ma.StringCast("/ip4/1.2.3.4/udp/2/quic-v1")
+ am := newAddrsManagerTestCase(t, addrsManagerArgs{
+ AddrsFactory: func(addrs []ma.Multiaddr) []ma.Multiaddr {
+ for _, a := range addrs {
+ if a.Equal(relayAddr) {
+ return []ma.Multiaddr{publicQUIC2}
+ }
+ }
+ return nil
+ },
+ ObservedAddrsManager: &mockObservedAddrs{
+ AddrsForFunc: func(_ ma.Multiaddr) []ma.Multiaddr {
+ return []ma.Multiaddr{publicQUIC}
+ },
+ },
+ ListenAddrs: func() []ma.Multiaddr { return []ma.Multiaddr{lhquic, lhtcp} },
+ })
+ am.PushReachability(network.ReachabilityPrivate)
+ am.PushRelay([]ma.Multiaddr{relayAddr})
+
+ expectedAddrs := []ma.Multiaddr{publicQUIC2}
+ expectedAllAddrs := []ma.Multiaddr{publicQUIC, lhquic, lhtcp}
+ require.EventuallyWithT(t, func(collect *assert.CollectT) {
+ assert.ElementsMatch(collect, am.Addrs(), expectedAddrs, "%s\n%s", am.Addrs(), expectedAddrs)
+ assert.ElementsMatch(collect, am.DirectAddrs(), expectedAllAddrs, "%s\n%s", am.DirectAddrs(), expectedAllAddrs)
+ }, 5*time.Second, 50*time.Millisecond)
+ })
+
+ t.Run("updates addresses on signaling", func(t *testing.T) {
+ updateChan := make(chan struct{})
+ am := newAddrsManagerTestCase(t, addrsManagerArgs{
+ AddrsFactory: func(_ []ma.Multiaddr) []ma.Multiaddr {
+ select {
+ case <-updateChan:
+ return []ma.Multiaddr{publicQUIC}
+ default:
+ return []ma.Multiaddr{publicTCP}
+ }
+ },
+ ListenAddrs: func() []ma.Multiaddr { return []ma.Multiaddr{lhquic, lhtcp} },
+ })
+ require.Contains(t, am.Addrs(), publicTCP)
+ require.NotContains(t, am.Addrs(), publicQUIC)
+ close(updateChan)
+ am.updateAddrsSync()
+ require.EventuallyWithT(t, func(collect *assert.CollectT) {
+ assert.Contains(collect, am.Addrs(), publicQUIC)
+ assert.NotContains(collect, am.Addrs(), publicTCP)
+ }, 1*time.Second, 50*time.Millisecond)
+ })
+
+ t.Run("addrs factory depends on confirmed addrs", func(t *testing.T) {
+ var amp atomic.Pointer[addrsManager]
+ q1 := ma.StringCast("/ip4/1.1.1.1/udp/1/quic-v1")
+ addrsFactory := func(_ []ma.Multiaddr) []ma.Multiaddr {
+ if amp.Load() == nil {
+ return nil
+ }
+ // r is empty as there's no reachability tracker
+ r, _, _ := amp.Load().ConfirmedAddrs()
+ return append(r, q1)
+ }
+ am := newAddrsManagerTestCase(t, addrsManagerArgs{
+ AddrsFactory: addrsFactory,
+ ListenAddrs: func() []ma.Multiaddr { return []ma.Multiaddr{lhquic, lhtcp} },
+ })
+ amp.Store(am.addrsManager)
+ am.updateAddrsSync()
+ matest.AssertEqualMultiaddrs(t, []ma.Multiaddr{q1}, am.Addrs())
+ })
+}
+
+func TestAddrsManagerReachabilityEvent(t *testing.T) {
+ publicQUIC, _ := ma.NewMultiaddr("/ip4/1.2.3.4/udp/1234/quic-v1")
+ publicQUIC2, _ := ma.NewMultiaddr("/ip4/1.2.3.4/udp/1235/quic-v1")
+ publicTCP, _ := ma.NewMultiaddr("/ip4/1.2.3.4/tcp/1234")
+
+ bus := eventbus.NewBus()
+
+ sub, err := bus.Subscribe(new(event.EvtHostReachableAddrsChanged))
+ require.NoError(t, err)
+ defer sub.Close()
+
+ am := newAddrsManagerTestCase(t, addrsManagerArgs{
+ Bus: bus,
+ // currently they aren't being passed to the reachability tracker
+ ListenAddrs: func() []ma.Multiaddr { return []ma.Multiaddr{publicQUIC, publicQUIC2, publicTCP} },
+ AutoNATClient: mockAutoNATClient{
+ F: func(_ context.Context, reqs []autonatv2.Request) (autonatv2.Result, error) {
+ if reqs[0].Addr.Equal(publicQUIC) {
+ return autonatv2.Result{Addr: reqs[0].Addr, Idx: 0, Reachability: network.ReachabilityPublic}, nil
+ } else if reqs[0].Addr.Equal(publicTCP) || reqs[0].Addr.Equal(publicQUIC2) {
+ return autonatv2.Result{Addr: reqs[0].Addr, Idx: 0, Reachability: network.ReachabilityPrivate}, nil
+ }
+ return autonatv2.Result{}, errors.New("invalid")
+ },
+ },
+ })
+
+ initialUnknownAddrs := []ma.Multiaddr{publicQUIC, publicTCP, publicQUIC2}
+
+ // First event: all addresses are initially unknown
+ select {
+ case e := <-sub.Out():
+ evt := e.(event.EvtHostReachableAddrsChanged)
+ require.Empty(t, evt.Reachable)
+ require.Empty(t, evt.Unreachable)
+ require.ElementsMatch(t, initialUnknownAddrs, evt.Unknown)
+ case <-time.After(5 * time.Second):
+ t.Fatal("expected initial event for reachability change")
+ }
+
+ // Wait for probes to complete and addresses to be classified
+ reachableAddrs := []ma.Multiaddr{publicQUIC}
+ unreachableAddrs := []ma.Multiaddr{publicTCP, publicQUIC2}
+ select {
+ case e := <-sub.Out():
+ evt := e.(event.EvtHostReachableAddrsChanged)
+ require.ElementsMatch(t, reachableAddrs, evt.Reachable)
+ require.ElementsMatch(t, unreachableAddrs, evt.Unreachable)
+ require.Empty(t, evt.Unknown)
+ reachable, unreachable, unknown := am.ConfirmedAddrs()
+ require.ElementsMatch(t, reachable, reachableAddrs)
+ require.ElementsMatch(t, unreachable, unreachableAddrs)
+ require.Empty(t, unknown)
+ case <-time.After(5 * time.Second):
+ t.Fatal("expected final event for reachability change after probing")
+ }
+}
+
+func TestRemoveIfNotInSource(t *testing.T) {
+ var addrs []ma.Multiaddr
+ for i := 0; i < 10; i++ {
+ addrs = append(addrs, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.4/tcp/%d", i)))
+ }
+ slices.SortFunc(addrs, func(a, b ma.Multiaddr) int { return a.Compare(b) })
+ cases := []struct {
+ addrs []ma.Multiaddr
+ source []ma.Multiaddr
+ expected []ma.Multiaddr
+ }{
+ {},
+ {addrs: slices.Clone(addrs[:5]), source: nil, expected: nil},
+ {addrs: nil, source: addrs, expected: nil},
+ {addrs: []ma.Multiaddr{addrs[0]}, source: []ma.Multiaddr{addrs[0]}, expected: []ma.Multiaddr{addrs[0]}},
+ {addrs: slices.Clone(addrs), source: []ma.Multiaddr{addrs[0]}, expected: []ma.Multiaddr{addrs[0]}},
+ {addrs: slices.Clone(addrs), source: slices.Clone(addrs[5:]), expected: slices.Clone(addrs[5:])},
+ {addrs: slices.Clone(addrs[:5]), source: []ma.Multiaddr{addrs[0], addrs[2], addrs[8]}, expected: []ma.Multiaddr{addrs[0], addrs[2]}},
+ }
+ for i, tc := range cases {
+ t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+ addrs := removeNotInSource(tc.addrs, tc.source)
+ require.ElementsMatch(t, tc.expected, addrs, "%s\n%s", tc.expected, tc.addrs)
+ })
+ }
+}
+
+func BenchmarkAreAddrsDifferent(b *testing.B) {
+ var addrs [10]ma.Multiaddr
+ for i := 0; i < len(addrs); i++ {
+ addrs[i] = ma.StringCast(fmt.Sprintf("/ip4/1.1.1.%d/tcp/1", i))
+ }
+ b.Run("areAddrsDifferent", func(b *testing.B) {
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ areAddrsDifferent(addrs[:], addrs[:])
+ }
+ })
+}
+
+func BenchmarkRemoveIfNotInSource(b *testing.B) {
+ var addrs [10]ma.Multiaddr
+ for i := 0; i < len(addrs); i++ {
+ addrs[i] = ma.StringCast(fmt.Sprintf("/ip4/1.1.1.%d/tcp/1", i))
+ }
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ removeNotInSource(slices.Clone(addrs[:5]), addrs[:])
+ }
+}
diff --git a/p2p/host/basic/addrs_metrics.go b/p2p/host/basic/addrs_metrics.go
new file mode 100644
index 0000000000..6c04a6b362
--- /dev/null
+++ b/p2p/host/basic/addrs_metrics.go
@@ -0,0 +1,154 @@
+package basichost
+
+import (
+ "maps"
+
+ "github.com/libp2p/go-libp2p/p2p/metricshelper"
+ ma "github.com/multiformats/go-multiaddr"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+const metricNamespace = "libp2p_host_addrs"
+
+var (
+ reachableAddrs = prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "reachable",
+ Help: "Number of reachable addresses by transport",
+ },
+ []string{"ipv", "transport"},
+ )
+ unreachableAddrs = prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "unreachable",
+ Help: "Number of unreachable addresses by transport",
+ },
+ []string{"ipv", "transport"},
+ )
+ unknownAddrs = prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "unknown",
+ Help: "Number of addresses with unknown reachability by transport",
+ },
+ []string{"ipv", "transport"},
+ )
+ collectors = []prometheus.Collector{
+ reachableAddrs,
+ unreachableAddrs,
+ unknownAddrs,
+ }
+)
+
+// MetricsTracker tracks autonatv2 reachability metrics
+type MetricsTracker interface {
+ // ConfirmedAddrsChanged updates metrics with current address reachability status
+ ConfirmedAddrsChanged(reachable, unreachable, unknown []ma.Multiaddr)
+ // ReachabilityTrackerClosed updates metrics on host close
+ ReachabilityTrackerClosed()
+}
+
+type metricsTracker struct {
+ prevReachableCounts map[metricKey]int
+ prevUnreachableCounts map[metricKey]int
+ prevUnknownCounts map[metricKey]int
+ currentReachable map[metricKey]int
+ currentUnreachable map[metricKey]int
+ currentUnknown map[metricKey]int
+}
+
+var _ MetricsTracker = &metricsTracker{}
+
+type metricsTrackerSetting struct {
+ reg prometheus.Registerer
+}
+
+type metricsTrackerOption func(*metricsTrackerSetting)
+
+// withRegisterer sets the prometheus registerer for the metrics
+func withRegisterer(reg prometheus.Registerer) metricsTrackerOption {
+ return func(s *metricsTrackerSetting) {
+ if reg != nil {
+ s.reg = reg
+ }
+ }
+}
+
+type metricKey struct {
+ ipv string
+ transport string
+}
+
+// newMetricsTracker creates a new metrics tracker for autonatv2
+func newMetricsTracker(opts ...metricsTrackerOption) MetricsTracker {
+ setting := &metricsTrackerSetting{reg: prometheus.DefaultRegisterer}
+ for _, opt := range opts {
+ opt(setting)
+ }
+ metricshelper.RegisterCollectors(setting.reg, collectors...)
+ return &metricsTracker{
+ prevReachableCounts: make(map[metricKey]int),
+ prevUnreachableCounts: make(map[metricKey]int),
+ prevUnknownCounts: make(map[metricKey]int),
+ currentReachable: make(map[metricKey]int),
+ currentUnreachable: make(map[metricKey]int),
+ currentUnknown: make(map[metricKey]int),
+ }
+}
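+
+// Illustrative usage sketch (not part of this patch): a caller with its own
+// registry would wire the tracker up roughly like this, where `reg` is an
+// assumed prometheus.Registerer:
+//
+//  mt := newMetricsTracker(withRegisterer(reg))
+//  mt.ConfirmedAddrsChanged(reachable, unreachable, unknown) // on every change
+//  mt.ReachabilityTrackerClosed()                            // on host close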
+
+func (t *metricsTracker) ReachabilityTrackerClosed() {
+ resetMetric(reachableAddrs, t.currentReachable, t.prevReachableCounts)
+ resetMetric(unreachableAddrs, t.currentUnreachable, t.prevUnreachableCounts)
+ resetMetric(unknownAddrs, t.currentUnknown, t.prevUnknownCounts)
+}
+
+// ConfirmedAddrsChanged updates the metrics with current address reachability counts by transport
+func (t *metricsTracker) ConfirmedAddrsChanged(reachable, unreachable, unknown []ma.Multiaddr) {
+ updateMetric(reachableAddrs, reachable, t.currentReachable, t.prevReachableCounts)
+ updateMetric(unreachableAddrs, unreachable, t.currentUnreachable, t.prevUnreachableCounts)
+ updateMetric(unknownAddrs, unknown, t.currentUnknown, t.prevUnknownCounts)
+}
+
+func updateMetric(metric *prometheus.GaugeVec, addrs []ma.Multiaddr, current map[metricKey]int, prev map[metricKey]int) {
+ clear(prev)
+ maps.Copy(prev, current)
+ clear(current)
+ for _, addr := range addrs {
+ transport := metricshelper.GetTransport(addr)
+ ipv := metricshelper.GetIPVersion(addr)
+ key := metricKey{ipv: ipv, transport: transport}
+ current[key]++
+ }
+
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ for k, v := range current {
+ *tags = append(*tags, k.ipv, k.transport)
+ metric.WithLabelValues(*tags...).Set(float64(v))
+ *tags = (*tags)[:0]
+ }
+ for k := range prev {
+ if _, ok := current[k]; ok {
+ continue
+ }
+ *tags = append(*tags, k.ipv, k.transport)
+ metric.WithLabelValues(*tags...).Set(0)
+ *tags = (*tags)[:0]
+ }
+}
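+
+// For example (illustrative): if the previous update recorded {ip4, tcp}: 2
+// and the current one records only {ip4, quic-v1}: 1, updateMetric sets the
+// quic-v1 gauge to 1 and explicitly zeroes the now-absent {ip4, tcp} gauge,
+// so stale label combinations don't keep reporting their old values.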
+
+func resetMetric(metric *prometheus.GaugeVec, current map[metricKey]int, prev map[metricKey]int) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+ for k := range current {
+ *tags = append(*tags, k.ipv, k.transport)
+ metric.WithLabelValues(*tags...).Set(0)
+ *tags = (*tags)[:0]
+ }
+ clear(current)
+ clear(prev)
+}
diff --git a/p2p/host/basic/addrs_metrics_test.go b/p2p/host/basic/addrs_metrics_test.go
new file mode 100644
index 0000000000..7022ea35b9
--- /dev/null
+++ b/p2p/host/basic/addrs_metrics_test.go
@@ -0,0 +1,46 @@
+//go:build nocover
+
+package basichost
+
+import (
+ "math/rand"
+ "testing"
+
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+func TestMetricsNoAllocNoCover(t *testing.T) {
+ addrs := []ma.Multiaddr{
+ ma.StringCast("/ip4/1.2.3.4/tcp/1"),
+ ma.StringCast("/ip4/1.2.3.4/tcp/2"),
+ ma.StringCast("/ip4/1.2.3.4/udp/2345/quic"),
+ ma.StringCast("/ip4/1.2.3.4/udp/2346/webrtc-direct"),
+ ma.StringCast("/ip4/1.2.3.4/tcp/80/ws"),
+ ma.StringCast("/ip4/1.2.3.4/tcp/443/wss"),
+ ma.StringCast("/ip4/1.2.3.4/udp/443/quic-v1/webtransport"),
+ }
+
+ randAddrs := func() []ma.Multiaddr {
+ n := rand.Intn(len(addrs))
+ k := n + rand.Intn(len(addrs)-n)
+ return addrs[n:k]
+ }
+
+ mt := newMetricsTracker(withRegisterer(prometheus.DefaultRegisterer))
+ tests := map[string]func(){
+ "ConfirmedAddrsChanged": func() {
+ mt.ConfirmedAddrsChanged(randAddrs(), randAddrs(), randAddrs())
+ },
+ "ReachabilityTrackerClosed": func() {
+ mt.ReachabilityTrackerClosed()
+ },
+ }
+
+ for method, f := range tests {
+ allocs := testing.AllocsPerRun(1000, f)
+ if allocs > 0 {
+ t.Fatalf("Alloc Test: %s, got: %0.2f, expected: 0 allocs", method, allocs)
+ }
+ }
+}
diff --git a/p2p/host/basic/addrs_reachability_tracker.go b/p2p/host/basic/addrs_reachability_tracker.go
new file mode 100644
index 0000000000..cf75ae3d98
--- /dev/null
+++ b/p2p/host/basic/addrs_reachability_tracker.go
@@ -0,0 +1,680 @@
+package basichost
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "math"
+ "slices"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/benbjohnson/clock"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/p2p/protocol/autonatv2"
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
+type autonatv2Client interface {
+ GetReachability(ctx context.Context, reqs []autonatv2.Request) (autonatv2.Result, error)
+}
+
+const (
+ // maxAddrsPerRequest is the maximum number of addresses to probe in a single request
+ maxAddrsPerRequest = 10
+ // maxTrackedAddrs is the maximum number of addresses to track
+ // 10 addrs per transport for 5 transports
+ maxTrackedAddrs = 50
+ // defaultMaxConcurrency is the default number of concurrent workers for reachability checks
+ defaultMaxConcurrency = 5
+ // newAddrsProbeDelay is the delay before probing the reachability of newly added addresses.
+ newAddrsProbeDelay = 1 * time.Second
+)
+
+// addrsReachabilityTracker tracks reachability for addresses.
+// Use UpdateAddrs to provide addresses for tracking reachability.
+// reachabilityUpdateCh is notified when reachability for any of the tracked addresses changes.
+type addrsReachabilityTracker struct {
+ ctx context.Context
+ cancel context.CancelFunc
+ wg sync.WaitGroup
+
+ client autonatv2Client
+ // reachabilityUpdateCh is used to notify when reachability may have changed
+ reachabilityUpdateCh chan struct{}
+ maxConcurrency int
+ newAddrsProbeDelay time.Duration
+ probeManager *probeManager
+ newAddrs chan []ma.Multiaddr
+ clock clock.Clock
+ metricsTracker MetricsTracker
+
+ mx sync.Mutex
+ reachableAddrs []ma.Multiaddr
+ unreachableAddrs []ma.Multiaddr
+ unknownAddrs []ma.Multiaddr
+}
+
+// newAddrsReachabilityTracker returns a new addrsReachabilityTracker.
+// reachabilityUpdateCh is notified when reachability for any of the tracked addresses changes.
+func newAddrsReachabilityTracker(client autonatv2Client, reachabilityUpdateCh chan struct{}, cl clock.Clock, metricsTracker MetricsTracker) *addrsReachabilityTracker {
+ ctx, cancel := context.WithCancel(context.Background())
+ if cl == nil {
+ cl = clock.New()
+ }
+ return &addrsReachabilityTracker{
+ ctx: ctx,
+ cancel: cancel,
+ client: client,
+ reachabilityUpdateCh: reachabilityUpdateCh,
+ probeManager: newProbeManager(cl.Now),
+ newAddrsProbeDelay: newAddrsProbeDelay,
+ maxConcurrency: defaultMaxConcurrency,
+ newAddrs: make(chan []ma.Multiaddr, 1),
+ clock: cl,
+ metricsTracker: metricsTracker,
+ }
+}
+
+func (r *addrsReachabilityTracker) UpdateAddrs(addrs []ma.Multiaddr) {
+ select {
+ case r.newAddrs <- slices.Clone(addrs):
+ case <-r.ctx.Done():
+ }
+}
+
+func (r *addrsReachabilityTracker) ConfirmedAddrs() (reachableAddrs, unreachableAddrs, unknownAddrs []ma.Multiaddr) {
+ r.mx.Lock()
+ defer r.mx.Unlock()
+ return slices.Clone(r.reachableAddrs), slices.Clone(r.unreachableAddrs), slices.Clone(r.unknownAddrs)
+}
+
+func (r *addrsReachabilityTracker) Start() error {
+ r.wg.Add(1)
+ go r.background()
+ return nil
+}
+
+func (r *addrsReachabilityTracker) Close() error {
+ r.cancel()
+ r.wg.Wait()
+ return nil
+}
+
+const (
+ // defaultReachabilityRefreshInterval is the default interval to refresh reachability.
+ // In steady state, we check for any required probes every refresh interval.
+ // This doesn't mean we'll probe for any particular address, only that we'll check
+ // if any address needs to be probed.
+ defaultReachabilityRefreshInterval = 5 * time.Minute
+ // maxBackoffInterval is the maximum back off in case we're unable to probe for reachability.
+ // We may be unable to confirm addresses in case there are no valid peers with autonatv2
+ // or the autonatv2 subsystem is consistently erroring.
+ maxBackoffInterval = 5 * time.Minute
+ // backoffStartInterval is the initial back off in case we're unable to probe for reachability.
+ backoffStartInterval = 5 * time.Second
+)
+
+func (r *addrsReachabilityTracker) background() {
+ defer r.wg.Done()
+
+ // probeTicker is used to trigger probes at regular intervals
+ probeTicker := r.clock.Ticker(defaultReachabilityRefreshInterval)
+ defer probeTicker.Stop()
+
+ // probeTimer is used to trigger probes at specific times
+ probeTimer := r.clock.Timer(time.Duration(math.MaxInt64))
+ defer probeTimer.Stop()
+ nextProbeTime := time.Time{}
+
+ var task reachabilityTask
+ var backoffInterval time.Duration
+ var currReachable, currUnreachable, currUnknown, prevReachable, prevUnreachable, prevUnknown []ma.Multiaddr
+ for {
+ select {
+ case <-probeTicker.C:
+ // don't start a probe if we have a scheduled probe
+ if task.BackoffCh == nil && nextProbeTime.IsZero() {
+ task = r.refreshReachability()
+ }
+ case <-probeTimer.C:
+ if task.BackoffCh == nil {
+ task = r.refreshReachability()
+ }
+ nextProbeTime = time.Time{}
+ case backoff := <-task.BackoffCh:
+ task = reachabilityTask{}
+ // On completion, start the next probe immediately, or wait for backoff.
+ // If there are no further probes, refreshReachability returns an empty task,
+ // whose BackoffCh blocks forever; eventually the ticker fires and we check again.
+ if backoff {
+ backoffInterval = newBackoffInterval(backoffInterval)
+ } else {
+ backoffInterval = -1 * time.Second // negative to trigger next probe immediately
+ }
+ nextProbeTime = r.clock.Now().Add(backoffInterval)
+ case addrs := <-r.newAddrs:
+ if task.BackoffCh != nil { // cancel running task.
+ task.Cancel()
+ <-task.BackoffCh // ignore backoff from cancelled task
+ task = reachabilityTask{}
+ }
+ r.updateTrackedAddrs(addrs)
+ newAddrsNextTime := r.clock.Now().Add(r.newAddrsProbeDelay)
+ if nextProbeTime.Before(newAddrsNextTime) {
+ nextProbeTime = newAddrsNextTime
+ }
+ case <-r.ctx.Done():
+ if task.BackoffCh != nil {
+ task.Cancel()
+ <-task.BackoffCh
+ task = reachabilityTask{}
+ }
+ if r.metricsTracker != nil {
+ r.metricsTracker.ReachabilityTrackerClosed()
+ }
+ return
+ }
+
+ currReachable, currUnreachable, currUnknown = r.appendConfirmedAddrs(currReachable[:0], currUnreachable[:0], currUnknown[:0])
+ if areAddrsDifferent(prevReachable, currReachable) || areAddrsDifferent(prevUnreachable, currUnreachable) || areAddrsDifferent(prevUnknown, currUnknown) {
+ if r.metricsTracker != nil {
+ r.metricsTracker.ConfirmedAddrsChanged(currReachable, currUnreachable, currUnknown)
+ }
+ r.notify()
+ }
+ prevReachable = append(prevReachable[:0], currReachable...)
+ prevUnreachable = append(prevUnreachable[:0], currUnreachable...)
+ prevUnknown = append(prevUnknown[:0], currUnknown...)
+ if !nextProbeTime.IsZero() {
+ probeTimer.Reset(nextProbeTime.Sub(r.clock.Now()))
+ }
+ }
+}
+
+func newBackoffInterval(current time.Duration) time.Duration {
+ if current <= 0 {
+ return backoffStartInterval
+ }
+ current *= 2
+ if current > maxBackoffInterval {
+ return maxBackoffInterval
+ }
+ return current
+}
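+
+// Worked example: starting from zero, consecutive failed refreshes back off
+// 5s, 10s, 20s, 40s, ..., capped at maxBackoffInterval (5 minutes).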
+
+func (r *addrsReachabilityTracker) appendConfirmedAddrs(reachable, unreachable, unknown []ma.Multiaddr) (reachableAddrs, unreachableAddrs, unknownAddrs []ma.Multiaddr) {
+ reachable, unreachable, unknown = r.probeManager.AppendConfirmedAddrs(reachable, unreachable, unknown)
+ r.mx.Lock()
+ r.reachableAddrs = append(r.reachableAddrs[:0], reachable...)
+ r.unreachableAddrs = append(r.unreachableAddrs[:0], unreachable...)
+ r.unknownAddrs = append(r.unknownAddrs[:0], unknown...)
+ r.mx.Unlock()
+
+ return reachable, unreachable, unknown
+}
+
+func (r *addrsReachabilityTracker) notify() {
+ select {
+ case r.reachabilityUpdateCh <- struct{}{}:
+ default:
+ }
+}
+
+func (r *addrsReachabilityTracker) updateTrackedAddrs(addrs []ma.Multiaddr) {
+ addrs = slices.DeleteFunc(addrs, func(a ma.Multiaddr) bool {
+ return !manet.IsPublicAddr(a)
+ })
+ if len(addrs) > maxTrackedAddrs {
+ log.Error("too many addresses for addrs reachability tracker; dropping some", "total", len(addrs), "max", maxTrackedAddrs, "dropping", len(addrs)-maxTrackedAddrs)
+ addrs = addrs[:maxTrackedAddrs]
+ }
+ r.probeManager.UpdateAddrs(addrs)
+}
+
+type probe = []autonatv2.Request
+
+const probeTimeout = 30 * time.Second
+
+// reachabilityTask is a task to refresh reachability.
+// Waiting on the zero value blocks forever.
+type reachabilityTask struct {
+ Cancel context.CancelFunc
+ // BackoffCh receives whether the caller should back off before
+ // refreshing reachability again
+ BackoffCh chan bool
+}
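+
+// Illustrative note: receiving from the zero value's nil BackoffCh blocks
+// forever, which the background loop relies on when no task is in flight:
+//
+//  var task reachabilityTask
+//  <-task.BackoffCh // blocks forever: a nil channel never receives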
+
+func (r *addrsReachabilityTracker) refreshReachability() reachabilityTask {
+ if len(r.probeManager.GetProbe()) == 0 {
+ return reachabilityTask{}
+ }
+ resCh := make(chan bool, 1)
+ ctx, cancel := context.WithTimeout(r.ctx, 5*time.Minute)
+ r.wg.Add(1)
+ // We run probes provided by the probe manager. Probing stops when any
+ // of the following happens:
+ // - there are no more probes to run
+ // - the context is cancelled
+ // - there are too many consecutive failures from the client
+ // - the client has no valid peers to probe
+ go func() {
+ defer r.wg.Done()
+ defer cancel()
+ client := &errCountingClient{autonatv2Client: r.client, MaxConsecutiveErrors: maxConsecutiveErrors}
+ var backoff atomic.Bool
+ var wg sync.WaitGroup
+ wg.Add(r.maxConcurrency)
+ for range r.maxConcurrency {
+ go func() {
+ defer wg.Done()
+ for {
+ if ctx.Err() != nil {
+ return
+ }
+ reqs := r.probeManager.GetProbe()
+ if len(reqs) == 0 {
+ return
+ }
+ r.probeManager.MarkProbeInProgress(reqs)
+ rctx, cancel := context.WithTimeout(ctx, probeTimeout)
+ res, err := client.GetReachability(rctx, reqs)
+ cancel()
+ r.probeManager.CompleteProbe(reqs, res, err)
+ if isErrorPersistent(err) {
+ backoff.Store(true)
+ return
+ }
+ }
+ }()
+ }
+ wg.Wait()
+ resCh <- backoff.Load()
+ }()
+ return reachabilityTask{Cancel: cancel, BackoffCh: resCh}
+}
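+
+// For example (illustrative): with maxConcurrency = 5, up to five workers pull
+// probes from the probe manager concurrently and share a single
+// errCountingClient, so consecutive failures are counted across all workers.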
+
+var errTooManyConsecutiveFailures = errors.New("too many consecutive failures")
+
+// errCountingClient counts consecutive errors from the wrapped autonatv2Client and, once they
+// exceed MaxConsecutiveErrors, wraps returned errors with errTooManyConsecutiveFailures.
+type errCountingClient struct {
+ autonatv2Client
+ MaxConsecutiveErrors int
+ mx sync.Mutex
+ consecutiveErrors int
+}
+
+func (c *errCountingClient) GetReachability(ctx context.Context, reqs probe) (autonatv2.Result, error) {
+ res, err := c.autonatv2Client.GetReachability(ctx, reqs)
+ c.mx.Lock()
+ defer c.mx.Unlock()
+ if err != nil && !errors.Is(err, context.Canceled) { // ignore canceled errors, they're not errors from autonatv2
+ c.consecutiveErrors++
+ if c.consecutiveErrors > c.MaxConsecutiveErrors {
+ err = fmt.Errorf("%w:%w", errTooManyConsecutiveFailures, err)
+ }
+ if errors.Is(err, autonatv2.ErrPrivateAddrs) {
+ log.Error("private IP addr in autonatv2 request", "err", err)
+ }
+ } else {
+ c.consecutiveErrors = 0
+ }
+ return res, err
+}
+
+const maxConsecutiveErrors = 20
+
+// isErrorPersistent returns whether the error will repeat on future probes for a while
+func isErrorPersistent(err error) bool {
+ if err == nil {
+ return false
+ }
+ return errors.Is(err, autonatv2.ErrPrivateAddrs) || errors.Is(err, autonatv2.ErrNoPeers) ||
+ errors.Is(err, errTooManyConsecutiveFailures)
+}
+
+const (
+ // recentProbeInterval is the interval to probe addresses that have been refused.
+ // These are generally addresses with newer transports for which we don't have many
+ // peers capable of dialing the transport.
+ recentProbeInterval = 10 * time.Minute
+ // maxConsecutiveRefusals is the maximum number of consecutive refusals for an address after which
+ // we wait for `recentProbeInterval` before probing again
+ maxConsecutiveRefusals = 5
+ // maxRecentDialsPerAddr is the maximum number of dials on an address before we stop probing for the address.
+ // This is used to prevent infinite probing of an address whose status is indeterminate for any reason.
+ maxRecentDialsPerAddr = 10
+ // confidence is the absolute difference between the number of successes and failures for an address
+ //
+ // targetConfidence is the confidence threshold for an address after which we wait for
+ // `highConfidenceAddrProbeInterval` before probing again.
+ targetConfidence = 3
+ // minConfidence is the confidence threshold for an address to be considered reachable or unreachable
+ minConfidence = 2
+ // maxRecentDialsWindow is the maximum number of recent probe results to consider for a single address
+ //
+ // +2 allows for 1 invalid probe result. Consider a string of successes, after which we have a single failure
+ // and then a success (...S S S S F S). The confidence in the targetConfidence window will be equal to
+ // targetConfidence, the last F and S cancel each other, and we won't probe again for highConfidenceAddrProbeInterval.
+ maxRecentDialsWindow = targetConfidence + 2
+ // highConfidenceAddrProbeInterval is the maximum interval between probes for an address
+ highConfidenceAddrProbeInterval = 1 * time.Hour
+ // maxProbeResultTTL is the maximum time to keep probe results for an address
+ maxProbeResultTTL = maxRecentDialsWindow * highConfidenceAddrProbeInterval
+)
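+
+// Illustrative arithmetic for the constants above: maxRecentDialsWindow is
+// targetConfidence+2 = 5, so maxProbeResultTTL is 5 * 1h = 5h; outcomes older
+// than that are discarded before computing reachability.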
+
+// probeManager tracks reachability for a set of addresses by periodically probing reachability with autonatv2.
+// A probe is a list of addresses which can be tested for reachability with autonatv2.
+// This struct decides the priority order of addresses for testing reachability, and throttles in case there
+// have been too many probes for an address within `recentProbeInterval`.
+//
+// Use `GetProbe`, `MarkProbeInProgress`, and `CompleteProbe` to execute the probes with an autonatv2 client.
+type probeManager struct {
+ now func() time.Time
+
+ mx sync.Mutex
+ inProgressProbes map[string]int // addr -> count
+ inProgressProbesTotal int
+ statuses map[string]*addrStatus
+ addrs []ma.Multiaddr
+}
+
+// newProbeManager creates a new probe manager.
+func newProbeManager(now func() time.Time) *probeManager {
+ return &probeManager{
+ statuses: make(map[string]*addrStatus),
+ inProgressProbes: make(map[string]int),
+ now: now,
+ }
+}
+
+// AppendConfirmedAddrs appends the currently confirmed reachable, unreachable, and unknown addresses to the passed slices.
+func (m *probeManager) AppendConfirmedAddrs(reachable, unreachable, unknown []ma.Multiaddr) (reachableAddrs, unreachableAddrs, unknownAddrs []ma.Multiaddr) {
+ m.mx.Lock()
+ defer m.mx.Unlock()
+
+ for _, a := range m.addrs {
+ s := m.statuses[string(a.Bytes())]
+ s.RemoveBefore(m.now().Add(-maxProbeResultTTL)) // cleanup stale results
+ switch s.Reachability() {
+ case network.ReachabilityPublic:
+ reachable = append(reachable, a)
+ case network.ReachabilityPrivate:
+ unreachable = append(unreachable, a)
+ case network.ReachabilityUnknown:
+ unknown = append(unknown, a)
+ }
+ }
+ return reachable, unreachable, unknown
+}
+
+// UpdateAddrs updates the tracked addrs
+func (m *probeManager) UpdateAddrs(addrs []ma.Multiaddr) {
+ m.mx.Lock()
+ defer m.mx.Unlock()
+
+ slices.SortFunc(addrs, func(a, b ma.Multiaddr) int { return a.Compare(b) })
+ statuses := make(map[string]*addrStatus, len(addrs))
+ for _, addr := range addrs {
+ k := string(addr.Bytes())
+ if _, ok := m.statuses[k]; !ok {
+ statuses[k] = &addrStatus{Addr: addr}
+ } else {
+ statuses[k] = m.statuses[k]
+ }
+ }
+ m.addrs = addrs
+ m.statuses = statuses
+}
+
+// GetProbe returns the next probe. It returns the zero value when there are no more probes to run.
+// Probes that are run against an autonatv2 client should be marked in progress with
+// `MarkProbeInProgress` before running.
+func (m *probeManager) GetProbe() probe {
+ m.mx.Lock()
+ defer m.mx.Unlock()
+
+ now := m.now()
+ for i, a := range m.addrs {
+ ab := a.Bytes()
+ pc := m.statuses[string(ab)].RequiredProbeCount(now)
+ if m.inProgressProbes[string(ab)] >= pc {
+ continue
+ }
+ reqs := make(probe, 0, maxAddrsPerRequest)
+ reqs = append(reqs, autonatv2.Request{Addr: a, SendDialData: true})
+ // We have the first (primary) address. Append other addresses, ignoring in-progress probes
+ // on secondary addresses. The expectation is that the primary address will
+ // be dialed.
+ for j := 1; j < len(m.addrs); j++ {
+ k := (i + j) % len(m.addrs)
+ ab := m.addrs[k].Bytes()
+ pc := m.statuses[string(ab)].RequiredProbeCount(now)
+ if pc == 0 {
+ continue
+ }
+ reqs = append(reqs, autonatv2.Request{Addr: m.addrs[k], SendDialData: true})
+ if len(reqs) >= maxAddrsPerRequest {
+ break
+ }
+ }
+ return reqs
+ }
+ return nil
+}
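+
+// For example (illustrative): with tracked addrs [A, B, C] where A still needs
+// probes, GetProbe returns a probe with A as the primary (dial-expected)
+// address, followed by whichever of B and C still need probes as fallbacks in
+// case the server refuses to dial A.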
+
+// MarkProbeInProgress should be called when a probe is started.
+// All in-progress probes *MUST* be completed with `CompleteProbe`.
+func (m *probeManager) MarkProbeInProgress(reqs probe) {
+ if len(reqs) == 0 {
+ return
+ }
+ m.mx.Lock()
+ defer m.mx.Unlock()
+ m.inProgressProbes[string(reqs[0].Addr.Bytes())]++
+ m.inProgressProbesTotal++
+}
+
+// InProgressProbes returns the number of probes that are currently in progress.
+func (m *probeManager) InProgressProbes() int {
+ m.mx.Lock()
+ defer m.mx.Unlock()
+ return m.inProgressProbesTotal
+}
+
+// CompleteProbe should be called when a probe completes.
+func (m *probeManager) CompleteProbe(reqs probe, res autonatv2.Result, err error) {
+ now := m.now()
+
+ if len(reqs) == 0 {
+ // should never happen
+ return
+ }
+
+ m.mx.Lock()
+ defer m.mx.Unlock()
+
+ // decrement in-progress count for the first address
+ primaryAddrKey := string(reqs[0].Addr.Bytes())
+ m.inProgressProbes[primaryAddrKey]--
+ if m.inProgressProbes[primaryAddrKey] <= 0 {
+ delete(m.inProgressProbes, primaryAddrKey)
+ }
+ m.inProgressProbesTotal--
+
+ // nothing to do if the request errored.
+ if err != nil {
+ return
+ }
+
+ // Consider only the primary address refused. This increases the number of
+ // refused probes, but refused probes are cheap for a server as no dials are made.
+ if res.AllAddrsRefused {
+ if s, ok := m.statuses[primaryAddrKey]; ok {
+ s.AddRefusal(now)
+ }
+ return
+ }
+ dialAddrKey := string(res.Addr.Bytes())
+ if dialAddrKey != primaryAddrKey {
+ if s, ok := m.statuses[primaryAddrKey]; ok {
+ s.AddRefusal(now)
+ }
+ }
+
+ // record the result for the dialed address
+ if s, ok := m.statuses[dialAddrKey]; ok {
+ s.AddOutcome(now, res.Reachability, maxRecentDialsWindow)
+ }
+}
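+
+// For example (illustrative): if a probe [A, B] completes with res.Addr == B,
+// A records a refusal (the server declined to dial it) while B records the
+// dial outcome; if AllAddrsRefused is set, only the primary address A records
+// a refusal.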
+
+type dialOutcome struct {
+ Success bool
+ At time.Time
+}
+
+type addrStatus struct {
+ Addr ma.Multiaddr
+ lastRefusalTime time.Time
+ consecutiveRefusals int
+ dialTimes []time.Time
+ outcomes []dialOutcome
+}
+
+func (s *addrStatus) Reachability() network.Reachability {
+ rch, _, _ := s.reachabilityAndCounts()
+ return rch
+}
+
+func (s *addrStatus) RequiredProbeCount(now time.Time) int {
+ if s.consecutiveRefusals >= maxConsecutiveRefusals {
+ if now.Sub(s.lastRefusalTime) < recentProbeInterval {
+ return 0
+ }
+ // reset every `recentProbeInterval`
+ s.lastRefusalTime = time.Time{}
+ s.consecutiveRefusals = 0
+ }
+
+ // Don't probe if we have probed too many times recently
+ rd := s.recentDialCount(now)
+ if rd >= maxRecentDialsPerAddr {
+ return 0
+ }
+
+ return s.requiredProbeCountForConfirmation(now)
+}
+
+func (s *addrStatus) requiredProbeCountForConfirmation(now time.Time) int {
+ reachability, successes, failures := s.reachabilityAndCounts()
+ confidence := successes - failures
+ if confidence < 0 {
+ confidence = -confidence
+ }
+ cnt := targetConfidence - confidence
+ if cnt > 0 {
+ return cnt
+ }
+ // we have enough confirmations; check if we should refresh
+
+ // This should never happen: reaching targetConfidence requires recorded outcomes.
+ if len(s.outcomes) == 0 {
+ return 0
+ }
+ lastOutcome := s.outcomes[len(s.outcomes)-1]
+ // If the last probe result is old, we need to retest
+ if now.Sub(lastOutcome.At) > highConfidenceAddrProbeInterval {
+ return 1
+ }
+ // if the last probe result was different from reachability, probe again.
+ switch reachability {
+ case network.ReachabilityPublic:
+ if !lastOutcome.Success {
+ return 1
+ }
+ case network.ReachabilityPrivate:
+ if lastOutcome.Success {
+ return 1
+ }
+ default:
+ // this should never happen
+ return 1
+ }
+ return 0
+}
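+
+// Worked example (illustrative): with outcomes "SS", successes-failures = 2,
+// so targetConfidence-2 = 1 more probe is required. With "SSS", confidence is
+// already 3 and a single refresh probe is needed only once the last result is
+// older than highConfidenceAddrProbeInterval.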
+
+func (s *addrStatus) AddRefusal(now time.Time) {
+ s.lastRefusalTime = now
+ s.consecutiveRefusals++
+}
+
+func (s *addrStatus) AddOutcome(at time.Time, rch network.Reachability, windowSize int) {
+ s.lastRefusalTime = time.Time{}
+ s.consecutiveRefusals = 0
+
+ s.dialTimes = append(s.dialTimes, at)
+ for i, t := range s.dialTimes {
+ if at.Sub(t) < recentProbeInterval {
+ s.dialTimes = slices.Delete(s.dialTimes, 0, i)
+ break
+ }
+ }
+
+ s.RemoveBefore(at.Add(-maxProbeResultTTL)) // remove old outcomes
+ success := false
+ switch rch {
+ case network.ReachabilityPublic:
+ success = true
+ case network.ReachabilityPrivate:
+ success = false
+ default:
+ return // don't store the outcome if reachability is unknown
+ }
+ s.outcomes = append(s.outcomes, dialOutcome{At: at, Success: success})
+ if len(s.outcomes) > windowSize {
+ s.outcomes = slices.Delete(s.outcomes, 0, len(s.outcomes)-windowSize)
+ }
+}
+
+// RemoveBefore removes outcomes recorded before t.
+func (s *addrStatus) RemoveBefore(t time.Time) {
+ end := 0
+ for ; end < len(s.outcomes); end++ {
+ if !s.outcomes[end].At.Before(t) {
+ break
+ }
+ }
+ s.outcomes = slices.Delete(s.outcomes, 0, end)
+}
+
+func (s *addrStatus) recentDialCount(now time.Time) int {
+ cnt := 0
+ for _, t := range slices.Backward(s.dialTimes) {
+ if now.Sub(t) > recentProbeInterval {
+ break
+ }
+ cnt++
+ }
+ return cnt
+}
+
+func (s *addrStatus) reachabilityAndCounts() (rch network.Reachability, successes int, failures int) {
+ for _, r := range s.outcomes {
+ if r.Success {
+ successes++
+ } else {
+ failures++
+ }
+ }
+ if successes-failures >= minConfidence {
+ return network.ReachabilityPublic, successes, failures
+ }
+ if failures-successes >= minConfidence {
+ return network.ReachabilityPrivate, successes, failures
+ }
+ return network.ReachabilityUnknown, successes, failures
+}
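+
+// For example (illustrative): 3 successes and 1 failure give a difference of
+// 2 >= minConfidence, so the address is ReachabilityPublic; 2 successes and
+// 2 failures give 0, leaving the address's reachability Unknown.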
diff --git a/p2p/host/basic/addrs_reachability_tracker_test.go b/p2p/host/basic/addrs_reachability_tracker_test.go
new file mode 100644
index 0000000000..74f9828d2d
--- /dev/null
+++ b/p2p/host/basic/addrs_reachability_tracker_test.go
@@ -0,0 +1,942 @@
+package basichost
+
+import (
+ "context"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "math/rand"
+ "net"
+ "net/netip"
+ "slices"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/benbjohnson/clock"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/p2p/protocol/autonatv2"
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestProbeManager(t *testing.T) {
+ pub1 := ma.StringCast("/ip4/1.1.1.1/tcp/1")
+ pub2 := ma.StringCast("/ip4/1.1.1.2/tcp/1")
+ pub3 := ma.StringCast("/ip4/1.1.1.3/tcp/1")
+
+ cl := clock.NewMock()
+
+ nextProbe := func(pm *probeManager) []autonatv2.Request {
+ reqs := pm.GetProbe()
+ if len(reqs) != 0 {
+ pm.MarkProbeInProgress(reqs)
+ }
+ return reqs
+ }
+
+ makeNewProbeManager := func(addrs []ma.Multiaddr) *probeManager {
+ pm := newProbeManager(cl.Now)
+ pm.UpdateAddrs(addrs)
+ return pm
+ }
+
+ t.Run("addrs updates", func(t *testing.T) {
+ pm := newProbeManager(cl.Now)
+ pm.UpdateAddrs([]ma.Multiaddr{pub1, pub2})
+ for {
+ reqs := nextProbe(pm)
+ if len(reqs) == 0 {
+ break
+ }
+ pm.CompleteProbe(reqs, autonatv2.Result{Addr: reqs[0].Addr, Idx: 0, Reachability: network.ReachabilityPublic}, nil)
+ }
+ reachable, _, _ := pm.AppendConfirmedAddrs(nil, nil, nil)
+ require.Equal(t, reachable, []ma.Multiaddr{pub1, pub2})
+ pm.UpdateAddrs([]ma.Multiaddr{pub3})
+
+ reachable, _, _ = pm.AppendConfirmedAddrs(nil, nil, nil)
+ require.Empty(t, reachable)
+ require.Len(t, pm.statuses, 1)
+ })
+
+ t.Run("inprogress", func(t *testing.T) {
+ pm := makeNewProbeManager([]ma.Multiaddr{pub1, pub2})
+ reqs1 := pm.GetProbe()
+ reqs2 := pm.GetProbe()
+ require.Equal(t, reqs1, reqs2)
+ for range targetConfidence {
+ reqs := nextProbe(pm)
+ require.Equal(t, reqs, []autonatv2.Request{{Addr: pub1, SendDialData: true}, {Addr: pub2, SendDialData: true}})
+ }
+ for range targetConfidence {
+ reqs := nextProbe(pm)
+ require.Equal(t, reqs, []autonatv2.Request{{Addr: pub2, SendDialData: true}, {Addr: pub1, SendDialData: true}})
+ }
+ reqs := pm.GetProbe()
+ require.Empty(t, reqs)
+ })
+
+ t.Run("refusals", func(t *testing.T) {
+ pm := makeNewProbeManager([]ma.Multiaddr{pub1, pub2})
+ var probes [][]autonatv2.Request
+ for range targetConfidence {
+ reqs := nextProbe(pm)
+ require.Equal(t, reqs, []autonatv2.Request{{Addr: pub1, SendDialData: true}, {Addr: pub2, SendDialData: true}})
+ probes = append(probes, reqs)
+ }
+ // first one refused, second one successful
+ for _, p := range probes {
+ pm.CompleteProbe(p, autonatv2.Result{Addr: pub2, Idx: 1, Reachability: network.ReachabilityPublic}, nil)
+ }
+ // the second address is validated!
+ probes = nil
+ for range targetConfidence {
+ reqs := nextProbe(pm)
+ require.Equal(t, reqs, []autonatv2.Request{{Addr: pub1, SendDialData: true}})
+ probes = append(probes, reqs)
+ }
+ reqs := pm.GetProbe()
+ require.Empty(t, reqs)
+ for _, p := range probes {
+ pm.CompleteProbe(p, autonatv2.Result{AllAddrsRefused: true}, nil)
+ }
+ // all requests refused; no more probes for too many refusals
+ reqs = pm.GetProbe()
+ require.Empty(t, reqs)
+
+ cl.Add(recentProbeInterval)
+ reqs = pm.GetProbe()
+ require.Equal(t, reqs, []autonatv2.Request{{Addr: pub1, SendDialData: true}})
+ })
+
+ t.Run("successes", func(t *testing.T) {
+ pm := makeNewProbeManager([]ma.Multiaddr{pub1, pub2})
+ for j := 0; j < 2; j++ {
+ for i := 0; i < targetConfidence; i++ {
+ reqs := nextProbe(pm)
+ pm.CompleteProbe(reqs, autonatv2.Result{Addr: reqs[0].Addr, Idx: 0, Reachability: network.ReachabilityPublic}, nil)
+ }
+ }
+ // all addrs confirmed
+ reqs := pm.GetProbe()
+ require.Empty(t, reqs)
+
+ cl.Add(highConfidenceAddrProbeInterval + time.Millisecond)
+ reqs = nextProbe(pm)
+ require.Equal(t, reqs, []autonatv2.Request{{Addr: pub1, SendDialData: true}, {Addr: pub2, SendDialData: true}})
+ reqs = nextProbe(pm)
+ require.Equal(t, reqs, []autonatv2.Request{{Addr: pub2, SendDialData: true}, {Addr: pub1, SendDialData: true}})
+ })
+
+ t.Run("throttling on indeterminate reachability", func(t *testing.T) {
+ pm := makeNewProbeManager([]ma.Multiaddr{pub1, pub2})
+ reachability := network.ReachabilityPublic
+ nextReachability := func() network.Reachability {
+ if reachability == network.ReachabilityPublic {
+ reachability = network.ReachabilityPrivate
+ } else {
+ reachability = network.ReachabilityPublic
+ }
+ return reachability
+ }
+ // both addresses are indeterminate
+ for range 2 * maxRecentDialsPerAddr {
+ reqs := nextProbe(pm)
+ pm.CompleteProbe(reqs, autonatv2.Result{Addr: reqs[0].Addr, Idx: 0, Reachability: nextReachability()}, nil)
+ }
+ reqs := pm.GetProbe()
+ require.Empty(t, reqs)
+
+ cl.Add(recentProbeInterval + time.Millisecond)
+ reqs = pm.GetProbe()
+ require.Equal(t, reqs, []autonatv2.Request{{Addr: pub1, SendDialData: true}, {Addr: pub2, SendDialData: true}})
+ for range 2 * maxRecentDialsPerAddr {
+ reqs := nextProbe(pm)
+ pm.CompleteProbe(reqs, autonatv2.Result{Addr: reqs[0].Addr, Idx: 0, Reachability: nextReachability()}, nil)
+ }
+ reqs = pm.GetProbe()
+ require.Empty(t, reqs)
+ })
+
+ t.Run("reachabilityUpdate", func(t *testing.T) {
+ pm := makeNewProbeManager([]ma.Multiaddr{pub1, pub2})
+ for range 2 * targetConfidence {
+ reqs := nextProbe(pm)
+ if reqs[0].Addr.Equal(pub1) {
+ pm.CompleteProbe(reqs, autonatv2.Result{Addr: pub1, Idx: 0, Reachability: network.ReachabilityPublic}, nil)
+ } else {
+ pm.CompleteProbe(reqs, autonatv2.Result{Addr: pub2, Idx: 0, Reachability: network.ReachabilityPrivate}, nil)
+ }
+ }
+
+ reachable, unreachable, _ := pm.AppendConfirmedAddrs(nil, nil, nil)
+ require.Equal(t, reachable, []ma.Multiaddr{pub1})
+ require.Equal(t, unreachable, []ma.Multiaddr{pub2})
+ })
+ t.Run("expiry", func(t *testing.T) {
+ pm := makeNewProbeManager([]ma.Multiaddr{pub1})
+ for range 2 * targetConfidence {
+ reqs := nextProbe(pm)
+ pm.CompleteProbe(reqs, autonatv2.Result{Addr: pub1, Idx: 0, Reachability: network.ReachabilityPublic}, nil)
+ }
+
+ reachable, unreachable, _ := pm.AppendConfirmedAddrs(nil, nil, nil)
+ require.Equal(t, reachable, []ma.Multiaddr{pub1})
+ require.Empty(t, unreachable)
+
+ cl.Add(maxProbeResultTTL + 1*time.Second)
+ reachable, unreachable, _ = pm.AppendConfirmedAddrs(nil, nil, nil)
+ require.Empty(t, reachable)
+ require.Empty(t, unreachable)
+ })
+}
+
+type mockAutoNATClient struct {
+ F func(context.Context, []autonatv2.Request) (autonatv2.Result, error)
+}
+
+func (m mockAutoNATClient) GetReachability(ctx context.Context, reqs []autonatv2.Request) (autonatv2.Result, error) {
+ return m.F(ctx, reqs)
+}
+
+var _ autonatv2Client = mockAutoNATClient{}
+
+func TestAddrsReachabilityTracker(t *testing.T) {
+ pub1 := ma.StringCast("/ip4/1.1.1.1/tcp/1")
+ pub2 := ma.StringCast("/ip4/1.1.1.2/tcp/1")
+ pub3 := ma.StringCast("/ip4/1.1.1.3/tcp/1")
+ pri := ma.StringCast("/ip4/192.168.1.1/tcp/1")
+
+ assertFirstEvent := func(t *testing.T, tr *addrsReachabilityTracker, addrs []ma.Multiaddr) {
+ select {
+ case <-tr.reachabilityUpdateCh:
+ case <-time.After(200 * time.Millisecond):
+ t.Fatal("expected first event quickly")
+ }
+ reachable, unreachable, unknown := tr.ConfirmedAddrs()
+ require.Empty(t, reachable)
+ require.Empty(t, unreachable)
+ require.ElementsMatch(t, unknown, addrs, "%s %s", unknown, addrs)
+ }
+
+ newTracker := func(cli mockAutoNATClient, cl clock.Clock) *addrsReachabilityTracker {
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ if cl == nil {
+ cl = clock.New()
+ }
+ tr := &addrsReachabilityTracker{
+ ctx: ctx,
+ cancel: cancel,
+ client: cli,
+ newAddrs: make(chan []ma.Multiaddr, 1),
+ reachabilityUpdateCh: make(chan struct{}, 1),
+ maxConcurrency: 3,
+ newAddrsProbeDelay: 0 * time.Second,
+ probeManager: newProbeManager(cl.Now),
+ clock: cl,
+ }
+ err := tr.Start()
+ require.NoError(t, err)
+ t.Cleanup(func() {
+ err := tr.Close()
+ assert.NoError(t, err)
+ })
+ return tr
+ }
+
+ t.Run("simple", func(t *testing.T) {
+ // pub1 reachable, pub2 unreachable, pub3 ignored
+ mockClient := mockAutoNATClient{
+ F: func(_ context.Context, reqs []autonatv2.Request) (autonatv2.Result, error) {
+ for i, req := range reqs {
+ if req.Addr.Equal(pub1) {
+ return autonatv2.Result{Addr: pub1, Idx: i, Reachability: network.ReachabilityPublic}, nil
+ } else if req.Addr.Equal(pub2) {
+ return autonatv2.Result{Addr: pub2, Idx: i, Reachability: network.ReachabilityPrivate}, nil
+ }
+ }
+ return autonatv2.Result{AllAddrsRefused: true}, nil
+ },
+ }
+ tr := newTracker(mockClient, nil)
+ tr.UpdateAddrs([]ma.Multiaddr{pub2, pub1, pri})
+ assertFirstEvent(t, tr, []ma.Multiaddr{pub1, pub2})
+
+ select {
+ case <-tr.reachabilityUpdateCh:
+ case <-time.After(2 * time.Second):
+ t.Fatal("expected reachability update")
+ }
+ reachable, unreachable, unknown := tr.ConfirmedAddrs()
+ require.Equal(t, reachable, []ma.Multiaddr{pub1}, "%s %s", reachable, pub1)
+ require.Equal(t, unreachable, []ma.Multiaddr{pub2}, "%s %s", unreachable, pub2)
+ require.Empty(t, unknown)
+
+ tr.UpdateAddrs([]ma.Multiaddr{pub3, pub1, pub2, pri})
+ select {
+ case <-tr.reachabilityUpdateCh:
+ case <-time.After(2 * time.Second):
+ t.Fatal("expected reachability update")
+ }
+ reachable, unreachable, unknown = tr.ConfirmedAddrs()
+ t.Logf("Second probe - Reachable: %v, Unreachable: %v, Unknown: %v", reachable, unreachable, unknown)
+ require.Equal(t, reachable, []ma.Multiaddr{pub1}, "%s %s", reachable, pub1)
+ require.Equal(t, unreachable, []ma.Multiaddr{pub2}, "%s %s", unreachable, pub2)
+ require.Equal(t, unknown, []ma.Multiaddr{pub3}, "%s %s", unknown, pub3)
+ })
+
+ t.Run("confirmed addrs ordering", func(t *testing.T) {
+ mockClient := mockAutoNATClient{
+ F: func(_ context.Context, reqs []autonatv2.Request) (autonatv2.Result, error) {
+ return autonatv2.Result{Addr: reqs[0].Addr, Idx: 0, Reachability: network.ReachabilityPublic}, nil
+ },
+ }
+ tr := newTracker(mockClient, nil)
+ var addrs []ma.Multiaddr
+ for i := 0; i < 10; i++ {
+ addrs = append(addrs, ma.StringCast(fmt.Sprintf("/ip4/1.1.1.1/tcp/%d", i)))
+ }
+ slices.SortFunc(addrs, func(a, b ma.Multiaddr) int { return -a.Compare(b) }) // sort in reverse order
+ tr.UpdateAddrs(addrs)
+ assertFirstEvent(t, tr, addrs)
+
+ select {
+ case <-tr.reachabilityUpdateCh:
+ case <-time.After(2 * time.Second):
+ t.Fatal("expected reachability update")
+ }
+ reachable, unreachable, _ := tr.ConfirmedAddrs()
+ require.Empty(t, unreachable)
+
+ orderedAddrs := slices.Clone(addrs)
+ slices.Reverse(orderedAddrs)
+ require.Equal(t, reachable, orderedAddrs, "%s %s", reachable, addrs)
+ })
+
+ t.Run("backoff", func(t *testing.T) {
+ notify := make(chan struct{}, 1)
+ drainNotify := func() bool {
+ found := false
+ for {
+ select {
+ case <-notify:
+ found = true
+ default:
+ return found
+ }
+ }
+ }
+
+ var allow atomic.Bool
+ mockClient := mockAutoNATClient{
+ F: func(_ context.Context, reqs []autonatv2.Request) (autonatv2.Result, error) {
+ select {
+ case notify <- struct{}{}:
+ default:
+ }
+ if !allow.Load() {
+ return autonatv2.Result{}, autonatv2.ErrNoPeers
+ }
+ if reqs[0].Addr.Equal(pub1) {
+ return autonatv2.Result{Addr: pub1, Idx: 0, Reachability: network.ReachabilityPublic}, nil
+ }
+ return autonatv2.Result{AllAddrsRefused: true}, nil
+ },
+ }
+
+ cl := clock.NewMock()
+ tr := newTracker(mockClient, cl)
+
+ // update addrs and wait for initial checks
+ tr.UpdateAddrs([]ma.Multiaddr{pub1})
+ assertFirstEvent(t, tr, []ma.Multiaddr{pub1})
+ // need to update clock after the background goroutine processes the new addrs
+ time.Sleep(100 * time.Millisecond)
+ cl.Add(1)
+ time.Sleep(100 * time.Millisecond)
+ require.True(t, drainNotify()) // check that we did receive probes
+
+ backoffInterval := backoffStartInterval
+ for i := 0; i < 4; i++ {
+ drainNotify()
+ cl.Add(backoffInterval / 2)
+ select {
+ case <-notify:
+ t.Fatal("unexpected call")
+ case <-time.After(50 * time.Millisecond):
+ }
+ cl.Add(backoffInterval/2 + 1) // +1 to push it slightly over the backoff interval
+ backoffInterval *= 2
+ select {
+ case <-notify:
+ case <-time.After(1 * time.Second):
+ t.Fatal("expected probe")
+ }
+ reachable, unreachable, _ := tr.ConfirmedAddrs()
+ require.Empty(t, reachable)
+ require.Empty(t, unreachable)
+ }
+ allow.Store(true)
+ drainNotify()
+ cl.Add(backoffInterval + 1)
+ select {
+ case <-tr.reachabilityUpdateCh:
+ case <-time.After(1 * time.Second):
+ t.Fatal("unexpected reachability update")
+ }
+ reachable, unreachable, _ := tr.ConfirmedAddrs()
+ require.Equal(t, reachable, []ma.Multiaddr{pub1})
+ require.Empty(t, unreachable)
+ })
+
+ t.Run("event update", func(t *testing.T) {
+ // allow minConfidence probes to pass
+ called := make(chan struct{}, minConfidence)
+ notify := make(chan struct{})
+ mockClient := mockAutoNATClient{
+ F: func(_ context.Context, _ []autonatv2.Request) (autonatv2.Result, error) {
+ select {
+ case called <- struct{}{}:
+ notify <- struct{}{}
+ return autonatv2.Result{Addr: pub1, Idx: 0, Reachability: network.ReachabilityPublic}, nil
+ default:
+ return autonatv2.Result{AllAddrsRefused: true}, nil
+ }
+ },
+ }
+
+ tr := newTracker(mockClient, nil)
+ tr.UpdateAddrs([]ma.Multiaddr{pub1})
+ assertFirstEvent(t, tr, []ma.Multiaddr{pub1})
+
+ for i := 0; i < minConfidence; i++ {
+ select {
+ case <-notify:
+ case <-time.After(1 * time.Second):
+ t.Fatal("expected call to autonat client")
+ }
+ }
+ select {
+ case <-tr.reachabilityUpdateCh:
+ reachable, unreachable, _ := tr.ConfirmedAddrs()
+ require.Equal(t, reachable, []ma.Multiaddr{pub1})
+ require.Empty(t, unreachable)
+ case <-time.After(1 * time.Second):
+ t.Fatal("expected reachability update")
+ }
+ tr.UpdateAddrs([]ma.Multiaddr{pub1}) // same addrs shouldn't get update
+ select {
+ case <-tr.reachabilityUpdateCh:
+ t.Fatal("didn't expect reachability update")
+ case <-time.After(100 * time.Millisecond):
+ }
+ tr.UpdateAddrs([]ma.Multiaddr{pub2})
+ select {
+ case <-tr.reachabilityUpdateCh:
+ reachable, unreachable, _ := tr.ConfirmedAddrs()
+ require.Empty(t, reachable)
+ require.Empty(t, unreachable)
+ case <-time.After(1 * time.Second):
+ t.Fatal("expected reachability update")
+ }
+ })
+
+ t.Run("refresh after reset interval", func(t *testing.T) {
+ notify := make(chan struct{}, 1)
+ drainNotify := func() bool {
+ found := false
+ for {
+ select {
+ case <-notify:
+ found = true
+ default:
+ return found
+ }
+ }
+ }
+
+ mockClient := mockAutoNATClient{
+ F: func(_ context.Context, reqs []autonatv2.Request) (autonatv2.Result, error) {
+ select {
+ case notify <- struct{}{}:
+ default:
+ }
+ if reqs[0].Addr.Equal(pub1) {
+ return autonatv2.Result{Addr: pub1, Idx: 0, Reachability: network.ReachabilityPublic}, nil
+ }
+ return autonatv2.Result{AllAddrsRefused: true}, nil
+ },
+ }
+
+ cl := clock.NewMock()
+ tr := newTracker(mockClient, cl)
+
+ // update addrs and wait for initial checks
+ tr.UpdateAddrs([]ma.Multiaddr{pub1})
+ assertFirstEvent(t, tr, []ma.Multiaddr{pub1})
+ // need to update clock after the background goroutine processes the new addrs
+ time.Sleep(100 * time.Millisecond)
+ cl.Add(1)
+ time.Sleep(100 * time.Millisecond)
+ require.True(t, drainNotify()) // check that we did receive probes
+ cl.Add(highConfidenceAddrProbeInterval / 2)
+ select {
+ case <-notify:
+ t.Fatal("unexpected call")
+ case <-time.After(50 * time.Millisecond):
+ }
+
+ cl.Add(highConfidenceAddrProbeInterval/2 + defaultReachabilityRefreshInterval) // wait up to the refresh interval for the next probe check
+ select {
+ case <-notify:
+ case <-time.After(1 * time.Second):
+ t.Fatal("expected probe")
+ }
+ })
+}
+
+func TestRefreshReachability(t *testing.T) {
+ pub1 := ma.StringCast("/ip4/1.1.1.1/tcp/1")
+ pub2 := ma.StringCast("/ip4/1.1.1.1/tcp/2")
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+
+ newTracker := func(client autonatv2Client, pm *probeManager) *addrsReachabilityTracker {
+ return &addrsReachabilityTracker{
+ probeManager: pm,
+ client: client,
+ clock: clock.New(),
+ maxConcurrency: 3,
+ ctx: ctx,
+ cancel: cancel,
+ }
+ }
+ t.Run("backoff on ErrNoValidPeers", func(t *testing.T) {
+ mockClient := mockAutoNATClient{
+ F: func(_ context.Context, _ []autonatv2.Request) (autonatv2.Result, error) {
+ return autonatv2.Result{}, autonatv2.ErrNoPeers
+ },
+ }
+
+ addrTracker := newProbeManager(time.Now)
+ addrTracker.UpdateAddrs([]ma.Multiaddr{pub1})
+ r := newTracker(mockClient, addrTracker)
+ res := r.refreshReachability()
+ require.True(t, <-res.BackoffCh)
+ require.Equal(t, addrTracker.InProgressProbes(), 0)
+ })
+
+ t.Run("returns backoff on errTooManyConsecutiveFailures", func(t *testing.T) {
+ // Create a client that always returns an error
+ mockClient := mockAutoNATClient{
+ F: func(_ context.Context, _ []autonatv2.Request) (autonatv2.Result, error) {
+ return autonatv2.Result{}, errors.New("test error")
+ },
+ }
+
+ pm := newProbeManager(time.Now)
+ pm.UpdateAddrs([]ma.Multiaddr{pub1})
+ r := newTracker(mockClient, pm)
+ result := r.refreshReachability()
+ require.True(t, <-result.BackoffCh)
+ require.Equal(t, pm.InProgressProbes(), 0)
+ })
+
+ t.Run("quits on cancellation", func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ block := make(chan struct{})
+ mockClient := mockAutoNATClient{
+ F: func(_ context.Context, _ []autonatv2.Request) (autonatv2.Result, error) {
+ block <- struct{}{}
+ return autonatv2.Result{}, nil
+ },
+ }
+
+ pm := newProbeManager(time.Now)
+ pm.UpdateAddrs([]ma.Multiaddr{pub1})
+ r := &addrsReachabilityTracker{
+ ctx: ctx,
+ cancel: cancel,
+ client: mockClient,
+ probeManager: pm,
+ clock: clock.New(),
+ }
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ result := r.refreshReachability()
+ assert.False(t, <-result.BackoffCh)
+ assert.Equal(t, pm.InProgressProbes(), 0)
+ }()
+
+ cancel()
+ time.Sleep(50 * time.Millisecond) // wait for the cancellation to be processed
+
+ outer:
+ for i := 0; i < defaultMaxConcurrency; i++ {
+ select {
+ case <-block:
+ default:
+ break outer
+ }
+ }
+ select {
+ case <-block:
+ t.Fatal("expected no more requests")
+ case <-time.After(50 * time.Millisecond):
+ }
+ wg.Wait()
+ })
+
+ t.Run("handles refusals", func(t *testing.T) {
+ pub1, _ := ma.NewMultiaddr("/ip4/1.1.1.1/tcp/1")
+
+ mockClient := mockAutoNATClient{
+ F: func(_ context.Context, reqs []autonatv2.Request) (autonatv2.Result, error) {
+ for i, req := range reqs {
+ if req.Addr.Equal(pub1) {
+ return autonatv2.Result{Addr: pub1, Idx: i, Reachability: network.ReachabilityPublic}, nil
+ }
+ }
+ return autonatv2.Result{AllAddrsRefused: true}, nil
+ },
+ }
+
+ pm := newProbeManager(time.Now)
+ pm.UpdateAddrs([]ma.Multiaddr{pub2, pub1})
+ r := newTracker(mockClient, pm)
+
+ result := r.refreshReachability()
+ require.False(t, <-result.BackoffCh)
+
+ reachable, unreachable, _ := pm.AppendConfirmedAddrs(nil, nil, nil)
+ require.Equal(t, reachable, []ma.Multiaddr{pub1})
+ require.Empty(t, unreachable)
+ require.Equal(t, pm.InProgressProbes(), 0)
+ })
+
+ t.Run("handles completions", func(t *testing.T) {
+ mockClient := mockAutoNATClient{
+ F: func(_ context.Context, reqs []autonatv2.Request) (autonatv2.Result, error) {
+ for i, req := range reqs {
+ if req.Addr.Equal(pub1) {
+ return autonatv2.Result{Addr: pub1, Idx: i, Reachability: network.ReachabilityPublic}, nil
+ }
+ if req.Addr.Equal(pub2) {
+ return autonatv2.Result{Addr: pub2, Idx: i, Reachability: network.ReachabilityPrivate}, nil
+ }
+ }
+ return autonatv2.Result{AllAddrsRefused: true}, nil
+ },
+ }
+ pm := newProbeManager(time.Now)
+ pm.UpdateAddrs([]ma.Multiaddr{pub2, pub1})
+ r := newTracker(mockClient, pm)
+ result := r.refreshReachability()
+ require.False(t, <-result.BackoffCh)
+
+ reachable, unreachable, _ := pm.AppendConfirmedAddrs(nil, nil, nil)
+ require.Equal(t, reachable, []ma.Multiaddr{pub1})
+ require.Equal(t, unreachable, []ma.Multiaddr{pub2})
+ require.Equal(t, pm.InProgressProbes(), 0)
+ })
+}
+
+func TestAddrStatusProbeCount(t *testing.T) {
+ cases := []struct {
+ inputs string
+ wantRequiredProbes int
+ wantReachability network.Reachability
+ }{
+ {
+ inputs: "",
+ wantRequiredProbes: 3,
+ wantReachability: network.ReachabilityUnknown,
+ },
+ {
+ inputs: "S",
+ wantRequiredProbes: 2,
+ wantReachability: network.ReachabilityUnknown,
+ },
+ {
+ inputs: "SS",
+ wantRequiredProbes: 1,
+ wantReachability: network.ReachabilityPublic,
+ },
+ {
+ inputs: "SSS",
+ wantRequiredProbes: 0,
+ wantReachability: network.ReachabilityPublic,
+ },
+ {
+ inputs: "SSSSSSSF",
+ wantRequiredProbes: 1,
+ wantReachability: network.ReachabilityPublic,
+ },
+ {
+ inputs: "SFSFSSSS",
+ wantRequiredProbes: 0,
+ wantReachability: network.ReachabilityPublic,
+ },
+ {
+ inputs: "SSSSSFSF",
+ wantRequiredProbes: 2,
+ wantReachability: network.ReachabilityUnknown,
+ },
+ {
+ inputs: "FF",
+ wantRequiredProbes: 1,
+ wantReachability: network.ReachabilityPrivate,
+ },
+ }
+ for _, c := range cases {
+ t.Run(c.inputs, func(t *testing.T) {
+ now := time.Time{}.Add(1 * time.Second)
+ ao := addrStatus{}
+ for _, r := range c.inputs {
+ if r == 'S' {
+ ao.AddOutcome(now, network.ReachabilityPublic, 5)
+ } else {
+ ao.AddOutcome(now, network.ReachabilityPrivate, 5)
+ }
+ now = now.Add(1 * time.Second)
+ }
+ require.Equal(t, ao.RequiredProbeCount(now), c.wantRequiredProbes)
+ require.Equal(t, ao.Reachability(), c.wantReachability)
+ if c.wantRequiredProbes == 0 {
+ now = now.Add(highConfidenceAddrProbeInterval + 10*time.Microsecond)
+ require.Equal(t, ao.RequiredProbeCount(now), 1)
+ }
+
+ now = now.Add(1 * time.Second)
+ ao.RemoveBefore(now)
+ require.Len(t, ao.outcomes, 0)
+ })
+ }
+}
+
+func BenchmarkAddrTracker(b *testing.B) {
+ cl := clock.NewMock()
+ t := newProbeManager(cl.Now)
+
+ addrs := make([]ma.Multiaddr, 20)
+ for i := range addrs {
+ addrs[i] = ma.StringCast(fmt.Sprintf("/ip4/1.1.1.1/tcp/%d", rand.Intn(1000)))
+ }
+ t.UpdateAddrs(addrs)
+ b.ReportAllocs()
+ b.ResetTimer()
+ p := t.GetProbe()
+ for i := 0; i < b.N; i++ {
+ pp := t.GetProbe()
+ if len(pp) == 0 {
+ pp = p
+ }
+ t.MarkProbeInProgress(pp)
+ t.CompleteProbe(pp, autonatv2.Result{Addr: pp[0].Addr, Idx: 0, Reachability: network.ReachabilityPublic}, nil)
+ }
+}
+
+func FuzzAddrsReachabilityTracker(f *testing.F) {
+ type autonatv2Response struct {
+ Result autonatv2.Result
+ Err error
+ }
+
+ newMockClient := func(b []byte) mockAutoNATClient {
+ count := 0
+ return mockAutoNATClient{
+ F: func(_ context.Context, reqs []autonatv2.Request) (autonatv2.Result, error) {
+ if len(b) == 0 {
+ return autonatv2.Result{}, nil
+ }
+ count = (count + 1) % len(b)
+ if b[count]%3 == 0 {
+ // some address confirmed
+ c1 := (count + 1) % len(b)
+ c2 := (count + 2) % len(b)
+ rch := network.Reachability(b[c1] % 3)
+ n := int(b[c2]) % len(reqs)
+ return autonatv2.Result{
+ Addr: reqs[n].Addr,
+ Idx: n,
+ Reachability: rch,
+ }, nil
+ }
+ outcomes := []autonatv2Response{
+ {Result: autonatv2.Result{AllAddrsRefused: true}},
+ {Err: errors.New("test error")},
+ {Err: autonatv2.ErrPrivateAddrs},
+ {Err: autonatv2.ErrNoPeers},
+ {Result: autonatv2.Result{}, Err: nil},
+ {Result: autonatv2.Result{Addr: reqs[0].Addr, Idx: 0, Reachability: network.ReachabilityPublic}},
+ {Result: autonatv2.Result{
+ Addr: reqs[0].Addr,
+ Idx: 0,
+ Reachability: network.ReachabilityPublic,
+ AllAddrsRefused: true,
+ }},
+ {Result: autonatv2.Result{
+ Addr: reqs[0].Addr,
+ Idx: len(reqs) - 1, // invalid idx
+ Reachability: network.ReachabilityPublic,
+ AllAddrsRefused: false,
+ }},
+ }
+ outcome := outcomes[int(b[count])%len(outcomes)]
+ return outcome.Result, outcome.Err
+ },
+ }
+ }
+
+ // TODO: Move this to go-multiaddrs
+ getProto := func(protos []byte) ma.Multiaddr {
+ protoType := 0
+ if len(protos) > 0 {
+ protoType = int(protos[0])
+ }
+
+ port1, port2 := 0, 0
+ if len(protos) > 1 {
+ port1 = int(protos[1])
+ }
+ if len(protos) > 2 {
+ port2 = int(protos[2])
+ }
+ protoTemplates := []string{
+ "/tcp/%d/",
+ "/udp/%d/",
+ "/udp/%d/quic-v1/",
+ "/udp/%d/quic-v1/tcp/%d",
+ "/udp/%d/quic-v1/webtransport/",
+ "/udp/%d/webrtc/",
+ "/udp/%d/webrtc-direct/",
+ "/unix/hello/",
+ }
+ s := protoTemplates[protoType%len(protoTemplates)]
+ port1 %= (1 << 16)
+ if strings.Count(s, "%d") == 1 {
+ return ma.StringCast(fmt.Sprintf(s, port1))
+ }
+ port2 %= (1 << 16)
+ return ma.StringCast(fmt.Sprintf(s, port1, port2))
+ }
+
+ getIP := func(ips []byte) ma.Multiaddr {
+ ipType := 0
+ if len(ips) > 0 {
+ ipType = int(ips[0])
+ }
+ ips = ips[1:]
+ var x, y int64
+ split := 128 / 8
+ if len(ips) < split {
+ split = len(ips)
+ }
+ var b [8]byte
+ copy(b[:], ips[:split])
+ x = int64(binary.LittleEndian.Uint64(b[:]))
+ clear(b[:])
+ copy(b[:], ips[split:])
+ y = int64(binary.LittleEndian.Uint64(b[:]))
+
+ var ip netip.Addr
+ switch ipType % 3 {
+ case 0:
+ ip = netip.AddrFrom4([4]byte{byte(x), byte(x >> 8), byte(x >> 16), byte(x >> 24)})
+ return ma.StringCast(fmt.Sprintf("/ip4/%s/", ip))
+ case 1:
+ pubIP := net.ParseIP("2005::") // Public IP address
+ x := int64(binary.LittleEndian.Uint64(pubIP[0:8]))
+ ip = netip.AddrFrom16([16]byte{
+ byte(x), byte(x >> 8), byte(x >> 16), byte(x >> 24),
+ byte(x >> 32), byte(x >> 40), byte(x >> 48), byte(x >> 56),
+ byte(y), byte(y >> 8), byte(y >> 16), byte(y >> 24),
+ byte(y >> 32), byte(y >> 40), byte(y >> 48), byte(y >> 56),
+ })
+ return ma.StringCast(fmt.Sprintf("/ip6/%s/", ip))
+ default:
+ ip := netip.AddrFrom16([16]byte{
+ byte(x), byte(x >> 8), byte(x >> 16), byte(x >> 24),
+ byte(x >> 32), byte(x >> 40), byte(x >> 48), byte(x >> 56),
+ byte(y), byte(y >> 8), byte(y >> 16), byte(y >> 24),
+ byte(y >> 32), byte(y >> 40), byte(y >> 48), byte(y >> 56),
+ })
+ return ma.StringCast(fmt.Sprintf("/ip6/%s/", ip))
+ }
+ }
+
+ getAddr := func(addrType int, ips, protos []byte) ma.Multiaddr {
+ switch addrType % 4 {
+ case 0:
+ return getIP(ips).Encapsulate(getProto(protos))
+ case 1:
+ return getProto(protos)
+ case 2:
+ return nil
+ default:
+ return getIP(ips).Encapsulate(getProto(protos))
+ }
+ }
+
+ getDNSAddr := func(hostNameBytes, protos []byte) ma.Multiaddr {
+ hostName := strings.ReplaceAll(string(hostNameBytes), "\\", "")
+ hostName = strings.ReplaceAll(hostName, "/", "")
+ if hostName == "" {
+ hostName = "localhost"
+ }
+ dnsType := 0
+ if len(hostNameBytes) > 0 {
+ dnsType = int(hostNameBytes[0])
+ }
+ dnsProtos := []string{"dns", "dns4", "dns6", "dnsaddr"}
+ da := ma.StringCast(fmt.Sprintf("/%s/%s/", dnsProtos[dnsType%len(dnsProtos)], hostName))
+ return da.Encapsulate(getProto(protos))
+ }
+
+ const maxAddrs = 1000
+ getAddrs := func(numAddrs int, ips, protos, hostNames []byte) []ma.Multiaddr {
+ if len(ips) == 0 || len(protos) == 0 || len(hostNames) == 0 {
+ return nil
+ }
+ numAddrs = ((numAddrs % maxAddrs) + maxAddrs) % maxAddrs
+ addrs := make([]ma.Multiaddr, numAddrs)
+ ipIdx := 0
+ protoIdx := 0
+ for i := range numAddrs {
+ addrs[i] = getAddr(i, ips[ipIdx:], protos[protoIdx:])
+ ipIdx = (ipIdx + 1) % len(ips)
+ protoIdx = (protoIdx + 1) % len(protos)
+ }
+ maxDNSAddrs := 10
+ protoIdx = 0
+ for i := 0; i < len(hostNames) && i < maxDNSAddrs; i += 2 {
+ ed := min(i+2, len(hostNames))
+ addrs = append(addrs, getDNSAddr(hostNames[i:ed], protos[protoIdx:]))
+ protoIdx = (protoIdx + 1) % len(protos)
+ }
+ return addrs
+ }
+
+ cl := clock.NewMock()
+ f.Fuzz(func(t *testing.T, numAddrs int, ips, protos, hostNames, autonatResponses []byte) {
+ tr := newAddrsReachabilityTracker(newMockClient(autonatResponses), nil, cl, nil)
+ require.NoError(t, tr.Start())
+ tr.UpdateAddrs(getAddrs(numAddrs, ips, protos, hostNames))
+
+ // fuzz tests need to finish in 10 seconds for some reason
+ // https://github.com/golang/go/issues/48157
+ // https://github.com/golang/go/commit/5d24203c394e6b64c42a9f69b990d94cb6c8aad4#diff-4e3b9481b8794eb058998e2bec389d3db7a23c54e67ac0f7259a3a5d2c79fd04R474-R483
+ const maxIters = 20
+ for range maxIters {
+ cl.Add(5 * time.Minute)
+ time.Sleep(100 * time.Millisecond)
+ }
+ require.NoError(t, tr.Close())
+ })
+}
diff --git a/p2p/host/basic/basic_host.go b/p2p/host/basic/basic_host.go
index 0d566f433a..96e7ed8be1 100644
--- a/p2p/host/basic/basic_host.go
+++ b/p2p/host/basic/basic_host.go
@@ -2,23 +2,37 @@ package basichost
import (
"context"
+ "errors"
+ "fmt"
"io"
+ "log/slog"
+ "slices"
+ "sync"
"time"
- identify "github.com/libp2p/go-libp2p/p2p/protocol/identify"
-
- logging "github.com/ipfs/go-log"
- goprocess "github.com/jbenet/goprocess"
- circuit "github.com/libp2p/go-libp2p-circuit"
- ifconnmgr "github.com/libp2p/go-libp2p-interface-connmgr"
- metrics "github.com/libp2p/go-libp2p-metrics"
- mstream "github.com/libp2p/go-libp2p-metrics/stream"
- inet "github.com/libp2p/go-libp2p-net"
- peer "github.com/libp2p/go-libp2p-peer"
- pstore "github.com/libp2p/go-libp2p-peerstore"
- protocol "github.com/libp2p/go-libp2p-protocol"
+ "github.com/libp2p/go-libp2p/core/connmgr"
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/event"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/libp2p/go-libp2p/core/protocol"
+ "github.com/libp2p/go-libp2p/core/record"
+ "github.com/libp2p/go-libp2p/p2p/host/autonat"
+ "github.com/libp2p/go-libp2p/p2p/host/eventbus"
+ "github.com/libp2p/go-libp2p/p2p/host/pstoremanager"
+ "github.com/libp2p/go-libp2p/p2p/host/relaysvc"
+ "github.com/libp2p/go-libp2p/p2p/protocol/autonatv2"
+ relayv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay"
+ "github.com/libp2p/go-libp2p/p2p/protocol/holepunch"
+ "github.com/libp2p/go-libp2p/p2p/protocol/identify"
+ "github.com/libp2p/go-libp2p/p2p/protocol/ping"
+ "github.com/prometheus/client_golang/prometheus"
+
+ logging "github.com/libp2p/go-libp2p/gologshim"
ma "github.com/multiformats/go-multiaddr"
- madns "github.com/multiformats/go-multiaddr-dns"
+ manet "github.com/multiformats/go-multiaddr/net"
msmux "github.com/multiformats/go-multistream"
)
@@ -26,257 +40,477 @@ var log = logging.Logger("basichost")
var (
// DefaultNegotiationTimeout is the default value for HostOpts.NegotiationTimeout.
- DefaultNegotiationTimeout = time.Second * 60
+ DefaultNegotiationTimeout = 10 * time.Second
// DefaultAddrsFactory is the default value for HostOpts.AddrsFactory.
DefaultAddrsFactory = func(addrs []ma.Multiaddr) []ma.Multiaddr { return addrs }
)
+const maxPeerRecordSize = 8 * 1024 // 8k to be compatible with identify's limit
+
// AddrsFactory functions can be passed to New in order to override
// addresses returned by Addrs.
type AddrsFactory func([]ma.Multiaddr) []ma.Multiaddr
-// Option is a type used to pass in options to the host.
-//
-// Deprecated in favor of HostOpts and NewHost.
-type Option int
-
-// NATPortMap makes the host attempt to open port-mapping in NAT devices
-// for all its listeners. Pass in this option in the constructor to
-// asynchronously a) find a gateway, b) open port mappings, c) republish
-// port mappings periodically. The NATed addresses are included in the
-// Host's Addrs() list.
-//
-// This option is deprecated in favor of HostOpts and NewHost.
-const NATPortMap Option = iota
-
// BasicHost is the basic implementation of the host.Host interface. This
// particular host implementation:
-// * uses a protocol muxer to mux per-protocol streams
-// * uses an identity service to send + receive node information
-// * uses a nat service to establish NAT port mappings
+// - uses a protocol muxer to mux per-protocol streams
+// - uses an identity service to send + receive node information
+// - uses a nat service to establish NAT port mappings
type BasicHost struct {
- network inet.Network
- mux *msmux.MultistreamMuxer
- ids *identify.IDService
- natmgr NATManager
- addrs AddrsFactory
- maResolver *madns.Resolver
- cmgr ifconnmgr.ConnManager
+ ctx context.Context
+ ctxCancel context.CancelFunc
+ // ensures we shut down only once
+ closeSync sync.Once
+ // keep track of resources we need to wait on before shutting down
+ refCount sync.WaitGroup
+
+ network network.Network
+ psManager *pstoremanager.PeerstoreManager
+ mux *msmux.MultistreamMuxer[protocol.ID]
+ ids identify.IDService
+ hps *holepunch.Service
+ pings *ping.PingService
+ cmgr connmgr.ConnManager
+ eventbus event.Bus
+ relayManager *relaysvc.RelayManager
negtimeout time.Duration
- proc goprocess.Process
+ emitters struct {
+ evtLocalProtocolsUpdated event.Emitter
+ evtLocalAddrsUpdated event.Emitter
+ }
+
+ disableSignedPeerRecord bool
+ signKey crypto.PrivKey
+ caBook peerstore.CertifiedAddrBook
- bwc metrics.Reporter
+ autoNATMx sync.RWMutex
+ autoNat autonat.AutoNAT
+
+ autonatv2 *autonatv2.AutoNAT
+ addressManager *addrsManager
+ addrsUpdatedChan chan struct{}
}
+var _ host.Host = (*BasicHost)(nil)
+
// HostOpts holds options that can be passed to NewHost in order to
// customize construction of the *BasicHost.
type HostOpts struct {
+ // EventBus sets the event bus. Will construct a new event bus if omitted.
+ EventBus event.Bus
// MultistreamMuxer is essential for the *BasicHost and will use a sensible default value if omitted.
- MultistreamMuxer *msmux.MultistreamMuxer
+ MultistreamMuxer *msmux.MultistreamMuxer[protocol.ID]
- // NegotiationTimeout determines the read and write timeouts on streams.
- // If 0 or omitted, it will use DefaultNegotiationTimeout.
- // If below 0, timeouts on streams will be deactivated.
+ // NegotiationTimeout determines the read and write timeouts when negotiating
+ // protocols for streams. If 0 or omitted, it will use
+ // DefaultNegotiationTimeout. If below 0, timeouts on streams will be
+ // deactivated.
NegotiationTimeout time.Duration
- // IdentifyService holds an implementation of the /ipfs/id/ protocol.
- // If omitted, a new *identify.IDService will be used.
- IdentifyService *identify.IDService
-
// AddrsFactory holds a function which can be used to override or filter the result of Addrs.
// If omitted, there's no override or filtering, and the results of Addrs and AllAddrs are the same.
AddrsFactory AddrsFactory
- // MultiaddrResolves holds the go-multiaddr-dns.Resolver used for resolving
- // /dns4, /dns6, and /dnsaddr addresses before trying to connect to a peer.
- MultiaddrResolver *madns.Resolver
-
// NATManager takes care of setting NAT port mappings, and discovering external addresses.
// If omitted, this will simply be disabled.
- NATManager NATManager
-
- // BandwidthReporter is used for collecting aggregate metrics of the
- // bandwidth used by various protocols.
- BandwidthReporter metrics.Reporter
+ NATManager func(network.Network) NATManager
// ConnManager is a libp2p connection manager
- ConnManager ifconnmgr.ConnManager
+ ConnManager connmgr.ConnManager
+
+ // EnablePing indicates whether to instantiate the ping service
+ EnablePing bool
+
+ // EnableRelayService enables the circuit v2 relay (if we're publicly reachable).
+ EnableRelayService bool
+ // RelayServiceOpts are options for the circuit v2 relay.
+ RelayServiceOpts []relayv2.Option
+
+ // UserAgent sets the user-agent for the host.
+ UserAgent string
+
+ // ProtocolVersion sets the protocol version for the host.
+ ProtocolVersion string
- // Relay indicates whether the host should use circuit relay transport
- EnableRelay bool
+ // DisableSignedPeerRecord disables the generation of Signed Peer Records on this host.
+ DisableSignedPeerRecord bool
- // RelayOpts are options for the relay transport; only meaningful when Relay=true
- RelayOpts []circuit.RelayOpt
+ // EnableHolePunching enables the peer to initiate/respond to hole punching attempts for NAT traversal.
+ EnableHolePunching bool
+ // HolePunchingOptions are options for the hole punching service
+ HolePunchingOptions []holepunch.Option
+
+ // EnableMetrics enables the metrics subsystems
+ EnableMetrics bool
+ // PrometheusRegisterer is the PrometheusRegisterer used for metrics
+ PrometheusRegisterer prometheus.Registerer
+ // AutoNATv2MetricsTracker tracks AutoNATv2 address reachability metrics
+ AutoNATv2MetricsTracker MetricsTracker
+
+ // ObservedAddrsManager maps our local listen addresses to external publicly observed addresses.
+ ObservedAddrsManager ObservedAddrsManager
+
+ AutoNATv2 *autonatv2.AutoNAT
}
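// A standalone sketch of the NegotiationTimeout rule documented in HostOpts
// above, assuming only the three cases the comment names: zero falls back to
// the default, negative disables stream deadlines, and a positive value is
// used as-is. All identifiers below are illustrative, not part of the patch.
package main

import (
    "fmt"
    "time"
)

const defaultNegotiationTimeout = 10 * time.Second // mirrors DefaultNegotiationTimeout

func effectiveNegotiationTimeout(opt time.Duration) (d time.Duration, deadlinesEnabled bool) {
    switch {
    case opt == 0:
        return defaultNegotiationTimeout, true
    case opt < 0:
        return 0, false // deadlines on streams are deactivated
    default:
        return opt, true
    }
}

func main() {
    for _, opt := range []time.Duration{0, -1, 30 * time.Second} {
        d, enabled := effectiveNegotiationTimeout(opt)
        fmt.Println(opt, "->", d, enabled)
    }
}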
// NewHost constructs a new *BasicHost and activates it by attaching its stream and connection handlers to the given inet.Network.
-func NewHost(ctx context.Context, net inet.Network, opts *HostOpts) (*BasicHost, error) {
- ctx, cancel := context.WithCancel(ctx)
- h := &BasicHost{
- network: net,
- mux: msmux.NewMultistreamMuxer(),
- negtimeout: DefaultNegotiationTimeout,
- addrs: DefaultAddrsFactory,
- maResolver: madns.DefaultResolver,
+func NewHost(n network.Network, opts *HostOpts) (*BasicHost, error) {
+ if opts == nil {
+ opts = &HostOpts{}
+ }
+ if opts.EventBus == nil {
+ opts.EventBus = eventbus.NewBus()
}
- h.proc = goprocess.WithTeardown(func() error {
- if h.natmgr != nil {
- h.natmgr.Close()
- }
- cancel()
- return h.Network().Close()
- })
+ psManager, err := pstoremanager.NewPeerstoreManager(n.Peerstore(), opts.EventBus, n)
+ if err != nil {
+ return nil, err
+ }
+
+ hostCtx, cancel := context.WithCancel(context.Background())
+ h := &BasicHost{
+ network: n,
+ psManager: psManager,
+ mux: msmux.NewMultistreamMuxer[protocol.ID](),
+ negtimeout: DefaultNegotiationTimeout,
+ eventbus: opts.EventBus,
+ ctx: hostCtx,
+ ctxCancel: cancel,
+ disableSignedPeerRecord: opts.DisableSignedPeerRecord,
+ addrsUpdatedChan: make(chan struct{}, 1),
+ }
+
+ if h.emitters.evtLocalProtocolsUpdated, err = h.eventbus.Emitter(&event.EvtLocalProtocolsUpdated{}, eventbus.Stateful); err != nil {
+ return nil, err
+ }
+ if h.emitters.evtLocalAddrsUpdated, err = h.eventbus.Emitter(&event.EvtLocalAddressesUpdated{}, eventbus.Stateful); err != nil {
+ return nil, err
+ }
if opts.MultistreamMuxer != nil {
h.mux = opts.MultistreamMuxer
}
- if opts.IdentifyService != nil {
- h.ids = opts.IdentifyService
- } else {
- // we can't set this as a default above because it depends on the *BasicHost.
- h.ids = identify.NewIDService(h)
+ idOpts := []identify.Option{
+ identify.UserAgent(opts.UserAgent),
+ identify.ProtocolVersion(opts.ProtocolVersion),
}
- if uint64(opts.NegotiationTimeout) != 0 {
- h.negtimeout = opts.NegotiationTimeout
+ // we can't set this as a default above because it depends on the *BasicHost.
+ if h.disableSignedPeerRecord {
+ idOpts = append(idOpts, identify.DisableSignedPeerRecord())
+ }
+ if opts.EnableMetrics {
+ idOpts = append(idOpts,
+ identify.WithMetricsTracer(
+ identify.NewMetricsTracer(identify.WithRegisterer(opts.PrometheusRegisterer))))
+ }
+
+ h.ids, err = identify.NewIDService(h, idOpts...)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create Identify service: %s", err)
}
+ addrFactory := DefaultAddrsFactory
if opts.AddrsFactory != nil {
- h.addrs = opts.AddrsFactory
+ addrFactory = opts.AddrsFactory
}
+ var natmgr NATManager
if opts.NATManager != nil {
- h.natmgr = opts.NATManager
+ natmgr = opts.NATManager(h.Network())
+ }
+
+ if opts.AutoNATv2 != nil {
+ h.autonatv2 = opts.AutoNATv2
+ }
+
+ var autonatv2Client autonatv2Client // avoid typed nil errors
+ if h.autonatv2 != nil {
+ autonatv2Client = h.autonatv2
+ }
+
+ // Create addCertHashes function with interface assertion for swarm
+ addCertHashesFunc := func(addrs []ma.Multiaddr) []ma.Multiaddr {
+ return addrs
+ }
+ if swarm, ok := h.Network().(interface {
+ AddCertHashes(addrs []ma.Multiaddr) []ma.Multiaddr
+ }); ok {
+ addCertHashesFunc = swarm.AddCertHashes
}
- if opts.MultiaddrResolver != nil {
- h.maResolver = opts.MultiaddrResolver
+ h.addressManager, err = newAddrsManager(
+ h.eventbus,
+ natmgr,
+ addrFactory,
+ h.Network().ListenAddresses,
+ addCertHashesFunc,
+ opts.ObservedAddrsManager,
+ h.addrsUpdatedChan,
+ autonatv2Client,
+ opts.EnableMetrics,
+ opts.PrometheusRegisterer,
+ )
+ if err != nil {
+ return nil, fmt.Errorf("failed to create address service: %w", err)
+ }
+
+ if opts.EnableHolePunching {
+ if opts.EnableMetrics {
+ hpOpts := []holepunch.Option{
+ holepunch.WithMetricsTracer(holepunch.NewMetricsTracer(holepunch.WithRegisterer(opts.PrometheusRegisterer)))}
+ opts.HolePunchingOptions = append(hpOpts, opts.HolePunchingOptions...)
+ }
+ h.hps, err = holepunch.NewService(h, h.ids, h.addressManager.HolePunchAddrs, opts.HolePunchingOptions...)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create hole punch service: %w", err)
+ }
}
- if opts.BandwidthReporter != nil {
- h.bwc = opts.BandwidthReporter
- h.ids.Reporter = opts.BandwidthReporter
+ if opts.NegotiationTimeout != 0 {
+ h.negtimeout = opts.NegotiationTimeout
}
if opts.ConnManager == nil {
- h.cmgr = &ifconnmgr.NullConnMgr{}
+ h.cmgr = &connmgr.NullConnMgr{}
} else {
h.cmgr = opts.ConnManager
- net.Notify(h.cmgr.Notifee())
+ n.Notify(h.cmgr.Notifee())
+ }
+
+ if opts.EnableRelayService {
+ if opts.EnableMetrics {
+ // Prefer explicitly provided metrics tracer
+ metricsOpt := []relayv2.Option{
+ relayv2.WithMetricsTracer(
+ relayv2.NewMetricsTracer(relayv2.WithRegisterer(opts.PrometheusRegisterer)))}
+ opts.RelayServiceOpts = append(metricsOpt, opts.RelayServiceOpts...)
+ }
+ h.relayManager = relaysvc.NewRelayManager(h, opts.RelayServiceOpts...)
}
- net.SetConnHandler(h.newConnHandler)
- net.SetStreamHandler(h.newStreamHandler)
+ if opts.EnablePing {
+ h.pings = ping.NewPingService(h)
+ }
+
+ if !h.disableSignedPeerRecord {
+ h.signKey = h.Peerstore().PrivKey(h.ID())
+ cab, ok := peerstore.GetCertifiedAddrBook(h.Peerstore())
+ if !ok {
+ return nil, errors.New("peerstore should also be a certified address book")
+ }
+ h.caBook = cab
- if opts.EnableRelay {
- err := circuit.AddRelayTransport(ctx, h, opts.RelayOpts...)
+ rec, err := h.makeSignedPeerRecord(h.addressManager.Addrs())
if err != nil {
- h.Close()
- return nil, err
+ return nil, fmt.Errorf("failed to create signed record for self: %w", err)
+ }
+ if _, err := h.caBook.ConsumePeerRecord(rec, peerstore.PermanentAddrTTL); err != nil {
+ return nil, fmt.Errorf("failed to persist signed record to peerstore: %w", err)
}
}
+ n.SetStreamHandler(h.newStreamHandler)
return h, nil
}
-// New constructs and sets up a new *BasicHost with given Network and options.
-// Three options can be passed: NATPortMap, AddrsFactory, and metrics.Reporter.
-// This function is deprecated in favor of NewHost and HostOpts.
-func New(net inet.Network, opts ...interface{}) *BasicHost {
- hostopts := &HostOpts{}
-
- for _, o := range opts {
- switch o := o.(type) {
- case Option:
- switch o {
- case NATPortMap:
- hostopts.NATManager = newNatManager(net)
- }
- case metrics.Reporter:
- hostopts.BandwidthReporter = o
- case AddrsFactory:
- hostopts.AddrsFactory = AddrsFactory(o)
- case ifconnmgr.ConnManager:
- hostopts.ConnManager = o
- case *madns.Resolver:
- hostopts.MultiaddrResolver = o
+// Start starts background tasks in the host
+// TODO: Return error and handle it in the caller?
+func (h *BasicHost) Start() {
+ h.psManager.Start()
+ if h.autonatv2 != nil {
+ err := h.autonatv2.Start(h)
+ if err != nil {
+ log.Error("autonat v2 failed to start", "err", err)
}
}
+ // register to be notified when the network's listen addrs change,
+ // so we can update our address set and push events if needed
+ h.Network().Notify(h.addressManager.NetNotifee())
+ if err := h.addressManager.Start(); err != nil {
+ log.Error("address service failed to start", "err", err)
+ }
- h, err := NewHost(context.Background(), net, hostopts)
- if err != nil {
- // this cannot happen with legacy options
- // plus we want to keep the (deprecated) legacy interface unchanged
- panic(err)
+ if !h.disableSignedPeerRecord {
+ // Ensure we have the correct peer record after Start returns
+ rec, err := h.makeSignedPeerRecord(h.addressManager.Addrs())
+ if err != nil {
+ log.Error("failed to create signed record", "err", err)
+ }
+ if _, err := h.caBook.ConsumePeerRecord(rec, peerstore.PermanentAddrTTL); err != nil {
+ log.Error("failed to persist signed record to peerstore", "err", err)
+ }
}
- return h
-}
+ h.ids.Start()
-// newConnHandler is the remote-opened conn handler for inet.Network
-func (h *BasicHost) newConnHandler(c inet.Conn) {
- // Clear protocols on connecting to new peer to avoid issues caused
- // by misremembering protocols between reconnects
- h.Peerstore().SetProtocols(c.RemotePeer())
- h.ids.IdentifyConn(c)
+ h.refCount.Add(1)
+ go h.background()
}
-// newStreamHandler is the remote-opened stream handler for inet.Network
+// newStreamHandler is the remote-opened stream handler for network.Network
// TODO: this feels a bit wonky
-func (h *BasicHost) newStreamHandler(s inet.Stream) {
+func (h *BasicHost) newStreamHandler(s network.Stream) {
before := time.Now()
if h.negtimeout > 0 {
if err := s.SetDeadline(time.Now().Add(h.negtimeout)); err != nil {
- log.Error("setting stream deadline: ", err)
+ log.Debug("setting stream deadline", "err", err)
s.Reset()
return
}
}
- lzc, protoID, handle, err := h.Mux().NegotiateLazy(s)
- took := time.Now().Sub(before)
+ protoID, handle, err := h.Mux().Negotiate(s)
+ took := time.Since(before)
if err != nil {
if err == io.EOF {
- logf := log.Debugf
+ lvl := slog.LevelDebug
if took > time.Second*10 {
- logf = log.Warningf
+ lvl = slog.LevelWarn
}
- logf("protocol EOF: %s (took %s)", s.Conn().RemotePeer(), took)
+ log.Log(context.Background(), lvl, "protocol EOF", "remote_peer", s.Conn().RemotePeer(), "duration", took)
} else {
- log.Infof("protocol mux failed: %s (took %s)", err, took)
+ log.Debug("protocol mux failed", "err", err, "duration", took, "stream_id", s.ID(), "remote_peer", s.Conn().RemotePeer(), "remote_multiaddr", s.Conn().RemoteMultiaddr())
}
- s.Reset()
+ s.ResetWithError(network.StreamProtocolNegotiationFailed)
return
}
- s = &streamWrapper{
- Stream: s,
- rw: lzc,
- }
-
if h.negtimeout > 0 {
if err := s.SetDeadline(time.Time{}); err != nil {
- log.Error("resetting stream deadline: ", err)
+ log.Debug("resetting stream deadline", "err", err)
s.Reset()
return
}
}
- s.SetProtocol(protocol.ID(protoID))
+ if err := s.SetProtocol(protoID); err != nil {
+ log.Debug("error setting stream protocol", "err", err)
+ s.ResetWithError(network.StreamResourceLimitExceeded)
+ return
+ }
+
+ log.Debug("negotiated", "protocol", protoID, "duration", took)
+
+ handle(protoID, s)
+}
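// A standalone sketch of the inbound flow in newStreamHandler above, assuming
// a generic deadline-capable stream instead of a libp2p network.Stream: arm a
// deadline only for the negotiation phase, then clear it before the protocol
// handler takes over. fakeStream and negotiateWithTimeout are illustrative.
package main

import (
    "fmt"
    "time"
)

type deadlineStream interface {
    SetDeadline(time.Time) error
}

func negotiateWithTimeout(s deadlineStream, timeout time.Duration, negotiate func() (string, error)) (string, error) {
    if timeout > 0 {
        if err := s.SetDeadline(time.Now().Add(timeout)); err != nil {
            return "", err
        }
    }
    proto, err := negotiate() // e.g. multistream-select, as in the code above
    if err != nil {
        return "", err
    }
    if timeout > 0 {
        // Clear the deadline so the protocol handler isn't subject to it.
        if err := s.SetDeadline(time.Time{}); err != nil {
            return "", err
        }
    }
    return proto, nil
}

type fakeStream struct{ deadline time.Time }

func (f *fakeStream) SetDeadline(t time.Time) error { f.deadline = t; return nil }

func main() {
    proto, err := negotiateWithTimeout(&fakeStream{}, time.Second, func() (string, error) {
        return "/echo/1.0.0", nil
    })
    fmt.Println(proto, err)
}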
+
+func (h *BasicHost) makeUpdatedAddrEvent(prev, current []ma.Multiaddr) *event.EvtLocalAddressesUpdated {
+ if prev == nil && current == nil {
+ return nil
+ }
+ prevmap := make(map[string]ma.Multiaddr, len(prev))
+ currmap := make(map[string]ma.Multiaddr, len(current))
+ evt := &event.EvtLocalAddressesUpdated{Diffs: true}
+ addrsAdded := false
- if h.bwc != nil {
- s = mstream.WrapStream(s, h.bwc)
+ for _, addr := range prev {
+ prevmap[string(addr.Bytes())] = addr
+ }
+ for _, addr := range current {
+ currmap[string(addr.Bytes())] = addr
+ }
+ for _, addr := range currmap {
+ _, ok := prevmap[string(addr.Bytes())]
+ updated := event.UpdatedAddress{Address: addr}
+ if ok {
+ updated.Action = event.Maintained
+ } else {
+ updated.Action = event.Added
+ addrsAdded = true
+ }
+ evt.Current = append(evt.Current, updated)
+ delete(prevmap, string(addr.Bytes()))
+ }
+ for _, addr := range prevmap {
+ updated := event.UpdatedAddress{Action: event.Removed, Address: addr}
+ evt.Removed = append(evt.Removed, updated)
}
- log.Debugf("protocol negotiation took %s", took)
- go handle(protoID, s)
+ if !addrsAdded && len(evt.Removed) == 0 {
+ return nil
+ }
+
+ // Our addresses have changed. Make a new signed peer record.
+ if !h.disableSignedPeerRecord {
+ // add signed peer record to the event
+ sr, err := h.makeSignedPeerRecord(current)
+ if err != nil {
+ log.Error("error creating a signed peer record from the set of current addresses", "err", err)
+ // drop this change
+ return nil
+ }
+ evt.SignedPeerRecord = sr
+ }
+
+ return evt
+}
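// A standalone sketch of the diff computation in makeUpdatedAddrEvent above,
// assuming plain strings stand in for the multiaddr byte keys: every current
// address is classified as Added or Maintained, and whatever remains of the
// previous set is Removed.
package main

import "fmt"

type action string

const (
    added      action = "added"
    maintained action = "maintained"
    removed    action = "removed"
)

func diffAddrs(prev, curr []string) map[string]action {
    prevSet := make(map[string]struct{}, len(prev))
    for _, a := range prev {
        prevSet[a] = struct{}{}
    }
    out := make(map[string]action, len(curr))
    for _, a := range curr {
        if _, ok := prevSet[a]; ok {
            out[a] = maintained
            delete(prevSet, a)
        } else {
            out[a] = added
        }
    }
    // anything left over was present before but not now
    for a := range prevSet {
        out[a] = removed
    }
    return out
}

func main() {
    fmt.Println(diffAddrs(
        []string{"/ip4/1.2.3.4/tcp/1", "/ip4/1.2.3.4/tcp/2"},
        []string{"/ip4/1.2.3.4/tcp/2", "/ip4/1.2.3.4/tcp/3"},
    ))
}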
+
+func (h *BasicHost) makeSignedPeerRecord(addrs []ma.Multiaddr) (*record.Envelope, error) {
+ // Limit the length of addrs to ensure that our signed peer records aren't rejected
+ peerRecordSize := 64 // HostID
+ k, err := h.signKey.Raw()
+ if err == nil {
+ peerRecordSize += 2 * len(k) // 1 for signature, 1 for public key
+ }
+ // keep the final address list small so that the signed peer record stays within the size limit
+ addrs = trimHostAddrList(addrs, maxPeerRecordSize-peerRecordSize-256) // 256 B of buffer
+ rec := peer.PeerRecordFromAddrInfo(peer.AddrInfo{
+ ID: h.ID(),
+ Addrs: addrs,
+ })
+ return record.Seal(rec, h.signKey)
+}
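// A standalone sketch of producing a signed peer record the way
// makeSignedPeerRecord above does, assuming a throwaway ed25519 key in place
// of the host's identity key; the address is illustrative.
package main

import (
    "crypto/rand"
    "fmt"

    "github.com/libp2p/go-libp2p/core/crypto"
    "github.com/libp2p/go-libp2p/core/peer"
    "github.com/libp2p/go-libp2p/core/record"
    ma "github.com/multiformats/go-multiaddr"
)

func main() {
    priv, _, err := crypto.GenerateEd25519Key(rand.Reader)
    if err != nil {
        panic(err)
    }
    id, err := peer.IDFromPrivateKey(priv)
    if err != nil {
        panic(err)
    }
    rec := peer.PeerRecordFromAddrInfo(peer.AddrInfo{
        ID:    id,
        Addrs: []ma.Multiaddr{ma.StringCast("/ip4/1.2.3.4/tcp/1234")},
    })
    env, err := record.Seal(rec, priv) // sign the record with the identity key
    if err != nil {
        panic(err)
    }
    wire, err := env.Marshal()
    fmt.Println("signed record bytes:", len(wire), "err:", err)
}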
+
+func (h *BasicHost) background() {
+ defer h.refCount.Done()
+ var lastAddrs []ma.Multiaddr
+
+ emitAddrChange := func(currentAddrs []ma.Multiaddr, lastAddrs []ma.Multiaddr) {
+ changeEvt := h.makeUpdatedAddrEvent(lastAddrs, currentAddrs)
+ if changeEvt == nil {
+ return
+ }
+ // Our addresses have changed.
+ // store the signed peer record in the peer store.
+ if !h.disableSignedPeerRecord {
+ if _, err := h.caBook.ConsumePeerRecord(changeEvt.SignedPeerRecord, peerstore.PermanentAddrTTL); err != nil {
+ log.Error("failed to persist signed peer record in peer store", "err", err)
+ return
+ }
+ }
+ // update host addresses in the peer store
+ removedAddrs := make([]ma.Multiaddr, 0, len(changeEvt.Removed))
+ for _, ua := range changeEvt.Removed {
+ removedAddrs = append(removedAddrs, ua.Address)
+ }
+ h.Peerstore().SetAddrs(h.ID(), currentAddrs, peerstore.PermanentAddrTTL)
+ h.Peerstore().SetAddrs(h.ID(), removedAddrs, 0)
+
+ // emit addr change event
+ if err := h.emitters.evtLocalAddrsUpdated.Emit(*changeEvt); err != nil {
+ log.Warn("error emitting event for updated addrs", "err", err)
+ }
+ }
+
+ for {
+ curr := h.Addrs()
+ emitAddrChange(curr, lastAddrs)
+ lastAddrs = curr
+
+ select {
+ case <-h.addrsUpdatedChan:
+ case <-h.ctx.Done():
+ return
+ }
+ }
}
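// A standalone sketch of the loop in background() above, assuming an integer
// stands in for the address list: recompute state on every wakeup, where a
// wakeup is either an update signal or shutdown. All names are illustrative.
package main

import (
    "context"
    "fmt"
    "sync/atomic"
    "time"
)

func watch(ctx context.Context, updated <-chan struct{}, current func() int64, emit func(prev, curr int64)) {
    var last int64
    for {
        curr := current()
        if curr != last {
            emit(last, curr)
        }
        last = curr

        select {
        case <-updated:
        case <-ctx.Done():
            return
        }
    }
}

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    updated := make(chan struct{}, 1) // buffered so signalers never block, like addrsUpdatedChan
    var state atomic.Int64
    go watch(ctx, updated, state.Load, func(prev, curr int64) {
        fmt.Printf("changed: %d -> %d\n", prev, curr)
    })
    state.Store(42)
    updated <- struct{}{}
    time.Sleep(50 * time.Millisecond) // give the watcher a moment to run
    cancel()
}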
// ID returns the (local) peer.ID associated with this Host
@@ -285,265 +519,367 @@ func (h *BasicHost) ID() peer.ID {
}
// Peerstore returns the Host's repository of Peer Addresses and Keys.
-func (h *BasicHost) Peerstore() pstore.Peerstore {
+func (h *BasicHost) Peerstore() peerstore.Peerstore {
return h.Network().Peerstore()
}
// Network returns the Network interface of the Host
-func (h *BasicHost) Network() inet.Network {
+func (h *BasicHost) Network() network.Network {
return h.network
}
// Mux returns the Mux multiplexing incoming streams to protocol handlers
-func (h *BasicHost) Mux() *msmux.MultistreamMuxer {
+func (h *BasicHost) Mux() protocol.Switch {
return h.mux
}
// IDService returns the identify service attached to this host.
-func (h *BasicHost) IDService() *identify.IDService {
+func (h *BasicHost) IDService() identify.IDService {
return h.ids
}
+func (h *BasicHost) EventBus() event.Bus {
+ return h.eventbus
+}
+
// SetStreamHandler sets the protocol handler on the Host's Mux.
// This is equivalent to:
-// host.Mux().SetHandler(proto, handler)
-// (Threadsafe)
-func (h *BasicHost) SetStreamHandler(pid protocol.ID, handler inet.StreamHandler) {
- h.Mux().AddHandler(string(pid), func(p string, rwc io.ReadWriteCloser) error {
- is := rwc.(inet.Stream)
- is.SetProtocol(protocol.ID(p))
+//
+// host.Mux().SetHandler(proto, handler)
+//
+// (Thread-safe)
+func (h *BasicHost) SetStreamHandler(pid protocol.ID, handler network.StreamHandler) {
+ h.Mux().AddHandler(pid, func(_ protocol.ID, rwc io.ReadWriteCloser) error {
+ is := rwc.(network.Stream)
handler(is)
return nil
})
+ h.emitters.evtLocalProtocolsUpdated.Emit(event.EvtLocalProtocolsUpdated{
+ Added: []protocol.ID{pid},
+ })
}
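// A standalone sketch of the event plumbing used above, assuming the eventbus
// package can be used directly (a BasicHost wires this up itself and exposes
// it via EventBus()): emit an EvtLocalProtocolsUpdated and receive it on a
// subscription.
package main

import (
    "fmt"

    "github.com/libp2p/go-libp2p/core/event"
    "github.com/libp2p/go-libp2p/core/protocol"
    "github.com/libp2p/go-libp2p/p2p/host/eventbus"
)

func main() {
    bus := eventbus.NewBus()
    sub, err := bus.Subscribe(&event.EvtLocalProtocolsUpdated{})
    if err != nil {
        panic(err)
    }
    defer sub.Close()

    em, err := bus.Emitter(&event.EvtLocalProtocolsUpdated{})
    if err != nil {
        panic(err)
    }
    defer em.Close()

    if err := em.Emit(event.EvtLocalProtocolsUpdated{Added: []protocol.ID{"/echo/1.0.0"}}); err != nil {
        panic(err)
    }
    evt := (<-sub.Out()).(event.EvtLocalProtocolsUpdated)
    fmt.Println("added:", evt.Added)
}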
// SetStreamHandlerMatch sets the protocol handler on the Host's Mux
// using a matching function to do protocol comparisons
-func (h *BasicHost) SetStreamHandlerMatch(pid protocol.ID, m func(string) bool, handler inet.StreamHandler) {
- h.Mux().AddHandlerWithFunc(string(pid), m, func(p string, rwc io.ReadWriteCloser) error {
- is := rwc.(inet.Stream)
- is.SetProtocol(protocol.ID(p))
+func (h *BasicHost) SetStreamHandlerMatch(pid protocol.ID, m func(protocol.ID) bool, handler network.StreamHandler) {
+ h.Mux().AddHandlerWithFunc(pid, m, func(_ protocol.ID, rwc io.ReadWriteCloser) error {
+ is := rwc.(network.Stream)
handler(is)
return nil
})
+ h.emitters.evtLocalProtocolsUpdated.Emit(event.EvtLocalProtocolsUpdated{
+ Added: []protocol.ID{pid},
+ })
}
// RemoveStreamHandler removes a handler from the Host's Mux.
func (h *BasicHost) RemoveStreamHandler(pid protocol.ID) {
- h.Mux().RemoveHandler(string(pid))
+ h.Mux().RemoveHandler(pid)
+ h.emitters.evtLocalProtocolsUpdated.Emit(event.EvtLocalProtocolsUpdated{
+ Removed: []protocol.ID{pid},
+ })
}
// NewStream opens a new stream to given peer p, and writes a p2p/protocol
// header with given protocol.ID. If there is no connection to p, attempts
// to create one. If ProtocolID is "", writes no header.
-// (Threadsafe)
-func (h *BasicHost) NewStream(ctx context.Context, p peer.ID, pids ...protocol.ID) (inet.Stream, error) {
- pref, err := h.preferredProtocol(p, pids)
- if err != nil {
- return nil, err
+// (Thread-safe)
+func (h *BasicHost) NewStream(ctx context.Context, p peer.ID, pids ...protocol.ID) (str network.Stream, strErr error) {
+ if _, ok := ctx.Deadline(); !ok {
+ if h.negtimeout > 0 {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeout(ctx, h.negtimeout)
+ defer cancel()
+ }
}
- if pref != "" {
- return h.newStream(ctx, p, pref)
+ // If the caller wants to prevent the host from dialing, it should use the NoDial option.
+ if nodial, _ := network.GetNoDial(ctx); !nodial {
+ err := h.Connect(ctx, peer.AddrInfo{ID: p})
+ if err != nil {
+ return nil, err
+ }
}
- var protoStrs []string
- for _, pid := range pids {
- protoStrs = append(protoStrs, string(pid))
+ s, err := h.Network().NewStream(network.WithNoDial(ctx, "already dialed"), p)
+ if err != nil {
+ // TODO: It would be nicer to get the actual error from the swarm,
+ // but this will require some more work.
+ if errors.Is(err, network.ErrNoConn) {
+ return nil, errors.New("connection failed")
+ }
+ return nil, fmt.Errorf("failed to open stream: %w", err)
}
+ defer func() {
+ if strErr != nil && s != nil {
+ s.ResetWithError(network.StreamProtocolNegotiationFailed)
+ }
+ }()
- s, err := h.Network().NewStream(ctx, p)
- if err != nil {
- return nil, err
+ // Wait for any in-progress identifies on the connection to finish. This
+ // is faster than negotiating.
+ //
+ // If the other side doesn't support identify, that's fine. This will
+ // just be a no-op.
+ select {
+ case <-h.ids.IdentifyWait(s.Conn()):
+ case <-ctx.Done():
+ return nil, fmt.Errorf("identify failed to complete: %w", ctx.Err())
}
- selected, err := msmux.SelectOneOf(protoStrs, s)
+ pref, err := h.preferredProtocol(p, pids)
if err != nil {
- s.Reset()
return nil, err
}
- selpid := protocol.ID(selected)
- s.SetProtocol(selpid)
- h.Peerstore().AddProtocols(p, selected)
- if h.bwc != nil {
- s = mstream.WrapStream(s, h.bwc)
+ if pref != "" {
+ if err := s.SetProtocol(pref); err != nil {
+ return nil, err
+ }
+ lzcon := msmux.NewMSSelect(s, pref)
+ return &streamWrapper{
+ Stream: s,
+ rw: lzcon,
+ }, nil
}
- return s, nil
-}
+ // Negotiate the protocol in the background, obeying the context.
+ var selected protocol.ID
+ errCh := make(chan error, 1)
+ go func() {
+ selected, err = msmux.SelectOneOf(pids, s)
+ errCh <- err
+ }()
+ select {
+ case err = <-errCh:
+ if err != nil {
+ return nil, fmt.Errorf("failed to negotiate protocol: %w", err)
+ }
+ case <-ctx.Done():
+ s.ResetWithError(network.StreamProtocolNegotiationFailed)
+ // wait for `SelectOneOf` to error out because of resetting the stream.
+ <-errCh
+ return nil, fmt.Errorf("failed to negotiate protocol: %w", ctx.Err())
+ }
-func pidsToStrings(pids []protocol.ID) []string {
- out := make([]string, len(pids))
- for i, p := range pids {
- out[i] = string(p)
+ if err := s.SetProtocol(selected); err != nil {
+ s.ResetWithError(network.StreamResourceLimitExceeded)
+ return nil, err
}
- return out
+ _ = h.Peerstore().AddProtocols(p, selected) // adding the protocol to the peerstore isn't critical
+ return s, nil
}
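// A standalone sketch of the context-aware negotiation pattern used in
// NewStream above, assuming a blocking negotiate function that is unblocked
// by resetting the stream: run it in a goroutine, and on cancellation reset
// the stream and drain the error channel so the goroutine can exit.
package main

import (
    "context"
    "errors"
    "fmt"
    "time"
)

type resettable interface{ Reset() }

func negotiateCtx(ctx context.Context, s resettable, negotiate func() (string, error)) (string, error) {
    type result struct {
        proto string
        err   error
    }
    ch := make(chan result, 1)
    go func() {
        p, err := negotiate()
        ch <- result{p, err}
    }()
    select {
    case res := <-ch:
        return res.proto, res.err
    case <-ctx.Done():
        s.Reset() // unblocks negotiate with an error
        <-ch      // wait for the goroutine to observe the reset
        return "", fmt.Errorf("failed to negotiate protocol: %w", ctx.Err())
    }
}

type fakeStream struct{ reset chan struct{} }

func (f *fakeStream) Reset() { close(f.reset) }

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
    defer cancel()
    s := &fakeStream{reset: make(chan struct{})}
    _, err := negotiateCtx(ctx, s, func() (string, error) {
        <-s.reset // block until reset, like a stalled remote
        return "", errors.New("stream reset")
    })
    fmt.Println(err)
}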
func (h *BasicHost) preferredProtocol(p peer.ID, pids []protocol.ID) (protocol.ID, error) {
- pidstrs := pidsToStrings(pids)
- supported, err := h.Peerstore().SupportsProtocols(p, pidstrs...)
+ supported, err := h.Peerstore().SupportsProtocols(p, pids...)
if err != nil {
return "", err
}
var out protocol.ID
if len(supported) > 0 {
- out = protocol.ID(supported[0])
+ out = supported[0]
}
return out, nil
}
-func (h *BasicHost) newStream(ctx context.Context, p peer.ID, pid protocol.ID) (inet.Stream, error) {
- s, err := h.Network().NewStream(ctx, p)
- if err != nil {
- return nil, err
- }
-
- s.SetProtocol(pid)
-
- if h.bwc != nil {
- s = mstream.WrapStream(s, h.bwc)
- }
-
- lzcon := msmux.NewMSSelect(s, string(pid))
- return &streamWrapper{
- Stream: s,
- rw: lzcon,
- }, nil
-}
-
// Connect ensures there is a connection between this host and the peer with
// given peer.ID. If there is not an active connection, Connect will issue a
// h.Network.Dial, and block until a connection is open, or an error is returned.
// Connect will absorb the addresses in pi into its internal peerstore.
// It will also resolve any /dns4, /dns6, and /dnsaddr addresses.
-func (h *BasicHost) Connect(ctx context.Context, pi pstore.PeerInfo) error {
+func (h *BasicHost) Connect(ctx context.Context, pi peer.AddrInfo) error {
// absorb addresses into peerstore
- h.Peerstore().AddAddrs(pi.ID, pi.Addrs, pstore.TempAddrTTL)
-
- cs := h.Network().ConnsToPeer(pi.ID)
- if len(cs) > 0 {
- return nil
- }
-
- resolved, err := h.resolveAddrs(ctx, h.Peerstore().PeerInfo(pi.ID))
- if err != nil {
- return err
- }
- h.Peerstore().AddAddrs(pi.ID, resolved, pstore.TempAddrTTL)
-
- return h.dialPeer(ctx, pi.ID)
-}
-
-func (h *BasicHost) resolveAddrs(ctx context.Context, pi pstore.PeerInfo) ([]ma.Multiaddr, error) {
- proto := ma.ProtocolWithCode(ma.P_IPFS).Name
- p2paddr, err := ma.NewMultiaddr("/" + proto + "/" + pi.ID.Pretty())
- if err != nil {
- return nil, err
- }
-
- var addrs []ma.Multiaddr
- for _, addr := range pi.Addrs {
- addrs = append(addrs, addr)
- if !madns.Matches(addr) {
- continue
- }
-
- reqaddr := addr.Encapsulate(p2paddr)
- resaddrs, err := h.maResolver.Resolve(ctx, reqaddr)
- if err != nil {
- log.Infof("error resolving %s: %s", reqaddr, err)
- }
- for _, res := range resaddrs {
- pi, err := pstore.InfoFromP2pAddr(res)
- if err != nil {
- log.Infof("error parsing %s: %s", res, err)
- }
- addrs = append(addrs, pi.Addrs...)
+ h.Peerstore().AddAddrs(pi.ID, pi.Addrs, peerstore.TempAddrTTL)
+
+ forceDirect, _ := network.GetForceDirectDial(ctx)
+ canUseLimitedConn, _ := network.GetAllowLimitedConn(ctx)
+ if !forceDirect {
+ connectedness := h.Network().Connectedness(pi.ID)
+ if connectedness == network.Connected || (canUseLimitedConn && connectedness == network.Limited) {
+ return nil
}
}
- return addrs, nil
+ return h.dialPeer(ctx, pi.ID)
}
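// A standalone sketch of the dial short-circuit in Connect above, assuming
// the three connectedness states the code distinguishes: skip dialing only
// when a direct connection isn't forced and the existing connection is
// usable. All names are illustrative.
package main

import "fmt"

type connectedness int

const (
    notConnected connectedness = iota
    connected
    limited // e.g. a relayed connection
)

func needsDial(state connectedness, forceDirect, canUseLimited bool) bool {
    if forceDirect {
        return true
    }
    switch state {
    case connected:
        return false
    case limited:
        return !canUseLimited
    default:
        return true
    }
}

func main() {
    fmt.Println(needsDial(connected, false, false)) // false: already connected
    fmt.Println(needsDial(limited, false, true))    // false: limited conn allowed
    fmt.Println(needsDial(limited, true, true))     // true: direct dial forced
}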
// dialPeer opens a connection to peer, and makes sure to identify
// the connection once it has been opened.
func (h *BasicHost) dialPeer(ctx context.Context, p peer.ID) error {
- log.Debugf("host %s dialing %s", h.ID, p)
+ log.Debug("host dialing peer", "source_peer", h.ID(), "destination_peer", p)
c, err := h.Network().DialPeer(ctx, p)
if err != nil {
- return err
+ return fmt.Errorf("failed to dial: %w", err)
}
- // Clear protocols on connecting to new peer to avoid issues caused
- // by misremembering protocols between reconnects
- h.Peerstore().SetProtocols(p)
-
- // identify the connection before returning.
- done := make(chan struct{})
- go func() {
- h.ids.IdentifyConn(c)
- close(done)
- }()
-
- // respect don contexteone
+ // TODO: Consider removing this? On one hand, it's nice because we can
+ // assume that things like the agent version are usually set when this
+ // returns. On the other hand, we don't _really_ need to wait for this.
+ //
+ // This is mostly here to preserve existing behavior.
select {
- case <-done:
+ case <-h.ids.IdentifyWait(c):
case <-ctx.Done():
- return ctx.Err()
+ return fmt.Errorf("identify failed to complete: %w", ctx.Err())
}
- log.Debugf("host %s finished dialing %s", h.ID(), p)
+ log.Debug("host finished dialing peer", "source_peer", h.ID(), "destination_peer", p)
return nil
}
-func (h *BasicHost) ConnManager() ifconnmgr.ConnManager {
+func (h *BasicHost) ConnManager() connmgr.ConnManager {
return h.cmgr
}
-// Addrs returns listening addresses that are safe to announce to the network.
-// The output is the same as AllAddrs, but processed by AddrsFactory.
+// Addrs returns listening addresses.
+// When used with AutoRelay, and if the host is not publicly reachable,
+// this will not include the host's direct public addresses; it will only
+// include the relay addresses and private addresses.
func (h *BasicHost) Addrs() []ma.Multiaddr {
- return h.addrs(h.AllAddrs())
+ return h.addressManager.Addrs()
}
-// AllAddrs returns all the addresses of BasicHost at this moment in time.
-// It's ok to not include addresses if they're not available to be used now.
+// AllAddrs returns all the addresses the host is listening on except circuit addresses.
func (h *BasicHost) AllAddrs() []ma.Multiaddr {
- addrs, err := h.Network().InterfaceListenAddresses()
- if err != nil {
- log.Debug("error retrieving network interface addrs")
- }
+ return h.addressManager.DirectAddrs()
+}
- if h.ids != nil { // add external observed addresses
- addrs = append(addrs, h.ids.OwnObservedAddrs()...)
+// ConfirmedAddrs returns all addresses of the host grouped by their reachability
+// as verified by autonatv2.
+//
+// Experimental: This API may change in the future without deprecation.
+//
+// Requires AutoNATv2 to be enabled.
+func (h *BasicHost) ConfirmedAddrs() (reachable []ma.Multiaddr, unreachable []ma.Multiaddr, unknown []ma.Multiaddr) {
+ return h.addressManager.ConfirmedAddrs()
+}
+
+func trimHostAddrList(addrs []ma.Multiaddr, maxSize int) []ma.Multiaddr {
+ totalSize := 0
+ for _, a := range addrs {
+ totalSize += len(a.Bytes())
+ }
+ if totalSize <= maxSize {
+ return addrs
}
- if h.natmgr != nil { // natmgr is nil if we do not use nat option.
- nat := h.natmgr.NAT()
- if nat != nil { // nat is nil if not ready, or no nat is available.
- addrs = append(addrs, nat.ExternalAddrs()...)
+ score := func(addr ma.Multiaddr) int {
+ var res int
+ if manet.IsPublicAddr(addr) {
+ res |= 1 << 12
+ } else if !manet.IsIPLoopback(addr) {
+ res |= 1 << 11
}
+ var protocolWeight int
+ ma.ForEach(addr, func(c ma.Component) bool {
+ switch c.Protocol().Code {
+ case ma.P_QUIC_V1:
+ protocolWeight = 5
+ case ma.P_TCP:
+ protocolWeight = 4
+ case ma.P_WSS:
+ protocolWeight = 3
+ case ma.P_WEBTRANSPORT:
+ protocolWeight = 2
+ case ma.P_WEBRTC_DIRECT:
+ protocolWeight = 1
+ case ma.P_P2P:
+ return false
+ }
+ return true
+ })
+ res |= 1 << protocolWeight
+ return res
}
+ slices.SortStableFunc(addrs, func(a, b ma.Multiaddr) int {
+ return score(b) - score(a) // b-a for reverse order
+ })
+ totalSize = 0
+ for i, a := range addrs {
+ totalSize += len(a.Bytes())
+ if totalSize > maxSize {
+ addrs = addrs[:i]
+ break
+ }
+ }
return addrs
}
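// A standalone sketch of the scoring in trimHostAddrList above, assuming the
// same bit layout: reachability dominates (bit 12 for public, bit 11 for
// non-loopback private), transport preference sits in the low bits, and a
// stable descending sort keeps the most dialable addresses when trimming.
package main

import (
    "fmt"
    "slices"
)

type addr struct {
    public   bool
    loopback bool
    weight   int // e.g. 5 for QUIC-v1, 4 for TCP, ... as above
    name     string
}

func score(a addr) int {
    res := 0
    if a.public {
        res |= 1 << 12
    } else if !a.loopback {
        res |= 1 << 11
    }
    return res | 1<<a.weight
}

func main() {
    addrs := []addr{
        {loopback: true, weight: 4, name: "loopback tcp"},
        {public: true, weight: 4, name: "public tcp"},
        {public: true, weight: 5, name: "public quic-v1"},
        {weight: 5, name: "private quic-v1"},
    }
    slices.SortStableFunc(addrs, func(a, b addr) int { return score(b) - score(a) })
    for _, a := range addrs {
        fmt.Println(a.name) // public quic-v1, public tcp, private quic-v1, loopback tcp
    }
}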
+// SetAutoNat sets the autonat service for the host.
+func (h *BasicHost) SetAutoNat(a autonat.AutoNAT) {
+ h.autoNATMx.Lock()
+ defer h.autoNATMx.Unlock()
+ if h.autoNat == nil {
+ h.autoNat = a
+ }
+}
+
+// GetAutoNat returns the host's AutoNAT service, if AutoNAT is enabled.
+//
+// Deprecated: Use `BasicHost.Reachability` to get the host's reachability.
+func (h *BasicHost) GetAutoNat() autonat.AutoNAT {
+ h.autoNATMx.Lock()
+ defer h.autoNATMx.Unlock()
+ return h.autoNat
+}
+
+// Reachability returns the host's reachability status.
+func (h *BasicHost) Reachability() network.Reachability {
+ return *h.addressManager.hostReachability.Load()
+}
+
// Close shuts down the Host's services (network, etc).
func (h *BasicHost) Close() error {
- return h.proc.Close()
-}
+ h.closeSync.Do(func() {
+ h.ctxCancel()
+ if h.cmgr != nil {
+ h.cmgr.Close()
+ }
+
+ if h.ids != nil {
+ h.ids.Close()
+ }
+ if h.autoNat != nil {
+ h.autoNat.Close()
+ }
+ if h.relayManager != nil {
+ h.relayManager.Close()
+ }
+ if h.hps != nil {
+ h.hps.Close()
+ }
+ if h.autonatv2 != nil {
+ h.autonatv2.Close()
+ }
+
+ _ = h.emitters.evtLocalProtocolsUpdated.Close()
+ _ = h.emitters.evtLocalAddrsUpdated.Close()
-// GetBandwidthReporter exposes the Host's bandiwth metrics reporter
-func (h *BasicHost) GetBandwidthReporter() metrics.Reporter {
- return h.bwc
+ if err := h.network.Close(); err != nil {
+ log.Error("swarm close failed", "err", err)
+ }
+
+ h.addressManager.Close()
+ h.psManager.Close()
+ if h.Peerstore() != nil {
+ h.Peerstore().Close()
+ }
+
+ h.refCount.Wait()
+
+ if h.Network().ResourceManager() != nil {
+ h.Network().ResourceManager().Close()
+ }
+ })
+
+ return nil
}
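// A standalone sketch of the idempotent shutdown in Close above, assuming a
// single background goroutine: sync.Once guarantees the teardown runs exactly
// once no matter how many times Close is called, and the WaitGroup blocks
// until goroutines tracked by refCount have exited.
package main

import (
    "fmt"
    "sync"
)

type service struct {
    closeOnce sync.Once
    refCount  sync.WaitGroup
    done      chan struct{}
}

func newService() *service {
    s := &service{done: make(chan struct{})}
    s.refCount.Add(1)
    go func() {
        defer s.refCount.Done()
        <-s.done // stand-in for background work
    }()
    return s
}

func (s *service) Close() error {
    s.closeOnce.Do(func() {
        close(s.done)     // signal background workers
        s.refCount.Wait() // wait for them to exit
        fmt.Println("torn down once")
    })
    return nil
}

func main() {
    s := newService()
    s.Close()
    s.Close() // no-op: teardown already ran
}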
type streamWrapper struct {
- inet.Stream
- rw io.ReadWriter
+ network.Stream
+ rw io.ReadWriteCloser
}
func (s *streamWrapper) Read(b []byte) (int, error) {
@@ -553,3 +889,19 @@ func (s *streamWrapper) Read(b []byte) (int, error) {
func (s *streamWrapper) Write(b []byte) (int, error) {
return s.rw.Write(b)
}
+
+func (s *streamWrapper) Close() error {
+ return s.rw.Close()
+}
+
+func (s *streamWrapper) CloseWrite() error {
+ // Flush the handshake before closing, but ignore the error. The other
+ // end may have closed their side for reading.
+ //
+ // If something is wrong with the stream, the user will get an error on
+ // read instead.
+ if flusher, ok := s.rw.(interface{ Flush() error }); ok {
+ _ = flusher.Flush()
+ }
+ return s.Stream.CloseWrite()
+}
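// A standalone sketch of the optional-interface pattern in CloseWrite above,
// assuming any io.Writer: the type assertion flushes only when a Flush method
// exists, and the flush error is deliberately dropped because the peer may
// already have closed its read side.
package main

import (
    "bufio"
    "fmt"
    "io"
    "os"
)

func flushIfPossible(w io.Writer) {
    if flusher, ok := w.(interface{ Flush() error }); ok {
        _ = flusher.Flush() // best effort; ignore the error
    }
}

func main() {
    bw := bufio.NewWriter(os.Stdout)
    fmt.Fprint(bw, "buffered bytes\n")
    flushIfPossible(bw)        // *bufio.Writer has Flush() error
    flushIfPossible(os.Stdout) // *os.File doesn't; this is a no-op
}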
diff --git a/p2p/host/basic/basic_host_test.go b/p2p/host/basic/basic_host_test.go
index 7cf8136e62..b8c69a0b28 100644
--- a/p2p/host/basic/basic_host_test.go
+++ b/p2p/host/basic/basic_host_test.go
@@ -1,108 +1,318 @@
package basichost
import (
- "bytes"
"context"
+ "encoding/binary"
+ "fmt"
"io"
- "sort"
+ "reflect"
+ "strings"
+ "sync"
+ "sync/atomic"
"testing"
"time"
- host "github.com/libp2p/go-libp2p-host"
- inet "github.com/libp2p/go-libp2p-net"
- testutil "github.com/libp2p/go-libp2p-netutil"
- pstore "github.com/libp2p/go-libp2p-peerstore"
- protocol "github.com/libp2p/go-libp2p-protocol"
+ "github.com/libp2p/go-libp2p-testing/race"
+ "github.com/libp2p/go-libp2p/core/event"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/libp2p/go-libp2p/core/protocol"
+ "github.com/libp2p/go-libp2p/core/record"
+ "github.com/libp2p/go-libp2p/p2p/host/autonat"
+ "github.com/libp2p/go-libp2p/p2p/host/eventbus"
+ swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
+ "github.com/libp2p/go-libp2p/p2p/protocol/identify"
+
ma "github.com/multiformats/go-multiaddr"
- madns "github.com/multiformats/go-multiaddr-dns"
+ "github.com/multiformats/go-multiaddr/matest"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
-func TestHostSimple(t *testing.T) {
+func TestHostDoubleClose(t *testing.T) {
+ h1, err := NewHost(swarmt.GenSwarm(t), nil)
+ require.NoError(t, err)
+ h1.Close()
+ h1.Close()
+}
+func TestHostSimple(t *testing.T) {
ctx := context.Background()
- h1 := New(testutil.GenSwarmNetwork(t, ctx))
- h2 := New(testutil.GenSwarmNetwork(t, ctx))
+ h1, err := NewHost(swarmt.GenSwarm(t), nil)
+ require.NoError(t, err)
defer h1.Close()
+ h1.Start()
+ h2, err := NewHost(swarmt.GenSwarm(t), nil)
+ require.NoError(t, err)
+
defer h2.Close()
+ h2.Start()
h2pi := h2.Peerstore().PeerInfo(h2.ID())
- if err := h1.Connect(ctx, h2pi); err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, h1.Connect(ctx, h2pi))
piper, pipew := io.Pipe()
- h2.SetStreamHandler(protocol.TestingID, func(s inet.Stream) {
+ h2.SetStreamHandler(protocol.TestingID, func(s network.Stream) {
defer s.Close()
w := io.MultiWriter(s, pipew)
io.Copy(w, s) // mirror everything
})
s, err := h1.NewStream(ctx, h2pi.ID, protocol.TestingID)
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
// write to the stream
buf1 := []byte("abcdefghijkl")
- if _, err := s.Write(buf1); err != nil {
- t.Fatal(err)
- }
+ _, err = s.Write(buf1)
+ require.NoError(t, err)
// get it from the stream (echoed)
buf2 := make([]byte, len(buf1))
- if _, err := io.ReadFull(s, buf2); err != nil {
- t.Fatal(err)
- }
- if !bytes.Equal(buf1, buf2) {
- t.Fatalf("buf1 != buf2 -- %x != %x", buf1, buf2)
- }
+ _, err = io.ReadFull(s, buf2)
+ require.NoError(t, err)
+ require.Equal(t, buf1, buf2)
// get it from the pipe (tee)
buf3 := make([]byte, len(buf1))
- if _, err := io.ReadFull(piper, buf3); err != nil {
- t.Fatal(err)
+ _, err = io.ReadFull(piper, buf3)
+ require.NoError(t, err)
+ require.Equal(t, buf1, buf3)
+}
+
+func TestMultipleClose(t *testing.T) {
+ h, err := NewHost(swarmt.GenSwarm(t), nil)
+ require.NoError(t, err)
+
+ require.NoError(t, h.Close())
+ require.NoError(t, h.Close())
+ h2, err := NewHost(swarmt.GenSwarm(t), nil)
+ require.NoError(t, err)
+ defer h2.Close()
+ require.Error(t, h.Connect(context.Background(), peer.AddrInfo{ID: h2.ID(), Addrs: h2.Addrs()}))
+ h.Network().Peerstore().AddAddrs(h2.ID(), h2.Addrs(), peerstore.PermanentAddrTTL)
+ _, err = h.NewStream(context.Background(), h2.ID())
+ require.Error(t, err)
+ require.Empty(t, h.Addrs())
+ require.Empty(t, h.AllAddrs())
+}
+
+func TestSignedPeerRecordWithNoListenAddrs(t *testing.T) {
+ h, err := NewHost(swarmt.GenSwarm(t, swarmt.OptDialOnly), nil)
+ require.NoError(t, err)
+ defer h.Close()
+ h.Start()
+
+ require.Empty(t, h.Addrs(), "expected no listen addrs")
+ // now add a listen addr
+ require.NoError(t, h.Network().Listen(ma.StringCast("/ip4/0.0.0.0/tcp/0")))
+ require.NotEmpty(t, h.Addrs(), "expected at least 1 listen addr")
+
+ cab, ok := peerstore.GetCertifiedAddrBook(h.Peerstore())
+ if !ok {
+ t.Fatalf("peerstore doesn't support certified addrs")
+ }
+ // the signed record with the new addr is added async
+ var env *record.Envelope
+ require.Eventually(t, func() bool {
+ env = cab.GetPeerRecord(h.ID())
+ return env != nil
+ }, 500*time.Millisecond, 10*time.Millisecond)
+ rec, err := env.Record()
+ require.NoError(t, err)
+ require.NotEmpty(t, rec.(*peer.PeerRecord).Addrs)
+}
+
+func TestProtocolHandlerEvents(t *testing.T) {
+ h, err := NewHost(swarmt.GenSwarm(t), nil)
+ require.NoError(t, err)
+ defer h.Close()
+
+ sub, err := h.EventBus().Subscribe(&event.EvtLocalProtocolsUpdated{}, eventbus.BufSize(16))
+ require.NoError(t, err)
+ defer sub.Close()
+
+ // the identify service adds new protocol handlers shortly after the host
+ // starts. this helps us filter those events out, since they're unrelated
+ // to the test.
+ isIdentify := func(evt event.EvtLocalProtocolsUpdated) bool {
+ for _, p := range evt.Added {
+ if p == identify.ID || p == identify.IDPush {
+ return true
+ }
+ }
+ return false
+ }
+
+ nextEvent := func() event.EvtLocalProtocolsUpdated {
+ for {
+ select {
+ case evt := <-sub.Out():
+ next := evt.(event.EvtLocalProtocolsUpdated)
+ if isIdentify(next) {
+ continue
+ }
+ return next
+ case <-time.After(5 * time.Second):
+ t.Fatal("event not received in 5 seconds")
+ }
+ }
}
- if !bytes.Equal(buf1, buf3) {
- t.Fatalf("buf1 != buf3 -- %x != %x", buf1, buf3)
+
+ assert := func(added, removed []protocol.ID) {
+ next := nextEvent()
+ if !reflect.DeepEqual(added, next.Added) {
+ t.Errorf("expected added: %v; received: %v", added, next.Added)
+ }
+ if !reflect.DeepEqual(removed, next.Removed) {
+ t.Errorf("expected removed: %v; received: %v", removed, next.Removed)
+ }
}
+
+ h.SetStreamHandler(protocol.TestingID, func(_ network.Stream) {})
+ assert([]protocol.ID{protocol.TestingID}, nil)
+ h.SetStreamHandler("foo", func(_ network.Stream) {})
+ assert([]protocol.ID{"foo"}, nil)
+ h.RemoveStreamHandler(protocol.TestingID)
+ assert(nil, []protocol.ID{protocol.TestingID})
}
func TestHostAddrsFactory(t *testing.T) {
maddr := ma.StringCast("/ip4/1.2.3.4/tcp/1234")
- addrsFactory := func(addrs []ma.Multiaddr) []ma.Multiaddr {
+ addrsFactory := func(_ []ma.Multiaddr) []ma.Multiaddr {
return []ma.Multiaddr{maddr}
}
- ctx := context.Background()
- h := New(testutil.GenSwarmNetwork(t, ctx), AddrsFactory(addrsFactory))
+ h, err := NewHost(swarmt.GenSwarm(t), &HostOpts{AddrsFactory: addrsFactory})
+ require.NoError(t, err)
defer h.Close()
addrs := h.Addrs()
if len(addrs) != 1 {
- t.Fatalf("expected 1 addr, got %d", len(addrs))
+ t.Fatalf("expected 1 addr, got %+v", addrs)
}
- if addrs[0] != maddr {
+ if !addrs[0].Equal(maddr) {
t.Fatalf("expected %s, got %s", maddr.String(), addrs[0].String())
}
+
+ autoNat, err := autonat.New(h, autonat.WithReachability(network.ReachabilityPublic))
+ if err != nil {
+ t.Fatalf("should be able to attach autonat: %v", err)
+ }
+ h.SetAutoNat(autoNat)
+ addrs = h.Addrs()
+ if len(addrs) != 1 {
+ t.Fatalf("didn't expect change in returned addresses.")
+ }
}
-func getHostPair(ctx context.Context, t *testing.T) (host.Host, host.Host) {
- h1 := New(testutil.GenSwarmNetwork(t, ctx))
- h2 := New(testutil.GenSwarmNetwork(t, ctx))
+func TestAllAddrs(t *testing.T) {
+ // no listen addrs
+ h, err := NewHost(swarmt.GenSwarm(t, swarmt.OptDialOnly), nil)
+ require.NoError(t, err)
+ h.Start()
+ defer h.Close()
+ require.Nil(t, h.AllAddrs())
+
+ // listen on loopback
+ laddr := ma.StringCast("/ip4/127.0.0.1/tcp/0")
+ require.NoError(t, h.Network().Listen(laddr))
+ require.Len(t, h.AllAddrs(), 1)
+ firstAddr := h.AllAddrs()[0]
+ require.Equal(t, "/ip4/127.0.0.1", ma.Split(firstAddr)[0].String())
+
+ // listen on IPv4 0.0.0.0
+ require.NoError(t, h.Network().Listen(ma.StringCast("/ip4/0.0.0.0/tcp/0")))
+ // should contain localhost and private local addr along with previous listen address
+ require.Len(t, h.AllAddrs(), 3)
+ // Should still contain the original addr.
+ require.True(t, ma.Contains(h.AllAddrs(), firstAddr), "should still contain the original addr")
+}
- h2pi := h2.Peerstore().PeerInfo(h2.ID())
- if err := h1.Connect(ctx, h2pi); err != nil {
- t.Fatal(err)
- }
+func TestAllAddrsUnique(t *testing.T) {
+ if race.WithRace() {
+ t.Skip("updates addrChangeTickrInterval which might be racy")
+ }
+ oldInterval := addrChangeTickrInterval
+ addrChangeTickrInterval = 100 * time.Millisecond
+ defer func() {
+ addrChangeTickrInterval = oldInterval
+ }()
+ sendNewAddrs := make(chan struct{})
+ opts := HostOpts{
+ AddrsFactory: func(_ []ma.Multiaddr) []ma.Multiaddr {
+ select {
+ case <-sendNewAddrs:
+ return []ma.Multiaddr{
+ ma.StringCast("/ip4/1.2.3.4/tcp/1"),
+ ma.StringCast("/ip4/1.2.3.4/tcp/1"),
+ ma.StringCast("/ip4/1.2.3.4/tcp/1"),
+ ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1"),
+ ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1"),
+ }
+ default:
+ return nil
+ }
+ },
+ }
+ // no listen addrs
+ h, err := NewHost(swarmt.GenSwarm(t, swarmt.OptDialOnly), &opts)
+ require.NoError(t, err)
+ defer h.Close()
+ h.Start()
+
+ sub, err := h.EventBus().Subscribe(&event.EvtLocalAddressesUpdated{})
+ require.NoError(t, err)
+ out := make(chan int)
+ done := make(chan struct{})
+ go func() {
+ cnt := 0
+ for {
+ select {
+ case <-sub.Out():
+ cnt++
+ case <-done:
+ out <- cnt
+ return
+ }
+ }
+ }()
+ close(sendNewAddrs)
+ require.Len(t, h.Addrs(), 2)
+ matest.AssertEqualMultiaddrs(t, []ma.Multiaddr{ma.StringCast("/ip4/1.2.3.4/tcp/1"), ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1")}, h.Addrs())
+ time.Sleep(2*addrChangeTickrInterval + 1*time.Second) // wait for more than 2x the ticker interval to catch any duplicate events
+ close(done)
+ cnt := <-out
+ require.Equal(t, 1, cnt)
+}
+// getHostPair gets a new pair of hosts.
+// The first host initiates the connection to the second host.
+func getHostPair(t *testing.T) (host.Host, host.Host) {
+ t.Helper()
+
+ h1, err := NewHost(swarmt.GenSwarm(t), nil)
+ require.NoError(t, err)
+ h1.Start()
+ h2, err := NewHost(swarmt.GenSwarm(t), nil)
+ require.NoError(t, err)
+ h2.Start()
+
+ ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+ defer cancel()
+ h2pi := h2.Peerstore().PeerInfo(h2.ID())
+ require.NoError(t, h1.Connect(ctx, h2pi))
return h1, h2
}
func assertWait(t *testing.T, c chan protocol.ID, exp protocol.ID) {
+ t.Helper()
select {
case proto := <-c:
if proto != exp {
- t.Fatal("should have connected on ", exp)
+ t.Fatalf("should have connected on %s, got %s", exp, proto)
}
case <-time.After(time.Second * 5):
t.Fatal("timeout waiting for stream")
@@ -110,61 +320,55 @@ func assertWait(t *testing.T, c chan protocol.ID, exp protocol.ID) {
}
func TestHostProtoPreference(t *testing.T) {
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- h1, h2 := getHostPair(ctx, t)
+ h1, h2 := getHostPair(t)
defer h1.Close()
defer h2.Close()
- protoOld := protocol.ID("/testing")
- protoNew := protocol.ID("/testing/1.1.0")
- protoMinor := protocol.ID("/testing/1.2.0")
+ const (
+ protoOld = "/testing"
+ protoNew = "/testing/1.1.0"
+ protoMinor = "/testing/1.2.0"
+ )
connectedOn := make(chan protocol.ID)
-
- handler := func(s inet.Stream) {
+ handler := func(s network.Stream) {
connectedOn <- s.Protocol()
s.Close()
}
- h1.SetStreamHandler(protoOld, handler)
+ // Prevent pushing identify information so this test works.
+ h1.RemoveStreamHandler(identify.IDPush)
- s, err := h2.NewStream(ctx, h1.ID(), protoMinor, protoNew, protoOld)
- if err != nil {
- t.Fatal(err)
- }
+ h2.SetStreamHandler(protoOld, handler)
- assertWait(t, connectedOn, protoOld)
- s.Close()
+ s, err := h1.NewStream(context.Background(), h2.ID(), protoMinor, protoNew, protoOld)
+ require.NoError(t, err)
- mfunc, err := host.MultistreamSemverMatcher(protoMinor)
- if err != nil {
- t.Fatal(err)
- }
+ // force the lazy negotiation to complete
+ _, err = s.Write(nil)
+ require.NoError(t, err)
- h1.SetStreamHandlerMatch(protoMinor, mfunc, handler)
+ assertWait(t, connectedOn, protoOld)
+ s.Close()
+ h2.SetStreamHandlerMatch(protoMinor, func(protocol.ID) bool { return true }, handler)
// remembered preference will be chosen first, even when the other side newly supports it
- s2, err := h2.NewStream(ctx, h1.ID(), protoMinor, protoNew, protoOld)
- if err != nil {
- t.Fatal(err)
- }
+ s2, err := h1.NewStream(context.Background(), h2.ID(), protoMinor, protoNew, protoOld)
+ require.NoError(t, err)
// required to force 'lazy' handshake
_, err = s2.Write([]byte("hello"))
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
assertWait(t, connectedOn, protoOld)
-
s2.Close()
- s3, err := h2.NewStream(ctx, h1.ID(), protoMinor)
- if err != nil {
- t.Fatal(err)
- }
+ s3, err := h1.NewStream(context.Background(), h2.ID(), protoMinor)
+ require.NoError(t, err)
+
+ // Force a lazy handshake as we may have received a protocol update by this point.
+ _, err = s3.Write([]byte("hello"))
+ require.NoError(t, err)
assertWait(t, connectedOn, protoMinor)
s3.Close()
@@ -174,11 +378,11 @@ func TestHostProtoMismatch(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- h1, h2 := getHostPair(ctx, t)
+ h1, h2 := getHostPair(t)
defer h1.Close()
defer h2.Close()
- h1.SetStreamHandler("/super", func(s inet.Stream) {
+ h1.SetStreamHandler("/super", func(s network.Stream) {
t.Error("shouldnt get here")
s.Reset()
})
@@ -190,26 +394,37 @@ func TestHostProtoMismatch(t *testing.T) {
}
func TestHostProtoPreknowledge(t *testing.T) {
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ h1, err := NewHost(swarmt.GenSwarm(t, swarmt.OptDialOnly), nil)
+ require.NoError(t, err)
+ defer h1.Close()
- h1 := New(testutil.GenSwarmNetwork(t, ctx))
- h2 := New(testutil.GenSwarmNetwork(t, ctx))
+ h2, err := NewHost(swarmt.GenSwarm(t, swarmt.OptDisableTCP), nil)
+ require.NoError(t, err)
+ defer h2.Close()
conn := make(chan protocol.ID)
- handler := func(s inet.Stream) {
+ handler := func(s network.Stream) {
conn <- s.Protocol()
s.Close()
}
- h1.SetStreamHandler("/super", handler)
+ h2.SetStreamHandler("/super", handler)
+
+ h1.Start()
+ h2.Start()
+
+ // Prevent pushing identify information so this test actually _uses_ the super protocol.
+ h1.RemoveStreamHandler(identify.IDPush)
h2pi := h2.Peerstore().PeerInfo(h2.ID())
- if err := h1.Connect(ctx, h2pi); err != nil {
- t.Fatal(err)
- }
- defer h1.Close()
- defer h2.Close()
+ // Filter to only 1 address so that we don't have to think about parallel
+ // connections in this test
+ h2pi.Addrs = h2pi.Addrs[:1]
+ require.NoError(t, h1.Connect(context.Background(), h2pi))
+
+ // This test implicitly relies on 1 connection. If a background identify
+ // completes after we set the stream handler below things break
+ require.Len(t, h1.Network().ConnsToPeer(h2.ID()), 1)
// wait for identify handshake to finish completely
select {
@@ -224,24 +439,30 @@ func TestHostProtoPreknowledge(t *testing.T) {
t.Fatal("timed out waiting for identify")
}
- h1.SetStreamHandler("/foo", handler)
+ h2.SetStreamHandler("/foo", handler)
- s, err := h2.NewStream(ctx, h1.ID(), "/foo", "/bar", "/super")
- if err != nil {
- t.Fatal(err)
- }
+ require.Never(t, func() bool {
+ protos, err := h1.Peerstore().GetProtocols(h2.ID())
+ require.NoError(t, err)
+ for _, p := range protos {
+ if p == "/foo" {
+ return true
+ }
+ }
+ return false
+ }, time.Second, 100*time.Millisecond)
+
+ s, err := h1.NewStream(context.Background(), h2.ID(), "/foo", "/bar", "/super")
+ require.NoError(t, err)
select {
case p := <-conn:
- t.Fatal("shouldnt have gotten connection yet, we should have a lazy stream: ", p)
+ t.Fatal("shouldn't have gotten connection yet, we should have a lazy stream: ", p)
case <-time.After(time.Millisecond * 50):
}
_, err = s.Read(nil)
- if err != nil {
- t.Fatal(err)
- }
-
+ require.NoError(t, err)
assertWait(t, conn, "/super")
s.Close()
@@ -251,137 +472,504 @@ func TestNewDialOld(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- h1, h2 := getHostPair(ctx, t)
+ h1, h2 := getHostPair(t)
defer h1.Close()
defer h2.Close()
connectedOn := make(chan protocol.ID)
- h1.SetStreamHandler("/testing", func(s inet.Stream) {
+ h2.SetStreamHandler("/testing", func(s network.Stream) {
connectedOn <- s.Protocol()
s.Close()
})
- s, err := h2.NewStream(ctx, h1.ID(), "/testing/1.0.0", "/testing")
- if err != nil {
- t.Fatal(err)
- }
+ s, err := h1.NewStream(ctx, h2.ID(), "/testing/1.0.0", "/testing")
+ require.NoError(t, err)
+ // force the lazy negotiation to complete
+ _, err = s.Write(nil)
+ require.NoError(t, err)
assertWait(t, connectedOn, "/testing")
- if s.Protocol() != "/testing" {
- t.Fatal("shoould have gotten /testing")
+ require.Equal(t, s.Protocol(), protocol.ID("/testing"), "should have gotten /testing")
+}
+
+func TestNewStreamResolve(t *testing.T) {
+ h1, err := NewHost(swarmt.GenSwarm(t), nil)
+ require.NoError(t, err)
+ h1.Start()
+ h2, err := NewHost(swarmt.GenSwarm(t), nil)
+ require.NoError(t, err)
+ h2.Start()
+
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+ defer cancel()
+
+ // Get the tcp port that h2 is listening on.
+ h2pi := h2.Peerstore().PeerInfo(h2.ID())
+ var dialAddr string
+ const tcpPrefix = "/ip4/127.0.0.1/tcp/"
+ for _, addr := range h2pi.Addrs {
+ addrStr := addr.String()
+ if strings.HasPrefix(addrStr, tcpPrefix) {
+ port := addrStr[len(tcpPrefix):]
+ dialAddr = "/dns4/localhost/tcp/" + port
+ break
+ }
}
+ assert.NotEqual(t, "", dialAddr)
- s.Close()
+ // Add the DNS multiaddr to h1's peerstore.
+ maddr, err := ma.NewMultiaddr(dialAddr)
+ require.NoError(t, err)
+ h1.Peerstore().AddAddr(h2.ID(), maddr, time.Second)
+
+ connectedOn := make(chan protocol.ID)
+ h2.SetStreamHandler("/testing", func(s network.Stream) {
+ connectedOn <- s.Protocol()
+ s.Close()
+ })
+
+ // NewStream will make a new connection using the DNS address in h1's
+ // peerstore.
+ s, err := h1.NewStream(ctx, h2.ID(), "/testing/1.0.0", "/testing")
+ require.NoError(t, err)
+
+ // force the lazy negotiation to complete
+ _, err = s.Write(nil)
+ require.NoError(t, err)
+ assertWait(t, connectedOn, "/testing")
}
func TestProtoDowngrade(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- h1, h2 := getHostPair(ctx, t)
+ h1, h2 := getHostPair(t)
defer h1.Close()
defer h2.Close()
connectedOn := make(chan protocol.ID)
- h1.SetStreamHandler("/testing/1.0.0", func(s inet.Stream) {
+ h2.SetStreamHandler("/testing/1.0.0", func(s network.Stream) {
+ defer s.Close()
+ result, err := io.ReadAll(s)
+ assert.NoError(t, err)
+ assert.Equal(t, "bar", string(result))
connectedOn <- s.Protocol()
- s.Close()
})
- s, err := h2.NewStream(ctx, h1.ID(), "/testing/1.0.0", "/testing")
- if err != nil {
- t.Fatal(err)
- }
+ s, err := h1.NewStream(ctx, h2.ID(), "/testing/1.0.0", "/testing")
+ require.NoError(t, err)
+ require.Equal(t, s.Protocol(), protocol.ID("/testing/1.0.0"), "should have gotten /testing/1.0.0, got %s", s.Protocol())
- assertWait(t, connectedOn, "/testing/1.0.0")
-
- if s.Protocol() != "/testing/1.0.0" {
- t.Fatal("shoould have gotten /testing")
- }
- s.Close()
+ _, err = s.Write([]byte("bar"))
+ require.NoError(t, err)
+ require.NoError(t, s.CloseWrite())
- h1.Network().ConnsToPeer(h2.ID())[0].Close()
+ assertWait(t, connectedOn, "/testing/1.0.0")
+ require.NoError(t, s.Close())
- time.Sleep(time.Millisecond * 50) // allow notifications to propogate
- h1.RemoveStreamHandler("/testing/1.0.0")
- h1.SetStreamHandler("/testing", func(s inet.Stream) {
+ h1.Network().ClosePeer(h2.ID())
+ h2.RemoveStreamHandler("/testing/1.0.0")
+ h2.SetStreamHandler("/testing", func(s network.Stream) {
+ defer s.Close()
+ result, err := io.ReadAll(s)
+ assert.NoError(t, err)
+ assert.Equal(t, "foo", string(result))
connectedOn <- s.Protocol()
- s.Close()
})
+ // Give us a moment to update our protocol list. This happens async through the event bus.
+ // This is _almost_ instantaneous, but this test fails once every ~1k runs without this.
+ time.Sleep(time.Millisecond)
+
h2pi := h2.Peerstore().PeerInfo(h2.ID())
- if err := h1.Connect(ctx, h2pi); err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, h1.Connect(ctx, h2pi))
- s2, err := h2.NewStream(ctx, h1.ID(), "/testing/1.0.0", "/testing")
- if err != nil {
- t.Fatal(err)
- }
+ s2, err := h1.NewStream(ctx, h2.ID(), "/testing/1.0.0", "/testing")
+ require.NoError(t, err)
+ require.Equal(t, s2.Protocol(), protocol.ID("/testing"), "should have gotten /testing, got %s, %s", s.Protocol(), s.Conn())
- _, err = s2.Write(nil)
+ _, err = s2.Write([]byte("foo"))
+ require.NoError(t, err)
+ require.NoError(t, s2.CloseWrite())
+
+ assertWait(t, connectedOn, "/testing")
+}
+
+func TestAddrChangeImmediatelyIfAddressNonEmpty(t *testing.T) {
+ ctx := context.Background()
+ taddrs := []ma.Multiaddr{ma.StringCast("/ip4/1.2.3.4/tcp/1234")}
+
+ starting := make(chan struct{}, 1)
+ var count atomic.Int32
+ h, err := NewHost(swarmt.GenSwarm(t), &HostOpts{AddrsFactory: func(addrs []ma.Multiaddr) []ma.Multiaddr {
+ // The first call here is made from the constructor. Don't block.
+ if count.Add(1) == 1 {
+ return addrs
+ }
+ <-starting
+ return taddrs
+ }})
+ require.NoError(t, err)
+ defer h.Close()
+
+ sub, err := h.EventBus().Subscribe(&event.EvtLocalAddressesUpdated{})
if err != nil {
- t.Fatal(err)
+ t.Error(err)
}
+ defer sub.Close()
+ close(starting)
+ h.Start()
- assertWait(t, connectedOn, "/testing")
+ expected := event.EvtLocalAddressesUpdated{
+ Diffs: true,
+ Current: []event.UpdatedAddress{
+ {Action: event.Added, Address: ma.StringCast("/ip4/1.2.3.4/tcp/1234")},
+ },
+ Removed: []event.UpdatedAddress{}}
- if s2.Protocol() != "/testing" {
- t.Fatal("shoould have gotten /testing")
+ // assert we get expected event
+ evt := waitForAddrChangeEvent(ctx, sub, t)
+ if !updatedAddrEventsEqual(expected, evt) {
+ t.Errorf("change events not equal: \n\texpected: %v \n\tactual: %v", expected, evt)
}
- s2.Close()
+ // assert it's on the signed record
+ rc := peerRecordFromEnvelope(t, evt.SignedPeerRecord)
+ matest.AssertEqualMultiaddrs(t, taddrs, rc.Addrs)
+
+ // assert it's in the peerstore
+ ev := h.Peerstore().(peerstore.CertifiedAddrBook).GetPeerRecord(h.ID())
+ require.NotNil(t, ev)
+ rc = peerRecordFromEnvelope(t, ev)
+ matest.AssertEqualMultiaddrs(t, taddrs, rc.Addrs)
}
-func TestAddrResolution(t *testing.T) {
- ctx := context.Background()
+func TestStatefulAddrEvents(t *testing.T) {
+ h, err := NewHost(swarmt.GenSwarm(t), nil)
+ require.NoError(t, err)
+ h.Start()
+ defer h.Close()
- p1, err := testutil.RandPeerID()
+ sub, err := h.EventBus().Subscribe(&event.EvtLocalAddressesUpdated{}, eventbus.BufSize(10))
if err != nil {
t.Error(err)
}
- p2, err := testutil.RandPeerID()
- if err != nil {
- t.Error(err)
+ defer sub.Close()
+
+ select {
+ case v := <-sub.Out():
+ assert.NotNil(t, v)
+ case <-time.After(time.Second * 5):
+ t.Error("timed out waiting for event")
}
- addr1 := ma.StringCast("/dnsaddr/example.com")
- addr2 := ma.StringCast("/ip4/192.0.2.1/tcp/123")
- p2paddr1 := ma.StringCast("/dnsaddr/example.com/ipfs/" + p1.Pretty())
- p2paddr2 := ma.StringCast("/ip4/192.0.2.1/tcp/123/ipfs/" + p1.Pretty())
- p2paddr3 := ma.StringCast("/ip4/192.0.2.1/tcp/123/ipfs/" + p2.Pretty())
+}
- backend := &madns.MockBackend{
- TXT: map[string][]string{"_dnsaddr.example.com": []string{
- "dnsaddr=" + p2paddr2.String(), "dnsaddr=" + p2paddr3.String(),
- }},
+func TestHostAddrChangeDetection(t *testing.T) {
+ // This test uses the address factory to provide several
+ // sets of listen addresses for the host. It advances through
+ // the sets by changing the currentAddrSet index var below.
+ addrSets := [][]ma.Multiaddr{
+ {},
+ {ma.StringCast("/ip4/1.2.3.4/tcp/1234")},
+ {ma.StringCast("/ip4/1.2.3.4/tcp/1234"), ma.StringCast("/ip4/2.3.4.5/tcp/1234")},
+ {ma.StringCast("/ip4/2.3.4.5/tcp/1234"), ma.StringCast("/ip4/3.4.5.6/tcp/4321")},
+ }
+
+ // The events we expect the host to emit when the address manager re-syncs
+ // and the changes between addr sets are detected
+ expectedEvents := []event.EvtLocalAddressesUpdated{
+ {
+ Diffs: true,
+ Current: []event.UpdatedAddress{
+ {Action: event.Added, Address: ma.StringCast("/ip4/1.2.3.4/tcp/1234")},
+ },
+ Removed: []event.UpdatedAddress{},
+ },
+ {
+ Diffs: true,
+ Current: []event.UpdatedAddress{
+ {Action: event.Maintained, Address: ma.StringCast("/ip4/1.2.3.4/tcp/1234")},
+ {Action: event.Added, Address: ma.StringCast("/ip4/2.3.4.5/tcp/1234")},
+ },
+ Removed: []event.UpdatedAddress{},
+ },
+ {
+ Diffs: true,
+ Current: []event.UpdatedAddress{
+ {Action: event.Added, Address: ma.StringCast("/ip4/3.4.5.6/tcp/4321")},
+ {Action: event.Maintained, Address: ma.StringCast("/ip4/2.3.4.5/tcp/1234")},
+ },
+ Removed: []event.UpdatedAddress{
+ {Action: event.Removed, Address: ma.StringCast("/ip4/1.2.3.4/tcp/1234")},
+ },
+ },
+ }
+
+ var lk sync.Mutex
+ currentAddrSet := 0
+ addrsFactory := func(_ []ma.Multiaddr) []ma.Multiaddr {
+ lk.Lock()
+ defer lk.Unlock()
+ return addrSets[currentAddrSet]
}
- resolver := &madns.Resolver{Backend: backend}
- h := New(testutil.GenSwarmNetwork(t, ctx), resolver)
+ ctx := context.Background()
+ h, err := NewHost(swarmt.GenSwarm(t), &HostOpts{AddrsFactory: addrsFactory})
+ require.NoError(t, err)
+ h.Start()
defer h.Close()
- pi, err := pstore.InfoFromP2pAddr(p2paddr1)
- if err != nil {
- t.Error(err)
+ sub, err := h.EventBus().Subscribe(&event.EvtLocalAddressesUpdated{}, eventbus.BufSize(10))
+ require.NoError(t, err)
+ defer sub.Close()
+
+ // wait for the host background thread to start
+ time.Sleep(1 * time.Second)
+ // host should start with no addrs (addrSet 0)
+ addrs := h.Addrs()
+ if len(addrs) != 0 {
+ t.Fatalf("expected 0 addrs, got %d", len(addrs))
+ }
+
+ // change addr, signal and assert event
+ for i := 1; i < len(addrSets); i++ {
+ lk.Lock()
+ currentAddrSet = i
+ lk.Unlock()
+ h.addressManager.updateAddrsSync()
+ evt := waitForAddrChangeEvent(ctx, sub, t)
+ if !updatedAddrEventsEqual(expectedEvents[i-1], evt) {
+ t.Errorf("change events not equal: \n\texpected: %v \n\tactual: %v", expectedEvents[i-1], evt)
+ }
+
+ // assert it's on the signed record
+ rc := peerRecordFromEnvelope(t, evt.SignedPeerRecord)
+ matest.AssertMultiaddrsMatch(t, addrSets[i], rc.Addrs)
+
+ // assert it's in the peerstore
+ ev := h.Peerstore().(peerstore.CertifiedAddrBook).GetPeerRecord(h.ID())
+ require.NotNil(t, ev)
+ rc = peerRecordFromEnvelope(t, ev)
+ matest.AssertMultiaddrsMatch(t, addrSets[i], rc.Addrs)
}
+}
- tctx, cancel := context.WithTimeout(ctx, time.Millisecond*100)
+func TestNegotiationCancel(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- _ = h.Connect(tctx, *pi)
- addrs := h.Peerstore().Addrs(pi.ID)
- sort.Sort(sortedMultiaddrs(addrs))
+ h1, h2 := getHostPair(t)
+ defer h1.Close()
+ defer h2.Close()
+
+ // Install a raw stream handler, bypassing protocol negotiation, so we can make the negotiation hang.
+ h2.Network().SetStreamHandler(func(s network.Stream) {
+ <-ctx.Done() // wait till the test is done.
+ s.Reset()
+ })
+
+ ctx2, cancel2 := context.WithCancel(ctx)
+ defer cancel2()
+
+ errCh := make(chan error, 1)
+ go func() {
+ s, err := h1.NewStream(ctx2, h2.ID(), "/testing")
+ if s != nil {
+ errCh <- fmt.Errorf("expected to fail negotiation")
+ return
+ }
+ errCh <- err
+ }()
+ select {
+ case err := <-errCh:
+ t.Fatal(err)
+ case <-time.After(10 * time.Millisecond):
+ // ok, hung.
+ }
+ cancel2()
+
+ select {
+ case err := <-errCh:
+ require.ErrorIs(t, err, context.Canceled)
+ case <-time.After(500 * time.Millisecond):
+ // failed to cancel
+ t.Fatal("expected negotiation to be canceled")
+ }
+}
- if len(addrs) != 2 || !addrs[0].Equal(addr1) || !addrs[1].Equal(addr2) {
- t.Fatalf("expected [%s %s], got %+v", addr1, addr2, addrs)
+func waitForAddrChangeEvent(ctx context.Context, sub event.Subscription, t *testing.T) event.EvtLocalAddressesUpdated {
+ t.Helper()
+ for {
+ select {
+ case evt, more := <-sub.Out():
+ if !more {
+ t.Fatal("channel should not be closed")
+ }
+ return evt.(event.EvtLocalAddressesUpdated)
+ case <-ctx.Done():
+ t.Fatal("context should not have cancelled")
+ case <-time.After(5 * time.Second):
+ t.Fatal("timed out waiting for address change event")
+ }
}
}
-type sortedMultiaddrs []ma.Multiaddr
+// updatedAddrsEqual is a helper to check whether two lists of
+// event.UpdatedAddress have the same contents, ignoring ordering.
+func updatedAddrsEqual(a, b []event.UpdatedAddress) bool {
+ if len(a) != len(b) {
+ return false
+ }
+
+ // We can't use an UpdatedAddress directly as a map key, since
+ // Multiaddr is an interface, and Go won't know how to compare
+ // for equality. So we convert to this little struct, which
+ // stores the multiaddr as a string.
+ type ua struct {
+ action event.AddrAction
+ addrStr string
+ }
+ aSet := make(map[ua]struct{})
+ for _, addr := range a {
+ k := ua{action: addr.Action, addrStr: string(addr.Address.Bytes())}
+ aSet[k] = struct{}{}
+ }
+ for _, addr := range b {
+ k := ua{action: addr.Action, addrStr: string(addr.Address.Bytes())}
+ _, ok := aSet[k]
+ if !ok {
+ return false
+ }
+ }
+ return true
+}
+
+// updatedAddrEventsEqual is a helper to check whether two
+// event.EvtLocalAddressesUpdated are equal, ignoring the ordering of
+// addresses in the inner lists.
+func updatedAddrEventsEqual(a, b event.EvtLocalAddressesUpdated) bool {
+ return a.Diffs == b.Diffs &&
+ updatedAddrsEqual(a.Current, b.Current) &&
+ updatedAddrsEqual(a.Removed, b.Removed)
+}
+
+func peerRecordFromEnvelope(t *testing.T, ev *record.Envelope) *peer.PeerRecord {
+ t.Helper()
+ rec, err := ev.Record()
+ if err != nil {
+ t.Fatalf("error getting PeerRecord from event: %v", err)
+ return nil
+ }
+ peerRec, ok := rec.(*peer.PeerRecord)
+ if !ok {
+ t.Fatalf("wrong type for peer record")
+ return nil
+ }
+ return peerRec
+}
+
+func TestTrimHostAddrList(t *testing.T) {
+ type testCase struct {
+ name string
+ in []ma.Multiaddr
+ threshold int
+ out []ma.Multiaddr
+ }
+
+ tcpPublic := ma.StringCast("/ip4/1.1.1.1/tcp/1")
+ quicPublic := ma.StringCast("/ip4/1.1.1.1/udp/1/quic-v1")
+
+ tcpPrivate := ma.StringCast("/ip4/192.168.1.1/tcp/1")
+ quicPrivate := ma.StringCast("/ip4/192.168.1.1/udp/1/quic-v1")
+
+ tcpLocal := ma.StringCast("/ip4/127.0.0.1/tcp/1")
+ quicLocal := ma.StringCast("/ip4/127.0.0.1/udp/1/quic-v1")
+
+ testCases := []testCase{
+ {
+ name: "Public preferred over private",
+ in: []ma.Multiaddr{tcpPublic, quicPrivate},
+ threshold: len(tcpLocal.Bytes()),
+ out: []ma.Multiaddr{tcpPublic},
+ },
+ {
+ name: "Public and private preffered over local",
+ in: []ma.Multiaddr{tcpPublic, tcpPrivate, quicLocal},
+ threshold: len(tcpPublic.Bytes()) + len(tcpPrivate.Bytes()),
+ out: []ma.Multiaddr{tcpPublic, tcpPrivate},
+ },
+ {
+ name: "quic preferred over tcp",
+ in: []ma.Multiaddr{tcpPublic, quicPublic},
+ threshold: len(quicPublic.Bytes()),
+ out: []ma.Multiaddr{quicPublic},
+ },
+ {
+ name: "no filtering on large threshold",
+ in: []ma.Multiaddr{tcpPublic, quicPublic, quicLocal, tcpLocal, tcpPrivate},
+ threshold: 10000,
+ out: []ma.Multiaddr{tcpPublic, quicPublic, quicLocal, tcpLocal, tcpPrivate},
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ got := trimHostAddrList(tc.in, tc.threshold)
+ require.ElementsMatch(t, got, tc.out)
+ })
+ }
+}
+
+func TestHostTimeoutNewStream(t *testing.T) {
+ h1, err := NewHost(swarmt.GenSwarm(t), nil)
+ require.NoError(t, err)
+ h1.Start()
+ defer h1.Close()
+
+ const proto = "/testing"
+ h2 := swarmt.GenSwarm(t)
+
+ h2.SetStreamHandler(func(s network.Stream) {
+ // The first message is the multistream header. Just echo it.
+ msHeader := []byte("\x19/multistream/1.0.0\n")
+ _, err := s.Read(msHeader)
+ assert.NoError(t, err)
+ _, err = s.Write(msHeader)
+ assert.NoError(t, err)
+
+ buf := make([]byte, 1024)
+ n, err := s.Read(buf)
+ assert.NoError(t, err)
+
+ msgLen, varintN := binary.Uvarint(buf[:n])
+ buf = buf[varintN:]
+ proto := buf[:int(msgLen)]
+ if string(proto) == "/ipfs/id/1.0.0\n" {
+ // Signal we don't support identify
+ na := []byte("na\n")
+ n := binary.PutUvarint(buf, uint64(len(na)))
+ copy(buf[n:], na)
+
+ _, err = s.Write(buf[:int(n)+len(na)])
+ assert.NoError(t, err)
+ } else {
+ // Stall
+ time.Sleep(5 * time.Second)
+ }
+ t.Log("Resetting")
+ s.Reset()
+ })
+
+ err = h1.Connect(context.Background(), peer.AddrInfo{
+ ID: h2.LocalPeer(),
+ Addrs: h2.ListenAddresses(),
+ })
+ require.NoError(t, err)
-func (sma sortedMultiaddrs) Len() int { return len(sma) }
-func (sma sortedMultiaddrs) Swap(i, j int) { sma[i], sma[j] = sma[j], sma[i] }
-func (sma sortedMultiaddrs) Less(i, j int) bool {
- return bytes.Compare(sma[i].Bytes(), sma[j].Bytes()) == 1
+ // The context has no deadline, so NewStream falls back to negtimeout.
+ h1.negtimeout = time.Second
+ _, err = h1.NewStream(context.Background(), h2.LocalPeer(), proto)
+ require.Error(t, err)
+ require.ErrorContains(t, err, "context deadline exceeded")
}
diff --git a/p2p/host/basic/internal/backoff/backoff.go b/p2p/host/basic/internal/backoff/backoff.go
new file mode 100644
index 0000000000..3d1fd23778
--- /dev/null
+++ b/p2p/host/basic/internal/backoff/backoff.go
@@ -0,0 +1,58 @@
+package backoff
+
+import (
+ "time"
+)
+
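+// since is a variable so tests can stub out the clock (see backoff_test.go).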
+var since = time.Since
+
+const defaultDelay = 100 * time.Millisecond
+const defaultMaxDelay = 1 * time.Minute
+
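+// ExpBackoff implements exponential backoff: after each consecutive failure,
+// the delay before the next allowed run doubles, up to MaxDelay. The zero
+// value is ready to use with the default delays.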
+type ExpBackoff struct {
+ Delay time.Duration
+ MaxDelay time.Duration
+
+ failures int
+ lastRun time.Time
+}
+
+func (b *ExpBackoff) init() {
+ if b.Delay == 0 {
+ b.Delay = defaultDelay
+ }
+ if b.MaxDelay == 0 {
+ b.MaxDelay = defaultMaxDelay
+ }
+}
+
+func (b *ExpBackoff) calcDelay() time.Duration {
+ delay := b.Delay * time.Duration(1<<(b.failures-1))
+ delay = min(delay, b.MaxDelay)
+ return delay
+}
+
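+// Run calls f if the backoff delay since the last failing run has elapsed,
+// and reports whether f was actually invoked.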
+func (b *ExpBackoff) Run(f func() error) (err error, ran bool) {
+ b.init()
+
+ if b.failures != 0 {
+ if since(b.lastRun) < b.calcDelay() {
+ return nil, false
+ }
+ }
+
+ b.lastRun = time.Now()
+ err = f()
+ if err == nil {
+ b.failures = 0
+ } else {
+ b.failures++
+ }
+ return err, true
+}
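For orientation, here is a minimal sketch of how ExpBackoff.Run is meant to be driven. It is illustrative only: the package is internal to p2p/host/basic, and the failing operation is hypothetical.

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/libp2p/go-libp2p/p2p/host/basic/internal/backoff"
)

func main() {
	// The zero value is usable: 100ms initial delay, capped at 1 minute.
	b := &backoff.ExpBackoff{}
	op := func() error { return errors.New("transient failure") } // hypothetical operation

	for i := 0; i < 5; i++ {
		// Run invokes op only if the current backoff delay has elapsed;
		// otherwise it reports ran == false without calling op.
		if err, ran := b.Run(op); ran {
			fmt.Println("attempt returned:", err)
		} else {
			fmt.Println("still backing off, attempt skipped")
		}
		time.Sleep(150 * time.Millisecond)
	}
}
```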
diff --git a/p2p/host/basic/internal/backoff/backoff_test.go b/p2p/host/basic/internal/backoff/backoff_test.go
new file mode 100644
index 0000000000..6396b4e649
--- /dev/null
+++ b/p2p/host/basic/internal/backoff/backoff_test.go
@@ -0,0 +1,59 @@
+package backoff
+
+import (
+ "errors"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestBackoff(t *testing.T) {
+ origSince := since
+ defer func() { since = origSince }()
+
+ var timeSince time.Duration
+ since = func(time.Time) time.Duration {
+ return timeSince
+ }
+
+ var maybeErr error
+ b := &ExpBackoff{}
+ f := func() error { return maybeErr }
+
+ err, ran := b.Run(f)
+ require.True(t, ran)
+ require.NoError(t, err)
+
+ maybeErr = errors.New("some error")
+ err, ran = b.Run(f)
+ require.True(t, ran)
+ require.Error(t, err)
+
+ // Run again immediately: still within the backoff window, so f must not run.
+ _, ran = b.Run(f)
+ require.False(t, ran)
+
+ timeSince = 100*time.Millisecond + 1
+ err, ran = b.Run(f)
+ require.True(t, ran)
+ require.Error(t, err)
+
+ timeSince = 100*time.Millisecond + 1
+ _, ran = b.Run(f)
+ require.False(t, ran)
+
+ timeSince = 200*time.Millisecond + 1
+ err, ran = b.Run(f)
+ require.True(t, ran)
+ require.Error(t, err)
+
+ for timeSince < defaultMaxDelay*4 {
+ timeSince *= 2
+ err, ran = b.Run(f)
+ require.True(t, ran)
+ require.Error(t, err)
+ }
+
+ require.Equal(t, defaultMaxDelay, b.calcDelay())
+}
diff --git a/p2p/host/basic/mock_nat_test.go b/p2p/host/basic/mock_nat_test.go
new file mode 100644
index 0000000000..924e52c566
--- /dev/null
+++ b/p2p/host/basic/mock_nat_test.go
@@ -0,0 +1,99 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: github.com/libp2p/go-libp2p/p2p/host/basic (interfaces: NAT)
+//
+// Generated by this command:
+//
+// mockgen -build_flags=-tags=gomock -package basichost -destination mock_nat_test.go github.com/libp2p/go-libp2p/p2p/host/basic NAT
+//
+
+// Package basichost is a generated GoMock package.
+package basichost
+
+import (
+ context "context"
+ netip "net/netip"
+ reflect "reflect"
+
+ gomock "go.uber.org/mock/gomock"
+)
+
+// MockNAT is a mock of NAT interface.
+type MockNAT struct {
+ ctrl *gomock.Controller
+ recorder *MockNATMockRecorder
+ isgomock struct{}
+}
+
+// MockNATMockRecorder is the mock recorder for MockNAT.
+type MockNATMockRecorder struct {
+ mock *MockNAT
+}
+
+// NewMockNAT creates a new mock instance.
+func NewMockNAT(ctrl *gomock.Controller) *MockNAT {
+ mock := &MockNAT{ctrl: ctrl}
+ mock.recorder = &MockNATMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockNAT) EXPECT() *MockNATMockRecorder {
+ return m.recorder
+}
+
+// AddMapping mocks base method.
+func (m *MockNAT) AddMapping(ctx context.Context, protocol string, port int) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "AddMapping", ctx, protocol, port)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// AddMapping indicates an expected call of AddMapping.
+func (mr *MockNATMockRecorder) AddMapping(ctx, protocol, port any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddMapping", reflect.TypeOf((*MockNAT)(nil).AddMapping), ctx, protocol, port)
+}
+
+// Close mocks base method.
+func (m *MockNAT) Close() error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Close")
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Close indicates an expected call of Close.
+func (mr *MockNATMockRecorder) Close() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockNAT)(nil).Close))
+}
+
+// GetMapping mocks base method.
+func (m *MockNAT) GetMapping(protocol string, port int) (netip.AddrPort, bool) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetMapping", protocol, port)
+ ret0, _ := ret[0].(netip.AddrPort)
+ ret1, _ := ret[1].(bool)
+ return ret0, ret1
+}
+
+// GetMapping indicates an expected call of GetMapping.
+func (mr *MockNATMockRecorder) GetMapping(protocol, port any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMapping", reflect.TypeOf((*MockNAT)(nil).GetMapping), protocol, port)
+}
+
+// RemoveMapping mocks base method.
+func (m *MockNAT) RemoveMapping(ctx context.Context, protocol string, port int) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "RemoveMapping", ctx, protocol, port)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// RemoveMapping indicates an expected call of RemoveMapping.
+func (mr *MockNATMockRecorder) RemoveMapping(ctx, protocol, port any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveMapping", reflect.TypeOf((*MockNAT)(nil).RemoveMapping), ctx, protocol, port)
+}
diff --git a/p2p/host/basic/mocks.go b/p2p/host/basic/mocks.go
new file mode 100644
index 0000000000..a29a0c5ef7
--- /dev/null
+++ b/p2p/host/basic/mocks.go
@@ -0,0 +1,6 @@
+//go:build gomock || generate
+
+package basichost
+
+//go:generate sh -c "go run go.uber.org/mock/mockgen -build_flags=\"-tags=gomock\" -package basichost -destination mock_nat_test.go github.com/libp2p/go-libp2p/p2p/host/basic NAT"
+type NAT nat
diff --git a/p2p/host/basic/natmgr.go b/p2p/host/basic/natmgr.go
index 349f79e789..b9dec60a7b 100644
--- a/p2p/host/basic/natmgr.go
+++ b/p2p/host/basic/natmgr.go
@@ -2,239 +2,298 @@ package basichost
import (
"context"
+ "io"
+ "net"
+ "net/netip"
+ "strconv"
"sync"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ inat "github.com/libp2p/go-libp2p/p2p/net/nat"
- goprocess "github.com/jbenet/goprocess"
- lgbl "github.com/libp2p/go-libp2p-loggables"
- inat "github.com/libp2p/go-libp2p-nat"
- inet "github.com/libp2p/go-libp2p-net"
ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
)
-// A simple interface to manage NAT devices.
+// NATManager is a simple interface to manage NAT devices.
+// It listens for Listen and ListenClose notifications from the network.Network,
+// and tries to obtain port mappings for the announced addresses.
type NATManager interface {
+ GetMapping(ma.Multiaddr) ma.Multiaddr
+ HasDiscoveredNAT() bool
+ io.Closer
+}
- // Get the NAT device managed by the NAT manager.
- NAT() *inat.NAT
-
- // Receive a notification when the NAT device is ready for use.
- Ready() <-chan struct{}
+// NewNATManager creates a NAT manager.
+func NewNATManager(net network.Network) NATManager {
+ return newNATManager(net)
+}
- // Close all resources associated with a NAT manager.
- Close() error
+type entry struct {
+ protocol string
+ port int
}
-// Create a NAT manager.
-func NewNATManager(net inet.Network) NATManager {
- return newNatManager(net)
+type nat interface {
+ AddMapping(ctx context.Context, protocol string, port int) error
+ RemoveMapping(ctx context.Context, protocol string, port int) error
+ GetMapping(protocol string, port int) (netip.AddrPort, bool)
+ io.Closer
}
+// so we can mock it in tests
+var discoverNAT = func(ctx context.Context) (nat, error) { return inat.DiscoverNAT(ctx) }
+
// natManager takes care of adding + removing port mappings to the nat.
// Initialized with the host if it has a NATPortMap option enabled.
// natManager receives signals from the network, and checks on nat mappings:
-// * natManager listens to the network and adds or closes port mappings
-// as the network signals Listen() or ListenClose().
-// * closing the natManager closes the nat and its mappings.
+// - natManager listens to the network and adds or closes port mappings
+// as the network signals Listen() or ListenClose().
+// - closing the natManager closes the nat and its mappings.
type natManager struct {
- net inet.Network
- natmu sync.RWMutex // guards nat (ready could obviate this mutex, but safety first.)
- nat *inat.NAT
+ net network.Network
+ natMx sync.RWMutex
+ nat nat
- ready chan struct{} // closed once the nat is ready to process port mappings
- proc goprocess.Process // natManager has a process + children. can be closed.
+ syncFlag chan struct{} // cap: 1
+
+ tracked map[entry]bool // the bool is only used in doSync and has no meaning outside of that function
+
+ refCount sync.WaitGroup
+ ctx context.Context
+ ctxCancel context.CancelFunc
}
-func newNatManager(net inet.Network) *natManager {
+func newNATManager(net network.Network) *natManager {
+ ctx, cancel := context.WithCancel(context.Background())
nmgr := &natManager{
- net: net,
- ready: make(chan struct{}),
+ net: net,
+ syncFlag: make(chan struct{}, 1),
+ ctx: ctx,
+ ctxCancel: cancel,
+ tracked: make(map[entry]bool),
}
-
- nmgr.proc = goprocess.WithTeardown(func() error {
- // on closing, unregister from network notifications.
- net.StopNotify((*nmgrNetNotifiee)(nmgr))
- return nil
- })
-
- // discover the nat.
- nmgr.discoverNAT()
+ nmgr.refCount.Add(1)
+ go nmgr.background(ctx)
return nmgr
}
// Close closes the natManager, closing the underlying nat
// and unregistering from network events.
func (nmgr *natManager) Close() error {
- return nmgr.proc.Close()
+ nmgr.ctxCancel()
+ nmgr.refCount.Wait()
+ return nil
}
-// Ready returns a channel which will be closed when the NAT has been found
-// and is ready to be used, or the search process is done.
-func (nmgr *natManager) Ready() <-chan struct{} {
- return nmgr.ready
+func (nmgr *natManager) HasDiscoveredNAT() bool {
+ nmgr.natMx.RLock()
+ defer nmgr.natMx.RUnlock()
+ return nmgr.nat != nil
}
-func (nmgr *natManager) discoverNAT() {
-
- nmgr.proc.Go(func(worker goprocess.Process) {
- // inat.DiscoverNAT blocks until the nat is found or a timeout
- // is reached. we unfortunately cannot specify timeouts-- the
- // library we're using just blocks.
- //
- // Note: on early shutdown, there may be a case where we're trying
- // to close before DiscoverNAT() returns. Since we cant cancel it
- // (library) we can choose to (1) drop the result and return early,
- // or (2) wait until it times out to exit. For now we choose (2),
- // to avoid leaking resources in a non-obvious way. the only case
- // this affects is when the daemon is being started up and _immediately_
- // asked to close. other services are also starting up, so ok to wait.
- discoverdone := make(chan struct{})
- var nat *inat.NAT
- go func() {
- defer close(discoverdone)
- nat = inat.DiscoverNAT()
- }()
-
- // by this point -- after finding the NAT -- we may have already
- // be closing. if so, just exit.
- select {
- case <-worker.Closing():
- return
- case <-discoverdone:
- if nat == nil { // no nat, or failed to get it.
- return
- }
+func (nmgr *natManager) background(ctx context.Context) {
+ defer nmgr.refCount.Done()
+
+ defer func() {
+ nmgr.natMx.Lock()
+ defer nmgr.natMx.Unlock()
+
+ if nmgr.nat != nil {
+ nmgr.nat.Close()
}
+ }()
- // wire up the nat to close when nmgr closes.
- // nmgr.proc is our parent, and waiting for us.
- nmgr.proc.AddChild(nat.Process())
-
- // set the nat.
- nmgr.natmu.Lock()
- nmgr.nat = nat
- nmgr.natmu.Unlock()
-
- // signal that we're ready to process nat mappings:
- close(nmgr.ready)
-
- // sign natManager up for network notifications
- // we need to sign up here to avoid missing some notifs
- // before the NAT has been found.
- nmgr.net.Notify((*nmgrNetNotifiee)(nmgr))
-
- // if any interfaces were brought up while we were setting up
- // the nat, now is the time to setup port mappings for them.
- // we release ready, then grab them to avoid losing any. adding
- // a port mapping is idempotent, so its ok to add the same twice.
- addrs := nmgr.net.ListenAddresses()
- for _, addr := range addrs {
- // we do it async because it's slow and we may want to close beforehand
- go addPortMapping(nmgr, addr)
+ discoverCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
+ defer cancel()
+ natInstance, err := discoverNAT(discoverCtx)
+ if err != nil {
+ log.Info("DiscoverNAT error:", "err", err)
+ return
+ }
+
+ nmgr.natMx.Lock()
+ nmgr.nat = natInstance
+ nmgr.natMx.Unlock()
+
+ // sign natManager up for network notifications
+ // we need to sign up here to avoid missing some notifs
+ // before the NAT has been found.
+ nmgr.net.Notify((*nmgrNetNotifiee)(nmgr))
+ defer nmgr.net.StopNotify((*nmgrNetNotifiee)(nmgr))
+
+ nmgr.doSync() // sync once at startup.
+ for {
+ select {
+ case <-nmgr.syncFlag:
+ nmgr.doSync() // sync when our listen addresses change.
+ case <-ctx.Done():
+ return
}
- })
+ }
}
-// NAT returns the natManager's nat object. this may be nil, if
-// (a) the search process is still ongoing, or (b) the search process
-// found no nat. Clients must check whether the return value is nil.
-func (nmgr *natManager) NAT() *inat.NAT {
- nmgr.natmu.Lock()
- defer nmgr.natmu.Unlock()
- return nmgr.nat
+func (nmgr *natManager) sync() {
+ select {
+ case nmgr.syncFlag <- struct{}{}:
+ default:
+ }
}
-func addPortMapping(nmgr *natManager, intaddr ma.Multiaddr) {
- nat := nmgr.NAT()
- if nat == nil {
- panic("natManager addPortMapping called without a nat.")
+// doSync syncs the current NAT mappings, removing any outdated mappings and adding any
+// new mappings.
+func (nmgr *natManager) doSync() {
+ for e := range nmgr.tracked {
+ nmgr.tracked[e] = false
}
+ var newAddresses []entry
+ for _, maddr := range nmgr.net.ListenAddresses() {
+ // Strip the IP
+ maIP, rest := ma.SplitFirst(maddr)
+ if maIP == nil || len(rest) == 0 {
+ continue
+ }
- // first, check if the port mapping already exists.
- for _, mapping := range nat.Mappings() {
- if mapping.InternalAddr().Equal(intaddr) {
- return // it exists! return.
+ switch maIP.Protocol().Code {
+ case ma.P_IP6, ma.P_IP4:
+ default:
+ continue
}
- }
- ctx := context.TODO()
- lm := make(lgbl.DeferredMap)
- lm["internalAddr"] = func() interface{} { return intaddr.String() }
+ // Only bother if we're listening on a unicast / unspecified IP.
+ ip := net.IP(maIP.RawValue())
+ if !ip.IsGlobalUnicast() && !ip.IsUnspecified() {
+ continue
+ }
- defer log.EventBegin(ctx, "natMgrAddPortMappingWait", lm).Done()
+ // Extract the port/protocol
+ proto, _ := ma.SplitFirst(rest)
+ if proto == nil {
+ continue
+ }
- select {
- case <-nmgr.proc.Closing():
- lm["outcome"] = "cancelled"
- return // no use.
- case <-nmgr.ready: // wait until it's ready.
+ var protocol string
+ switch proto.Protocol().Code {
+ case ma.P_TCP:
+ protocol = "tcp"
+ case ma.P_UDP:
+ protocol = "udp"
+ default:
+ continue
+ }
+ port, err := strconv.ParseUint(proto.Value(), 10, 16)
+ if err != nil {
+ // bug in multiaddr
+ panic(err)
+ }
+ e := entry{protocol: protocol, port: int(port)}
+ if _, ok := nmgr.tracked[e]; ok {
+ nmgr.tracked[e] = true
+ } else {
+ newAddresses = append(newAddresses, e)
+ }
}
- // actually start the port map (sub-event because waiting may take a while)
- defer log.EventBegin(ctx, "natMgrAddPortMapping", lm).Done()
+ var wg sync.WaitGroup
+ defer wg.Wait()
- // get the nat
- m, err := nat.NewMapping(intaddr)
- if err != nil {
- lm["outcome"] = "failure"
- lm["error"] = err
- return
+ // Close old mappings
+ for e, v := range nmgr.tracked {
+ if !v {
+ nmgr.nat.RemoveMapping(nmgr.ctx, e.protocol, e.port)
+ delete(nmgr.tracked, e)
+ }
}
- extaddr, err := m.ExternalAddr()
- if err != nil {
- lm["outcome"] = "failure"
- lm["error"] = err
- return
+ // Create new mappings.
+ for _, e := range newAddresses {
+ if err := nmgr.nat.AddMapping(nmgr.ctx, e.protocol, e.port); err != nil {
+ log.Error("failed to port-map", "protocol", e.protocol, "port", e.port, "err", err)
+ }
+ nmgr.tracked[e] = false
}
-
- lm["outcome"] = "success"
- lm["externalAddr"] = func() interface{} { return extaddr.String() }
- log.Infof("established nat port mapping: %s <--> %s", intaddr, extaddr)
}
-func rmPortMapping(nmgr *natManager, intaddr ma.Multiaddr) {
- nat := nmgr.NAT()
- if nat == nil {
- panic("natManager rmPortMapping called without a nat.")
- }
+func (nmgr *natManager) GetMapping(addr ma.Multiaddr) ma.Multiaddr {
+ nmgr.natMx.Lock()
+ defer nmgr.natMx.Unlock()
- // list the port mappings (it may be gone on it's own, so we need to
- // check this list, and not store it ourselves behind the scenes)
+ if nmgr.nat == nil { // NAT not yet initialized
+ return nil
+ }
- // close mappings for this internal address.
- for _, mapping := range nat.Mappings() {
- if mapping.InternalAddr().Equal(intaddr) {
- mapping.Close()
+ var found bool
+ var proto int // ma.P_TCP or ma.P_UDP
+ transport, rest := ma.SplitFunc(addr, func(c ma.Component) bool {
+ if found {
+ return true
}
+ proto = c.Protocol().Code
+ found = proto == ma.P_TCP || proto == ma.P_UDP
+ return false
+ })
+ if !manet.IsThinWaist(transport) {
+ return nil
}
-}
-
-// nmgrNetNotifiee implements the network notification listening part
-// of the natManager. this is merely listening to Listen() and ListenClose()
-// events.
-type nmgrNetNotifiee natManager
-func (nn *nmgrNetNotifiee) natManager() *natManager {
- return (*natManager)(nn)
-}
+ naddr, err := manet.ToNetAddr(transport)
+ if err != nil {
+ log.Error("error parsing net multiaddr", "addr", transport, "err", err)
+ return nil
+ }
-func (nn *nmgrNetNotifiee) Listen(n inet.Network, addr ma.Multiaddr) {
- if nn.natManager().NAT() == nil {
- return // not ready or doesnt exist.
+ var (
+ ip net.IP
+ port int
+ protocol string
+ )
+ switch naddr := naddr.(type) {
+ case *net.TCPAddr:
+ ip = naddr.IP
+ port = naddr.Port
+ protocol = "tcp"
+ case *net.UDPAddr:
+ ip = naddr.IP
+ port = naddr.Port
+ protocol = "udp"
+ default:
+ return nil
}
- addPortMapping(nn.natManager(), addr)
-}
+ if !ip.IsGlobalUnicast() && !ip.IsUnspecified() {
+ // We only map ports for global unicast & unspecified addresses, not broadcast, multicast, etc.
+ return nil
+ }
-func (nn *nmgrNetNotifiee) ListenClose(n inet.Network, addr ma.Multiaddr) {
- if nn.natManager().NAT() == nil {
- return // not ready or doesnt exist.
+ extAddr, ok := nmgr.nat.GetMapping(protocol, port)
+ if !ok {
+ return nil
}
- rmPortMapping(nn.natManager(), addr)
+ var mappedAddr net.Addr
+ switch naddr.(type) {
+ case *net.TCPAddr:
+ mappedAddr = net.TCPAddrFromAddrPort(extAddr)
+ case *net.UDPAddr:
+ mappedAddr = net.UDPAddrFromAddrPort(extAddr)
+ }
+ mappedMaddr, err := manet.FromNetAddr(mappedAddr)
+ if err != nil {
+ log.Error("mapped addr can't be turned into a multiaddr", "addr", mappedAddr, "err", err)
+ return nil
+ }
+ extMaddr := mappedMaddr
+ if rest != nil {
+ extMaddr = ma.Join(extMaddr, rest)
+ }
+ return extMaddr
}
-func (nn *nmgrNetNotifiee) Connected(inet.Network, inet.Conn) {}
-func (nn *nmgrNetNotifiee) Disconnected(inet.Network, inet.Conn) {}
-func (nn *nmgrNetNotifiee) OpenedStream(inet.Network, inet.Stream) {}
-func (nn *nmgrNetNotifiee) ClosedStream(inet.Network, inet.Stream) {}
+type nmgrNetNotifiee natManager
+
+func (nn *nmgrNetNotifiee) natManager() *natManager { return (*natManager)(nn) }
+func (nn *nmgrNetNotifiee) Listen(network.Network, ma.Multiaddr) { nn.natManager().sync() }
+func (nn *nmgrNetNotifiee) ListenClose(_ network.Network, _ ma.Multiaddr) { nn.natManager().sync() }
+func (nn *nmgrNetNotifiee) Connected(network.Network, network.Conn) {}
+func (nn *nmgrNetNotifiee) Disconnected(network.Network, network.Conn) {}
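To make the new NATManager surface concrete, here is a hedged sketch of querying the manager for the external mapping of a listen address. The printMapping helper and its wiring are made up; NewNATManager, HasDiscoveredNAT, and GetMapping are as defined above.

```go
package example

import (
	"fmt"

	"github.com/libp2p/go-libp2p/core/network"
	basichost "github.com/libp2p/go-libp2p/p2p/host/basic"

	ma "github.com/multiformats/go-multiaddr"
)

// printMapping is a hypothetical helper demonstrating the NATManager API.
func printMapping(nw network.Network) {
	nmgr := basichost.NewNATManager(nw)
	defer nmgr.Close()

	// NAT discovery runs in the background, so this may report false
	// right after construction.
	if !nmgr.HasDiscoveredNAT() {
		return
	}
	// GetMapping returns nil when no port mapping exists for the address.
	if ext := nmgr.GetMapping(ma.StringCast("/ip4/0.0.0.0/tcp/1234")); ext != nil {
		fmt.Println("externally reachable at:", ext)
	}
}
```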
diff --git a/p2p/host/basic/natmgr_test.go b/p2p/host/basic/natmgr_test.go
new file mode 100644
index 0000000000..be2567dd99
--- /dev/null
+++ b/p2p/host/basic/natmgr_test.go
@@ -0,0 +1,108 @@
+package basichost
+
+import (
+ "context"
+ "net/netip"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ ma "github.com/multiformats/go-multiaddr"
+
+ swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
+
+ "go.uber.org/mock/gomock"
+)
+
+func setupMockNAT(t *testing.T) (mockNAT *MockNAT, reset func()) {
+ t.Helper()
+ ctrl := gomock.NewController(t)
+ mockNAT = NewMockNAT(ctrl)
+ origDiscoverNAT := discoverNAT
+ discoverNAT = func(_ context.Context) (nat, error) { return mockNAT, nil }
+ return mockNAT, func() {
+ discoverNAT = origDiscoverNAT
+ ctrl.Finish()
+ }
+}
+
+func TestMapping(t *testing.T) {
+ mockNAT, reset := setupMockNAT(t)
+ defer reset()
+
+ sw := swarmt.GenSwarm(t)
+ defer sw.Close()
+ m := newNATManager(sw)
+ require.Eventually(t, func() bool {
+ m.natMx.Lock()
+ defer m.natMx.Unlock()
+ return m.nat != nil
+ }, time.Second, time.Millisecond)
+ externalAddr := netip.AddrPortFrom(netip.AddrFrom4([4]byte{1, 2, 3, 4}), 4321)
+ // pretend that we have a TCP mapping
+ mockNAT.EXPECT().GetMapping("tcp", 1234).Return(externalAddr, true)
+ require.Equal(t, "/ip4/1.2.3.4/tcp/4321", m.GetMapping(ma.StringCast("/ip4/0.0.0.0/tcp/1234")).String())
+
+ // pretend that we have a QUIC mapping
+ mockNAT.EXPECT().GetMapping("udp", 1234).Return(externalAddr, true)
+ require.Equal(t, "/ip4/1.2.3.4/udp/4321/quic-v1", m.GetMapping(ma.StringCast("/ip4/0.0.0.0/udp/1234/quic-v1")).String())
+
+ // pretend that there's no mapping
+ mockNAT.EXPECT().GetMapping("tcp", 1234).Return(netip.AddrPort{}, false)
+ require.Nil(t, m.GetMapping(ma.StringCast("/ip4/0.0.0.0/tcp/1234")))
+
+ // make sure this works for WebSocket addresses as well
+ mockNAT.EXPECT().GetMapping("tcp", 1234).Return(externalAddr, true)
+ require.Equal(t, "/ip4/1.2.3.4/tcp/4321/ws", m.GetMapping(ma.StringCast("/ip4/0.0.0.0/tcp/1234/ws")).String())
+
+ // make sure this works for WebTransport addresses as well
+ mockNAT.EXPECT().GetMapping("udp", 1234).Return(externalAddr, true)
+ require.Equal(t, "/ip4/1.2.3.4/udp/4321/quic-v1/webtransport", m.GetMapping(ma.StringCast("/ip4/0.0.0.0/udp/1234/quic-v1/webtransport")).String())
+}
+
+func TestAddAndRemoveListeners(t *testing.T) {
+ mockNAT, reset := setupMockNAT(t)
+ defer reset()
+
+ sw := swarmt.GenSwarm(t)
+ defer sw.Close()
+ m := newNATManager(sw)
+ require.Eventually(t, func() bool {
+ m.natMx.Lock()
+ defer m.natMx.Unlock()
+ return m.nat != nil
+ }, time.Second, time.Millisecond)
+
+ added := make(chan struct{}, 1)
+ // add a TCP listener
+ mockNAT.EXPECT().AddMapping(gomock.Any(), "tcp", 1234).Do(func(context.Context, string, int) { added <- struct{}{} })
+ require.NoError(t, sw.Listen(ma.StringCast("/ip4/0.0.0.0/tcp/1234")))
+ select {
+ case <-added:
+ case <-time.After(time.Second):
+ t.Fatal("didn't receive call to AddMapping")
+ }
+
+ // add a QUIC listener
+ mockNAT.EXPECT().AddMapping(gomock.Any(), "udp", 1234).Do(func(context.Context, string, int) { added <- struct{}{} })
+ require.NoError(t, sw.Listen(ma.StringCast("/ip4/0.0.0.0/udp/1234/quic-v1")))
+ select {
+ case <-added:
+ case <-time.After(time.Second):
+ t.Fatal("didn't receive call to AddMapping")
+ }
+
+ // remove the QUIC listener
+ mockNAT.EXPECT().RemoveMapping(gomock.Any(), "udp", 1234).Do(func(context.Context, string, int) { added <- struct{}{} })
+ sw.ListenClose(ma.StringCast("/ip4/0.0.0.0/udp/1234/quic-v1"))
+ select {
+ case <-added:
+ case <-time.After(time.Second):
+ t.Fatal("didn't receive call to RemoveMapping")
+ }
+
+ // test shutdown
+ mockNAT.EXPECT().RemoveMapping(gomock.Any(), "tcp", 1234).MaxTimes(1)
+ mockNAT.EXPECT().Close().MaxTimes(1)
+}
diff --git a/p2p/host/blank/blank.go b/p2p/host/blank/blank.go
new file mode 100644
index 0000000000..2e233c7173
--- /dev/null
+++ b/p2p/host/blank/blank.go
@@ -0,0 +1,233 @@
+package blankhost
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/libp2p/go-libp2p/core/connmgr"
+ "github.com/libp2p/go-libp2p/core/event"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/libp2p/go-libp2p/core/protocol"
+ "github.com/libp2p/go-libp2p/core/record"
+ "github.com/libp2p/go-libp2p/p2p/host/eventbus"
+
+ logging "github.com/libp2p/go-libp2p/gologshim"
+
+ ma "github.com/multiformats/go-multiaddr"
+ mstream "github.com/multiformats/go-multistream"
+)
+
+var log = logging.Logger("blankhost")
+
+// BlankHost is the thinnest implementation of the host.Host interface
+type BlankHost struct {
+ n network.Network
+ mux *mstream.MultistreamMuxer[protocol.ID]
+ cmgr connmgr.ConnManager
+ eventbus event.Bus
+ emitters struct {
+ evtLocalProtocolsUpdated event.Emitter
+ }
+}
+
+type config struct {
+ cmgr connmgr.ConnManager
+ eventBus event.Bus
+}
+
+type Option = func(cfg *config)
+
+func WithConnectionManager(cmgr connmgr.ConnManager) Option {
+ return func(cfg *config) {
+ cfg.cmgr = cmgr
+ }
+}
+
+func WithEventBus(eventBus event.Bus) Option {
+ return func(cfg *config) {
+ cfg.eventBus = eventBus
+ }
+}
+
+func NewBlankHost(n network.Network, options ...Option) *BlankHost {
+ cfg := config{
+ cmgr: &connmgr.NullConnMgr{},
+ }
+ for _, opt := range options {
+ opt(&cfg)
+ }
+
+ bh := &BlankHost{
+ n: n,
+ cmgr: cfg.cmgr,
+ mux: mstream.NewMultistreamMuxer[protocol.ID](),
+ eventbus: cfg.eventBus,
+ }
+ if bh.eventbus == nil {
+ bh.eventbus = eventbus.NewBus(eventbus.WithMetricsTracer(eventbus.NewMetricsTracer()))
+ }
+
+ // subscribe the connection manager to network notifications (has no effect with NullConnMgr)
+ n.Notify(bh.cmgr.Notifee())
+
+ var err error
+ if bh.emitters.evtLocalProtocolsUpdated, err = bh.eventbus.Emitter(&event.EvtLocalProtocolsUpdated{}); err != nil {
+ return nil
+ }
+
+ n.SetStreamHandler(bh.newStreamHandler)
+
+ // persist a signed peer record for self to the peerstore.
+ if err := bh.initSignedRecord(); err != nil {
+ log.Error("error creating blank host", "err", err)
+ return nil
+ }
+
+ return bh
+}
+
+func (bh *BlankHost) initSignedRecord() error {
+ cab, ok := peerstore.GetCertifiedAddrBook(bh.n.Peerstore())
+ if !ok {
+ log.Error("peerstore does not support signed records")
+ return errors.New("peerstore does not support signed records")
+ }
+ rec := peer.PeerRecordFromAddrInfo(peer.AddrInfo{ID: bh.ID(), Addrs: bh.Addrs()})
+ ev, err := record.Seal(rec, bh.Peerstore().PrivKey(bh.ID()))
+ if err != nil {
+ log.Error("failed to create signed record for self", "err", err)
+ return fmt.Errorf("failed to create signed record for self, err=%s", err)
+ }
+ _, err = cab.ConsumePeerRecord(ev, peerstore.PermanentAddrTTL)
+ if err != nil {
+ log.Error("failed to persist signed record to peerstore", "err", err)
+ return fmt.Errorf("failed to persist signed record for self, err=%s", err)
+ }
+ return err
+}
+
+var _ host.Host = (*BlankHost)(nil)
+
+func (bh *BlankHost) Addrs() []ma.Multiaddr {
+ addrs, err := bh.n.InterfaceListenAddresses()
+ if err != nil {
+ log.Debug("error retrieving network interface addrs", "err", err)
+ return nil
+ }
+
+ return addrs
+}
+
+func (bh *BlankHost) Close() error {
+ return bh.n.Close()
+}
+
+func (bh *BlankHost) Connect(ctx context.Context, ai peer.AddrInfo) error {
+ // absorb addresses into peerstore
+ bh.Peerstore().AddAddrs(ai.ID, ai.Addrs, peerstore.TempAddrTTL)
+
+ cs := bh.n.ConnsToPeer(ai.ID)
+ if len(cs) > 0 {
+ return nil
+ }
+
+ _, err := bh.Network().DialPeer(ctx, ai.ID)
+ if err != nil {
+ return fmt.Errorf("failed to dial: %w", err)
+ }
+ return err
+}
+
+func (bh *BlankHost) Peerstore() peerstore.Peerstore {
+ return bh.n.Peerstore()
+}
+
+func (bh *BlankHost) ID() peer.ID {
+ return bh.n.LocalPeer()
+}
+
+func (bh *BlankHost) NewStream(ctx context.Context, p peer.ID, protos ...protocol.ID) (network.Stream, error) {
+ s, err := bh.n.NewStream(ctx, p)
+ if err != nil {
+ return nil, fmt.Errorf("failed to open stream: %w", err)
+ }
+
+ selected, err := mstream.SelectOneOf(protos, s)
+ if err != nil {
+ s.Reset()
+ return nil, fmt.Errorf("failed to negotiate protocol: %w", err)
+ }
+
+ s.SetProtocol(selected)
+ bh.Peerstore().AddProtocols(p, selected)
+
+ return s, nil
+}
+
+func (bh *BlankHost) RemoveStreamHandler(pid protocol.ID) {
+ bh.Mux().RemoveHandler(pid)
+ bh.emitters.evtLocalProtocolsUpdated.Emit(event.EvtLocalProtocolsUpdated{
+ Removed: []protocol.ID{pid},
+ })
+}
+
+func (bh *BlankHost) SetStreamHandler(pid protocol.ID, handler network.StreamHandler) {
+ bh.Mux().AddHandler(pid, func(p protocol.ID, rwc io.ReadWriteCloser) error {
+ is := rwc.(network.Stream)
+ is.SetProtocol(p)
+ handler(is)
+ return nil
+ })
+ bh.emitters.evtLocalProtocolsUpdated.Emit(event.EvtLocalProtocolsUpdated{
+ Added: []protocol.ID{pid},
+ })
+}
+
+func (bh *BlankHost) SetStreamHandlerMatch(pid protocol.ID, m func(protocol.ID) bool, handler network.StreamHandler) {
+ bh.Mux().AddHandlerWithFunc(pid, m, func(p protocol.ID, rwc io.ReadWriteCloser) error {
+ is := rwc.(network.Stream)
+ is.SetProtocol(p)
+ handler(is)
+ return nil
+ })
+ bh.emitters.evtLocalProtocolsUpdated.Emit(event.EvtLocalProtocolsUpdated{
+ Added: []protocol.ID{pid},
+ })
+}
+
+// newStreamHandler is the remote-opened stream handler for network.Network
+func (bh *BlankHost) newStreamHandler(s network.Stream) {
+ protoID, handle, err := bh.Mux().Negotiate(s)
+ if err != nil {
+ log.Info("protocol negotiation failed", "err", err)
+ s.Reset()
+ return
+ }
+
+ s.SetProtocol(protoID)
+
+ handle(protoID, s)
+}
+
+// TODO: I'm not sure this really needs to be here
+func (bh *BlankHost) Mux() protocol.Switch {
+ return bh.mux
+}
+
+// TODO: also not sure this fits... there might be better ways around this (leaky abstractions)
+func (bh *BlankHost) Network() network.Network {
+ return bh.n
+}
+
+func (bh *BlankHost) ConnManager() connmgr.ConnManager {
+ return bh.cmgr
+}
+
+func (bh *BlankHost) EventBus() event.Bus {
+ return bh.eventbus
+}
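As a usage sketch for the blank host: the helper below wires two blank hosts over caller-supplied networks and opens a stream. The connectAndGreet function, the two already-listening network.Network values, and the /greet/1.0.0 protocol ID are all hypothetical; the BlankHost methods are as defined above.

```go
package example

import (
	"context"
	"fmt"

	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
	blankhost "github.com/libp2p/go-libp2p/p2p/host/blank"
)

// connectAndGreet dials from h1 to h2 and opens a stream on a made-up protocol.
func connectAndGreet(ctx context.Context, n1, n2 network.Network) error {
	h1 := blankhost.NewBlankHost(n1)
	defer h1.Close()
	h2 := blankhost.NewBlankHost(n2)
	defer h2.Close()

	h2.SetStreamHandler("/greet/1.0.0", func(s network.Stream) {
		defer s.Close()
		fmt.Println("got a stream from", s.Conn().RemotePeer())
	})

	if err := h1.Connect(ctx, peer.AddrInfo{ID: h2.ID(), Addrs: h2.Addrs()}); err != nil {
		return err
	}
	s, err := h1.NewStream(ctx, h2.ID(), "/greet/1.0.0")
	if err != nil {
		return err
	}
	return s.Close()
}
```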
diff --git a/p2p/host/eventbus/basic.go b/p2p/host/eventbus/basic.go
new file mode 100644
index 0000000000..6227848c83
--- /dev/null
+++ b/p2p/host/eventbus/basic.go
@@ -0,0 +1,477 @@
+package eventbus
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/event"
+ logging "github.com/libp2p/go-libp2p/gologshim"
+)
+
+var log = logging.Logger("eventbus")
+
+const slowConsumerWarningTimeout = time.Second
+
+// /////////////////////
+// BUS
+
+// basicBus is a type-based event delivery system
+type basicBus struct {
+ lk sync.RWMutex
+ nodes map[reflect.Type]*node
+ wildcard *wildcardNode
+ metricsTracer MetricsTracer
+}
+
+var _ event.Bus = (*basicBus)(nil)
+
+type emitter struct {
+ n *node
+ w *wildcardNode
+ typ reflect.Type
+ closed atomic.Bool
+ dropper func(reflect.Type)
+ metricsTracer MetricsTracer
+}
+
+func (e *emitter) Emit(evt interface{}) error {
+ if e.closed.Load() {
+ return fmt.Errorf("emitter is closed")
+ }
+
+ e.n.emit(evt)
+ e.w.emit(evt)
+
+ if e.metricsTracer != nil {
+ e.metricsTracer.EventEmitted(e.typ)
+ }
+ return nil
+}
+
+func (e *emitter) Close() error {
+ if !e.closed.CompareAndSwap(false, true) {
+ return fmt.Errorf("closed an emitter more than once")
+ }
+ if e.n.nEmitters.Add(-1) == 0 {
+ e.dropper(e.typ)
+ }
+ return nil
+}
+
+func NewBus(opts ...Option) event.Bus {
+ bus := &basicBus{
+ nodes: map[reflect.Type]*node{},
+ wildcard: &wildcardNode{},
+ }
+ for _, opt := range opts {
+ opt(bus)
+ }
+ return bus
+}
+
+func (b *basicBus) withNode(typ reflect.Type, cb func(*node), async func(*node)) {
+ b.lk.Lock()
+
+ n, ok := b.nodes[typ]
+ if !ok {
+ n = newNode(typ, b.metricsTracer)
+ b.nodes[typ] = n
+ }
+
+ n.lk.Lock()
+ b.lk.Unlock()
+
+ cb(n)
+
+ if async == nil {
+ n.lk.Unlock()
+ } else {
+ go func() {
+ defer n.lk.Unlock()
+ async(n)
+ }()
+ }
+}
+
+func (b *basicBus) tryDropNode(typ reflect.Type) {
+ b.lk.Lock()
+ n, ok := b.nodes[typ]
+ if !ok { // already dropped
+ b.lk.Unlock()
+ return
+ }
+
+ n.lk.Lock()
+ if n.nEmitters.Load() > 0 || len(n.sinks) > 0 {
+ n.lk.Unlock()
+ b.lk.Unlock()
+ return // still in use
+ }
+ n.lk.Unlock()
+
+ delete(b.nodes, typ)
+ b.lk.Unlock()
+}
+
+type wildcardSub struct {
+ ch chan interface{}
+ w *wildcardNode
+ metricsTracer MetricsTracer
+ name string
+ closeOnce sync.Once
+}
+
+func (w *wildcardSub) Out() <-chan interface{} {
+ return w.ch
+}
+
+func (w *wildcardSub) Close() error {
+ w.closeOnce.Do(func() {
+ w.w.removeSink(w.ch)
+ if w.metricsTracer != nil {
+ w.metricsTracer.RemoveSubscriber(reflect.TypeOf(event.WildcardSubscription))
+ }
+ })
+
+ return nil
+}
+
+func (w *wildcardSub) Name() string {
+ return w.name
+}
+
+type namedSink struct {
+ name string
+ ch chan interface{}
+}
+
+type sub struct {
+ ch chan interface{}
+ nodes []*node
+ dropper func(reflect.Type)
+ metricsTracer MetricsTracer
+ name string
+ closeOnce sync.Once
+}
+
+func (s *sub) Name() string {
+ return s.name
+}
+
+func (s *sub) Out() <-chan interface{} {
+ return s.ch
+}
+
+func (s *sub) Close() error {
+ go func() {
+ // drain the event channel, will return when closed and drained.
+ // this is necessary to unblock publishes to this channel.
+ for range s.ch {
+ }
+ }()
+ s.closeOnce.Do(func() {
+ for _, n := range s.nodes {
+ n.lk.Lock()
+
+ for i := 0; i < len(n.sinks); i++ {
+ if n.sinks[i].ch == s.ch {
+ n.sinks[i], n.sinks[len(n.sinks)-1] = n.sinks[len(n.sinks)-1], nil
+ n.sinks = n.sinks[:len(n.sinks)-1]
+
+ if s.metricsTracer != nil {
+ s.metricsTracer.RemoveSubscriber(n.typ)
+ }
+ break
+ }
+ }
+
+ tryDrop := len(n.sinks) == 0 && n.nEmitters.Load() == 0
+
+ n.lk.Unlock()
+
+ if tryDrop {
+ s.dropper(n.typ)
+ }
+ }
+ close(s.ch)
+ })
+ return nil
+}
+
+var _ event.Subscription = (*sub)(nil)
+
+// Subscribe creates a new subscription. Failing to drain the channel will cause
+// publishers to block. CancelFunc is guaranteed to return after the last send
+// to the channel.
+func (b *basicBus) Subscribe(evtTypes interface{}, opts ...event.SubscriptionOpt) (_ event.Subscription, err error) {
+ settings := newSubSettings()
+ for _, opt := range opts {
+ if err := opt(&settings); err != nil {
+ return nil, err
+ }
+ }
+
+ if evtTypes == event.WildcardSubscription {
+ out := &wildcardSub{
+ ch: make(chan interface{}, settings.buffer),
+ w: b.wildcard,
+ metricsTracer: b.metricsTracer,
+ name: settings.name,
+ }
+ b.wildcard.addSink(&namedSink{ch: out.ch, name: out.name})
+ return out, nil
+ }
+
+ types, ok := evtTypes.([]interface{})
+ if !ok {
+ types = []interface{}{evtTypes}
+ }
+
+ if len(types) > 1 {
+ for _, t := range types {
+ if t == event.WildcardSubscription {
+ return nil, fmt.Errorf("wildcard subscriptions must be started separately")
+ }
+ }
+ }
+
+ out := &sub{
+ ch: make(chan interface{}, settings.buffer),
+ nodes: make([]*node, len(types)),
+
+ dropper: b.tryDropNode,
+ metricsTracer: b.metricsTracer,
+ name: settings.name,
+ }
+
+ for _, etyp := range types {
+ if reflect.TypeOf(etyp).Kind() != reflect.Ptr {
+ return nil, errors.New("subscribe called with non-pointer type")
+ }
+ }
+
+ for i, etyp := range types {
+ typ := reflect.TypeOf(etyp)
+
+ b.withNode(typ.Elem(), func(n *node) {
+ n.sinks = append(n.sinks, &namedSink{ch: out.ch, name: out.name})
+ out.nodes[i] = n
+ if b.metricsTracer != nil {
+ b.metricsTracer.AddSubscriber(typ.Elem())
+ }
+ }, func(n *node) {
+ if n.keepLast {
+ l := n.last
+ if l == nil {
+ return
+ }
+ out.ch <- l
+ }
+ })
+ }
+
+ return out, nil
+}
+
+// Emitter creates a new emitter.
+//
+// eventType accepts typed nil pointers, and uses the type information to
+// select the output type.
+//
+// Example:
+// emit, err := eventbus.Emitter(new(EventT))
+// defer emit.Close() // MUST call this after being done with the emitter
+//
+// emit.Emit(EventT{})
+func (b *basicBus) Emitter(evtType interface{}, opts ...event.EmitterOpt) (e event.Emitter, err error) {
+ if evtType == event.WildcardSubscription {
+ return nil, fmt.Errorf("illegal emitter for wildcard subscription")
+ }
+
+ var settings emitterSettings
+ for _, opt := range opts {
+ if err := opt(&settings); err != nil {
+ return nil, err
+ }
+ }
+
+ typ := reflect.TypeOf(evtType)
+ if typ.Kind() != reflect.Ptr {
+ return nil, errors.New("emitter called with non-pointer type")
+ }
+ typ = typ.Elem()
+
+ b.withNode(typ, func(n *node) {
+ n.nEmitters.Add(1)
+ n.keepLast = n.keepLast || settings.makeStateful
+ e = &emitter{n: n, typ: typ, dropper: b.tryDropNode, w: b.wildcard, metricsTracer: b.metricsTracer}
+ }, nil)
+ return
+}
+
+// GetAllEventTypes returns all the event types that this bus has emitters
+// or subscribers for.
+func (b *basicBus) GetAllEventTypes() []reflect.Type {
+ b.lk.RLock()
+ defer b.lk.RUnlock()
+
+ types := make([]reflect.Type, 0, len(b.nodes))
+ for t := range b.nodes {
+ types = append(types, t)
+ }
+ return types
+}
+
+// /////////////////////
+// NODE
+
+type wildcardNode struct {
+ sync.RWMutex
+ nSinks atomic.Int32
+ sinks []*namedSink
+ metricsTracer MetricsTracer
+
+ slowConsumerTimer *time.Timer
+}
+
+func (n *wildcardNode) addSink(sink *namedSink) {
+ n.nSinks.Add(1) // ok to do outside the lock
+ n.Lock()
+ n.sinks = append(n.sinks, sink)
+ n.Unlock()
+
+ if n.metricsTracer != nil {
+ n.metricsTracer.AddSubscriber(reflect.TypeOf(event.WildcardSubscription))
+ }
+}
+
+func (n *wildcardNode) removeSink(ch chan interface{}) {
+ go func() {
+ // drain the event channel, will return when closed and drained.
+ // this is necessary to unblock publishes to this channel.
+ for range ch {
+ }
+ }()
+ n.nSinks.Add(-1) // ok to do outside the lock
+ n.Lock()
+ for i := 0; i < len(n.sinks); i++ {
+ if n.sinks[i].ch == ch {
+ n.sinks[i], n.sinks[len(n.sinks)-1] = n.sinks[len(n.sinks)-1], nil
+ n.sinks = n.sinks[:len(n.sinks)-1]
+ break
+ }
+ }
+ n.Unlock()
+}
+
+var wildcardType = reflect.TypeOf(event.WildcardSubscription)
+
+func (n *wildcardNode) emit(evt interface{}) {
+ if n.nSinks.Load() == 0 {
+ return
+ }
+
+ n.RLock()
+ for _, sink := range n.sinks {
+
+ // Sending metrics before sending on the channel allows us to
+ // record channel-full events before blocking.
+ sendSubscriberMetrics(n.metricsTracer, sink)
+
+ select {
+ case sink.ch <- evt:
+ default:
+ slowConsumerTimer := emitAndLogError(n.slowConsumerTimer, wildcardType, evt, sink)
+ defer func() {
+ n.Lock()
+ n.slowConsumerTimer = slowConsumerTimer
+ n.Unlock()
+ }()
+ }
+ }
+ n.RUnlock()
+}
+
+type node struct {
+ // Note: make sure to NEVER lock basicBus.lk when this lock is held
+ lk sync.Mutex
+
+ typ reflect.Type
+
+ // emitter ref count
+ nEmitters atomic.Int32
+
+ keepLast bool
+ last interface{}
+
+ sinks []*namedSink
+ metricsTracer MetricsTracer
+
+ slowConsumerTimer *time.Timer
+}
+
+func newNode(typ reflect.Type, metricsTracer MetricsTracer) *node {
+ return &node{
+ typ: typ,
+ metricsTracer: metricsTracer,
+ }
+}
+
+func (n *node) emit(evt interface{}) {
+ typ := reflect.TypeOf(evt)
+ if typ != n.typ {
+ panic(fmt.Sprintf("Emit called with wrong type. expected: %s, got: %s", n.typ, typ))
+ }
+
+ n.lk.Lock()
+ if n.keepLast {
+ n.last = evt
+ }
+
+ for _, sink := range n.sinks {
+
+ // Sending metrics before sending on the channel allows us to
+ // record channel-full events before blocking.
+ sendSubscriberMetrics(n.metricsTracer, sink)
+ select {
+ case sink.ch <- evt:
+ default:
+ n.slowConsumerTimer = emitAndLogError(n.slowConsumerTimer, n.typ, evt, sink)
+ }
+ }
+ n.lk.Unlock()
+}
+
+func emitAndLogError(timer *time.Timer, typ reflect.Type, evt interface{}, sink *namedSink) *time.Timer {
+ // Slow consumer. Log a warning if stalled for the timeout
+ if timer == nil {
+ timer = time.NewTimer(slowConsumerWarningTimeout)
+ } else {
+ timer.Reset(slowConsumerWarningTimeout)
+ }
+
+ select {
+ case sink.ch <- evt:
+ if !timer.Stop() {
+ <-timer.C
+ }
+ case <-timer.C:
+ log.Warn("subscriber is a slow consumer. This can lead to libp2p stalling and hard to debug issues.", "subscriber_name", sink.name, "event_type", typ)
+ // Continue to stall since there's nothing else we can do.
+ sink.ch <- evt
+ }
+
+ return timer
+}
+
+func sendSubscriberMetrics(metricsTracer MetricsTracer, sink *namedSink) {
+ if metricsTracer != nil {
+ metricsTracer.SubscriberQueueLength(sink.name, len(sink.ch)+1)
+ metricsTracer.SubscriberQueueFull(sink.name, len(sink.ch)+1 >= cap(sink.ch))
+ metricsTracer.SubscriberEventQueued(sink.name)
+ }
+}
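A short sketch of the emit/subscribe flow through this bus. EvtExample is a hypothetical event type invented for illustration; real callers use the types in core/event.

```go
package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p/p2p/host/eventbus"
)

// EvtExample is a hypothetical event type.
type EvtExample struct{ Msg string }

func main() {
	bus := eventbus.NewBus()

	// Subscribe takes a typed nil pointer; the buffer keeps emitters from
	// blocking while this subscriber catches up.
	sub, err := bus.Subscribe(new(EvtExample), eventbus.BufSize(16))
	if err != nil {
		panic(err)
	}
	defer sub.Close()

	em, err := bus.Emitter(new(EvtExample))
	if err != nil {
		panic(err)
	}
	defer em.Close()

	em.Emit(EvtExample{Msg: "hello"})

	evt := (<-sub.Out()).(EvtExample)
	fmt.Println(evt.Msg) // hello
}
```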
diff --git a/p2p/host/eventbus/basic_metrics.go b/p2p/host/eventbus/basic_metrics.go
new file mode 100644
index 0000000000..8e7b1e88d8
--- /dev/null
+++ b/p2p/host/eventbus/basic_metrics.go
@@ -0,0 +1,164 @@
+package eventbus
+
+import (
+ "reflect"
+ "strings"
+
+ "github.com/libp2p/go-libp2p/p2p/metricshelper"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+const metricNamespace = "libp2p_eventbus"
+
+var (
+ eventsEmitted = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "events_emitted_total",
+ Help: "Events Emitted",
+ },
+ []string{"event"},
+ )
+ totalSubscribers = prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "subscribers_total",
+ Help: "Number of subscribers for an event type",
+ },
+ []string{"event"},
+ )
+ subscriberQueueLength = prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "subscriber_queue_length",
+ Help: "Subscriber queue length",
+ },
+ []string{"subscriber_name"},
+ )
+ subscriberQueueFull = prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "subscriber_queue_full",
+ Help: "Subscriber Queue completely full",
+ },
+ []string{"subscriber_name"},
+ )
+ subscriberEventQueued = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "subscriber_event_queued",
+ Help: "Event Queued for subscriber",
+ },
+ []string{"subscriber_name"},
+ )
+ collectors = []prometheus.Collector{
+ eventsEmitted,
+ totalSubscribers,
+ subscriberQueueLength,
+ subscriberQueueFull,
+ subscriberEventQueued,
+ }
+)
+
+// MetricsTracer tracks metrics for the eventbus subsystem
+type MetricsTracer interface {
+
+ // EventEmitted counts the total number of events grouped by event type
+ EventEmitted(typ reflect.Type)
+
+ // AddSubscriber adds a subscriber for the event type
+ AddSubscriber(typ reflect.Type)
+
+ // RemoveSubscriber removes a subscriber for the event type
+ RemoveSubscriber(typ reflect.Type)
+
+ // SubscriberQueueLength is the length of the subscriber's channel
+ SubscriberQueueLength(name string, n int)
+
+ // SubscriberQueueFull tracks whether a subscriber's channel is full
+ SubscriberQueueFull(name string, isFull bool)
+
+ // SubscriberEventQueued counts the total number of events grouped by subscriber
+ SubscriberEventQueued(name string)
+}
+
+type metricsTracer struct{}
+
+var _ MetricsTracer = &metricsTracer{}
+
+type metricsTracerSetting struct {
+ reg prometheus.Registerer
+}
+
+type MetricsTracerOption func(*metricsTracerSetting)
+
+func WithRegisterer(reg prometheus.Registerer) MetricsTracerOption {
+ return func(s *metricsTracerSetting) {
+ if reg != nil {
+ s.reg = reg
+ }
+ }
+}
+
+func NewMetricsTracer(opts ...MetricsTracerOption) MetricsTracer {
+ setting := &metricsTracerSetting{reg: prometheus.DefaultRegisterer}
+ for _, opt := range opts {
+ opt(setting)
+ }
+ metricshelper.RegisterCollectors(setting.reg, collectors...)
+ return &metricsTracer{}
+}
+
+func (m *metricsTracer) EventEmitted(typ reflect.Type) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ *tags = append(*tags, strings.TrimPrefix(typ.String(), "event."))
+ eventsEmitted.WithLabelValues(*tags...).Inc()
+}
+
+func (m *metricsTracer) AddSubscriber(typ reflect.Type) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ *tags = append(*tags, strings.TrimPrefix(typ.String(), "event."))
+ totalSubscribers.WithLabelValues(*tags...).Inc()
+}
+
+func (m *metricsTracer) RemoveSubscriber(typ reflect.Type) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ *tags = append(*tags, strings.TrimPrefix(typ.String(), "event."))
+ totalSubscribers.WithLabelValues(*tags...).Dec()
+}
+
+func (m *metricsTracer) SubscriberQueueLength(name string, n int) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ *tags = append(*tags, name)
+ subscriberQueueLength.WithLabelValues(*tags...).Set(float64(n))
+}
+
+func (m *metricsTracer) SubscriberQueueFull(name string, isFull bool) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ *tags = append(*tags, name)
+ observer := subscriberQueueFull.WithLabelValues(*tags...)
+ if isFull {
+ observer.Set(1)
+ } else {
+ observer.Set(0)
+ }
+}
+
+func (m *metricsTracer) SubscriberEventQueued(name string) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ *tags = append(*tags, name)
+ subscriberEventQueued.WithLabelValues(*tags...).Inc()
+}
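For completeness, a sketch of plugging the tracer into a caller-owned Prometheus registry instead of the default one. The reg variable is made up; WithMetricsTracer is the bus option used elsewhere in this change.

```go
package main

import (
	"github.com/prometheus/client_golang/prometheus"

	"github.com/libp2p/go-libp2p/p2p/host/eventbus"
)

func main() {
	reg := prometheus.NewRegistry()
	// All libp2p_eventbus_* collectors register against reg rather than
	// prometheus.DefaultRegisterer.
	bus := eventbus.NewBus(
		eventbus.WithMetricsTracer(eventbus.NewMetricsTracer(eventbus.WithRegisterer(reg))),
	)
	_ = bus
}
```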
diff --git a/p2p/host/eventbus/basic_metrics_test.go b/p2p/host/eventbus/basic_metrics_test.go
new file mode 100644
index 0000000000..db1b2f7893
--- /dev/null
+++ b/p2p/host/eventbus/basic_metrics_test.go
@@ -0,0 +1,66 @@
+//go:build nocover
+
+package eventbus
+
+import (
+ "math/rand"
+ "reflect"
+ "testing"
+
+ "github.com/libp2p/go-libp2p/core/event"
+)
+
+func BenchmarkEventEmitted(b *testing.B) {
+ b.ReportAllocs()
+ types := []reflect.Type{
+ reflect.TypeOf(new(event.EvtLocalAddressesUpdated)),
+ reflect.TypeOf(new(event.EvtNATDeviceTypeChanged)),
+ reflect.TypeOf(new(event.EvtLocalProtocolsUpdated)),
+ }
+ mt := NewMetricsTracer()
+ for i := 0; i < b.N; i++ {
+ mt.EventEmitted(types[i%len(types)])
+ }
+}
+
+func BenchmarkSubscriberQueueLength(b *testing.B) {
+ b.ReportAllocs()
+ names := []string{"s1", "s2", "s3", "s4"}
+ mt := NewMetricsTracer()
+ for i := 0; i < b.N; i++ {
+ mt.SubscriberQueueLength(names[i%len(names)], i)
+ }
+}
+
+var eventTypes = []reflect.Type{
+ reflect.TypeOf(new(event.EvtLocalAddressesUpdated)),
+ reflect.TypeOf(new(event.EvtNATDeviceTypeChanged)),
+ reflect.TypeOf(new(event.EvtLocalProtocolsUpdated)),
+ reflect.TypeOf(new(event.EvtPeerIdentificationCompleted)),
+}
+
+var names = []string{
+ "one",
+ "two",
+ "three",
+ "four",
+ "five",
+}
+
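+// TestMetricsNoAllocNoCover checks that recording metrics is allocation-free:
+// label slices are pooled via metricshelper.GetStringSlice, so AllocsPerRun
+// is expected to report zero allocations for every tracer method.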
+func TestMetricsNoAllocNoCover(t *testing.T) {
+ mt := NewMetricsTracer()
+ tests := map[string]func(){
+ "EventEmitted": func() { mt.EventEmitted(eventTypes[rand.Intn(len(eventTypes))]) },
+ "AddSubscriber": func() { mt.AddSubscriber(eventTypes[rand.Intn(len(eventTypes))]) },
+ "RemoveSubscriber": func() { mt.RemoveSubscriber(eventTypes[rand.Intn(len(eventTypes))]) },
+ "SubscriberQueueLength": func() { mt.SubscriberQueueLength(names[rand.Intn(len(names))], rand.Intn(100)) },
+ "SubscriberQueueFull": func() { mt.SubscriberQueueFull(names[rand.Intn(len(names))], rand.Intn(2) == 1) },
+ "SubscriberEventQueued": func() { mt.SubscriberEventQueued(names[rand.Intn(len(names))]) },
+ }
+ for method, f := range tests {
+ allocs := testing.AllocsPerRun(1000, f)
+ if allocs > 0 {
+ t.Fatalf("Alloc Test: %s, got: %0.2f, expected: 0 allocs", method, allocs)
+ }
+ }
+}
diff --git a/p2p/host/eventbus/basic_test.go b/p2p/host/eventbus/basic_test.go
new file mode 100644
index 0000000000..defa63032c
--- /dev/null
+++ b/p2p/host/eventbus/basic_test.go
@@ -0,0 +1,771 @@
+package eventbus
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "reflect"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/event"
+
+ "github.com/libp2p/go-libp2p-testing/race"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+type (
+ EventA struct{}
+ EventB int
+)
+
+func getN() int {
+ n := 50000
+ if race.WithRace() {
+ n = 1000
+ }
+ return n
+}
+
+func (EventA) String() string {
+ return "Oh, Hello"
+}
+
+func TestDefaultSubIsBuffered(t *testing.T) {
+ bus := NewBus()
+ s, err := bus.Subscribe(new(EventA))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if cap(s.(*sub).ch) == 0 {
+ t.Fatalf("without any options subscribe should be buffered. was %d", cap(s.(*sub).ch))
+ }
+}
+
+func TestEmit(t *testing.T) {
+ bus := NewBus()
+ sub, err := bus.Subscribe(new(EventA))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ go func() {
+ defer sub.Close()
+ <-sub.Out()
+ }()
+
+ em, err := bus.Emitter(new(EventA))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer em.Close()
+
+ em.Emit(EventA{})
+}
+
+func TestSub(t *testing.T) {
+ bus := NewBus()
+ sub, err := bus.Subscribe(new(EventB))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var event EventB
+
+ var wait sync.WaitGroup
+ wait.Add(1)
+
+ go func() {
+ defer sub.Close()
+ event = (<-sub.Out()).(EventB)
+ wait.Done()
+ }()
+
+ em, err := bus.Emitter(new(EventB))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer em.Close()
+
+ em.Emit(EventB(7))
+ wait.Wait()
+
+ if event != 7 {
+ t.Error("got wrong event")
+ }
+}
+
+func TestGetAllEventTypes(t *testing.T) {
+ bus := NewBus()
+ require.Empty(t, bus.GetAllEventTypes())
+
+ // the wildcard subscription should be returned.
+ _, err := bus.Subscribe(event.WildcardSubscription)
+ require.NoError(t, err)
+
+ _, err = bus.Subscribe(new(EventB))
+ require.NoError(t, err)
+
+ evts := bus.GetAllEventTypes()
+ require.Len(t, evts, 1)
+ require.Equal(t, reflect.TypeOf((*EventB)(nil)).Elem(), evts[0])
+
+ _, err = bus.Emitter(new(EventA))
+ require.NoError(t, err)
+
+ evts = bus.GetAllEventTypes()
+ require.Len(t, evts, 2)
+ require.Contains(t, evts, reflect.TypeOf((*EventB)(nil)).Elem())
+ require.Contains(t, evts, reflect.TypeOf((*EventA)(nil)).Elem())
+}
+
+func TestEmitNoSubNoBlock(t *testing.T) {
+ bus := NewBus()
+
+ em, err := bus.Emitter(new(EventA))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer em.Close()
+
+ em.Emit(EventA{})
+}
+
+type mockLogger struct {
+ mu sync.Mutex
+ logs []string
+}
+
+func (m *mockLogger) Write(p []byte) (n int, err error) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ m.logs = append(m.logs, string(p))
+ return len(p), nil
+}
+
+func (m *mockLogger) Logs() []string {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ return m.logs
+}
+
+func (m *mockLogger) Clear() {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ m.logs = nil
+}
+
+func TestEmitLogsErrorOnStall(t *testing.T) {
+ oldLogger := log
+ defer func() {
+ log = oldLogger
+ }()
+ ml := mockLogger{}
+ log = slog.New(slog.NewTextHandler(&ml, nil))
+
+ bus1 := NewBus()
+ bus2 := NewBus()
+
+ eventSub, err := bus1.Subscribe(new(EventA))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ wildcardSub, err := bus2.Subscribe(event.WildcardSubscription)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ testCases := []event.Subscription{eventSub, wildcardSub}
+ eventBuses := []event.Bus{bus1, bus2}
+
+ for i, sub := range testCases {
+ bus := eventBuses[i]
+ em, err := bus.Emitter(new(EventA))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer em.Close()
+
+ go func() {
+ for i := 0; i < subSettingsDefault.buffer+2; i++ {
+ em.Emit(EventA{})
+ }
+ }()
+
+ require.EventuallyWithT(t, func(collect *assert.CollectT) {
+ logs := ml.Logs()
+ found := false
+ for _, log := range logs {
+ if strings.Contains(log, "slow consumer") {
+ found = true
+ break
+ }
+ }
+ assert.True(collect, found, "expected to find slow consumer log")
+ }, 3*time.Second, 500*time.Millisecond)
+ ml.Clear()
+
+ // Close the subscriber so the worker can finish.
+ sub.Close()
+ }
+}
+
+func TestEmitOnClosed(t *testing.T) {
+ bus := NewBus()
+
+ em, err := bus.Emitter(new(EventA))
+ if err != nil {
+ t.Fatal(err)
+ }
+ em.Close()
+ err = em.Emit(EventA{})
+ if err == nil {
+ t.Errorf("expected error")
+ }
+ if err.Error() != "emitter is closed" {
+ t.Error("unexpected message")
+ }
+}
+
+func TestClosingRaces(t *testing.T) {
+ subs := getN()
+ emits := getN()
+
+ var wg sync.WaitGroup
+ var lk sync.RWMutex
+ lk.Lock()
+
+ wg.Add(subs + emits)
+
+ b := NewBus()
+
+ for i := 0; i < subs; i++ {
+ go func() {
+ lk.RLock()
+ defer lk.RUnlock()
+
+ sub, _ := b.Subscribe(new(EventA))
+ time.Sleep(10 * time.Millisecond)
+ sub.Close()
+
+ wg.Done()
+ }()
+ }
+ for i := 0; i < emits; i++ {
+ go func() {
+ lk.RLock()
+ defer lk.RUnlock()
+
+ emit, _ := b.Emitter(new(EventA))
+ time.Sleep(10 * time.Millisecond)
+ emit.Close()
+
+ wg.Done()
+ }()
+ }
+
+ time.Sleep(10 * time.Millisecond)
+ lk.Unlock() // start everything
+
+ wg.Wait()
+
+ if len(b.(*basicBus).nodes) != 0 {
+ t.Error("expected no nodes")
+ }
+}
+
+func TestSubMany(t *testing.T) {
+ bus := NewBus()
+
+ var r atomic.Int32
+
+ n := getN()
+ var wait sync.WaitGroup
+ var ready sync.WaitGroup
+ wait.Add(n)
+ ready.Add(n)
+
+ for i := 0; i < n; i++ {
+ go func() {
+ sub, err := bus.Subscribe(new(EventB))
+ if err != nil {
+ panic(err)
+ }
+ defer sub.Close()
+
+ ready.Done()
+ r.Add(int32((<-sub.Out()).(EventB)))
+ wait.Done()
+ }()
+ }
+
+ em, err := bus.Emitter(new(EventB))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer em.Close()
+
+ ready.Wait()
+
+ em.Emit(EventB(7))
+ wait.Wait()
+
+ if int(r.Load()) != 7*n {
+ t.Error("got wrong result")
+ }
+}
+
+func TestWildcardSubscription(t *testing.T) {
+ bus := NewBus()
+ sub, err := bus.Subscribe(event.WildcardSubscription)
+ require.NoError(t, err)
+ defer sub.Close()
+
+ em1, err := bus.Emitter(new(EventA))
+ require.NoError(t, err)
+ defer em1.Close()
+
+ em2, err := bus.Emitter(new(EventB))
+ require.NoError(t, err)
+ defer em2.Close()
+
+ require.NoError(t, em1.Emit(EventA{}))
+ require.NoError(t, em2.Emit(EventB(1)))
+
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ var evts []interface{}
+
+LOOP:
+ for {
+ select {
+ case evt := <-sub.Out():
+ if evta, ok := evt.(EventA); ok {
+ evts = append(evts, evta)
+ }
+
+ if evtb, ok := evt.(EventB); ok {
+ evts = append(evts, evtb)
+ }
+
+ if len(evts) == 2 {
+ break LOOP
+ }
+
+ case <-ctx.Done():
+ t.Fatalf("did not receive events")
+ }
+ }
+}
+
+func TestManyWildcardSubscriptions(t *testing.T) {
+ bus := NewBus()
+ var subs []event.Subscription
+ for i := 0; i < 10; i++ {
+ sub, err := bus.Subscribe(event.WildcardSubscription)
+ require.NoError(t, err)
+ subs = append(subs, sub)
+ }
+
+ em1, err := bus.Emitter(new(EventA))
+ require.NoError(t, err)
+ defer em1.Close()
+
+ em2, err := bus.Emitter(new(EventB))
+ require.NoError(t, err)
+ defer em2.Close()
+
+ require.NoError(t, em1.Emit(EventA{}))
+ require.NoError(t, em2.Emit(EventB(1)))
+
+ // all 10 subscriptions received all 2 events.
+ for _, s := range subs {
+ require.Len(t, s.Out(), 2)
+ }
+
+ // close the first five subscriptions.
+ for _, s := range subs[:5] {
+ require.NoError(t, s.Close())
+ }
+
+ // emit another 2 events.
+ require.NoError(t, em1.Emit(EventA{}))
+ require.NoError(t, em2.Emit(EventB(1)))
+
+ // the first five subscriptions have 0 events because they were closed. The
+ // other five have 4 events.
+ require.EventuallyWithT(t, func(t *assert.CollectT) {
+ for _, s := range subs[:5] {
+ require.Len(t, s.Out(), 0, "expected closed subscription to have flushed events")
+ }
+ }, 2*time.Second, 100*time.Millisecond)
+
+ for _, s := range subs[5:] {
+ require.Len(t, s.Out(), 4)
+ }
+
+ // close them all, the first five will be closed twice (asserts idempotency).
+ for _, s := range subs {
+ require.NoError(t, s.Close())
+ }
+
+ for _, s := range subs {
+ require.Zero(t, s.(*wildcardSub).w.nSinks.Load())
+ }
+}
+
+func TestWildcardValidations(t *testing.T) {
+ bus := NewBus()
+
+ _, err := bus.Subscribe([]interface{}{event.WildcardSubscription, new(EventA), new(EventB)})
+ require.Error(t, err)
+
+ _, err = bus.Emitter(event.WildcardSubscription)
+ require.Error(t, err)
+}
+
+func TestSubType(t *testing.T) {
+ bus := NewBus()
+ sub, err := bus.Subscribe([]interface{}{new(EventA), new(EventB)})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var event fmt.Stringer
+
+ var wait sync.WaitGroup
+ wait.Add(1)
+
+ go func() {
+ defer sub.Close()
+ event = (<-sub.Out()).(EventA)
+ wait.Done()
+ }()
+
+ em, err := bus.Emitter(new(EventA))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer em.Close()
+
+ em.Emit(EventA{})
+ wait.Wait()
+
+ if event.String() != "Oh, Hello" {
+ t.Error("didn't get the correct message")
+ }
+}
+
+func TestNonStateful(t *testing.T) {
+ bus := NewBus()
+ em, err := bus.Emitter(new(EventB))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer em.Close()
+
+ sub1, err := bus.Subscribe(new(EventB), BufSize(1))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer sub1.Close()
+
+ select {
+ case <-sub1.Out():
+ t.Fatal("didn't expect to get an event")
+ default:
+ }
+
+ em.Emit(EventB(1))
+
+ select {
+ case e := <-sub1.Out():
+ if e.(EventB) != 1 {
+ t.Fatal("got wrong event")
+ }
+ default:
+ t.Fatal("expected to get an event")
+ }
+
+ sub2, err := bus.Subscribe(new(EventB), BufSize(1))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer sub2.Close()
+
+ select {
+ case <-sub2.Out():
+ t.Fatal("didn't expect to get an event")
+ default:
+ }
+}
+
+func TestStateful(t *testing.T) {
+ bus := NewBus()
+ em, err := bus.Emitter(new(EventB), Stateful)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer em.Close()
+
+ em.Emit(EventB(2))
+
+ sub, err := bus.Subscribe(new(EventB), BufSize(1))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer sub.Close()
+
+ if (<-sub.Out()).(EventB) != 2 {
+ t.Fatal("got wrong event")
+ }
+}
+
+func TestCloseBlocking(t *testing.T) {
+ bus := NewBus()
+ em, err := bus.Emitter(new(EventB))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ sub, err := bus.Subscribe(new(EventB))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ go func() {
+ em.Emit(EventB(159))
+ }()
+
+ time.Sleep(10 * time.Millisecond) // make sure that emit is blocked
+
+ sub.Close() // cancel sub
+}
+
+func TestSubFailFully(t *testing.T) {
+ bus := NewBus()
+ em, err := bus.Emitter(new(EventB))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = bus.Subscribe([]interface{}{new(EventB), 5})
+ if err == nil || err.Error() != "subscribe called with non-pointer type" {
+ t.Fatal(err)
+ }
+
+ done := make(chan struct{})
+ go func() {
+ defer close(done)
+ em.Emit(EventB(159)) // will hang if sub doesn't fail properly
+ }()
+
+ select {
+ case <-done:
+ case <-time.After(5 * time.Second):
+ t.Fatal("timeout")
+ }
+}
+
+func TestSubCloseMultiple(t *testing.T) {
+ bus := NewBus()
+
+ sub, err := bus.Subscribe([]interface{}{new(EventB)})
+ require.NoError(t, err)
+ err = sub.Close()
+ require.NoError(t, err)
+ err = sub.Close()
+ require.NoError(t, err)
+}
+
+func testMany(t testing.TB, subs, emits, msgs int, stateful bool) {
+ if race.WithRace() && subs+emits > 5000 {
+ t.SkipNow()
+ }
+
+ bus := NewBus()
+
+ var r atomic.Int64
+
+ var wait sync.WaitGroup
+ var ready sync.WaitGroup
+ wait.Add(subs + emits)
+ ready.Add(subs)
+
+ for i := 0; i < subs; i++ {
+ go func() {
+ sub, err := bus.Subscribe(new(EventB))
+ if err != nil {
+ panic(err)
+ }
+ defer sub.Close()
+
+ ready.Done()
+ for i := 0; i < emits*msgs; i++ {
+ e, ok := <-sub.Out()
+ if !ok {
+ panic("wat")
+ }
+ r.Add(int64(e.(EventB)))
+ }
+ wait.Done()
+ }()
+ }
+
+ for i := 0; i < emits; i++ {
+ go func() {
+ em, err := bus.Emitter(new(EventB), func(settings interface{}) error {
+ settings.(*emitterSettings).makeStateful = stateful
+ return nil
+ })
+ if err != nil {
+ panic(err)
+ }
+ defer em.Close()
+
+ ready.Wait()
+
+ for i := 0; i < msgs; i++ {
+ em.Emit(EventB(97))
+ }
+
+ wait.Done()
+ }()
+ }
+
+ wait.Wait()
+
+ if int(r.Load()) != 97*subs*emits*msgs {
+ t.Fatal("got wrong result")
+ }
+}
+
+func TestBothMany(t *testing.T) {
+ testMany(t, 10000, 100, 10, false)
+}
+
+type benchCase struct {
+ subs int
+ emits int
+ stateful bool
+}
+
+func (bc benchCase) name() string {
+ return fmt.Sprintf("subs-%03d/emits-%03d/stateful-%t", bc.subs, bc.emits, bc.stateful)
+}
+
+func genTestCases() []benchCase {
+ ret := make([]benchCase, 0, 200)
+ for stateful := 0; stateful < 2; stateful++ {
+ for subs := uint(0); subs <= 8; subs = subs + 4 {
+ for emits := uint(0); emits <= 8; emits = emits + 4 {
+ ret = append(ret, benchCase{1 << subs, 1 << emits, stateful == 1})
+ }
+ }
+ }
+ return ret
+}
+
+func BenchmarkEvents(b *testing.B) {
+ for _, bc := range genTestCases() {
+ b.Run(bc.name(), benchMany(bc))
+ }
+}
+
+func benchMany(bc benchCase) func(*testing.B) {
+ return func(b *testing.B) {
+ b.ReportAllocs()
+ subs := bc.subs
+ emits := bc.emits
+ stateful := bc.stateful
+ bus := NewBus()
+ var wait sync.WaitGroup
+ var ready sync.WaitGroup
+ wait.Add(subs + emits)
+ ready.Add(subs + emits)
+
+ for i := 0; i < subs; i++ {
+ go func() {
+ sub, err := bus.Subscribe(new(EventB))
+ if err != nil {
+ panic(err)
+ }
+ defer sub.Close()
+
+ ready.Done()
+ ready.Wait()
+ for i := 0; i < (b.N/emits)*emits; i++ {
+ _, ok := <-sub.Out()
+ if !ok {
+ panic("wat")
+ }
+ }
+ wait.Done()
+ }()
+ }
+
+ for i := 0; i < emits; i++ {
+ go func() {
+ em, err := bus.Emitter(new(EventB), func(settings interface{}) error {
+ settings.(*emitterSettings).makeStateful = stateful
+ return nil
+ })
+ if err != nil {
+ panic(err)
+ }
+ defer em.Close()
+
+ ready.Done()
+ ready.Wait()
+
+ for i := 0; i < b.N/emits; i++ {
+ em.Emit(EventB(97))
+ }
+
+ wait.Done()
+ }()
+ }
+ ready.Wait()
+ b.ResetTimer()
+ wait.Wait()
+ }
+}
+
+var div = 100
+
+func BenchmarkSubscribe(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N/div; i++ {
+ bus := NewBus()
+ for j := 0; j < div; j++ {
+ bus.Subscribe(new(EventA))
+ }
+ }
+}
+
+func BenchmarkEmitter(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N/div; i++ {
+ bus := NewBus()
+ for j := 0; j < div; j++ {
+ bus.Emitter(new(EventA))
+ }
+ }
+}
+
+func BenchmarkSubscribeAndEmitter(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N/div; i++ {
+ bus := NewBus()
+ for j := 0; j < div; j++ {
+ bus.Subscribe(new(EventA))
+ bus.Emitter(new(EventA))
+ }
+ }
+}
diff --git a/p2p/host/eventbus/opts.go b/p2p/host/eventbus/opts.go
new file mode 100644
index 0000000000..837a0683f2
--- /dev/null
+++ b/p2p/host/eventbus/opts.go
@@ -0,0 +1,79 @@
+package eventbus
+
+import (
+ "fmt"
+ "runtime"
+ "strings"
+ "sync/atomic"
+)
+
+type subSettings struct {
+ buffer int
+ name string
+}
+
+var subCnt atomic.Int64
+
+var subSettingsDefault = subSettings{
+ buffer: 16,
+}
+
+// newSubSettings returns the settings for a new subscriber.
+// The default naming strategy is sub-<fileName>-L<lineNum>.
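+// For example, a subscriber created at line 100 of basic_host.go might be
+// named ".../p2p/host/basic/basic_host.go-L100".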
+func newSubSettings() subSettings {
+ settings := subSettingsDefault
+ _, file, line, ok := runtime.Caller(2) // skip=1 is eventbus.Subscriber
+ if ok {
+ file = strings.TrimPrefix(file, "github.com/")
+ // remove the version number from the path, for example
+ // go-libp2p-package@v0.x.y-some-hash-123/file.go will be shortened to go-libp2p-package/file.go
+ if idx1 := strings.Index(file, "@"); idx1 != -1 {
+ if idx2 := strings.Index(file[idx1:], "/"); idx2 != -1 {
+ file = file[:idx1] + file[idx1+idx2:]
+ }
+ }
+ settings.name = fmt.Sprintf("%s-L%d", file, line)
+ } else {
+ settings.name = fmt.Sprintf("subscriber-%d", subCnt.Add(1))
+ }
+ return settings
+}
+
+func BufSize(n int) func(interface{}) error {
+ return func(s interface{}) error {
+ s.(*subSettings).buffer = n
+ return nil
+ }
+}
+
+func Name(name string) func(interface{}) error {
+ return func(s interface{}) error {
+ s.(*subSettings).name = name
+ return nil
+ }
+}
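+
+// For example, a subscription sketch using both options (EvtFoo is an
+// illustrative event type):
+//
+//	sub, err := bus.Subscribe(new(EvtFoo), BufSize(32), Name("my-subscriber"))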
+
+type emitterSettings struct {
+ makeStateful bool
+}
+
+// Stateful is an Emitter option which makes the eventbus channel
+// 'remember' the last event sent, and when a new subscriber joins the
+// bus, the remembered event is immediately sent to the subscription
+// channel.
+//
+// This makes it possible to provide state tracking for dynamic systems,
+// and lets new subscribers verify that there are Emitters on the channel.
+func Stateful(s interface{}) error {
+ s.(*emitterSettings).makeStateful = true
+ return nil
+}
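+
+// A minimal sketch of the stateful behaviour (EvtFoo is an illustrative
+// event type):
+//
+//	em, _ := bus.Emitter(new(EvtFoo), Stateful)
+//	em.Emit(EvtFoo{})                    // remembered by the bus
+//	sub, _ := bus.Subscribe(new(EvtFoo))
+//	evt := <-sub.Out()                   // a late subscriber still receives EvtFoo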
+
+type Option func(*basicBus)
+
+func WithMetricsTracer(metricsTracer MetricsTracer) Option {
+ return func(bus *basicBus) {
+ bus.metricsTracer = metricsTracer
+ bus.wildcard.metricsTracer = metricsTracer
+ }
+}
diff --git a/p2p/host/observedaddrs/manager.go b/p2p/host/observedaddrs/manager.go
new file mode 100644
index 0000000000..577b36405f
--- /dev/null
+++ b/p2p/host/observedaddrs/manager.go
@@ -0,0 +1,609 @@
+package observedaddrs
+
+import (
+ "context"
+ "errors"
+ "net"
+ "slices"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/event"
+ "github.com/libp2p/go-libp2p/core/network"
+ logging "github.com/libp2p/go-libp2p/gologshim"
+ basichost "github.com/libp2p/go-libp2p/p2p/host/basic"
+ "github.com/libp2p/go-libp2p/p2p/host/eventbus"
+
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
+var log = logging.Logger("observedaddrs")
+
+// ActivationThresh is the minimum number of observers required for an observed address
+// to be considered valid. We may not advertise an address even when it has this many
+// observations, if better observed addresses are available.
+var ActivationThresh = 4
+
+var (
+ // observedAddrManagerWorkerChannelSize defines how many addresses can be enqueued
+ // for adding to an ObservedAddrManager.
+ observedAddrManagerWorkerChannelSize = 16
+ // natTypeChangeTickrInterval is the interval between two NAT device type change events.
+ //
+ // Computing the NAT type is expensive and the information in the event is not too
+ // useful, so this interval is long.
+ natTypeChangeTickrInterval = 1 * time.Minute
+)
+
+const maxExternalThinWaistAddrsPerLocalAddr = 3
+
+// thinWaist is a struct that stores the address along with its thin waist prefix and the rest of the multiaddr
+type thinWaist struct {
+ Addr, TW, Rest ma.Multiaddr
+}
+
+var errTW = errors.New("not a thinwaist address")
+
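+// thinWaistForm splits a multiaddr into its thin waist (IP + TCP/UDP) prefix
+// and the rest of the address. For example, /ip4/1.2.3.4/udp/1/quic-v1 splits
+// into TW /ip4/1.2.3.4/udp/1 and Rest /quic-v1.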
+func thinWaistForm(a ma.Multiaddr) (thinWaist, error) {
+ if len(a) < 2 {
+ return thinWaist{}, errTW
+ }
+ if c0, c1 := a[0].Code(), a[1].Code(); (c0 != ma.P_IP4 && c0 != ma.P_IP6) || (c1 != ma.P_TCP && c1 != ma.P_UDP) {
+ return thinWaist{}, errTW
+ }
+ return thinWaist{Addr: a, TW: a[:2], Rest: a[2:]}, nil
+}
+
+// getObserver returns the observer for the multiaddress.
+// For an IPv4 multiaddress the observer is the IP address.
+// For an IPv6 multiaddress the observer is the /56 prefix of the IP address.
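+// For example, /ip4/1.2.3.4/tcp/1 yields the observer "1.2.3.4", while
+// /ip6/2001:db8::1/tcp/1 yields "2001:db8::", so all peers within one /56
+// IPv6 prefix count as a single observer.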
+func getObserver(a ma.Multiaddr) (string, error) {
+ ip, err := manet.ToIP(a)
+ if err != nil {
+ return "", err
+ }
+ if ip4 := ip.To4(); ip4 != nil {
+ return ip4.String(), nil
+ }
+ // Count /56 prefix as a single observer.
+ return ip.Mask(net.CIDRMask(56, 128)).String(), nil
+}
+
+// connMultiaddrs provides IsClosed along with network.ConnMultiaddrs. It is easier to mock this than network.Conn
+type connMultiaddrs interface {
+ network.ConnMultiaddrs
+ IsClosed() bool
+}
+
+// observerSetCacheSize is the number of transports sharing the same thin waist (tcp, ws, wss), (quic, webtransport, webrtc-direct).
+// This is 3 in practice right now, but keep a buffer of a few extra elements
+const observerSetCacheSize = 10
+
+// observerSet is the set of observers who have observed ThinWaistAddr
+type observerSet struct {
+ ObservedTWAddr ma.Multiaddr
+ ObservedBy map[string]int
+
+ mu sync.RWMutex // protects following
+ cachedMultiaddrs map[string]ma.Multiaddr // cache of localMultiaddr rest(addr - thinwaist) => output multiaddr
+}
+
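+// cacheMultiaddr joins the observed thin waist address with the given rest of
+// a local multiaddr and caches the result. It re-checks under the write lock
+// because another goroutine may have inserted the entry after the RLock lookup.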
+func (s *observerSet) cacheMultiaddr(addr ma.Multiaddr) ma.Multiaddr {
+ if addr == nil {
+ return s.ObservedTWAddr
+ }
+ addrStr := string(addr.Bytes())
+ s.mu.RLock()
+ res, ok := s.cachedMultiaddrs[addrStr]
+ s.mu.RUnlock()
+ if ok {
+ return res
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ // Check if some other goroutine added this while we were waiting
+ res, ok = s.cachedMultiaddrs[addrStr]
+ if ok {
+ return res
+ }
+ if s.cachedMultiaddrs == nil {
+ s.cachedMultiaddrs = make(map[string]ma.Multiaddr, observerSetCacheSize)
+ }
+ if len(s.cachedMultiaddrs) == observerSetCacheSize {
+ // remove one entry if we will go over the limit
+ for k := range s.cachedMultiaddrs {
+ delete(s.cachedMultiaddrs, k)
+ break
+ }
+ }
+ s.cachedMultiaddrs[addrStr] = ma.Join(s.ObservedTWAddr, addr)
+ return s.cachedMultiaddrs[addrStr]
+}
+
+type observation struct {
+ conn connMultiaddrs
+ observed ma.Multiaddr
+}
+
+// Manager maps a connection's local multiaddrs to their externally observable multiaddrs
+type Manager struct {
+ // Our listen addrs
+ listenAddrs func() []ma.Multiaddr
+ // worker channel for new observations
+ wch chan observation
+ // eventbus for identify observations
+ eventbus event.Bus
+
+ // for closing
+ wg sync.WaitGroup
+ ctx context.Context
+ ctxCancel context.CancelFunc
+ stopNotify func()
+
+ mu sync.RWMutex
+ // local thin waist => external thin waist => observerSet
+ externalAddrs map[string]map[string]*observerSet
+ // connObservedTWAddrs maps the connection to the last observed thin waist multiaddr on that connection
+ connObservedTWAddrs map[connMultiaddrs]ma.Multiaddr
+}
+
+var _ basichost.ObservedAddrsManager = (*Manager)(nil)
+
+// NewManager returns a new Manager that tracks the externally observed addresses for our listen addresses on the given network.
+func NewManager(eventbus event.Bus, net network.Network) (*Manager, error) {
+ listenAddrs := func() []ma.Multiaddr {
+ la := net.ListenAddresses()
+ ila, err := net.InterfaceListenAddresses()
+ if err != nil {
+ log.Warn("error getting interface listen addresses", "err", err)
+ }
+ return append(la, ila...)
+ }
+ o, err := newManagerWithListenAddrs(eventbus, listenAddrs)
+ if err != nil {
+ return nil, err
+ }
+
+ return o, nil
+}
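+
+// A minimal wiring sketch (h is an illustrative host exposing an event bus
+// and a network.Network):
+//
+//	mgr, err := observedaddrs.NewManager(h.EventBus(), h.Network())
+//	if err != nil {
+//		return err
+//	}
+//	mgr.Start(h.Network())
+//	defer mgr.Close()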
+
+// newManagerWithListenAddrs uses the listenAddrs directly to simplify creation in tests.
+func newManagerWithListenAddrs(bus event.Bus, listenAddrs func() []ma.Multiaddr) (*Manager, error) {
+ o := &Manager{
+ externalAddrs: make(map[string]map[string]*observerSet),
+ connObservedTWAddrs: make(map[connMultiaddrs]ma.Multiaddr),
+ wch: make(chan observation, observedAddrManagerWorkerChannelSize),
+ listenAddrs: listenAddrs,
+ eventbus: bus,
+ stopNotify: func() {},
+ }
+ o.ctx, o.ctxCancel = context.WithCancel(context.Background())
+ return o, nil
+}
+
+// Start tracking addrs
+func (o *Manager) Start(n network.Network) {
+ nb := &network.NotifyBundle{
+ DisconnectedF: func(_ network.Network, c network.Conn) {
+ o.removeConn(c)
+ },
+ }
+
+ sub, err := o.eventbus.Subscribe(new(event.EvtPeerIdentificationCompleted), eventbus.Name("observed-addrs-manager"))
+ if err != nil {
+ log.Error("failed to start observed addrs manager: identify subscription failed.", "err", err)
+ return
+ }
+ emitter, err := o.eventbus.Emitter(new(event.EvtNATDeviceTypeChanged), eventbus.Stateful)
+ if err != nil {
+ log.Error("failed to start observed addrs manager: nat device type changed emitter error.", "err", err)
+ sub.Close()
+ return
+ }
+
+ n.Notify(nb)
+ o.stopNotify = func() {
+ n.StopNotify(nb)
+ }
+
+ o.wg.Add(2)
+ go o.eventHandler(sub, emitter)
+ go o.worker()
+}
+
+// AddrsFor returns all activated observed addresses associated with the given
+// (resolved) listen address.
+func (o *Manager) AddrsFor(addr ma.Multiaddr) (addrs []ma.Multiaddr) {
+ if addr == nil {
+ return nil
+ }
+ o.mu.RLock()
+ defer o.mu.RUnlock()
+ tw, err := thinWaistForm(addr)
+ if err != nil {
+ return nil
+ }
+
+ observerSets := o.getTopExternalAddrs(string(tw.TW.Bytes()), ActivationThresh)
+ res := make([]ma.Multiaddr, 0, len(observerSets))
+ for _, s := range observerSets {
+ res = append(res, s.cacheMultiaddr(tw.Rest))
+ }
+ return res
+}
+
+// appendInferredAddrs infers the external addresses for the addresses we are
+// listening on, using the thin waist mapping.
+//
+// e.g. If we have observations for a QUIC address on port 9000, and we are
+// listening on the same interface and port 9000 for WebTransport, we can infer
+// the external WebTransport address.
+func (o *Manager) appendInferredAddrs(twToObserverSets map[string][]*observerSet, addrs []ma.Multiaddr) []ma.Multiaddr {
+ if twToObserverSets == nil {
+ twToObserverSets = make(map[string][]*observerSet)
+ for localTWStr := range o.externalAddrs {
+ twToObserverSets[localTWStr] = append(twToObserverSets[localTWStr], o.getTopExternalAddrs(localTWStr, ActivationThresh)...)
+ }
+ }
+ lAddrs := o.listenAddrs()
+ seenTWs := make(map[string]struct{})
+ for _, a := range lAddrs {
+ if _, ok := seenTWs[string(a.Bytes())]; ok {
+ // We've already added this
+ continue
+ }
+ seenTWs[string(a.Bytes())] = struct{}{}
+ t, err := thinWaistForm(a)
+ if err != nil {
+ continue
+ }
+ for _, s := range twToObserverSets[string(t.TW.Bytes())] {
+ addrs = append(addrs, s.cacheMultiaddr(t.Rest))
+ }
+ }
+ return addrs
+}
+
+// Addrs returns all observed addresses with at least minObservers observers.
+// If minObservers <= 0, it will return all addresses with at least ActivationThresh observers.
+func (o *Manager) Addrs(minObservers int) []ma.Multiaddr {
+ o.mu.RLock()
+ defer o.mu.RUnlock()
+
+ if minObservers <= 0 {
+ minObservers = ActivationThresh
+ }
+
+ m := make(map[string][]*observerSet)
+ for localTWStr := range o.externalAddrs {
+ m[localTWStr] = append(m[localTWStr], o.getTopExternalAddrs(localTWStr, minObservers)...)
+ }
+ addrs := make([]ma.Multiaddr, 0, maxExternalThinWaistAddrsPerLocalAddr*5) // assume 5 transports
+ addrs = o.appendInferredAddrs(m, addrs)
+ return addrs
+}
+
+func (o *Manager) getTopExternalAddrs(localTWStr string, minObservers int) []*observerSet {
+ observerSets := make([]*observerSet, 0, len(o.externalAddrs[localTWStr]))
+ for _, v := range o.externalAddrs[localTWStr] {
+ if len(v.ObservedBy) >= minObservers {
+ observerSets = append(observerSets, v)
+ }
+ }
+ slices.SortFunc(observerSets, func(a, b *observerSet) int {
+ diff := len(b.ObservedBy) - len(a.ObservedBy)
+ if diff != 0 {
+ return diff
+ }
+ // In case we have elements with equal counts,
+ // keep the address list stable by using the lexicographically smaller address
+ return a.ObservedTWAddr.Compare(b.ObservedTWAddr)
+ })
+ // TODO(sukunrt): Improve this logic. Return only if the addresses have a
+ // threshold fraction of the maximum observations
+ n := len(observerSets)
+ if n > maxExternalThinWaistAddrsPerLocalAddr {
+ n = maxExternalThinWaistAddrsPerLocalAddr
+ }
+ return observerSets[:n]
+}
+
+func (o *Manager) eventHandler(identifySub event.Subscription, natEmitter event.Emitter) {
+ defer o.wg.Done()
+ natTypeTicker := time.NewTicker(natTypeChangeTickrInterval)
+ defer natTypeTicker.Stop()
+ var udpNATType, tcpNATType network.NATDeviceType
+ for {
+ select {
+ case e := <-identifySub.Out():
+ evt := e.(event.EvtPeerIdentificationCompleted)
+ select {
+ case o.wch <- observation{
+ conn: evt.Conn,
+ observed: evt.ObservedAddr,
+ }:
+ default:
+ log.Debug("dropping address observation due to full buffer",
+ "from", evt.Conn.RemoteMultiaddr(),
+ "observed", evt.ObservedAddr,
+ )
+ }
+ case <-natTypeTicker.C:
+ newTCPNAT, newUDPNAT := o.getNATType()
+ if newUDPNAT != udpNATType {
+ natEmitter.Emit(event.EvtNATDeviceTypeChanged{
+ TransportProtocol: network.NATTransportUDP,
+ NatDeviceType: newUDPNAT,
+ })
+ }
+ if newTCPNAT != tcpNATType {
+ natEmitter.Emit(event.EvtNATDeviceTypeChanged{
+ TransportProtocol: network.NATTransportTCP,
+ NatDeviceType: newTCPNAT,
+ })
+ }
+ udpNATType, tcpNATType = newUDPNAT, newTCPNAT
+ case <-o.ctx.Done():
+ return
+ }
+ }
+}
+
+func (o *Manager) worker() {
+ defer o.wg.Done()
+ for {
+ select {
+ case obs := <-o.wch:
+ o.maybeRecordObservation(obs.conn, obs.observed)
+ case <-o.ctx.Done():
+ return
+ }
+ }
+}
+
+func (o *Manager) shouldRecordObservation(conn connMultiaddrs, observed ma.Multiaddr) (shouldRecord bool, localTW thinWaist, observedTW thinWaist) {
+ if conn == nil || observed == nil {
+ return false, thinWaist{}, thinWaist{}
+ }
+ // Ignore observations from loopback nodes. We already know our loopback
+ // addresses.
+ if manet.IsIPLoopback(observed) {
+ return false, thinWaist{}, thinWaist{}
+ }
+
+ // Provided by NAT64 peers, these addresses are specific to the peer and not publicly routable
+ if manet.IsNAT64IPv4ConvertedIPv6Addr(observed) {
+ return false, thinWaist{}, thinWaist{}
+ }
+
+ // Ignore p2p-circuit addresses. These are the observed address of the relay.
+ // Not useful for us.
+ if isRelayedAddress(observed) {
+ return false, thinWaist{}, thinWaist{}
+ }
+
+ localTW, err := thinWaistForm(conn.LocalMultiaddr())
+ if err != nil {
+ log.Info("failed to get interface listen addrs", "err", err)
+ return false, thinWaist{}, thinWaist{}
+ }
+
+ listenAddrs := o.listenAddrs()
+ for i, a := range listenAddrs {
+ tw, err := thinWaistForm(a)
+ if err != nil {
+ listenAddrs[i] = nil
+ continue
+ }
+ listenAddrs[i] = tw.TW
+ }
+
+ if !ma.Contains(listenAddrs, localTW.TW) {
+ // not in our list
+ return false, thinWaist{}, thinWaist{}
+ }
+
+ observedTW, err = thinWaistForm(observed)
+ if err != nil {
+ return false, thinWaist{}, thinWaist{}
+ }
+ if !hasConsistentTransport(localTW.TW, observedTW.TW) {
+ log.Debug(
+ "invalid observed address for local address",
+ "observed", observed,
+ "local", localTW.Addr,
+ )
+ return false, thinWaist{}, thinWaist{}
+ }
+
+ return true, localTW, observedTW
+}
+
+func (o *Manager) maybeRecordObservation(conn connMultiaddrs, observed ma.Multiaddr) {
+ shouldRecord, localTW, observedTW := o.shouldRecordObservation(conn, observed)
+ if !shouldRecord {
+ return
+ }
+ log.Debug("added own observed listen addr", "conn", conn, "observed", observed)
+
+ o.mu.Lock()
+ defer o.mu.Unlock()
+ o.recordObservationUnlocked(conn, localTW, observedTW)
+}
+
+func (o *Manager) recordObservationUnlocked(conn connMultiaddrs, localTW, observedTW thinWaist) {
+ if conn.IsClosed() {
+ // don't record if the connection is already closed. Any previous observations will be removed in
+ // the disconnected callback
+ return
+ }
+ localTWStr := string(localTW.TW.Bytes())
+ observedTWStr := string(observedTW.TW.Bytes())
+ observer, err := getObserver(conn.RemoteMultiaddr())
+ if err != nil {
+ return
+ }
+
+ prevObservedTWAddr, ok := o.connObservedTWAddrs[conn]
+ if ok {
+ // we have received the same observation again, nothing to do
+ if prevObservedTWAddr.Equal(observedTW.TW) {
+ return
+ }
+ // if we have a previous entry remove it from externalAddrs
+ o.removeExternalAddrsUnlocked(observer, localTWStr, string(prevObservedTWAddr.Bytes()))
+ }
+ o.connObservedTWAddrs[conn] = observedTW.TW
+ o.addExternalAddrsUnlocked(observedTW.TW, observer, localTWStr, observedTWStr)
+}
+
+func (o *Manager) removeExternalAddrsUnlocked(observer, localTWStr, observedTWStr string) {
+ s, ok := o.externalAddrs[localTWStr][observedTWStr]
+ if !ok {
+ return
+ }
+ s.ObservedBy[observer]--
+ if s.ObservedBy[observer] <= 0 {
+ delete(s.ObservedBy, observer)
+ }
+ if len(s.ObservedBy) == 0 {
+ delete(o.externalAddrs[localTWStr], observedTWStr)
+ }
+ if len(o.externalAddrs[localTWStr]) == 0 {
+ delete(o.externalAddrs, localTWStr)
+ }
+}
+
+func (o *Manager) addExternalAddrsUnlocked(observedTWAddr ma.Multiaddr, observer, localTWStr, observedTWStr string) {
+ s, ok := o.externalAddrs[localTWStr][observedTWStr]
+ if !ok {
+ s = &observerSet{
+ ObservedTWAddr: observedTWAddr,
+ ObservedBy: make(map[string]int),
+ }
+ if _, ok := o.externalAddrs[localTWStr]; !ok {
+ o.externalAddrs[localTWStr] = make(map[string]*observerSet)
+ }
+ o.externalAddrs[localTWStr][observedTWStr] = s
+ }
+ s.ObservedBy[observer]++
+}
+
+func (o *Manager) removeConn(conn connMultiaddrs) {
+ if conn == nil {
+ return
+ }
+ o.mu.Lock()
+ defer o.mu.Unlock()
+
+ observedTWAddr, ok := o.connObservedTWAddrs[conn]
+ if !ok {
+ return
+ }
+ delete(o.connObservedTWAddrs, conn)
+
+ localTW, err := thinWaistForm(conn.LocalMultiaddr())
+ if err != nil {
+ return
+ }
+
+ observer, err := getObserver(conn.RemoteMultiaddr())
+ if err != nil {
+ return
+ }
+
+ o.removeExternalAddrsUnlocked(observer, string(localTW.TW.Bytes()), string(observedTWAddr.Bytes()))
+}
+
+func (o *Manager) getNATType() (tcpNATType, udpNATType network.NATDeviceType) {
+ o.mu.RLock()
+ defer o.mu.RUnlock()
+
+ var tcpCounts, udpCounts []int
+ var tcpTotal, udpTotal int
+ for _, m := range o.externalAddrs {
+ isTCP := false
+ for _, v := range m {
+ for _, c := range v.ObservedTWAddr {
+ if c.Code() == ma.P_TCP {
+ isTCP = true
+ break
+ }
+ }
+ }
+ for _, v := range m {
+ if isTCP {
+ tcpCounts = append(tcpCounts, len(v.ObservedBy))
+ tcpTotal += len(v.ObservedBy)
+ } else {
+ udpCounts = append(udpCounts, len(v.ObservedBy))
+ udpTotal += len(v.ObservedBy)
+ }
+ }
+ }
+
+ sort.Sort(sort.Reverse(sort.IntSlice(tcpCounts)))
+ sort.Sort(sort.Reverse(sort.IntSlice(udpCounts)))
+
+ tcpTopCounts, udpTopCounts := 0, 0
+ for i := 0; i < maxExternalThinWaistAddrsPerLocalAddr && i < len(tcpCounts); i++ {
+ tcpTopCounts += tcpCounts[i]
+ }
+ for i := 0; i < maxExternalThinWaistAddrsPerLocalAddr && i < len(udpCounts); i++ {
+ udpTopCounts += udpCounts[i]
+ }
+
+ // If the top elements cover more than 1/2 of all the observations, there's a > 50% chance that
+ // hole punching based on the outputs of the observed address manager will succeed
+ //
+ // The `3*maxExternalThinWaistAddrsPerLocalAddr` is a magic number; we just want sufficient
+ // observations to decide the NAT type
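+ //
+ // For example, with 30 UDP observations of which the top 3 addresses account
+ // for 20, udpTopCounts (20) >= udpTotal/2 (15), and the UDP NAT is classified
+ // as endpoint independent.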
+ if tcpTotal >= 3*maxExternalThinWaistAddrsPerLocalAddr {
+ if tcpTopCounts >= tcpTotal/2 {
+ tcpNATType = network.NATDeviceTypeEndpointIndependent
+ } else {
+ tcpNATType = network.NATDeviceTypeEndpointDependent
+ }
+ }
+ if udpTotal >= 3*maxExternalThinWaistAddrsPerLocalAddr {
+ if udpTopCounts >= udpTotal/2 {
+ udpNATType = network.NATDeviceTypeEndpointIndependent
+ } else {
+ udpNATType = network.NATDeviceTypeEndpointDependent
+ }
+ }
+ return
+}
+
+func (o *Manager) Close() error {
+ o.stopNotify()
+ o.ctxCancel()
+ o.wg.Wait()
+ return nil
+}
+
+// hasConsistentTransport returns true if the thin waist address `aTW` shares the same
+// protocols as `bTW`
+func hasConsistentTransport(aTW, bTW ma.Multiaddr) bool {
+ if len(aTW) != len(bTW) {
+ return false
+ }
+ for i, a := range aTW {
+ if bTW[i].Code() != a.Code() {
+ return false
+ }
+ }
+ return true
+}
+
+func isRelayedAddress(a ma.Multiaddr) bool {
+ for _, c := range a {
+ if c.Code() == ma.P_CIRCUIT {
+ return true
+ }
+ }
+ return false
+}
diff --git a/p2p/host/observedaddrs/manager_glass_test.go b/p2p/host/observedaddrs/manager_glass_test.go
new file mode 100644
index 0000000000..a9e8e0ae76
--- /dev/null
+++ b/p2p/host/observedaddrs/manager_glass_test.go
@@ -0,0 +1,172 @@
+package observedaddrs
+
+// This test lives in the identify package, not the identify_test package, so it
+// can access internal types.
+
+import (
+ "fmt"
+ "sync/atomic"
+ "testing"
+
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/multiformats/go-multiaddr/matest"
+ "github.com/stretchr/testify/require"
+)
+
+type mockConn struct {
+ local, remote ma.Multiaddr
+ isClosed atomic.Bool
+}
+
+// LocalMultiaddr implements connMultiaddrs
+func (c *mockConn) LocalMultiaddr() ma.Multiaddr {
+ return c.local
+}
+
+// RemoteMultiaddr implements connMultiaddrs
+func (c *mockConn) RemoteMultiaddr() ma.Multiaddr {
+ return c.remote
+}
+
+func (c *mockConn) Close() {
+ c.isClosed.Store(true)
+}
+
+func (c *mockConn) IsClosed() bool {
+ return c.isClosed.Load()
+}
+
+func TestShouldRecordObservationWithWebTransport(t *testing.T) {
+ listenAddr := ma.StringCast("/ip4/0.0.0.0/udp/0/quic-v1/webtransport/certhash/uEgNmb28")
+ listenAddrs := func() []ma.Multiaddr { return []ma.Multiaddr{listenAddr} }
+
+ c := &mockConn{
+ local: listenAddr,
+ remote: ma.StringCast("/ip4/1.2.3.6/udp/1236/quic-v1/webtransport"),
+ }
+ observedAddr := ma.StringCast("/ip4/1.2.3.4/udp/1231/quic-v1/webtransport")
+ o, err := newManagerWithListenAddrs(nil, listenAddrs)
+ require.NoError(t, err)
+ shouldRecord, _, _ := o.shouldRecordObservation(c, observedAddr)
+ require.True(t, shouldRecord)
+}
+
+func TestShouldNotRecordObservationWithRelayedAddr(t *testing.T) {
+ listenAddr := ma.StringCast("/ip4/1.2.3.4/udp/8888/quic-v1/p2p-circuit")
+ listenAddrs := func() []ma.Multiaddr { return []ma.Multiaddr{listenAddr} }
+
+ c := &mockConn{
+ local: listenAddr,
+ remote: ma.StringCast("/ip4/1.2.3.6/udp/1236/quic-v1/p2p-circuit"),
+ }
+ observedAddr := ma.StringCast("/ip4/1.2.3.4/udp/1231/quic-v1/p2p-circuit")
+ o, err := newManagerWithListenAddrs(nil, listenAddrs)
+ require.NoError(t, err)
+ shouldRecord, _, _ := o.shouldRecordObservation(c, observedAddr)
+ require.False(t, shouldRecord)
+}
+
+func TestShouldRecordObservationWithNAT64Addr(t *testing.T) {
+ listenAddr1 := ma.StringCast("/ip4/0.0.0.0/tcp/1234")
+ listenAddr2 := ma.StringCast("/ip6/::/tcp/1234")
+ listenAddrs := func() []ma.Multiaddr { return []ma.Multiaddr{listenAddr1, listenAddr2} }
+ c4 := &mockConn{
+ local: listenAddr1,
+ remote: ma.StringCast("/ip4/1.2.3.6/tcp/4321"),
+ }
+ c6 := &mockConn{
+ local: listenAddr2,
+ remote: ma.StringCast("/ip6/1::4/tcp/4321"),
+ }
+
+ cases := []struct {
+ addr ma.Multiaddr
+ want bool
+ conn *mockConn
+ failureReason string
+ }{
+ {
+ addr: ma.StringCast("/ip4/1.2.3.4/tcp/1234"),
+ want: true,
+ failureReason: "IPv4 should be observed",
+ conn: c4,
+ },
+ {
+ addr: ma.StringCast("/ip6/1::4/tcp/1234"),
+ want: true,
+ failureReason: "public IPv6 address should be observed",
+ conn: c6,
+ },
+ {
+ addr: ma.StringCast("/ip6/64:ff9b::192.0.1.2/tcp/1234"),
+ want: false,
+ failureReason: "NAT64 IPv6 address shouldn't be observed",
+ conn: c6,
+ },
+ }
+
+ o, err := newManagerWithListenAddrs(nil, listenAddrs)
+ require.NoError(t, err)
+ for i, tc := range cases {
+ t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+ if shouldRecord, _, _ := o.shouldRecordObservation(tc.conn, tc.addr); shouldRecord != tc.want {
+ t.Fatalf("%s %s", tc.addr, tc.failureReason)
+ }
+ })
+ }
+}
+
+func TestThinWaistForm(t *testing.T) {
+ tc := []struct {
+ input string
+ tw string
+ rest string
+ err bool
+ }{{
+ input: "/ip4/1.2.3.4/tcp/1",
+ tw: "/ip4/1.2.3.4/tcp/1",
+ rest: "",
+ }, {
+ input: "/ip4/1.2.3.4/tcp/1/ws",
+ tw: "/ip4/1.2.3.4/tcp/1",
+ rest: "/ws",
+ }, {
+ input: "/ip4/127.0.0.1/udp/1/quic-v1",
+ tw: "/ip4/127.0.0.1/udp/1",
+ rest: "/quic-v1",
+ }, {
+ input: "/ip4/1.2.3.4/udp/1/quic-v1/webtransport",
+ tw: "/ip4/1.2.3.4/udp/1",
+ rest: "/quic-v1/webtransport",
+ }, {
+ input: "/ip4/1.2.3.4/",
+ err: true,
+ }, {
+ input: "/tcp/1",
+ err: true,
+ }, {
+ input: "/ip6/::1/tcp/1",
+ tw: "/ip6/::1/tcp/1",
+ rest: "",
+ }}
+ for i, tt := range tc {
+ t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+ inputAddr := ma.StringCast(tt.input)
+ tw, err := thinWaistForm(inputAddr)
+ if tt.err {
+ require.Equal(t, tw, thinWaist{})
+ require.Error(t, err)
+ return
+ }
+ wantTW := ma.StringCast(tt.tw)
+ var restTW ma.Multiaddr
+ if tt.rest != "" {
+ restTW = ma.StringCast(tt.rest)
+ }
+ matest.AssertEqualMultiaddr(t, inputAddr, tw.Addr)
+ matest.AssertEqualMultiaddr(t, wantTW, tw.TW)
+ matest.AssertEqualMultiaddr(t, restTW, tw.Rest)
+ })
+ }
+}
diff --git a/p2p/host/observedaddrs/manager_test.go b/p2p/host/observedaddrs/manager_test.go
new file mode 100644
index 0000000000..95f6454316
--- /dev/null
+++ b/p2p/host/observedaddrs/manager_test.go
@@ -0,0 +1,637 @@
+package observedaddrs
+
+import (
+ crand "crypto/rand"
+ "fmt"
+ "net"
+ "slices"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/p2p/host/eventbus"
+
+ swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
+ ma "github.com/multiformats/go-multiaddr"
+ matest "github.com/multiformats/go-multiaddr/matest"
+ manet "github.com/multiformats/go-multiaddr/net"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func requireAddrsMatch(t *testing.T, a, b []ma.Multiaddr) {
+ t.Helper()
+ slices.SortFunc(a, func(a, b ma.Multiaddr) int { return a.Compare(b) })
+ slices.SortFunc(b, func(a, b ma.Multiaddr) int { return a.Compare(b) })
+ requireEqualAddrs(t, a, b)
+}
+
+func requireEqualAddrs(t *testing.T, a, b []ma.Multiaddr) {
+ t.Helper()
+ if len(a) != len(b) {
+ t.Fatalf("length mismatch: %d != %d", len(a), len(b))
+ }
+ for i, addr := range a {
+ if !addr.Equal(b[i]) {
+ t.Fatalf("addr mismatch: %s != %s", addr, b[i])
+ }
+ }
+}
+
+func newConn(local, remote ma.Multiaddr) *mockConn {
+ return &mockConn{local: local, remote: remote}
+}
+
+func TestObservedAddrsManager(t *testing.T) {
+ tcp4ListenAddr := ma.StringCast("/ip4/192.168.1.100/tcp/1")
+ quic4ListenAddr := ma.StringCast("/ip4/0.0.0.0/udp/1/quic-v1")
+ webTransport4ListenAddr := ma.StringCast("/ip4/0.0.0.0/udp/1/quic-v1/webtransport/certhash/uEgNmb28")
+ tcp6ListenAddr := ma.StringCast("/ip6/2004::1/tcp/1")
+ quic6ListenAddr := ma.StringCast("/ip6/::/udp/1/quic-v1")
+ webTransport6ListenAddr := ma.StringCast("/ip6/::/udp/1/quic-v1/webtransport/certhash/uEgNmb28")
+ newObservedAddrMgr := func() *Manager {
+ listenAddrsFunc := func() []ma.Multiaddr {
+ return []ma.Multiaddr{
+ tcp4ListenAddr, quic4ListenAddr, webTransport4ListenAddr, tcp6ListenAddr, quic6ListenAddr, webTransport6ListenAddr,
+ }
+ }
+ eb := eventbus.NewBus()
+ o, err := newManagerWithListenAddrs(eb, listenAddrsFunc)
+ if err != nil {
+ t.Fatal(err)
+ }
+ s := swarmt.GenSwarm(t)
+ o.Start(s)
+ t.Cleanup(func() { o.Close() })
+ return o
+ }
+
+ checkAllEntriesRemoved := func(o *Manager) bool {
+ return len(o.Addrs(0)) == 0 && len(o.externalAddrs) == 0 && len(o.connObservedTWAddrs) == 0
+ }
+
+ getConns := func(t *testing.T, n int, protocolCode int) []*mockConn {
+ t.Helper()
+ localAddrMap := map[int]ma.Multiaddr{
+ ma.P_TCP: tcp4ListenAddr,
+ ma.P_QUIC_V1: quic4ListenAddr,
+ ma.P_WEBTRANSPORT: webTransport4ListenAddr,
+ }
+ protoPartMap := map[int]ma.Multiaddr{
+ ma.P_TCP: ma.StringCast("/tcp/1"),
+ ma.P_QUIC_V1: ma.StringCast("/udp/1/quic-v1"),
+ ma.P_WEBTRANSPORT: ma.StringCast("/udp/1/quic-v1/webtransport"),
+ }
+
+ localAddr, ok := localAddrMap[protocolCode]
+ if !ok {
+ t.Fatalf("unknown protocol code: %d", protocolCode)
+ }
+ protoPart, ok := protoPartMap[protocolCode]
+ if !ok {
+ t.Fatalf("unknown protocol code: %d", protocolCode)
+ }
+
+ conns := make([]*mockConn, 0, n)
+ for i := 0; i < n; i++ {
+ ipPart := ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d", i))
+ conns = append(conns, newConn(localAddr, ma.Join(ipPart, protoPart)))
+ }
+ return conns
+ }
+
+ t.Run("Single Observation", func(t *testing.T) {
+ o := newObservedAddrMgr()
+ defer o.Close()
+ observed := ma.StringCast("/ip4/2.2.2.2/tcp/2")
+ c1 := newConn(tcp4ListenAddr, ma.StringCast("/ip4/1.2.3.1/tcp/1"))
+ c2 := newConn(tcp4ListenAddr, ma.StringCast("/ip4/1.2.3.2/tcp/1"))
+ c3 := newConn(tcp4ListenAddr, ma.StringCast("/ip4/1.2.3.3/tcp/1"))
+ c4 := newConn(tcp4ListenAddr, ma.StringCast("/ip4/1.2.3.4/tcp/1"))
+ o.maybeRecordObservation(c1, observed)
+ o.maybeRecordObservation(c2, observed)
+ o.maybeRecordObservation(c3, observed)
+ o.maybeRecordObservation(c4, observed)
+ require.Eventually(t, func() bool {
+ return matest.AssertEqualMultiaddrs(t, o.Addrs(0), []ma.Multiaddr{observed})
+ }, 1*time.Second, 100*time.Millisecond)
+ o.removeConn(c1)
+ o.removeConn(c2)
+ o.removeConn(c3)
+ o.removeConn(c4)
+ require.Eventually(t, func() bool {
+ return checkAllEntriesRemoved(o)
+ }, 1*time.Second, 100*time.Millisecond)
+ })
+
+ t.Run("many observed addrs output size limited", func(t *testing.T) {
+ o := newObservedAddrMgr()
+ defer o.Close()
+ conns := getConns(t, 40, ma.P_TCP)
+ observedAddrs := make([]ma.Multiaddr, maxExternalThinWaistAddrsPerLocalAddr*2)
+ for i := 0; i < len(observedAddrs); i++ {
+ observedAddrs[i] = ma.StringCast(fmt.Sprintf("/ip4/2.2.2.%d/tcp/2", i))
+ }
+ for i, c := range conns {
+ // avoid the async nature of Record
+ o.maybeRecordObservation(c, observedAddrs[i%len(observedAddrs)])
+ }
+ require.Eventually(t, func() bool {
+ return len(o.Addrs(ActivationThresh)) == maxExternalThinWaistAddrsPerLocalAddr &&
+ len(o.AddrsFor(tcp4ListenAddr)) == maxExternalThinWaistAddrsPerLocalAddr
+ }, 1*time.Second, 100*time.Millisecond)
+ for _, c := range conns {
+ o.removeConn(c)
+ }
+ require.Eventually(t, func() bool {
+ return checkAllEntriesRemoved(o)
+ }, 1*time.Second, 100*time.Millisecond)
+ })
+
+ t.Run("WebTransport inferred from QUIC", func(t *testing.T) {
+ o := newObservedAddrMgr()
+ defer o.Close()
+ observedQuic := ma.StringCast("/ip4/2.2.2.2/udp/2/quic-v1")
+ observedWebTransport := ma.StringCast("/ip4/2.2.2.2/udp/2/quic-v1/webtransport/certhash/uEgNmb28")
+ c1 := newConn(quic4ListenAddr, ma.StringCast("/ip4/1.2.3.1/udp/1/quic-v1"))
+ c2 := newConn(quic4ListenAddr, ma.StringCast("/ip4/1.2.3.2/udp/1/quic-v1"))
+ c3 := newConn(webTransport4ListenAddr, ma.StringCast("/ip4/1.2.3.3/udp/1/quic-v1/webtransport"))
+ c4 := newConn(webTransport4ListenAddr, ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1/webtransport"))
+ o.maybeRecordObservation(c1, observedQuic)
+ o.maybeRecordObservation(c2, observedQuic)
+ o.maybeRecordObservation(c3, observedWebTransport)
+ o.maybeRecordObservation(c4, observedWebTransport)
+ require.EventuallyWithT(t, func(t *assert.CollectT) {
+ matest.AssertEqualMultiaddrs(t, o.Addrs(0), []ma.Multiaddr{observedQuic, observedWebTransport})
+ }, 1*time.Second, 100*time.Millisecond)
+ o.removeConn(c1)
+ o.removeConn(c2)
+ o.removeConn(c3)
+ o.removeConn(c4)
+ require.Eventually(t, func() bool {
+ return checkAllEntriesRemoved(o)
+ }, 1*time.Second, 100*time.Millisecond)
+ })
+
+ t.Run("WebTransport inferred from QUIC, with no WebTransport connections", func(t *testing.T) {
+ o := newObservedAddrMgr()
+ defer o.Close()
+ observedQuic := ma.StringCast("/ip4/2.2.2.2/udp/2/quic-v1")
+ inferredWebTransport := ma.StringCast("/ip4/2.2.2.2/udp/2/quic-v1/webtransport/certhash/uEgNmb28")
+ c1 := newConn(quic4ListenAddr, ma.StringCast("/ip4/1.2.3.1/udp/1/quic-v1"))
+ c2 := newConn(quic4ListenAddr, ma.StringCast("/ip4/1.2.3.2/udp/1/quic-v1"))
+ c3 := newConn(quic4ListenAddr, ma.StringCast("/ip4/1.2.3.3/udp/1/quic-v1"))
+ c4 := newConn(quic4ListenAddr, ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1"))
+ o.maybeRecordObservation(c1, observedQuic)
+ o.maybeRecordObservation(c2, observedQuic)
+ o.maybeRecordObservation(c3, observedQuic)
+ o.maybeRecordObservation(c4, observedQuic)
+ require.EventuallyWithT(t, func(t *assert.CollectT) {
+ matest.AssertEqualMultiaddrs(t, o.Addrs(0), []ma.Multiaddr{observedQuic, inferredWebTransport})
+ }, 1*time.Second, 100*time.Millisecond)
+ o.removeConn(c1)
+ o.removeConn(c2)
+ o.removeConn(c3)
+ o.removeConn(c4)
+ require.Eventually(t, func() bool {
+ return checkAllEntriesRemoved(o)
+ }, 1*time.Second, 100*time.Millisecond)
+ })
+
+ t.Run("SameObservers", func(t *testing.T) {
+ o := newObservedAddrMgr()
+ defer o.Close()
+
+ observedQuic := ma.StringCast("/ip4/2.2.2.2/udp/2/quic-v1")
+ inferredWebTransport := ma.StringCast("/ip4/2.2.2.2/udp/2/quic-v1/webtransport/certhash/uEgNmb28")
+
+ const N = 4 // ActivationThresh
+ var ob1, ob2 [N]connMultiaddrs
+ for i := 0; i < N; i++ {
+ ob1[i] = newConn(quic4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/1/quic-v1", i)))
+ ob2[i] = newConn(quic4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/2/quic-v1", i)))
+ }
+ for i := 0; i < N-1; i++ {
+ o.maybeRecordObservation(ob1[i], observedQuic)
+ o.maybeRecordObservation(ob2[i], observedQuic)
+ }
+ time.Sleep(100 * time.Millisecond)
+ require.Equal(t, o.Addrs(0), []ma.Multiaddr{})
+
+ // We should have a valid address now
+ o.maybeRecordObservation(ob1[N-1], observedQuic)
+ o.maybeRecordObservation(ob2[N-1], observedQuic)
+ require.EventuallyWithT(t, func(t *assert.CollectT) {
+ matest.AssertEqualMultiaddrs(t, o.Addrs(0), []ma.Multiaddr{observedQuic, inferredWebTransport})
+ }, 2*time.Second, 100*time.Millisecond)
+
+ // Now disconnect first observer group
+ for i := 0; i < N; i++ {
+ o.removeConn(ob1[i])
+ }
+ time.Sleep(100 * time.Millisecond)
+ if !matest.AssertEqualMultiaddrs(t, o.Addrs(0), []ma.Multiaddr{observedQuic, inferredWebTransport}) {
+ t.Fatalf("address removed too earyl %v %v", o.Addrs(0), observedQuic)
+ }
+
+ // Now disconnect the second group to check cleanup
+ for i := 0; i < N; i++ {
+ o.removeConn(ob2[i])
+ }
+ require.Eventually(t, func() bool {
+ return checkAllEntriesRemoved(o)
+ }, 2*time.Second, 100*time.Millisecond)
+ })
+
+ t.Run("SameObserversDifferentAddrs", func(t *testing.T) {
+ o := newObservedAddrMgr()
+ defer o.Close()
+
+ observedQuic1 := ma.StringCast("/ip4/2.2.2.2/udp/2/quic-v1")
+ observedQuic2 := ma.StringCast("/ip4/2.2.2.2/udp/3/quic-v1")
+ inferredWebTransport1 := ma.StringCast("/ip4/2.2.2.2/udp/2/quic-v1/webtransport/certhash/uEgNmb28")
+ inferredWebTransport2 := ma.StringCast("/ip4/2.2.2.2/udp/3/quic-v1/webtransport/certhash/uEgNmb28")
+
+ const N = 4 // ActivationThresh
+ var ob1, ob2 [N]connMultiaddrs
+ for i := 0; i < N; i++ {
+ ob1[i] = newConn(quic4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/1/quic-v1", i)))
+ ob2[i] = newConn(quic4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/2/quic-v1", i)))
+ }
+ for i := 0; i < N-1; i++ {
+ o.maybeRecordObservation(ob1[i], observedQuic1)
+ o.maybeRecordObservation(ob2[i], observedQuic2)
+ }
+ time.Sleep(100 * time.Millisecond)
+ require.Equal(t, o.Addrs(0), []ma.Multiaddr{})
+
+ // We should have a valid address now
+ o.maybeRecordObservation(ob1[N-1], observedQuic1)
+ o.maybeRecordObservation(ob2[N-1], observedQuic2)
+ require.EventuallyWithT(t, func(t *assert.CollectT) {
+ matest.AssertEqualMultiaddrs(t, o.Addrs(0), []ma.Multiaddr{observedQuic1, observedQuic2, inferredWebTransport1, inferredWebTransport2})
+ }, 2*time.Second, 100*time.Millisecond)
+
+ // Now disconnect first observer group
+ for i := 0; i < N; i++ {
+ o.removeConn(ob1[i])
+ }
+ time.Sleep(100 * time.Millisecond)
+ if !matest.AssertEqualMultiaddrs(t, o.Addrs(0), []ma.Multiaddr{observedQuic2, inferredWebTransport2}) {
+ t.Fatalf("address removed too early %v %v", o.Addrs(0), observedQuic2)
+ }
+
+ // Now disconnect the second group to check cleanup
+ for i := 0; i < N; i++ {
+ o.removeConn(ob2[i])
+ }
+ require.Eventually(t, func() bool {
+ return checkAllEntriesRemoved(o)
+ }, 2*time.Second, 100*time.Millisecond)
+ })
+
+ t.Run("Old observations discarded", func(t *testing.T) {
+ o := newObservedAddrMgr()
+ defer o.Close()
+ c1 := newConn(quic4ListenAddr, ma.StringCast("/ip4/1.2.3.1/udp/1/quic-v1"))
+ c2 := newConn(quic4ListenAddr, ma.StringCast("/ip4/1.2.3.2/udp/1/quic-v1"))
+ c3 := newConn(webTransport4ListenAddr, ma.StringCast("/ip4/1.2.3.3/udp/1/quic-v1/webtransport"))
+ c4 := newConn(webTransport4ListenAddr, ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1/webtransport"))
+ c5 := newConn(quic4ListenAddr, ma.StringCast("/ip4/1.2.3.5/udp/1/quic-v1"))
+ c6 := newConn(quic4ListenAddr, ma.StringCast("/ip4/1.2.3.6/udp/1/quic-v1"))
+ var observedQuic, observedWebTransport, observedWebTransportWithCertHash ma.Multiaddr
+ for i := 0; i < 10; i++ {
+ // Change the IP address in each observation
+ observedQuic = ma.StringCast(fmt.Sprintf("/ip4/2.2.2.%d/udp/2/quic-v1", i))
+ observedWebTransport = ma.StringCast(fmt.Sprintf("/ip4/2.2.2.%d/udp/2/quic-v1/webtransport", i))
+ observedWebTransportWithCertHash = ma.StringCast(fmt.Sprintf("/ip4/2.2.2.%d/udp/2/quic-v1/webtransport/certhash/uEgNmb28", i))
+ o.maybeRecordObservation(c1, observedQuic)
+ o.maybeRecordObservation(c2, observedQuic)
+ o.maybeRecordObservation(c3, observedWebTransport)
+ o.maybeRecordObservation(c4, observedWebTransport)
+ o.maybeRecordObservation(c5, observedQuic)
+ time.Sleep(20 * time.Millisecond)
+ }
+
+ require.EventuallyWithT(t, func(t *assert.CollectT) {
+ matest.AssertEqualMultiaddrs(t, o.Addrs(0), []ma.Multiaddr{observedQuic, observedWebTransportWithCertHash})
+ }, 1*time.Second, 100*time.Millisecond)
+
+ tw, err := thinWaistForm(quic4ListenAddr)
+ require.NoError(t, err)
+ require.Less(t, len(o.externalAddrs[string(tw.TW.Bytes())]), 2)
+
+ requireEqualAddrs(t, []ma.Multiaddr{observedWebTransportWithCertHash}, o.AddrsFor(webTransport4ListenAddr))
+ requireEqualAddrs(t, []ma.Multiaddr{observedQuic}, o.AddrsFor(quic4ListenAddr))
+ requireAddrsMatch(t, []ma.Multiaddr{observedQuic, observedWebTransportWithCertHash}, o.Addrs(0))
+
+ for i := 0; i < 3; i++ {
+ // remove non-recorded connection
+ o.removeConn(c6)
+ }
+ requireEqualAddrs(t, []ma.Multiaddr{observedWebTransportWithCertHash}, o.AddrsFor(webTransport4ListenAddr))
+ requireEqualAddrs(t, []ma.Multiaddr{observedQuic}, o.AddrsFor(quic4ListenAddr))
+ requireAddrsMatch(t, []ma.Multiaddr{observedQuic, observedWebTransportWithCertHash}, o.Addrs(0))
+
+ o.removeConn(c1)
+ o.removeConn(c2)
+ o.removeConn(c3)
+ o.removeConn(c4)
+ o.removeConn(c5)
+ require.Eventually(t, func() bool {
+ return checkAllEntriesRemoved(o)
+ }, 1*time.Second, 100*time.Millisecond)
+ })
+
+ t.Run("WebTransport certhash", func(t *testing.T) {
+ o := newObservedAddrMgr()
+ observedWebTransport := ma.StringCast("/ip4/2.2.2.2/udp/1/quic-v1/webtransport")
+ observedWebTransportWithCerthash := ma.StringCast("/ip4/2.2.2.2/udp/1/quic-v1/webtransport/certhash/uEgNmb28")
+ inferredQUIC := ma.StringCast("/ip4/2.2.2.2/udp/1/quic-v1")
+ c1 := newConn(webTransport4ListenAddr, ma.StringCast("/ip4/1.2.3.1/udp/1/quic-v1/webtransport"))
+ c2 := newConn(webTransport4ListenAddr, ma.StringCast("/ip4/1.2.3.2/udp/1/quic-v1/webtransport"))
+ c3 := newConn(webTransport4ListenAddr, ma.StringCast("/ip4/1.2.3.3/udp/1/quic-v1/webtransport"))
+ c4 := newConn(webTransport4ListenAddr, ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1/webtransport"))
+ o.maybeRecordObservation(c1, observedWebTransport)
+ o.maybeRecordObservation(c2, observedWebTransport)
+ o.maybeRecordObservation(c3, observedWebTransport)
+ o.maybeRecordObservation(c4, observedWebTransport)
+ require.EventuallyWithT(t, func(t *assert.CollectT) {
+ matest.AssertMultiaddrsMatch(t, o.Addrs(0), []ma.Multiaddr{observedWebTransportWithCerthash, inferredQUIC})
+ }, 1*time.Second, 100*time.Millisecond)
+ o.removeConn(c1)
+ o.removeConn(c2)
+ o.removeConn(c3)
+ o.removeConn(c4)
+ require.Eventually(t, func() bool {
+ return checkAllEntriesRemoved(o)
+ }, 1*time.Second, 100*time.Millisecond)
+ })
+
+ t.Run("getNATType", func(t *testing.T) {
+ o := newObservedAddrMgr()
+ defer o.Close()
+
+ observedWebTransport := ma.StringCast("/ip4/2.2.2.2/udp/1/quic-v1/webtransport")
+ var udpConns [5 * maxExternalThinWaistAddrsPerLocalAddr]connMultiaddrs
+ for i := 0; i < len(udpConns); i++ {
+ udpConns[i] = newConn(webTransport4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/1/quic-v1/webtransport", i)))
+ o.maybeRecordObservation(udpConns[i], observedWebTransport)
+ time.Sleep(10 * time.Millisecond)
+ }
+ require.EventuallyWithT(t, func(t *assert.CollectT) {
+ tcpNAT, udpNAT := o.getNATType()
+ require.Equal(t, tcpNAT, network.NATDeviceTypeUnknown)
+ require.Equal(t, udpNAT, network.NATDeviceTypeEndpointIndependent)
+ }, 1*time.Second, 100*time.Millisecond)
+ })
+ t.Run("NATTypeSymmetric", func(t *testing.T) {
+ o := newObservedAddrMgr()
+ defer o.Close()
+ const N = 100
+ var tcpConns, quicConns [N]*mockConn
+ for i := 0; i < N; i++ {
+ tcpConns[i] = newConn(tcp4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/tcp/1", i)))
+ quicConns[i] = newConn(quic4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/1/quic-v1", i)))
+ }
+ var observedQuic, observedTCP ma.Multiaddr
+ for i := 0; i < N; i++ {
+ // ip addr has the form 2.2.X.2
+ observedQuic = ma.StringCast(fmt.Sprintf("/ip4/2.2.%d.2/udp/2/quic-v1", i%20))
+ observedTCP = ma.StringCast(fmt.Sprintf("/ip4/2.2.%d.2/tcp/2", i%20))
+ o.maybeRecordObservation(tcpConns[i], observedTCP)
+ o.maybeRecordObservation(quicConns[i], observedQuic)
+ time.Sleep(10 * time.Millisecond)
+ }
+ // At this point we have 20 observed address groups with 5 observations each
+ // The output should remain stable
+ require.EventuallyWithT(t, func(t *assert.CollectT) {
+ require.Equal(t, len(o.Addrs(0)), 3*maxExternalThinWaistAddrsPerLocalAddr)
+ }, 1*time.Second, 100*time.Millisecond)
+ addrs := o.Addrs(0)
+ for i := 0; i < 10; i++ {
+ require.ElementsMatch(t, o.Addrs(0), addrs, "%s %s", o.Addrs(0), addrs)
+ time.Sleep(50 * time.Millisecond)
+ }
+
+ tcpNAT, udpNAT := o.getNATType()
+ require.Equal(t, tcpNAT, network.NATDeviceTypeEndpointDependent)
+ require.Equal(t, udpNAT, network.NATDeviceTypeEndpointDependent)
+
+ for i := 0; i < N; i++ {
+ o.removeConn(tcpConns[i])
+ o.removeConn(quicConns[i])
+ }
+ require.Eventually(t, func() bool {
+ return checkAllEntriesRemoved(o)
+ }, 1*time.Second, 100*time.Millisecond)
+ })
+ t.Run("Nil Input", func(_ *testing.T) {
+ o := newObservedAddrMgr()
+ defer o.Close()
+ o.maybeRecordObservation(nil, nil)
+ remoteAddr := ma.StringCast("/ip4/1.2.3.4/tcp/1")
+ o.maybeRecordObservation(newConn(tcp4ListenAddr, remoteAddr), nil)
+ o.maybeRecordObservation(nil, remoteAddr)
+ o.AddrsFor(nil)
+ o.removeConn(nil)
+ })
+
+ t.Run("Many connection many observations IP4 And IP6", func(t *testing.T) {
+ o := newObservedAddrMgr()
+ defer o.Close()
+ const N = 100
+ var tcp4Conns, quic4Conns, webTransport4Conns [N]*mockConn
+ var tcp6Conns, quic6Conns, webTransport6Conns [N]*mockConn
+ for i := 0; i < N; i++ {
+ tcp4Conns[i] = newConn(tcp4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/tcp/1", i)))
+ quic4Conns[i] = newConn(quic4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/1/quic-v1", i)))
+ webTransport4Conns[i] = newConn(webTransport4ListenAddr, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.%d/udp/1/quic-v1/webtransport", i)))
+
+ tcp6Conns[i] = newConn(tcp6ListenAddr, ma.StringCast(fmt.Sprintf("/ip6/20%02x::/tcp/1", i)))
+ quic6Conns[i] = newConn(quic6ListenAddr, ma.StringCast(fmt.Sprintf("/ip6/20%02x::/udp/1/quic-v1", i)))
+ webTransport6Conns[i] = newConn(webTransport6ListenAddr, ma.StringCast(fmt.Sprintf("/ip6/20%02x::/udp/1/quic-v1/webtransport", i)))
+ }
+ var observedQUIC4, observedWebTransport4, observedTCP4 ma.Multiaddr
+ var observedQUIC6, observedWebTransport6, observedTCP6 ma.Multiaddr
+ for i := 0; i < N; i++ {
+ for j := 0; j < 5; j++ {
+ // ip addr has the form 2.2.X.Y
+ observedQUIC4 = ma.StringCast(fmt.Sprintf("/ip4/2.2.%d.%d/udp/2/quic-v1", i/10, j))
+ observedWebTransport4 = ma.StringCast(fmt.Sprintf("/ip4/2.2.%d.%d/udp/2/quic-v1/webtransport", i/10, j))
+ observedTCP4 = ma.StringCast(fmt.Sprintf("/ip4/2.2.%d.%d/tcp/2", i/10, j))
+
+ // ip addr has the form 20XX::YY
+ observedQUIC6 = ma.StringCast(fmt.Sprintf("/ip6/20%02x::%02x/udp/2/quic-v1", i/10, j))
+ observedWebTransport6 = ma.StringCast(fmt.Sprintf("/ip6/20%02x::%02x/udp/2/quic-v1/webtransport", i/10, j))
+ observedTCP6 = ma.StringCast(fmt.Sprintf("/ip6/20%02x::%02x/tcp/2", i/10, j))
+
+ o.maybeRecordObservation(tcp4Conns[i], observedTCP4)
+ o.maybeRecordObservation(quic4Conns[i], observedQUIC4)
+ o.maybeRecordObservation(webTransport4Conns[i], observedWebTransport4)
+
+ o.maybeRecordObservation(tcp6Conns[i], observedTCP6)
+ o.maybeRecordObservation(quic6Conns[i], observedQUIC6)
+ o.maybeRecordObservation(webTransport6Conns[i], observedWebTransport6)
+ }
+ }
+ // At this point we have 10 groups of N / 10 connections, with 5 observations for every connection
+ // The output should remain stable
+ require.Eventually(t, func() bool {
+ return len(o.Addrs(0)) == 2*3*maxExternalThinWaistAddrsPerLocalAddr
+ }, 1*time.Second, 100*time.Millisecond)
+ addrs := o.Addrs(0)
+ for i := 0; i < 10; i++ {
+ require.ElementsMatch(t, o.Addrs(0), addrs, "%s %s", o.Addrs(0), addrs)
+ time.Sleep(10 * time.Millisecond)
+ }
+
+ // Now we bias a few address counts and check for sorting correctness
+ var resTCPAddrs, resQuicAddrs, resWebTransportAddrs, resWebTransportWithCertHashAddrs []ma.Multiaddr
+
+ for i, idx := 0, 0; i < maxExternalThinWaistAddrsPerLocalAddr; i++ {
+ resTCPAddrs = append(resTCPAddrs, ma.StringCast(fmt.Sprintf("/ip4/2.2.%d.4/tcp/2", 9-i)))
+ resQuicAddrs = append(resQuicAddrs, ma.StringCast(fmt.Sprintf("/ip4/2.2.%d.4/udp/2/quic-v1", 9-i)))
+ resWebTransportAddrs = append(resWebTransportAddrs, ma.StringCast(fmt.Sprintf("/ip4/2.2.%d.4/udp/2/quic-v1/webtransport", 9-i)))
+ resWebTransportWithCertHashAddrs = append(resWebTransportWithCertHashAddrs, ma.StringCast(fmt.Sprintf("/ip4/2.2.%d.4/udp/2/quic-v1/webtransport/certhash/uEgNmb28", 9-i)))
+
+ o.maybeRecordObservation(tcp4Conns[i], resTCPAddrs[idx])
+ o.maybeRecordObservation(quic4Conns[i], resQuicAddrs[idx])
+ o.maybeRecordObservation(webTransport4Conns[i], resWebTransportAddrs[idx])
+ idx++
+
+ resTCPAddrs = append(resTCPAddrs, ma.StringCast(fmt.Sprintf("/ip6/20%02x::04/tcp/2", 9-i)))
+ resQuicAddrs = append(resQuicAddrs, ma.StringCast(fmt.Sprintf("/ip6/20%02x::04/udp/2/quic-v1", 9-i)))
+ resWebTransportAddrs = append(resWebTransportAddrs, ma.StringCast(fmt.Sprintf("/ip6/20%02x::04/udp/2/quic-v1/webtransport", 9-i)))
+ resWebTransportWithCertHashAddrs = append(resWebTransportWithCertHashAddrs, ma.StringCast(fmt.Sprintf("/ip6/20%02x::04/udp/2/quic-v1/webtransport/certhash/uEgNmb28", 9-i)))
+
+ o.maybeRecordObservation(tcp6Conns[i], resTCPAddrs[idx])
+ o.maybeRecordObservation(quic6Conns[i], resQuicAddrs[idx])
+ o.maybeRecordObservation(webTransport6Conns[i], resWebTransportAddrs[idx])
+ idx++
+ }
+ var allAddrs []ma.Multiaddr
+ allAddrs = append(allAddrs, resTCPAddrs[:]...)
+ allAddrs = append(allAddrs, resQuicAddrs[:]...)
+ allAddrs = append(allAddrs, resWebTransportWithCertHashAddrs[:]...)
+ require.EventuallyWithT(t, func(t *assert.CollectT) {
+ matest.AssertMultiaddrsMatch(t, o.Addrs(0), allAddrs)
+ }, 1*time.Second, 100*time.Millisecond)
+
+ for i := 0; i < N; i++ {
+ o.removeConn(tcp4Conns[i])
+ o.removeConn(quic4Conns[i])
+ o.removeConn(webTransport4Conns[i])
+ o.removeConn(tcp6Conns[i])
+ o.removeConn(quic6Conns[i])
+ o.removeConn(webTransport6Conns[i])
+ }
+ require.Eventually(t, func() bool {
+ return checkAllEntriesRemoved(o)
+ }, 1*time.Second, 100*time.Millisecond)
+ })
+}
+
+func genIPMultiaddr(ip6 bool) ma.Multiaddr {
+ var ipB [16]byte
+ crand.Read(ipB[:])
+ var ip net.IP
+ if ip6 {
+ ip = net.IP(ipB[:])
+ } else {
+ ip = net.IP(ipB[:4])
+ }
+ addr, _ := manet.FromIP(ip)
+ return addr
+}
+
+func FuzzObservedAddrsManager(f *testing.F) {
+ protos := []string{
+ "/webrtc-direct",
+ "/quic-v1",
+ "/quic-v1/webtransport",
+ }
+ tcp4 := ma.StringCast("/ip4/192.168.1.100/tcp/1")
+ quic4 := ma.StringCast("/ip4/0.0.0.0/udp/1/quic-v1")
+ wt4 := ma.StringCast("/ip4/0.0.0.0/udp/1/quic-v1/webtransport/certhash/uEgNmb28")
+ tcp6 := ma.StringCast("/ip6/1::1/tcp/1")
+ quic6 := ma.StringCast("/ip6/::/udp/1/quic-v1")
+ wt6 := ma.StringCast("/ip6/::/udp/1/quic-v1/webtransport/certhash/uEgNmb28")
+ newObservedAddrMgr := func() *Manager {
+ listenAddrs := []ma.Multiaddr{
+ tcp4, quic4, wt4, tcp6, quic6, wt6,
+ }
+ listenAddrsFunc := func() []ma.Multiaddr {
+ return listenAddrs
+ }
+ eb := eventbus.NewBus()
+ o, err := newManagerWithListenAddrs(eb, listenAddrsFunc)
+ if err != nil {
+ panic(err)
+ }
+ return o
+ }
+
+ f.Fuzz(func(_ *testing.T, port uint16) {
+ addrs := []ma.Multiaddr{genIPMultiaddr(true), genIPMultiaddr(false)}
+ n := len(addrs)
+ for i := 0; i < n; i++ {
+ addrs = append(addrs, addrs[i].Encapsulate(ma.StringCast(fmt.Sprintf("/tcp/%d", port))))
+ addrs = append(addrs, addrs[i].Encapsulate(ma.StringCast(fmt.Sprintf("/udp/%d", port))))
+ addrs = append(addrs, ma.StringCast(fmt.Sprintf("/tcp/%d", port)))
+ addrs = append(addrs, ma.StringCast(fmt.Sprintf("/udp/%d", port)))
+ }
+ n = len(addrs)
+ for i := 0; i < n; i++ {
+ for j := 0; j < len(protos); j++ {
+ protoAddr := ma.StringCast(protos[j])
+ addrs = append(addrs, addrs[i].Encapsulate(protoAddr))
+ addrs = append(addrs, protoAddr)
+ }
+ }
+ o := newObservedAddrMgr()
+ defer o.Close()
+ for i := 0; i < len(addrs); i++ {
+ for _, l := range o.listenAddrs() {
+ c := newConn(l, addrs[i])
+ o.maybeRecordObservation(c, addrs[i])
+ o.maybeRecordObservation(c, nil)
+ o.maybeRecordObservation(nil, addrs[i])
+ o.removeConn(c)
+ }
+ }
+ })
+}
+
+func TestObserver(t *testing.T) {
+ tests := []struct {
+ addr ma.Multiaddr
+ want string
+ }{
+ {
+ addr: ma.StringCast("/ip4/1.2.3.4/tcp/1"),
+ want: "1.2.3.4",
+ },
+ {
+ addr: ma.StringCast("/ip4/192.168.0.1/tcp/1"),
+ want: "192.168.0.1",
+ },
+ {
+ addr: ma.StringCast("/ip6/200::1/udp/1/quic-v1"),
+ want: "200::",
+ },
+ {
+ addr: ma.StringCast("/ip6/::1/udp/1/quic-v1"),
+ want: "::",
+ },
+ }
+
+ for i, tc := range tests {
+ t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+ got, err := getObserver(tc.addr)
+ require.NoError(t, err)
+ require.Equal(t, got, tc.want)
+ })
+ }
+}
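
The tests above revolve around grouping observations by a "thin waist" address: the leading `/ipX/{tcp,udp}` pair shared by the QUIC, WebTransport and TCP variants of the same endpoint. Below is a minimal sketch of that extraction. It is an illustrative reimplementation, not the package's actual `thinWaistForm` helper (which, as the `tw.TW` access above suggests, returns a richer struct).

```go
package main

import (
	"fmt"

	ma "github.com/multiformats/go-multiaddr"
)

// thinWaist returns the leading /ipX/{tcp,udp} components of addr,
// i.e. the prefix under which observations are aggregated.
func thinWaist(addr ma.Multiaddr) (ma.Multiaddr, error) {
	parts := ma.Split(addr)
	if len(parts) < 2 {
		return nil, fmt.Errorf("not a thin-waist address: %s", addr)
	}
	c0 := parts[0].Protocols()[0].Code
	c1 := parts[1].Protocols()[0].Code
	if (c0 != ma.P_IP4 && c0 != ma.P_IP6) || (c1 != ma.P_TCP && c1 != ma.P_UDP) {
		return nil, fmt.Errorf("not a thin-waist address: %s", addr)
	}
	return parts[0].Encapsulate(parts[1]), nil
}

func main() {
	tw, err := thinWaist(ma.StringCast("/ip4/2.2.2.2/udp/1/quic-v1/webtransport"))
	fmt.Println(tw, err) // /ip4/2.2.2.2/udp/1 <nil>
}
```
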
diff --git a/p2p/host/peerstore/metrics.go b/p2p/host/peerstore/metrics.go
new file mode 100644
index 0000000000..e05fda7d8e
--- /dev/null
+++ b/p2p/host/peerstore/metrics.go
@@ -0,0 +1,58 @@
+package peerstore
+
+import (
+ "sync"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+)
+
+// LatencyEWMASmoothing governs the decay of the EWMA (the speed
+// at which it changes). This must be a normalized (0-1) value.
+// 1 is 100% change, 0 is no change.
+var LatencyEWMASmoothing = 0.1
+
+type metrics struct {
+ mutex sync.RWMutex
+ latmap map[peer.ID]time.Duration
+}
+
+func NewMetrics() *metrics {
+ return &metrics{
+ latmap: make(map[peer.ID]time.Duration),
+ }
+}
+
+// RecordLatency records a new latency measurement
+func (m *metrics) RecordLatency(p peer.ID, next time.Duration) {
+ nextf := float64(next)
+ s := LatencyEWMASmoothing
+ if s > 1 || s < 0 {
+ s = 0.1 // ignore the knob. it's broken. look, it jiggles.
+ }
+
+ m.mutex.Lock()
+ ewma, found := m.latmap[p]
+ ewmaf := float64(ewma)
+ if !found {
+ m.latmap[p] = next // when no data, just take it as the mean.
+ } else {
+ nextf = ((1.0 - s) * ewmaf) + (s * nextf)
+ m.latmap[p] = time.Duration(nextf)
+ }
+ m.mutex.Unlock()
+}
+
+// LatencyEWMA returns an exponentially-weighted moving avg.
+// of all measurements of a peer's latency.
+func (m *metrics) LatencyEWMA(p peer.ID) time.Duration {
+ m.mutex.RLock()
+ defer m.mutex.RUnlock()
+ return m.latmap[p]
+}
+
+func (m *metrics) RemovePeer(p peer.ID) {
+ m.mutex.Lock()
+ delete(m.latmap, p)
+ m.mutex.Unlock()
+}
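
RecordLatency above implements plain exponential smoothing: with smoothing factor s, the update is ewma' = (1-s)*ewma + s*next, and the first sample seeds the average directly. A self-contained sketch of how the smoothed value tracks a step change in latency (the numbers are illustrative only):

```go
package main

import (
	"fmt"
	"time"
)

// ewmaUpdate mirrors RecordLatency's math: the first sample seeds the mean,
// later samples are blended in with smoothing factor s.
func ewmaUpdate(cur time.Duration, seeded bool, next time.Duration, s float64) time.Duration {
	if !seeded {
		return next
	}
	return time.Duration(((1.0 - s) * float64(cur)) + (s * float64(next)))
}

func main() {
	var lat time.Duration
	seeded := false
	// latency jumps from a steady 100ms to a steady 200ms
	samples := []time.Duration{100 * time.Millisecond, 100 * time.Millisecond,
		200 * time.Millisecond, 200 * time.Millisecond, 200 * time.Millisecond}
	for _, next := range samples {
		lat = ewmaUpdate(lat, seeded, next, 0.1)
		seeded = true
		fmt.Println(lat) // 100ms, 100ms, 110ms, 119ms, 127.1ms
	}
}
```

With s = 0.1 the average moves only 10% of the way toward each new sample, which is why a single latency spike barely nudges the reported value.
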
diff --git a/p2p/host/peerstore/metrics_test.go b/p2p/host/peerstore/metrics_test.go
new file mode 100644
index 0000000000..d92a482b51
--- /dev/null
+++ b/p2p/host/peerstore/metrics_test.go
@@ -0,0 +1,63 @@
+package peerstore
+
+import (
+ "fmt"
+ "math/rand"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/test"
+)
+
+func TestLatencyEWMAFun(t *testing.T) {
+ t.Skip("run it for fun")
+
+ m := NewMetrics()
+ id, err := test.RandPeerID()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ mu := 100.0
+ sig := 10.0
+ next := func() time.Duration {
+ mu = (rand.NormFloat64() * sig) + mu
+ return time.Duration(mu)
+ }
+
+ print := func() {
+ fmt.Printf("%3.f %3.f --> %d\n", sig, mu, m.LatencyEWMA(id))
+ }
+
+ for {
+ time.Sleep(200 * time.Millisecond)
+ m.RecordLatency(id, next())
+ print()
+ }
+}
+
+func TestLatencyEWMA(t *testing.T) {
+ m := NewMetrics()
+ id, err := test.RandPeerID()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ const exp = 100
+ const mu = exp
+ const sig = 10
+ next := func() time.Duration { return time.Duration(rand.Intn(20) - 10 + mu) }
+
+ for i := 0; i < 10; i++ {
+ m.RecordLatency(id, next())
+ }
+
+ lat := m.LatencyEWMA(id)
+ diff := exp - lat
+ if diff < 0 {
+ diff = -diff
+ }
+ if diff > sig {
+ t.Fatalf("latency outside of expected range. expected %d ยฑ %d, got %d", exp, sig, lat)
+ }
+}
diff --git a/p2p/host/peerstore/peerstore.go b/p2p/host/peerstore/peerstore.go
new file mode 100644
index 0000000000..45c22c3620
--- /dev/null
+++ b/p2p/host/peerstore/peerstore.go
@@ -0,0 +1,22 @@
+package peerstore
+
+import (
+ "github.com/libp2p/go-libp2p/core/peer"
+ pstore "github.com/libp2p/go-libp2p/core/peerstore"
+)
+
+func PeerInfos(ps pstore.Peerstore, peers peer.IDSlice) []peer.AddrInfo {
+ pi := make([]peer.AddrInfo, len(peers))
+ for i, p := range peers {
+ pi[i] = ps.PeerInfo(p)
+ }
+ return pi
+}
+
+func PeerInfoIDs(pis []peer.AddrInfo) peer.IDSlice {
+ ps := make(peer.IDSlice, len(pis))
+ for i, pi := range pis {
+ ps[i] = pi.ID
+ }
+ return ps
+}
diff --git a/p2p/host/peerstore/pstoreds/addr_book.go b/p2p/host/peerstore/pstoreds/addr_book.go
new file mode 100644
index 0000000000..fe56b185af
--- /dev/null
+++ b/p2p/host/peerstore/pstoreds/addr_book.go
@@ -0,0 +1,617 @@
+package pstoreds
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+ pstore "github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/libp2p/go-libp2p/core/record"
+ "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoreds/pb"
+ "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem"
+
+ "github.com/hashicorp/golang-lru/arc/v2"
+ ds "github.com/ipfs/go-datastore"
+ "github.com/ipfs/go-datastore/query"
+ logging "github.com/libp2p/go-libp2p/gologshim"
+ b32 "github.com/multiformats/go-base32"
+ ma "github.com/multiformats/go-multiaddr"
+ "google.golang.org/protobuf/proto"
+)
+
+type ttlWriteMode int
+
+const (
+ ttlOverride ttlWriteMode = iota
+ ttlExtend
+)
+
+var (
+ log = logging.Logger("peerstore/ds")
+
+ // Peer addresses are stored under the following db key pattern:
+ // /peers/addrs/<b32 peer id no padding>
+ addrBookBase = ds.NewKey("/peers/addrs")
+)
+
+// addrsRecord decorates the AddrBookRecord with locks and metadata.
+type addrsRecord struct {
+ sync.RWMutex
+ *pb.AddrBookRecord
+ dirty bool
+}
+
+// flush writes the record to the datastore by calling ds.Put, unless the record is
+// marked for deletion, in which case we call ds.Delete. To be called within a lock.
+func (r *addrsRecord) flush(write ds.Write) (err error) {
+ key := addrBookBase.ChildString(b32.RawStdEncoding.EncodeToString(r.Id))
+
+ if len(r.Addrs) == 0 {
+ if err = write.Delete(context.TODO(), key); err == nil {
+ r.dirty = false
+ }
+ return err
+ }
+
+ data, err := proto.Marshal(r)
+ if err != nil {
+ return err
+ }
+ if err = write.Put(context.TODO(), key, data); err != nil {
+ return err
+ }
+ // write succeeded; record is no longer dirty.
+ r.dirty = false
+ return nil
+}
+
+// clean is called on records to perform housekeeping. The return value indicates if the record was changed
+// as a result of this call.
+//
+// clean does the following:
+// * sorts addresses by expiration (soonest expiring first).
+// * removes expired addresses.
+//
+// It short-circuits optimistically when there's nothing to do.
+//
+// clean is called from several points:
+// * when accessing an entry.
+// * when performing periodic GC.
+// * after an entry has been modified (e.g. addresses have been added or removed, TTLs updated, etc.)
+//
+// If the return value is true, the caller should perform a flush immediately to sync the record with the store.
+func (r *addrsRecord) clean(now time.Time) (chgd bool) {
+ nowUnix := now.Unix()
+ addrsLen := len(r.Addrs)
+
+ if !r.dirty && !r.hasExpiredAddrs(nowUnix) {
+ // record is not dirty, and we have no expired entries to purge.
+ return false
+ }
+
+ if addrsLen == 0 {
+ // this is a ghost record; let's signal it has to be written.
+ // flush() will take care of doing the deletion.
+ return true
+ }
+
+ if r.dirty && addrsLen > 1 {
+ sort.Slice(r.Addrs, func(i, j int) bool {
+ return r.Addrs[i].Expiry < r.Addrs[j].Expiry
+ })
+ }
+
+ r.Addrs = removeExpired(r.Addrs, nowUnix)
+
+ return r.dirty || len(r.Addrs) != addrsLen
+}
+
+func (r *addrsRecord) hasExpiredAddrs(now int64) bool {
+ if len(r.Addrs) > 0 && r.Addrs[0].Expiry <= now {
+ return true
+ }
+ return false
+}
+
+func removeExpired(entries []*pb.AddrBookRecord_AddrEntry, now int64) []*pb.AddrBookRecord_AddrEntry {
+ // since addresses are sorted by expiration, we find the first
+ // survivor and split the slice on its index.
+ pivot := -1
+ for i, addr := range entries {
+ if addr.Expiry > now {
+ break
+ }
+ pivot = i
+ }
+
+ return entries[pivot+1:]
+}
+
+// dsAddrBook is an address book backed by a Datastore with a GC procedure to purge expired entries. It uses an
+// in-memory address stream manager. See NewAddrBook for more information.
+type dsAddrBook struct {
+ ctx context.Context
+ opts Options
+
+ cache cache[peer.ID, *addrsRecord]
+ ds ds.Batching
+ gc *dsAddrBookGc
+ subsManager *pstoremem.AddrSubManager
+
+ // controls children goroutine lifetime.
+ childrenDone sync.WaitGroup
+ cancelFn func()
+
+ clock clock
+}
+
+type clock interface {
+ Now() time.Time
+ After(d time.Duration) <-chan time.Time
+}
+
+type realclock struct{}
+
+func (rc realclock) Now() time.Time {
+ return time.Now()
+}
+
+func (rc realclock) After(d time.Duration) <-chan time.Time {
+ return time.After(d)
+}
+
+var _ pstore.AddrBook = (*dsAddrBook)(nil)
+var _ pstore.CertifiedAddrBook = (*dsAddrBook)(nil)
+
+// NewAddrBook initializes a new datastore-backed address book. It serves as a drop-in replacement for pstoremem
+// (memory-backed peerstore), and works with any datastore implementing the ds.Batching interface.
+//
+// Addresses and peer records are serialized into protobuf, storing one datastore entry per peer, along with metadata
+// to control address expiration. To alleviate disk access and serde overhead, we internally use a read/write-through
+// ARC cache, the size of which is adjustable via Options.CacheSize.
+//
+// The user has a choice of two GC algorithms:
+//
+// - lookahead GC: minimises the amount of full store traversals by maintaining a time-indexed list of entries that
+// need to be visited within the period specified in Options.GCLookaheadInterval. This is useful in scenarios with
+// considerable TTL variance, coupled with datastores whose native iterators return entries in lexicographical key
+// order. Enable this mode by passing a value Options.GCLookaheadInterval > 0. Lookahead windows are jumpy, not
+// sliding. Purges operate exclusively over the lookahead window with periodicity Options.GCPurgeInterval.
+//
+// - full-purge GC (default): performs a full visit of the store with periodicity Options.GCPurgeInterval. Useful when
+// the range of possible TTL values is small and the values themselves are also extreme, e.g. 10 minutes or
+// permanent, popular values used in other libp2p modules. In this cited case, optimizing with lookahead windows
+// makes little sense.
+func NewAddrBook(ctx context.Context, store ds.Batching, opts Options) (ab *dsAddrBook, err error) {
+ ctx, cancelFn := context.WithCancel(ctx)
+ ab = &dsAddrBook{
+ ctx: ctx,
+ ds: store,
+ opts: opts,
+ cancelFn: cancelFn,
+ subsManager: pstoremem.NewAddrSubManager(),
+ clock: realclock{},
+ }
+
+ if opts.Clock != nil {
+ ab.clock = opts.Clock
+ }
+
+ if opts.CacheSize > 0 {
+ if ab.cache, err = arc.NewARC[peer.ID, *addrsRecord](int(opts.CacheSize)); err != nil {
+ return nil, err
+ }
+ } else {
+ ab.cache = new(noopCache[peer.ID, *addrsRecord])
+ }
+
+ if ab.gc, err = newAddressBookGc(ctx, ab); err != nil {
+ return nil, err
+ }
+
+ return ab, nil
+}
+
+func (ab *dsAddrBook) Close() error {
+ ab.cancelFn()
+ ab.childrenDone.Wait()
+ return nil
+}
+
+// loadRecord is a read-through fetch. It fetches a record from cache, falling back to the
+// datastore upon a miss, and returning a newly initialized record if the peer doesn't exist.
+//
+// loadRecord calls clean() on an existing record before returning it. If the record changes
+// as a result and the update argument is true, the resulting state is saved in the datastore.
+//
+// If the cache argument is true, the record is inserted in the cache when loaded from the datastore.
+func (ab *dsAddrBook) loadRecord(id peer.ID, cache bool, update bool) (pr *addrsRecord, err error) {
+ if pr, ok := ab.cache.Get(id); ok {
+ pr.Lock()
+ defer pr.Unlock()
+
+ if pr.clean(ab.clock.Now()) && update {
+ err = pr.flush(ab.ds)
+ }
+ return pr, err
+ }
+
+ pr = &addrsRecord{AddrBookRecord: &pb.AddrBookRecord{}}
+ key := addrBookBase.ChildString(b32.RawStdEncoding.EncodeToString([]byte(id)))
+ data, err := ab.ds.Get(context.TODO(), key)
+
+ switch err {
+ case ds.ErrNotFound:
+ err = nil
+ pr.Id = []byte(id)
+ case nil:
+ if err := proto.Unmarshal(data, pr); err != nil {
+ return nil, err
+ }
+ // this record is new and local for now (not in cache), so we don't need to lock.
+ if pr.clean(ab.clock.Now()) && update {
+ err = pr.flush(ab.ds)
+ }
+ default:
+ return nil, err
+ }
+
+ if cache {
+ ab.cache.Add(id, pr)
+ }
+ return pr, err
+}
+
+// AddAddr will add a new address if it's not already in the AddrBook.
+func (ab *dsAddrBook) AddAddr(p peer.ID, addr ma.Multiaddr, ttl time.Duration) {
+ ab.AddAddrs(p, []ma.Multiaddr{addr}, ttl)
+}
+
+// AddAddrs will add many new addresses if they're not already in the AddrBook.
+func (ab *dsAddrBook) AddAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration) {
+ if ttl <= 0 {
+ return
+ }
+ addrs = cleanAddrs(addrs, p)
+ ab.setAddrs(p, addrs, ttl, ttlExtend, false)
+}
+
+// ConsumePeerRecord adds addresses from a signed peer.PeerRecord (contained in
+// a record.Envelope), which will expire after the given TTL.
+// See https://godoc.org/github.com/libp2p/go-libp2p/core/peerstore#CertifiedAddrBook for more details.
+func (ab *dsAddrBook) ConsumePeerRecord(recordEnvelope *record.Envelope, ttl time.Duration) (bool, error) {
+ r, err := recordEnvelope.Record()
+ if err != nil {
+ return false, err
+ }
+ rec, ok := r.(*peer.PeerRecord)
+ if !ok {
+ return false, fmt.Errorf("envelope did not contain PeerRecord")
+ }
+ if !rec.PeerID.MatchesPublicKey(recordEnvelope.PublicKey) {
+ return false, fmt.Errorf("signing key does not match PeerID in PeerRecord")
+ }
+
+ // ensure that the seq number from the envelope is >= any previously received seq no;
+ // when equal, we still update so as to extend the TTLs
+ if ab.latestPeerRecordSeq(rec.PeerID) > rec.Seq {
+ return false, nil
+ }
+
+ addrs := cleanAddrs(rec.Addrs, rec.PeerID)
+ err = ab.setAddrs(rec.PeerID, addrs, ttl, ttlExtend, true)
+ if err != nil {
+ return false, err
+ }
+
+ err = ab.storeSignedPeerRecord(rec.PeerID, recordEnvelope, rec)
+ if err != nil {
+ return false, err
+ }
+ return true, nil
+}
+
+func (ab *dsAddrBook) latestPeerRecordSeq(p peer.ID) uint64 {
+ pr, err := ab.loadRecord(p, true, false)
+ if err != nil {
+ // We ignore the error because we don't want to fail storing a new record in this
+ // case.
+ log.Error("unable to load record", "peer", p, "err", err)
+ return 0
+ }
+ pr.RLock()
+ defer pr.RUnlock()
+
+ if len(pr.Addrs) == 0 || pr.CertifiedRecord == nil || len(pr.CertifiedRecord.Raw) == 0 {
+ return 0
+ }
+ return pr.CertifiedRecord.Seq
+}
+
+func (ab *dsAddrBook) storeSignedPeerRecord(p peer.ID, envelope *record.Envelope, rec *peer.PeerRecord) error {
+ envelopeBytes, err := envelope.Marshal()
+ if err != nil {
+ return err
+ }
+ // reload record and add routing state
+ // this has to be done after we add the addresses, since if
+ // we try to flush a datastore record with no addresses,
+ // it will just get deleted
+ pr, err := ab.loadRecord(p, true, false)
+ if err != nil {
+ return err
+ }
+ pr.Lock()
+ defer pr.Unlock()
+ pr.CertifiedRecord = &pb.AddrBookRecord_CertifiedRecord{
+ Seq: rec.Seq,
+ Raw: envelopeBytes,
+ }
+ pr.dirty = true
+ err = pr.flush(ab.ds)
+ return err
+}
+
+// GetPeerRecord returns a record.Envelope containing a peer.PeerRecord for the
+// given peer id, if one exists.
+// Returns nil if no signed PeerRecord exists for the peer.
+func (ab *dsAddrBook) GetPeerRecord(p peer.ID) *record.Envelope {
+ pr, err := ab.loadRecord(p, true, false)
+ if err != nil {
+ log.Error("unable to load record for peer", "peer", p, "err", err)
+ return nil
+ }
+ pr.RLock()
+ defer pr.RUnlock()
+ if pr.CertifiedRecord == nil || len(pr.CertifiedRecord.Raw) == 0 || len(pr.Addrs) == 0 {
+ return nil
+ }
+ state, _, err := record.ConsumeEnvelope(pr.CertifiedRecord.Raw, peer.PeerRecordEnvelopeDomain)
+ if err != nil {
+ log.Error("error unmarshaling stored signed peer record for peer", "peer", p, "err", err)
+ return nil
+ }
+ return state
+}
+
+// SetAddr will add or update the TTL of an address in the AddrBook.
+func (ab *dsAddrBook) SetAddr(p peer.ID, addr ma.Multiaddr, ttl time.Duration) {
+ ab.SetAddrs(p, []ma.Multiaddr{addr}, ttl)
+}
+
+// SetAddrs will add or update the TTLs of addresses in the AddrBook.
+func (ab *dsAddrBook) SetAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration) {
+ addrs = cleanAddrs(addrs, p)
+ if ttl <= 0 {
+ ab.deleteAddrs(p, addrs)
+ return
+ }
+ ab.setAddrs(p, addrs, ttl, ttlOverride, false)
+}
+
+// UpdateAddrs will update any addresses for a given peer and TTL combination to
+// have a new TTL.
+func (ab *dsAddrBook) UpdateAddrs(p peer.ID, oldTTL time.Duration, newTTL time.Duration) {
+ pr, err := ab.loadRecord(p, true, false)
+ if err != nil {
+ log.Error("failed to update ttls for peer", "peer", p, "err", err)
+ return
+ }
+
+ pr.Lock()
+ defer pr.Unlock()
+
+ newExp := ab.clock.Now().Add(newTTL).Unix()
+ for _, entry := range pr.Addrs {
+ if entry.Ttl != int64(oldTTL) {
+ continue
+ }
+ entry.Ttl, entry.Expiry = int64(newTTL), newExp
+ pr.dirty = true
+ }
+
+ if pr.clean(ab.clock.Now()) {
+ pr.flush(ab.ds)
+ }
+}
+
+// Addrs returns all of the non-expired addresses for a given peer.
+func (ab *dsAddrBook) Addrs(p peer.ID) []ma.Multiaddr {
+ pr, err := ab.loadRecord(p, true, true)
+ if err != nil {
+ log.Warn("failed to load peerstore entry for peer while querying addrs", "peer", p, "err", err)
+ return nil
+ }
+
+ pr.RLock()
+ defer pr.RUnlock()
+
+ addrs := make([]ma.Multiaddr, len(pr.Addrs))
+ for i, a := range pr.Addrs {
+ var err error
+ addrs[i], err = ma.NewMultiaddrBytes(a.Addr)
+ if err != nil {
+ log.Warn("failed to parse peerstore entry for peer while querying addrs", "peer", p, "err", err)
+ return nil
+ }
+ }
+ return addrs
+}
+
+// PeersWithAddrs returns all of the peer IDs for which the AddrBook has addresses.
+func (ab *dsAddrBook) PeersWithAddrs() peer.IDSlice {
+ ids, err := uniquePeerIds(ab.ds, addrBookBase, func(result query.Result) string {
+ return ds.RawKey(result.Key).Name()
+ })
+ if err != nil {
+ log.Error("error while retrieving peers with addresses", "err", err)
+ }
+ return ids
+}
+
+// AddrStream returns a channel on which all new addresses discovered for a
+// given peer ID will be published.
+func (ab *dsAddrBook) AddrStream(ctx context.Context, p peer.ID) <-chan ma.Multiaddr {
+ initial := ab.Addrs(p)
+ return ab.subsManager.AddrStream(ctx, p, initial)
+}
+
+// ClearAddrs will delete all known addresses for a peer ID.
+func (ab *dsAddrBook) ClearAddrs(p peer.ID) {
+ ab.cache.Remove(p)
+
+ key := addrBookBase.ChildString(b32.RawStdEncoding.EncodeToString([]byte(p)))
+ if err := ab.ds.Delete(context.TODO(), key); err != nil {
+ log.Error("failed to clear addresses for peer", "peer", p, "err", err)
+ }
+}
+
+func (ab *dsAddrBook) setAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration, mode ttlWriteMode, _ bool) (err error) {
+ if len(addrs) == 0 {
+ return nil
+ }
+
+ pr, err := ab.loadRecord(p, true, false)
+ if err != nil {
+ return fmt.Errorf("failed to load peerstore entry for peer %s while setting addrs, err: %v", p, err)
+ }
+
+ pr.Lock()
+ defer pr.Unlock()
+
+ // // if we have a signed PeerRecord, ignore attempts to add unsigned addrs
+ // if !signed && pr.CertifiedRecord != nil {
+ // return nil
+ // }
+
+ newExp := ab.clock.Now().Add(ttl).Unix()
+ addrsMap := make(map[string]*pb.AddrBookRecord_AddrEntry, len(pr.Addrs))
+ for _, addr := range pr.Addrs {
+ addrsMap[string(addr.Addr)] = addr
+ }
+
+ updateExisting := func(incoming ma.Multiaddr) *pb.AddrBookRecord_AddrEntry {
+ existingEntry := addrsMap[string(incoming.Bytes())]
+ if existingEntry == nil {
+ return nil
+ }
+
+ switch mode {
+ case ttlOverride:
+ existingEntry.Ttl = int64(ttl)
+ existingEntry.Expiry = newExp
+ case ttlExtend:
+ if int64(ttl) > existingEntry.Ttl {
+ existingEntry.Ttl = int64(ttl)
+ }
+ if newExp > existingEntry.Expiry {
+ existingEntry.Expiry = newExp
+ }
+ default:
+ panic("BUG: unimplemented ttl mode")
+ }
+ return existingEntry
+ }
+
+ var entries []*pb.AddrBookRecord_AddrEntry
+ for _, incoming := range addrs {
+ existingEntry := updateExisting(incoming)
+
+ if existingEntry == nil {
+ // if signed {
+ // entries = append(entries, existingEntry)
+ // }
+ // } else {
+ // new addr, add & broadcast
+ entry := &pb.AddrBookRecord_AddrEntry{
+ Addr: incoming.Bytes(),
+ Ttl: int64(ttl),
+ Expiry: newExp,
+ }
+ entries = append(entries, entry)
+
+ // note: there's a minor chance that writing the record will fail, in which case we would've broadcast
+ // the addresses without persisting them. This is very unlikely and not much of an issue.
+ ab.subsManager.BroadcastAddr(p, incoming)
+ }
+ }
+
+ // if signed {
+ // // when adding signed addrs, we want to keep _only_ the incoming addrs
+ // pr.Addrs = entries
+ // } else {
+ pr.Addrs = append(pr.Addrs, entries...)
+ // }
+
+ pr.dirty = true
+ pr.clean(ab.clock.Now())
+ return pr.flush(ab.ds)
+}
+
+// deletes addresses in place, avoiding copies until we encounter the first deletion.
+// does not preserve order, but entries are re-sorted before flushing to disk anyway.
+func deleteInPlace(s []*pb.AddrBookRecord_AddrEntry, addrs []ma.Multiaddr) []*pb.AddrBookRecord_AddrEntry {
+ if s == nil || len(addrs) == 0 {
+ return s
+ }
+ survived := len(s)
+Outer:
+ for i, addr := range s {
+ for _, del := range addrs {
+ if !bytes.Equal(del.Bytes(), addr.Addr) {
+ continue
+ }
+ survived--
+ // if there are no survivors, bail out
+ if survived == 0 {
+ break Outer
+ }
+ s[i] = s[survived]
+ // we've already dealt with s[i], move to the next
+ continue Outer
+ }
+ }
+ return s[:survived]
+}
+
+func (ab *dsAddrBook) deleteAddrs(p peer.ID, addrs []ma.Multiaddr) (err error) {
+ pr, err := ab.loadRecord(p, false, false)
+ if err != nil {
+ return fmt.Errorf("failed to load peerstore entry for peer %v while deleting addrs, err: %v", p, err)
+ }
+
+ pr.Lock()
+ defer pr.Unlock()
+
+ if pr.Addrs == nil {
+ return nil
+ }
+
+ pr.Addrs = deleteInPlace(pr.Addrs, addrs)
+
+ pr.dirty = true
+ pr.clean(ab.clock.Now())
+ return pr.flush(ab.ds)
+}
+
+func cleanAddrs(addrs []ma.Multiaddr, pid peer.ID) []ma.Multiaddr {
+ clean := make([]ma.Multiaddr, 0, len(addrs))
+ for _, addr := range addrs {
+ // Remove suffix of /p2p/peer-id from address
+ addr, addrPid := peer.SplitAddr(addr)
+ if addr == nil {
+ log.Warn("Was passed a nil multiaddr", "peer", pid)
+ continue
+ }
+ if addrPid != "" && addrPid != pid {
+ log.Warn("Was passed p2p address with a different peerId", "found", addrPid, "expected", pid)
+ continue
+ }
+ clean = append(clean, addr)
+ }
+ return clean
+}
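
The two ttlWriteMode values used by setAddrs differ in one important way: ttlOverride (used by SetAddrs) replaces an entry's TTL and expiry unconditionally, while ttlExtend (used by AddAddrs) only ever lengthens them. A minimal sketch of that rule in isolation, using a hypothetical `entry` struct standing in for pb.AddrBookRecord_AddrEntry:

```go
package main

import (
	"fmt"
	"time"
)

type entry struct{ ttl, expiry int64 } // stand-in for pb.AddrBookRecord_AddrEntry

func apply(e *entry, ttl time.Duration, newExp int64, extend bool) {
	if !extend { // ttlOverride: always replace
		e.ttl, e.expiry = int64(ttl), newExp
		return
	}
	// ttlExtend: only move values forward, never shorten
	if int64(ttl) > e.ttl {
		e.ttl = int64(ttl)
	}
	if newExp > e.expiry {
		e.expiry = newExp
	}
}

func main() {
	now := time.Now().Unix()
	e := &entry{ttl: int64(time.Hour), expiry: now + 3600}

	apply(e, time.Minute, now+60, true) // extend with a shorter TTL: no-op
	fmt.Println(e.ttl == int64(time.Hour), e.expiry == now+3600) // true true

	apply(e, time.Minute, now+60, false) // override: shortens unconditionally
	fmt.Println(time.Duration(e.ttl), e.expiry == now+60) // 1m0s true
}
```

This is why repeatedly re-adding an address via AddAddrs can only keep it alive longer, whereas SetAddrs can deliberately shorten its lifetime.
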
diff --git a/p2p/host/peerstore/pstoreds/addr_book_gc.go b/p2p/host/peerstore/pstoreds/addr_book_gc.go
new file mode 100644
index 0000000000..779cbf7815
--- /dev/null
+++ b/p2p/host/peerstore/pstoreds/addr_book_gc.go
@@ -0,0 +1,408 @@
+package pstoreds
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoreds/pb"
+ "google.golang.org/protobuf/proto"
+
+ ds "github.com/ipfs/go-datastore"
+ "github.com/ipfs/go-datastore/query"
+ b32 "github.com/multiformats/go-base32"
+)
+
+var (
+ // GC lookahead entries are stored in key pattern:
+ // /peers/gc/addrs/<unix timestamp of next visit>/<peer ID b32> => nil
+ // in databases with lexicographical key order, this time-indexing allows us to visit
+ // only the timeslice we are interested in.
+ gcLookaheadBase = ds.NewKey("/peers/gc/addrs")
+
+ // queries
+ purgeLookaheadQuery = query.Query{
+ Prefix: gcLookaheadBase.String(),
+ Orders: []query.Order{query.OrderByFunction(orderByTimestampInKey)},
+ KeysOnly: true,
+ }
+
+ purgeStoreQuery = query.Query{
+ Prefix: addrBookBase.String(),
+ Orders: []query.Order{query.OrderByKey{}},
+ KeysOnly: false,
+ }
+
+ populateLookaheadQuery = query.Query{
+ Prefix: addrBookBase.String(),
+ Orders: []query.Order{query.OrderByKey{}},
+ KeysOnly: true,
+ }
+)
+
+// dsAddrBookGc is responsible for garbage collection in a datastore-backed address book.
+type dsAddrBookGc struct {
+ ctx context.Context
+ ab *dsAddrBook
+ running chan struct{}
+ lookaheadEnabled bool
+ purgeFunc func()
+ currWindowEnd int64
+}
+
+func newAddressBookGc(ctx context.Context, ab *dsAddrBook) (*dsAddrBookGc, error) {
+ if ab.opts.GCPurgeInterval < 0 {
+ return nil, fmt.Errorf("negative GC purge interval provided: %s", ab.opts.GCPurgeInterval)
+ }
+ if ab.opts.GCLookaheadInterval < 0 {
+ return nil, fmt.Errorf("negative GC lookahead interval provided: %s", ab.opts.GCLookaheadInterval)
+ }
+ if ab.opts.GCInitialDelay < 0 {
+ return nil, fmt.Errorf("negative GC initial delay provided: %s", ab.opts.GCInitialDelay)
+ }
+ if ab.opts.GCLookaheadInterval > 0 && ab.opts.GCLookaheadInterval < ab.opts.GCPurgeInterval {
+ return nil, fmt.Errorf("lookahead interval must be larger than purge interval, respectively: %s, %s",
+ ab.opts.GCLookaheadInterval, ab.opts.GCPurgeInterval)
+ }
+
+ lookaheadEnabled := ab.opts.GCLookaheadInterval > 0
+ gc := &dsAddrBookGc{
+ ctx: ctx,
+ ab: ab,
+ running: make(chan struct{}, 1),
+ lookaheadEnabled: lookaheadEnabled,
+ }
+
+ if lookaheadEnabled {
+ gc.purgeFunc = gc.purgeLookahead
+ } else {
+ gc.purgeFunc = gc.purgeStore
+ }
+
+ // do not start GC timers if purge is disabled; this GC can only be triggered manually.
+ if ab.opts.GCPurgeInterval > 0 {
+ gc.ab.childrenDone.Add(1)
+ go gc.background()
+ }
+
+ return gc, nil
+}
+
+// background prunes expired addresses from the datastore at regular intervals. It should be spawned as a goroutine.
+func (gc *dsAddrBookGc) background() {
+ defer gc.ab.childrenDone.Done()
+
+ select {
+ case <-gc.ab.clock.After(gc.ab.opts.GCInitialDelay):
+ case <-gc.ab.ctx.Done():
+ // yield if we have been cancelled/closed before the delay elapses.
+ return
+ }
+
+ purgeTimer := time.NewTicker(gc.ab.opts.GCPurgeInterval)
+ defer purgeTimer.Stop()
+
+ var lookaheadCh <-chan time.Time
+ if gc.lookaheadEnabled {
+ lookaheadTimer := time.NewTicker(gc.ab.opts.GCLookaheadInterval)
+ lookaheadCh = lookaheadTimer.C
+ gc.populateLookahead() // do a lookahead now
+ defer lookaheadTimer.Stop()
+ }
+
+ for {
+ select {
+ case <-purgeTimer.C:
+ gc.purgeFunc()
+
+ case <-lookaheadCh:
+ // will never trigger if lookahead is disabled (nil Duration).
+ gc.populateLookahead()
+
+ case <-gc.ctx.Done():
+ return
+ }
+ }
+}
+
+// purgeLookahead runs a single GC purge cycle over the current lookahead window, deleting the
+// addresses that have expired. (When lookahead is disabled, purgeStore below visits all entries
+// in the datastore instead.)
+func (gc *dsAddrBookGc) purgeLookahead() {
+ select {
+ case gc.running <- struct{}{}:
+ defer func() { <-gc.running }()
+ default:
+ // yield if another GC operation is running.
+ return
+ }
+
+ var id peer.ID
+ record := &addrsRecord{AddrBookRecord: &pb.AddrBookRecord{}} // empty record to reuse and avoid allocs.
+ batch, err := newCyclicBatch(gc.ab.ds, defaultOpsPerCyclicBatch)
+ if err != nil {
+ log.Warn("failed while creating batch to purge GC entries", "err", err)
+ }
+
+ // This function drops an unparseable GC entry; this is for safety. It is an escape hatch in case
+ // we modify the format of keys going forward. If a user runs a new version against an old DB,
+ // and we don't clean up unparseable entries, we'll end up accumulating garbage.
+ dropInError := func(key ds.Key, err error, msg string) {
+ if err != nil {
+ log.Warn("failed while record with GC key; deleting", "message", msg, "key", key, "err", err)
+ }
+ if err = batch.Delete(context.TODO(), key); err != nil {
+ log.Warn("failed to delete corrupt GC lookahead entry", "key", key, "err", err)
+ }
+ }
+
+ // This function drops a GC key if the entry is cleaned correctly. It may reschedule another visit
+ // if the next earliest expiry falls within the current window again.
+ dropOrReschedule := func(key ds.Key, ar *addrsRecord) {
+ if err := batch.Delete(context.TODO(), key); err != nil {
+ log.Warn("failed to delete lookahead entry", "key", key, "err", err)
+ }
+
+ // re-add the record if it needs to be visited again in this window.
+ if len(ar.Addrs) != 0 && ar.Addrs[0].Expiry <= gc.currWindowEnd {
+ gcKey := gcLookaheadBase.ChildString(fmt.Sprintf("%d/%s", ar.Addrs[0].Expiry, key.Name()))
+ if err := batch.Put(context.TODO(), gcKey, []byte{}); err != nil {
+ log.Warn("failed to add new GC key", "key", gcKey, "err", err)
+ }
+ }
+ }
+
+ results, err := gc.ab.ds.Query(context.TODO(), purgeLookaheadQuery)
+ if err != nil {
+ log.Warn("failed while fetching entries to purge", "err", err)
+ return
+ }
+ defer results.Close()
+
+ now := gc.ab.clock.Now().Unix()
+
+ // keys: /peers/gc/addrs/<unix timestamp of next visit>/<peer ID b32>
+ // values: nil
+ for result := range results.Next() {
+ gcKey := ds.RawKey(result.Key)
+ ts, err := strconv.ParseInt(gcKey.Parent().Name(), 10, 64)
+ if err != nil {
+ dropInError(gcKey, err, "parsing timestamp")
+ log.Warn("failed while parsing timestamp from key", "key", result.Key, "err", err)
+ continue
+ } else if ts > now {
+ // this is an ordered cursor; when we hit an entry with a timestamp beyond now, we can break.
+ break
+ }
+
+ idb32, err := b32.RawStdEncoding.DecodeString(gcKey.Name())
+ if err != nil {
+ dropInError(gcKey, err, "parsing peer ID")
+ log.Warn("failed while parsing b32 peer ID from key", "key", result.Key, "err", err)
+ continue
+ }
+
+ id, err = peer.IDFromBytes(idb32)
+ if err != nil {
+ dropInError(gcKey, err, "decoding peer ID")
+ log.Warn("failed while decoding peer ID from key", "key", result.Key, "err", err)
+ continue
+ }
+
+ // if the record is in cache, we clean it and flush it if necessary.
+ if cached, ok := gc.ab.cache.Peek(id); ok {
+ cached.Lock()
+ if cached.clean(gc.ab.clock.Now()) {
+ if err = cached.flush(batch); err != nil {
+ log.Warn("failed to flush entry modified by GC for peer", "peer", id, "err", err)
+ }
+ }
+ dropOrReschedule(gcKey, cached)
+ cached.Unlock()
+ continue
+ }
+
+ record.Reset()
+
+ // otherwise, fetch it from the store, clean it and flush it.
+ entryKey := addrBookBase.ChildString(gcKey.Name())
+ val, err := gc.ab.ds.Get(context.TODO(), entryKey)
+ if err != nil {
+ // captures all errors, including ErrNotFound.
+ dropInError(gcKey, err, "fetching entry")
+ continue
+ }
+ err = proto.Unmarshal(val, record)
+ if err != nil {
+ dropInError(gcKey, err, "unmarshalling entry")
+ continue
+ }
+ if record.clean(gc.ab.clock.Now()) {
+ err = record.flush(batch)
+ if err != nil {
+ log.Warn("failed to flush entry modified by GC for peer", "peer", id, "err", err)
+ }
+ }
+ dropOrReschedule(gcKey, record)
+ }
+
+ if err = batch.Commit(context.TODO()); err != nil {
+ log.Warn("failed to commit GC purge batch", "err", err)
+ }
+}
+
+func (gc *dsAddrBookGc) purgeStore() {
+ select {
+ case gc.running <- struct{}{}:
+ defer func() { <-gc.running }()
+ default:
+ // yield if another GC operation is running.
+ return
+ }
+
+ record := &addrsRecord{AddrBookRecord: &pb.AddrBookRecord{}} // empty record to reuse and avoid allocs.
+ batch, err := newCyclicBatch(gc.ab.ds, defaultOpsPerCyclicBatch)
+ if err != nil {
+ log.Warn("failed while creating batch to purge GC entries", "err", err)
+ }
+
+ results, err := gc.ab.ds.Query(context.TODO(), purgeStoreQuery)
+ if err != nil {
+ log.Warn("failed while opening iterator", "err", err)
+ return
+ }
+ defer results.Close()
+
+ // keys: /peers/addrs/<peer ID b32>
+ for result := range results.Next() {
+ record.Reset()
+ if err = proto.Unmarshal(result.Value, record); err != nil {
+ log.Warn("failed to unmarshal record during GC purge", "key", result.Key, "err", err)
+ continue
+ }
+
+ id := record.Id
+ if !record.clean(gc.ab.clock.Now()) {
+ continue
+ }
+
+ if err := record.flush(batch); err != nil {
+ log.Warn("failed to flush entry modified by GC for peer", "peer", id, "err", err)
+ }
+ gc.ab.cache.Remove(peer.ID(id))
+ }
+
+ if err = batch.Commit(context.TODO()); err != nil {
+ log.Warn("failed to commit GC purge batch", "err", err)
+ }
+}
+
+// populateLookahead populates the lookahead window by scanning the entire store and picking entries whose earliest
+// expiration falls within the window period.
+//
+// Those entries are stored in the lookahead region in the store, indexed by the timestamp when they need to be
+// visited, to facilitate temporal range scans.
+func (gc *dsAddrBookGc) populateLookahead() {
+ if gc.ab.opts.GCLookaheadInterval == 0 {
+ return
+ }
+
+ select {
+ case gc.running <- struct{}{}:
+ defer func() { <-gc.running }()
+ default:
+ // yield if something's running.
+ return
+ }
+
+ until := gc.ab.clock.Now().Add(gc.ab.opts.GCLookaheadInterval).Unix()
+
+ var id peer.ID
+ record := &addrsRecord{AddrBookRecord: &pb.AddrBookRecord{}}
+ results, err := gc.ab.ds.Query(context.TODO(), populateLookaheadQuery)
+ if err != nil {
+ log.Warn("failed while querying to populate lookahead GC window", "err", err)
+ return
+ }
+ defer results.Close()
+
+ batch, err := newCyclicBatch(gc.ab.ds, defaultOpsPerCyclicBatch)
+ if err != nil {
+ log.Warn("failed while creating batch to populate lookahead GC window", "err", err)
+ return
+ }
+
+ for result := range results.Next() {
+ idb32 := ds.RawKey(result.Key).Name()
+ k, err := b32.RawStdEncoding.DecodeString(idb32)
+ if err != nil {
+ log.Warn("failed while decoding peer ID from key", "key", result.Key, "err", err)
+ continue
+ }
+ if id, err = peer.IDFromBytes(k); err != nil {
+ log.Warn("failed while decoding peer ID from key", "key", result.Key, "err", err)
+ }
+
+ // if the record is in cache, use the cached version.
+ if cached, ok := gc.ab.cache.Peek(id); ok {
+ cached.RLock()
+ if len(cached.Addrs) == 0 || cached.Addrs[0].Expiry > until {
+ cached.RUnlock()
+ continue
+ }
+ gcKey := gcLookaheadBase.ChildString(fmt.Sprintf("%d/%s", cached.Addrs[0].Expiry, idb32))
+ if err = batch.Put(context.TODO(), gcKey, []byte{}); err != nil {
+ log.Warn("failed while inserting GC entry for peer", "peer", id, "err", err)
+ }
+ cached.RUnlock()
+ continue
+ }
+
+ record.Reset()
+
+ val, err := gc.ab.ds.Get(context.TODO(), ds.RawKey(result.Key))
+ if err != nil {
+ log.Warn("failed which getting record from store for peer", "peer", id, "err", err)
+ continue
+ }
+ if err := proto.Unmarshal(val, record); err != nil {
+ log.Warn("failed while unmarshalling record from store for peer", "peer", id, "err", err)
+ continue
+ }
+ if len(record.Addrs) > 0 && record.Addrs[0].Expiry <= until {
+ gcKey := gcLookaheadBase.ChildString(fmt.Sprintf("%d/%s", record.Addrs[0].Expiry, idb32))
+ if err = batch.Put(context.TODO(), gcKey, []byte{}); err != nil {
+ log.Warn("failed while inserting GC entry for peer", "peer", id, "err", err)
+ }
+ }
+ }
+
+ if err = batch.Commit(context.TODO()); err != nil {
+ log.Warn("failed to commit GC lookahead batch", "err", err)
+ }
+
+ gc.currWindowEnd = until
+}
+
+// orderByTimestampInKey orders the results by comparing the timestamp in the
+// key. A lexicographic sort by itself is wrong since "10" is less than "2", but
+// as an int 2 is obviously less than 10.
+func orderByTimestampInKey(a, b query.Entry) int {
+ aKey := ds.RawKey(a.Key)
+ aInt, err := strconv.ParseInt(aKey.Parent().Name(), 10, 64)
+ if err != nil {
+ return -1
+ }
+ bKey := ds.RawKey(b.Key)
+ bInt, err := strconv.ParseInt(bKey.Parent().Name(), 10, 64)
+ if err != nil {
+ return -1
+ }
+ if aInt < bInt {
+ return -1
+ } else if aInt == bInt {
+ return 0
+ }
+ return 1
+}
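
orderByTimestampInKey exists because the lookahead keys embed a decimal Unix timestamp, and plain key order would visit "10" before "2". A standalone demonstration of the difference, with hypothetical keys and a numeric comparison equivalent to the function above:

```go
package main

import (
	"fmt"
	"sort"
	"strconv"
	"strings"
)

func main() {
	// keys of the form /peers/gc/addrs/<unix timestamp>/<peer ID>
	keys := []string{
		"/peers/gc/addrs/10/peerA",
		"/peers/gc/addrs/2/peerB",
		"/peers/gc/addrs/100/peerC",
	}

	// lexicographic order visits timestamps 10, 100, 2 -- wrong temporally
	lex := append([]string{}, keys...)
	sort.Strings(lex)
	fmt.Println(lex)

	// numeric order on the embedded timestamp visits 2, 10, 100 -- correct
	ts := func(k string) int64 {
		parts := strings.Split(k, "/")
		n, _ := strconv.ParseInt(parts[len(parts)-2], 10, 64)
		return n
	}
	num := append([]string{}, keys...)
	sort.Slice(num, func(i, j int) bool { return ts(num[i]) < ts(num[j]) })
	fmt.Println(num)
}
```
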
diff --git a/p2p/host/peerstore/pstoreds/addr_book_gc_test.go b/p2p/host/peerstore/pstoreds/addr_book_gc_test.go
new file mode 100644
index 0000000000..74429ee53d
--- /dev/null
+++ b/p2p/host/peerstore/pstoreds/addr_book_gc_test.go
@@ -0,0 +1,264 @@
+package pstoreds
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ pstore "github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/libp2p/go-libp2p/p2p/host/peerstore/test"
+
+ mockClock "github.com/benbjohnson/clock"
+ "github.com/ipfs/go-datastore/query"
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/stretchr/testify/require"
+)
+
+var lookaheadQuery = query.Query{Prefix: gcLookaheadBase.String(), KeysOnly: true}
+
+type testProbe struct {
+ t *testing.T
+ ab pstore.AddrBook
+}
+
+func (tp *testProbe) countLookaheadEntries() (i int) {
+ results, err := tp.ab.(*dsAddrBook).ds.Query(context.Background(), lookaheadQuery)
+ if err != nil {
+ tp.t.Fatal(err)
+ }
+
+ defer results.Close()
+ for range results.Next() {
+ i++
+ }
+ return i
+}
+
+func (tp *testProbe) clearCache() {
+ for _, k := range tp.ab.(*dsAddrBook).cache.Keys() {
+ tp.ab.(*dsAddrBook).cache.Remove(k)
+ }
+}
+
+func TestGCLookahead(t *testing.T) {
+ opts := DefaultOpts()
+
+ // effectively disable automatic GC for this test.
+ opts.GCInitialDelay = 90 * time.Hour
+ opts.GCLookaheadInterval = 10 * time.Second
+ opts.GCPurgeInterval = 1 * time.Second
+
+ factory := addressBookFactory(t, mapDBStore, opts)
+ ab, closeFn := factory()
+ gc := ab.(*dsAddrBook).gc
+ defer closeFn()
+
+ tp := &testProbe{t, ab}
+
+ ids := test.GeneratePeerIDs(10)
+ addrs := test.GenerateAddrs(100)
+
+ // lookahead is 10 seconds, so these entries will be outside the lookahead window.
+ ab.AddAddrs(ids[0], addrs[:10], time.Hour)
+ ab.AddAddrs(ids[1], addrs[10:20], time.Hour)
+ ab.AddAddrs(ids[2], addrs[20:30], time.Hour)
+
+ gc.populateLookahead()
+ if i := tp.countLookaheadEntries(); i != 0 {
+ t.Errorf("expected no GC lookahead entries, got: %v", i)
+ }
+
+ // change addresses of a peer to have TTL 1 second, placing them in the lookahead window.
+ ab.UpdateAddrs(ids[1], time.Hour, time.Second)
+
+ // Purge the cache, to exercise a different path in the lookahead cycle.
+ tp.clearCache()
+
+ gc.populateLookahead()
+ if i := tp.countLookaheadEntries(); i != 1 {
+ t.Errorf("expected 1 GC lookahead entry, got: %v", i)
+ }
+
+ // change addresses of another to have TTL 5 second, placing them in the lookahead window.
+ ab.UpdateAddrs(ids[2], time.Hour, 5*time.Second)
+ gc.populateLookahead()
+ if i := tp.countLookaheadEntries(); i != 2 {
+ t.Errorf("expected 2 GC lookahead entries, got: %v", i)
+ }
+}
+
+func TestGCPurging(t *testing.T) {
+ opts := DefaultOpts()
+
+ // effectively disable automatic GC for this test.
+ opts.GCInitialDelay = 90 * time.Hour
+ opts.GCLookaheadInterval = 20 * time.Second
+ opts.GCPurgeInterval = 1 * time.Second
+ clk := mockClock.NewMock()
+ opts.Clock = clk
+
+ factory := addressBookFactory(t, mapDBStore, opts)
+ ab, closeFn := factory()
+ gc := ab.(*dsAddrBook).gc
+ defer closeFn()
+
+ tp := &testProbe{t, ab}
+
+ ids := test.GeneratePeerIDs(10)
+ addrs := test.GenerateAddrs(100)
+
+ // place all addresses within the lookahead window, but stagger their expirations.
+ ab.AddAddrs(ids[0], addrs[:10], 1*time.Second)
+ ab.AddAddrs(ids[1], addrs[30:40], 1*time.Second)
+ ab.AddAddrs(ids[2], addrs[60:70], 1*time.Second)
+
+ ab.AddAddrs(ids[0], addrs[10:20], 4*time.Second)
+ ab.AddAddrs(ids[1], addrs[40:50], 4*time.Second)
+
+ ab.AddAddrs(ids[0], addrs[20:30], 10*time.Second)
+ ab.AddAddrs(ids[1], addrs[50:60], 10*time.Second)
+
+ // this is inside the window, but it will survive the purges we do in the test.
+ ab.AddAddrs(ids[3], addrs[70:80], 15*time.Second)
+
+ gc.populateLookahead()
+ if i := tp.countLookaheadEntries(); i != 4 {
+ t.Errorf("expected 4 GC lookahead entries, got: %v", i)
+ }
+
+ clk.Add(2 * time.Second)
+ gc.purgeLookahead()
+ if i := tp.countLookaheadEntries(); i != 3 {
+ t.Errorf("expected 3 GC lookahead entries, got: %v", i)
+ }
+
+ // Purge the cache, to exercise a different path in the purge cycle.
+ tp.clearCache()
+
+ clk.Add(5 * time.Second)
+ gc.purgeLookahead()
+ if i := tp.countLookaheadEntries(); i != 3 {
+ t.Errorf("expected 3 GC lookahead entries, got: %v", i)
+ }
+
+ clk.Add(5 * time.Second)
+ gc.purgeLookahead()
+ if i := tp.countLookaheadEntries(); i != 1 {
+ t.Errorf("expected 1 GC lookahead entry, got: %v", i)
+ }
+ if i := len(ab.PeersWithAddrs()); i != 1 {
+ t.Errorf("expected 1 entry in the database, got: %v", i)
+ }
+ if p := ab.PeersWithAddrs()[0]; p != ids[3] {
+ t.Errorf("expected remaining peer to be #3, got: %v, expected: %v", p, ids[3])
+ }
+}
+
+func TestGCDelay(t *testing.T) {
+ ids := test.GeneratePeerIDs(10)
+ addrs := test.GenerateAddrs(100)
+
+ clk := mockClock.NewMock()
+ opts := DefaultOpts()
+ opts.GCInitialDelay = 2 * time.Second
+ opts.GCLookaheadInterval = 1 * time.Minute
+ opts.GCPurgeInterval = 30 * time.Second
+ opts.Clock = clk
+
+ factory := addressBookFactory(t, mapDBStore, opts)
+ ab, closeFn := factory()
+ defer closeFn()
+ // give the background goroutine some time to start
+ time.Sleep(100 * time.Millisecond)
+
+ tp := &testProbe{t, ab}
+
+ ab.AddAddrs(ids[0], addrs, 1*time.Second)
+
+ // immediately afterwards, there should be no lookahead entries.
+ if i := tp.countLookaheadEntries(); i != 0 {
+ t.Fatalf("expected no lookahead entries, got: %d", i)
+ }
+
+ // after the initial delay has passed.
+ clk.Add(3 * time.Second)
+ require.Eventually(t, func() bool { return tp.countLookaheadEntries() == 1 }, 3000*time.Millisecond, 10*time.Millisecond, "expected 1 lookahead entry")
+}
+
+func TestGCLookaheadDisabled(t *testing.T) {
+ ids := test.GeneratePeerIDs(10)
+ addrs := test.GenerateAddrs(100)
+
+ opts := DefaultOpts()
+
+ // effectively disable automatic GC for this test.
+ opts.GCInitialDelay = 90 * time.Hour
+ opts.GCLookaheadInterval = 0 // disable lookahead
+ opts.GCPurgeInterval = 9 * time.Hour
+ clk := mockClock.NewMock()
+ opts.Clock = clk
+
+ factory := addressBookFactory(t, mapDBStore, opts)
+ ab, closeFn := factory()
+ defer closeFn()
+
+ tp := &testProbe{t, ab}
+
+ // four peers:
+ // ids[0] has 10 addresses, all of which expire in 500ms.
+ // ids[1] has 20 addresses; 50% expire in 500ms and 50% in 10 hours.
+ // ids[2] has 10 addresses; all expire in 10 hours.
+ // ids[3] has 60 addresses; all expire in 10 hours.
+ ab.AddAddrs(ids[0], addrs[:10], 500*time.Millisecond)
+ ab.AddAddrs(ids[1], addrs[10:20], 500*time.Millisecond)
+ ab.AddAddrs(ids[1], addrs[20:30], 10*time.Hour)
+ ab.AddAddrs(ids[2], addrs[30:40], 10*time.Hour)
+ ab.AddAddrs(ids[3], addrs[40:], 10*time.Hour)
+
+ clk.Add(100 * time.Millisecond)
+
+ if i := tp.countLookaheadEntries(); i != 0 {
+ t.Errorf("expected no GC lookahead entries, got: %v", i)
+ }
+
+ clk.Add(500 * time.Millisecond)
+ gc := ab.(*dsAddrBook).gc
+ gc.purgeFunc()
+
+ var empty []ma.Multiaddr
+ test.AssertAddressesEqual(t, empty, ab.Addrs(ids[0]))
+ test.AssertAddressesEqual(t, addrs[20:30], ab.Addrs(ids[1]))
+ test.AssertAddressesEqual(t, addrs[30:40], ab.Addrs(ids[2]))
+ test.AssertAddressesEqual(t, addrs[40:], ab.Addrs(ids[3]))
+}
+
+func BenchmarkLookaheadCycle(b *testing.B) {
+ ids := test.GeneratePeerIDs(100)
+ addrs := test.GenerateAddrs(100)
+
+ opts := DefaultOpts()
+
+ opts.GCInitialDelay = 2 * time.Hour
+ opts.GCLookaheadInterval = 2 * time.Hour
+ opts.GCPurgeInterval = 6 * time.Hour
+
+ factory := addressBookFactory(b, mapDBStore, opts)
+ ab, closeFn := factory()
+ defer closeFn()
+
+ inside, outside := 1*time.Minute, 48*time.Hour
+ for i, id := range ids {
+ var ttl time.Duration
+ if i%2 == 0 {
+ ttl = inside
+ } else {
+ ttl = outside
+ }
+ ab.AddAddrs(id, addrs, ttl)
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ ab.(*dsAddrBook).gc.populateLookahead()
+ }
+}
diff --git a/p2p/host/peerstore/pstoreds/cache.go b/p2p/host/peerstore/pstoreds/cache.go
new file mode 100644
index 0000000000..184944f45a
--- /dev/null
+++ b/p2p/host/peerstore/pstoreds/cache.go
@@ -0,0 +1,40 @@
+package pstoreds
+
+// cache abstracts all methods we access from ARCCache, to enable alternate
+// implementations such as a no-op one.
+type cache[K comparable, V any] interface {
+ Get(key K) (value V, ok bool)
+ Add(key K, value V)
+ Remove(key K)
+ Contains(key K) bool
+ Peek(key K) (value V, ok bool)
+ Keys() []K
+}
+
+// noopCache is a dummy implementation that's used when the cache is disabled.
+type noopCache[K comparable, V any] struct {
+}
+
+var _ cache[int, int] = (*noopCache[int, int])(nil)
+
+func (*noopCache[K, V]) Get(_ K) (value V, ok bool) {
+ return *new(V), false
+}
+
+func (*noopCache[K, V]) Add(_ K, _ V) {
+}
+
+func (*noopCache[K, V]) Remove(_ K) {
+}
+
+func (*noopCache[K, V]) Contains(_ K) bool {
+ return false
+}
+
+func (*noopCache[K, V]) Peek(_ K) (value V, ok bool) {
+ return *new(V), false
+}
+
+func (*noopCache[K, V]) Keys() (keys []K) {
+ return keys
+}
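
Because both arc.NewARC and noopCache satisfy this interface, dsAddrBook can be written once against cache[peer.ID, *addrsRecord] and stay oblivious to whether caching is enabled: a disabled cache simply always misses, so every read falls through to the datastore. A compact sketch of the same read-through pattern with generic stand-in types (not the package's own code):

```go
package main

import "fmt"

type cache[K comparable, V any] interface {
	Get(key K) (V, bool)
	Add(key K, value V)
}

type noop[K comparable, V any] struct{}

func (noop[K, V]) Get(K) (v V, ok bool) { return v, false } // always a miss
func (noop[K, V]) Add(K, V)             {}

// lookup consults the cache first and falls back to loader on a miss,
// analogous to how loadRecord treats the datastore.
func lookup[K comparable, V any](c cache[K, V], k K, loader func(K) V) V {
	if v, ok := c.Get(k); ok {
		return v
	}
	v := loader(k)
	c.Add(k, v)
	return v
}

func main() {
	var c cache[string, int] = noop[string, int]{}
	fmt.Println(lookup(c, "x", func(string) int { return 42 })) // always loads: 42
}
```
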
diff --git a/p2p/host/peerstore/pstoreds/cyclic_batch.go b/p2p/host/peerstore/pstoreds/cyclic_batch.go
new file mode 100644
index 0000000000..17a30f91d3
--- /dev/null
+++ b/p2p/host/peerstore/pstoreds/cyclic_batch.go
@@ -0,0 +1,78 @@
+package pstoreds
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ ds "github.com/ipfs/go-datastore"
+)
+
+// how many operations are queued in a cyclic batch before we flush it.
+var defaultOpsPerCyclicBatch = 20
+
+// cyclicBatch buffers ds write operations and automatically flushes them after defaultOpsPerCyclicBatch (20) have been
+// queued. An explicit `Commit()` closes this cyclic batch, erroring all further operations.
+//
+// It is similar to go-ds autobatch, but it's driven by an actual Batch facility offered by the
+// ds.
+type cyclicBatch struct {
+ threshold int
+ ds.Batch
+ ds ds.Batching
+ pending int
+}
+
+func newCyclicBatch(ds ds.Batching, threshold int) (ds.Batch, error) {
+ batch, err := ds.Batch(context.TODO())
+ if err != nil {
+ return nil, err
+ }
+ // store the threshold so cycle() flushes every `threshold` ops as documented
+ // above, rather than committing on every single write (threshold left at 0).
+ return &cyclicBatch{Batch: batch, ds: ds, threshold: threshold}, nil
+}
+
+func (cb *cyclicBatch) cycle() (err error) {
+ if cb.Batch == nil {
+ return errors.New("cyclic batch is closed")
+ }
+ if cb.pending < cb.threshold {
+ // we haven't reached the threshold yet.
+ return nil
+ }
+ // commit and renew the batch.
+ if err = cb.Batch.Commit(context.TODO()); err != nil {
+ return fmt.Errorf("failed while committing cyclic batch: %w", err)
+ }
+ if cb.Batch, err = cb.ds.Batch(context.TODO()); err != nil {
+ return fmt.Errorf("failed while renewing cyclic batch: %w", err)
+ }
+ return nil
+}
+
+func (cb *cyclicBatch) Put(ctx context.Context, key ds.Key, val []byte) error {
+ if err := cb.cycle(); err != nil {
+ return err
+ }
+ cb.pending++
+ return cb.Batch.Put(ctx, key, val)
+}
+
+func (cb *cyclicBatch) Delete(ctx context.Context, key ds.Key) error {
+ if err := cb.cycle(); err != nil {
+ return err
+ }
+ cb.pending++
+ return cb.Batch.Delete(ctx, key)
+}
+
+func (cb *cyclicBatch) Commit(ctx context.Context) error {
+ if cb.Batch == nil {
+ return errors.New("cyclic batch is closed")
+ }
+ if err := cb.Batch.Commit(ctx); err != nil {
+ return err
+ }
+ cb.pending = 0
+ cb.Batch = nil
+ return nil
+}
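
A usage sketch, written as a hypothetical in-package test (not part of this change): queue writes, let the batch flush itself every defaultOpsPerCyclicBatch operations, and Commit once at the end. A MutexWrap-ped MapDatastore satisfies ds.Batching, mirroring mapDBStore in ds_test.go below.

```go
package pstoreds

import (
	"context"
	"testing"

	ds "github.com/ipfs/go-datastore"
	"github.com/ipfs/go-datastore/sync"
)

// TestCyclicBatchSketch is illustrative only: it shows the intended flow of
// a cyclic batch, which commits transparently as the threshold is crossed.
func TestCyclicBatchSketch(t *testing.T) {
	store := sync.MutexWrap(ds.NewMapDatastore())
	batch, err := newCyclicBatch(store, defaultOpsPerCyclicBatch)
	if err != nil {
		t.Fatal(err)
	}
	ctx := context.Background()
	for i := 0; i < 100; i++ {
		key := ds.NewKey("/peers/addrs").ChildString(string(rune('a' + i%26)))
		if err := batch.Put(ctx, key, []byte{byte(i)}); err != nil {
			t.Fatal(err)
		}
	}
	if err := batch.Commit(ctx); err != nil { // batch is closed after this
		t.Fatal(err)
	}
}
```
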
diff --git a/p2p/host/peerstore/pstoreds/deprecate.go b/p2p/host/peerstore/pstoreds/deprecate.go
new file mode 100644
index 0000000000..b549871c31
--- /dev/null
+++ b/p2p/host/peerstore/pstoreds/deprecate.go
@@ -0,0 +1,5 @@
+// Deprecated: The database-backed peerstore will be removed from go-libp2p in the future.
+// Use the memory peerstore (pstoremem) instead.
+// For more details see https://github.com/libp2p/go-libp2p/issues/2329
+// and https://github.com/libp2p/go-libp2p/issues/2355.
+package pstoreds
diff --git a/p2p/host/peerstore/pstoreds/ds_test.go b/p2p/host/peerstore/pstoreds/ds_test.go
new file mode 100644
index 0000000000..a240de3a1c
--- /dev/null
+++ b/p2p/host/peerstore/pstoreds/ds_test.go
@@ -0,0 +1,147 @@
+package pstoreds
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ pstore "github.com/libp2p/go-libp2p/core/peerstore"
+ pt "github.com/libp2p/go-libp2p/p2p/host/peerstore/test"
+
+ mockclock "github.com/benbjohnson/clock"
+ ds "github.com/ipfs/go-datastore"
+ "github.com/ipfs/go-datastore/sync"
+ "github.com/stretchr/testify/require"
+)
+
+func mapDBStore(_ testing.TB) (ds.Batching, func()) {
+ store := ds.NewMapDatastore()
+ closer := func() {
+ store.Close()
+ }
+ return sync.MutexWrap(store), closer
+}
+
+type datastoreFactory func(tb testing.TB) (ds.Batching, func())
+
+var dstores = map[string]datastoreFactory{
+ "MapDB": mapDBStore,
+}
+
+func TestDsPeerstore(t *testing.T) {
+ for name, dsFactory := range dstores {
+ t.Run(name, func(t *testing.T) {
+ pt.TestPeerstore(t, peerstoreFactory(t, dsFactory, DefaultOpts()))
+ })
+
+ t.Run("protobook limits", func(t *testing.T) {
+ const limit = 10
+ opts := DefaultOpts()
+ opts.MaxProtocols = limit
+ ds, close := dsFactory(t)
+ defer close()
+ ps, err := NewPeerstore(context.Background(), ds, opts)
+ require.NoError(t, err)
+ defer ps.Close()
+ pt.TestPeerstoreProtoStoreLimits(t, ps, limit)
+ })
+ }
+}
+
+func TestDsAddrBook(t *testing.T) {
+ for name, dsFactory := range dstores {
+ t.Run(name+" Cacheful", func(t *testing.T) {
+ opts := DefaultOpts()
+ opts.GCPurgeInterval = 1 * time.Second
+ opts.CacheSize = 1024
+ clk := mockclock.NewMock()
+ opts.Clock = clk
+
+ pt.TestAddrBook(t, addressBookFactory(t, dsFactory, opts), clk)
+ })
+
+ t.Run(name+" Cacheless", func(t *testing.T) {
+ opts := DefaultOpts()
+ opts.GCPurgeInterval = 1 * time.Second
+ opts.CacheSize = 0
+ clk := mockclock.NewMock()
+ opts.Clock = clk
+
+ pt.TestAddrBook(t, addressBookFactory(t, dsFactory, opts), clk)
+ })
+ }
+}
+
+func TestDsKeyBook(t *testing.T) {
+ for name, dsFactory := range dstores {
+ t.Run(name, func(t *testing.T) {
+ pt.TestKeyBook(t, keyBookFactory(t, dsFactory, DefaultOpts()))
+ })
+ }
+}
+
+func BenchmarkDsKeyBook(b *testing.B) {
+ for name, dsFactory := range dstores {
+ b.Run(name, func(b *testing.B) {
+ pt.BenchmarkKeyBook(b, keyBookFactory(b, dsFactory, DefaultOpts()))
+ })
+ }
+}
+
+func BenchmarkDsPeerstore(b *testing.B) {
+ caching := DefaultOpts()
+ caching.CacheSize = 1024
+
+ cacheless := DefaultOpts()
+ cacheless.CacheSize = 0
+
+ for name, dsFactory := range dstores {
+ b.Run(name, func(b *testing.B) {
+ pt.BenchmarkPeerstore(b, peerstoreFactory(b, dsFactory, caching), "Caching")
+ })
+ b.Run(name, func(b *testing.B) {
+ pt.BenchmarkPeerstore(b, peerstoreFactory(b, dsFactory, cacheless), "Cacheless")
+ })
+ }
+}
+
+func peerstoreFactory(tb testing.TB, storeFactory datastoreFactory, opts Options) pt.PeerstoreFactory {
+ return func() (pstore.Peerstore, func()) {
+ store, storeCloseFn := storeFactory(tb)
+ ps, err := NewPeerstore(context.Background(), store, opts)
+ if err != nil {
+ tb.Fatal(err)
+ }
+ closer := func() {
+ ps.Close()
+ storeCloseFn()
+ }
+ return ps, closer
+ }
+}
+
+func addressBookFactory(tb testing.TB, storeFactory datastoreFactory, opts Options) pt.AddrBookFactory {
+ return func() (pstore.AddrBook, func()) {
+ store, closeFunc := storeFactory(tb)
+ ab, err := NewAddrBook(context.Background(), store, opts)
+ if err != nil {
+ tb.Fatal(err)
+ }
+ closer := func() {
+ ab.Close()
+ closeFunc()
+ }
+ return ab, closer
+ }
+}
+
+func keyBookFactory(tb testing.TB, storeFactory datastoreFactory, opts Options) pt.KeyBookFactory {
+ return func() (pstore.KeyBook, func()) {
+ store, storeCloseFn := storeFactory(tb)
+ kb, err := NewKeyBook(context.Background(), store, opts)
+ if err != nil {
+ tb.Fatal(err)
+ }
+ return kb, storeCloseFn
+ }
+}
diff --git a/p2p/host/peerstore/pstoreds/keybook.go b/p2p/host/peerstore/pstoreds/keybook.go
new file mode 100644
index 0000000000..18a1a955a0
--- /dev/null
+++ b/p2p/host/peerstore/pstoreds/keybook.go
@@ -0,0 +1,136 @@
+package pstoreds
+
+import (
+ "context"
+ "errors"
+
+ ic "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/peer"
+ pstore "github.com/libp2p/go-libp2p/core/peerstore"
+
+ ds "github.com/ipfs/go-datastore"
+ "github.com/ipfs/go-datastore/query"
+ "github.com/multiformats/go-base32"
+)
+
+// Public and private keys are stored under the following db key pattern:
+// /peers/keys/<b32 peer id no padding>/{pub, priv}
+var (
+ kbBase = ds.NewKey("/peers/keys")
+ pubSuffix = ds.NewKey("/pub")
+ privSuffix = ds.NewKey("/priv")
+)
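+
+// For example, a peer's public key is stored under a key of the shape
+// (as constructed by peerToKey at the bottom of this file):
+//
+//	/peers/keys/<base32 raw-std-encoded peer ID>/pub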
+
+type dsKeyBook struct {
+ ds ds.Datastore
+}
+
+var _ pstore.KeyBook = (*dsKeyBook)(nil)
+
+func NewKeyBook(_ context.Context, store ds.Datastore, _ Options) (*dsKeyBook, error) {
+ return &dsKeyBook{store}, nil
+}
+
+func (kb *dsKeyBook) PubKey(p peer.ID) ic.PubKey {
+ key := peerToKey(p, pubSuffix)
+
+ var pk ic.PubKey
+ if value, err := kb.ds.Get(context.TODO(), key); err == nil {
+ pk, err = ic.UnmarshalPublicKey(value)
+ if err != nil {
+ log.Error("error when unmarshalling pubkey from datastore for peer", "peer", p, "err", err)
+ }
+ } else if err == ds.ErrNotFound {
+ pk, err = p.ExtractPublicKey()
+ switch err {
+ case nil:
+ case peer.ErrNoPublicKey:
+ return nil
+ default:
+ log.Error("error when extracting pubkey from peer ID for peer", "peer", p, "err", err)
+ return nil
+ }
+ pkb, err := ic.MarshalPublicKey(pk)
+ if err != nil {
+ log.Error("error when turning extracted pubkey into bytes for peer", "peer", p, "err", err)
+ return nil
+ }
+ if err := kb.ds.Put(context.TODO(), key, pkb); err != nil {
+ log.Error("error when adding extracted pubkey to peerstore for peer", "peer", p, "err", err)
+ return nil
+ }
+ } else {
+ log.Error("error when fetching pubkey from datastore for peer", "peer", p, "err", err)
+ }
+
+ return pk
+}
+
+func (kb *dsKeyBook) AddPubKey(p peer.ID, pk ic.PubKey) error {
+ // check it's correct.
+ if !p.MatchesPublicKey(pk) {
+ return errors.New("peer ID does not match public key")
+ }
+
+ val, err := ic.MarshalPublicKey(pk)
+ if err != nil {
+ log.Error("error while converting pubkey byte string for peer", "peer", p, "err", err)
+ return err
+ }
+ if err := kb.ds.Put(context.TODO(), peerToKey(p, pubSuffix), val); err != nil {
+ log.Error("error while updating pubkey in datastore for peer", "peer", p, "err", err)
+ return err
+ }
+ return nil
+}
+
+func (kb *dsKeyBook) PrivKey(p peer.ID) ic.PrivKey {
+ value, err := kb.ds.Get(context.TODO(), peerToKey(p, privSuffix))
+ if err != nil {
+ return nil
+ }
+ sk, err := ic.UnmarshalPrivateKey(value)
+ if err != nil {
+ return nil
+ }
+ return sk
+}
+
+func (kb *dsKeyBook) AddPrivKey(p peer.ID, sk ic.PrivKey) error {
+ if sk == nil {
+ return errors.New("private key is nil")
+ }
+ // check it's correct.
+ if !p.MatchesPrivateKey(sk) {
+ return errors.New("peer ID does not match private key")
+ }
+
+ val, err := ic.MarshalPrivateKey(sk)
+ if err != nil {
+ log.Error("error while converting privkey byte string for peer", "peer", p, "err", err)
+ return err
+ }
+	if err := kb.ds.Put(context.TODO(), peerToKey(p, privSuffix), val); err != nil {
+		log.Error("error while updating privkey in datastore for peer", "peer", p, "err", err)
+		return err
+	}
+	return nil
+}
+
+func (kb *dsKeyBook) PeersWithKeys() peer.IDSlice {
+ ids, err := uniquePeerIds(kb.ds, kbBase, func(result query.Result) string {
+ return ds.RawKey(result.Key).Parent().Name()
+ })
+ if err != nil {
+ log.Error("error while retrieving peers with keys", "err", err)
+ }
+ return ids
+}
+
+func (kb *dsKeyBook) RemovePeer(p peer.ID) {
+ kb.ds.Delete(context.TODO(), peerToKey(p, privSuffix))
+ kb.ds.Delete(context.TODO(), peerToKey(p, pubSuffix))
+}
+
+func peerToKey(p peer.ID, suffix ds.Key) ds.Key {
+ return kbBase.ChildString(base32.RawStdEncoding.EncodeToString([]byte(p))).Child(suffix)
+}
diff --git a/p2p/host/peerstore/pstoreds/metadata.go b/p2p/host/peerstore/pstoreds/metadata.go
new file mode 100644
index 0000000000..9dcfcc13b8
--- /dev/null
+++ b/p2p/host/peerstore/pstoreds/metadata.go
@@ -0,0 +1,82 @@
+package pstoreds
+
+import (
+ "bytes"
+ "context"
+ "encoding/gob"
+
+ pool "github.com/libp2p/go-buffer-pool"
+ "github.com/libp2p/go-libp2p/core/peer"
+ pstore "github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/libp2p/go-libp2p/core/protocol"
+
+ ds "github.com/ipfs/go-datastore"
+ "github.com/ipfs/go-datastore/query"
+ "github.com/multiformats/go-base32"
+)
+
+// Metadata is stored under the following db key pattern:
+// /peers/metadata/<b32 peer id no padding>/<key>
+var pmBase = ds.NewKey("/peers/metadata")
+
+type dsPeerMetadata struct {
+ ds ds.Datastore
+}
+
+var _ pstore.PeerMetadata = (*dsPeerMetadata)(nil)
+
+func init() {
+ // Gob registers basic types by default.
+ //
+ // Register complex types used by the peerstore itself.
+ gob.Register(make(map[protocol.ID]struct{}))
+}
+
+// NewPeerMetadata creates a metadata store backed by a persistent db. It uses gob for serialisation.
+//
+// See `init()` to learn which types are registered by default. Modules wishing to store
+// values of other types will need to `gob.Register()` them explicitly, or else callers
+// will receive runtime errors.
+func NewPeerMetadata(_ context.Context, store ds.Datastore, _ Options) (*dsPeerMetadata, error) {
+ return &dsPeerMetadata{store}, nil
+}
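+
+// Illustrative sketch: a module storing values of its own (hypothetical) type
+// must register that type with gob before calling Put:
+//
+//	type myMetadata struct{ Version string }
+//
+//	gob.Register(myMetadata{})
+//	if err := pm.Put(p, "my-module/metadata", myMetadata{Version: "v1"}); err != nil {
+//		// handle error
+//	}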
+
+func (pm *dsPeerMetadata) Get(p peer.ID, key string) (interface{}, error) {
+ k := pmBase.ChildString(base32.RawStdEncoding.EncodeToString([]byte(p))).ChildString(key)
+ value, err := pm.ds.Get(context.TODO(), k)
+ if err != nil {
+ if err == ds.ErrNotFound {
+ err = pstore.ErrNotFound
+ }
+ return nil, err
+ }
+
+ var res interface{}
+ if err := gob.NewDecoder(bytes.NewReader(value)).Decode(&res); err != nil {
+ return nil, err
+ }
+ return res, nil
+}
+
+func (pm *dsPeerMetadata) Put(p peer.ID, key string, val interface{}) error {
+ k := pmBase.ChildString(base32.RawStdEncoding.EncodeToString([]byte(p))).ChildString(key)
+ var buf pool.Buffer
+ if err := gob.NewEncoder(&buf).Encode(&val); err != nil {
+ return err
+ }
+ return pm.ds.Put(context.TODO(), k, buf.Bytes())
+}
+
+func (pm *dsPeerMetadata) RemovePeer(p peer.ID) {
+ result, err := pm.ds.Query(context.TODO(), query.Query{
+ Prefix: pmBase.ChildString(base32.RawStdEncoding.EncodeToString([]byte(p))).String(),
+ KeysOnly: true,
+ })
+ if err != nil {
+ log.Warn("querying datastore when removing peer failed", "peer", p, "err", err)
+ return
+ }
+ for entry := range result.Next() {
+ pm.ds.Delete(context.TODO(), ds.NewKey(entry.Key))
+ }
+}
diff --git a/p2p/host/peerstore/pstoreds/pb/pstore.pb.go b/p2p/host/peerstore/pstoreds/pb/pstore.pb.go
new file mode 100644
index 0000000000..b830d1159c
--- /dev/null
+++ b/p2p/host/peerstore/pstoreds/pb/pstore.pb.go
@@ -0,0 +1,274 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.6
+// protoc v5.29.2
+// source: p2p/host/peerstore/pstoreds/pb/pstore.proto
+
+package pb
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+ unsafe "unsafe"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// AddrBookRecord represents a record for a peer in the address book.
+type AddrBookRecord struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // The peer ID.
+ Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ // The multiaddresses. This is a sorted list where element 0 expires the soonest.
+ Addrs []*AddrBookRecord_AddrEntry `protobuf:"bytes,2,rep,name=addrs,proto3" json:"addrs,omitempty"`
+ // The most recently received signed PeerRecord.
+ CertifiedRecord *AddrBookRecord_CertifiedRecord `protobuf:"bytes,3,opt,name=certified_record,json=certifiedRecord,proto3" json:"certified_record,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *AddrBookRecord) Reset() {
+ *x = AddrBookRecord{}
+ mi := &file_p2p_host_peerstore_pstoreds_pb_pstore_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AddrBookRecord) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AddrBookRecord) ProtoMessage() {}
+
+func (x *AddrBookRecord) ProtoReflect() protoreflect.Message {
+ mi := &file_p2p_host_peerstore_pstoreds_pb_pstore_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AddrBookRecord.ProtoReflect.Descriptor instead.
+func (*AddrBookRecord) Descriptor() ([]byte, []int) {
+ return file_p2p_host_peerstore_pstoreds_pb_pstore_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *AddrBookRecord) GetId() []byte {
+ if x != nil {
+ return x.Id
+ }
+ return nil
+}
+
+func (x *AddrBookRecord) GetAddrs() []*AddrBookRecord_AddrEntry {
+ if x != nil {
+ return x.Addrs
+ }
+ return nil
+}
+
+func (x *AddrBookRecord) GetCertifiedRecord() *AddrBookRecord_CertifiedRecord {
+ if x != nil {
+ return x.CertifiedRecord
+ }
+ return nil
+}
+
+// AddrEntry represents a single multiaddress.
+type AddrBookRecord_AddrEntry struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Addr []byte `protobuf:"bytes,1,opt,name=addr,proto3" json:"addr,omitempty"`
+ // The point in time when this address expires.
+ Expiry int64 `protobuf:"varint,2,opt,name=expiry,proto3" json:"expiry,omitempty"`
+ // The original TTL of this address.
+ Ttl int64 `protobuf:"varint,3,opt,name=ttl,proto3" json:"ttl,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *AddrBookRecord_AddrEntry) Reset() {
+ *x = AddrBookRecord_AddrEntry{}
+ mi := &file_p2p_host_peerstore_pstoreds_pb_pstore_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AddrBookRecord_AddrEntry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AddrBookRecord_AddrEntry) ProtoMessage() {}
+
+func (x *AddrBookRecord_AddrEntry) ProtoReflect() protoreflect.Message {
+ mi := &file_p2p_host_peerstore_pstoreds_pb_pstore_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AddrBookRecord_AddrEntry.ProtoReflect.Descriptor instead.
+func (*AddrBookRecord_AddrEntry) Descriptor() ([]byte, []int) {
+ return file_p2p_host_peerstore_pstoreds_pb_pstore_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *AddrBookRecord_AddrEntry) GetAddr() []byte {
+ if x != nil {
+ return x.Addr
+ }
+ return nil
+}
+
+func (x *AddrBookRecord_AddrEntry) GetExpiry() int64 {
+ if x != nil {
+ return x.Expiry
+ }
+ return 0
+}
+
+func (x *AddrBookRecord_AddrEntry) GetTtl() int64 {
+ if x != nil {
+ return x.Ttl
+ }
+ return 0
+}
+
+// CertifiedRecord contains a serialized signed PeerRecord used to
+// populate the signedAddrs list.
+type AddrBookRecord_CertifiedRecord struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // The Seq counter from the signed PeerRecord envelope
+ Seq uint64 `protobuf:"varint,1,opt,name=seq,proto3" json:"seq,omitempty"`
+ // The serialized bytes of the SignedEnvelope containing the PeerRecord.
+ Raw []byte `protobuf:"bytes,2,opt,name=raw,proto3" json:"raw,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *AddrBookRecord_CertifiedRecord) Reset() {
+ *x = AddrBookRecord_CertifiedRecord{}
+ mi := &file_p2p_host_peerstore_pstoreds_pb_pstore_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AddrBookRecord_CertifiedRecord) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AddrBookRecord_CertifiedRecord) ProtoMessage() {}
+
+func (x *AddrBookRecord_CertifiedRecord) ProtoReflect() protoreflect.Message {
+ mi := &file_p2p_host_peerstore_pstoreds_pb_pstore_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AddrBookRecord_CertifiedRecord.ProtoReflect.Descriptor instead.
+func (*AddrBookRecord_CertifiedRecord) Descriptor() ([]byte, []int) {
+ return file_p2p_host_peerstore_pstoreds_pb_pstore_proto_rawDescGZIP(), []int{0, 1}
+}
+
+func (x *AddrBookRecord_CertifiedRecord) GetSeq() uint64 {
+ if x != nil {
+ return x.Seq
+ }
+ return 0
+}
+
+func (x *AddrBookRecord_CertifiedRecord) GetRaw() []byte {
+ if x != nil {
+ return x.Raw
+ }
+ return nil
+}
+
+var File_p2p_host_peerstore_pstoreds_pb_pstore_proto protoreflect.FileDescriptor
+
+const file_p2p_host_peerstore_pstoreds_pb_pstore_proto_rawDesc = "" +
+ "\n" +
+ "+p2p/host/peerstore/pstoreds/pb/pstore.proto\x12\tpstore.pb\"\xb3\x02\n" +
+ "\x0eAddrBookRecord\x12\x0e\n" +
+ "\x02id\x18\x01 \x01(\fR\x02id\x129\n" +
+ "\x05addrs\x18\x02 \x03(\v2#.pstore.pb.AddrBookRecord.AddrEntryR\x05addrs\x12T\n" +
+ "\x10certified_record\x18\x03 \x01(\v2).pstore.pb.AddrBookRecord.CertifiedRecordR\x0fcertifiedRecord\x1aI\n" +
+ "\tAddrEntry\x12\x12\n" +
+ "\x04addr\x18\x01 \x01(\fR\x04addr\x12\x16\n" +
+ "\x06expiry\x18\x02 \x01(\x03R\x06expiry\x12\x10\n" +
+ "\x03ttl\x18\x03 \x01(\x03R\x03ttl\x1a5\n" +
+ "\x0fCertifiedRecord\x12\x10\n" +
+ "\x03seq\x18\x01 \x01(\x04R\x03seq\x12\x10\n" +
+ "\x03raw\x18\x02 \x01(\fR\x03rawB pstore.pb.AddrBookRecord.AddrEntry
+ 2, // 1: pstore.pb.AddrBookRecord.certified_record:type_name -> pstore.pb.AddrBookRecord.CertifiedRecord
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_p2p_host_peerstore_pstoreds_pb_pstore_proto_init() }
+func file_p2p_host_peerstore_pstoreds_pb_pstore_proto_init() {
+ if File_p2p_host_peerstore_pstoreds_pb_pstore_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_p2p_host_peerstore_pstoreds_pb_pstore_proto_rawDesc), len(file_p2p_host_peerstore_pstoreds_pb_pstore_proto_rawDesc)),
+ NumEnums: 0,
+ NumMessages: 3,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_p2p_host_peerstore_pstoreds_pb_pstore_proto_goTypes,
+ DependencyIndexes: file_p2p_host_peerstore_pstoreds_pb_pstore_proto_depIdxs,
+ MessageInfos: file_p2p_host_peerstore_pstoreds_pb_pstore_proto_msgTypes,
+ }.Build()
+ File_p2p_host_peerstore_pstoreds_pb_pstore_proto = out.File
+ file_p2p_host_peerstore_pstoreds_pb_pstore_proto_goTypes = nil
+ file_p2p_host_peerstore_pstoreds_pb_pstore_proto_depIdxs = nil
+}
diff --git a/p2p/host/peerstore/pstoreds/pb/pstore.proto b/p2p/host/peerstore/pstoreds/pb/pstore.proto
new file mode 100644
index 0000000000..2223668155
--- /dev/null
+++ b/p2p/host/peerstore/pstoreds/pb/pstore.proto
@@ -0,0 +1,37 @@
+syntax = "proto3";
+package pstore.pb;
+
+option go_package = "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoreds/pb";
+
+// AddrBookRecord represents a record for a peer in the address book.
+message AddrBookRecord {
+ // The peer ID.
+ bytes id = 1;
+
+ // The multiaddresses. This is a sorted list where element 0 expires the soonest.
+ repeated AddrEntry addrs = 2;
+
+ // The most recently received signed PeerRecord.
+ CertifiedRecord certified_record = 3;
+
+ // AddrEntry represents a single multiaddress.
+ message AddrEntry {
+ bytes addr = 1;
+
+ // The point in time when this address expires.
+ int64 expiry = 2;
+
+ // The original TTL of this address.
+ int64 ttl = 3;
+ }
+
+ // CertifiedRecord contains a serialized signed PeerRecord used to
+ // populate the signedAddrs list.
+ message CertifiedRecord {
+ // The Seq counter from the signed PeerRecord envelope
+ uint64 seq = 1;
+
+ // The serialized bytes of the SignedEnvelope containing the PeerRecord.
+ bytes raw = 2;
+ }
+}
diff --git a/p2p/host/peerstore/pstoreds/peerstore.go b/p2p/host/peerstore/pstoreds/peerstore.go
new file mode 100644
index 0000000000..8c9e9ff8b3
--- /dev/null
+++ b/p2p/host/peerstore/pstoreds/peerstore.go
@@ -0,0 +1,191 @@
+package pstoreds
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ pstore "github.com/libp2p/go-libp2p/p2p/host/peerstore"
+
+ ds "github.com/ipfs/go-datastore"
+ "github.com/ipfs/go-datastore/query"
+ "github.com/multiformats/go-base32"
+)
+
+// Configuration object for the peerstore.
+type Options struct {
+	// The size of the in-memory cache. A value of 0 disables the cache.
+ CacheSize uint
+
+ // MaxProtocols is the maximum number of protocols we store for one peer.
+ MaxProtocols int
+
+ // Sweep interval to purge expired addresses from the datastore. If this is a zero value, GC will not run
+ // automatically, but it'll be available on demand via explicit calls.
+ GCPurgeInterval time.Duration
+
+ // Interval to renew the GC lookahead window. If this is a zero value, lookahead will be disabled and we'll
+ // traverse the entire datastore for every purge cycle.
+ GCLookaheadInterval time.Duration
+
+ // Initial delay before GC processes start. Intended to give the system breathing room to fully boot
+ // before starting GC.
+ GCInitialDelay time.Duration
+
+ Clock clock
+}
+
+// DefaultOpts returns the default options for a persistent peerstore, with the full-purge GC algorithm:
+//
+// * Cache size: 1024.
+// * MaxProtocols: 1024.
+// * GC purge interval: 2 hours.
+// * GC lookahead interval: disabled.
+// * GC initial delay: 60 seconds.
+func DefaultOpts() Options {
+ return Options{
+ CacheSize: 1024,
+ MaxProtocols: 1024,
+ GCPurgeInterval: 2 * time.Hour,
+ GCLookaheadInterval: 0,
+ GCInitialDelay: 60 * time.Second,
+ Clock: realclock{},
+ }
+}
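+
+// A minimal sketch of tuning these options, assuming the caller supplies
+// `ctx` and a ds.Batching `store`:
+//
+//	opts := DefaultOpts()
+//	opts.GCPurgeInterval = 10 * time.Minute // purge expired addresses more often
+//	ps, err := NewPeerstore(ctx, store, opts)
+//	if err != nil {
+//		// handle error
+//	}
+//	defer ps.Close()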
+
+type pstoreds struct {
+ peerstore.Metrics
+
+ *dsKeyBook
+ *dsAddrBook
+ *dsProtoBook
+ *dsPeerMetadata
+}
+
+var _ peerstore.Peerstore = &pstoreds{}
+
+// NewPeerstore creates a peerstore backed by the provided persistent datastore.
+// It's the caller's responsibility to call RemovePeer to ensure
+// that memory consumption of the peerstore doesn't grow unboundedly.
+func NewPeerstore(ctx context.Context, store ds.Batching, opts Options) (*pstoreds, error) {
+ addrBook, err := NewAddrBook(ctx, store, opts)
+ if err != nil {
+ return nil, err
+ }
+
+ keyBook, err := NewKeyBook(ctx, store, opts)
+ if err != nil {
+ return nil, err
+ }
+
+ peerMetadata, err := NewPeerMetadata(ctx, store, opts)
+ if err != nil {
+ return nil, err
+ }
+
+ protoBook, err := NewProtoBook(peerMetadata, WithMaxProtocols(opts.MaxProtocols))
+ if err != nil {
+ return nil, err
+ }
+
+ return &pstoreds{
+ Metrics: pstore.NewMetrics(),
+ dsKeyBook: keyBook,
+ dsAddrBook: addrBook,
+ dsPeerMetadata: peerMetadata,
+ dsProtoBook: protoBook,
+ }, nil
+}
+
+// uniquePeerIds extracts and returns unique peer IDs from database keys.
+func uniquePeerIds(ds ds.Datastore, prefix ds.Key, extractor func(result query.Result) string) (peer.IDSlice, error) {
+ var (
+ q = query.Query{Prefix: prefix.String(), KeysOnly: true}
+ results query.Results
+ err error
+ )
+
+ if results, err = ds.Query(context.TODO(), q); err != nil {
+ log.Error("failed to query database", "err", err)
+ return nil, err
+ }
+
+ defer results.Close()
+
+ idset := make(map[string]struct{})
+ for result := range results.Next() {
+ k := extractor(result)
+ idset[k] = struct{}{}
+ }
+
+ if len(idset) == 0 {
+ return peer.IDSlice{}, nil
+ }
+
+ ids := make(peer.IDSlice, 0, len(idset))
+ for id := range idset {
+ pid, _ := base32.RawStdEncoding.DecodeString(id)
+ id, _ := peer.IDFromBytes(pid)
+ ids = append(ids, id)
+ }
+ return ids, nil
+}
+
+func (ps *pstoreds) Close() (err error) {
+ var errs []error
+ weakClose := func(name string, c interface{}) {
+ if cl, ok := c.(io.Closer); ok {
+ if err = cl.Close(); err != nil {
+ errs = append(errs, fmt.Errorf("%s error: %s", name, err))
+ }
+ }
+ }
+ weakClose("keybook", ps.dsKeyBook)
+ weakClose("addressbook", ps.dsAddrBook)
+ weakClose("protobook", ps.dsProtoBook)
+ weakClose("peermetadata", ps.dsPeerMetadata)
+
+ if len(errs) > 0 {
+ return fmt.Errorf("failed while closing peerstore; err(s): %q", errs)
+ }
+ return nil
+}
+
+func (ps *pstoreds) Peers() peer.IDSlice {
+ set := map[peer.ID]struct{}{}
+ for _, p := range ps.PeersWithKeys() {
+ set[p] = struct{}{}
+ }
+ for _, p := range ps.PeersWithAddrs() {
+ set[p] = struct{}{}
+ }
+
+ pps := make(peer.IDSlice, 0, len(set))
+ for p := range set {
+ pps = append(pps, p)
+ }
+ return pps
+}
+
+func (ps *pstoreds) PeerInfo(p peer.ID) peer.AddrInfo {
+ return peer.AddrInfo{
+ ID: p,
+ Addrs: ps.dsAddrBook.Addrs(p),
+ }
+}
+
+// RemovePeer removes entries associated with a peer from:
+// * the KeyBook
+// * the ProtoBook
+// * the PeerMetadata
+// * the Metrics
+// It DOES NOT remove the peer from the AddrBook.
+func (ps *pstoreds) RemovePeer(p peer.ID) {
+ ps.dsKeyBook.RemovePeer(p)
+ ps.dsProtoBook.RemovePeer(p)
+ ps.dsPeerMetadata.RemovePeer(p)
+ ps.Metrics.RemovePeer(p)
+}
diff --git a/p2p/host/peerstore/pstoreds/protobook.go b/p2p/host/peerstore/pstoreds/protobook.go
new file mode 100644
index 0000000000..9ef7d1c9fa
--- /dev/null
+++ b/p2p/host/peerstore/pstoreds/protobook.go
@@ -0,0 +1,196 @@
+package pstoreds
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+ pstore "github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/libp2p/go-libp2p/core/protocol"
+)
+
+type protoSegment struct {
+ sync.RWMutex
+}
+
+type protoSegments [256]*protoSegment
+
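+// get returns the lock segment for a peer, selected by the last byte of its
+// ID. This spreads lock contention for unrelated peers across 256 segments.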
+func (s *protoSegments) get(p peer.ID) *protoSegment {
+ return s[p[len(p)-1]]
+}
+
+var errTooManyProtocols = errors.New("too many protocols")
+
+type ProtoBookOption func(*dsProtoBook) error
+
+func WithMaxProtocols(num int) ProtoBookOption {
+ return func(pb *dsProtoBook) error {
+ pb.maxProtos = num
+ return nil
+ }
+}
+
+type dsProtoBook struct {
+ segments protoSegments
+ meta pstore.PeerMetadata
+ maxProtos int
+}
+
+var _ pstore.ProtoBook = (*dsProtoBook)(nil)
+
+func NewProtoBook(meta pstore.PeerMetadata, opts ...ProtoBookOption) (*dsProtoBook, error) {
+ pb := &dsProtoBook{
+ meta: meta,
+ segments: func() (ret protoSegments) {
+ for i := range ret {
+ ret[i] = &protoSegment{}
+ }
+ return ret
+ }(),
+ maxProtos: 128,
+ }
+
+ for _, opt := range opts {
+ if err := opt(pb); err != nil {
+ return nil, err
+ }
+ }
+ return pb, nil
+}
+
+func (pb *dsProtoBook) SetProtocols(p peer.ID, protos ...protocol.ID) error {
+ if len(protos) > pb.maxProtos {
+ return errTooManyProtocols
+ }
+
+ protomap := make(map[protocol.ID]struct{}, len(protos))
+ for _, proto := range protos {
+ protomap[proto] = struct{}{}
+ }
+
+ s := pb.segments.get(p)
+ s.Lock()
+ defer s.Unlock()
+
+ return pb.meta.Put(p, "protocols", protomap)
+}
+
+func (pb *dsProtoBook) AddProtocols(p peer.ID, protos ...protocol.ID) error {
+ s := pb.segments.get(p)
+ s.Lock()
+ defer s.Unlock()
+
+ pmap, err := pb.getProtocolMap(p)
+ if err != nil {
+ return err
+ }
+ if len(pmap)+len(protos) > pb.maxProtos {
+ return errTooManyProtocols
+ }
+
+ for _, proto := range protos {
+ pmap[proto] = struct{}{}
+ }
+
+ return pb.meta.Put(p, "protocols", pmap)
+}
+
+func (pb *dsProtoBook) GetProtocols(p peer.ID) ([]protocol.ID, error) {
+ s := pb.segments.get(p)
+ s.RLock()
+ defer s.RUnlock()
+
+ pmap, err := pb.getProtocolMap(p)
+ if err != nil {
+ return nil, err
+ }
+
+ res := make([]protocol.ID, 0, len(pmap))
+ for proto := range pmap {
+ res = append(res, proto)
+ }
+
+ return res, nil
+}
+
+func (pb *dsProtoBook) SupportsProtocols(p peer.ID, protos ...protocol.ID) ([]protocol.ID, error) {
+ s := pb.segments.get(p)
+ s.RLock()
+ defer s.RUnlock()
+
+ pmap, err := pb.getProtocolMap(p)
+ if err != nil {
+ return nil, err
+ }
+
+ res := make([]protocol.ID, 0, len(protos))
+ for _, proto := range protos {
+ if _, ok := pmap[proto]; ok {
+ res = append(res, proto)
+ }
+ }
+
+ return res, nil
+}
+
+func (pb *dsProtoBook) FirstSupportedProtocol(p peer.ID, protos ...protocol.ID) (protocol.ID, error) {
+ s := pb.segments.get(p)
+ s.RLock()
+ defer s.RUnlock()
+
+ pmap, err := pb.getProtocolMap(p)
+ if err != nil {
+ return "", err
+ }
+ for _, proto := range protos {
+ if _, ok := pmap[proto]; ok {
+ return proto, nil
+ }
+ }
+
+ return "", nil
+}
+
+func (pb *dsProtoBook) RemoveProtocols(p peer.ID, protos ...protocol.ID) error {
+ s := pb.segments.get(p)
+ s.Lock()
+ defer s.Unlock()
+
+ pmap, err := pb.getProtocolMap(p)
+ if err != nil {
+ return err
+ }
+
+ if len(pmap) == 0 {
+ // nothing to do.
+ return nil
+ }
+
+ for _, proto := range protos {
+ delete(pmap, proto)
+ }
+
+ return pb.meta.Put(p, "protocols", pmap)
+}
+
+func (pb *dsProtoBook) getProtocolMap(p peer.ID) (map[protocol.ID]struct{}, error) {
+ iprotomap, err := pb.meta.Get(p, "protocols")
+ switch err {
+ default:
+ return nil, err
+ case pstore.ErrNotFound:
+ return make(map[protocol.ID]struct{}), nil
+ case nil:
+ cast, ok := iprotomap.(map[protocol.ID]struct{})
+ if !ok {
+ return nil, fmt.Errorf("stored protocol set was not a map")
+ }
+
+ return cast, nil
+ }
+}
+
+func (pb *dsProtoBook) RemovePeer(p peer.ID) {
+ pb.meta.RemovePeer(p)
+}
diff --git a/p2p/host/peerstore/pstoremem/addr_book.go b/p2p/host/peerstore/pstoremem/addr_book.go
new file mode 100644
index 0000000000..4c389cfcd1
--- /dev/null
+++ b/p2p/host/peerstore/pstoremem/addr_book.go
@@ -0,0 +1,662 @@
+package pstoremem
+
+import (
+ "container/heap"
+ "context"
+ "errors"
+ "fmt"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/libp2p/go-libp2p/core/record"
+
+ logging "github.com/libp2p/go-libp2p/gologshim"
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+var log = logging.Logger("peerstore")
+
+type expiringAddr struct {
+ Addr ma.Multiaddr
+ TTL time.Duration
+ Expiry time.Time
+ Peer peer.ID
+ // to sort by expiry time, -1 means it's not in the heap
+ heapIndex int
+}
+
+func (e *expiringAddr) ExpiredBy(t time.Time) bool {
+ return !t.Before(e.Expiry)
+}
+
+func (e *expiringAddr) IsConnected() bool {
+ return ttlIsConnected(e.TTL)
+}
+
+// ttlIsConnected returns true if the TTL is at least as long as the connected
+// TTL.
+func ttlIsConnected(ttl time.Duration) bool {
+ return ttl >= peerstore.ConnectedAddrTTL
+}
+
+type peerRecordState struct {
+ Envelope *record.Envelope
+ Seq uint64
+}
+
+// Essentially Go stdlib's Priority Queue example
+var _ heap.Interface = &peerAddrs{}
+
+type peerAddrs struct {
+ Addrs map[peer.ID]map[string]*expiringAddr // peer.ID -> addr.Bytes() -> *expiringAddr
+	// expiringHeap only stores non-connected addresses, since connected
+	// addresses effectively have an infinite TTL and never need to expire.
+ expiringHeap []*expiringAddr
+}
+
+func newPeerAddrs() peerAddrs {
+ return peerAddrs{
+ Addrs: make(map[peer.ID]map[string]*expiringAddr),
+ }
+}
+
+func (pa *peerAddrs) Len() int { return len(pa.expiringHeap) }
+func (pa *peerAddrs) Less(i, j int) bool {
+ return pa.expiringHeap[i].Expiry.Before(pa.expiringHeap[j].Expiry)
+}
+func (pa *peerAddrs) Swap(i, j int) {
+ pa.expiringHeap[i], pa.expiringHeap[j] = pa.expiringHeap[j], pa.expiringHeap[i]
+ pa.expiringHeap[i].heapIndex = i
+ pa.expiringHeap[j].heapIndex = j
+}
+func (pa *peerAddrs) Push(x any) {
+ a := x.(*expiringAddr)
+ a.heapIndex = len(pa.expiringHeap)
+ pa.expiringHeap = append(pa.expiringHeap, a)
+}
+func (pa *peerAddrs) Pop() any {
+ a := pa.expiringHeap[len(pa.expiringHeap)-1]
+ a.heapIndex = -1
+ pa.expiringHeap = pa.expiringHeap[0 : len(pa.expiringHeap)-1]
+ return a
+}
+
+func (pa *peerAddrs) Delete(a *expiringAddr) {
+ if ea, ok := pa.Addrs[a.Peer][string(a.Addr.Bytes())]; ok {
+ if ea.heapIndex != -1 {
+ heap.Remove(pa, a.heapIndex)
+ }
+ delete(pa.Addrs[a.Peer], string(a.Addr.Bytes()))
+ if len(pa.Addrs[a.Peer]) == 0 {
+ delete(pa.Addrs, a.Peer)
+ }
+ }
+}
+
+func (pa *peerAddrs) FindAddr(p peer.ID, addr ma.Multiaddr) (*expiringAddr, bool) {
+ if m, ok := pa.Addrs[p]; ok {
+ v, ok := m[string(addr.Bytes())]
+ return v, ok
+ }
+ return nil, false
+}
+
+func (pa *peerAddrs) NextExpiry() time.Time {
+ if len(pa.expiringHeap) == 0 {
+ return time.Time{}
+ }
+ return pa.expiringHeap[0].Expiry
+}
+
+func (pa *peerAddrs) PopIfExpired(now time.Time) (*expiringAddr, bool) {
+ // Use `!Before` instead of `After` to ensure that we expire *at* now, and not *just after now*.
+ if len(pa.expiringHeap) > 0 && !now.Before(pa.NextExpiry()) {
+ ea := heap.Pop(pa).(*expiringAddr)
+ delete(pa.Addrs[ea.Peer], string(ea.Addr.Bytes()))
+ if len(pa.Addrs[ea.Peer]) == 0 {
+ delete(pa.Addrs, ea.Peer)
+ }
+ return ea, true
+ }
+ return nil, false
+}
+
+func (pa *peerAddrs) Update(a *expiringAddr) {
+ if a.heapIndex == -1 {
+ return
+ }
+ if a.IsConnected() {
+ heap.Remove(pa, a.heapIndex)
+ } else {
+ heap.Fix(pa, a.heapIndex)
+ }
+}
+
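+// Insert adds the address to the per-peer map and, unless it counts as
+// connected, pushes it onto the expiry heap. Connected addresses stay out of
+// the heap and are marked with heapIndex == -1.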
+func (pa *peerAddrs) Insert(a *expiringAddr) {
+ a.heapIndex = -1
+ if _, ok := pa.Addrs[a.Peer]; !ok {
+ pa.Addrs[a.Peer] = make(map[string]*expiringAddr)
+ }
+ pa.Addrs[a.Peer][string(a.Addr.Bytes())] = a
+ // don't add connected addr to heap.
+ if a.IsConnected() {
+ return
+ }
+ heap.Push(pa, a)
+}
+
+func (pa *peerAddrs) NumUnconnectedAddrs() int {
+ return len(pa.expiringHeap)
+}
+
+type clock interface {
+ Now() time.Time
+}
+
+type realclock struct{}
+
+func (rc realclock) Now() time.Time {
+ return time.Now()
+}
+
+const (
+ defaultMaxSignedPeerRecords = 100_000
+ defaultMaxUnconnectedAddrs = 1_000_000
+)
+
+// memoryAddrBook manages addresses.
+type memoryAddrBook struct {
+ mu sync.RWMutex
+ addrs peerAddrs
+ signedPeerRecords map[peer.ID]*peerRecordState
+ maxUnconnectedAddrs int
+ maxSignedPeerRecords int
+
+ refCount sync.WaitGroup
+ cancel func()
+
+ subManager *AddrSubManager
+ clock clock
+}
+
+var _ peerstore.AddrBook = (*memoryAddrBook)(nil)
+var _ peerstore.CertifiedAddrBook = (*memoryAddrBook)(nil)
+
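+// NewAddrBook creates an in-memory address book and starts its background GC
+// goroutine. Callers must call Close to stop it.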
+func NewAddrBook(opts ...AddrBookOption) *memoryAddrBook {
+ ctx, cancel := context.WithCancel(context.Background())
+
+ ab := &memoryAddrBook{
+ addrs: newPeerAddrs(),
+ signedPeerRecords: make(map[peer.ID]*peerRecordState),
+ subManager: NewAddrSubManager(),
+ cancel: cancel,
+ clock: realclock{},
+ maxUnconnectedAddrs: defaultMaxUnconnectedAddrs,
+ maxSignedPeerRecords: defaultMaxSignedPeerRecords,
+ }
+ for _, opt := range opts {
+ opt(ab)
+ }
+
+ ab.refCount.Add(1)
+ go ab.background(ctx)
+ return ab
+}
+
+type AddrBookOption func(book *memoryAddrBook) error
+
+func WithClock(clock clock) AddrBookOption {
+ return func(book *memoryAddrBook) error {
+ book.clock = clock
+ return nil
+ }
+}
+
+// WithMaxAddresses sets the maximum number of unconnected addresses to store.
+// The maximum number of connected addresses is bounded by the connection
+// limits in the Connection Manager and Resource Manager.
+func WithMaxAddresses(n int) AddrBookOption {
+ return func(b *memoryAddrBook) error {
+ b.maxUnconnectedAddrs = n
+ return nil
+ }
+}
+
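+// WithMaxSignedPeerRecords sets the maximum number of signed peer records to
+// store. Records for additional peers are rejected once the limit is reached.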
+func WithMaxSignedPeerRecords(n int) AddrBookOption {
+ return func(b *memoryAddrBook) error {
+ b.maxSignedPeerRecords = n
+ return nil
+ }
+}
+
+// background periodically runs garbage collection.
+func (mab *memoryAddrBook) background(ctx context.Context) {
+ defer mab.refCount.Done()
+ ticker := time.NewTicker(1 * time.Minute)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ mab.gc()
+ case <-ctx.Done():
+ return
+ }
+ }
+}
+
+func (mab *memoryAddrBook) Close() error {
+ mab.cancel()
+ mab.refCount.Wait()
+ return nil
+}
+
+// gc garbage collects the in-memory address book.
+func (mab *memoryAddrBook) gc() {
+ now := mab.clock.Now()
+ mab.mu.Lock()
+ defer mab.mu.Unlock()
+ for {
+ ea, ok := mab.addrs.PopIfExpired(now)
+ if !ok {
+ return
+ }
+ mab.maybeDeleteSignedPeerRecordUnlocked(ea.Peer)
+ }
+}
+
+func (mab *memoryAddrBook) PeersWithAddrs() peer.IDSlice {
+ mab.mu.RLock()
+ defer mab.mu.RUnlock()
+ peers := make(peer.IDSlice, 0, len(mab.addrs.Addrs))
+ for pid := range mab.addrs.Addrs {
+ peers = append(peers, pid)
+ }
+ return peers
+}
+
+// AddAddr calls AddAddrs(p, []ma.Multiaddr{addr}, ttl)
+func (mab *memoryAddrBook) AddAddr(p peer.ID, addr ma.Multiaddr, ttl time.Duration) {
+ mab.AddAddrs(p, []ma.Multiaddr{addr}, ttl)
+}
+
+// AddAddrs adds `addrs` for peer `p`, which will expire after the given `ttl`.
+// This function never reduces the TTL or expiration of an address.
+func (mab *memoryAddrBook) AddAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration) {
+ mab.addAddrs(p, addrs, ttl)
+}
+
+// ConsumePeerRecord adds addresses from a signed peer.PeerRecord, which will expire after the given TTL.
+// See https://godoc.org/github.com/libp2p/go-libp2p/core/peerstore#CertifiedAddrBook for more details.
+func (mab *memoryAddrBook) ConsumePeerRecord(recordEnvelope *record.Envelope, ttl time.Duration) (bool, error) {
+ r, err := recordEnvelope.Record()
+ if err != nil {
+ return false, err
+ }
+ rec, ok := r.(*peer.PeerRecord)
+ if !ok {
+ return false, fmt.Errorf("unable to process envelope: not a PeerRecord")
+ }
+ if !rec.PeerID.MatchesPublicKey(recordEnvelope.PublicKey) {
+ return false, fmt.Errorf("signing key does not match PeerID in PeerRecord")
+ }
+
+ mab.mu.Lock()
+ defer mab.mu.Unlock()
+
+ // ensure seq is greater than or equal to the last received
+ lastState, found := mab.signedPeerRecords[rec.PeerID]
+ if found && lastState.Seq > rec.Seq {
+ return false, nil
+ }
+ // check if we are over the max signed peer record limit
+ if !found && len(mab.signedPeerRecords) >= mab.maxSignedPeerRecords {
+ return false, errors.New("too many signed peer records")
+ }
+ mab.signedPeerRecords[rec.PeerID] = &peerRecordState{
+ Envelope: recordEnvelope,
+ Seq: rec.Seq,
+ }
+ mab.addAddrsUnlocked(rec.PeerID, rec.Addrs, ttl)
+ return true, nil
+}
+
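+// maybeDeleteSignedPeerRecordUnlocked drops the signed peer record for p once
+// no addresses remain for it. Callers must hold mab.mu.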
+func (mab *memoryAddrBook) maybeDeleteSignedPeerRecordUnlocked(p peer.ID) {
+ if len(mab.addrs.Addrs[p]) == 0 {
+ delete(mab.signedPeerRecords, p)
+ }
+}
+
+func (mab *memoryAddrBook) addAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration) {
+ mab.mu.Lock()
+ defer mab.mu.Unlock()
+
+ mab.addAddrsUnlocked(p, addrs, ttl)
+}
+
+func (mab *memoryAddrBook) addAddrsUnlocked(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration) {
+ defer mab.maybeDeleteSignedPeerRecordUnlocked(p)
+
+	// if ttl is zero or negative, there is nothing to do.
+ if ttl <= 0 {
+ return
+ }
+
+ // we are over limit, drop these addrs.
+ if !ttlIsConnected(ttl) && mab.addrs.NumUnconnectedAddrs() >= mab.maxUnconnectedAddrs {
+ return
+ }
+
+ exp := mab.clock.Now().Add(ttl)
+ for _, addr := range addrs {
+ // Remove suffix of /p2p/peer-id from address
+ addr, addrPid := peer.SplitAddr(addr)
+ if addr == nil {
+ log.Warn("Was passed nil multiaddr", "peer", p)
+ continue
+ }
+ if addrPid != "" && addrPid != p {
+ log.Warn("Was passed p2p address with a different peerId", "found", addrPid, "expected", p)
+ continue
+ }
+ a, found := mab.addrs.FindAddr(p, addr)
+ if !found {
+ // not found, announce it.
+ entry := &expiringAddr{Addr: addr, Expiry: exp, TTL: ttl, Peer: p}
+ mab.addrs.Insert(entry)
+ mab.subManager.BroadcastAddr(p, addr)
+ } else {
+ // update ttl & exp to whichever is greater between new and existing entry
+ var changed bool
+ if ttl > a.TTL {
+ changed = true
+ a.TTL = ttl
+ }
+ if exp.After(a.Expiry) {
+ changed = true
+ a.Expiry = exp
+ }
+ if changed {
+ mab.addrs.Update(a)
+ }
+ }
+ }
+}
+
+// SetAddr calls mgr.SetAddrs(p, addr, ttl)
+func (mab *memoryAddrBook) SetAddr(p peer.ID, addr ma.Multiaddr, ttl time.Duration) {
+ mab.SetAddrs(p, []ma.Multiaddr{addr}, ttl)
+}
+
+// SetAddrs sets the TTL on the given addresses, replacing any previously set TTL.
+// This is used when we receive the best estimate of the validity of an address.
+func (mab *memoryAddrBook) SetAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration) {
+ mab.mu.Lock()
+ defer mab.mu.Unlock()
+
+ defer mab.maybeDeleteSignedPeerRecordUnlocked(p)
+
+ exp := mab.clock.Now().Add(ttl)
+ for _, addr := range addrs {
+ addr, addrPid := peer.SplitAddr(addr)
+ if addr == nil {
+ log.Warn("was passed nil multiaddr", "peer", p)
+ continue
+ }
+ if addrPid != "" && addrPid != p {
+ log.Warn("was passed p2p address with a different peerId", "found", addrPid, "expected", p)
+ continue
+ }
+
+ if a, found := mab.addrs.FindAddr(p, addr); found {
+ if ttl > 0 {
+ if a.IsConnected() && !ttlIsConnected(ttl) && mab.addrs.NumUnconnectedAddrs() >= mab.maxUnconnectedAddrs {
+ mab.addrs.Delete(a)
+ } else {
+ a.Addr = addr
+ a.Expiry = exp
+ a.TTL = ttl
+ mab.addrs.Update(a)
+ mab.subManager.BroadcastAddr(p, addr)
+ }
+ } else {
+ mab.addrs.Delete(a)
+ }
+ } else {
+ if ttl > 0 {
+ if !ttlIsConnected(ttl) && mab.addrs.NumUnconnectedAddrs() >= mab.maxUnconnectedAddrs {
+ continue
+ }
+ entry := &expiringAddr{Addr: addr, Expiry: exp, TTL: ttl, Peer: p}
+ mab.addrs.Insert(entry)
+ mab.subManager.BroadcastAddr(p, addr)
+ }
+ }
+ }
+}
+
+// UpdateAddrs updates the addresses associated with the given peer that have
+// the given oldTTL to have the given newTTL.
+func (mab *memoryAddrBook) UpdateAddrs(p peer.ID, oldTTL time.Duration, newTTL time.Duration) {
+ mab.mu.Lock()
+ defer mab.mu.Unlock()
+
+ defer mab.maybeDeleteSignedPeerRecordUnlocked(p)
+
+ exp := mab.clock.Now().Add(newTTL)
+ for _, a := range mab.addrs.Addrs[p] {
+ if oldTTL == a.TTL {
+ if newTTL == 0 {
+ mab.addrs.Delete(a)
+ } else {
+ // We are over limit, drop these addresses.
+ if ttlIsConnected(oldTTL) && !ttlIsConnected(newTTL) && mab.addrs.NumUnconnectedAddrs() >= mab.maxUnconnectedAddrs {
+ mab.addrs.Delete(a)
+ } else {
+ a.TTL = newTTL
+ a.Expiry = exp
+ mab.addrs.Update(a)
+ }
+ }
+ }
+ }
+}
+
+// Addrs returns all known (and valid) addresses for a given peer
+func (mab *memoryAddrBook) Addrs(p peer.ID) []ma.Multiaddr {
+ mab.mu.RLock()
+ defer mab.mu.RUnlock()
+ if _, ok := mab.addrs.Addrs[p]; !ok {
+ return nil
+ }
+ return validAddrs(mab.clock.Now(), mab.addrs.Addrs[p])
+}
+
+func validAddrs(now time.Time, amap map[string]*expiringAddr) []ma.Multiaddr {
+ good := make([]ma.Multiaddr, 0, len(amap))
+ if amap == nil {
+ return good
+ }
+ for _, m := range amap {
+ if !m.ExpiredBy(now) {
+ good = append(good, m.Addr)
+ }
+ }
+ return good
+}
+
+// GetPeerRecord returns a Envelope containing a PeerRecord for the
+// given peer id, if one exists.
+// Returns nil if no signed PeerRecord exists for the peer.
+func (mab *memoryAddrBook) GetPeerRecord(p peer.ID) *record.Envelope {
+ mab.mu.RLock()
+ defer mab.mu.RUnlock()
+
+ if _, ok := mab.addrs.Addrs[p]; !ok {
+ return nil
+ }
+	// The record may have expired but not yet been garbage collected.
+ if len(validAddrs(mab.clock.Now(), mab.addrs.Addrs[p])) == 0 {
+ return nil
+ }
+
+ state := mab.signedPeerRecords[p]
+ if state == nil {
+ return nil
+ }
+ return state.Envelope
+}
+
+// ClearAddrs removes all previously stored addresses
+func (mab *memoryAddrBook) ClearAddrs(p peer.ID) {
+ mab.mu.Lock()
+ defer mab.mu.Unlock()
+
+ delete(mab.signedPeerRecords, p)
+ for _, a := range mab.addrs.Addrs[p] {
+ mab.addrs.Delete(a)
+ }
+}
+
+// AddrStream returns a channel on which all new addresses discovered for a
+// given peer ID will be published.
+func (mab *memoryAddrBook) AddrStream(ctx context.Context, p peer.ID) <-chan ma.Multiaddr {
+ var initial []ma.Multiaddr
+
+ mab.mu.RLock()
+ if m, ok := mab.addrs.Addrs[p]; ok {
+ initial = make([]ma.Multiaddr, 0, len(m))
+ for _, a := range m {
+ initial = append(initial, a.Addr)
+ }
+ }
+ mab.mu.RUnlock()
+
+ return mab.subManager.AddrStream(ctx, p, initial)
+}
+
+type addrSub struct {
+ pubch chan ma.Multiaddr
+ ctx context.Context
+}
+
+func (s *addrSub) pubAddr(a ma.Multiaddr) {
+ select {
+ case s.pubch <- a:
+ case <-s.ctx.Done():
+ }
+}
+
+// An abstracted, pub-sub manager for address streams. Extracted from
+// memoryAddrBook in order to support additional implementations.
+type AddrSubManager struct {
+ mu sync.RWMutex
+ subs map[peer.ID][]*addrSub
+}
+
+// NewAddrSubManager initializes an AddrSubManager.
+func NewAddrSubManager() *AddrSubManager {
+ return &AddrSubManager{
+ subs: make(map[peer.ID][]*addrSub),
+ }
+}
+
+// removeSub is used internally by the address stream goroutine to remove a
+// subscription from the manager.
+func (mgr *AddrSubManager) removeSub(p peer.ID, s *addrSub) {
+ mgr.mu.Lock()
+ defer mgr.mu.Unlock()
+
+ subs := mgr.subs[p]
+ if len(subs) == 1 {
+ if subs[0] != s {
+ return
+ }
+ delete(mgr.subs, p)
+ return
+ }
+
+ for i, v := range subs {
+ if v == s {
+ subs[i] = subs[len(subs)-1]
+ subs[len(subs)-1] = nil
+ mgr.subs[p] = subs[:len(subs)-1]
+ return
+ }
+ }
+}
+
+// BroadcastAddr broadcasts a new address to all subscribed streams.
+func (mgr *AddrSubManager) BroadcastAddr(p peer.ID, addr ma.Multiaddr) {
+ mgr.mu.RLock()
+ defer mgr.mu.RUnlock()
+
+ if subs, ok := mgr.subs[p]; ok {
+ for _, sub := range subs {
+ sub.pubAddr(addr)
+ }
+ }
+}
+
+// AddrStream creates a new subscription for a given peer ID, pre-populating the
+// channel with any addresses we might already have on file.
+func (mgr *AddrSubManager) AddrStream(ctx context.Context, p peer.ID, initial []ma.Multiaddr) <-chan ma.Multiaddr {
+ sub := &addrSub{pubch: make(chan ma.Multiaddr), ctx: ctx}
+ out := make(chan ma.Multiaddr)
+
+ mgr.mu.Lock()
+ mgr.subs[p] = append(mgr.subs[p], sub)
+ mgr.mu.Unlock()
+
+ sort.Sort(addrList(initial))
+
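+	// The goroutine below drains the initial buffer into out, skips addresses
+	// that were already sent, and queues addresses broadcast on sub.pubch
+	// while the consumer is slow to receive them.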
+ go func(buffer []ma.Multiaddr) {
+ defer close(out)
+
+ sent := make(map[string]struct{}, len(buffer))
+ for _, a := range buffer {
+ sent[string(a.Bytes())] = struct{}{}
+ }
+
+ var outch chan ma.Multiaddr
+ var next ma.Multiaddr
+ if len(buffer) > 0 {
+ next = buffer[0]
+ buffer = buffer[1:]
+ outch = out
+ }
+
+ for {
+ select {
+ case outch <- next:
+ if len(buffer) > 0 {
+ next = buffer[0]
+ buffer = buffer[1:]
+ } else {
+ outch = nil
+ next = nil
+ }
+ case naddr := <-sub.pubch:
+ if _, ok := sent[string(naddr.Bytes())]; ok {
+ continue
+ }
+ sent[string(naddr.Bytes())] = struct{}{}
+
+ if next == nil {
+ next = naddr
+ outch = out
+ } else {
+ buffer = append(buffer, naddr)
+ }
+ case <-ctx.Done():
+ mgr.removeSub(p, sub)
+ return
+ }
+ }
+ }(initial)
+
+ return out
+}
diff --git a/p2p/host/peerstore/pstoremem/addr_book_test.go b/p2p/host/peerstore/pstoremem/addr_book_test.go
new file mode 100644
index 0000000000..e8ba89ff93
--- /dev/null
+++ b/p2p/host/peerstore/pstoremem/addr_book_test.go
@@ -0,0 +1,212 @@
+package pstoremem
+
+import (
+ "container/heap"
+ "fmt"
+ "math/rand"
+ "slices"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/stretchr/testify/require"
+)
+
+func TestPeerAddrsNextExpiry(t *testing.T) {
+ paa := newPeerAddrs()
+ pa := &paa
+ a1 := ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1")
+ a2 := ma.StringCast("/ip4/1.2.3.4/udp/2/quic-v1")
+
+ // t1 is before t2
+ t1 := time.Time{}.Add(1 * time.Second)
+ t2 := time.Time{}.Add(2 * time.Second)
+ paa.Insert(&expiringAddr{Addr: a1, Expiry: t1, TTL: 10 * time.Second, Peer: "p1"})
+ paa.Insert(&expiringAddr{Addr: a2, Expiry: t2, TTL: 10 * time.Second, Peer: "p2"})
+
+ if pa.NextExpiry() != t1 {
+ t.Fatal("expiry should be set to t1, got", pa.NextExpiry())
+ }
+}
+
+func peerAddrsInput(n int) []*expiringAddr {
+ expiringAddrs := make([]*expiringAddr, n)
+ for i := 0; i < n; i++ {
+ port := i % 65535
+ a := ma.StringCast(fmt.Sprintf("/ip4/1.2.3.4/udp/%d/quic-v1", port))
+ e := time.Time{}.Add(time.Duration(i) * time.Second)
+ p := peer.ID(fmt.Sprintf("p%d", i))
+ expiringAddrs[i] = &expiringAddr{Addr: a, Expiry: e, TTL: 10 * time.Second, Peer: p}
+ }
+ return expiringAddrs
+}
+
+func TestPeerAddrsHeapProperty(t *testing.T) {
+ paa := newPeerAddrs()
+ pa := &paa
+
+ const N = 10000
+ expiringAddrs := peerAddrsInput(N)
+ for i := 0; i < N; i++ {
+ paa.Insert(expiringAddrs[i])
+ }
+
+ for i := 0; i < N; i++ {
+ ea, ok := pa.PopIfExpired(expiringAddrs[i].Expiry)
+ require.True(t, ok, "pos: %d", i)
+ require.Equal(t, ea.Addr, expiringAddrs[i].Addr)
+
+ ea, ok = pa.PopIfExpired(expiringAddrs[i].Expiry)
+ require.False(t, ok)
+ require.Nil(t, ea)
+ }
+}
+
+func TestPeerAddrsHeapPropertyDeletions(t *testing.T) {
+ paa := newPeerAddrs()
+ pa := &paa
+
+ const N = 10000
+ expiringAddrs := peerAddrsInput(N)
+ for i := 0; i < N; i++ {
+ paa.Insert(expiringAddrs[i])
+ }
+
+ // delete every 3rd element
+ for i := 0; i < N; i += 3 {
+ paa.Delete(expiringAddrs[i])
+ }
+
+ for i := 0; i < N; i++ {
+ ea, ok := pa.PopIfExpired(expiringAddrs[i].Expiry)
+ if i%3 == 0 {
+ require.False(t, ok)
+ require.Nil(t, ea)
+ } else {
+ require.True(t, ok)
+ require.Equal(t, ea.Addr, expiringAddrs[i].Addr)
+ }
+
+ ea, ok = pa.PopIfExpired(expiringAddrs[i].Expiry)
+ require.False(t, ok)
+ require.Nil(t, ea)
+ }
+}
+
+func TestPeerAddrsHeapPropertyUpdates(t *testing.T) {
+ paa := newPeerAddrs()
+ pa := &paa
+
+ const N = 10000
+ expiringAddrs := peerAddrsInput(N)
+ for i := 0; i < N; i++ {
+ heap.Push(pa, expiringAddrs[i])
+ }
+
+ // update every 3rd element to expire at the end
+ var endElements []ma.Multiaddr
+ for i := 0; i < N; i += 3 {
+		expiringAddrs[i].Expiry = time.Time{}.Add(1_000_000 * time.Second)
+ pa.Update(expiringAddrs[i])
+ endElements = append(endElements, expiringAddrs[i].Addr)
+ }
+
+ for i := 0; i < N; i++ {
+ if i%3 == 0 {
+ continue // skip the elements at the end
+ }
+ ea, ok := pa.PopIfExpired(expiringAddrs[i].Expiry)
+ require.True(t, ok, "pos: %d", i)
+ require.Equal(t, ea.Addr, expiringAddrs[i].Addr)
+
+ ea, ok = pa.PopIfExpired(expiringAddrs[i].Expiry)
+ require.False(t, ok)
+ require.Nil(t, ea)
+ }
+
+ for len(endElements) > 0 {
+		ea, ok := pa.PopIfExpired(time.Time{}.Add(1_000_000 * time.Second))
+ require.True(t, ok)
+ require.Contains(t, endElements, ea.Addr)
+ endElements = slices.DeleteFunc(endElements, func(a ma.Multiaddr) bool { return ea.Addr.Equal(a) })
+ }
+}
+
+// TestPeerAddrsExpiry tests for multiple element expiry with PopIfExpired.
+func TestPeerAddrsExpiry(t *testing.T) {
+ const T = 100_000
+ for x := 0; x < T; x++ {
+ paa := newPeerAddrs()
+ pa := &paa
+		// Try a lot of random inputs.
+		// With N = 5 there are (5^5)*5 ≈ 15k possible combinations of element
+		// expiries and query expiry, and T = 100k runs comfortably cover them,
+		// so this should exercise all possible 5-element inputs.
+ const N = 5
+ expiringAddrs := peerAddrsInput(N)
+ for i := 0; i < N; i++ {
+ expiringAddrs[i].Expiry = time.Time{}.Add(time.Duration(1+rand.Intn(N)) * time.Second)
+ }
+ for i := 0; i < N; i++ {
+ pa.Insert(expiringAddrs[i])
+ }
+
+ expiry := time.Time{}.Add(time.Duration(1+rand.Intn(N)) * time.Second)
+ expected := []ma.Multiaddr{}
+ for i := 0; i < N; i++ {
+ if !expiry.Before(expiringAddrs[i].Expiry) {
+ expected = append(expected, expiringAddrs[i].Addr)
+ }
+ }
+ got := []ma.Multiaddr{}
+ for {
+ ea, ok := pa.PopIfExpired(expiry)
+ if !ok {
+ break
+ }
+ got = append(got, ea.Addr)
+ }
+ expiries := []int{}
+ for i := 0; i < N; i++ {
+ expiries = append(expiries, expiringAddrs[i].Expiry.Second())
+ }
+ require.ElementsMatch(t, expected, got, "failed for input: element expiries: %v, expiry: %v", expiries, expiry.Second())
+ }
+}
+
+func TestPeerLimits(t *testing.T) {
+ ab := NewAddrBook()
+ defer ab.Close()
+ ab.maxUnconnectedAddrs = 1024
+
+ peers := peerAddrsInput(2048)
+ for _, p := range peers {
+ ab.AddAddr(p.Peer, p.Addr, p.TTL)
+ }
+ require.Equal(t, 1024, ab.addrs.NumUnconnectedAddrs())
+}
+
+func BenchmarkPeerAddrs(b *testing.B) {
+	sizes := [...]int{1, 10, 100, 1000, 10_000, 100_000, 1_000_000}
+ for _, sz := range sizes {
+ b.Run(fmt.Sprintf("%d", sz), func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ b.StopTimer()
+ paa := newPeerAddrs()
+ pa := &paa
+ expiringAddrs := peerAddrsInput(sz)
+ for i := 0; i < sz; i++ {
+ pa.Insert(expiringAddrs[i])
+ }
+ b.StartTimer()
+ for {
+ _, ok := pa.PopIfExpired(expiringAddrs[len(expiringAddrs)-1].Expiry)
+ if !ok {
+ break
+ }
+ }
+ }
+ })
+ }
+}
diff --git a/p2p/host/peerstore/pstoremem/inmem_test.go b/p2p/host/peerstore/pstoremem/inmem_test.go
new file mode 100644
index 0000000000..064b7d9ae9
--- /dev/null
+++ b/p2p/host/peerstore/pstoremem/inmem_test.go
@@ -0,0 +1,114 @@
+package pstoremem
+
+import (
+ "strconv"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+ pstore "github.com/libp2p/go-libp2p/core/peerstore"
+ pt "github.com/libp2p/go-libp2p/p2p/host/peerstore/test"
+ "github.com/multiformats/go-multiaddr"
+
+ mockClock "github.com/benbjohnson/clock"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/goleak"
+)
+
+func TestInvalidOption(t *testing.T) {
+ _, err := NewPeerstore(1337)
+ require.EqualError(t, err, "unexpected peer store option: 1337")
+}
+
+func TestFuzzInMemoryPeerstore(t *testing.T) {
+ // Just create and close a bunch of peerstores. If this leaks, we'll
+ // catch it in the leak check below.
+ for i := 0; i < 100; i++ {
+ ps, err := NewPeerstore()
+ require.NoError(t, err)
+ ps.Close()
+ }
+}
+
+func TestInMemoryPeerstore(t *testing.T) {
+ pt.TestPeerstore(t, func() (pstore.Peerstore, func()) {
+ ps, err := NewPeerstore()
+ require.NoError(t, err)
+ return ps, func() { ps.Close() }
+ })
+}
+
+func TestPeerstoreProtoStoreLimits(t *testing.T) {
+ const limit = 10
+ ps, err := NewPeerstore(WithMaxProtocols(limit))
+ require.NoError(t, err)
+ defer ps.Close()
+ pt.TestPeerstoreProtoStoreLimits(t, ps, limit)
+}
+
+func TestInMemoryAddrBook(t *testing.T) {
+ clk := mockClock.NewMock()
+ pt.TestAddrBook(t, func() (pstore.AddrBook, func()) {
+ ps, err := NewPeerstore(WithClock(clk))
+ require.NoError(t, err)
+ return ps, func() { ps.Close() }
+ }, clk)
+}
+
+func TestInMemoryKeyBook(t *testing.T) {
+ pt.TestKeyBook(t, func() (pstore.KeyBook, func()) {
+ ps, err := NewPeerstore()
+ require.NoError(t, err)
+ return ps, func() { ps.Close() }
+ })
+}
+
+func BenchmarkInMemoryPeerstore(b *testing.B) {
+ pt.BenchmarkPeerstore(b, func() (pstore.Peerstore, func()) {
+ ps, err := NewPeerstore()
+ require.NoError(b, err)
+ return ps, func() { ps.Close() }
+ }, "InMem")
+}
+
+func BenchmarkInMemoryKeyBook(b *testing.B) {
+ pt.BenchmarkKeyBook(b, func() (pstore.KeyBook, func()) {
+ ps, err := NewPeerstore()
+ require.NoError(b, err)
+ return ps, func() { ps.Close() }
+ })
+}
+
+func TestMain(m *testing.M) {
+ goleak.VerifyTestMain(
+ m,
+ goleak.IgnoreTopFunction("github.com/libp2p/go-libp2p/gologshim/writer.(*MirrorWriter).logRoutine"),
+ goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"),
+ )
+}
+
+func BenchmarkGC(b *testing.B) {
+ clock := mockClock.NewMock()
+ ps, err := NewPeerstore(WithClock(clock))
+ require.NoError(b, err)
+ defer ps.Close()
+
+ peerCount := 100_000
+ addrsPerPeer := 32
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ b.StopTimer()
+ for i := 0; i < peerCount; i++ {
+ id := peer.ID(strconv.Itoa(i))
+ addrs := make([]multiaddr.Multiaddr, addrsPerPeer)
+ for j := 0; j < addrsPerPeer; j++ {
+ addrs[j] = multiaddr.StringCast("/ip4/1.2.3.4/tcp/" + strconv.Itoa(j))
+ }
+ ps.AddAddrs(id, addrs, 24*time.Hour)
+ }
+ clock.Add(25 * time.Hour)
+ b.StartTimer()
+ ps.gc()
+ }
+}
diff --git a/p2p/host/peerstore/pstoremem/keybook.go b/p2p/host/peerstore/pstoremem/keybook.go
new file mode 100644
index 0000000000..f995a08797
--- /dev/null
+++ b/p2p/host/peerstore/pstoremem/keybook.go
@@ -0,0 +1,97 @@
+package pstoremem
+
+import (
+ "errors"
+ "sync"
+
+ ic "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/peer"
+ pstore "github.com/libp2p/go-libp2p/core/peerstore"
+)
+
+type memoryKeyBook struct {
+	sync.RWMutex // a single lock guards both maps; contention should be rare.
+ pks map[peer.ID]ic.PubKey
+ sks map[peer.ID]ic.PrivKey
+}
+
+var _ pstore.KeyBook = (*memoryKeyBook)(nil)
+
+func NewKeyBook() *memoryKeyBook {
+ return &memoryKeyBook{
+ pks: map[peer.ID]ic.PubKey{},
+ sks: map[peer.ID]ic.PrivKey{},
+ }
+}
+
+func (mkb *memoryKeyBook) PeersWithKeys() peer.IDSlice {
+ mkb.RLock()
+ ps := make(peer.IDSlice, 0, len(mkb.pks)+len(mkb.sks))
+ for p := range mkb.pks {
+ ps = append(ps, p)
+ }
+ for p := range mkb.sks {
+ if _, found := mkb.pks[p]; !found {
+ ps = append(ps, p)
+ }
+ }
+ mkb.RUnlock()
+ return ps
+}
+
+func (mkb *memoryKeyBook) PubKey(p peer.ID) ic.PubKey {
+ mkb.RLock()
+ pk := mkb.pks[p]
+ mkb.RUnlock()
+ if pk != nil {
+ return pk
+ }
+ pk, err := p.ExtractPublicKey()
+ if err == nil {
+ mkb.Lock()
+ mkb.pks[p] = pk
+ mkb.Unlock()
+ }
+ return pk
+}
+
+func (mkb *memoryKeyBook) AddPubKey(p peer.ID, pk ic.PubKey) error {
+ // check it's correct first
+ if !p.MatchesPublicKey(pk) {
+ return errors.New("ID does not match PublicKey")
+ }
+
+ mkb.Lock()
+ mkb.pks[p] = pk
+ mkb.Unlock()
+ return nil
+}
+
+func (mkb *memoryKeyBook) PrivKey(p peer.ID) ic.PrivKey {
+ mkb.RLock()
+ defer mkb.RUnlock()
+ return mkb.sks[p]
+}
+
+func (mkb *memoryKeyBook) AddPrivKey(p peer.ID, sk ic.PrivKey) error {
+ if sk == nil {
+ return errors.New("sk is nil (PrivKey)")
+ }
+
+ // check it's correct first
+ if !p.MatchesPrivateKey(sk) {
+ return errors.New("ID does not match PrivateKey")
+ }
+
+ mkb.Lock()
+ mkb.sks[p] = sk
+ mkb.Unlock()
+ return nil
+}
+
+func (mkb *memoryKeyBook) RemovePeer(p peer.ID) {
+ mkb.Lock()
+ delete(mkb.sks, p)
+ delete(mkb.pks, p)
+ mkb.Unlock()
+}
diff --git a/p2p/host/peerstore/pstoremem/metadata.go b/p2p/host/peerstore/pstoremem/metadata.go
new file mode 100644
index 0000000000..305c741719
--- /dev/null
+++ b/p2p/host/peerstore/pstoremem/metadata.go
@@ -0,0 +1,54 @@
+package pstoremem
+
+import (
+ "sync"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+ pstore "github.com/libp2p/go-libp2p/core/peerstore"
+)
+
+type memoryPeerMetadata struct {
+ // store other data, like versions
+ ds map[peer.ID]map[string]interface{}
+ dslock sync.RWMutex
+}
+
+var _ pstore.PeerMetadata = (*memoryPeerMetadata)(nil)
+
+func NewPeerMetadata() *memoryPeerMetadata {
+ return &memoryPeerMetadata{
+ ds: make(map[peer.ID]map[string]interface{}),
+ }
+}
+
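+// Put stores val under key for peer p, lazily creating the peer's metadata map.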
+func (ps *memoryPeerMetadata) Put(p peer.ID, key string, val interface{}) error {
+ ps.dslock.Lock()
+ defer ps.dslock.Unlock()
+ m, ok := ps.ds[p]
+ if !ok {
+ m = make(map[string]interface{})
+ ps.ds[p] = m
+ }
+ m[key] = val
+ return nil
+}
+
+func (ps *memoryPeerMetadata) Get(p peer.ID, key string) (interface{}, error) {
+ ps.dslock.RLock()
+ defer ps.dslock.RUnlock()
+ m, ok := ps.ds[p]
+ if !ok {
+ return nil, pstore.ErrNotFound
+ }
+ val, ok := m[key]
+ if !ok {
+ return nil, pstore.ErrNotFound
+ }
+ return val, nil
+}
+
+func (ps *memoryPeerMetadata) RemovePeer(p peer.ID) {
+ ps.dslock.Lock()
+ delete(ps.ds, p)
+ ps.dslock.Unlock()
+}
diff --git a/p2p/host/peerstore/pstoremem/peerstore.go b/p2p/host/peerstore/pstoremem/peerstore.go
new file mode 100644
index 0000000000..15383f068e
--- /dev/null
+++ b/p2p/host/peerstore/pstoremem/peerstore.go
@@ -0,0 +1,112 @@
+package pstoremem
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ pstore "github.com/libp2p/go-libp2p/p2p/host/peerstore"
+)
+
+type pstoremem struct {
+ peerstore.Metrics
+
+ *memoryKeyBook
+ *memoryAddrBook
+ *memoryProtoBook
+ *memoryPeerMetadata
+}
+
+var _ peerstore.Peerstore = &pstoremem{}
+
+type Option interface{}
+
+// NewPeerstore creates an in-memory, thread-safe collection of peers.
+// It is the caller's responsibility to call RemovePeer to ensure
+// that the peerstore's memory consumption doesn't grow without bound.
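+//
+// A minimal usage sketch (illustrative only; assumes a peer.ID pid and a
+// multiaddr addr are in scope):
+//
+//	ps, err := NewPeerstore()
+//	if err != nil {
+//		return err
+//	}
+//	defer ps.Close()
+//	ps.AddAddr(pid, addr, peerstore.TempAddrTTL)
+//	defer ps.RemovePeer(pid)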
+func NewPeerstore(opts ...Option) (ps *pstoremem, err error) {
+ var protoBookOpts []ProtoBookOption
+ var addrBookOpts []AddrBookOption
+ for _, opt := range opts {
+ switch o := opt.(type) {
+ case ProtoBookOption:
+ protoBookOpts = append(protoBookOpts, o)
+ case AddrBookOption:
+ addrBookOpts = append(addrBookOpts, o)
+ default:
+ return nil, fmt.Errorf("unexpected peer store option: %v", o)
+ }
+ }
+ ab := NewAddrBook(addrBookOpts...)
+
+ pb, err := NewProtoBook(protoBookOpts...)
+ if err != nil {
+ ab.Close()
+ return nil, err
+ }
+
+ return &pstoremem{
+ Metrics: pstore.NewMetrics(),
+ memoryKeyBook: NewKeyBook(),
+ memoryAddrBook: ab,
+ memoryProtoBook: pb,
+ memoryPeerMetadata: NewPeerMetadata(),
+ }, nil
+}
+
+func (ps *pstoremem) Close() (err error) {
+ var errs []error
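+	// weakClose invokes Close on a component only if it implements io.Closer,
+	// collecting any error under the component's name.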
+ weakClose := func(name string, c interface{}) {
+ if cl, ok := c.(io.Closer); ok {
+ if err = cl.Close(); err != nil {
+ errs = append(errs, fmt.Errorf("%s error: %s", name, err))
+ }
+ }
+ }
+ weakClose("keybook", ps.memoryKeyBook)
+ weakClose("addressbook", ps.memoryAddrBook)
+ weakClose("protobook", ps.memoryProtoBook)
+ weakClose("peermetadata", ps.memoryPeerMetadata)
+
+ if len(errs) > 0 {
+ return fmt.Errorf("failed while closing peerstore; err(s): %q", errs)
+ }
+ return nil
+}
+
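+// Peers returns the union of all peers with known keys and all peers with
+// known addresses.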
+func (ps *pstoremem) Peers() peer.IDSlice {
+ set := map[peer.ID]struct{}{}
+ for _, p := range ps.PeersWithKeys() {
+ set[p] = struct{}{}
+ }
+ for _, p := range ps.PeersWithAddrs() {
+ set[p] = struct{}{}
+ }
+
+ pps := make(peer.IDSlice, 0, len(set))
+ for p := range set {
+ pps = append(pps, p)
+ }
+ return pps
+}
+
+func (ps *pstoremem) PeerInfo(p peer.ID) peer.AddrInfo {
+ return peer.AddrInfo{
+ ID: p,
+ Addrs: ps.memoryAddrBook.Addrs(p),
+ }
+}
+
+// RemovePeer removes entries associated with a peer from:
+// * the KeyBook
+// * the ProtoBook
+// * the PeerMetadata
+// * the Metrics
+// It DOES NOT remove the peer from the AddrBook.
+func (ps *pstoremem) RemovePeer(p peer.ID) {
+ ps.memoryKeyBook.RemovePeer(p)
+ ps.memoryProtoBook.RemovePeer(p)
+ ps.memoryPeerMetadata.RemovePeer(p)
+ ps.Metrics.RemovePeer(p)
+}
diff --git a/p2p/host/peerstore/pstoremem/peerstore_test.go b/p2p/host/peerstore/pstoremem/peerstore_test.go
new file mode 100644
index 0000000000..5a07e266da
--- /dev/null
+++ b/p2p/host/peerstore/pstoremem/peerstore_test.go
@@ -0,0 +1,23 @@
+package pstoremem
+
+import (
+ "testing"
+
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/stretchr/testify/require"
+)
+
+func TestPeerStoreAddrBookOpts(t *testing.T) {
+ ps, err := NewPeerstore(WithMaxAddresses(1))
+ require.NoError(t, err)
+ defer ps.Close()
+
+ ps.AddAddr("p1", ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1"), peerstore.TempAddrTTL)
+ res := ps.Addrs("p1")
+ require.NotEmpty(t, res)
+
+ ps.AddAddr("p2", ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1"), peerstore.TempAddrTTL)
+ res = ps.Addrs("p2")
+ require.Empty(t, res)
+}
diff --git a/p2p/host/peerstore/pstoremem/protobook.go b/p2p/host/peerstore/pstoremem/protobook.go
new file mode 100644
index 0000000000..b28ffe11be
--- /dev/null
+++ b/p2p/host/peerstore/pstoremem/protobook.go
@@ -0,0 +1,167 @@
+package pstoremem
+
+import (
+ "errors"
+ "sync"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+ pstore "github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/libp2p/go-libp2p/core/protocol"
+)
+
+type protoSegment struct {
+ sync.RWMutex
+ protocols map[peer.ID]map[protocol.ID]struct{}
+}
+
+type protoSegments [256]*protoSegment
+
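+// get selects the shard for a peer using the last byte of its ID, spreading
+// lock contention across 256 independent segments.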
+func (s *protoSegments) get(p peer.ID) *protoSegment {
+ return s[p[len(p)-1]]
+}
+
+var errTooManyProtocols = errors.New("too many protocols")
+
+type memoryProtoBook struct {
+ segments protoSegments
+
+ maxProtos int
+}
+
+var _ pstore.ProtoBook = (*memoryProtoBook)(nil)
+
+type ProtoBookOption func(book *memoryProtoBook) error
+
+func WithMaxProtocols(num int) ProtoBookOption {
+ return func(pb *memoryProtoBook) error {
+ pb.maxProtos = num
+ return nil
+ }
+}
+
+func NewProtoBook(opts ...ProtoBookOption) (*memoryProtoBook, error) {
+ pb := &memoryProtoBook{
+ segments: func() (ret protoSegments) {
+ for i := range ret {
+ ret[i] = &protoSegment{
+ protocols: make(map[peer.ID]map[protocol.ID]struct{}),
+ }
+ }
+ return ret
+ }(),
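+		// allow 128 protocols per peer by default; override with WithMaxProtocols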
+ maxProtos: 128,
+ }
+
+ for _, opt := range opts {
+ if err := opt(pb); err != nil {
+ return nil, err
+ }
+ }
+ return pb, nil
+}
+
+func (pb *memoryProtoBook) SetProtocols(p peer.ID, protos ...protocol.ID) error {
+ if len(protos) > pb.maxProtos {
+ return errTooManyProtocols
+ }
+
+ newprotos := make(map[protocol.ID]struct{}, len(protos))
+ for _, proto := range protos {
+ newprotos[proto] = struct{}{}
+ }
+
+ s := pb.segments.get(p)
+ s.Lock()
+ s.protocols[p] = newprotos
+ s.Unlock()
+
+ return nil
+}
+
+func (pb *memoryProtoBook) AddProtocols(p peer.ID, protos ...protocol.ID) error {
+ s := pb.segments.get(p)
+ s.Lock()
+ defer s.Unlock()
+
+ protomap, ok := s.protocols[p]
+ if !ok {
+ protomap = make(map[protocol.ID]struct{})
+ s.protocols[p] = protomap
+ }
+ if len(protomap)+len(protos) > pb.maxProtos {
+ return errTooManyProtocols
+ }
+
+ for _, proto := range protos {
+ protomap[proto] = struct{}{}
+ }
+ return nil
+}
+
+func (pb *memoryProtoBook) GetProtocols(p peer.ID) ([]protocol.ID, error) {
+ s := pb.segments.get(p)
+ s.RLock()
+ defer s.RUnlock()
+
+ out := make([]protocol.ID, 0, len(s.protocols[p]))
+ for k := range s.protocols[p] {
+ out = append(out, k)
+ }
+
+ return out, nil
+}
+
+func (pb *memoryProtoBook) RemoveProtocols(p peer.ID, protos ...protocol.ID) error {
+ s := pb.segments.get(p)
+ s.Lock()
+ defer s.Unlock()
+
+ protomap, ok := s.protocols[p]
+ if !ok {
+ // nothing to remove.
+ return nil
+ }
+
+ for _, proto := range protos {
+ delete(protomap, proto)
+ }
+ if len(protomap) == 0 {
+ delete(s.protocols, p)
+ }
+ return nil
+}
+
+func (pb *memoryProtoBook) SupportsProtocols(p peer.ID, protos ...protocol.ID) ([]protocol.ID, error) {
+ s := pb.segments.get(p)
+ s.RLock()
+ defer s.RUnlock()
+
+ out := make([]protocol.ID, 0, len(protos))
+ for _, proto := range protos {
+ if _, ok := s.protocols[p][proto]; ok {
+ out = append(out, proto)
+ }
+ }
+
+ return out, nil
+}
+
+func (pb *memoryProtoBook) FirstSupportedProtocol(p peer.ID, protos ...protocol.ID) (protocol.ID, error) {
+ s := pb.segments.get(p)
+ s.RLock()
+ defer s.RUnlock()
+
+ for _, proto := range protos {
+ if _, ok := s.protocols[p][proto]; ok {
+ return proto, nil
+ }
+ }
+ return "", nil
+}
+
+func (pb *memoryProtoBook) RemovePeer(p peer.ID) {
+ s := pb.segments.get(p)
+ s.Lock()
+ delete(s.protocols, p)
+ s.Unlock()
+}
diff --git a/p2p/host/peerstore/pstoremem/sorting.go b/p2p/host/peerstore/pstoremem/sorting.go
new file mode 100644
index 0000000000..f36ac41b58
--- /dev/null
+++ b/p2p/host/peerstore/pstoremem/sorting.go
@@ -0,0 +1,50 @@
+package pstoremem
+
+import (
+ "bytes"
+
+ ma "github.com/multiformats/go-multiaddr"
+ mafmt "github.com/multiformats/go-multiaddr-fmt"
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
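+// isFDCostlyTransport reports whether dialing a consumes a file descriptor.
+// Currently only TCP addresses match.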
+func isFDCostlyTransport(a ma.Multiaddr) bool {
+ return mafmt.TCP.Matches(a)
+}
+
+type addrList []ma.Multiaddr
+
+func (al addrList) Len() int { return len(al) }
+func (al addrList) Swap(i, j int) { al[i], al[j] = al[j], al[i] }
+
+func (al addrList) Less(i, j int) bool {
+ a := al[i]
+ b := al[j]
+
+	// sort loopback addresses first; dialing them fails immediately if nothing is listening
+ lba := manet.IsIPLoopback(a)
+ lbb := manet.IsIPLoopback(b)
+ if lba && !lbb {
+ return true
+ }
+
+	// sort utp and similar non-fd-consuming addresses before fd-costly ones
+ fda := isFDCostlyTransport(a)
+ fdb := isFDCostlyTransport(b)
+ if !fda {
+ return fdb
+ }
+
+	// 'a' is fd-costly; if 'b' isn't, 'b' sorts first
+ if !fdb {
+ return false
+ }
+
+	// both take file descriptors; if 'b' is loopback, 'a' doesn't sort before it
+ if lbb {
+ return false
+ }
+
+ // for the rest, just sort by bytes
+ return bytes.Compare(a.Bytes(), b.Bytes()) > 0
+}
diff --git a/p2p/host/peerstore/pstoremem/sorting_test.go b/p2p/host/peerstore/pstoremem/sorting_test.go
new file mode 100644
index 0000000000..82c76ef170
--- /dev/null
+++ b/p2p/host/peerstore/pstoremem/sorting_test.go
@@ -0,0 +1,20 @@
+package pstoremem
+
+import (
+ "sort"
+ "testing"
+
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/stretchr/testify/require"
+)
+
+func TestAddressSorting(t *testing.T) {
+ u1 := ma.StringCast("/ip4/152.12.23.53/udp/1234/utp")
+ u2l := ma.StringCast("/ip4/127.0.0.1/udp/1234/utp")
+ local := ma.StringCast("/ip4/127.0.0.1/tcp/1234")
+ norm := ma.StringCast("/ip4/6.5.4.3/tcp/1234")
+
+ l := addrList{local, u1, u2l, norm}
+ sort.Sort(l)
+ require.Equal(t, addrList{u2l, u1, local, norm}, l)
+}
diff --git a/p2p/host/peerstore/test/addr_book_suite.go b/p2p/host/peerstore/test/addr_book_suite.go
new file mode 100644
index 0000000000..85929ad77a
--- /dev/null
+++ b/p2p/host/peerstore/test/addr_book_suite.go
@@ -0,0 +1,530 @@
+package test
+
+import (
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/peer"
+ pstore "github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/libp2p/go-libp2p/core/record"
+ "github.com/libp2p/go-libp2p/core/test"
+
+ mockClock "github.com/benbjohnson/clock"
+ "github.com/multiformats/go-multiaddr"
+)
+
+var addressBookSuite = map[string]func(book pstore.AddrBook, clk *mockClock.Mock) func(*testing.T){
+ "AddAddress": testAddAddress,
+ "Clear": testClearWorks,
+ "SetNegativeTTLClears": testSetNegativeTTLClears,
+ "UpdateTTLs": testUpdateTTLs,
+ "NilAddrsDontBreak": testNilAddrsDontBreak,
+ "AddressesExpire": testAddressesExpire,
+ "ClearWithIter": testClearWithIterator,
+ "PeersWithAddresses": testPeersWithAddrs,
+ "CertifiedAddresses": testCertifiedAddresses,
+}
+
+type AddrBookFactory func() (pstore.AddrBook, func())
+
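+// TestAddrBook runs the address book test suite against the AddrBook produced
+// by factory. A typical wiring from an implementation's own tests might look
+// like this (hypothetical; the exact constructor and options depend on the
+// implementation under test):
+//
+//	clk := mockClock.NewMock()
+//	TestAddrBook(t, func() (pstore.AddrBook, func()) {
+//		ab := pstoremem.NewAddrBook(pstoremem.WithClock(clk))
+//		return ab, func() { ab.Close() }
+//	}, clk)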
+func TestAddrBook(t *testing.T, factory AddrBookFactory, clk *mockClock.Mock) {
+ for name, test := range addressBookSuite {
+ // Create a new peerstore.
+ ab, closeFunc := factory()
+
+ // Run the test.
+ t.Run(name, test(ab, clk))
+
+ // Cleanup.
+ if closeFunc != nil {
+ closeFunc()
+ }
+ }
+}
+
+func testAddAddress(ab pstore.AddrBook, clk *mockClock.Mock) func(*testing.T) {
+ return func(t *testing.T) {
+ t.Run("add a single address", func(t *testing.T) {
+ id := GeneratePeerIDs(1)[0]
+ addrs := GenerateAddrs(1)
+
+ ab.AddAddr(id, addrs[0], time.Hour)
+
+ AssertAddressesEqual(t, addrs, ab.Addrs(id))
+ })
+
+ t.Run("idempotent add single address", func(t *testing.T) {
+ id := GeneratePeerIDs(1)[0]
+ addrs := GenerateAddrs(1)
+
+ ab.AddAddr(id, addrs[0], time.Hour)
+ ab.AddAddr(id, addrs[0], time.Hour)
+
+ AssertAddressesEqual(t, addrs, ab.Addrs(id))
+ })
+
+ t.Run("add multiple addresses", func(t *testing.T) {
+ id := GeneratePeerIDs(1)[0]
+ addrs := GenerateAddrs(3)
+
+ ab.AddAddrs(id, addrs, time.Hour)
+ AssertAddressesEqual(t, addrs, ab.Addrs(id))
+ })
+
+ t.Run("idempotent add multiple addresses", func(t *testing.T) {
+ id := GeneratePeerIDs(1)[0]
+ addrs := GenerateAddrs(3)
+
+ ab.AddAddrs(id, addrs, time.Hour)
+ ab.AddAddrs(id, addrs, time.Hour)
+
+ AssertAddressesEqual(t, addrs, ab.Addrs(id))
+ })
+
+ t.Run("adding an existing address with a later expiration extends its ttl", func(t *testing.T) {
+ id := GeneratePeerIDs(1)[0]
+ addrs := GenerateAddrs(3)
+
+ ab.AddAddrs(id, addrs, time.Second)
+
+ // same address as before but with a higher TTL
+ ab.AddAddrs(id, addrs[2:], time.Hour)
+
+ // after the initial TTL has expired, check that only the third address is present.
+ clk.Add(1200 * time.Millisecond)
+ AssertAddressesEqual(t, addrs[2:], ab.Addrs(id))
+
+ // make sure we actually set the TTL
+ ab.UpdateAddrs(id, time.Hour, 0)
+ AssertAddressesEqual(t, nil, ab.Addrs(id))
+ })
+
+ t.Run("adding an existing address with an earlier expiration never reduces the expiration", func(t *testing.T) {
+ id := GeneratePeerIDs(1)[0]
+ addrs := GenerateAddrs(3)
+
+ ab.AddAddrs(id, addrs, time.Hour)
+
+ // same address as before but with a lower TTL
+ ab.AddAddrs(id, addrs[2:], time.Second)
+
+ // after the initial TTL has expired, check that all three addresses are still present (i.e. the TTL on
+ // the modified one was not shortened).
+ clk.Add(2100 * time.Millisecond)
+ AssertAddressesEqual(t, addrs, ab.Addrs(id))
+ })
+
+ t.Run("adding an existing address with an earlier expiration never reduces the TTL", func(t *testing.T) {
+ id := GeneratePeerIDs(1)[0]
+ addrs := GenerateAddrs(1)
+
+ ab.AddAddrs(id, addrs, 4*time.Second)
+ // 4 seconds left
+ clk.Add(2 * time.Second)
+			// 2 seconds left
+ ab.AddAddrs(id, addrs, 3*time.Second)
+ // 3 seconds left
+ clk.Add(1 * time.Second)
+ // 2 seconds left.
+
+ // We still have the address.
+ AssertAddressesEqual(t, addrs, ab.Addrs(id))
+
+ // The TTL wasn't reduced
+ ab.UpdateAddrs(id, 4*time.Second, 0)
+ AssertAddressesEqual(t, nil, ab.Addrs(id))
+ })
+
+ t.Run("accessing an empty peer ID", func(t *testing.T) {
+ addrs := GenerateAddrs(5)
+ ab.AddAddrs("", addrs, time.Hour)
+ AssertAddressesEqual(t, addrs, ab.Addrs(""))
+ })
+
+ t.Run("add a /p2p address with valid peerid", func(t *testing.T) {
+ peerId := GeneratePeerIDs(1)[0]
+ addr := GenerateAddrs(1)
+ p2pAddr := addr[0].Encapsulate(Multiaddr("/p2p/" + peerId.String()))
+ ab.AddAddr(peerId, p2pAddr, time.Hour)
+ AssertAddressesEqual(t, addr, ab.Addrs(peerId))
+ })
+
+ t.Run("add a /p2p address with invalid peerid", func(t *testing.T) {
+ pids := GeneratePeerIDs(2)
+ pid1 := pids[0]
+ pid2 := pids[1]
+ addr := GenerateAddrs(1)
+ p2pAddr := addr[0].Encapsulate(Multiaddr("/p2p/" + pid1.String()))
+ ab.AddAddr(pid2, p2pAddr, time.Hour)
+ AssertAddressesEqual(t, nil, ab.Addrs(pid2))
+ })
+ }
+}
+
+func testClearWorks(ab pstore.AddrBook, _ *mockClock.Mock) func(t *testing.T) {
+ return func(t *testing.T) {
+ ids := GeneratePeerIDs(2)
+ addrs := GenerateAddrs(5)
+
+ ab.AddAddrs(ids[0], addrs[0:3], time.Hour)
+ ab.AddAddrs(ids[1], addrs[3:], time.Hour)
+
+ AssertAddressesEqual(t, addrs[0:3], ab.Addrs(ids[0]))
+ AssertAddressesEqual(t, addrs[3:], ab.Addrs(ids[1]))
+
+ ab.ClearAddrs(ids[0])
+ AssertAddressesEqual(t, nil, ab.Addrs(ids[0]))
+ AssertAddressesEqual(t, addrs[3:], ab.Addrs(ids[1]))
+
+ ab.ClearAddrs(ids[1])
+ AssertAddressesEqual(t, nil, ab.Addrs(ids[0]))
+ AssertAddressesEqual(t, nil, ab.Addrs(ids[1]))
+ }
+}
+
+func testSetNegativeTTLClears(m pstore.AddrBook, _ *mockClock.Mock) func(t *testing.T) {
+ return func(t *testing.T) {
+ id := GeneratePeerIDs(1)[0]
+ addrs := GenerateAddrs(100)
+
+ m.SetAddrs(id, addrs, time.Hour)
+ AssertAddressesEqual(t, addrs, m.Addrs(id))
+
+ // remove two addresses.
+ m.SetAddr(id, addrs[50], -1)
+ m.SetAddr(id, addrs[75], -1)
+
+		// calculate the survivors: drop original indices 50 and 75
+		// (75 shifts to 74 after the first removal)
+ survivors := append(addrs[0:50], addrs[51:]...)
+ survivors = append(survivors[0:74], survivors[75:]...)
+
+ AssertAddressesEqual(t, survivors, m.Addrs(id))
+
+ // remove _all_ the addresses
+ m.SetAddrs(id, survivors, -1)
+ if len(m.Addrs(id)) != 0 {
+ t.Error("expected empty address list after clearing all addresses")
+ }
+
+ // add half, but try to remove more than we added
+ m.SetAddrs(id, addrs[:50], time.Hour)
+ m.SetAddrs(id, addrs, -1)
+ if len(m.Addrs(id)) != 0 {
+ t.Error("expected empty address list after clearing all addresses")
+ }
+
+ // try to remove the same addr multiple times
+ m.SetAddrs(id, addrs[:5], time.Hour)
+ repeated := make([]multiaddr.Multiaddr, 10)
+ for i := 0; i < len(repeated); i++ {
+ repeated[i] = addrs[0]
+ }
+ m.SetAddrs(id, repeated, -1)
+ if len(m.Addrs(id)) != 4 {
+ t.Errorf("expected 4 addrs after removing one, got %d", len(m.Addrs(id)))
+ }
+ }
+}
+
+func testUpdateTTLs(m pstore.AddrBook, clk *mockClock.Mock) func(t *testing.T) {
+ return func(t *testing.T) {
+ t.Run("update ttl of peer with no addrs", func(_ *testing.T) {
+ id := GeneratePeerIDs(1)[0]
+
+ // Shouldn't panic.
+ m.UpdateAddrs(id, time.Hour, time.Minute)
+ })
+
+ t.Run("update to 0 clears addrs", func(t *testing.T) {
+ id := GeneratePeerIDs(1)[0]
+ addrs := GenerateAddrs(1)
+
+ // Shouldn't panic.
+ m.SetAddrs(id, addrs, time.Hour)
+ m.UpdateAddrs(id, time.Hour, 0)
+ if len(m.Addrs(id)) != 0 {
+ t.Error("expected no addresses")
+ }
+ })
+
+ t.Run("update ttls successfully", func(t *testing.T) {
+ ids := GeneratePeerIDs(2)
+ addrs1, addrs2 := GenerateAddrs(2), GenerateAddrs(2)
+
+ // set two keys with different ttls for each peer.
+ m.SetAddr(ids[0], addrs1[0], time.Hour)
+ m.SetAddr(ids[0], addrs1[1], time.Minute)
+ m.SetAddr(ids[1], addrs2[0], time.Hour)
+ m.SetAddr(ids[1], addrs2[1], time.Minute)
+
+ // Sanity check.
+ AssertAddressesEqual(t, addrs1, m.Addrs(ids[0]))
+ AssertAddressesEqual(t, addrs2, m.Addrs(ids[1]))
+
+ // Will only affect addrs1[0].
+ // Badger does not support subsecond TTLs.
+ // https://github.com/dgraph-io/badger/issues/339
+ m.UpdateAddrs(ids[0], time.Hour, 1*time.Second)
+
+ // No immediate effect.
+ AssertAddressesEqual(t, addrs1, m.Addrs(ids[0]))
+ AssertAddressesEqual(t, addrs2, m.Addrs(ids[1]))
+
+			// After a wait, addrs1[0] is gone.
+ clk.Add(2 * time.Second)
+ AssertAddressesEqual(t, addrs1[1:2], m.Addrs(ids[0]))
+ AssertAddressesEqual(t, addrs2, m.Addrs(ids[1]))
+
+ // Will only affect addrs2[0].
+ m.UpdateAddrs(ids[1], time.Hour, 1*time.Second)
+
+ // No immediate effect.
+ AssertAddressesEqual(t, addrs1[1:2], m.Addrs(ids[0]))
+ AssertAddressesEqual(t, addrs2, m.Addrs(ids[1]))
+
+ clk.Add(2 * time.Second)
+
+			// The first address is gone from both peers.
+ AssertAddressesEqual(t, addrs1[1:], m.Addrs(ids[0]))
+ AssertAddressesEqual(t, addrs2[1:], m.Addrs(ids[1]))
+ })
+
+ }
+}
+
+func testNilAddrsDontBreak(m pstore.AddrBook, _ *mockClock.Mock) func(t *testing.T) {
+ return func(_ *testing.T) {
+ id := GeneratePeerIDs(1)[0]
+
+ m.SetAddr(id, nil, time.Hour)
+ m.AddAddr(id, nil, time.Hour)
+ }
+}
+
+func testAddressesExpire(m pstore.AddrBook, clk *mockClock.Mock) func(t *testing.T) {
+ return func(t *testing.T) {
+ ids := GeneratePeerIDs(2)
+ addrs1 := GenerateAddrs(3)
+ addrs2 := GenerateAddrs(2)
+
+ m.AddAddrs(ids[0], addrs1, time.Hour)
+ m.AddAddrs(ids[1], addrs2, time.Hour)
+
+ AssertAddressesEqual(t, addrs1, m.Addrs(ids[0]))
+ AssertAddressesEqual(t, addrs2, m.Addrs(ids[1]))
+
+ m.AddAddrs(ids[0], addrs1, 2*time.Hour)
+ m.AddAddrs(ids[1], addrs2, 2*time.Hour)
+
+ AssertAddressesEqual(t, addrs1, m.Addrs(ids[0]))
+ AssertAddressesEqual(t, addrs2, m.Addrs(ids[1]))
+
+ m.SetAddr(ids[0], addrs1[0], 100*time.Microsecond)
+ clk.Add(100 * time.Millisecond)
+ AssertAddressesEqual(t, addrs1[1:3], m.Addrs(ids[0]))
+ AssertAddressesEqual(t, addrs2, m.Addrs(ids[1]))
+
+ m.SetAddr(ids[0], addrs1[2], 100*time.Microsecond)
+ clk.Add(100 * time.Millisecond)
+ AssertAddressesEqual(t, addrs1[1:2], m.Addrs(ids[0]))
+ AssertAddressesEqual(t, addrs2, m.Addrs(ids[1]))
+
+ m.SetAddr(ids[1], addrs2[0], 100*time.Microsecond)
+ clk.Add(100 * time.Millisecond)
+ AssertAddressesEqual(t, addrs1[1:2], m.Addrs(ids[0]))
+ AssertAddressesEqual(t, addrs2[1:], m.Addrs(ids[1]))
+
+ m.SetAddr(ids[1], addrs2[1], 100*time.Microsecond)
+ clk.Add(100 * time.Millisecond)
+ AssertAddressesEqual(t, addrs1[1:2], m.Addrs(ids[0]))
+ AssertAddressesEqual(t, nil, m.Addrs(ids[1]))
+
+ m.SetAddr(ids[0], addrs1[1], 100*time.Microsecond)
+ clk.Add(100 * time.Millisecond)
+ AssertAddressesEqual(t, nil, m.Addrs(ids[0]))
+ AssertAddressesEqual(t, nil, m.Addrs(ids[1]))
+ }
+}
+
+func testClearWithIterator(m pstore.AddrBook, _ *mockClock.Mock) func(t *testing.T) {
+ return func(t *testing.T) {
+ ids := GeneratePeerIDs(2)
+ addrs := GenerateAddrs(100)
+
+ // Add the peers with 50 addresses each.
+ m.AddAddrs(ids[0], addrs[:50], pstore.PermanentAddrTTL)
+ m.AddAddrs(ids[1], addrs[50:], pstore.PermanentAddrTTL)
+
+ if all := append(m.Addrs(ids[0]), m.Addrs(ids[1])...); len(all) != 100 {
+ t.Fatal("expected pstore to contain both peers with all their maddrs")
+ }
+
+ // Since we don't fetch these peers, they won't be present in cache.
+
+ m.ClearAddrs(ids[0])
+ if all := append(m.Addrs(ids[0]), m.Addrs(ids[1])...); len(all) != 50 {
+ t.Fatal("expected pstore to contain only addrs of peer 2")
+ }
+
+ m.ClearAddrs(ids[1])
+ if all := append(m.Addrs(ids[0]), m.Addrs(ids[1])...); len(all) != 0 {
+ t.Fatal("expected pstore to contain no addresses")
+ }
+ }
+}
+
+func testPeersWithAddrs(m pstore.AddrBook, _ *mockClock.Mock) func(t *testing.T) {
+ return func(t *testing.T) {
+		// These subtests cannot run in parallel, as the store is modified.
+		// Go runs subtests sequentially in the specified order;
+		// see https://blog.golang.org/subtests
+
+ t.Run("empty addrbook", func(t *testing.T) {
+ if peers := m.PeersWithAddrs(); len(peers) != 0 {
+ t.Fatal("expected to find no peers")
+ }
+ })
+
+ t.Run("non-empty addrbook", func(t *testing.T) {
+ ids := GeneratePeerIDs(2)
+ addrs := GenerateAddrs(10)
+
+ m.AddAddrs(ids[0], addrs[:5], pstore.PermanentAddrTTL)
+ m.AddAddrs(ids[1], addrs[5:], pstore.PermanentAddrTTL)
+
+ if peers := m.PeersWithAddrs(); len(peers) != 2 {
+ t.Fatal("expected to find 2 peers")
+ }
+ })
+ }
+}
+
+func testCertifiedAddresses(m pstore.AddrBook, clk *mockClock.Mock) func(*testing.T) {
+ return func(t *testing.T) {
+ cab := m.(pstore.CertifiedAddrBook)
+
+ priv, _, err := test.RandTestKeyPair(crypto.Ed25519, 256)
+ if err != nil {
+ t.Errorf("error generating testing keys: %v", err)
+ }
+
+ id, _ := peer.IDFromPrivateKey(priv)
+ allAddrs := GenerateAddrs(10)
+ certifiedAddrs := allAddrs[:5]
+ uncertifiedAddrs := allAddrs[5:]
+ rec1 := peer.NewPeerRecord()
+ rec1.PeerID = id
+ rec1.Addrs = certifiedAddrs
+ signedRec1, err := record.Seal(rec1, priv)
+ if err != nil {
+ t.Errorf("error creating signed routing record: %v", err)
+ }
+
+ rec2 := peer.NewPeerRecord()
+ rec2.PeerID = id
+ rec2.Addrs = certifiedAddrs
+ signedRec2, err := record.Seal(rec2, priv)
+ if err != nil {
+ t.Errorf("error creating signed routing record: %v", err)
+ }
+
+ // add a few non-certified addrs
+ m.AddAddrs(id, uncertifiedAddrs, time.Hour)
+
+ // make sure they're present
+ AssertAddressesEqual(t, uncertifiedAddrs, m.Addrs(id))
+
+ // add the signed record to addr book
+ accepted, err := cab.ConsumePeerRecord(signedRec2, time.Hour)
+ if err != nil {
+ t.Errorf("error adding signed routing record to addrbook: %v", err)
+ }
+ if !accepted {
+ t.Errorf("should have accepted signed peer record")
+ }
+
+		// Once signed records are required, the non-certified addrs should be gone
+		// and we should get only certified addrs back from Addrs.
+		// XXX: Disabled until signed records are required
+		// AssertAddressesEqual(t, certifiedAddrs, m.Addrs(id))
+ AssertAddressesEqual(t, allAddrs, m.Addrs(id))
+
+ // PeersWithAddrs should return a single peer
+ if len(m.PeersWithAddrs()) != 1 {
+ t.Errorf("expected PeersWithAddrs to return 1, got %d", len(m.PeersWithAddrs()))
+ }
+
+ // Adding an old record should fail
+ accepted, err = cab.ConsumePeerRecord(signedRec1, time.Hour)
+ if accepted {
+ t.Error("We should have failed to accept a record with an old sequence number")
+ }
+ if err != nil {
+ t.Errorf("expected no error, got: %s", err)
+ }
+
+ // once certified addrs exist, trying to add non-certified addrs should have no effect
+ // m.AddAddrs(id, uncertifiedAddrs, time.Hour)
+ // AssertAddressesEqual(t, certifiedAddrs, m.Addrs(id))
+ // XXX: Disabled until signed records are required
+ m.AddAddrs(id, uncertifiedAddrs, time.Hour)
+ AssertAddressesEqual(t, allAddrs, m.Addrs(id))
+
+ // we should be able to retrieve the signed peer record
+ rec3 := cab.GetPeerRecord(id)
+ if rec3 == nil || !signedRec2.Equal(rec3) {
+ t.Error("unable to retrieve signed routing record from addrbook")
+ }
+
+ // Adding a new envelope should clear existing certified addresses.
+ // Only the newly-added ones should remain
+ certifiedAddrs = certifiedAddrs[:3]
+ rec4 := peer.NewPeerRecord()
+ rec4.PeerID = id
+ rec4.Addrs = certifiedAddrs
+ signedRec4, err := record.Seal(rec4, priv)
+ test.AssertNilError(t, err)
+ accepted, err = cab.ConsumePeerRecord(signedRec4, time.Hour)
+ test.AssertNilError(t, err)
+ if !accepted {
+ t.Error("expected peer record to be accepted")
+ }
+ // AssertAddressesEqual(t, certifiedAddrs, m.Addrs(id))
+ AssertAddressesEqual(t, allAddrs, m.Addrs(id))
+
+ // update TTL on signed addrs to -1 to remove them.
+ // the signed routing record should be deleted
+ // m.SetAddrs(id, certifiedAddrs, -1)
+ // XXX: Disabled until signed records are required
+ m.SetAddrs(id, allAddrs, -1)
+ if len(m.Addrs(id)) != 0 {
+ t.Error("expected zero certified addrs after setting TTL to -1")
+ }
+ if cab.GetPeerRecord(id) != nil {
+ t.Error("expected signed peer record to be removed when addresses expire")
+ }
+
+ // Test that natural TTL expiration clears signed peer records
+ accepted, err = cab.ConsumePeerRecord(signedRec4, time.Second)
+ if !accepted {
+ t.Error("expected peer record to be accepted")
+ }
+ test.AssertNilError(t, err)
+ AssertAddressesEqual(t, certifiedAddrs, m.Addrs(id))
+
+ clk.Add(2 * time.Second)
+ if cab.GetPeerRecord(id) != nil {
+ t.Error("expected signed peer record to be removed when addresses expire")
+ }
+
+ // adding a peer record that's signed with the wrong key should fail
+ priv2, _, err := test.RandTestKeyPair(crypto.Ed25519, 256)
+ test.AssertNilError(t, err)
+ env, err := record.Seal(rec4, priv2)
+ test.AssertNilError(t, err)
+
+ accepted, err = cab.ConsumePeerRecord(env, time.Second)
+ if accepted || err == nil {
+ t.Error("expected adding a PeerRecord that's signed with the wrong key to fail")
+ }
+ }
+}
diff --git a/p2p/host/peerstore/test/benchmarks_suite.go b/p2p/host/peerstore/test/benchmarks_suite.go
new file mode 100644
index 0000000000..63446c16ee
--- /dev/null
+++ b/p2p/host/peerstore/test/benchmarks_suite.go
@@ -0,0 +1,79 @@
+package test
+
+import (
+ "fmt"
+ "testing"
+
+ pstore "github.com/libp2p/go-libp2p/core/peerstore"
+)
+
+func BenchmarkPeerstore(b *testing.B, factory PeerstoreFactory, _ string) {
+ for _, sz := range []int{1, 10, 100} {
+ const N = 10000
+ peers := getPeerPairs(b, N, sz)
+
+ b.Run(fmt.Sprintf("AddAddrs-%d", sz), func(b *testing.B) {
+ ps, cleanup := factory()
+ defer cleanup()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ pp := peers[i%N]
+ ps.AddAddrs(pp.ID, pp.Addr, pstore.RecentlyConnectedAddrTTL)
+ }
+ })
+
+		b.Run(fmt.Sprintf("GetAddrs-%d", sz), func(b *testing.B) {
+			ps, cleanup := factory()
+			defer cleanup()
+			// Populate the peerstore first so the timed loop below measures
+			// address reads rather than writes.
+			for _, pp := range peers {
+				ps.AddAddrs(pp.ID, pp.Addr, pstore.RecentlyConnectedAddrTTL)
+			}
+			b.ResetTimer()
+			for i := 0; i < b.N; i++ {
+				pp := peers[i%N]
+				ps.Addrs(pp.ID)
+			}
+		})
+
+ b.Run(fmt.Sprintf("GetAndClearAddrs-%d", sz), func(b *testing.B) {
+ ps, cleanup := factory()
+ defer cleanup()
+ b.ResetTimer()
+ itersPerBM := 10
+ for i := 0; i < b.N; i++ {
+ for j := 0; j < itersPerBM; j++ {
+ pp := peers[(i+j)%N]
+ ps.AddAddrs(pp.ID, pp.Addr, pstore.RecentlyConnectedAddrTTL)
+ }
+ for j := 0; j < itersPerBM; j++ {
+ pp := peers[(i+j)%N]
+ ps.Addrs(pp.ID)
+ }
+ for j := 0; j < itersPerBM; j++ {
+ pp := peers[(i+j)%N]
+ ps.ClearAddrs(pp.ID)
+ }
+ }
+ })
+
+ b.Run(fmt.Sprintf("PeersWithAddrs-%d", sz), func(b *testing.B) {
+ ps, cleanup := factory()
+ defer cleanup()
+ for _, pp := range peers {
+ ps.AddAddrs(pp.ID, pp.Addr, pstore.RecentlyConnectedAddrTTL)
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _ = ps.PeersWithAddrs()
+ }
+ })
+
+ b.Run(fmt.Sprintf("SetAddrs-%d", sz), func(b *testing.B) {
+ ps, cleanup := factory()
+ defer cleanup()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ pp := peers[i%N]
+ ps.SetAddrs(pp.ID, pp.Addr, pstore.RecentlyConnectedAddrTTL)
+ }
+ })
+ }
+}
diff --git a/p2p/host/peerstore/test/keybook_suite.go b/p2p/host/peerstore/test/keybook_suite.go
new file mode 100644
index 0000000000..3e559753bc
--- /dev/null
+++ b/p2p/host/peerstore/test/keybook_suite.go
@@ -0,0 +1,337 @@
+package test
+
+import (
+ "sort"
+ "testing"
+
+ ic "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/peer"
+ pstore "github.com/libp2p/go-libp2p/core/peerstore"
+ pt "github.com/libp2p/go-libp2p/core/test"
+
+ "github.com/stretchr/testify/require"
+)
+
+var keyBookSuite = map[string]func(kb pstore.KeyBook) func(*testing.T){
+ "AddGetPrivKey": testKeybookPrivKey,
+ "AddGetPubKey": testKeyBookPubKey,
+ "PeersWithKeys": testKeyBookPeers,
+ "PubKeyAddedOnRetrieve": testInlinedPubKeyAddedOnRetrieve,
+ "Delete": testKeyBookDelete,
+}
+
+type KeyBookFactory func() (pstore.KeyBook, func())
+
+func TestKeyBook(t *testing.T, factory KeyBookFactory) {
+ for name, test := range keyBookSuite {
+ // Create a new peerstore.
+ kb, closeFunc := factory()
+
+ // Run the test.
+ t.Run(name, test(kb))
+
+ // Cleanup.
+ if closeFunc != nil {
+ closeFunc()
+ }
+ }
+}
+
+func testKeybookPrivKey(kb pstore.KeyBook) func(t *testing.T) {
+ return func(t *testing.T) {
+ if peers := kb.PeersWithKeys(); len(peers) > 0 {
+ t.Error("expected peers to be empty on init")
+ }
+
+ priv, _, err := pt.RandTestKeyPair(ic.RSA, 2048)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ id, err := peer.IDFromPrivateKey(priv)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if res := kb.PrivKey(id); res != nil {
+ t.Error("retrieving private key should have failed")
+ }
+
+ err = kb.AddPrivKey(id, priv)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if res := kb.PrivKey(id); !priv.Equals(res) {
+ t.Error("retrieved private key did not match stored private key")
+ }
+
+ if peers := kb.PeersWithKeys(); len(peers) != 1 || peers[0] != id {
+ t.Error("list of peers did not include test peer")
+ }
+ }
+}
+
+func testKeyBookPubKey(kb pstore.KeyBook) func(t *testing.T) {
+ return func(t *testing.T) {
+ if peers := kb.PeersWithKeys(); len(peers) > 0 {
+ t.Error("expected peers to be empty on init")
+ }
+
+ _, pub, err := pt.RandTestKeyPair(ic.RSA, 2048)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ id, err := peer.IDFromPublicKey(pub)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if res := kb.PubKey(id); res != nil {
+ t.Error("retrieving public key should have failed")
+ }
+
+ err = kb.AddPubKey(id, pub)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if res := kb.PubKey(id); !pub.Equals(res) {
+ t.Error("retrieved public key did not match stored public key")
+ }
+
+ if peers := kb.PeersWithKeys(); len(peers) != 1 || peers[0] != id {
+ t.Error("list of peers did not include test peer")
+ }
+ }
+}
+
+func testKeyBookPeers(kb pstore.KeyBook) func(t *testing.T) {
+ return func(t *testing.T) {
+ if peers := kb.PeersWithKeys(); len(peers) > 0 {
+ t.Error("expected peers to be empty on init")
+ }
+
+ var peers peer.IDSlice
+ for i := 0; i < 10; i++ {
+ // Add a public key.
+ _, pub, err := pt.RandTestKeyPair(ic.RSA, 2048)
+ if err != nil {
+ t.Fatal(err)
+ }
+ p1, err := peer.IDFromPublicKey(pub)
+ if err != nil {
+ t.Fatal(err)
+ }
+ kb.AddPubKey(p1, pub)
+
+ // Add a private key.
+ priv, _, err := pt.RandTestKeyPair(ic.RSA, 2048)
+ if err != nil {
+ t.Fatal(err)
+ }
+ p2, err := peer.IDFromPrivateKey(priv)
+ if err != nil {
+ t.Fatal(err)
+ }
+ kb.AddPrivKey(p2, priv)
+
+ peers = append(peers, []peer.ID{p1, p2}...)
+ }
+
+ kbPeers := kb.PeersWithKeys()
+ sort.Sort(kbPeers)
+ sort.Sort(peers)
+
+ for i, p := range kbPeers {
+ if p != peers[i] {
+ t.Errorf("mismatch of peer at index %d", i)
+ }
+ }
+ }
+}
+
+func testInlinedPubKeyAddedOnRetrieve(kb pstore.KeyBook) func(t *testing.T) {
+ return func(t *testing.T) {
+ t.Skip("key inlining disabled for now: see libp2p/specs#111")
+
+ if peers := kb.PeersWithKeys(); len(peers) > 0 {
+ t.Error("expected peers to be empty on init")
+ }
+
+ // Key small enough for inlining.
+ _, pub, err := ic.GenerateKeyPair(ic.Ed25519, 256)
+ if err != nil {
+ t.Error(err)
+ }
+
+ id, err := peer.IDFromPublicKey(pub)
+ if err != nil {
+ t.Error(err)
+ }
+
+ pubKey := kb.PubKey(id)
+ if !pubKey.Equals(pub) {
+ t.Error("mismatch between original public key and keybook-calculated one")
+ }
+ }
+}
+
+func testKeyBookDelete(kb pstore.KeyBook) func(t *testing.T) {
+ return func(t *testing.T) {
+ // don't use an ed25519 key here, otherwise the key book might try to derive the pubkey from the peer ID
+ priv, pub, err := ic.GenerateKeyPair(ic.RSA, 2048)
+ require.NoError(t, err)
+ p, err := peer.IDFromPublicKey(pub)
+ require.NoError(t, err)
+ require.NoError(t, kb.AddPubKey(p, pub))
+ require.NoError(t, kb.AddPrivKey(p, priv))
+ require.NotNil(t, kb.PrivKey(p))
+ require.NotNil(t, kb.PubKey(p))
+ kb.RemovePeer(p)
+ require.Nil(t, kb.PrivKey(p))
+ require.Nil(t, kb.PubKey(p))
+ }
+}
+
+var keybookBenchmarkSuite = map[string]func(kb pstore.KeyBook) func(*testing.B){
+ "PubKey": benchmarkPubKey,
+ "AddPubKey": benchmarkAddPubKey,
+ "PrivKey": benchmarkPrivKey,
+ "AddPrivKey": benchmarkAddPrivKey,
+ "PeersWithKeys": benchmarkPeersWithKeys,
+}
+
+func BenchmarkKeyBook(b *testing.B, factory KeyBookFactory) {
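+	// Map iteration order is randomized in Go, so run the benchmarks in sorted
+	// name order to keep results comparable across runs.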
+ ordernames := make([]string, 0, len(keybookBenchmarkSuite))
+ for name := range keybookBenchmarkSuite {
+ ordernames = append(ordernames, name)
+ }
+ sort.Strings(ordernames)
+ for _, name := range ordernames {
+ bench := keybookBenchmarkSuite[name]
+ kb, closeFunc := factory()
+
+ b.Run(name, bench(kb))
+
+ if closeFunc != nil {
+ closeFunc()
+ }
+ }
+}
+
+func benchmarkPubKey(kb pstore.KeyBook) func(*testing.B) {
+ return func(b *testing.B) {
+ _, pub, err := pt.RandTestKeyPair(ic.RSA, 2048)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ id, err := peer.IDFromPublicKey(pub)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ err = kb.AddPubKey(id, pub)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ kb.PubKey(id)
+ }
+ }
+}
+
+func benchmarkAddPubKey(kb pstore.KeyBook) func(*testing.B) {
+ return func(b *testing.B) {
+ _, pub, err := pt.RandTestKeyPair(ic.RSA, 2048)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ id, err := peer.IDFromPublicKey(pub)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ kb.AddPubKey(id, pub)
+ }
+ }
+}
+
+func benchmarkPrivKey(kb pstore.KeyBook) func(*testing.B) {
+ return func(b *testing.B) {
+ priv, _, err := pt.RandTestKeyPair(ic.RSA, 2048)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ id, err := peer.IDFromPrivateKey(priv)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ err = kb.AddPrivKey(id, priv)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ kb.PrivKey(id)
+ }
+ }
+}
+
+func benchmarkAddPrivKey(kb pstore.KeyBook) func(*testing.B) {
+ return func(b *testing.B) {
+ priv, _, err := pt.RandTestKeyPair(ic.RSA, 2048)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ id, err := peer.IDFromPrivateKey(priv)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ kb.AddPrivKey(id, priv)
+ }
+ }
+}
+
+func benchmarkPeersWithKeys(kb pstore.KeyBook) func(*testing.B) {
+ return func(b *testing.B) {
+ for i := 0; i < 10; i++ {
+ priv, pub, err := pt.RandTestKeyPair(ic.RSA, 2048)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ id, err := peer.IDFromPublicKey(pub)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ err = kb.AddPubKey(id, pub)
+ if err != nil {
+ b.Fatal(err)
+ }
+ err = kb.AddPrivKey(id, priv)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ kb.PeersWithKeys()
+ }
+ }
+}
diff --git a/p2p/host/peerstore/test/peerstore_suite.go b/p2p/host/peerstore/test/peerstore_suite.go
new file mode 100644
index 0000000000..369b459d0f
--- /dev/null
+++ b/p2p/host/peerstore/test/peerstore_suite.go
@@ -0,0 +1,407 @@
+package test
+
+import (
+ "context"
+ "fmt"
+ "math/rand"
+ "reflect"
+ "sort"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/peer"
+ pstore "github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/libp2p/go-libp2p/core/protocol"
+
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/stretchr/testify/require"
+)
+
+var peerstoreSuite = map[string]func(pstore.Peerstore) func(*testing.T){
+ "AddrStream": testAddrStream,
+ "GetStreamBeforePeerAdded": testGetStreamBeforePeerAdded,
+ "AddStreamDuplicates": testAddrStreamDuplicates,
+ "PeerstoreProtoStore": testPeerstoreProtoStore,
+ "BasicPeerstore": testBasicPeerstore,
+ "Metadata": testMetadata,
+ "CertifiedAddrBook": testCertifiedAddrBook,
+}
+
+type PeerstoreFactory func() (pstore.Peerstore, func())
+
+func TestPeerstore(t *testing.T, factory PeerstoreFactory) {
+ for name, test := range peerstoreSuite {
+ // Create a new peerstore.
+ ps, closeFunc := factory()
+
+ // Run the test.
+ t.Run(name, test(ps))
+
+ // Cleanup.
+ if closeFunc != nil {
+ closeFunc()
+ }
+ }
+}
+
+func sortProtos(protos []protocol.ID) {
+ sort.Slice(protos, func(i, j int) bool { return protos[i] < protos[j] })
+}
+
+func testAddrStream(ps pstore.Peerstore) func(t *testing.T) {
+ return func(t *testing.T) {
+ addrs, pid := getAddrs(t, 100), peer.ID("testpeer")
+ ps.AddAddrs(pid, addrs[:10], time.Hour)
+
+ ctx, cancel := context.WithCancel(context.Background())
+ addrch := ps.AddrStream(ctx, pid)
+
+		// While that subscription is active, publish ten more addrs.
+		// This tests that publishing doesn't block.
+ for i := 10; i < 20; i++ {
+ ps.AddAddr(pid, addrs[i], time.Hour)
+ }
+
+ // now receive them (without hanging)
+ timeout := time.After(time.Second * 10)
+ for i := 0; i < 20; i++ {
+ select {
+ case <-addrch:
+ case <-timeout:
+ t.Fatal("timed out")
+ }
+ }
+
+ // start a second stream
+ ctx2, cancel2 := context.WithCancel(context.Background())
+ addrch2 := ps.AddrStream(ctx2, pid)
+
+ done := make(chan struct{})
+ go func() {
+ defer close(done)
+ // now send the rest of the addresses
+ for _, a := range addrs[20:80] {
+ ps.AddAddr(pid, a, time.Hour)
+ }
+ }()
+
+ // receive some concurrently with the goroutine
+ timeout = time.After(time.Second * 10)
+ for i := 0; i < 40; i++ {
+ select {
+ case <-addrch:
+			case <-timeout:
+				t.Fatal("timed out")
+ }
+ }
+
+ <-done
+
+ // receive some more after waiting for that goroutine to complete
+ timeout = time.After(time.Second * 10)
+ for i := 0; i < 20; i++ {
+ select {
+ case <-addrch:
+			case <-timeout:
+				t.Fatal("timed out")
+ }
+ }
+
+ // now cancel it
+ cancel()
+
+ // now check the *second* subscription. We should see 80 addresses.
+ for i := 0; i < 80; i++ {
+ <-addrch2
+ }
+
+ cancel2()
+
+		// add a few more addresses to check that publishing doesn't block after cancellation
+ for _, a := range addrs[80:] {
+ ps.AddAddr(pid, a, time.Hour)
+ }
+ }
+}
+
+func testGetStreamBeforePeerAdded(ps pstore.Peerstore) func(t *testing.T) {
+ return func(t *testing.T) {
+ addrs, pid := getAddrs(t, 10), peer.ID("testpeer")
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ ach := ps.AddrStream(ctx, pid)
+ for i := 0; i < 10; i++ {
+ ps.AddAddr(pid, addrs[i], time.Hour)
+ }
+
+ received := make(map[string]bool)
+ var count int
+
+ for i := 0; i < 10; i++ {
+ a, ok := <-ach
+ if !ok {
+				t.Fatal("channel shouldn't be closed yet")
+ }
+ if a == nil {
+ t.Fatal("got a nil address, that's weird")
+ }
+ count++
+ if received[a.String()] {
+ t.Fatal("received duplicate address")
+ }
+ received[a.String()] = true
+ }
+
+ select {
+ case <-ach:
+			t.Fatal("shouldn't have received any more addresses")
+ default:
+ }
+
+ if count != 10 {
+ t.Fatal("should have received exactly ten addresses, got ", count)
+ }
+
+ for _, a := range addrs {
+ if !received[a.String()] {
+ t.Log(received)
+				t.Fatalf("expected to receive address %s but didn't", a)
+ }
+ }
+ }
+}
+
+func testAddrStreamDuplicates(ps pstore.Peerstore) func(t *testing.T) {
+ return func(t *testing.T) {
+ addrs, pid := getAddrs(t, 10), peer.ID("testpeer")
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ ach := ps.AddrStream(ctx, pid)
+ go func() {
+ for i := 0; i < 10; i++ {
+ ps.AddAddr(pid, addrs[i], time.Hour)
+ ps.AddAddr(pid, addrs[rand.Intn(10)], time.Hour)
+ }
+
+ // make sure that all addresses get processed before context is cancelled
+ time.Sleep(time.Millisecond * 50)
+ cancel()
+ }()
+
+ received := make(map[string]bool)
+ var count int
+ for a := range ach {
+ if a == nil {
+ t.Fatal("got a nil address, that's weird")
+ }
+ count++
+ if received[a.String()] {
+ t.Fatal("received duplicate address")
+ }
+ received[a.String()] = true
+ }
+
+ if count != 10 {
+ t.Fatal("should have received exactly ten addresses")
+ }
+ }
+}
+
+func testPeerstoreProtoStore(ps pstore.Peerstore) func(t *testing.T) {
+ return func(t *testing.T) {
+ t.Run("adding and removing protocols", func(t *testing.T) {
+ p1 := peer.ID("TESTPEER")
+ protos := []protocol.ID{"a", "b", "c", "d"}
+
+ require.NoError(t, ps.AddProtocols(p1, protos...))
+ out, err := ps.GetProtocols(p1)
+ require.NoError(t, err)
+ require.Len(t, out, len(protos), "got wrong number of protocols back")
+
+ sortProtos(out)
+ for i, p := range protos {
+ if out[i] != p {
+ t.Fatal("got wrong protocol")
+ }
+ }
+
+ supported, err := ps.SupportsProtocols(p1, "q", "w", "a", "y", "b")
+ require.NoError(t, err)
+ require.Len(t, supported, 2, "only expected 2 supported")
+
+ if supported[0] != "a" || supported[1] != "b" {
+ t.Fatal("got wrong supported array: ", supported)
+ }
+
+ b, err := ps.FirstSupportedProtocol(p1, "q", "w", "a", "y", "b")
+ require.NoError(t, err)
+ require.Equal(t, protocol.ID("a"), b)
+
+ b, err = ps.FirstSupportedProtocol(p1, "q", "x", "z")
+ require.NoError(t, err)
+ require.Empty(t, b)
+
+ b, err = ps.FirstSupportedProtocol(p1, "a")
+ require.NoError(t, err)
+ require.Equal(t, protocol.ID("a"), b)
+
+ protos = []protocol.ID{"other", "yet another", "one more"}
+ require.NoError(t, ps.SetProtocols(p1, protos...))
+
+ supported, err = ps.SupportsProtocols(p1, "q", "w", "a", "y", "b")
+ require.NoError(t, err)
+ require.Empty(t, supported, "none of those protocols should have been supported")
+
+ supported, err = ps.GetProtocols(p1)
+ require.NoError(t, err)
+
+ sortProtos(supported)
+ sortProtos(protos)
+ if !reflect.DeepEqual(supported, protos) {
+ t.Fatalf("expected previously set protos; expected: %v, have: %v", protos, supported)
+ }
+
+ require.NoError(t, ps.RemoveProtocols(p1, protos[:2]...))
+
+ supported, err = ps.GetProtocols(p1)
+ require.NoError(t, err)
+ if !reflect.DeepEqual(supported, protos[2:]) {
+ t.Fatal("expected only one protocol to remain")
+ }
+ })
+
+ t.Run("removing peer", func(t *testing.T) {
+ p := peer.ID("foobar")
+ protos := []protocol.ID{"a", "b"}
+
+ require.NoError(t, ps.SetProtocols(p, protos...))
+ out, err := ps.GetProtocols(p)
+ require.NoError(t, err)
+ require.Len(t, out, 2)
+ ps.RemovePeer(p)
+ out, err = ps.GetProtocols(p)
+ require.NoError(t, err)
+ require.Empty(t, out)
+ })
+ }
+}
+
+func testBasicPeerstore(ps pstore.Peerstore) func(t *testing.T) {
+ return func(t *testing.T) {
+ var pids []peer.ID
+ addrs := getAddrs(t, 10)
+
+ for _, a := range addrs {
+ priv, _, err := crypto.GenerateKeyPair(crypto.RSA, 2048)
+ if err != nil {
+ t.Fatal(err)
+ }
+ p, err := peer.IDFromPrivateKey(priv)
+ if err != nil {
+ t.Fatal(err)
+ }
+ pids = append(pids, p)
+ ps.AddAddr(p, a, pstore.PermanentAddrTTL)
+ }
+
+ peers := ps.Peers()
+ if len(peers) != 10 {
+ t.Fatal("expected ten peers, got", len(peers))
+ }
+
+ pinfo := ps.PeerInfo(pids[0])
+ if !pinfo.Addrs[0].Equal(addrs[0]) {
+ t.Fatal("stored wrong address")
+ }
+ }
+}
+
+func testMetadata(ps pstore.Peerstore) func(t *testing.T) {
+ return func(t *testing.T) {
+ t.Run("putting and getting", func(t *testing.T) {
+ pids := make([]peer.ID, 3)
+ for i := range pids {
+ priv, _, err := crypto.GenerateKeyPair(crypto.RSA, 2048)
+ require.NoError(t, err)
+ p, err := peer.IDFromPrivateKey(priv)
+ require.NoError(t, err)
+ pids[i] = p
+ }
+ for _, p := range pids {
+ require.NoError(t, ps.Put(p, "AgentVersion", "string"), "failed to put AgentVersion")
+ require.NoError(t, ps.Put(p, "bar", 1), "failed to put bar")
+ }
+ for _, p := range pids {
+ v, err := ps.Get(p, "AgentVersion")
+ require.NoError(t, err)
+ require.Equal(t, "string", v)
+
+ v, err = ps.Get(p, "bar")
+ require.NoError(t, err)
+ require.Equal(t, 1, v)
+ }
+ })
+
+ t.Run("removing a peer", func(t *testing.T) {
+ p := peer.ID("foo")
+ otherP := peer.ID("foobar")
+ require.NoError(t, ps.Put(otherP, "AgentVersion", "v1"))
+ require.NoError(t, ps.Put(p, "AgentVersion", "v1"))
+ require.NoError(t, ps.Put(p, "bar", 1))
+ ps.RemovePeer(p)
+ _, err := ps.Get(p, "AgentVersion")
+ require.ErrorIs(t, err, pstore.ErrNotFound)
+ _, err = ps.Get(p, "bar")
+ require.ErrorIs(t, err, pstore.ErrNotFound)
+ // make sure that entries for otherP were not deleted
+ val, err := ps.Get(otherP, "AgentVersion")
+ require.NoError(t, err)
+ require.Equal(t, "v1", val)
+ })
+ }
+}
+
+func testCertifiedAddrBook(ps pstore.Peerstore) func(*testing.T) {
+ return func(t *testing.T) {
+ _, ok := ps.(pstore.CertifiedAddrBook)
+ if !ok {
+ t.Error("expected peerstore to implement CertifiedAddrBook interface")
+ }
+ }
+}
+
+func getAddrs(t *testing.T, n int) []ma.Multiaddr {
+ var addrs []ma.Multiaddr
+ for i := 0; i < n; i++ {
+ a, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", i))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ addrs = append(addrs, a)
+ }
+ return addrs
+}
+
+func TestPeerstoreProtoStoreLimits(t *testing.T, ps pstore.Peerstore, limit int) {
+ p := peer.ID("foobar")
+ protocols := make([]protocol.ID, limit)
+ for i := 0; i < limit; i++ {
+ protocols[i] = protocol.ID(fmt.Sprintf("protocol %d", i))
+ }
+
+ t.Run("setting protocols", func(t *testing.T) {
+ require.NoError(t, ps.SetProtocols(p, protocols...))
+ require.EqualError(t, ps.SetProtocols(p, append(protocols, "proto")...), "too many protocols")
+ })
+ t.Run("adding protocols", func(t *testing.T) {
+ p1 := protocols[:limit/2]
+ p2 := protocols[limit/2:]
+ require.NoError(t, ps.SetProtocols(p, p1...))
+ require.NoError(t, ps.AddProtocols(p, p2...))
+ require.EqualError(t, ps.AddProtocols(p, "proto"), "too many protocols")
+ })
+}
diff --git a/p2p/host/peerstore/test/utils.go b/p2p/host/peerstore/test/utils.go
new file mode 100644
index 0000000000..cb7309290c
--- /dev/null
+++ b/p2p/host/peerstore/test/utils.go
@@ -0,0 +1,91 @@
+package test
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+ pt "github.com/libp2p/go-libp2p/core/test"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
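+// Multiaddr parses m, panicking on invalid input. It is intended for
+// constructing test fixtures only.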
+func Multiaddr(m string) ma.Multiaddr {
+ maddr, err := ma.NewMultiaddr(m)
+ if err != nil {
+ panic(err)
+ }
+ return maddr
+}
+
+type peerpair struct {
+ ID peer.ID
+ Addr []ma.Multiaddr
+}
+
+func RandomPeer(b *testing.B, addrCount int) *peerpair {
+ var (
+ pid peer.ID
+ err error
+ addrs = make([]ma.Multiaddr, addrCount)
+ aFmt = "/ip4/127.0.0.1/tcp/%d/ipfs/%s"
+ )
+
+ b.Helper()
+ if pid, err = pt.RandPeerID(); err != nil {
+ b.Fatal(err)
+ }
+
+ for i := 0; i < addrCount; i++ {
+ if addrs[i], err = ma.NewMultiaddr(fmt.Sprintf(aFmt, i, pid)); err != nil {
+ b.Fatal(err)
+ }
+ }
+ return &peerpair{pid, addrs}
+}
+
+func getPeerPairs(b *testing.B, n int, addrsPerPeer int) []*peerpair {
+ pps := make([]*peerpair, n)
+ for i := 0; i < n; i++ {
+ pps[i] = RandomPeer(b, addrsPerPeer)
+ }
+ return pps
+}
+
+func GenerateAddrs(count int) []ma.Multiaddr {
+ var addrs = make([]ma.Multiaddr, count)
+ for i := 0; i < count; i++ {
+ addrs[i] = Multiaddr(fmt.Sprintf("/ip4/1.1.1.%d/tcp/1111", i))
+ }
+ return addrs
+}
+
+func GeneratePeerIDs(count int) []peer.ID {
+ var ids = make([]peer.ID, count)
+ for i := 0; i < count; i++ {
+ ids[i], _ = pt.RandPeerID()
+ }
+ return ids
+}
+
+func AssertAddressesEqual(t *testing.T, exp, act []ma.Multiaddr) {
+ t.Helper()
+ if len(exp) != len(act) {
+ t.Fatalf("lengths not the same. expected %d, got %d\n", len(exp), len(act))
+ }
+
+ for _, a := range exp {
+ found := false
+
+ for _, b := range act {
+ if a.Equal(b) {
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ t.Fatalf("expected address %s not found", a)
+ }
+ }
+}
diff --git a/p2p/host/pstoremanager/mock_peerstore_test.go b/p2p/host/pstoremanager/mock_peerstore_test.go
new file mode 100644
index 0000000000..2196dccbef
--- /dev/null
+++ b/p2p/host/pstoremanager/mock_peerstore_test.go
@@ -0,0 +1,451 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: github.com/libp2p/go-libp2p/core/peerstore (interfaces: Peerstore)
+//
+// Generated by this command:
+//
+// mockgen -package pstoremanager_test -destination mock_peerstore_test.go github.com/libp2p/go-libp2p/core/peerstore Peerstore
+//
+
+// Package pstoremanager_test is a generated GoMock package.
+package pstoremanager_test
+
+import (
+ context "context"
+ reflect "reflect"
+ time "time"
+
+ crypto "github.com/libp2p/go-libp2p/core/crypto"
+ peer "github.com/libp2p/go-libp2p/core/peer"
+ protocol "github.com/libp2p/go-libp2p/core/protocol"
+ multiaddr "github.com/multiformats/go-multiaddr"
+ gomock "go.uber.org/mock/gomock"
+)
+
+// MockPeerstore is a mock of Peerstore interface.
+type MockPeerstore struct {
+ ctrl *gomock.Controller
+ recorder *MockPeerstoreMockRecorder
+ isgomock struct{}
+}
+
+// MockPeerstoreMockRecorder is the mock recorder for MockPeerstore.
+type MockPeerstoreMockRecorder struct {
+ mock *MockPeerstore
+}
+
+// NewMockPeerstore creates a new mock instance.
+func NewMockPeerstore(ctrl *gomock.Controller) *MockPeerstore {
+ mock := &MockPeerstore{ctrl: ctrl}
+ mock.recorder = &MockPeerstoreMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockPeerstore) EXPECT() *MockPeerstoreMockRecorder {
+ return m.recorder
+}
+
+// AddAddr mocks base method.
+func (m *MockPeerstore) AddAddr(p peer.ID, addr multiaddr.Multiaddr, ttl time.Duration) {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "AddAddr", p, addr, ttl)
+}
+
+// AddAddr indicates an expected call of AddAddr.
+func (mr *MockPeerstoreMockRecorder) AddAddr(p, addr, ttl any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddAddr", reflect.TypeOf((*MockPeerstore)(nil).AddAddr), p, addr, ttl)
+}
+
+// AddAddrs mocks base method.
+func (m *MockPeerstore) AddAddrs(p peer.ID, addrs []multiaddr.Multiaddr, ttl time.Duration) {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "AddAddrs", p, addrs, ttl)
+}
+
+// AddAddrs indicates an expected call of AddAddrs.
+func (mr *MockPeerstoreMockRecorder) AddAddrs(p, addrs, ttl any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddAddrs", reflect.TypeOf((*MockPeerstore)(nil).AddAddrs), p, addrs, ttl)
+}
+
+// AddPrivKey mocks base method.
+func (m *MockPeerstore) AddPrivKey(arg0 peer.ID, arg1 crypto.PrivKey) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "AddPrivKey", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// AddPrivKey indicates an expected call of AddPrivKey.
+func (mr *MockPeerstoreMockRecorder) AddPrivKey(arg0, arg1 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddPrivKey", reflect.TypeOf((*MockPeerstore)(nil).AddPrivKey), arg0, arg1)
+}
+
+// AddProtocols mocks base method.
+func (m *MockPeerstore) AddProtocols(arg0 peer.ID, arg1 ...protocol.ID) error {
+ m.ctrl.T.Helper()
+ varargs := []any{arg0}
+ for _, a := range arg1 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "AddProtocols", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// AddProtocols indicates an expected call of AddProtocols.
+func (mr *MockPeerstoreMockRecorder) AddProtocols(arg0 any, arg1 ...any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]any{arg0}, arg1...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddProtocols", reflect.TypeOf((*MockPeerstore)(nil).AddProtocols), varargs...)
+}
+
+// AddPubKey mocks base method.
+func (m *MockPeerstore) AddPubKey(arg0 peer.ID, arg1 crypto.PubKey) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "AddPubKey", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// AddPubKey indicates an expected call of AddPubKey.
+func (mr *MockPeerstoreMockRecorder) AddPubKey(arg0, arg1 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddPubKey", reflect.TypeOf((*MockPeerstore)(nil).AddPubKey), arg0, arg1)
+}
+
+// AddrStream mocks base method.
+func (m *MockPeerstore) AddrStream(arg0 context.Context, arg1 peer.ID) <-chan multiaddr.Multiaddr {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "AddrStream", arg0, arg1)
+ ret0, _ := ret[0].(<-chan multiaddr.Multiaddr)
+ return ret0
+}
+
+// AddrStream indicates an expected call of AddrStream.
+func (mr *MockPeerstoreMockRecorder) AddrStream(arg0, arg1 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddrStream", reflect.TypeOf((*MockPeerstore)(nil).AddrStream), arg0, arg1)
+}
+
+// Addrs mocks base method.
+func (m *MockPeerstore) Addrs(p peer.ID) []multiaddr.Multiaddr {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Addrs", p)
+ ret0, _ := ret[0].([]multiaddr.Multiaddr)
+ return ret0
+}
+
+// Addrs indicates an expected call of Addrs.
+func (mr *MockPeerstoreMockRecorder) Addrs(p any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Addrs", reflect.TypeOf((*MockPeerstore)(nil).Addrs), p)
+}
+
+// ClearAddrs mocks base method.
+func (m *MockPeerstore) ClearAddrs(p peer.ID) {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "ClearAddrs", p)
+}
+
+// ClearAddrs indicates an expected call of ClearAddrs.
+func (mr *MockPeerstoreMockRecorder) ClearAddrs(p any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClearAddrs", reflect.TypeOf((*MockPeerstore)(nil).ClearAddrs), p)
+}
+
+// Close mocks base method.
+func (m *MockPeerstore) Close() error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Close")
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Close indicates an expected call of Close.
+func (mr *MockPeerstoreMockRecorder) Close() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockPeerstore)(nil).Close))
+}
+
+// FirstSupportedProtocol mocks base method.
+func (m *MockPeerstore) FirstSupportedProtocol(arg0 peer.ID, arg1 ...protocol.ID) (protocol.ID, error) {
+ m.ctrl.T.Helper()
+ varargs := []any{arg0}
+ for _, a := range arg1 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "FirstSupportedProtocol", varargs...)
+ ret0, _ := ret[0].(protocol.ID)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// FirstSupportedProtocol indicates an expected call of FirstSupportedProtocol.
+func (mr *MockPeerstoreMockRecorder) FirstSupportedProtocol(arg0 any, arg1 ...any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]any{arg0}, arg1...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FirstSupportedProtocol", reflect.TypeOf((*MockPeerstore)(nil).FirstSupportedProtocol), varargs...)
+}
+
+// Get mocks base method.
+func (m *MockPeerstore) Get(p peer.ID, key string) (any, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Get", p, key)
+ ret0, _ := ret[0].(any)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// Get indicates an expected call of Get.
+func (mr *MockPeerstoreMockRecorder) Get(p, key any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockPeerstore)(nil).Get), p, key)
+}
+
+// GetProtocols mocks base method.
+func (m *MockPeerstore) GetProtocols(arg0 peer.ID) ([]protocol.ID, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetProtocols", arg0)
+ ret0, _ := ret[0].([]protocol.ID)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetProtocols indicates an expected call of GetProtocols.
+func (mr *MockPeerstoreMockRecorder) GetProtocols(arg0 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProtocols", reflect.TypeOf((*MockPeerstore)(nil).GetProtocols), arg0)
+}
+
+// LatencyEWMA mocks base method.
+func (m *MockPeerstore) LatencyEWMA(arg0 peer.ID) time.Duration {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "LatencyEWMA", arg0)
+ ret0, _ := ret[0].(time.Duration)
+ return ret0
+}
+
+// LatencyEWMA indicates an expected call of LatencyEWMA.
+func (mr *MockPeerstoreMockRecorder) LatencyEWMA(arg0 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LatencyEWMA", reflect.TypeOf((*MockPeerstore)(nil).LatencyEWMA), arg0)
+}
+
+// PeerInfo mocks base method.
+func (m *MockPeerstore) PeerInfo(arg0 peer.ID) peer.AddrInfo {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PeerInfo", arg0)
+ ret0, _ := ret[0].(peer.AddrInfo)
+ return ret0
+}
+
+// PeerInfo indicates an expected call of PeerInfo.
+func (mr *MockPeerstoreMockRecorder) PeerInfo(arg0 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PeerInfo", reflect.TypeOf((*MockPeerstore)(nil).PeerInfo), arg0)
+}
+
+// Peers mocks base method.
+func (m *MockPeerstore) Peers() peer.IDSlice {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Peers")
+ ret0, _ := ret[0].(peer.IDSlice)
+ return ret0
+}
+
+// Peers indicates an expected call of Peers.
+func (mr *MockPeerstoreMockRecorder) Peers() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Peers", reflect.TypeOf((*MockPeerstore)(nil).Peers))
+}
+
+// PeersWithAddrs mocks base method.
+func (m *MockPeerstore) PeersWithAddrs() peer.IDSlice {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PeersWithAddrs")
+ ret0, _ := ret[0].(peer.IDSlice)
+ return ret0
+}
+
+// PeersWithAddrs indicates an expected call of PeersWithAddrs.
+func (mr *MockPeerstoreMockRecorder) PeersWithAddrs() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PeersWithAddrs", reflect.TypeOf((*MockPeerstore)(nil).PeersWithAddrs))
+}
+
+// PeersWithKeys mocks base method.
+func (m *MockPeerstore) PeersWithKeys() peer.IDSlice {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PeersWithKeys")
+ ret0, _ := ret[0].(peer.IDSlice)
+ return ret0
+}
+
+// PeersWithKeys indicates an expected call of PeersWithKeys.
+func (mr *MockPeerstoreMockRecorder) PeersWithKeys() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PeersWithKeys", reflect.TypeOf((*MockPeerstore)(nil).PeersWithKeys))
+}
+
+// PrivKey mocks base method.
+func (m *MockPeerstore) PrivKey(arg0 peer.ID) crypto.PrivKey {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PrivKey", arg0)
+ ret0, _ := ret[0].(crypto.PrivKey)
+ return ret0
+}
+
+// PrivKey indicates an expected call of PrivKey.
+func (mr *MockPeerstoreMockRecorder) PrivKey(arg0 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrivKey", reflect.TypeOf((*MockPeerstore)(nil).PrivKey), arg0)
+}
+
+// PubKey mocks base method.
+func (m *MockPeerstore) PubKey(arg0 peer.ID) crypto.PubKey {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "PubKey", arg0)
+ ret0, _ := ret[0].(crypto.PubKey)
+ return ret0
+}
+
+// PubKey indicates an expected call of PubKey.
+func (mr *MockPeerstoreMockRecorder) PubKey(arg0 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PubKey", reflect.TypeOf((*MockPeerstore)(nil).PubKey), arg0)
+}
+
+// Put mocks base method.
+func (m *MockPeerstore) Put(p peer.ID, key string, val any) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Put", p, key, val)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Put indicates an expected call of Put.
+func (mr *MockPeerstoreMockRecorder) Put(p, key, val any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Put", reflect.TypeOf((*MockPeerstore)(nil).Put), p, key, val)
+}
+
+// RecordLatency mocks base method.
+func (m *MockPeerstore) RecordLatency(arg0 peer.ID, arg1 time.Duration) {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "RecordLatency", arg0, arg1)
+}
+
+// RecordLatency indicates an expected call of RecordLatency.
+func (mr *MockPeerstoreMockRecorder) RecordLatency(arg0, arg1 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecordLatency", reflect.TypeOf((*MockPeerstore)(nil).RecordLatency), arg0, arg1)
+}
+
+// RemovePeer mocks base method.
+func (m *MockPeerstore) RemovePeer(arg0 peer.ID) {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "RemovePeer", arg0)
+}
+
+// RemovePeer indicates an expected call of RemovePeer.
+func (mr *MockPeerstoreMockRecorder) RemovePeer(arg0 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemovePeer", reflect.TypeOf((*MockPeerstore)(nil).RemovePeer), arg0)
+}
+
+// RemoveProtocols mocks base method.
+func (m *MockPeerstore) RemoveProtocols(arg0 peer.ID, arg1 ...protocol.ID) error {
+ m.ctrl.T.Helper()
+ varargs := []any{arg0}
+ for _, a := range arg1 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "RemoveProtocols", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// RemoveProtocols indicates an expected call of RemoveProtocols.
+func (mr *MockPeerstoreMockRecorder) RemoveProtocols(arg0 any, arg1 ...any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]any{arg0}, arg1...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveProtocols", reflect.TypeOf((*MockPeerstore)(nil).RemoveProtocols), varargs...)
+}
+
+// SetAddr mocks base method.
+func (m *MockPeerstore) SetAddr(p peer.ID, addr multiaddr.Multiaddr, ttl time.Duration) {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "SetAddr", p, addr, ttl)
+}
+
+// SetAddr indicates an expected call of SetAddr.
+func (mr *MockPeerstoreMockRecorder) SetAddr(p, addr, ttl any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetAddr", reflect.TypeOf((*MockPeerstore)(nil).SetAddr), p, addr, ttl)
+}
+
+// SetAddrs mocks base method.
+func (m *MockPeerstore) SetAddrs(p peer.ID, addrs []multiaddr.Multiaddr, ttl time.Duration) {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "SetAddrs", p, addrs, ttl)
+}
+
+// SetAddrs indicates an expected call of SetAddrs.
+func (mr *MockPeerstoreMockRecorder) SetAddrs(p, addrs, ttl any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetAddrs", reflect.TypeOf((*MockPeerstore)(nil).SetAddrs), p, addrs, ttl)
+}
+
+// SetProtocols mocks base method.
+func (m *MockPeerstore) SetProtocols(arg0 peer.ID, arg1 ...protocol.ID) error {
+ m.ctrl.T.Helper()
+ varargs := []any{arg0}
+ for _, a := range arg1 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "SetProtocols", varargs...)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// SetProtocols indicates an expected call of SetProtocols.
+func (mr *MockPeerstoreMockRecorder) SetProtocols(arg0 any, arg1 ...any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]any{arg0}, arg1...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetProtocols", reflect.TypeOf((*MockPeerstore)(nil).SetProtocols), varargs...)
+}
+
+// SupportsProtocols mocks base method.
+func (m *MockPeerstore) SupportsProtocols(arg0 peer.ID, arg1 ...protocol.ID) ([]protocol.ID, error) {
+ m.ctrl.T.Helper()
+ varargs := []any{arg0}
+ for _, a := range arg1 {
+ varargs = append(varargs, a)
+ }
+ ret := m.ctrl.Call(m, "SupportsProtocols", varargs...)
+ ret0, _ := ret[0].([]protocol.ID)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// SupportsProtocols indicates an expected call of SupportsProtocols.
+func (mr *MockPeerstoreMockRecorder) SupportsProtocols(arg0 any, arg1 ...any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ varargs := append([]any{arg0}, arg1...)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SupportsProtocols", reflect.TypeOf((*MockPeerstore)(nil).SupportsProtocols), varargs...)
+}
+
+// UpdateAddrs mocks base method.
+func (m *MockPeerstore) UpdateAddrs(p peer.ID, oldTTL, newTTL time.Duration) {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "UpdateAddrs", p, oldTTL, newTTL)
+}
+
+// UpdateAddrs indicates an expected call of UpdateAddrs.
+func (mr *MockPeerstoreMockRecorder) UpdateAddrs(p, oldTTL, newTTL any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAddrs", reflect.TypeOf((*MockPeerstore)(nil).UpdateAddrs), p, oldTTL, newTTL)
+}
diff --git a/p2p/host/pstoremanager/pstoremanager.go b/p2p/host/pstoremanager/pstoremanager.go
new file mode 100644
index 0000000000..dd8444c3a4
--- /dev/null
+++ b/p2p/host/pstoremanager/pstoremanager.go
@@ -0,0 +1,144 @@
+package pstoremanager
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/event"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/libp2p/go-libp2p/p2p/host/eventbus"
+
+ logging "github.com/libp2p/go-libp2p/gologshim"
+)
+
+var log = logging.Logger("pstoremanager")
+
+type Option func(*PeerstoreManager) error
+
+// WithGracePeriod sets the grace period.
+// If a peer doesn't reconnect during the grace period, its data is removed.
+// Default: 1 minute.
+func WithGracePeriod(p time.Duration) Option {
+ return func(m *PeerstoreManager) error {
+ m.gracePeriod = p
+ return nil
+ }
+}
+
+// WithCleanupInterval sets the cleanup interval.
+// During a cleanup run, peers that disconnected before the grace period are removed.
+// If unset, the interval is set to half the grace period.
+func WithCleanupInterval(t time.Duration) Option {
+ return func(m *PeerstoreManager) error {
+ m.cleanupInterval = t
+ return nil
+ }
+}
+
+type PeerstoreManager struct {
+ pstore peerstore.Peerstore
+ eventBus event.Bus
+ network network.Network
+
+ cancel context.CancelFunc
+ refCount sync.WaitGroup
+
+ gracePeriod time.Duration
+ cleanupInterval time.Duration
+}
+
+func NewPeerstoreManager(pstore peerstore.Peerstore, eventBus event.Bus, network network.Network, opts ...Option) (*PeerstoreManager, error) {
+ m := &PeerstoreManager{
+ pstore: pstore,
+ gracePeriod: time.Minute,
+ eventBus: eventBus,
+ network: network,
+ }
+ for _, opt := range opts {
+ if err := opt(m); err != nil {
+ return nil, err
+ }
+ }
+ if m.cleanupInterval == 0 {
+ m.cleanupInterval = m.gracePeriod / 2
+ }
+ return m, nil
+}
+
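+// Start subscribes to peer connectedness events and launches a background
+// goroutine that removes peers from the peerstore once they have been
+// disconnected for longer than the grace period.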
+func (m *PeerstoreManager) Start() {
+ ctx, cancel := context.WithCancel(context.Background())
+ m.cancel = cancel
+ sub, err := m.eventBus.Subscribe(&event.EvtPeerConnectednessChanged{}, eventbus.Name("pstoremanager"))
+ if err != nil {
+ log.Warn("subscription failed. Peerstore manager not activated", "err", err)
+ return
+ }
+ m.refCount.Add(1)
+ go m.background(ctx, sub)
+}
+
+func (m *PeerstoreManager) background(ctx context.Context, sub event.Subscription) {
+ defer m.refCount.Done()
+ defer sub.Close()
+ disconnected := make(map[peer.ID]time.Time)
+
+ ticker := time.NewTicker(m.cleanupInterval)
+ defer ticker.Stop()
+
+ defer func() {
+ for p := range disconnected {
+ m.pstore.RemovePeer(p)
+ }
+ }()
+
+ for {
+ select {
+ case e, ok := <-sub.Out():
+ if !ok {
+ return
+ }
+ ev := e.(event.EvtPeerConnectednessChanged)
+ p := ev.Peer
+ switch ev.Connectedness {
+ case network.Connected, network.Limited:
+ // If we reconnect to the peer before we've cleared the information,
+ // keep it. This is an optimization to keep the disconnected map
+ // small. We still need to check that a peer is actually
+ // disconnected before removing it from the peer store.
+ delete(disconnected, p)
+ default:
+ if _, ok := disconnected[p]; !ok {
+ disconnected[p] = time.Now()
+ }
+ }
+ case <-ticker.C:
+ now := time.Now()
+ for p, disconnectTime := range disconnected {
+ if disconnectTime.Add(m.gracePeriod).Before(now) {
+ // Check that the peer is actually not connected at this point.
+ // This avoids a race condition where the Connected notification
+ // is processed after this time has fired.
+ switch m.network.Connectedness(p) {
+ case network.Connected, network.Limited:
+ default:
+ m.pstore.RemovePeer(p)
+ }
+ delete(disconnected, p)
+ }
+ }
+ case <-ctx.Done():
+ return
+ }
+ }
+}
+
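+// Close cancels the background goroutine and waits for it to return. Any
+// peers still tracked as disconnected are removed from the peerstore on exit.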
+func (m *PeerstoreManager) Close() error {
+ if m.cancel != nil {
+ m.cancel()
+ }
+ m.refCount.Wait()
+ return nil
+}
diff --git a/p2p/host/pstoremanager/pstoremanager_test.go b/p2p/host/pstoremanager/pstoremanager_test.go
new file mode 100644
index 0000000000..58963c576a
--- /dev/null
+++ b/p2p/host/pstoremanager/pstoremanager_test.go
@@ -0,0 +1,112 @@
+package pstoremanager_test
+
+import (
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/event"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/p2p/host/eventbus"
+ "github.com/libp2p/go-libp2p/p2p/host/pstoremanager"
+ swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
+
+ "github.com/stretchr/testify/require"
+ "go.uber.org/mock/gomock"
+)
+
+//go:generate sh -c "go run go.uber.org/mock/mockgen -package pstoremanager_test -destination mock_peerstore_test.go github.com/libp2p/go-libp2p/core/peerstore Peerstore"
+
+func TestGracePeriod(t *testing.T) {
+ t.Parallel()
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+ eventBus := eventbus.NewBus()
+ pstore := NewMockPeerstore(ctrl)
+ const gracePeriod = 250 * time.Millisecond
+ man, err := pstoremanager.NewPeerstoreManager(pstore, eventBus, swarmt.GenSwarm(t), pstoremanager.WithGracePeriod(gracePeriod))
+ require.NoError(t, err)
+ defer man.Close()
+ man.Start()
+
+ emitter, err := eventBus.Emitter(new(event.EvtPeerConnectednessChanged))
+ require.NoError(t, err)
+ start := time.Now()
+ removed := make(chan struct{})
+ pstore.EXPECT().RemovePeer(peer.ID("foobar")).DoAndReturn(func(_ peer.ID) {
+ defer close(removed)
+ // make sure the call happened after the grace period
+ require.GreaterOrEqual(t, time.Since(start), gracePeriod)
+ require.LessOrEqual(t, time.Since(start), 3*gracePeriod)
+ })
+ require.NoError(t, emitter.Emit(event.EvtPeerConnectednessChanged{
+ Peer: "foobar",
+ Connectedness: network.NotConnected,
+ }))
+ <-removed
+}
+
+func TestReconnect(t *testing.T) {
+ t.Parallel()
+ ctrl := gomock.NewController(t)
+ eventBus := eventbus.NewBus()
+ pstore := NewMockPeerstore(ctrl)
+ const gracePeriod = 200 * time.Millisecond
+ man, err := pstoremanager.NewPeerstoreManager(pstore, eventBus, swarmt.GenSwarm(t), pstoremanager.WithGracePeriod(gracePeriod))
+ require.NoError(t, err)
+ defer man.Close()
+ man.Start()
+
+ emitter, err := eventBus.Emitter(new(event.EvtPeerConnectednessChanged))
+ require.NoError(t, err)
+ require.NoError(t, emitter.Emit(event.EvtPeerConnectednessChanged{
+ Peer: "foobar",
+ Connectedness: network.NotConnected,
+ }))
+ require.NoError(t, emitter.Emit(event.EvtPeerConnectednessChanged{
+ Peer: "foobar",
+ Connectedness: network.Connected,
+ }))
+ time.Sleep(gracePeriod * 3 / 2)
+ // There should have been no calls to RemovePeer.
+ ctrl.Finish()
+}
+
+func TestClose(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+ eventBus := eventbus.NewBus()
+ pstore := NewMockPeerstore(ctrl)
+ const gracePeriod = time.Hour
+ man, err := pstoremanager.NewPeerstoreManager(pstore, eventBus, swarmt.GenSwarm(t), pstoremanager.WithGracePeriod(gracePeriod))
+ require.NoError(t, err)
+ man.Start()
+
+ emitter, err := eventBus.Emitter(new(event.EvtPeerConnectednessChanged))
+ require.NoError(t, err)
+
+ sub, err := eventBus.Subscribe(&event.EvtPeerConnectednessChanged{})
+ require.NoError(t, err)
+
+ require.NoError(t, emitter.Emit(event.EvtPeerConnectednessChanged{
+ Peer: "foobar",
+ Connectedness: network.NotConnected,
+ }))
+
+ // make sure the event is sent before we close
+ select {
+ case <-sub.Out():
+ time.Sleep(100 * time.Millisecond) // make sure this event is also picked up by the pstoremanager
+ case <-time.After(5 * time.Second):
+ t.Fatalf("Hit timeout")
+ }
+
+ done := make(chan struct{})
+ pstore.EXPECT().RemovePeer(peer.ID("foobar")).Do(func(peer.ID) { close(done) })
+ require.NoError(t, man.Close())
+ select {
+ case <-done:
+ case <-time.After(5 * time.Second):
+ t.Fatalf("Hit timeout")
+ }
+}
diff --git a/p2p/host/relaysvc/relay.go b/p2p/host/relaysvc/relay.go
new file mode 100644
index 0000000000..f9bbc7588e
--- /dev/null
+++ b/p2p/host/relaysvc/relay.go
@@ -0,0 +1,96 @@
+package relaysvc
+
+import (
+ "context"
+ "sync"
+
+ "github.com/libp2p/go-libp2p/core/event"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/p2p/host/eventbus"
+ relayv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay"
+)
+
+type RelayManager struct {
+ host host.Host
+
+ mutex sync.Mutex
+ relay *relayv2.Relay
+ opts []relayv2.Option
+
+ refCount sync.WaitGroup
+ ctxCancel context.CancelFunc
+}
+
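+// NewRelayManager creates a manager that starts a circuit v2 relay when the
+// host's reachability becomes public and closes it when reachability is lost.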
+func NewRelayManager(host host.Host, opts ...relayv2.Option) *RelayManager {
+ ctx, cancel := context.WithCancel(context.Background())
+ m := &RelayManager{
+ host: host,
+ opts: opts,
+ ctxCancel: cancel,
+ }
+ m.refCount.Add(1)
+ go m.background(ctx)
+ return m
+}
+
+func (m *RelayManager) background(ctx context.Context) {
+ defer m.refCount.Done()
+ defer func() {
+ m.mutex.Lock()
+ if m.relay != nil {
+ m.relay.Close()
+ }
+ m.mutex.Unlock()
+ }()
+
+ subReachability, _ := m.host.EventBus().Subscribe(new(event.EvtLocalReachabilityChanged), eventbus.Name("relaysvc"))
+ defer subReachability.Close()
+
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case ev, ok := <-subReachability.Out():
+ if !ok {
+ return
+ }
+ if err := m.reachabilityChanged(ev.(event.EvtLocalReachabilityChanged).Reachability); err != nil {
+ return
+ }
+ }
+ }
+}
+
+func (m *RelayManager) reachabilityChanged(r network.Reachability) error {
+ switch r {
+ case network.ReachabilityPublic:
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
+ // This could happen if two consecutive EvtLocalReachabilityChanged report the same reachability.
+ // This shouldn't happen, but it's safer to double-check.
+ if m.relay != nil {
+ return nil
+ }
+ relay, err := relayv2.New(m.host, m.opts...)
+ if err != nil {
+ return err
+ }
+ m.relay = relay
+ default:
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
+ if m.relay != nil {
+ err := m.relay.Close()
+ m.relay = nil
+ return err
+ }
+ }
+ return nil
+}
+
+func (m *RelayManager) Close() error {
+ m.ctxCancel()
+ m.refCount.Wait()
+ return nil
+}
diff --git a/p2p/host/relaysvc/relay_test.go b/p2p/host/relaysvc/relay_test.go
new file mode 100644
index 0000000000..83a1784ea2
--- /dev/null
+++ b/p2p/host/relaysvc/relay_test.go
@@ -0,0 +1,67 @@
+package relaysvc
+
+import (
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/event"
+ "github.com/libp2p/go-libp2p/core/network"
+ bhost "github.com/libp2p/go-libp2p/p2p/host/blank"
+ "github.com/libp2p/go-libp2p/p2p/host/eventbus"
+ swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
+ relayv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay"
+ "github.com/stretchr/testify/require"
+)
+
+func TestReachabilityChangeEvent(t *testing.T) {
+ h := bhost.NewBlankHost(swarmt.GenSwarm(t))
+ rmgr := NewRelayManager(h)
+ emitter, err := rmgr.host.EventBus().Emitter(new(event.EvtLocalReachabilityChanged), eventbus.Stateful)
+ if err != nil {
+ t.Fatal(err)
+ }
+ evt := event.EvtLocalReachabilityChanged{Reachability: network.ReachabilityPublic}
+ emitter.Emit(evt)
+ require.Eventually(
+ t,
+ func() bool { rmgr.mutex.Lock(); defer rmgr.mutex.Unlock(); return rmgr.relay != nil },
+ 1*time.Second,
+ 100*time.Millisecond,
+ "relay should be set on public reachability")
+
+ evt = event.EvtLocalReachabilityChanged{Reachability: network.ReachabilityPrivate}
+ emitter.Emit(evt)
+ require.Eventually(
+ t,
+ func() bool { rmgr.mutex.Lock(); defer rmgr.mutex.Unlock(); return rmgr.relay == nil },
+ 3*time.Second,
+ 100*time.Millisecond,
+ "relay should be nil on private reachability")
+
+ evt = event.EvtLocalReachabilityChanged{Reachability: network.ReachabilityPublic}
+ emitter.Emit(evt)
+ evt = event.EvtLocalReachabilityChanged{Reachability: network.ReachabilityUnknown}
+ emitter.Emit(evt)
+ require.Eventually(
+ t,
+ func() bool { rmgr.mutex.Lock(); defer rmgr.mutex.Unlock(); return rmgr.relay == nil },
+ 3*time.Second,
+ 100*time.Millisecond,
+ "relay should be nil on unknown reachability")
+
+ evt = event.EvtLocalReachabilityChanged{Reachability: network.ReachabilityPublic}
+ emitter.Emit(evt)
+ var relay *relayv2.Relay
+ require.Eventually(
+ t,
+ func() bool { rmgr.mutex.Lock(); defer rmgr.mutex.Unlock(); relay = rmgr.relay; return relay != nil },
+ 3*time.Second,
+ 100*time.Millisecond,
+ "relay should be set on public event")
+ emitter.Emit(evt)
+ require.Never(t,
+ func() bool { rmgr.mutex.Lock(); defer rmgr.mutex.Unlock(); return relay != rmgr.relay },
+ 3*time.Second,
+ 100*time.Millisecond,
+ "relay should not be updated on receiving the same event")
+}
diff --git a/p2p/host/resource-manager/README.md b/p2p/host/resource-manager/README.md
new file mode 100644
index 0000000000..ea7886409b
--- /dev/null
+++ b/p2p/host/resource-manager/README.md
@@ -0,0 +1,624 @@
+# The libp2p Network Resource Manager
+
+This package contains the canonical implementation of the libp2p
+Network Resource Manager interface.
+
+The implementation is based on the concept of Resource Management
+Scopes, whereby resource usage is constrained by a DAG of scopes,
+accounting for multiple levels of resource constraints.
+
+The Resource Manager doesn't prioritize resource requests at all; it simply
+checks whether the resource being requested is currently below the defined limits
+and returns an error if the limit is reached. It has no notion of honest vs. bad peers.
+
+The Resource Manager does have a special notion of [allowlisted](#allowlisting-multiaddrs-to-mitigate-eclipse-attacks) multiaddrs that
+have their own limits if the normal system limits are reached.
+
+## Usage
+
+The Resource Manager is intended to be used with go-libp2p. go-libp2p sets up a
+resource manager with the default autoscaled limits if none is provided. If you
+want to configure limits or metrics yourself, set up the resource manager like so:
+
+```go
+// Start with the default scaling limits.
+scalingLimits := rcmgr.DefaultLimits
+
+// Add limits around included libp2p protocols
+libp2p.SetDefaultServiceLimits(&scalingLimits)
+
+// Turn the scaling limits into a concrete set of limits using `.AutoScale`. This
+// scales the limits proportional to your system memory.
+scaledDefaultLimits := scalingLimits.AutoScale()
+
+// Tweak certain settings
+cfg := rcmgr.PartialLimitConfig{
+ System: rcmgr.ResourceLimits{
+ // Allow unlimited outbound streams
+ StreamsOutbound: rcmgr.Unlimited,
+ },
+ // Everything else is default. The exact values will come from `scaledDefaultLimits` above.
+}
+
+// Create our limits by using our cfg and replacing the default values with values from `scaledDefaultLimits`
+limits := cfg.Build(scaledDefaultLimits)
+
+// The resource manager expects a limiter, so we create one from our limits.
+limiter := rcmgr.NewFixedLimiter(limits)
+
+// Metrics are enabled by default. If you want to disable metrics, use the
+// WithMetricsDisabled option
+// Initialize the resource manager
+rm, err := rcmgr.NewResourceManager(limiter, rcmgr.WithMetricsDisabled())
+if err != nil {
+ panic(err)
+}
+
+// Create a libp2p host
+host, err := libp2p.New(libp2p.ResourceManager(rm))
+```
+
+### Saving the limits config
+The easiest way to save the defined limits is to serialize the `PartialLimitConfig`
+type as JSON.
+
+```go
+noisyNeighbor, _ := peer.Decode("QmVvtzcZgCkMnSFf2dnrBPXrWuNFWNM9J3MpZQCvWPuVZf")
+cfg := rcmgr.PartialLimitConfig{
+ System: rcmgr.ResourceLimits{
+ // Allow unlimited outbound streams
+ StreamsOutbound: rcmgr.Unlimited,
+ },
+ Peer: map[peer.ID]rcmgr.ResourceLimits{
+ noisyNeighbor: {
+ // No inbound connections from this peer
+ ConnsInbound: rcmgr.BlockAllLimit,
+ // But let me open connections to them
+ Conns: rcmgr.DefaultLimit,
+ ConnsOutbound: rcmgr.DefaultLimit,
+ // No inbound streams from this peer
+ StreamsInbound: rcmgr.BlockAllLimit,
+ // And let me open unlimited (by me) outbound streams (the peer may have their own limits on me)
+ StreamsOutbound: rcmgr.Unlimited,
+ },
+ },
+}
+jsonBytes, _ := json.Marshal(&cfg)
+
+// string(jsonBytes)
+// {
+// "System": {
+// "StreamsOutbound": "unlimited"
+// },
+// "Peer": {
+// "QmVvtzcZgCkMnSFf2dnrBPXrWuNFWNM9J3MpZQCvWPuVZf": {
+// "StreamsInbound": "blockAll",
+// "StreamsOutbound": "unlimited",
+// "ConnsInbound": "blockAll"
+// }
+// }
+// }
+```
+
+This will omit defaults from the JSON output. It will also serialize the
+blockAll and unlimited values explicitly.
+
+The `Memory` field is serialized as a string to work around the JSON limitation
+of 32-bit integers (`Memory` is an int64).
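+
+Loading the limits back works the same way in reverse. A minimal sketch,
+reusing the `jsonBytes` and `scaledDefaultLimits` values from the examples
+above:
+
+```go
+var cfgAgain rcmgr.PartialLimitConfig
+if err := json.Unmarshal(jsonBytes, &cfgAgain); err != nil {
+ panic(err)
+}
+// Fill in the omitted defaults and build a limiter, as before.
+limiter := rcmgr.NewFixedLimiter(cfgAgain.Build(scaledDefaultLimits))
+_ = limiter
+```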
+
+## Basic Resources
+
+### Memory
+
+Perhaps the most fundamental resource is memory, and in particular
+buffers used for network operations. The system must provide an
+interface for components to reserve memory that accounts for buffers
+(and possibly other live objects), which is scoped within the component.
+Before a new buffer is allocated, the component should try a memory
+reservation, which can fail if the resource limit is exceeded. It is
+then up to the component to react to the error condition, depending on
+the situation. For example, a muxer failing to grow a buffer in
+response to a window change should simply retain the old buffer and
+operate at perhaps degraded performance.
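+
+As a minimal sketch of this pattern, assuming a stream `s` whose scope exposes
+the `ReserveMemory`/`ReleaseMemory` methods from the core `network` package
+(the buffer size is a placeholder):
+
+```go
+const bufSize = 64 << 10 // hypothetical 64 KiB buffer
+if err := s.Scope().ReserveMemory(bufSize, network.ReservationPriorityLow); err != nil {
+ // Reservation failed: keep the old buffer and operate at
+ // degraded performance instead of growing.
+ return err
+}
+buf := make([]byte, bufSize)
+defer s.Scope().ReleaseMemory(bufSize)
+_ = buf
+```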
+
+### File Descriptors
+
+File descriptors are an important resource that uses memory (and
+computational time) at the system level. They are also a scarce
+resource, as typically (unless the user explicitly intervenes) they
+are constrained by the system. Exhaustion of file descriptors may
+render the application incapable of operating (e.g., because it is
+unable to open a file). This is important for libp2p because most
+operating systems represent sockets as file descriptors.
+
+### Connections
+
+Connections are a higher-level concept endemic to libp2p; in order to
+communicate with another peer, a connection must first be
+established. Connections are an important resource in libp2p, as they
+consume memory, goroutines, and possibly file descriptors.
+
+We distinguish between inbound and outbound connections, as the former
+are initiated by remote peers and consume resources in response to
+network events and thus need to be tightly controlled in order to
+protect the application from overload or attack. Outbound
+connections are typically initiated by the application's volition and
+don't need to be controlled as tightly. However, outbound connections
+still consume resources and may be initiated in response to network
+events because of (potentially faulty) application logic, so they
+still need to be constrained.
+
+### Streams
+
+Streams are the fundamental object of interaction in libp2p; all
+protocol interactions happen through a stream that goes over some
+connection. Streams are a fundamental resource in libp2p, as they
+consume memory and goroutines at all levels of the stack.
+
+Streams always belong to a peer and specify a protocol; they may also
+belong to some service in the system. This suggests that, apart
+from global limits, we can constrain stream usage at finer
+granularity, at the protocol and service level.
+
+Once again, we distinguish between inbound and outbound streams.
+Inbound streams are initiated by remote peers and consume resources in
+response to network events; controlling inbound stream usage is again
+paramount for protecting the system from overload or attack.
+Outbound streams are normally initiated by the application or some
+service in the system in order to effect some protocol
+interaction. However, they can also be initiated in response to
+network events because of application or service logic, so we still
+need to constrain them.
+
+
+## Resource Scopes
+
+The Resource Manager is based on the concept of resource
+scopes. Resource Scopes account for resource usage that is temporally
+delimited for the span of the scope. Resource Scopes conceptually
+form a DAG, providing us with a mechanism to enforce multiresolution
+resource accounting. Downstream resource usage is aggregated at scopes
+higher up the graph.
+
+The following diagram depicts the canonical scope graph:
+```
+System
+  +------------> Transient.............+................+
+  |                                    .                .
+  +------------> Service------------- . ----------+    .
+  |                                    .           |    .
+  +-------------> Protocol----------- . ----------+    .
+  |                                    .           |    .
+  +-------------->* Peer               \/          |    .
+                     +------------> Connection     |    .
+                     |                             \/   \/
+                     +--------------------------> Stream
+```
+
+### The System Scope
+
+The system scope is the top level scope that accounts for global
+resource usage at all levels of the system. This scope nests and
+constrains all other scopes and institutes global hard limits.
+
+### The Transient Scope
+
+The transient scope accounts for resources that are in the process of
+full establishment. For instance, a new connection prior to the
+handshake does not belong to any peer, but it still needs to be
+constrained as this opens an avenue for attacks in transient resource
+usage. Similarly, a stream that has not negotiated a protocol yet is
+constrained by the transient scope.
+
+The transient scope effectively represents a DMZ (DeMilitarized Zone),
+where resource usage is accounted for connections and streams that are
+not yet fully established.
+
+### The Allowlist System Scope
+
+Same as the normal system scope above, but is used if the normal system scope is
+already at its limits and the resource is from an allowlisted peer. See
+[Allowlisting multiaddrs to mitigate eclipse
+attacks](#allowlisting-multiaddrs-to-mitigate-eclipse-attacks) for more
+information.
+
+### The Allowlist Transient Scope
+
+Same as the normal transient scope above, but is used if the normal transient
+scope is already at its limits and the resource is from an allowlisted peer. See
+[Allowlisting multiaddrs to mitigate eclipse
+attacks](#allowlisting-multiaddrs-to-mitigate-eclipse-attacks) for more
+information.
+
+### Service Scopes
+
+The system is typically organized across services, which may be
+ambient and provide basic functionality to the system (e.g. identify,
+autonat, relay, etc). Alternatively, services may be explicitly
+instantiated by the application, and provide core components of its
+functionality (e.g. pubsub, the DHT, etc).
+
+Services are logical groupings of streams that implement protocol flow
+and may additionally consume resources such as memory. Services
+typically have at least one stream handler, so they are subject to
+inbound stream creation and resource usage in response to network
+events. As such, the system explicitly models them, allowing for
+isolated resource usage that can be tuned by the user.
+
+### Protocol Scopes
+
+Protocol Scopes account for resources at the protocol level. They are
+an intermediate resource scope that can constrain streams that have no
+associated service, or provide finer-grained resource control within a
+service. They also give system operators an opportunity to explicitly
+restrict specific protocols.
+
+For instance, a service that is not aware of the resource manager and
+has not been ported to mark its streams may still gain limits
+transparently, without any programmer intervention. Furthermore, the
+protocol scope can constrain resource usage for services that
+implement multiple protocols for the sake of backwards
+compatibility. A tighter limit in some older protocol can protect the
+application from resource consumption caused by legacy clients or
+potential attacks.
+
+For a concrete example, consider pubsub with the gossipsub router: the
+service also understands the floodsub protocol for backwards
+compatibility and support for unsophisticated clients that are lagging
+in the implementation effort. By specifying a lower limit for the
+floodsub protocol, we can constrain the service level for legacy
+clients using an inefficient protocol.
+
+### Peer Scopes
+
+The peer scope accounts for resource usage by an individual peer. This
+constrains connections and streams and limits the blast radius of
+resource consumption by a single remote peer.
+
+This ensures that no single peer can use more resources than allowed
+by the peer limits. Every peer has a default limit, but the programmer
+may raise (or lower) limits for specific peers.
+
+
+### Connection Scopes
+
+The connection scope is delimited to the duration of a connection and
+constrains resource usage by a single connection. The scope is a leaf
+in the DAG, with a span that begins when a connection is established
+and ends when the connection is closed. Its resources are aggregated
+to the resource usage of a peer.
+
+### Stream Scopes
+
+The stream scope is delimited to the duration of a stream, and
+constrains resource usage by a single stream. This scope is also a
+leaf in the DAG, with a span that begins when a stream is created and
+ends when the stream is closed. Its resources are aggregated to the
+resource usage of a peer, and constrained by a service and protocol
+scope.
+
+### User Transaction Scopes
+
+User transaction scopes can be created as a child of any extant
+resource scope, and provide the programmer with a delimited scope for
+easy resource accounting. Transactions may form a tree that is rooted
+to some canonical scope in the scope DAG.
+
+For instance, a programmer may create a transaction scope within a
+service that accounts for some control flow delimited resource
+usage. Similarly, a programmer may create a transaction scope for some
+interaction within a stream, e.g. a Request/Response interaction that
+uses a buffer.
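+
+In the core API this concept corresponds to spans: any resource scope can open
+a child span with `BeginSpan`, reserve resources against it, and release
+everything at once with `Done`. A hedged sketch (`scope` and `buf` are
+placeholders):
+
+```go
+span, err := scope.BeginSpan()
+if err != nil {
+ return err
+}
+defer span.Done() // releases everything reserved within the span
+if err := span.ReserveMemory(len(buf), network.ReservationPriorityMedium); err != nil {
+ return err
+}
+```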
+
+## Limits
+
+Each resource scope has an associated limit object, which designates
+limits for all [basic resources](#basic-resources). The limit is checked every time some
+resource is reserved and provides the system with an opportunity to
+constrain resource usage.
+
+There are separate limits for each class of scope, allowing for
+multiresolution and aggregate resource accounting. As such, we have
+limits for the system and transient scopes, default and specific
+limits for services, protocols, and peers, and limits for connections
+and streams.
+
+### Scaling Limits
+
+When building software that is supposed to run on many different kinds of machines,
+with various memory and CPU configurations, it is desirable to have limits that
+scale with the size of the machine.
+
+This is done using the `ScalingLimitConfig`. For every scope, this configuration
+struct defines the absolute bare minimum limits, and an (optional) increase of
+these limits, which will be applied on nodes that have sufficient memory.
+
+A `ScalingLimitConfig` can be converted into a `ConcreteLimitConfig` (which can then be
+used to initialize a fixed limiter with `NewFixedLimiter`) by calling the `Scale` method.
+The `Scale` method takes two parameters: the amount of memory and the number of file
+descriptors that an application is willing to dedicate to libp2p.
+
+These amounts will differ between use cases. A blockchain node running on a dedicated
+server might have a lot of memory, and dedicate 1/4 of that memory to libp2p. On the
+other end of the spectrum, a desktop companion application running as a background
+task on a consumer laptop will probably dedicate significantly less than 1/4 of its system
+memory to libp2p.
+
+For convenience, the `ScalingLimitConfig` also provides an `AutoScale` method,
+which determines the amount of memory and file descriptors available on the
+system, and dedicates up to 1/8 of the memory and 1/2 of the file descriptors to
+libp2p.
+
+For example, one might set:
+```go
+var scalingLimits = ScalingLimitConfig{
+ SystemBaseLimit: BaseLimit{
+ ConnsInbound: 64,
+ ConnsOutbound: 128,
+ Conns: 128,
+ StreamsInbound: 512,
+ StreamsOutbound: 1024,
+ Streams: 1024,
+ Memory: 128 << 20,
+ FD: 256,
+ },
+ SystemLimitIncrease: BaseLimitIncrease{
+ ConnsInbound: 32,
+ ConnsOutbound: 64,
+ Conns: 64,
+ StreamsInbound: 256,
+ StreamsOutbound: 512,
+ Streams: 512,
+ Memory: 256 << 20,
+ FDFraction: 1,
+ },
+}
+```
+
+The base limit (`SystemBaseLimit`) here is the minimum configuration that any
+node will have, no matter how little memory it possesses. For every GB of memory
+passed into the `Scale` method, one `SystemLimitIncrease` is added.
+
+For example, calling `Scale` with 4 GB of memory will result in a limit of 384 for
+`Conns` (128 + 4*64).
+
+The `FDFraction` defines how many of the file descriptors are allocated to this
+scope. In the example above, when called with a file descriptor value of 1000,
+this would result in a limit of 1000 (1000 * 1) file descriptors for the system
+scope. See `TestReadmeExample` in `limit_test.go`.
+
+Note that we only showed the configuration for the system scope here; equivalent
+configuration options apply to all other scopes as well.
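+
+Putting it together, a sketch of turning the scaling config above into a
+limiter (the memory and file descriptor numbers are placeholders):
+
+```go
+// Dedicate 4 GB of memory and 1000 file descriptors to libp2p.
+concreteLimits := scalingLimits.Scale(4<<30, 1000)
+limiter := NewFixedLimiter(concreteLimits)
+_ = limiter
+```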
+
+### Default limits
+
+By default, the resource manager ships with sensible scaling limits and
+makes a reasonable guess at how much system memory you want to dedicate to the
+go-libp2p process. For the default definitions see [`DefaultLimits` and
+`ScalingLimitConfig.AutoScale()`](./limit_defaults.go).
+
+### Tweaking Defaults
+
+If the defaults seem mostly okay but you want to adjust one facet, you can
+simply copy the default struct and update the field you want to change. You can
+apply changes to a `BaseLimit`, `BaseLimitIncrease`, and `ConcreteLimitConfig` with
+`.Apply`.
+
+Example:
+```go
+// An example on how to tweak the default limits
+tweakedDefaults := DefaultLimits
+tweakedDefaults.ProtocolBaseLimit.Streams = 1024
+tweakedDefaults.ProtocolBaseLimit.StreamsInbound = 512
+tweakedDefaults.ProtocolBaseLimit.StreamsOutbound = 512
+```
+
+### How to tune your limits
+
+Once you've set your limits and monitoring (see [Monitoring](#monitoring) below),
+you can tune your limits better. The `rcmgr_blocked_resources` metric will
+tell you what was blocked and for which scope. If you see a steady stream of
+these blocked requests, it means your resource limits are too low for your usage.
+If you see a rare sudden spike, this is okay and it means the resource manager
+protected you from some anomaly.
+
+### How to disable limits
+
+Disabling all limits is sometimes useful when you want to see how many
+resources you use during normal operation. You can then use this information to
+define your initial limits. Disable the limits by using `InfiniteLimits`.
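+
+For example, a sketch:
+
+```go
+rm, err := rcmgr.NewResourceManager(rcmgr.NewFixedLimiter(rcmgr.InfiniteLimits))
+if err != nil {
+ panic(err)
+}
+host, err := libp2p.New(libp2p.ResourceManager(rm))
+```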
+
+### Debug "resource limit exceeded" errors
+
+These errors occur whenever a limit is hit. For example, you'll get this error if
+you are at your limit for the number of streams you can have, and you try to
+open one more.
+
+Example Log:
+```
+2022-08-12T15:49:35.459-0700 DEBUG rcmgr go-libp2p-resource-manager@v0.5.3/scope.go:541 blocked connection from constraining edge {"scope": "conn-19667", "edge": "system", "direction": "Inbound", "usefd": false, "current": 100, "attempted": 1, "limit": 100, "stat": {"NumStreamsInbound":28,"NumStreamsOutbound":66,"NumConnsInbound":37,"NumConnsOutbound":63,"NumFD":33,"Memory":8687616}, "error": "system: cannot reserve connection: resource limit exceeded"}
+```
+
+The line above is an example of what gets emitted when debug logging is enabled
+in the resource manager. You can enable it by setting the environment
+variable `GOLOG_LOG_LEVEL="rcmgr=debug"`. By default, only the error is
+returned to the caller, and nothing is logged by the resource manager itself.
+
+The log line message (and returned error) will tell you which resource limit was
+hit (connection in the log above) and what blocked it (in this case it was the
+system scope that blocked it). The log will also include some more information
+about the current usage of the resources. In the example log above, there is a
+limit of 100 connections, and you can see that we have 37 inbound connections
+and 63 outbound connections. We've reached the limit and the resource manager
+will block any further connections.
+
+The next step in debugging is seeing if this is a recurring problem or just a
+transient error. If it's a transient error it's okay to ignore it since the
+resource manager was doing its job in keeping resource usage under the limit. If
+it's recurring then you should understand what's causing you to hit these limits
+and either refactor your application or raise the limits.
+
+To check if it's a recurring problem you can count the number of times you've
+seen the `"resource limit exceeded"` error over time. You can also check the
+`rcmgr_blocked_resources` metric to see how many times the resource manager has
+blocked a resource over time.
+
+If the resource is blocked by a protocol-level scope, take a look at the various
+resource usages in the metrics. For example, if you run into a new stream being blocked,
+you can check the
+`rcmgr_streams` metric and the "Streams by protocol" graph in the Grafana
+dashboard (assuming you've set that up or something similar; see
+[Monitoring](#monitoring)) to understand the usage pattern of that specific
+protocol. This can help answer questions such as: "Am I constantly around my
+limit?", "Does it make sense to raise my limit?", "Are there any patterns around
+hitting this limit?", and "should I refactor my protocol implementation?"
+
+## Monitoring
+
+Once you have limits set, you'll want to monitor to see if you're running into
+your limits often. This could be a sign that you need to raise your limits
+(your process is more intensive than you originally thought) or that you need
+to fix something in your application (surely you don't need over 1000 streams?).
+
+There are Prometheus metrics that can be hooked up to the resource manager. See
+`obs/stats_test.go` for an example of how to enable this, and `DefaultViews` in
+`stats.go` for recommended views. These metrics can be scraped by Prometheus
+or any other platform that understands a Prometheus endpoint.
+
+There is also an included Grafana dashboard to help kickstart your
+observability into the resource manager. Find more information about it
+[here](./../../../dashboards/resource-manager/README.md).
+
+## Allowlisting multiaddrs to mitigate eclipse attacks
+
+If you have a set of trusted peers and IP addresses, you can use the resource
+manager's [Allowlist](./docs/allowlist.md) to protect yourself from eclipse
+attacks. The set of peers in the allowlist will have their own limits in case
+the normal limits are reached. This means you will always be able to connect to
+these trusted peers even if you've already reached your system limits.
+
+Look at `WithAllowlistedMultiaddrs` and its example in the GoDoc to learn more.
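+
+For instance, a sketch of allowlisting a single trusted address, assuming the
+`limiter` from the Usage section (the address is a placeholder):
+
+```go
+rm, err := rcmgr.NewResourceManager(limiter, rcmgr.WithAllowlistedMultiaddrs(
+ []multiaddr.Multiaddr{multiaddr.StringCast("/ip4/192.0.2.1")},
+))
+```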
+
+## ConnManager vs Resource Manager
+
+go-libp2p already includes a [connection
+manager](https://pkg.go.dev/github.com/libp2p/go-libp2p/core/connmgr#ConnManager),
+so what's the difference between the `ConnManager` and the `ResourceManager`?
+
+ConnManager:
+1. Configured with a low and high watermark number of connections.
+2. Attempts to maintain the number of connections between the low and high
+ markers.
+3. Connections can be given metadata and weight (e.g. a hole punched
+ connection is more valuable than a connection to a publicly addressable
+ endpoint since it took more effort to make the hole punched connection).
+4. The ConnManager will trim connections once the high watermark is reached,
+ trimming down to the low watermark.
+5. Won't block adding another connection above the high watermark, but will
+ trigger the trim mentioned above.
+6. Can trim and prioritize connections with custom logic.
+7. No concept of scopes (like the resource manager).
+
+Resource Manager:
+1. Configured with limits on the number of outgoing and incoming connections at
+ different [resource scopes](#resource-scopes).
+2. Will block adding any more connections if any of the scope-specific limits would be exceeded.
+
+The natural question when comparing these two managers is "how do the watermarks
+and limits interact with each other?". The short answer is that they don't know
+about each other. This can lead to some surprising subtleties, such as the
+trimming never happening because the resource manager's limit is lower than the
+high watermark. This is confusing, and we'd like to fix it. The issue is
+captured in [go-libp2p#1640](https://github.com/libp2p/go-libp2p/issues/1640).
+
+When configuring the resource manager and connection manager, you should set the
+limits in the resource manager as your hard limits that you would never want to
+go over, and set the low/high watermarks as the range at which your application
+works best.
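+
+A sketch of wiring both together, assuming the `rm` resource manager from the
+Usage section (the watermarks and grace period are placeholders):
+
+```go
+cm, err := connmgr.NewConnManager(100, 400, connmgr.WithGracePeriod(time.Minute))
+if err != nil {
+ panic(err)
+}
+host, err := libp2p.New(
+ libp2p.ResourceManager(rm), // hard limits
+ libp2p.ConnectionManager(cm), // preferred working range
+)
+```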
+
+## Examples
+
+Here we consider some concrete examples that can elucidate the abstract
+design as described so far.
+
+### Stream Lifetime
+
+Let's consider a stream and the limits that apply to it.
+The stream scope is created by calling `ResourceManager.OpenStream`.
+
+Initially the stream is constrained by:
+- the system scope, where global hard limits apply.
+- the transient scope, where unnegotiated streams live.
+- the peer scope, where the limits for the peer at the other end of the stream
+ apply.
+
+Once the protocol has been negotiated, the protocol is set by calling
+`StreamManagementScope.SetProtocol`. The constraint from the
+transient scope is removed and the stream is now constrained by the
+protocol instead.
+
+More specifically, the following constraints apply:
+- the system scope, where global hard limits apply.
+- the peer scope, where the limits for the peer at the other end of the stream
+ apply.
+- the protocol scope, where the limits of the specific protocol used apply.
+
+The existence of the protocol limit allows us to implicitly constrain
+streams for services that have not been ported to the resource manager
+yet. Once the programmer attaches a stream to a service by calling
+`StreamScope.SetService`, the stream resources are aggregated and constrained
+by the service scope in addition to its protocol scope.
+
+More specifically, the following constraints apply:
+- the system scope, where global hard limits apply.
+- the peer scope, where the limits for the peer at the other end of the stream
+ apply.
+- the service scope, where the limits of the specific service owning the stream apply.
+- the protocol scope, where the limits of the specific protocol for the stream apply.
+
+
+The resource transfer that happens in the `SetProtocol` and `SetService`
+calls gives the resource manager the opportunity to gate the streams. If
+the transfer results in exceeding the scope limits, then an error
+indicating "resource limit exceeded" is returned. The wrapped error
+includes the name of the scope rejecting the resource acquisition, to
+aid understanding of applicable limits. Note that the (wrapped) error
+implements `net.Error` and is marked as temporary, so that the
+programmer can handle it with a backoff and retry.
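+
+A sketch of that handling, where `err` is the error returned from
+`SetProtocol` or `SetService` and `backoff` is a placeholder duration:
+
+```go
+var nerr net.Error
+if errors.As(err, &nerr) && nerr.Temporary() {
+ // The scope was over its limit; back off and retry later.
+ time.Sleep(backoff)
+ // retry ...
+}
+```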
+
+
+## Implementation Notes
+
+- The package only exports a constructor for the resource manager and
+ basic types for defining limits. Internals are not exposed.
+- Internally, there is a resources object that is embedded in every scope and
+ implements resource accounting.
+- There is a single implementation of a generic resource scope, that
+ provides all necessary interface methods.
+- There are concrete types for all canonical scopes, embedding a
+ pointer to a generic resource scope.
+- Peer and Protocol scopes, which may be created in response to
+ network events, are periodically garbage collected.
+
+## Design Considerations
+
+- The Resource Manager must account for basic resource usage at all
+ levels of the stack, from the internals to application components
+ that use the network facilities of libp2p.
+- Basic resources include memory, streams, connections, and file
+ descriptors. These account for both space and time used by
+ the stack, as each resource has a direct effect on the system
+ availability and performance.
+- The design must support seamless integration for user applications,
+ which should reap the benefits of resource management without any
+ changes. That is, existing applications should be oblivious of the
+ resource manager and transparently obtain limits that protect them
+ from resource exhaustion and OOM conditions.
+- At the same time, the design must support opt-in resource usage
+ accounting for applications that want to explicitly utilize the
+ facilities of the system to inform about and constrain their own
+ resource usage.
+- The design must allow the user to set their own limits, which can be
+ static (fixed) or dynamic.
diff --git a/p2p/host/resource-manager/allowlist.go b/p2p/host/resource-manager/allowlist.go
new file mode 100644
index 0000000000..d2bdb86935
--- /dev/null
+++ b/p2p/host/resource-manager/allowlist.go
@@ -0,0 +1,216 @@
+package rcmgr
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "sync"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+
+ "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
+type Allowlist struct {
+ mu sync.RWMutex
+ // a simple structure of lists of networks. There is probably a faster way
+ // to check if an IP address is in this network than iterating over this
+ // list, but this is good enough for small numbers of networks (<1_000).
+ // Analyze the benchmark before trying to optimize this.
+
+ // Any peer with these IPs is allowed
+ allowedNetworks []*net.IPNet
+
+ // Only the specified peers can use these IPs
+ allowedPeerByNetwork map[peer.ID][]*net.IPNet
+}
+
+// WithAllowlistedMultiaddrs sets the multiaddrs to be in the allowlist
+func WithAllowlistedMultiaddrs(mas []multiaddr.Multiaddr) Option {
+ return func(rm *resourceManager) error {
+ for _, ma := range mas {
+ err := rm.allowlist.Add(ma)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+}
+
+func newAllowlist() Allowlist {
+ return Allowlist{
+ allowedPeerByNetwork: make(map[peer.ID][]*net.IPNet),
+ }
+}
+
+func toIPNet(ma multiaddr.Multiaddr) (*net.IPNet, peer.ID, error) {
+ var ipString string
+ var mask string
+ var allowedPeerStr string
+ var allowedPeer peer.ID
+ var isIPV4 bool
+
+ multiaddr.ForEach(ma, func(c multiaddr.Component) bool {
+ if c.Protocol().Code == multiaddr.P_IP4 || c.Protocol().Code == multiaddr.P_IP6 {
+ isIPV4 = c.Protocol().Code == multiaddr.P_IP4
+ ipString = c.Value()
+ }
+ if c.Protocol().Code == multiaddr.P_IPCIDR {
+ mask = c.Value()
+ }
+ if c.Protocol().Code == multiaddr.P_P2P {
+ allowedPeerStr = c.Value()
+ }
+ return ipString == "" || mask == "" || allowedPeerStr == ""
+ })
+
+ if ipString == "" {
+ return nil, allowedPeer, errors.New("missing ip address")
+ }
+
+ if allowedPeerStr != "" {
+ var err error
+ allowedPeer, err = peer.Decode(allowedPeerStr)
+ if err != nil {
+ return nil, allowedPeer, fmt.Errorf("failed to decode allowed peer: %w", err)
+ }
+ }
+
+ if mask == "" {
+ ip := net.ParseIP(ipString)
+ if ip == nil {
+ return nil, allowedPeer, errors.New("invalid ip address")
+ }
+ var mask net.IPMask
+ if isIPV4 {
+ mask = net.CIDRMask(32, 32)
+ } else {
+ mask = net.CIDRMask(128, 128)
+ }
+
+ ipnet := &net.IPNet{IP: ip, Mask: mask} // avoid shadowing the net package
+ return ipnet, allowedPeer, nil
+ }
+
+ _, ipnet, err := net.ParseCIDR(ipString + "/" + mask)
+ return ipnet, allowedPeer, err
+}
+
+// Add takes a multiaddr and adds it to the allowlist. The multiaddr should be
+// an ip address of the peer with or without a `/p2p` protocol.
+// e.g. /ip4/1.2.3.4/p2p/QmFoo, /ip4/1.2.3.4, and /ip4/1.2.3.0/ipcidr/24 are valid.
+// /p2p/QmFoo is not valid.
+func (al *Allowlist) Add(ma multiaddr.Multiaddr) error {
+ ipnet, allowedPeer, err := toIPNet(ma)
+ if err != nil {
+ return err
+ }
+ al.mu.Lock()
+ defer al.mu.Unlock()
+
+ if allowedPeer != peer.ID("") {
+ // We have a peerID constraint
+ if al.allowedPeerByNetwork == nil {
+ al.allowedPeerByNetwork = make(map[peer.ID][]*net.IPNet)
+ }
+ al.allowedPeerByNetwork[allowedPeer] = append(al.allowedPeerByNetwork[allowedPeer], ipnet)
+ } else {
+ al.allowedNetworks = append(al.allowedNetworks, ipnet)
+ }
+ return nil
+}
+
+func (al *Allowlist) Remove(ma multiaddr.Multiaddr) error {
+ ipnet, allowedPeer, err := toIPNet(ma)
+ if err != nil {
+ return err
+ }
+ al.mu.Lock()
+ defer al.mu.Unlock()
+
+ ipNetList := al.allowedNetworks
+
+ if allowedPeer != "" {
+ // We have a peerID constraint
+ ipNetList = al.allowedPeerByNetwork[allowedPeer]
+ }
+
+ if ipNetList == nil {
+ return nil
+ }
+
+ i := len(ipNetList)
+ for i > 0 {
+ i--
+ if ipNetList[i].IP.Equal(ipnet.IP) && bytes.Equal(ipNetList[i].Mask, ipnet.Mask) {
+ // swap remove
+ ipNetList[i] = ipNetList[len(ipNetList)-1]
+ ipNetList = ipNetList[:len(ipNetList)-1]
+ // We only remove one thing
+ break
+ }
+ }
+
+ if allowedPeer != "" {
+ al.allowedPeerByNetwork[allowedPeer] = ipNetList
+ } else {
+ al.allowedNetworks = ipNetList
+ }
+
+ return nil
+}
+
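+// Allowed reports whether the multiaddr's IP falls inside any allowlisted
+// network, including networks that were registered with a peer ID constraint.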
+func (al *Allowlist) Allowed(ma multiaddr.Multiaddr) bool {
+ ip, err := manet.ToIP(ma)
+ if err != nil {
+ return false
+ }
+ al.mu.RLock()
+ defer al.mu.RUnlock()
+
+ for _, network := range al.allowedNetworks {
+ if network.Contains(ip) {
+ return true
+ }
+ }
+
+ for _, allowedNetworks := range al.allowedPeerByNetwork {
+ for _, network := range allowedNetworks {
+ if network.Contains(ip) {
+ return true
+ }
+ }
+ }
+
+ return false
+}
+
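+// AllowedPeerAndMultiaddr reports whether the multiaddr's IP is in an
+// unconstrained allowlisted network, or in a network allowlisted specifically
+// for the given peer ID.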
+func (al *Allowlist) AllowedPeerAndMultiaddr(peerID peer.ID, ma multiaddr.Multiaddr) bool {
+ ip, err := manet.ToIP(ma)
+ if err != nil {
+ return false
+ }
+ al.mu.RLock()
+ defer al.mu.RUnlock()
+
+ for _, network := range al.allowedNetworks {
+ if network.Contains(ip) {
+ // We found a match that isn't constrained by a peerID
+ return true
+ }
+ }
+
+ if expectedNetworks, ok := al.allowedPeerByNetwork[peerID]; ok {
+ for _, expectedNetwork := range expectedNetworks {
+ if expectedNetwork.Contains(ip) {
+ return true
+ }
+ }
+ }
+
+ return false
+}
diff --git a/p2p/host/resource-manager/allowlist_test.go b/p2p/host/resource-manager/allowlist_test.go
new file mode 100644
index 0000000000..d665b63ff8
--- /dev/null
+++ b/p2p/host/resource-manager/allowlist_test.go
@@ -0,0 +1,288 @@
+package rcmgr
+
+import (
+ "crypto/rand"
+ "fmt"
+ "net"
+ "testing"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/test"
+
+ "github.com/multiformats/go-multiaddr"
+)
+
+func ExampleWithAllowlistedMultiaddrs() {
+ somePeer, err := test.RandPeerID()
+ if err != nil {
+ panic("Failed to generate somePeer")
+ }
+
+ limits := DefaultLimits.AutoScale()
+ rcmgr, err := NewResourceManager(NewFixedLimiter(limits), WithAllowlistedMultiaddrs([]multiaddr.Multiaddr{
+ // Any peer connecting from this IP address
+ multiaddr.StringCast("/ip4/1.2.3.4"),
+ // Only the specified peer from this address
+ multiaddr.StringCast("/ip4/2.2.3.4/p2p/" + somePeer.String()),
+ // Only peers from this 1.2.3.0/24 IP address range
+ multiaddr.StringCast("/ip4/1.2.3.0/ipcidr/24"),
+ }))
+ if err != nil {
+ panic("Failed to start resource manager")
+ }
+
+ // Use rcmgr as before
+ _ = rcmgr
+}
+
+func TestAllowedSimple(t *testing.T) {
+ allowlist := newAllowlist()
+ ma := multiaddr.StringCast("/ip4/1.2.3.4/tcp/1234")
+ err := allowlist.Add(ma)
+ if err != nil {
+ t.Fatalf("failed to add ip4: %s", err)
+ }
+
+ if !allowlist.Allowed(ma) {
+ t.Fatalf("addr should be allowed")
+ }
+}
+
+func TestAllowedWithPeer(t *testing.T) {
+ type testcase struct {
+ name string
+ allowlist []string
+ endpoint multiaddr.Multiaddr
+ peer peer.ID
+ // Is this endpoint allowed? (We don't have peer info yet)
+ isConnAllowed bool
+ // Is this peer + endpoint allowed?
+ isAllowedWithPeer bool
+ }
+
+ peerA := test.RandPeerIDFatal(t)
+ peerB := test.RandPeerIDFatal(t)
+ multiaddrA := multiaddr.StringCast("/ip4/1.2.3.4/tcp/1234")
+ multiaddrB := multiaddr.StringCast("/ip4/2.2.3.4/tcp/1234")
+
+ testcases := []testcase{
+ {
+ name: "Blocked",
+ isConnAllowed: false,
+ isAllowedWithPeer: false,
+ allowlist: []string{"/ip4/1.2.3.1"},
+ endpoint: multiaddrA,
+ peer: peerA,
+ },
+ {
+ name: "Blocked wrong peer",
+ isConnAllowed: true,
+ isAllowedWithPeer: false,
+ allowlist: []string{"/ip4/1.2.3.4" + "/p2p/" + peerB.String()},
+ endpoint: multiaddrA,
+ peer: peerA,
+ },
+ {
+ name: "allowed on network",
+ isConnAllowed: true,
+ isAllowedWithPeer: true,
+ allowlist: []string{"/ip4/1.2.3.0/ipcidr/24"},
+ endpoint: multiaddrA,
+ peer: peerA,
+ },
+ {
+ name: "Blocked peer not on network",
+ isConnAllowed: false,
+ isAllowedWithPeer: false,
+ allowlist: []string{"/ip4/1.2.3.0/ipcidr/24"},
+ endpoint: multiaddrB,
+ peer: peerA,
+ }, {
+ name: "allowed. right network, right peer",
+ isConnAllowed: true,
+ isAllowedWithPeer: true,
+ allowlist: []string{"/ip4/1.2.3.0/ipcidr/24" + "/p2p/" + peerA.String()},
+ endpoint: multiaddrA,
+ peer: peerA,
+ }, {
+ name: "allowed. right network, no peer",
+ isConnAllowed: true,
+ isAllowedWithPeer: true,
+ allowlist: []string{"/ip4/1.2.3.0/ipcidr/24"},
+ endpoint: multiaddrA,
+ peer: peerA,
+ },
+ {
+ name: "Blocked. right network, wrong peer",
+ isConnAllowed: true,
+ isAllowedWithPeer: false,
+ allowlist: []string{"/ip4/1.2.3.0/ipcidr/24" + "/p2p/" + peerB.String()},
+ endpoint: multiaddrA,
+ peer: peerA,
+ },
+ {
+ name: "allowed peer any ip",
+ isConnAllowed: true,
+ isAllowedWithPeer: true,
+ allowlist: []string{"/ip4/0.0.0.0/ipcidr/0"},
+ endpoint: multiaddrA,
+ peer: peerA,
+ },
+ {
+ name: "allowed peer multiple ips in allowlist",
+ isConnAllowed: true,
+ isAllowedWithPeer: true,
+ allowlist: []string{"/ip4/1.2.3.4/p2p/" + peerA.String(), "/ip4/2.2.3.4/p2p/" + peerA.String()},
+ endpoint: multiaddrA,
+ peer: peerA,
+ },
+ {
+ name: "allowed peer multiple ips in allowlist",
+ isConnAllowed: true,
+ isAllowedWithPeer: true,
+ allowlist: []string{"/ip4/1.2.3.4/p2p/" + peerA.String(), "/ip4/1.2.3.4/p2p/" + peerA.String()},
+ endpoint: multiaddrA,
+ peer: peerA,
+ },
+ {
+ name: "allowed peer multiple ips in allowlist",
+ isConnAllowed: true,
+ isAllowedWithPeer: true,
+ allowlist: []string{"/ip4/1.2.3.4/p2p/" + peerA.String(), "/ip4/2.2.3.4/p2p/" + peerA.String()},
+ endpoint: multiaddrB,
+ peer: peerA,
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ allowlist := newAllowlist()
+ for _, maStr := range tc.allowlist {
+ ma, err := multiaddr.NewMultiaddr(maStr)
+ if err != nil {
+ fmt.Printf("failed to parse multiaddr: %s", err)
+ }
+ allowlist.Add(ma)
+ }
+
+ if allowlist.Allowed(tc.endpoint) != tc.isConnAllowed {
+ t.Fatalf("%v: expected %v", !tc.isConnAllowed, tc.isConnAllowed)
+ }
+
+ if allowlist.AllowedPeerAndMultiaddr(tc.peer, tc.endpoint) != tc.isAllowedWithPeer {
+ t.Fatalf("%v: expected %v", !tc.isAllowedWithPeer, tc.isAllowedWithPeer)
+ }
+ })
+ }
+}
+
+func TestRemoved(t *testing.T) {
+ type testCase struct {
+ name string
+ allowedMA string
+ }
+ peerA := test.RandPeerIDFatal(t)
+ maA := multiaddr.StringCast("/ip4/1.2.3.4")
+
+ testCases := []testCase{
+ {name: "ip4", allowedMA: "/ip4/1.2.3.4"},
+ {name: "ip4 with peer", allowedMA: "/ip4/1.2.3.4/p2p/" + peerA.String()},
+ {name: "ip4 network", allowedMA: "/ip4/0.0.0.0/ipcidr/0"},
+ {name: "ip4 network with peer", allowedMA: "/ip4/0.0.0.0/ipcidr/0/p2p/" + peerA.String()},
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ allowlist := newAllowlist()
+ ma := multiaddr.StringCast(tc.allowedMA)
+
+ err := allowlist.Add(ma)
+ if err != nil {
+ t.Fatalf("failed to add ip4: %s", err)
+ }
+
+ if !allowlist.AllowedPeerAndMultiaddr(peerA, maA) {
+ t.Fatalf("addr should be allowed")
+ }
+
+ allowlist.Remove(ma)
+
+ if allowlist.AllowedPeerAndMultiaddr(peerA, maA) {
+ t.Fatalf("addr should not be allowed")
+ }
+ })
+ }
+}
+
+// BenchmarkAllowlistCheck benchmarks the allowlist with plausible conditions.
+func BenchmarkAllowlistCheck(b *testing.B) {
+ allowlist := newAllowlist()
+
+ // How often do we expect a peer to be specified? 1 in N
+ ratioOfSpecifiedPeers := 10
+
+ // How often do we expect an allowlist hit? 1 in N
+ ratioOfAllowlistHit := 100
+
+ // How many multiaddrs in our allowlist?
+ howManyMultiaddrsInAllowList := 1_000
+
+ // How often is the IP addr an IPV6? 1 in N
+ ratioOfIPV6 := 20
+
+ countOfTotalPeersForTest := 100_000
+
+ mas := make([]multiaddr.Multiaddr, countOfTotalPeersForTest)
+ for i := 0; i < countOfTotalPeersForTest; i++ {
+
+ ip := make([]byte, 16)
+ n, err := rand.Reader.Read(ip)
+ if err != nil || n != 16 {
+ b.Fatalf("Failed to generate IP address")
+ }
+
+ var ipString string
+
+ if i%ratioOfIPV6 == 0 {
+ // IPv6
+ ip6 := net.IP(ip)
+ ipString = "/ip6/" + ip6.String()
+ } else {
+ // IPv4
+ ip4 := net.IPv4(ip[0], ip[1], ip[2], ip[3])
+ ipString = "/ip4/" + ip4.String()
+ }
+
+ var ma multiaddr.Multiaddr
+ if i%ratioOfSpecifiedPeers == 0 {
+ ma = multiaddr.StringCast(ipString + "/p2p/" + test.RandPeerIDFatal(b).String())
+ } else {
+ ma = multiaddr.StringCast(ipString)
+ }
+
+ mas[i] = ma
+ }
+
+ for _, ma := range mas[:howManyMultiaddrsInAllowList] {
+ err := allowlist.Add(ma)
+ if err != nil {
+ b.Fatalf("Failed to add multiaddr")
+ }
+ }
+
+ masInAllowList := mas[:howManyMultiaddrsInAllowList]
+ masNotInAllowList := mas[howManyMultiaddrsInAllowList:]
+
+ b.ResetTimer()
+ for n := 0; n < b.N; n++ {
+ if n%ratioOfAllowlistHit == 0 {
+ allowlist.Allowed(masInAllowList[n%len(masInAllowList)])
+ } else {
+ allowlist.Allowed(masNotInAllowList[n%len(masNotInAllowList)])
+ }
+ }
+}
diff --git a/p2p/host/resource-manager/conn_limiter.go b/p2p/host/resource-manager/conn_limiter.go
new file mode 100644
index 0000000000..5c25627464
--- /dev/null
+++ b/p2p/host/resource-manager/conn_limiter.go
@@ -0,0 +1,343 @@
+package rcmgr
+
+import (
+ "math"
+ "net/netip"
+ "slices"
+ "sync"
+ "time"
+
+ "github.com/libp2p/go-libp2p/x/rate"
+)
+
+type ConnLimitPerSubnet struct {
+ // This defines how big the subnet is. For example, a /24 subnet has a
+ // PrefixLength of 24. All IPs that share the same 24-bit prefix are in the
+ // same subnet and are bound to the same limit.
+ PrefixLength int
+ // The maximum number of connections allowed for each subnet.
+ ConnCount int
+}
+
+type NetworkPrefixLimit struct {
+ // The Network prefix for which this limit applies.
+ Network netip.Prefix
+
+ // The maximum number of connections allowed for this network prefix.
+ ConnCount int
+}
+
+// 8 for now so that it matches the number of concurrent dials we may do
+// in swarm_dial.go. With future smart dialing work we should bring this
+// down.
+var defaultMaxConcurrentConns = 8
+
+var defaultIP4Limit = ConnLimitPerSubnet{
+ ConnCount: defaultMaxConcurrentConns,
+ PrefixLength: 32,
+}
+var defaultIP6Limits = []ConnLimitPerSubnet{
+ {
+ ConnCount: defaultMaxConcurrentConns,
+ PrefixLength: 56,
+ },
+ {
+ ConnCount: 8 * defaultMaxConcurrentConns,
+ PrefixLength: 48,
+ },
+}
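+
+// Taken together, the defaults above allow 8 connections per IPv4 address
+// (/32), 8 per IPv6 /56 subnet, and 8 * 8 = 64 per IPv6 /48 subnet.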
+
+var DefaultNetworkPrefixLimitV4 = sortNetworkPrefixes([]NetworkPrefixLimit{
+ {
+ // Loopback address for v4 https://datatracker.ietf.org/doc/html/rfc6890#section-2.2.2
+ Network: netip.MustParsePrefix("127.0.0.0/8"),
+ ConnCount: math.MaxInt, // Unlimited
+ },
+})
+var DefaultNetworkPrefixLimitV6 = sortNetworkPrefixes([]NetworkPrefixLimit{
+ {
+ // Loopback address for v6 https://datatracker.ietf.org/doc/html/rfc6890#section-2.2.3
+ Network: netip.MustParsePrefix("::1/128"),
+ ConnCount: math.MaxInt, // Unlimited
+ },
+})
+
+// Network prefix limits must be sorted from most specific to least specific. This
+// lets us actually use the more specific limits; otherwise only the less specific
+// ones would be matched, e.g. 1.2.3.0/24 must come before 1.2.0.0/16.
+func sortNetworkPrefixes(limits []NetworkPrefixLimit) []NetworkPrefixLimit {
+ slices.SortStableFunc(limits, func(a, b NetworkPrefixLimit) int {
+ return b.Network.Bits() - a.Network.Bits()
+ })
+ return limits
+}
+
+// WithNetworkPrefixLimit sets the limits for the number of connections allowed
+// for a specific Network Prefix. Use this when you want to set higher limits
+// for a specific subnet than the default limit per subnet.
+func WithNetworkPrefixLimit(ipv4 []NetworkPrefixLimit, ipv6 []NetworkPrefixLimit) Option {
+ return func(rm *resourceManager) error {
+ if ipv4 != nil {
+ rm.connLimiter.networkPrefixLimitV4 = sortNetworkPrefixes(ipv4)
+ }
+ if ipv6 != nil {
+ rm.connLimiter.networkPrefixLimitV6 = sortNetworkPrefixes(ipv6)
+ }
+ return nil
+ }
+}
+
+// WithLimitPerSubnet sets the limits for the number of connections allowed per
+// subnet. This will limit the number of connections per subnet if that subnet
+// is not defined in the NetworkPrefixLimit option. Think of this as a default
+// limit for any given subnet.
+func WithLimitPerSubnet(ipv4 []ConnLimitPerSubnet, ipv6 []ConnLimitPerSubnet) Option {
+ return func(rm *resourceManager) error {
+ if ipv4 != nil {
+ rm.connLimiter.connLimitPerSubnetV4 = ipv4
+ }
+ if ipv6 != nil {
+ rm.connLimiter.connLimitPerSubnetV6 = ipv6
+ }
+ return nil
+ }
+}
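+
+// As an illustrative sketch (not a test; the prefix and counts here are
+// made-up values), the two options above can be combined when constructing a
+// resource manager:
+//
+//	rm, err := NewResourceManager(
+//		NewFixedLimiter(DefaultLimits.AutoScale()),
+//		WithNetworkPrefixLimit([]NetworkPrefixLimit{
+//			{Network: netip.MustParsePrefix("192.168.1.0/24"), ConnCount: 256},
+//		}, nil),
+//		WithLimitPerSubnet([]ConnLimitPerSubnet{
+//			{PrefixLength: 32, ConnCount: 8},
+//		}, nil),
+//	)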
+
+type connLimiter struct {
+ mu sync.Mutex
+
+ // Specific Network Prefix limits. If these are set, they take precedence over the
+ // subnet limits.
+ // These must be sorted by most specific to least specific.
+ networkPrefixLimitV4 []NetworkPrefixLimit
+ networkPrefixLimitV6 []NetworkPrefixLimit
+ connsPerNetworkPrefixV4 []int
+ connsPerNetworkPrefixV6 []int
+
+ // Subnet limits.
+ connLimitPerSubnetV4 []ConnLimitPerSubnet
+ connLimitPerSubnetV6 []ConnLimitPerSubnet
+ ip4connsPerLimit []map[netip.Prefix]int
+ ip6connsPerLimit []map[netip.Prefix]int
+}
+
+func newConnLimiter() *connLimiter {
+ return &connLimiter{
+ networkPrefixLimitV4: DefaultNetworkPrefixLimitV4,
+ networkPrefixLimitV6: DefaultNetworkPrefixLimitV6,
+
+ connLimitPerSubnetV4: []ConnLimitPerSubnet{defaultIP4Limit},
+ connLimitPerSubnetV6: defaultIP6Limits,
+ }
+}
+
+func (cl *connLimiter) addNetworkPrefixLimit(isIP6 bool, npLimit NetworkPrefixLimit) {
+ cl.mu.Lock()
+ defer cl.mu.Unlock()
+ if isIP6 {
+ cl.networkPrefixLimitV6 = append(cl.networkPrefixLimitV6, npLimit)
+ cl.networkPrefixLimitV6 = sortNetworkPrefixes(cl.networkPrefixLimitV6)
+ } else {
+ cl.networkPrefixLimitV4 = append(cl.networkPrefixLimitV4, npLimit)
+ cl.networkPrefixLimitV4 = sortNetworkPrefixes(cl.networkPrefixLimitV4)
+ }
+}
+
+// addConn adds a connection for the given IP address. It returns true if the connection is allowed.
+func (cl *connLimiter) addConn(ip netip.Addr) bool {
+ cl.mu.Lock()
+ defer cl.mu.Unlock()
+ networkPrefixLimits := cl.networkPrefixLimitV4
+ connsPerNetworkPrefix := cl.connsPerNetworkPrefixV4
+ limits := cl.connLimitPerSubnetV4
+ connsPerLimit := cl.ip4connsPerLimit
+ isIP6 := ip.Is6()
+ if isIP6 {
+ networkPrefixLimits = cl.networkPrefixLimitV6
+ connsPerNetworkPrefix = cl.connsPerNetworkPrefixV6
+ limits = cl.connLimitPerSubnetV6
+ connsPerLimit = cl.ip6connsPerLimit
+ }
+
+ // Check Network Prefix limits first
+ if len(connsPerNetworkPrefix) == 0 && len(networkPrefixLimits) > 0 {
+ // Initialize the counts
+ connsPerNetworkPrefix = make([]int, len(networkPrefixLimits))
+ if isIP6 {
+ cl.connsPerNetworkPrefixV6 = connsPerNetworkPrefix
+ } else {
+ cl.connsPerNetworkPrefixV4 = connsPerNetworkPrefix
+ }
+ }
+
+ for i, limit := range networkPrefixLimits {
+ if limit.Network.Contains(ip) {
+ if connsPerNetworkPrefix[i]+1 > limit.ConnCount {
+ return false
+ }
+ connsPerNetworkPrefix[i]++
+ // Done. If we find a match in the network prefix limits, we use
+ // that and don't use the general subnet limits.
+ return true
+ }
+ }
+
+ if len(connsPerLimit) == 0 && len(limits) > 0 {
+ connsPerLimit = make([]map[netip.Prefix]int, len(limits))
+ if isIP6 {
+ cl.ip6connsPerLimit = connsPerLimit
+ } else {
+ cl.ip4connsPerLimit = connsPerLimit
+ }
+ }
+
+ for i, limit := range limits {
+ prefix, err := ip.Prefix(limit.PrefixLength)
+ if err != nil {
+ return false
+ }
+ counts, ok := connsPerLimit[i][prefix]
+ if !ok {
+ if connsPerLimit[i] == nil {
+ connsPerLimit[i] = make(map[netip.Prefix]int)
+ }
+ connsPerLimit[i][prefix] = 0
+ }
+ if counts+1 > limit.ConnCount {
+ return false
+ }
+ }
+
+ // All limit checks passed, now we update the counts
+ for i, limit := range limits {
+ prefix, _ := ip.Prefix(limit.PrefixLength)
+ connsPerLimit[i][prefix]++
+ }
+
+ return true
+}
+
+func (cl *connLimiter) rmConn(ip netip.Addr) {
+ cl.mu.Lock()
+ defer cl.mu.Unlock()
+ networkPrefixLimits := cl.networkPrefixLimitV4
+ connsPerNetworkPrefix := cl.connsPerNetworkPrefixV4
+ limits := cl.connLimitPerSubnetV4
+ connsPerLimit := cl.ip4connsPerLimit
+ isIP6 := ip.Is6()
+ if isIP6 {
+ networkPrefixLimits = cl.networkPrefixLimitV6
+ connsPerNetworkPrefix = cl.connsPerNetworkPrefixV6
+ limits = cl.connLimitPerSubnetV6
+ connsPerLimit = cl.ip6connsPerLimit
+ }
+
+ // Check NetworkPrefix limits first
+ if len(connsPerNetworkPrefix) == 0 && len(networkPrefixLimits) > 0 {
+ // Initialize just in case. We should have already initialized in
+ // addConn, but if the caller calls rmConn first we don't want to panic.
+ connsPerNetworkPrefix = make([]int, len(networkPrefixLimits))
+ if isIP6 {
+ cl.connsPerNetworkPrefixV6 = connsPerNetworkPrefix
+ } else {
+ cl.connsPerNetworkPrefixV4 = connsPerNetworkPrefix
+ }
+ }
+ for i, limit := range networkPrefixLimits {
+ if limit.Network.Contains(ip) {
+ count := connsPerNetworkPrefix[i]
+ if count <= 0 {
+ log.Error("unexpected conn count for ip. Was this not added with addConn first?", "ip", ip)
+ return
+ }
+ connsPerNetworkPrefix[i]--
+ // Done. We updated the count in the defined network prefix limit.
+ return
+ }
+ }
+
+ if len(connsPerLimit) == 0 && len(limits) > 0 {
+ // Initialize just in case. We should have already initialized in
+ // addConn, but if the caller calls rmConn first we don't want to panic.
+ connsPerLimit = make([]map[netip.Prefix]int, len(limits))
+ if isIP6 {
+ cl.ip6connsPerLimit = connsPerLimit
+ } else {
+ cl.ip4connsPerLimit = connsPerLimit
+ }
+ }
+
+ for i, limit := range limits {
+ prefix, err := ip.Prefix(limit.PrefixLength)
+ if err != nil {
+ // Unexpected since we should have seen this IP before in addConn
+ log.Error("unexpected error getting prefix", "err", err)
+ continue
+ }
+ counts, ok := connsPerLimit[i][prefix]
+ if !ok || counts == 0 {
+ // Unexpected, but don't panic
+ log.Error("unexpected conn count", "prefix", prefix, "ok", ok, "count", counts)
+ continue
+ }
+ connsPerLimit[i][prefix]--
+ if connsPerLimit[i][prefix] <= 0 {
+ delete(connsPerLimit[i], prefix)
+ }
+ }
+}
+
+// handshakeDuration is a high-end estimate of the QUIC handshake time.
+const handshakeDuration = 5 * time.Second
+
+// sourceAddressRPS is the refill rate for the source address verification rate limiter.
+// A spoofed address, if not verified, will hold a connLimiter token for handshakeDuration.
+// The slow refill rate here favours increased latency (because of address verification)
+// in exchange for reducing the chance that successful spoofing causes a DoS.
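+// With handshakeDuration = 5s this works out to 1s / (2 * 5s) = 0.1 tokens per
+// second, i.e. one unverified handshake admitted per address bucket every 10 seconds.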
+const sourceAddressRPS = float64(1.0*time.Second) / (2 * float64(handshakeDuration))
+
+// newVerifySourceAddressRateLimiter returns a rate limiter for verifying source addresses.
+// The returned limiter allows maxAllowedConns / 2 unverified addresses to begin handshake.
+// This ensures that in the event someone is spoofing IPs, 1/2 the maximum allowed connections
+// will be able to connect, although they will have increased latency because of address
+// verification.
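+// For example, with the default IPv4 /32 limit of 8 connections per address,
+// each address gets a burst of 8 / 2 = 4 unverified handshakes.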
+func newVerifySourceAddressRateLimiter(cl *connLimiter) *rate.Limiter {
+ networkPrefixLimits := make([]rate.PrefixLimit, 0, len(cl.networkPrefixLimitV4)+len(cl.networkPrefixLimitV6))
+ for _, l := range cl.networkPrefixLimitV4 {
+ networkPrefixLimits = append(networkPrefixLimits, rate.PrefixLimit{
+ Prefix: l.Network,
+ Limit: rate.Limit{RPS: sourceAddressRPS, Burst: l.ConnCount / 2},
+ })
+ }
+ for _, l := range cl.networkPrefixLimitV6 {
+ networkPrefixLimits = append(networkPrefixLimits, rate.PrefixLimit{
+ Prefix: l.Network,
+ Limit: rate.Limit{RPS: sourceAddressRPS, Burst: l.ConnCount / 2},
+ })
+ }
+
+ ipv4SubnetLimits := make([]rate.SubnetLimit, 0, len(cl.connLimitPerSubnetV4))
+ for _, l := range cl.connLimitPerSubnetV4 {
+ ipv4SubnetLimits = append(ipv4SubnetLimits, rate.SubnetLimit{
+ PrefixLength: l.PrefixLength,
+ Limit: rate.Limit{RPS: sourceAddressRPS, Burst: l.ConnCount / 2},
+ })
+ }
+
+ ipv6SubnetLimits := make([]rate.SubnetLimit, 0, len(cl.connLimitPerSubnetV6))
+ for _, l := range cl.connLimitPerSubnetV6 {
+ ipv6SubnetLimits = append(ipv6SubnetLimits, rate.SubnetLimit{
+ PrefixLength: l.PrefixLength,
+ Limit: rate.Limit{RPS: sourceAddressRPS, Burst: l.ConnCount / 2},
+ })
+ }
+
+ return &rate.Limiter{
+ NetworkPrefixLimits: networkPrefixLimits,
+ SubnetRateLimiter: rate.SubnetLimiter{
+ IPv4SubnetLimits: ipv4SubnetLimits,
+ IPv6SubnetLimits: ipv6SubnetLimits,
+ GracePeriod: 1 * time.Minute,
+ },
+ }
+}
diff --git a/p2p/host/resource-manager/conn_limiter_test.go b/p2p/host/resource-manager/conn_limiter_test.go
new file mode 100644
index 0000000000..d86f9d7a39
--- /dev/null
+++ b/p2p/host/resource-manager/conn_limiter_test.go
@@ -0,0 +1,392 @@
+package rcmgr
+
+import (
+ "encoding/binary"
+ "net"
+ "net/netip"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/x/rate"
+ "github.com/stretchr/testify/require"
+)
+
+func TestItLimits(t *testing.T) {
+ t.Run("IPv4", func(t *testing.T) {
+ ip, err := netip.ParseAddr("1.2.3.4")
+ require.NoError(t, err)
+ cl := newConnLimiter()
+ cl.connLimitPerSubnetV4[0].ConnCount = 1
+ require.True(t, cl.addConn(ip))
+
+ // should fail the second time
+ require.False(t, cl.addConn(ip))
+
+ otherIP, err := netip.ParseAddr("1.2.3.5")
+ require.NoError(t, err)
+ require.True(t, cl.addConn(otherIP))
+ })
+
+ t.Run("IPv4 removal", func(t *testing.T) {
+ ip, err := netip.ParseAddr("1.2.3.4")
+ require.NoError(t, err)
+ cl := newConnLimiter()
+ cl.connLimitPerSubnetV4[0].ConnCount = 1
+ require.True(t, cl.addConn(ip))
+
+ // should fail the second time
+ require.False(t, cl.addConn(ip))
+ // remove the connection
+ cl.rmConn(ip)
+ // should succeed now
+ require.True(t, cl.addConn(ip))
+ })
+
+ t.Run("IPv6", func(t *testing.T) {
+ ip, err := netip.ParseAddr("1:2:3:4::1")
+ require.NoError(t, err)
+ cl := newConnLimiter()
+ original := cl.connLimitPerSubnetV6[0].ConnCount
+ cl.connLimitPerSubnetV6[0].ConnCount = 1
+ defer func() {
+ cl.connLimitPerSubnetV6[0].ConnCount = original
+ }()
+ require.True(t, cl.addConn(ip))
+
+ // should fail the second time
+ require.False(t, cl.addConn(ip))
+ otherIPSameSubnet := netip.MustParseAddr("1:2:3:4::2")
+ require.False(t, cl.addConn(otherIPSameSubnet))
+
+ otherIP := netip.MustParseAddr("2:2:3:4::2")
+ require.True(t, cl.addConn(otherIP))
+ })
+
+ t.Run("IPv6 with multiple limits", func(t *testing.T) {
+ cl := newConnLimiter()
+ for i := 0; i < defaultMaxConcurrentConns; i++ {
+ ip := net.ParseIP("ff:2:3:4::1")
+ binary.BigEndian.PutUint16(ip[14:], uint16(i))
+ ipAddr := netip.MustParseAddr(ip.String())
+ require.True(t, cl.addConn(ipAddr))
+ }
+
+ // Next one should fail
+ ip := net.ParseIP("ff:2:3:4::1")
+ binary.BigEndian.PutUint16(ip[14:], uint16(defaultMaxConcurrentConns+1))
+ require.False(t, cl.addConn(netip.MustParseAddr(ip.String())))
+
+ // But on a different root subnet should work
+ otherIP := netip.MustParseAddr("ffef:2:3::1")
+ require.True(t, cl.addConn(otherIP))
+
+ // But too many on the next subnet limit will fail too
+ for i := 0; i < defaultMaxConcurrentConns*8; i++ {
+ ip := net.ParseIP("ffef:2:3:4::1")
+ binary.BigEndian.PutUint16(ip[5:7], uint16(i))
+ ipAddr := netip.MustParseAddr(ip.String())
+ require.True(t, cl.addConn(ipAddr))
+ }
+
+ ip = net.ParseIP("ffef:2:3:4::1")
+ binary.BigEndian.PutUint16(ip[5:7], uint16(defaultMaxConcurrentConns*8+1))
+ ipAddr := netip.MustParseAddr(ip.String())
+ require.False(t, cl.addConn(ipAddr))
+ })
+
+ t.Run("IPv4 with localhost", func(t *testing.T) {
+ cl := &connLimiter{
+ networkPrefixLimitV4: DefaultNetworkPrefixLimitV4,
+ connLimitPerSubnetV4: []ConnLimitPerSubnet{
+ {PrefixLength: 0, ConnCount: 1}, // 1 connection for the whole IPv4 space
+ },
+ }
+
+ ip := netip.MustParseAddr("1.2.3.4")
+ require.True(t, cl.addConn(ip))
+
+ ip = netip.MustParseAddr("4.3.2.1")
+ // should fail the second time, we only allow 1 connection for the whole IPv4 space
+ require.False(t, cl.addConn(ip))
+
+ ip = netip.MustParseAddr("127.0.0.1")
+ // Succeeds because we defined an explicit limit for the loopback subnet
+ require.True(t, cl.addConn(ip))
+ })
+}
+
+func genIP(data *[]byte) (netip.Addr, bool) {
+ if len(*data) < 1 {
+ return netip.Addr{}, false
+ }
+
+ genIP6 := (*data)[0]&0x01 == 1
+ bytesRequired := 4
+ if genIP6 {
+ bytesRequired = 16
+ }
+
+ if len((*data)[1:]) < bytesRequired {
+ return netip.Addr{}, false
+ }
+
+ *data = (*data)[1:]
+ ip, ok := netip.AddrFromSlice((*data)[:bytesRequired])
+ *data = (*data)[bytesRequired:]
+ return ip, ok
+}
+
+func FuzzConnLimiter(f *testing.F) {
+ // The goal is to try to enter a state where the count is incorrectly 0
+ f.Fuzz(func(t *testing.T, data []byte) {
+ ips := make([]netip.Addr, 0, len(data)/5)
+ for {
+ ip, ok := genIP(&data)
+ if !ok {
+ break
+ }
+ ips = append(ips, ip)
+ }
+
+ cl := newConnLimiter()
+ addedConns := make([]netip.Addr, 0, len(ips))
+ for _, ip := range ips {
+ if cl.addConn(ip) {
+ addedConns = append(addedConns, ip)
+ }
+ }
+
+ addedCount := 0
+ for _, ip := range cl.ip4connsPerLimit {
+ for _, count := range ip {
+ addedCount += count
+ }
+ }
+ for _, ip := range cl.ip6connsPerLimit {
+ for _, count := range ip {
+ addedCount += count
+ }
+ }
+ for _, count := range cl.connsPerNetworkPrefixV4 {
+ addedCount += count
+ }
+ for _, count := range cl.connsPerNetworkPrefixV6 {
+ addedCount += count
+ }
+ if addedCount == 0 && len(addedConns) > 0 {
+ t.Fatalf("added count: %d", addedCount)
+ }
+
+ for _, ip := range addedConns {
+ cl.rmConn(ip)
+ }
+
+ leftoverCount := 0
+ for _, ip := range cl.ip4connsPerLimit {
+ for _, count := range ip {
+ leftoverCount += count
+ }
+ }
+ for _, ip := range cl.ip6connsPerLimit {
+ for _, count := range ip {
+ leftoverCount += count
+ }
+ }
+ for _, count := range cl.connsPerNetworkPrefixV4 {
+ leftoverCount += count
+ }
+ for _, count := range cl.connsPerNetworkPrefixV6 {
+ leftoverCount += count
+ }
+ if leftoverCount != 0 {
+ t.Fatalf("leftover count: %d", leftoverCount)
+ }
+ })
+}
+
+func TestSortedNetworkPrefixLimits(t *testing.T) {
+ npLimits := []NetworkPrefixLimit{
+ {
+ Network: netip.MustParsePrefix("1.2.0.0/16"),
+ },
+ {
+ Network: netip.MustParsePrefix("1.2.3.0/28"),
+ },
+ {
+ Network: netip.MustParsePrefix("1.2.3.4/32"),
+ },
+ }
+ npLimits = sortNetworkPrefixes(npLimits)
+ sorted := []NetworkPrefixLimit{
+ {
+ Network: netip.MustParsePrefix("1.2.3.4/32"),
+ },
+ {
+ Network: netip.MustParsePrefix("1.2.3.0/28"),
+ },
+ {
+ Network: netip.MustParsePrefix("1.2.0.0/16"),
+ },
+ }
+ require.EqualValues(t, sorted, npLimits)
+}
+
+func TestNewVerifySourceAddressRateLimiter(t *testing.T) {
+ testCases := []struct {
+ name string
+ cl *connLimiter
+ expected *rate.Limiter
+ }{
+ {
+ name: "basic configuration",
+ cl: &connLimiter{
+ networkPrefixLimitV4: []NetworkPrefixLimit{
+ {
+ Network: netip.MustParsePrefix("192.168.0.0/16"),
+ ConnCount: 10,
+ },
+ },
+ networkPrefixLimitV6: []NetworkPrefixLimit{
+ {
+ Network: netip.MustParsePrefix("2001:db8::/32"),
+ ConnCount: 20,
+ },
+ },
+ connLimitPerSubnetV4: []ConnLimitPerSubnet{
+ {
+ PrefixLength: 24,
+ ConnCount: 5,
+ },
+ },
+ connLimitPerSubnetV6: []ConnLimitPerSubnet{
+ {
+ PrefixLength: 56,
+ ConnCount: 8,
+ },
+ },
+ },
+ expected: &rate.Limiter{
+ NetworkPrefixLimits: []rate.PrefixLimit{
+ {
+ Prefix: netip.MustParsePrefix("192.168.0.0/16"),
+ Limit: rate.Limit{RPS: sourceAddressRPS, Burst: 5},
+ },
+ {
+ Prefix: netip.MustParsePrefix("2001:db8::/32"),
+ Limit: rate.Limit{RPS: sourceAddressRPS, Burst: 10},
+ },
+ },
+ SubnetRateLimiter: rate.SubnetLimiter{
+ IPv4SubnetLimits: []rate.SubnetLimit{
+ {
+ PrefixLength: 24,
+ Limit: rate.Limit{RPS: sourceAddressRPS, Burst: 2},
+ },
+ },
+ IPv6SubnetLimits: []rate.SubnetLimit{
+ {
+ PrefixLength: 56,
+ Limit: rate.Limit{RPS: sourceAddressRPS, Burst: 4},
+ },
+ },
+ GracePeriod: 1 * time.Minute,
+ },
+ },
+ },
+ {
+ name: "empty configuration",
+ cl: &connLimiter{},
+ expected: &rate.Limiter{
+ NetworkPrefixLimits: []rate.PrefixLimit{},
+ SubnetRateLimiter: rate.SubnetLimiter{
+ IPv4SubnetLimits: []rate.SubnetLimit{},
+ IPv6SubnetLimits: []rate.SubnetLimit{},
+ GracePeriod: 1 * time.Minute,
+ },
+ },
+ },
+ {
+ name: "multiple network prefixes",
+ cl: &connLimiter{
+ networkPrefixLimitV4: []NetworkPrefixLimit{
+ {
+ Network: netip.MustParsePrefix("192.168.0.0/16"),
+ ConnCount: 10,
+ },
+ {
+ Network: netip.MustParsePrefix("10.0.0.0/8"),
+ ConnCount: 20,
+ },
+ },
+ connLimitPerSubnetV4: []ConnLimitPerSubnet{
+ {
+ PrefixLength: 24,
+ ConnCount: 5,
+ },
+ {
+ PrefixLength: 16,
+ ConnCount: 10,
+ },
+ },
+ },
+ expected: &rate.Limiter{
+ NetworkPrefixLimits: []rate.PrefixLimit{
+ {
+ Prefix: netip.MustParsePrefix("192.168.0.0/16"),
+ Limit: rate.Limit{RPS: sourceAddressRPS, Burst: 5},
+ },
+ {
+ Prefix: netip.MustParsePrefix("10.0.0.0/8"),
+ Limit: rate.Limit{RPS: sourceAddressRPS, Burst: 10},
+ },
+ },
+ SubnetRateLimiter: rate.SubnetLimiter{
+ IPv4SubnetLimits: []rate.SubnetLimit{
+ {
+ PrefixLength: 24,
+ Limit: rate.Limit{RPS: sourceAddressRPS, Burst: 2},
+ },
+ {
+ PrefixLength: 16,
+ Limit: rate.Limit{RPS: sourceAddressRPS, Burst: 5},
+ },
+ },
+ IPv6SubnetLimits: []rate.SubnetLimit{},
+ GracePeriod: 1 * time.Minute,
+ },
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ actual := newVerifySourceAddressRateLimiter(tc.cl)
+
+ require.Equal(t, len(tc.expected.NetworkPrefixLimits), len(actual.NetworkPrefixLimits))
+ for i, expected := range tc.expected.NetworkPrefixLimits {
+ actual := actual.NetworkPrefixLimits[i]
+ require.Equal(t, expected.Prefix, actual.Prefix)
+ require.Equal(t, expected.RPS, actual.RPS)
+ require.Equal(t, expected.Burst, actual.Burst)
+ }
+
+ require.Equal(t, len(tc.expected.SubnetRateLimiter.IPv4SubnetLimits), len(actual.SubnetRateLimiter.IPv4SubnetLimits))
+ for i, expected := range tc.expected.SubnetRateLimiter.IPv4SubnetLimits {
+ actual := actual.SubnetRateLimiter.IPv4SubnetLimits[i]
+ require.Equal(t, expected.PrefixLength, actual.PrefixLength)
+ require.Equal(t, expected.RPS, actual.RPS)
+ require.Equal(t, expected.Burst, actual.Burst)
+ }
+
+ require.Equal(t, len(tc.expected.SubnetRateLimiter.IPv6SubnetLimits), len(actual.SubnetRateLimiter.IPv6SubnetLimits))
+ for i, expected := range tc.expected.SubnetRateLimiter.IPv6SubnetLimits {
+ actual := actual.SubnetRateLimiter.IPv6SubnetLimits[i]
+ require.Equal(t, expected.PrefixLength, actual.PrefixLength)
+ require.Equal(t, expected.RPS, actual.RPS)
+ require.Equal(t, expected.Burst, actual.Burst)
+ }
+
+ require.Equal(t, tc.expected.SubnetRateLimiter.GracePeriod, actual.SubnetRateLimiter.GracePeriod)
+ })
+ }
+}
diff --git a/p2p/host/resource-manager/conn_rate_limiter.go b/p2p/host/resource-manager/conn_rate_limiter.go
new file mode 100644
index 0000000000..1f44edffe3
--- /dev/null
+++ b/p2p/host/resource-manager/conn_rate_limiter.go
@@ -0,0 +1,59 @@
+package rcmgr
+
+import (
+ "net/netip"
+ "time"
+
+ "github.com/libp2p/go-libp2p/x/rate"
+)
+
+var defaultIPv4SubnetLimits = []rate.SubnetLimit{
+ {
+ PrefixLength: 32,
+ Limit: rate.Limit{RPS: 0.2, Burst: 2 * defaultMaxConcurrentConns},
+ },
+}
+
+var defaultIPv6SubnetLimits = []rate.SubnetLimit{
+ {
+ PrefixLength: 56,
+ Limit: rate.Limit{RPS: 0.2, Burst: 2 * defaultMaxConcurrentConns},
+ },
+ {
+ PrefixLength: 48,
+ Limit: rate.Limit{RPS: 0.5, Burst: 10 * defaultMaxConcurrentConns},
+ },
+}
+
+// defaultNetworkPrefixLimits ensures that all connections on localhost always succeed.
+var defaultNetworkPrefixLimits = []rate.PrefixLimit{
+ {
+ Prefix: netip.MustParsePrefix("127.0.0.0/8"),
+ Limit: rate.Limit{},
+ },
+ {
+ Prefix: netip.MustParsePrefix("::1/128"),
+ Limit: rate.Limit{},
+ },
+}
+
+// WithConnRateLimiters sets a custom rate limiter for new connections.
+// connRateLimiter is used for OpenConnection calls.
+func WithConnRateLimiters(connRateLimiter *rate.Limiter) Option {
+ return func(rm *resourceManager) error {
+ rm.connRateLimiter = connRateLimiter
+ return nil
+ }
+}
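+
+// As a sketch (the values are illustrative, not defaults; limiter stands in
+// for any rcmgr.Limiter), a stricter connection rate limiter could be
+// installed like this:
+//
+//	rm, err := NewResourceManager(limiter, WithConnRateLimiters(&rate.Limiter{
+//		NetworkPrefixLimits: defaultNetworkPrefixLimits,
+//		SubnetRateLimiter: rate.SubnetLimiter{
+//			IPv4SubnetLimits: []rate.SubnetLimit{
+//				{PrefixLength: 32, Limit: rate.Limit{RPS: 0.1, Burst: 8}},
+//			},
+//			GracePeriod: time.Minute,
+//		},
+//	}))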
+
+func newConnRateLimiter() *rate.Limiter {
+ return &rate.Limiter{
+ NetworkPrefixLimits: defaultNetworkPrefixLimits,
+ GlobalLimit: rate.Limit{},
+ SubnetRateLimiter: rate.SubnetLimiter{
+ IPv4SubnetLimits: defaultIPv4SubnetLimits,
+ IPv6SubnetLimits: defaultIPv6SubnetLimits,
+ GracePeriod: 1 * time.Minute,
+ },
+ }
+}
diff --git a/p2p/host/resource-manager/docs/allowlist.md b/p2p/host/resource-manager/docs/allowlist.md
new file mode 100644
index 0000000000..6cf88e042d
--- /dev/null
+++ b/p2p/host/resource-manager/docs/allowlist.md
@@ -0,0 +1,54 @@
+# Allowlist
+
+Imagine you have a node that is getting overloaded by possibly malicious
+incoming connections. Once its resource limits are exhausted, this node won't
+be able to accept incoming connections even from peers it _knows_ to be good.
+The node would effectively be _eclipsed_ from the network, since no other node
+will be able to connect to it.
+
+This is the problem that the Allowlist is designed to solve.
+
+## Design Goals
+
+- We should not fail to allocate a resource for an allowlisted peer because the
+ normal transient and system scopes are at their limits. This is the minimum
+ bar to avoid eclipse attacks.
+- Minimal changes to resource manager and existing code (e.g. go-libp2p).
+- The allowlist scope itself is limited to avoid giving an allowlisted peer the
+ ability to DoS a node.
+- PeerIDs can optionally be fed into the allowlist. This will give an extra
+ step of verification before continuing to allow the peer to open streams.
+ - A peer may be able to open a connection, but after the handshake, if it's
+ not an expected peer id we move it to the normal system scope.
+- We can have multiple PeerIDs for a given IP addr.
+- No extra cost for the happy path when we are still below system and transient
+ limits.
+
+## Proposed change
+
+Add a change to `ResourceManager.OpenConnection` so that it accepts a multiaddr
+parameter of the endpoint the connection is for.
+
+Add a change to `ResourceManager` to initialize it with a set of allowlisted
+multiaddrs. This set can be modified at runtime as well for dynamic updating.
+
+For example, an allowlist set could look like:
+```
+/ip4/1.1.1.1
+/ip6/2345:0425:2CA1::0567:5673:23b5
+/ip4/192.168.1.1/p2p/qmFoo
+/ip4/192.168.1.1/p2p/qmBar
+/ip4/1.2.3.0/ipcidr/24
+```
+
+When a new connection is opened, the resource manager tries to allocate it with
+the normal system and transient resource scopes. If that fails, it checks
+whether the multiaddr matches an item in the set of allowlisted multiaddrs. If
+so, it creates the connection resource scope using the allowlist-specific
+system and transient resource scopes. If it wasn't an allowlisted multiaddr, it
+fails as before.
+
+When an allowlisted connection is tied to a peer id and transferred with
+`ConnManagementScope.SetPeer`, we check if that peer id matches the expected
+value in the allowlist (if it exists). If it does not match, we attempt to
+transfer this resource to the normal system and peer scope. If that transfer
+fails, we close the connection.
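+
+As a sketch (mirroring the `ExampleWithAllowlistedMultiaddrs` test in
+`allowlist_test.go`), configuring a resource manager with an allowlist could
+look like:
+
+```go
+limits := rcmgr.DefaultLimits.AutoScale()
+rm, err := rcmgr.NewResourceManager(rcmgr.NewFixedLimiter(limits),
+	rcmgr.WithAllowlistedMultiaddrs([]multiaddr.Multiaddr{
+		// Any peer connecting from this IP address
+		multiaddr.StringCast("/ip4/1.2.3.4"),
+		// Any peer in this /24 range
+		multiaddr.StringCast("/ip4/1.2.3.0/ipcidr/24"),
+	}))
+```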
diff --git a/p2p/host/resource-manager/error.go b/p2p/host/resource-manager/error.go
new file mode 100644
index 0000000000..1e87e00aae
--- /dev/null
+++ b/p2p/host/resource-manager/error.go
@@ -0,0 +1,81 @@
+package rcmgr
+
+import (
+ "errors"
+
+ "github.com/libp2p/go-libp2p/core/network"
+)
+
+type ErrStreamOrConnLimitExceeded struct {
+ current, attempted, limit int
+ err error
+}
+
+func (e *ErrStreamOrConnLimitExceeded) Error() string { return e.err.Error() }
+func (e *ErrStreamOrConnLimitExceeded) Unwrap() error { return e.err }
+
+// edge may be "" if this is not an edge error
+func logValuesStreamLimit(scope, edge string, dir network.Direction, stat network.ScopeStat, err error) []interface{} {
+ logValues := make([]interface{}, 0, 2*8)
+ logValues = append(logValues, "scope", scope)
+ if edge != "" {
+ logValues = append(logValues, "edge", edge)
+ }
+ logValues = append(logValues, "direction", dir)
+ var e *ErrStreamOrConnLimitExceeded
+ if errors.As(err, &e) {
+ logValues = append(logValues,
+ "current", e.current,
+ "attempted", e.attempted,
+ "limit", e.limit,
+ )
+ }
+ return append(logValues, "stat", stat, "error", err)
+}
+
+// edge may be "" if this is not an edge error
+func logValuesConnLimit(scope, edge string, dir network.Direction, usefd bool, stat network.ScopeStat, err error) []interface{} {
+ logValues := make([]interface{}, 0, 2*9)
+ logValues = append(logValues, "scope", scope)
+ if edge != "" {
+ logValues = append(logValues, "edge", edge)
+ }
+ logValues = append(logValues, "direction", dir, "usefd", usefd)
+ var e *ErrStreamOrConnLimitExceeded
+ if errors.As(err, &e) {
+ logValues = append(logValues,
+ "current", e.current,
+ "attempted", e.attempted,
+ "limit", e.limit,
+ )
+ }
+ return append(logValues, "stat", stat, "error", err)
+}
+
+type ErrMemoryLimitExceeded struct {
+ current, attempted, limit int64
+ priority uint8
+ err error
+}
+
+func (e *ErrMemoryLimitExceeded) Error() string { return e.err.Error() }
+func (e *ErrMemoryLimitExceeded) Unwrap() error { return e.err }
+
+// edge may be "" if this is not an edge error
+func logValuesMemoryLimit(scope, edge string, stat network.ScopeStat, err error) []interface{} {
+ logValues := make([]interface{}, 0, 2*8)
+ logValues = append(logValues, "scope", scope)
+ if edge != "" {
+ logValues = append(logValues, "edge", edge)
+ }
+ var e *ErrMemoryLimitExceeded
+ if errors.As(err, &e) {
+ logValues = append(logValues,
+ "current", e.current,
+ "attempted", e.attempted,
+ "priority", e.priority,
+ "limit", e.limit,
+ )
+ }
+ return append(logValues, "stat", stat, "error", err)
+}
diff --git a/p2p/host/resource-manager/extapi.go b/p2p/host/resource-manager/extapi.go
new file mode 100644
index 0000000000..415d7f8bd1
--- /dev/null
+++ b/p2p/host/resource-manager/extapi.go
@@ -0,0 +1,151 @@
+package rcmgr
+
+import (
+ "bytes"
+ "sort"
+ "strings"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/protocol"
+)
+
+// ResourceScopeLimiter is a trait interface that allows you to access scope limits.
+type ResourceScopeLimiter interface {
+ Limit() Limit
+ SetLimit(Limit)
+}
+
+var _ ResourceScopeLimiter = (*resourceScope)(nil)
+
+// ResourceManagerState is a trait interface that allows you to access the resource manager's state.
+type ResourceManagerState interface {
+ ListServices() []string
+ ListProtocols() []protocol.ID
+ ListPeers() []peer.ID
+
+ Stat() ResourceManagerStat
+}
+
+type ResourceManagerStat struct {
+ System network.ScopeStat
+ Transient network.ScopeStat
+ Services map[string]network.ScopeStat
+ Protocols map[protocol.ID]network.ScopeStat
+ Peers map[peer.ID]network.ScopeStat
+}
+
+var _ ResourceManagerState = (*resourceManager)(nil)
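+
+// As a sketch, external tooling that holds a network.ResourceManager backed by
+// this package can reach the state via a type assertion (rm is hypothetical):
+//
+//	if state, ok := rm.(ResourceManagerState); ok {
+//		stat := state.Stat()
+//		_ = stat.System // inspect system-level usage
+//	}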
+
+func (s *resourceScope) Limit() Limit {
+ s.Lock()
+ defer s.Unlock()
+
+ return s.rc.limit
+}
+
+func (s *resourceScope) SetLimit(limit Limit) {
+ s.Lock()
+ defer s.Unlock()
+
+ s.rc.limit = limit
+}
+
+func (s *protocolScope) SetLimit(limit Limit) {
+ s.rcmgr.setStickyProtocol(s.proto)
+ s.resourceScope.SetLimit(limit)
+}
+
+func (s *peerScope) SetLimit(limit Limit) {
+ s.rcmgr.setStickyPeer(s.peer)
+ s.resourceScope.SetLimit(limit)
+}
+
+func (r *resourceManager) ListServices() []string {
+ r.mx.Lock()
+ defer r.mx.Unlock()
+
+ result := make([]string, 0, len(r.svc))
+ for svc := range r.svc {
+ result = append(result, svc)
+ }
+
+ sort.Slice(result, func(i, j int) bool {
+ return result[i] < result[j]
+ })
+
+ return result
+}
+
+func (r *resourceManager) ListProtocols() []protocol.ID {
+ r.mx.Lock()
+ defer r.mx.Unlock()
+
+ result := make([]protocol.ID, 0, len(r.proto))
+ for p := range r.proto {
+ result = append(result, p)
+ }
+
+ sort.Slice(result, func(i, j int) bool {
+ return result[i] < result[j]
+ })
+
+ return result
+}
+
+func (r *resourceManager) ListPeers() []peer.ID {
+ r.mx.Lock()
+ defer r.mx.Unlock()
+
+ result := make([]peer.ID, 0, len(r.peer))
+ for p := range r.peer {
+ result = append(result, p)
+ }
+
+ sort.Slice(result, func(i, j int) bool {
+ return bytes.Compare([]byte(result[i]), []byte(result[j])) < 0
+ })
+
+ return result
+}
+
+func (r *resourceManager) Stat() (result ResourceManagerStat) {
+ r.mx.Lock()
+ svcs := make([]*serviceScope, 0, len(r.svc))
+ for _, svc := range r.svc {
+ svcs = append(svcs, svc)
+ }
+ protos := make([]*protocolScope, 0, len(r.proto))
+ for _, proto := range r.proto {
+ protos = append(protos, proto)
+ }
+ peers := make([]*peerScope, 0, len(r.peer))
+ for _, peer := range r.peer {
+ peers = append(peers, peer)
+ }
+ r.mx.Unlock()
+
+ // Note: there is no global lock, so the system keeps updating while we dump its
+ // state; as such, the stats might not exactly add up at the system level. We take
+ // the system stat last nonetheless, so that this is the most up-to-date snapshot.
+ result.Peers = make(map[peer.ID]network.ScopeStat, len(peers))
+ for _, peer := range peers {
+ result.Peers[peer.peer] = peer.Stat()
+ }
+ result.Protocols = make(map[protocol.ID]network.ScopeStat, len(protos))
+ for _, proto := range protos {
+ result.Protocols[proto.proto] = proto.Stat()
+ }
+ result.Services = make(map[string]network.ScopeStat, len(svcs))
+ for _, svc := range svcs {
+ result.Services[svc.service] = svc.Stat()
+ }
+ result.Transient = r.transient.Stat()
+ result.System = r.system.Stat()
+
+ return result
+}
+
+func (r *resourceManager) GetConnLimit() int {
+ return r.limits.GetSystemLimits().GetConnTotalLimit()
+}
diff --git a/p2p/host/resource-manager/limit.go b/p2p/host/resource-manager/limit.go
new file mode 100644
index 0000000000..003ab6f0f8
--- /dev/null
+++ b/p2p/host/resource-manager/limit.go
@@ -0,0 +1,297 @@
+/*
+Package rcmgr is the resource manager for go-libp2p. It allows you to track
+resources being used throughout your go-libp2p process, and to make sure that
+the process doesn't use more resources than what you define as your limits.
+The resource manager only knows about things it is told about, so it's the
+responsibility of the user of this library (either go-libp2p or a go-libp2p
+user) to check with the resource manager before actually allocating a
+resource.
+*/
+package rcmgr
+
+import (
+ "encoding/json"
+ "io"
+ "math"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/protocol"
+)
+
+// Limit is an object that specifies basic resource limits.
+type Limit interface {
+ // GetMemoryLimit returns the (current) memory limit.
+ GetMemoryLimit() int64
+ // GetStreamLimit returns the stream limit, for inbound or outbound streams.
+ GetStreamLimit(network.Direction) int
+ // GetStreamTotalLimit returns the total stream limit
+ GetStreamTotalLimit() int
+ // GetConnLimit returns the connection limit, for inbound or outbound connections.
+ GetConnLimit(network.Direction) int
+ // GetConnTotalLimit returns the total connection limit
+ GetConnTotalLimit() int
+ // GetFDLimit returns the file descriptor limit.
+ GetFDLimit() int
+}
+
+// Limiter is the interface for providing limits to the resource manager.
+type Limiter interface {
+ GetSystemLimits() Limit
+ GetTransientLimits() Limit
+ GetAllowlistedSystemLimits() Limit
+ GetAllowlistedTransientLimits() Limit
+ GetServiceLimits(svc string) Limit
+ GetServicePeerLimits(svc string) Limit
+ GetProtocolLimits(proto protocol.ID) Limit
+ GetProtocolPeerLimits(proto protocol.ID) Limit
+ GetPeerLimits(p peer.ID) Limit
+ GetStreamLimits(p peer.ID) Limit
+ GetConnLimits() Limit
+}
+
+// NewDefaultLimiterFromJSON creates a new limiter by parsing a json configuration,
+// using the default limits for fallback.
+func NewDefaultLimiterFromJSON(in io.Reader) (Limiter, error) {
+ return NewLimiterFromJSON(in, DefaultLimits.AutoScale())
+}
+
+// NewLimiterFromJSON creates a new limiter by parsing a json configuration.
+func NewLimiterFromJSON(in io.Reader, defaults ConcreteLimitConfig) (Limiter, error) {
+ cfg, err := readLimiterConfigFromJSON(in, defaults)
+ if err != nil {
+ return nil, err
+ }
+ return &fixedLimiter{cfg}, nil
+}
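+
+// As a sketch, loading limit overrides from a file (the filename is
+// hypothetical):
+//
+//	f, err := os.Open("limits.json")
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer f.Close()
+//	limiter, err := NewDefaultLimiterFromJSON(f)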
+
+func readLimiterConfigFromJSON(in io.Reader, defaults ConcreteLimitConfig) (ConcreteLimitConfig, error) {
+ var cfg PartialLimitConfig
+ if err := json.NewDecoder(in).Decode(&cfg); err != nil {
+ return ConcreteLimitConfig{}, err
+ }
+ return cfg.Build(defaults), nil
+}
+
+// fixedLimiter is a limiter with fixed limits.
+type fixedLimiter struct {
+ ConcreteLimitConfig
+}
+
+var _ Limiter = (*fixedLimiter)(nil)
+
+func NewFixedLimiter(conf ConcreteLimitConfig) Limiter {
+ log.Debug("initializing new limiter with config", "limits", conf)
+ return &fixedLimiter{conf}
+}
+
+// BaseLimit is a mixin type for basic resource limits.
+type BaseLimit struct {
+ Streams int `json:",omitempty"`
+ StreamsInbound int `json:",omitempty"`
+ StreamsOutbound int `json:",omitempty"`
+ Conns int `json:",omitempty"`
+ ConnsInbound int `json:",omitempty"`
+ ConnsOutbound int `json:",omitempty"`
+ FD int `json:",omitempty"`
+ Memory int64 `json:",omitempty"`
+}
+
+func valueOrBlockAll(n int) LimitVal {
+ if n == 0 {
+ return BlockAllLimit
+ } else if n == math.MaxInt {
+ return Unlimited
+ }
+ return LimitVal(n)
+}
+func valueOrBlockAll64(n int64) LimitVal64 {
+ if n == 0 {
+ return BlockAllLimit64
+ } else if n == math.MaxInt64 {
+ return Unlimited64
+ }
+ return LimitVal64(n)
+}
+
+// ToResourceLimits converts the BaseLimit to a ResourceLimits
+func (l BaseLimit) ToResourceLimits() ResourceLimits {
+ return ResourceLimits{
+ Streams: valueOrBlockAll(l.Streams),
+ StreamsInbound: valueOrBlockAll(l.StreamsInbound),
+ StreamsOutbound: valueOrBlockAll(l.StreamsOutbound),
+ Conns: valueOrBlockAll(l.Conns),
+ ConnsInbound: valueOrBlockAll(l.ConnsInbound),
+ ConnsOutbound: valueOrBlockAll(l.ConnsOutbound),
+ FD: valueOrBlockAll(l.FD),
+ Memory: valueOrBlockAll64(l.Memory),
+ }
+}
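+
+// For example, BaseLimit{}.ToResourceLimits() maps every zero field to
+// BlockAllLimit (BlockAllLimit64 for Memory), and a field set to math.MaxInt
+// (math.MaxInt64 for Memory) maps to Unlimited (Unlimited64 for Memory).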
+
+// Apply overwrites all zero-valued limits with the values of l2.
+// The receiver is a pointer, so the limits are updated in place; l2 is passed
+// by value and left unchanged.
+func (l *BaseLimit) Apply(l2 BaseLimit) {
+ if l.Streams == 0 {
+ l.Streams = l2.Streams
+ }
+ if l.StreamsInbound == 0 {
+ l.StreamsInbound = l2.StreamsInbound
+ }
+ if l.StreamsOutbound == 0 {
+ l.StreamsOutbound = l2.StreamsOutbound
+ }
+ if l.Conns == 0 {
+ l.Conns = l2.Conns
+ }
+ if l.ConnsInbound == 0 {
+ l.ConnsInbound = l2.ConnsInbound
+ }
+ if l.ConnsOutbound == 0 {
+ l.ConnsOutbound = l2.ConnsOutbound
+ }
+ if l.Memory == 0 {
+ l.Memory = l2.Memory
+ }
+ if l.FD == 0 {
+ l.FD = l2.FD
+ }
+}
+
+// BaseLimitIncrease is the increase per GiB of allowed memory.
+type BaseLimitIncrease struct {
+ Streams int `json:",omitempty"`
+ StreamsInbound int `json:",omitempty"`
+ StreamsOutbound int `json:",omitempty"`
+ Conns int `json:",omitempty"`
+ ConnsInbound int `json:",omitempty"`
+ ConnsOutbound int `json:",omitempty"`
+ // Memory is in bytes. Values over 1<<30 (1 GiB) don't make sense.
+ Memory int64 `json:",omitempty"`
+ // FDFraction is expected to be >= 0 and <= 1.
+ FDFraction float64 `json:",omitempty"`
+}
+
+// Apply overwrites all zero-valued limits with the values of l2.
+// The receiver is a pointer, so the limits are updated in place; l2 is passed
+// by value and left unchanged.
+func (l *BaseLimitIncrease) Apply(l2 BaseLimitIncrease) {
+ if l.Streams == 0 {
+ l.Streams = l2.Streams
+ }
+ if l.StreamsInbound == 0 {
+ l.StreamsInbound = l2.StreamsInbound
+ }
+ if l.StreamsOutbound == 0 {
+ l.StreamsOutbound = l2.StreamsOutbound
+ }
+ if l.Conns == 0 {
+ l.Conns = l2.Conns
+ }
+ if l.ConnsInbound == 0 {
+ l.ConnsInbound = l2.ConnsInbound
+ }
+ if l.ConnsOutbound == 0 {
+ l.ConnsOutbound = l2.ConnsOutbound
+ }
+ if l.Memory == 0 {
+ l.Memory = l2.Memory
+ }
+ if l.FDFraction == 0 {
+ l.FDFraction = l2.FDFraction
+ }
+}
+
+func (l BaseLimit) GetStreamLimit(dir network.Direction) int {
+ if dir == network.DirInbound {
+ return l.StreamsInbound
+ } else {
+ return l.StreamsOutbound
+ }
+}
+
+func (l BaseLimit) GetStreamTotalLimit() int {
+ return l.Streams
+}
+
+func (l BaseLimit) GetConnLimit(dir network.Direction) int {
+ if dir == network.DirInbound {
+ return l.ConnsInbound
+ } else {
+ return l.ConnsOutbound
+ }
+}
+
+func (l BaseLimit) GetConnTotalLimit() int {
+ return l.Conns
+}
+
+func (l BaseLimit) GetFDLimit() int {
+ return l.FD
+}
+
+func (l BaseLimit) GetMemoryLimit() int64 {
+ return l.Memory
+}
+
+func (l *fixedLimiter) GetSystemLimits() Limit {
+ return &l.system
+}
+
+func (l *fixedLimiter) GetTransientLimits() Limit {
+ return &l.transient
+}
+
+func (l *fixedLimiter) GetAllowlistedSystemLimits() Limit {
+ return &l.allowlistedSystem
+}
+
+func (l *fixedLimiter) GetAllowlistedTransientLimits() Limit {
+ return &l.allowlistedTransient
+}
+
+func (l *fixedLimiter) GetServiceLimits(svc string) Limit {
+ sl, ok := l.service[svc]
+ if !ok {
+ return &l.serviceDefault
+ }
+ return &sl
+}
+
+func (l *fixedLimiter) GetServicePeerLimits(svc string) Limit {
+ pl, ok := l.servicePeer[svc]
+ if !ok {
+ return &l.servicePeerDefault
+ }
+ return &pl
+}
+
+func (l *fixedLimiter) GetProtocolLimits(proto protocol.ID) Limit {
+ pl, ok := l.protocol[proto]
+ if !ok {
+ return &l.protocolDefault
+ }
+ return &pl
+}
+
+func (l *fixedLimiter) GetProtocolPeerLimits(proto protocol.ID) Limit {
+ pl, ok := l.protocolPeer[proto]
+ if !ok {
+ return &l.protocolPeerDefault
+ }
+ return &pl
+}
+
+func (l *fixedLimiter) GetPeerLimits(p peer.ID) Limit {
+ pl, ok := l.peer[p]
+ if !ok {
+ return &l.peerDefault
+ }
+ return &pl
+}
+
+func (l *fixedLimiter) GetStreamLimits(_ peer.ID) Limit {
+ return &l.stream
+}
+
+func (l *fixedLimiter) GetConnLimits() Limit {
+ return &l.conn
+}
diff --git a/p2p/host/resource-manager/limit_config_test.backwards-compat.json b/p2p/host/resource-manager/limit_config_test.backwards-compat.json
new file mode 100644
index 0000000000..b1a5e9ecb7
--- /dev/null
+++ b/p2p/host/resource-manager/limit_config_test.backwards-compat.json
@@ -0,0 +1,45 @@
+{
+ "System": {
+ "Memory": 65536,
+ "Conns": 16,
+ "ConnsInbound": 8,
+ "ConnsOutbound": 16,
+ "FD": 16
+ },
+ "ServiceDefault": {
+ "Memory": 8765
+ },
+ "Service": {
+ "A": {
+ "Memory": 8192
+ },
+ "B": {}
+ },
+ "ServicePeerDefault": {
+ "Memory": 2048
+ },
+ "ServicePeer": {
+ "A": {
+ "Memory": 4096
+ }
+ },
+ "ProtocolDefault": {
+ "Memory": 2048
+ },
+ "ProtocolPeerDefault": {
+ "Memory": 1024
+ },
+ "Protocol": {
+ "/A": {
+ "Memory": 8192
+ }
+ },
+ "PeerDefault": {
+ "Memory": 4096
+ },
+ "Peer": {
+ "12D3KooWPFH2Bx2tPfw6RLxN8k2wh47GRXgkt9yrAHU37zFwHWzS": {
+ "Memory": 4097
+ }
+ }
+}
\ No newline at end of file
diff --git a/p2p/host/resource-manager/limit_config_test.go b/p2p/host/resource-manager/limit_config_test.go
new file mode 100644
index 0000000000..d7bd11061c
--- /dev/null
+++ b/p2p/host/resource-manager/limit_config_test.go
@@ -0,0 +1,169 @@
+package rcmgr
+
+import (
+ "bytes"
+ "encoding/json"
+ "os"
+ "testing"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+
+ "github.com/stretchr/testify/require"
+)
+
+func withMemoryLimit(l BaseLimit, m int64) BaseLimit {
+ l2 := l
+ l2.Memory = m
+ return l2
+}
+
+func TestLimitConfigParserBackwardsCompat(t *testing.T) {
+ // Tests that we can parse the old limit config format.
+ in, err := os.Open("limit_config_test.backwards-compat.json")
+ require.NoError(t, err)
+ defer in.Close()
+
+ defaultScaledLimits := DefaultLimits
+ defaultScaledLimits.AddServiceLimit("C", DefaultLimits.ServiceBaseLimit, BaseLimitIncrease{})
+ defaultScaledLimits.AddProtocolPeerLimit("C", DefaultLimits.ServiceBaseLimit, BaseLimitIncrease{})
+ defaults := defaultScaledLimits.AutoScale()
+ cfg, err := readLimiterConfigFromJSON(in, defaults)
+ require.NoError(t, err)
+
+ require.Equal(t, int64(65536), cfg.system.Memory)
+ require.Equal(t, defaults.system.Streams, cfg.system.Streams)
+ require.Equal(t, defaults.system.StreamsInbound, cfg.system.StreamsInbound)
+ require.Equal(t, defaults.system.StreamsOutbound, cfg.system.StreamsOutbound)
+ require.Equal(t, 16, cfg.system.Conns)
+ require.Equal(t, 8, cfg.system.ConnsInbound)
+ require.Equal(t, 16, cfg.system.ConnsOutbound)
+ require.Equal(t, 16, cfg.system.FD)
+
+ require.Equal(t, defaults.transient, cfg.transient)
+ require.Equal(t, int64(8765), cfg.serviceDefault.Memory)
+
+ require.Contains(t, cfg.service, "A")
+ require.Equal(t, withMemoryLimit(cfg.serviceDefault, 8192), cfg.service["A"])
+ require.Contains(t, cfg.service, "B")
+ require.Equal(t, cfg.serviceDefault, cfg.service["B"])
+ require.Contains(t, cfg.service, "C")
+ require.Equal(t, defaults.service["C"], cfg.service["C"])
+
+ require.Equal(t, int64(4096), cfg.peerDefault.Memory)
+ peerID, err := peer.Decode("12D3KooWPFH2Bx2tPfw6RLxN8k2wh47GRXgkt9yrAHU37zFwHWzS")
+ require.NoError(t, err)
+ require.Contains(t, cfg.peer, peerID)
+ require.Equal(t, int64(4097), cfg.peer[peerID].Memory)
+}
+
+func TestLimitConfigParser(t *testing.T) {
+ in, err := os.Open("limit_config_test.json")
+ require.NoError(t, err)
+ defer in.Close()
+
+ defaultScaledLimits := DefaultLimits
+ defaultScaledLimits.AddServiceLimit("C", DefaultLimits.ServiceBaseLimit, BaseLimitIncrease{})
+ defaultScaledLimits.AddProtocolPeerLimit("C", DefaultLimits.ServiceBaseLimit, BaseLimitIncrease{})
+ defaults := defaultScaledLimits.AutoScale()
+ cfg, err := readLimiterConfigFromJSON(in, defaults)
+ require.NoError(t, err)
+
+ require.Equal(t, int64(65536), cfg.system.Memory)
+ require.Equal(t, defaults.system.Streams, cfg.system.Streams)
+ require.Equal(t, defaults.system.StreamsInbound, cfg.system.StreamsInbound)
+ require.Equal(t, defaults.system.StreamsOutbound, cfg.system.StreamsOutbound)
+ require.Equal(t, 16, cfg.system.Conns)
+ require.Equal(t, 8, cfg.system.ConnsInbound)
+ require.Equal(t, 16, cfg.system.ConnsOutbound)
+ require.Equal(t, 16, cfg.system.FD)
+
+ require.Equal(t, defaults.transient, cfg.transient)
+ require.Equal(t, int64(8765), cfg.serviceDefault.Memory)
+
+ require.Contains(t, cfg.service, "A")
+ require.Equal(t, withMemoryLimit(cfg.serviceDefault, 8192), cfg.service["A"])
+ require.Contains(t, cfg.service, "B")
+ require.Equal(t, cfg.serviceDefault, cfg.service["B"])
+ require.Contains(t, cfg.service, "C")
+ require.Equal(t, defaults.service["C"], cfg.service["C"])
+
+ require.Equal(t, int64(4096), cfg.peerDefault.Memory)
+ peerID, err := peer.Decode("12D3KooWPFH2Bx2tPfw6RLxN8k2wh47GRXgkt9yrAHU37zFwHWzS")
+ require.NoError(t, err)
+ require.Contains(t, cfg.peer, peerID)
+ require.Equal(t, int64(4097), cfg.peer[peerID].Memory)
+
+ // Roundtrip
+ limitConfig := cfg.ToPartialLimitConfig()
+ jsonBytes, err := json.Marshal(&limitConfig)
+ require.NoError(t, err)
+ cfgAfterRoundTrip, err := readLimiterConfigFromJSON(bytes.NewReader(jsonBytes), defaults)
+ require.NoError(t, err)
+ require.Equal(t, limitConfig, cfgAfterRoundTrip.ToPartialLimitConfig())
+}
+
+func TestLimitConfigRoundTrip(t *testing.T) {
+ // Tests that we can roundtrip a PartialLimitConfig to a ConcreteLimitConfig and back.
+ in, err := os.Open("limit_config_test.json")
+ require.NoError(t, err)
+ defer in.Close()
+
+ defaults := DefaultLimits
+ defaults.AddServiceLimit("C", DefaultLimits.ServiceBaseLimit, BaseLimitIncrease{})
+ defaults.AddProtocolPeerLimit("C", DefaultLimits.ServiceBaseLimit, BaseLimitIncrease{})
+ concreteCfg, err := readLimiterConfigFromJSON(in, defaults.AutoScale())
+ require.NoError(t, err)
+
+ // Roundtrip
+ limitConfig := concreteCfg.ToPartialLimitConfig()
+ // Using InfiniteLimits because it's different from the defaults used above.
+ // If anything was marked "default" in the round trip, it would show up as a
+ // difference here.
+ concreteCfgRT := limitConfig.Build(InfiniteLimits)
+ require.Equal(t, concreteCfg, concreteCfgRT)
+}
+
+func TestDefaultsDontChange(t *testing.T) {
+ concrete := DefaultLimits.Scale(8<<30, 16<<10) // 8GB, 16k fds
+ jsonBytes, err := json.MarshalIndent(concrete.ToPartialLimitConfig(), "", " ")
+ require.NoError(t, err)
+
+ // Uncomment to update the defaults file
+ // err = os.WriteFile("limit_config_test_default.json", jsonBytes, 0644)
+ // require.NoError(t, err)
+
+ defaultsFromFile, err := os.ReadFile("limit_config_test_default.json")
+ require.NoError(t, err)
+
+ // Replace CRLF with LF because of Windows line endings.
+ defaultsFromFile = bytes.ReplaceAll(defaultsFromFile, []byte("\r\n"), []byte("\n"))
+ jsonBytes = bytes.ReplaceAll(jsonBytes, []byte("\r\n"), []byte("\n"))
+
+ require.Equal(t, string(defaultsFromFile), string(jsonBytes))
+}
+
+func TestReadmeLimitConfigSerialization(t *testing.T) {
+ noisyNeighbor, _ := peer.Decode("QmVvtzcZgCkMnSFf2dnrBPXrWuNFWNM9J3MpZQCvWPuVZf")
+ cfg := PartialLimitConfig{
+ System: ResourceLimits{
+ // Allow unlimited outbound streams
+ StreamsOutbound: Unlimited,
+ },
+ Peer: map[peer.ID]ResourceLimits{
+ noisyNeighbor: {
+ // No inbound connections from this peer
+ ConnsInbound: BlockAllLimit,
+ // But let me open connections to them
+ Conns: DefaultLimit,
+ ConnsOutbound: DefaultLimit,
+ // No inbound streams from this peer
+ StreamsInbound: BlockAllLimit,
+ // And let me open unlimited (by me) outbound streams (the peer may have their own limits on me)
+ StreamsOutbound: Unlimited,
+ },
+ },
+ }
+ jsonBytes, err := json.Marshal(&cfg)
+ require.NoError(t, err)
+ require.Equal(t, `{"Peer":{"QmVvtzcZgCkMnSFf2dnrBPXrWuNFWNM9J3MpZQCvWPuVZf":{"StreamsInbound":"blockAll","StreamsOutbound":"unlimited","ConnsInbound":"blockAll"}},"System":{"StreamsOutbound":"unlimited"}}`, string(jsonBytes))
+}
diff --git a/p2p/host/resource-manager/limit_config_test.json b/p2p/host/resource-manager/limit_config_test.json
new file mode 100644
index 0000000000..b7758baf1e
--- /dev/null
+++ b/p2p/host/resource-manager/limit_config_test.json
@@ -0,0 +1,45 @@
+{
+ "System": {
+ "Memory": 65536,
+ "Conns": 16,
+ "ConnsInbound": 8,
+ "ConnsOutbound": 16,
+ "FD": 16
+ },
+ "ServiceDefault": {
+ "Memory": 8765
+ },
+ "Service": {
+ "A": {
+ "Memory": 8192
+ },
+ "B": {}
+ },
+ "ServicePeerDefault": {
+ "Memory": 2048
+ },
+ "ServicePeer": {
+ "A": {
+ "Memory": 4096
+ }
+ },
+ "ProtocolDefault": {
+ "Memory": 2048
+ },
+ "ProtocolPeerDefault": {
+ "Memory": 1024
+ },
+ "Protocol": {
+ "/A": {
+ "Memory": 8192
+ }
+ },
+ "PeerDefault": {
+ "Memory": 4096
+ },
+ "Peer": {
+ "12D3KooWPFH2Bx2tPfw6RLxN8k2wh47GRXgkt9yrAHU37zFwHWzS": {
+ "Memory": 4097
+ }
+ }
+}
diff --git a/p2p/host/resource-manager/limit_config_test_default.json b/p2p/host/resource-manager/limit_config_test_default.json
new file mode 100644
index 0000000000..51fcba17c0
--- /dev/null
+++ b/p2p/host/resource-manager/limit_config_test_default.json
@@ -0,0 +1,112 @@
+{
+ "System": {
+ "Streams": 18432,
+ "StreamsInbound": 9216,
+ "StreamsOutbound": 18432,
+ "Conns": 1152,
+ "ConnsInbound": 576,
+ "ConnsOutbound": 1152,
+ "FD": 16384,
+ "Memory": "8724152320"
+ },
+ "Transient": {
+ "Streams": 2304,
+ "StreamsInbound": 1152,
+ "StreamsOutbound": 2304,
+ "Conns": 320,
+ "ConnsInbound": 160,
+ "ConnsOutbound": 320,
+ "FD": 4096,
+ "Memory": "1107296256"
+ },
+ "AllowlistedSystem": {
+ "Streams": 18432,
+ "StreamsInbound": 9216,
+ "StreamsOutbound": 18432,
+ "Conns": 1152,
+ "ConnsInbound": 576,
+ "ConnsOutbound": 1152,
+ "FD": 16384,
+ "Memory": "8724152320"
+ },
+ "AllowlistedTransient": {
+ "Streams": 2304,
+ "StreamsInbound": 1152,
+ "StreamsOutbound": 2304,
+ "Conns": 320,
+ "ConnsInbound": 160,
+ "ConnsOutbound": 320,
+ "FD": 4096,
+ "Memory": "1107296256"
+ },
+ "ServiceDefault": {
+ "Streams": 20480,
+ "StreamsInbound": 5120,
+ "StreamsOutbound": 20480,
+ "Conns": "blockAll",
+ "ConnsInbound": "blockAll",
+ "ConnsOutbound": "blockAll",
+ "FD": "blockAll",
+ "Memory": "1140850688"
+ },
+ "ServicePeerDefault": {
+ "Streams": 320,
+ "StreamsInbound": 160,
+ "StreamsOutbound": 320,
+ "Conns": "blockAll",
+ "ConnsInbound": "blockAll",
+ "ConnsOutbound": "blockAll",
+ "FD": "blockAll",
+ "Memory": "50331648"
+ },
+ "ProtocolDefault": {
+ "Streams": 6144,
+ "StreamsInbound": 2560,
+ "StreamsOutbound": 6144,
+ "Conns": "blockAll",
+ "ConnsInbound": "blockAll",
+ "ConnsOutbound": "blockAll",
+ "FD": "blockAll",
+ "Memory": "1442840576"
+ },
+ "ProtocolPeerDefault": {
+ "Streams": 384,
+ "StreamsInbound": 96,
+ "StreamsOutbound": 192,
+ "Conns": "blockAll",
+ "ConnsInbound": "blockAll",
+ "ConnsOutbound": "blockAll",
+ "FD": "blockAll",
+ "Memory": "16777248"
+ },
+ "PeerDefault": {
+ "Streams": 2560,
+ "StreamsInbound": 1280,
+ "StreamsOutbound": 2560,
+ "Conns": 8,
+ "ConnsInbound": 8,
+ "ConnsOutbound": 8,
+ "FD": 256,
+ "Memory": "1140850688"
+ },
+ "Conn": {
+ "Streams": "blockAll",
+ "StreamsInbound": "blockAll",
+ "StreamsOutbound": "blockAll",
+ "Conns": 1,
+ "ConnsInbound": 1,
+ "ConnsOutbound": 1,
+ "FD": 1,
+ "Memory": "33554432"
+ },
+ "Stream": {
+ "Streams": 1,
+ "StreamsInbound": 1,
+ "StreamsOutbound": 1,
+ "Conns": "blockAll",
+ "ConnsInbound": "blockAll",
+ "ConnsOutbound": "blockAll",
+ "FD": "blockAll",
+ "Memory": "16777216"
+ }
+}
\ No newline at end of file
diff --git a/p2p/host/resource-manager/limit_defaults.go b/p2p/host/resource-manager/limit_defaults.go
new file mode 100644
index 0000000000..e7489c45d1
--- /dev/null
+++ b/p2p/host/resource-manager/limit_defaults.go
@@ -0,0 +1,879 @@
+package rcmgr
+
+import (
+ "encoding/json"
+ "fmt"
+ "math"
+ "strconv"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/protocol"
+
+ "github.com/pbnjay/memory"
+)
+
+type baseLimitConfig struct {
+ BaseLimit BaseLimit
+ BaseLimitIncrease BaseLimitIncrease
+}
+
+// ScalingLimitConfig is a struct for configuring default limits.
+// {}BaseLimit are the limits that apply to a minimal node (128 MB of memory for libp2p) with 256 file descriptors.
+// {}LimitIncrease is the additional limit granted for every additional 1 GB of RAM.
+type ScalingLimitConfig struct {
+ SystemBaseLimit BaseLimit
+ SystemLimitIncrease BaseLimitIncrease
+
+ TransientBaseLimit BaseLimit
+ TransientLimitIncrease BaseLimitIncrease
+
+ AllowlistedSystemBaseLimit BaseLimit
+ AllowlistedSystemLimitIncrease BaseLimitIncrease
+
+ AllowlistedTransientBaseLimit BaseLimit
+ AllowlistedTransientLimitIncrease BaseLimitIncrease
+
+ ServiceBaseLimit BaseLimit
+ ServiceLimitIncrease BaseLimitIncrease
+ ServiceLimits map[string]baseLimitConfig // use AddServiceLimit to modify
+
+ ServicePeerBaseLimit BaseLimit
+ ServicePeerLimitIncrease BaseLimitIncrease
+ ServicePeerLimits map[string]baseLimitConfig // use AddServicePeerLimit to modify
+
+ ProtocolBaseLimit BaseLimit
+ ProtocolLimitIncrease BaseLimitIncrease
+ ProtocolLimits map[protocol.ID]baseLimitConfig // use AddProtocolLimit to modify
+
+ ProtocolPeerBaseLimit BaseLimit
+ ProtocolPeerLimitIncrease BaseLimitIncrease
+ ProtocolPeerLimits map[protocol.ID]baseLimitConfig // use AddProtocolPeerLimit to modify
+
+ PeerBaseLimit BaseLimit
+ PeerLimitIncrease BaseLimitIncrease
+ PeerLimits map[peer.ID]baseLimitConfig // use AddPeerLimit to modify
+
+ ConnBaseLimit BaseLimit
+ ConnLimitIncrease BaseLimitIncrease
+
+ StreamBaseLimit BaseLimit
+ StreamLimitIncrease BaseLimitIncrease
+}
+
+func (cfg *ScalingLimitConfig) AddServiceLimit(svc string, base BaseLimit, inc BaseLimitIncrease) {
+ if cfg.ServiceLimits == nil {
+ cfg.ServiceLimits = make(map[string]baseLimitConfig)
+ }
+ cfg.ServiceLimits[svc] = baseLimitConfig{
+ BaseLimit: base,
+ BaseLimitIncrease: inc,
+ }
+}
+
+func (cfg *ScalingLimitConfig) AddProtocolLimit(proto protocol.ID, base BaseLimit, inc BaseLimitIncrease) {
+ if cfg.ProtocolLimits == nil {
+ cfg.ProtocolLimits = make(map[protocol.ID]baseLimitConfig)
+ }
+ cfg.ProtocolLimits[proto] = baseLimitConfig{
+ BaseLimit: base,
+ BaseLimitIncrease: inc,
+ }
+}
+
+func (cfg *ScalingLimitConfig) AddPeerLimit(p peer.ID, base BaseLimit, inc BaseLimitIncrease) {
+ if cfg.PeerLimits == nil {
+ cfg.PeerLimits = make(map[peer.ID]baseLimitConfig)
+ }
+ cfg.PeerLimits[p] = baseLimitConfig{
+ BaseLimit: base,
+ BaseLimitIncrease: inc,
+ }
+}
+
+func (cfg *ScalingLimitConfig) AddServicePeerLimit(svc string, base BaseLimit, inc BaseLimitIncrease) {
+ if cfg.ServicePeerLimits == nil {
+ cfg.ServicePeerLimits = make(map[string]baseLimitConfig)
+ }
+ cfg.ServicePeerLimits[svc] = baseLimitConfig{
+ BaseLimit: base,
+ BaseLimitIncrease: inc,
+ }
+}
+
+func (cfg *ScalingLimitConfig) AddProtocolPeerLimit(proto protocol.ID, base BaseLimit, inc BaseLimitIncrease) {
+ if cfg.ProtocolPeerLimits == nil {
+ cfg.ProtocolPeerLimits = make(map[protocol.ID]baseLimitConfig)
+ }
+ cfg.ProtocolPeerLimits[proto] = baseLimitConfig{
+ BaseLimit: base,
+ BaseLimitIncrease: inc,
+ }
+}
+
+type LimitVal int
+
+const (
+	// DefaultLimit is the default value for resources. The exact value depends on the context; unset values are filled in from `DefaultLimits`.
+ DefaultLimit LimitVal = 0
+ // Unlimited is the value for unlimited resources. An arbitrarily high number will also work.
+ Unlimited LimitVal = -1
+ // BlockAllLimit is the LimitVal for allowing no amount of resources.
+ BlockAllLimit LimitVal = -2
+)
+
+func (l LimitVal) MarshalJSON() ([]byte, error) {
+ if l == Unlimited {
+ return json.Marshal("unlimited")
+ } else if l == DefaultLimit {
+ return json.Marshal("default")
+ } else if l == BlockAllLimit {
+ return json.Marshal("blockAll")
+ }
+ return json.Marshal(int(l))
+}
+
+func (l *LimitVal) UnmarshalJSON(b []byte) error {
+ if string(b) == `"default"` {
+ *l = DefaultLimit
+ return nil
+ } else if string(b) == `"unlimited"` {
+ *l = Unlimited
+ return nil
+ } else if string(b) == `"blockAll"` {
+ *l = BlockAllLimit
+ return nil
+ }
+
+ var val int
+ if err := json.Unmarshal(b, &val); err != nil {
+ return err
+ }
+
+ if val == 0 {
+ // If there is an explicit 0 in the JSON we should interpret this as block all.
+ *l = BlockAllLimit
+ return nil
+ }
+
+ *l = LimitVal(val)
+ return nil
+}
+
+func (l LimitVal) Build(defaultVal int) int {
+ if l == DefaultLimit {
+ return defaultVal
+ }
+ if l == Unlimited {
+ return math.MaxInt
+ }
+ if l == BlockAllLimit {
+ return 0
+ }
+ return int(l)
+}
+
+type LimitVal64 int64
+
+const (
+	// DefaultLimit64 is the default value for 64-bit resources.
+	DefaultLimit64 LimitVal64 = 0
+	// Unlimited64 is the value for unlimited 64-bit resources.
+	Unlimited64 LimitVal64 = -1
+	// BlockAllLimit64 is the LimitVal64 for allowing no amount of resources.
+ BlockAllLimit64 LimitVal64 = -2
+)
+
+func (l LimitVal64) MarshalJSON() ([]byte, error) {
+ if l == Unlimited64 {
+ return json.Marshal("unlimited")
+ } else if l == DefaultLimit64 {
+ return json.Marshal("default")
+ } else if l == BlockAllLimit64 {
+ return json.Marshal("blockAll")
+ }
+
+	// Encode as a string because JSON numbers can't reliably represent the full 64-bit integer range.
+ return json.Marshal(strconv.FormatInt(int64(l), 10))
+}
+
+func (l *LimitVal64) UnmarshalJSON(b []byte) error {
+ if string(b) == `"default"` {
+ *l = DefaultLimit64
+ return nil
+ } else if string(b) == `"unlimited"` {
+ *l = Unlimited64
+ return nil
+ } else if string(b) == `"blockAll"` {
+ *l = BlockAllLimit64
+ return nil
+ }
+
+ var val string
+ if err := json.Unmarshal(b, &val); err != nil {
+		// Is this an integer? Accepted for backwards compatibility.
+ var val int
+ if err := json.Unmarshal(b, &val); err != nil {
+ return fmt.Errorf("failed to unmarshal limit value: %w", err)
+ }
+
+ if val == 0 {
+ // If there is an explicit 0 in the JSON we should interpret this as block all.
+ *l = BlockAllLimit64
+ return nil
+ }
+
+ *l = LimitVal64(val)
+ return nil
+ }
+
+ i, err := strconv.ParseInt(val, 10, 64)
+ if err != nil {
+ return err
+ }
+
+ if i == 0 {
+ // If there is an explicit 0 in the JSON we should interpret this as block all.
+ *l = BlockAllLimit64
+ return nil
+ }
+
+ *l = LimitVal64(i)
+ return nil
+}
+
+func (l LimitVal64) Build(defaultVal int64) int64 {
+ if l == DefaultLimit64 {
+ return defaultVal
+ }
+ if l == Unlimited64 {
+ return math.MaxInt64
+ }
+ if l == BlockAllLimit64 {
+ return 0
+ }
+ return int64(l)
+}
+
+// ResourceLimits is the type for basic resource limits.
+type ResourceLimits struct {
+ Streams LimitVal `json:",omitempty"`
+ StreamsInbound LimitVal `json:",omitempty"`
+ StreamsOutbound LimitVal `json:",omitempty"`
+ Conns LimitVal `json:",omitempty"`
+ ConnsInbound LimitVal `json:",omitempty"`
+ ConnsOutbound LimitVal `json:",omitempty"`
+ FD LimitVal `json:",omitempty"`
+ Memory LimitVal64 `json:",omitempty"`
+}
+
+func (l *ResourceLimits) IsDefault() bool {
+ if l == nil {
+ return true
+ }
+
+ if l.Streams == DefaultLimit &&
+ l.StreamsInbound == DefaultLimit &&
+ l.StreamsOutbound == DefaultLimit &&
+ l.Conns == DefaultLimit &&
+ l.ConnsInbound == DefaultLimit &&
+ l.ConnsOutbound == DefaultLimit &&
+ l.FD == DefaultLimit &&
+ l.Memory == DefaultLimit64 {
+ return true
+ }
+ return false
+}
+
+func (l *ResourceLimits) ToMaybeNilPtr() *ResourceLimits {
+ if l.IsDefault() {
+ return nil
+ }
+ return l
+}
+
+// Apply overwrites all unset (default) limits with the corresponding values of l2.
+func (l *ResourceLimits) Apply(l2 ResourceLimits) {
+ if l.Streams == DefaultLimit {
+ l.Streams = l2.Streams
+ }
+ if l.StreamsInbound == DefaultLimit {
+ l.StreamsInbound = l2.StreamsInbound
+ }
+ if l.StreamsOutbound == DefaultLimit {
+ l.StreamsOutbound = l2.StreamsOutbound
+ }
+ if l.Conns == DefaultLimit {
+ l.Conns = l2.Conns
+ }
+ if l.ConnsInbound == DefaultLimit {
+ l.ConnsInbound = l2.ConnsInbound
+ }
+ if l.ConnsOutbound == DefaultLimit {
+ l.ConnsOutbound = l2.ConnsOutbound
+ }
+ if l.FD == DefaultLimit {
+ l.FD = l2.FD
+ }
+ if l.Memory == DefaultLimit64 {
+ l.Memory = l2.Memory
+ }
+}
+
+func (l *ResourceLimits) Build(defaults Limit) BaseLimit {
+ if l == nil {
+ return BaseLimit{
+ Streams: defaults.GetStreamTotalLimit(),
+ StreamsInbound: defaults.GetStreamLimit(network.DirInbound),
+ StreamsOutbound: defaults.GetStreamLimit(network.DirOutbound),
+ Conns: defaults.GetConnTotalLimit(),
+ ConnsInbound: defaults.GetConnLimit(network.DirInbound),
+ ConnsOutbound: defaults.GetConnLimit(network.DirOutbound),
+ FD: defaults.GetFDLimit(),
+ Memory: defaults.GetMemoryLimit(),
+ }
+ }
+
+ return BaseLimit{
+ Streams: l.Streams.Build(defaults.GetStreamTotalLimit()),
+ StreamsInbound: l.StreamsInbound.Build(defaults.GetStreamLimit(network.DirInbound)),
+ StreamsOutbound: l.StreamsOutbound.Build(defaults.GetStreamLimit(network.DirOutbound)),
+ Conns: l.Conns.Build(defaults.GetConnTotalLimit()),
+ ConnsInbound: l.ConnsInbound.Build(defaults.GetConnLimit(network.DirInbound)),
+ ConnsOutbound: l.ConnsOutbound.Build(defaults.GetConnLimit(network.DirOutbound)),
+ FD: l.FD.Build(defaults.GetFDLimit()),
+ Memory: l.Memory.Build(defaults.GetMemoryLimit()),
+ }
+}
+
+type PartialLimitConfig struct {
+ System ResourceLimits `json:",omitempty"`
+ Transient ResourceLimits `json:",omitempty"`
+
+ // Limits that are applied to resources with an allowlisted multiaddr.
+ // These will only be used if the normal System & Transient limits are
+ // reached.
+ AllowlistedSystem ResourceLimits `json:",omitempty"`
+ AllowlistedTransient ResourceLimits `json:",omitempty"`
+
+ ServiceDefault ResourceLimits `json:",omitempty"`
+ Service map[string]ResourceLimits `json:",omitempty"`
+
+ ServicePeerDefault ResourceLimits `json:",omitempty"`
+ ServicePeer map[string]ResourceLimits `json:",omitempty"`
+
+ ProtocolDefault ResourceLimits `json:",omitempty"`
+ Protocol map[protocol.ID]ResourceLimits `json:",omitempty"`
+
+ ProtocolPeerDefault ResourceLimits `json:",omitempty"`
+ ProtocolPeer map[protocol.ID]ResourceLimits `json:",omitempty"`
+
+ PeerDefault ResourceLimits `json:",omitempty"`
+ Peer map[peer.ID]ResourceLimits `json:",omitempty"`
+
+ Conn ResourceLimits `json:",omitempty"`
+ Stream ResourceLimits `json:",omitempty"`
+}
+
+func (cfg *PartialLimitConfig) MarshalJSON() ([]byte, error) {
+ // we want to marshal the encoded peer id
+ encodedPeerMap := make(map[string]ResourceLimits, len(cfg.Peer))
+ for p, v := range cfg.Peer {
+ encodedPeerMap[p.String()] = v
+ }
+
+ type Alias PartialLimitConfig
+ return json.Marshal(&struct {
+ *Alias
+ // String so we can have the properly marshalled peer id
+ Peer map[string]ResourceLimits `json:",omitempty"`
+
+ // The rest of the fields as pointers so that we omit empty values in the serialized result
+ System *ResourceLimits `json:",omitempty"`
+ Transient *ResourceLimits `json:",omitempty"`
+ AllowlistedSystem *ResourceLimits `json:",omitempty"`
+ AllowlistedTransient *ResourceLimits `json:",omitempty"`
+
+ ServiceDefault *ResourceLimits `json:",omitempty"`
+
+ ServicePeerDefault *ResourceLimits `json:",omitempty"`
+
+ ProtocolDefault *ResourceLimits `json:",omitempty"`
+
+ ProtocolPeerDefault *ResourceLimits `json:",omitempty"`
+
+ PeerDefault *ResourceLimits `json:",omitempty"`
+
+ Conn *ResourceLimits `json:",omitempty"`
+ Stream *ResourceLimits `json:",omitempty"`
+ }{
+ Alias: (*Alias)(cfg),
+ Peer: encodedPeerMap,
+
+ System: cfg.System.ToMaybeNilPtr(),
+ Transient: cfg.Transient.ToMaybeNilPtr(),
+ AllowlistedSystem: cfg.AllowlistedSystem.ToMaybeNilPtr(),
+ AllowlistedTransient: cfg.AllowlistedTransient.ToMaybeNilPtr(),
+ ServiceDefault: cfg.ServiceDefault.ToMaybeNilPtr(),
+ ServicePeerDefault: cfg.ServicePeerDefault.ToMaybeNilPtr(),
+ ProtocolDefault: cfg.ProtocolDefault.ToMaybeNilPtr(),
+ ProtocolPeerDefault: cfg.ProtocolPeerDefault.ToMaybeNilPtr(),
+ PeerDefault: cfg.PeerDefault.ToMaybeNilPtr(),
+ Conn: cfg.Conn.ToMaybeNilPtr(),
+ Stream: cfg.Stream.ToMaybeNilPtr(),
+ })
+}
+
+func applyResourceLimitsMap[K comparable](this *map[K]ResourceLimits, other map[K]ResourceLimits, fallbackDefault ResourceLimits) {
+ for k, l := range *this {
+ r := fallbackDefault
+ if l2, ok := other[k]; ok {
+ r = l2
+ }
+ l.Apply(r)
+ (*this)[k] = l
+ }
+ if *this == nil && other != nil {
+ *this = make(map[K]ResourceLimits)
+ }
+ for k, l := range other {
+ if _, ok := (*this)[k]; !ok {
+ (*this)[k] = l
+ }
+ }
+}
+
+func (cfg *PartialLimitConfig) Apply(c PartialLimitConfig) {
+ cfg.System.Apply(c.System)
+ cfg.Transient.Apply(c.Transient)
+ cfg.AllowlistedSystem.Apply(c.AllowlistedSystem)
+ cfg.AllowlistedTransient.Apply(c.AllowlistedTransient)
+ cfg.ServiceDefault.Apply(c.ServiceDefault)
+ cfg.ServicePeerDefault.Apply(c.ServicePeerDefault)
+ cfg.ProtocolDefault.Apply(c.ProtocolDefault)
+ cfg.ProtocolPeerDefault.Apply(c.ProtocolPeerDefault)
+ cfg.PeerDefault.Apply(c.PeerDefault)
+ cfg.Conn.Apply(c.Conn)
+ cfg.Stream.Apply(c.Stream)
+
+ applyResourceLimitsMap(&cfg.Service, c.Service, cfg.ServiceDefault)
+ applyResourceLimitsMap(&cfg.ServicePeer, c.ServicePeer, cfg.ServicePeerDefault)
+ applyResourceLimitsMap(&cfg.Protocol, c.Protocol, cfg.ProtocolDefault)
+ applyResourceLimitsMap(&cfg.ProtocolPeer, c.ProtocolPeer, cfg.ProtocolPeerDefault)
+ applyResourceLimitsMap(&cfg.Peer, c.Peer, cfg.PeerDefault)
+}
+
+func (cfg PartialLimitConfig) Build(defaults ConcreteLimitConfig) ConcreteLimitConfig {
+ out := defaults
+
+ out.system = cfg.System.Build(defaults.system)
+ out.transient = cfg.Transient.Build(defaults.transient)
+ out.allowlistedSystem = cfg.AllowlistedSystem.Build(defaults.allowlistedSystem)
+ out.allowlistedTransient = cfg.AllowlistedTransient.Build(defaults.allowlistedTransient)
+ out.serviceDefault = cfg.ServiceDefault.Build(defaults.serviceDefault)
+ out.servicePeerDefault = cfg.ServicePeerDefault.Build(defaults.servicePeerDefault)
+ out.protocolDefault = cfg.ProtocolDefault.Build(defaults.protocolDefault)
+ out.protocolPeerDefault = cfg.ProtocolPeerDefault.Build(defaults.protocolPeerDefault)
+ out.peerDefault = cfg.PeerDefault.Build(defaults.peerDefault)
+ out.conn = cfg.Conn.Build(defaults.conn)
+ out.stream = cfg.Stream.Build(defaults.stream)
+
+ out.service = buildMapWithDefault(cfg.Service, defaults.service, out.serviceDefault)
+ out.servicePeer = buildMapWithDefault(cfg.ServicePeer, defaults.servicePeer, out.servicePeerDefault)
+ out.protocol = buildMapWithDefault(cfg.Protocol, defaults.protocol, out.protocolDefault)
+ out.protocolPeer = buildMapWithDefault(cfg.ProtocolPeer, defaults.protocolPeer, out.protocolPeerDefault)
+ out.peer = buildMapWithDefault(cfg.Peer, defaults.peer, out.peerDefault)
+
+ return out
+}
+
+func buildMapWithDefault[K comparable](definedLimits map[K]ResourceLimits, defaults map[K]BaseLimit, fallbackDefault BaseLimit) map[K]BaseLimit {
+ if definedLimits == nil && defaults == nil {
+ return nil
+ }
+
+ out := make(map[K]BaseLimit)
+ for k, l := range defaults {
+ out[k] = l
+ }
+
+ for k, l := range definedLimits {
+ if defaultForKey, ok := out[k]; ok {
+ out[k] = l.Build(defaultForKey)
+ } else {
+ out[k] = l.Build(fallbackDefault)
+ }
+ }
+
+ return out
+}
+
+// ConcreteLimitConfig is similar to PartialLimitConfig, but all values are defined.
+// There is no unset "default" value. Commonly constructed by calling
+// PartialLimitConfig.Build(rcmgr.DefaultLimits.AutoScale())
+type ConcreteLimitConfig struct {
+ system BaseLimit
+ transient BaseLimit
+
+ // Limits that are applied to resources with an allowlisted multiaddr.
+ // These will only be used if the normal System & Transient limits are
+ // reached.
+ allowlistedSystem BaseLimit
+ allowlistedTransient BaseLimit
+
+ serviceDefault BaseLimit
+ service map[string]BaseLimit
+
+ servicePeerDefault BaseLimit
+ servicePeer map[string]BaseLimit
+
+ protocolDefault BaseLimit
+ protocol map[protocol.ID]BaseLimit
+
+ protocolPeerDefault BaseLimit
+ protocolPeer map[protocol.ID]BaseLimit
+
+ peerDefault BaseLimit
+ peer map[peer.ID]BaseLimit
+
+ conn BaseLimit
+ stream BaseLimit
+}
+
+func resourceLimitsMapFromBaseLimitMap[K comparable](baseLimitMap map[K]BaseLimit) map[K]ResourceLimits {
+ if baseLimitMap == nil {
+ return nil
+ }
+
+ out := make(map[K]ResourceLimits)
+ for k, l := range baseLimitMap {
+ out[k] = l.ToResourceLimits()
+ }
+
+ return out
+}
+
+// ToPartialLimitConfig converts a ConcreteLimitConfig to a PartialLimitConfig.
+// The returned PartialLimitConfig will have no default values.
+func (cfg ConcreteLimitConfig) ToPartialLimitConfig() PartialLimitConfig {
+ return PartialLimitConfig{
+ System: cfg.system.ToResourceLimits(),
+ Transient: cfg.transient.ToResourceLimits(),
+ AllowlistedSystem: cfg.allowlistedSystem.ToResourceLimits(),
+ AllowlistedTransient: cfg.allowlistedTransient.ToResourceLimits(),
+ ServiceDefault: cfg.serviceDefault.ToResourceLimits(),
+ Service: resourceLimitsMapFromBaseLimitMap(cfg.service),
+ ServicePeerDefault: cfg.servicePeerDefault.ToResourceLimits(),
+ ServicePeer: resourceLimitsMapFromBaseLimitMap(cfg.servicePeer),
+ ProtocolDefault: cfg.protocolDefault.ToResourceLimits(),
+ Protocol: resourceLimitsMapFromBaseLimitMap(cfg.protocol),
+ ProtocolPeerDefault: cfg.protocolPeerDefault.ToResourceLimits(),
+ ProtocolPeer: resourceLimitsMapFromBaseLimitMap(cfg.protocolPeer),
+ PeerDefault: cfg.peerDefault.ToResourceLimits(),
+ Peer: resourceLimitsMapFromBaseLimitMap(cfg.peer),
+ Conn: cfg.conn.ToResourceLimits(),
+ Stream: cfg.stream.ToResourceLimits(),
+ }
+}
+
+// Scale scales up a limit configuration.
+// memory is the amount of memory that the stack is allowed to consume,
+// for a dedicated node it's recommended to use 1/8 of the installed system memory.
+// If memory is smaller than 128 MB, the base configuration will be used.
+func (cfg *ScalingLimitConfig) Scale(memory int64, numFD int) ConcreteLimitConfig {
+ lc := ConcreteLimitConfig{
+ system: scale(cfg.SystemBaseLimit, cfg.SystemLimitIncrease, memory, numFD),
+ transient: scale(cfg.TransientBaseLimit, cfg.TransientLimitIncrease, memory, numFD),
+ allowlistedSystem: scale(cfg.AllowlistedSystemBaseLimit, cfg.AllowlistedSystemLimitIncrease, memory, numFD),
+ allowlistedTransient: scale(cfg.AllowlistedTransientBaseLimit, cfg.AllowlistedTransientLimitIncrease, memory, numFD),
+ serviceDefault: scale(cfg.ServiceBaseLimit, cfg.ServiceLimitIncrease, memory, numFD),
+ servicePeerDefault: scale(cfg.ServicePeerBaseLimit, cfg.ServicePeerLimitIncrease, memory, numFD),
+ protocolDefault: scale(cfg.ProtocolBaseLimit, cfg.ProtocolLimitIncrease, memory, numFD),
+ protocolPeerDefault: scale(cfg.ProtocolPeerBaseLimit, cfg.ProtocolPeerLimitIncrease, memory, numFD),
+ peerDefault: scale(cfg.PeerBaseLimit, cfg.PeerLimitIncrease, memory, numFD),
+ conn: scale(cfg.ConnBaseLimit, cfg.ConnLimitIncrease, memory, numFD),
+		stream:               scale(cfg.StreamBaseLimit, cfg.StreamLimitIncrease, memory, numFD),
+ }
+ if cfg.ServiceLimits != nil {
+ lc.service = make(map[string]BaseLimit)
+ for svc, l := range cfg.ServiceLimits {
+ lc.service[svc] = scale(l.BaseLimit, l.BaseLimitIncrease, memory, numFD)
+ }
+ }
+ if cfg.ProtocolLimits != nil {
+ lc.protocol = make(map[protocol.ID]BaseLimit)
+ for proto, l := range cfg.ProtocolLimits {
+ lc.protocol[proto] = scale(l.BaseLimit, l.BaseLimitIncrease, memory, numFD)
+ }
+ }
+ if cfg.PeerLimits != nil {
+ lc.peer = make(map[peer.ID]BaseLimit)
+ for p, l := range cfg.PeerLimits {
+ lc.peer[p] = scale(l.BaseLimit, l.BaseLimitIncrease, memory, numFD)
+ }
+ }
+ if cfg.ServicePeerLimits != nil {
+ lc.servicePeer = make(map[string]BaseLimit)
+ for svc, l := range cfg.ServicePeerLimits {
+ lc.servicePeer[svc] = scale(l.BaseLimit, l.BaseLimitIncrease, memory, numFD)
+ }
+ }
+ if cfg.ProtocolPeerLimits != nil {
+ lc.protocolPeer = make(map[protocol.ID]BaseLimit)
+ for p, l := range cfg.ProtocolPeerLimits {
+ lc.protocolPeer[p] = scale(l.BaseLimit, l.BaseLimitIncrease, memory, numFD)
+ }
+ }
+ return lc
+}
+
+func (cfg *ScalingLimitConfig) AutoScale() ConcreteLimitConfig {
+ return cfg.Scale(
+ int64(memory.TotalMemory())/8,
+ getNumFDs()/2,
+ )
+}
+
+func scale(base BaseLimit, inc BaseLimitIncrease, memory int64, numFD int) BaseLimit {
+	// mebibytesAvailable represents how many MiBs we're allowed to use. Used to
+	// scale the limits. If this is 128 MiB or less we leave it at 0 and just use
+	// the base amounts.
+ var mebibytesAvailable int
+ if memory > 128<<20 {
+ mebibytesAvailable = int((memory) >> 20)
+ }
+ l := BaseLimit{
+ StreamsInbound: base.StreamsInbound + (inc.StreamsInbound*mebibytesAvailable)>>10,
+ StreamsOutbound: base.StreamsOutbound + (inc.StreamsOutbound*mebibytesAvailable)>>10,
+ Streams: base.Streams + (inc.Streams*mebibytesAvailable)>>10,
+ ConnsInbound: base.ConnsInbound + (inc.ConnsInbound*mebibytesAvailable)>>10,
+ ConnsOutbound: base.ConnsOutbound + (inc.ConnsOutbound*mebibytesAvailable)>>10,
+ Conns: base.Conns + (inc.Conns*mebibytesAvailable)>>10,
+ Memory: base.Memory + (inc.Memory*int64(mebibytesAvailable))>>10,
+ FD: base.FD,
+ }
+ if inc.FDFraction > 0 && numFD > 0 {
+ l.FD = int(inc.FDFraction * float64(numFD))
+ if l.FD < base.FD {
+ // Use at least the base amount
+ l.FD = base.FD
+ }
+ }
+ return l
+}
+
+// DefaultLimits are the limits used by the default limiter constructors.
+var DefaultLimits = ScalingLimitConfig{
+ SystemBaseLimit: BaseLimit{
+ ConnsInbound: 64,
+ ConnsOutbound: 128,
+ Conns: 128,
+ StreamsInbound: 64 * 16,
+ StreamsOutbound: 128 * 16,
+ Streams: 128 * 16,
+ Memory: 128 << 20,
+ FD: 256,
+ },
+
+ SystemLimitIncrease: BaseLimitIncrease{
+ ConnsInbound: 64,
+ ConnsOutbound: 128,
+ Conns: 128,
+ StreamsInbound: 64 * 16,
+ StreamsOutbound: 128 * 16,
+ Streams: 128 * 16,
+ Memory: 1 << 30,
+ FDFraction: 1,
+ },
+
+ TransientBaseLimit: BaseLimit{
+ ConnsInbound: 32,
+ ConnsOutbound: 64,
+ Conns: 64,
+ StreamsInbound: 128,
+ StreamsOutbound: 256,
+ Streams: 256,
+ Memory: 32 << 20,
+ FD: 64,
+ },
+
+ TransientLimitIncrease: BaseLimitIncrease{
+ ConnsInbound: 16,
+ ConnsOutbound: 32,
+ Conns: 32,
+ StreamsInbound: 128,
+ StreamsOutbound: 256,
+ Streams: 256,
+ Memory: 128 << 20,
+ FDFraction: 0.25,
+ },
+
+ // Setting the allowlisted limits to be the same as the normal limits. The
+ // allowlist only activates when you reach your normal system/transient
+ // limits. So it's okay if these limits err on the side of being too big,
+ // since most of the time you won't even use any of these. Tune these down
+ // if you want to manage your resources against an allowlisted endpoint.
+ AllowlistedSystemBaseLimit: BaseLimit{
+ ConnsInbound: 64,
+ ConnsOutbound: 128,
+ Conns: 128,
+ StreamsInbound: 64 * 16,
+ StreamsOutbound: 128 * 16,
+ Streams: 128 * 16,
+ Memory: 128 << 20,
+ FD: 256,
+ },
+
+ AllowlistedSystemLimitIncrease: BaseLimitIncrease{
+ ConnsInbound: 64,
+ ConnsOutbound: 128,
+ Conns: 128,
+ StreamsInbound: 64 * 16,
+ StreamsOutbound: 128 * 16,
+ Streams: 128 * 16,
+ Memory: 1 << 30,
+ FDFraction: 1,
+ },
+
+ AllowlistedTransientBaseLimit: BaseLimit{
+ ConnsInbound: 32,
+ ConnsOutbound: 64,
+ Conns: 64,
+ StreamsInbound: 128,
+ StreamsOutbound: 256,
+ Streams: 256,
+ Memory: 32 << 20,
+ FD: 64,
+ },
+
+ AllowlistedTransientLimitIncrease: BaseLimitIncrease{
+ ConnsInbound: 16,
+ ConnsOutbound: 32,
+ Conns: 32,
+ StreamsInbound: 128,
+ StreamsOutbound: 256,
+ Streams: 256,
+ Memory: 128 << 20,
+ FDFraction: 0.25,
+ },
+
+ ServiceBaseLimit: BaseLimit{
+ StreamsInbound: 1024,
+ StreamsOutbound: 4096,
+ Streams: 4096,
+ Memory: 64 << 20,
+ },
+
+ ServiceLimitIncrease: BaseLimitIncrease{
+ StreamsInbound: 512,
+ StreamsOutbound: 2048,
+ Streams: 2048,
+ Memory: 128 << 20,
+ },
+
+ ServicePeerBaseLimit: BaseLimit{
+ StreamsInbound: 128,
+ StreamsOutbound: 256,
+ Streams: 256,
+ Memory: 16 << 20,
+ },
+
+ ServicePeerLimitIncrease: BaseLimitIncrease{
+ StreamsInbound: 4,
+ StreamsOutbound: 8,
+ Streams: 8,
+ Memory: 4 << 20,
+ },
+
+ ProtocolBaseLimit: BaseLimit{
+ StreamsInbound: 512,
+ StreamsOutbound: 2048,
+ Streams: 2048,
+ Memory: 64 << 20,
+ },
+
+ ProtocolLimitIncrease: BaseLimitIncrease{
+ StreamsInbound: 256,
+ StreamsOutbound: 512,
+ Streams: 512,
+ Memory: 164 << 20,
+ },
+
+ ProtocolPeerBaseLimit: BaseLimit{
+ StreamsInbound: 64,
+ StreamsOutbound: 128,
+ Streams: 256,
+ Memory: 16 << 20,
+ },
+
+ ProtocolPeerLimitIncrease: BaseLimitIncrease{
+ StreamsInbound: 4,
+ StreamsOutbound: 8,
+ Streams: 16,
+ Memory: 4,
+ },
+
+ PeerBaseLimit: BaseLimit{
+ // 8 for now so that it matches the number of concurrent dials we may do
+ // in swarm_dial.go. With future smart dialing work we should bring this
+ // down
+ ConnsInbound: 8,
+ ConnsOutbound: 8,
+ Conns: 8,
+ StreamsInbound: 256,
+ StreamsOutbound: 512,
+ Streams: 512,
+ Memory: 64 << 20,
+ FD: 4,
+ },
+
+ PeerLimitIncrease: BaseLimitIncrease{
+ StreamsInbound: 128,
+ StreamsOutbound: 256,
+ Streams: 256,
+ Memory: 128 << 20,
+ FDFraction: 1.0 / 64,
+ },
+
+ ConnBaseLimit: BaseLimit{
+ ConnsInbound: 1,
+ ConnsOutbound: 1,
+ Conns: 1,
+ FD: 1,
+ Memory: 32 << 20,
+ },
+
+ StreamBaseLimit: BaseLimit{
+ StreamsInbound: 1,
+ StreamsOutbound: 1,
+ Streams: 1,
+ Memory: 16 << 20,
+ },
+}
+
+var infiniteBaseLimit = BaseLimit{
+ Streams: math.MaxInt,
+ StreamsInbound: math.MaxInt,
+ StreamsOutbound: math.MaxInt,
+ Conns: math.MaxInt,
+ ConnsInbound: math.MaxInt,
+ ConnsOutbound: math.MaxInt,
+ FD: math.MaxInt,
+ Memory: math.MaxInt64,
+}
+
+// InfiniteLimits is a limiter configuration that uses unlimited limits, thus effectively not limiting anything.
+// Keep in mind that the operating system limits the number of file descriptors that an application can use.
+var InfiniteLimits = ConcreteLimitConfig{
+ system: infiniteBaseLimit,
+ transient: infiniteBaseLimit,
+ allowlistedSystem: infiniteBaseLimit,
+ allowlistedTransient: infiniteBaseLimit,
+ serviceDefault: infiniteBaseLimit,
+ servicePeerDefault: infiniteBaseLimit,
+ protocolDefault: infiniteBaseLimit,
+ protocolPeerDefault: infiniteBaseLimit,
+ peerDefault: infiniteBaseLimit,
+ conn: infiniteBaseLimit,
+ stream: infiniteBaseLimit,
+}
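A sketch of the intended scaling flow: start from `DefaultLimits`, layer a per-service override on top, and produce concrete limits either for an explicit budget with `Scale` or from the machine with `AutoScale`. The service name and numbers here are illustrative:

```go
package main

import (
	"fmt"

	rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
)

func main() {
	// Copy the package defaults and add a per-service override on the copy.
	cfg := rcmgr.DefaultLimits
	cfg.AddServiceLimit(
		"my-service", // illustrative service name
		rcmgr.BaseLimit{Streams: 128, Memory: 32 << 20},
		rcmgr.BaseLimitIncrease{Streams: 64, Memory: 16 << 20},
	)

	// Scale for a node that grants libp2p 1 GiB of memory and 512 FDs...
	limits := cfg.Scale(1<<30, 512)
	// ...or derive both from the machine (1/8 of system memory, half the FD limit).
	auto := cfg.AutoScale()

	fmt.Printf("%+v\n%+v\n", limits, auto)
}
```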
diff --git a/p2p/host/resource-manager/limit_test.go b/p2p/host/resource-manager/limit_test.go
new file mode 100644
index 0000000000..8d9f3919e1
--- /dev/null
+++ b/p2p/host/resource-manager/limit_test.go
@@ -0,0 +1,249 @@
+package rcmgr
+
+import (
+ "encoding/json"
+ "math"
+ "runtime"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestFileDescriptorCounting(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("can't read file descriptors on Windows")
+ }
+ n := getNumFDs()
+ require.NotZero(t, n)
+ require.Less(t, n, int(1e7))
+}
+
+func TestScaling(t *testing.T) {
+ base := BaseLimit{
+ Streams: 100,
+ StreamsInbound: 200,
+ StreamsOutbound: 400,
+ Conns: 10,
+ ConnsInbound: 20,
+ ConnsOutbound: 40,
+ FD: 1,
+ Memory: 1 << 20,
+ }
+
+ t.Run("no scaling if no increase is defined", func(t *testing.T) {
+ cfg := ScalingLimitConfig{ServiceBaseLimit: base}
+ scaled := cfg.Scale(8<<30, 100)
+ require.Equal(t, base, scaled.serviceDefault)
+ })
+
+ t.Run("scaling", func(t *testing.T) {
+ cfg := ScalingLimitConfig{
+ TransientBaseLimit: base,
+ TransientLimitIncrease: BaseLimitIncrease{
+ Streams: 1,
+ StreamsInbound: 2,
+ StreamsOutbound: 3,
+ Conns: 4,
+ ConnsInbound: 5,
+ ConnsOutbound: 6,
+ Memory: 7,
+ FDFraction: 0.5,
+ },
+ }
+ scaled := cfg.Scale(128<<20+4<<30, 1000)
+ require.Equal(t, 500, scaled.transient.FD)
+ require.Equal(t, base.Streams+4, scaled.transient.Streams)
+ require.Equal(t, base.StreamsInbound+4*2, scaled.transient.StreamsInbound)
+ require.Equal(t, base.StreamsOutbound+4*3, scaled.transient.StreamsOutbound)
+ require.Equal(t, base.Conns+4*4, scaled.transient.Conns)
+ require.Equal(t, base.ConnsInbound+4*5, scaled.transient.ConnsInbound)
+ require.Equal(t, base.ConnsOutbound+4*6, scaled.transient.ConnsOutbound)
+ require.Equal(t, base.Memory+4*7, scaled.transient.Memory)
+ })
+
+ t.Run("scaling and using the base amounts", func(t *testing.T) {
+ cfg := ScalingLimitConfig{
+ TransientBaseLimit: base,
+ TransientLimitIncrease: BaseLimitIncrease{
+ Streams: 1,
+ StreamsInbound: 2,
+ StreamsOutbound: 3,
+ Conns: 4,
+ ConnsInbound: 5,
+ ConnsOutbound: 6,
+ Memory: 7,
+ FDFraction: 0.01,
+ },
+ }
+ scaled := cfg.Scale(1, 10)
+ require.Equal(t, 1, scaled.transient.FD)
+ require.Equal(t, base.Streams, scaled.transient.Streams)
+ require.Equal(t, base.StreamsInbound, scaled.transient.StreamsInbound)
+ require.Equal(t, base.StreamsOutbound, scaled.transient.StreamsOutbound)
+ require.Equal(t, base.Conns, scaled.transient.Conns)
+ require.Equal(t, base.ConnsInbound, scaled.transient.ConnsInbound)
+ require.Equal(t, base.ConnsOutbound, scaled.transient.ConnsOutbound)
+ require.Equal(t, base.Memory, scaled.transient.Memory)
+ })
+
+ t.Run("scaling limits in maps", func(t *testing.T) {
+ cfg := ScalingLimitConfig{
+ ServiceLimits: map[string]baseLimitConfig{
+ "A": {
+ BaseLimit: BaseLimit{Streams: 10, Memory: 100, FD: 9},
+ },
+ "B": {
+ BaseLimit: BaseLimit{Streams: 20, Memory: 200, FD: 10},
+ BaseLimitIncrease: BaseLimitIncrease{Streams: 2, Memory: 3, FDFraction: 0.4},
+ },
+ },
+ }
+ scaled := cfg.Scale(128<<20+4<<30, 1000)
+
+ require.Len(t, scaled.service, 2)
+ require.Contains(t, scaled.service, "A")
+ require.Equal(t, 10, scaled.service["A"].Streams)
+ require.Equal(t, int64(100), scaled.service["A"].Memory)
+ require.Equal(t, 9, scaled.service["A"].FD)
+
+ require.Contains(t, scaled.service, "B")
+ require.Equal(t, 20+4*2, scaled.service["B"].Streams)
+ require.Equal(t, int64(200+4*3), scaled.service["B"].Memory)
+ require.Equal(t, 400, scaled.service["B"].FD)
+ })
+}
+
+func TestReadmeExample(t *testing.T) {
+ scalingLimits := ScalingLimitConfig{
+ SystemBaseLimit: BaseLimit{
+ ConnsInbound: 64,
+ ConnsOutbound: 128,
+ Conns: 128,
+ StreamsInbound: 512,
+ StreamsOutbound: 1024,
+ Streams: 1024,
+ Memory: 128 << 20,
+ FD: 256,
+ },
+ SystemLimitIncrease: BaseLimitIncrease{
+ ConnsInbound: 32,
+ ConnsOutbound: 64,
+ Conns: 64,
+ StreamsInbound: 256,
+ StreamsOutbound: 512,
+ Streams: 512,
+ Memory: 256 << 20,
+ FDFraction: 1,
+ },
+ }
+
+ limitConf := scalingLimits.Scale(4<<30, 1000)
+
+ require.Equal(t, 384, limitConf.system.Conns)
+ require.Equal(t, 1000, limitConf.system.FD)
+}
+
+func TestJSONMarshalling(t *testing.T) {
+ bl := ResourceLimits{
+ Streams: DefaultLimit,
+ StreamsInbound: 10,
+ StreamsOutbound: BlockAllLimit,
+ Conns: 10,
+ // ConnsInbound: DefaultLimit,
+ ConnsOutbound: Unlimited,
+ Memory: Unlimited64,
+ }
+
+ jsonEncoded, err := json.Marshal(bl)
+ require.NoError(t, err)
+ require.Equal(t, `{"StreamsInbound":10,"StreamsOutbound":"blockAll","Conns":10,"ConnsOutbound":"unlimited","Memory":"unlimited"}`, string(jsonEncoded))
+
+ // Roundtrip
+ var blDecoded ResourceLimits
+ err = json.Unmarshal(jsonEncoded, &blDecoded)
+ require.NoError(t, err)
+
+ require.Equal(t, bl, blDecoded)
+}
+
+func TestJSONRoundTripInt64(t *testing.T) {
+ bl := ResourceLimits{
+ Memory: math.MaxInt64,
+ }
+
+ jsonEncoded, err := json.Marshal(bl)
+ require.NoError(t, err)
+
+ require.Equal(t, `{"Memory":"9223372036854775807"}`, string(jsonEncoded))
+
+ // Roundtrip
+ var blDecoded ResourceLimits
+ err = json.Unmarshal(jsonEncoded, &blDecoded)
+ require.NoError(t, err)
+
+ require.Equal(t, bl, blDecoded)
+}
+
+func TestRoundTripFromConcreteAndBack(t *testing.T) {
+ l := PartialLimitConfig{
+ System: ResourceLimits{
+ Conns: 1234,
+ Memory: 54321,
+ },
+
+ ServiceDefault: ResourceLimits{
+ Conns: 2,
+ },
+
+ Service: map[string]ResourceLimits{
+ "foo": {
+ Conns: 3,
+ },
+ },
+ }
+
+ concrete := l.Build(InfiniteLimits)
+
+ // Roundtrip
+ fromConcrete := concrete.ToPartialLimitConfig().Build(InfiniteLimits)
+ require.Equal(t, concrete, fromConcrete)
+}
+
+func TestSerializeJSON(t *testing.T) {
+ bl := BaseLimit{
+ Streams: 10,
+ }
+
+ out, err := json.Marshal(bl)
+ require.NoError(t, err)
+ require.Equal(t, "{\"Streams\":10}", string(out))
+
+ bli := BaseLimitIncrease{
+ Streams: 10,
+ }
+
+ out, err = json.Marshal(bli)
+ require.NoError(t, err)
+ require.Equal(t, "{\"Streams\":10}", string(out))
+}
+
+func TestWhatIsZeroInResourceLimits(t *testing.T) {
+ l := ResourceLimits{
+ Streams: BlockAllLimit,
+ Memory: BlockAllLimit64,
+ }
+
+ out, err := json.Marshal(l)
+ require.NoError(t, err)
+ require.Equal(t, `{"Streams":"blockAll","Memory":"blockAll"}`, string(out))
+
+ l2 := ResourceLimits{}
+ err = json.Unmarshal([]byte(`{"Streams":0,"Memory":0}`), &l2)
+ require.NoError(t, err)
+ require.Equal(t, l, l2)
+
+ l3 := ResourceLimits{}
+ err = json.Unmarshal([]byte(`{"Streams":0,"Memory":"0"}`), &l3)
+ require.NoError(t, err)
+ require.Equal(t, l, l3)
+}
diff --git a/p2p/host/resource-manager/metrics.go b/p2p/host/resource-manager/metrics.go
new file mode 100644
index 0000000000..2f9e6b31b7
--- /dev/null
+++ b/p2p/host/resource-manager/metrics.go
@@ -0,0 +1,168 @@
+package rcmgr
+
+import (
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/protocol"
+)
+
+// MetricsReporter is an interface for collecting metrics from resource manager actions
+type MetricsReporter interface {
+ // AllowConn is invoked when opening a connection is allowed
+ AllowConn(dir network.Direction, usefd bool)
+ // BlockConn is invoked when opening a connection is blocked
+ BlockConn(dir network.Direction, usefd bool)
+
+ // AllowStream is invoked when opening a stream is allowed
+ AllowStream(p peer.ID, dir network.Direction)
+ // BlockStream is invoked when opening a stream is blocked
+ BlockStream(p peer.ID, dir network.Direction)
+
+	// AllowPeer is invoked when attaching a connection to a peer is allowed
+	AllowPeer(p peer.ID)
+	// BlockPeer is invoked when attaching a connection to a peer is blocked
+ BlockPeer(p peer.ID)
+
+ // AllowProtocol is invoked when setting the protocol for a stream is allowed
+ AllowProtocol(proto protocol.ID)
+ // BlockProtocol is invoked when setting the protocol for a stream is blocked
+ BlockProtocol(proto protocol.ID)
+ // BlockProtocolPeer is invoked when setting the protocol for a stream is blocked at the per protocol peer scope
+ BlockProtocolPeer(proto protocol.ID, p peer.ID)
+
+	// AllowService is invoked when setting the service for a stream is allowed
+	AllowService(svc string)
+	// BlockService is invoked when setting the service for a stream is blocked
+ BlockService(svc string)
+ // BlockServicePeer is invoked when setting the service for a stream is blocked at the per service peer scope
+ BlockServicePeer(svc string, p peer.ID)
+
+ // AllowMemory is invoked when a memory reservation is allowed
+ AllowMemory(size int)
+ // BlockMemory is invoked when a memory reservation is blocked
+ BlockMemory(size int)
+}
+
+type metrics struct {
+ reporter MetricsReporter
+}
+
+// WithMetrics is a resource manager option to enable metrics collection
+func WithMetrics(reporter MetricsReporter) Option {
+ return func(r *resourceManager) error {
+ r.metrics = &metrics{reporter: reporter}
+ return nil
+ }
+}
+
+func (m *metrics) AllowConn(dir network.Direction, usefd bool) {
+ if m == nil {
+ return
+ }
+
+ m.reporter.AllowConn(dir, usefd)
+}
+
+func (m *metrics) BlockConn(dir network.Direction, usefd bool) {
+ if m == nil {
+ return
+ }
+
+ m.reporter.BlockConn(dir, usefd)
+}
+
+func (m *metrics) AllowStream(p peer.ID, dir network.Direction) {
+ if m == nil {
+ return
+ }
+
+ m.reporter.AllowStream(p, dir)
+}
+
+func (m *metrics) BlockStream(p peer.ID, dir network.Direction) {
+ if m == nil {
+ return
+ }
+
+ m.reporter.BlockStream(p, dir)
+}
+
+func (m *metrics) AllowPeer(p peer.ID) {
+ if m == nil {
+ return
+ }
+
+ m.reporter.AllowPeer(p)
+}
+
+func (m *metrics) BlockPeer(p peer.ID) {
+ if m == nil {
+ return
+ }
+
+ m.reporter.BlockPeer(p)
+}
+
+func (m *metrics) AllowProtocol(proto protocol.ID) {
+ if m == nil {
+ return
+ }
+
+ m.reporter.AllowProtocol(proto)
+}
+
+func (m *metrics) BlockProtocol(proto protocol.ID) {
+ if m == nil {
+ return
+ }
+
+ m.reporter.BlockProtocol(proto)
+}
+
+func (m *metrics) BlockProtocolPeer(proto protocol.ID, p peer.ID) {
+ if m == nil {
+ return
+ }
+
+ m.reporter.BlockProtocolPeer(proto, p)
+}
+
+func (m *metrics) AllowService(svc string) {
+ if m == nil {
+ return
+ }
+
+ m.reporter.AllowService(svc)
+}
+
+func (m *metrics) BlockService(svc string) {
+ if m == nil {
+ return
+ }
+
+ m.reporter.BlockService(svc)
+}
+
+func (m *metrics) BlockServicePeer(svc string, p peer.ID) {
+ if m == nil {
+ return
+ }
+
+ m.reporter.BlockServicePeer(svc, p)
+}
+
+func (m *metrics) AllowMemory(size int) {
+ if m == nil {
+ return
+ }
+
+ m.reporter.AllowMemory(size)
+}
+
+func (m *metrics) BlockMemory(size int) {
+ if m == nil {
+ return
+ }
+
+ m.reporter.BlockMemory(size)
+}
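A minimal sketch of a custom `MetricsReporter` wired in via `WithMetrics`. Only blocked connections are counted and every other callback is a no-op; `NewFixedLimiter` is assumed to be the fixed-limit `Limiter` constructor defined elsewhere in this package:

```go
package main

import (
	"sync/atomic"

	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/protocol"
	rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
)

// blockCounter counts blocked connections; every other callback is a no-op.
type blockCounter struct{ blockedConns atomic.Int64 }

var _ rcmgr.MetricsReporter = (*blockCounter)(nil)

func (c *blockCounter) AllowConn(network.Direction, bool)      {}
func (c *blockCounter) BlockConn(network.Direction, bool)      { c.blockedConns.Add(1) }
func (c *blockCounter) AllowStream(peer.ID, network.Direction) {}
func (c *blockCounter) BlockStream(peer.ID, network.Direction) {}
func (c *blockCounter) AllowPeer(peer.ID)                      {}
func (c *blockCounter) BlockPeer(peer.ID)                      {}
func (c *blockCounter) AllowProtocol(protocol.ID)              {}
func (c *blockCounter) BlockProtocol(protocol.ID)              {}
func (c *blockCounter) BlockProtocolPeer(protocol.ID, peer.ID) {}
func (c *blockCounter) AllowService(string)                    {}
func (c *blockCounter) BlockService(string)                    {}
func (c *blockCounter) BlockServicePeer(string, peer.ID)       {}
func (c *blockCounter) AllowMemory(int)                        {}
func (c *blockCounter) BlockMemory(int)                        {}

func main() {
	// NewFixedLimiter is assumed here: the fixed-limit Limiter constructor
	// defined elsewhere in the package.
	limiter := rcmgr.NewFixedLimiter(rcmgr.DefaultLimits.AutoScale())
	rm, err := rcmgr.NewResourceManager(limiter, rcmgr.WithMetrics(&blockCounter{}))
	if err != nil {
		panic(err)
	}
	defer rm.Close()
}
```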
diff --git a/p2p/host/resource-manager/noalloc_test.go b/p2p/host/resource-manager/noalloc_test.go
new file mode 100644
index 0000000000..461a001f9c
--- /dev/null
+++ b/p2p/host/resource-manager/noalloc_test.go
@@ -0,0 +1,111 @@
+//go:build nocover
+
+package rcmgr
+
+import (
+ "math/rand"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/stretchr/testify/require"
+)
+
+func randomTraceEvt(rng *rand.Rand) TraceEvt {
+	// Possibly nonsensical combinations
+ typs := []TraceEvtTyp{
+ TraceStartEvt,
+ TraceCreateScopeEvt,
+ TraceDestroyScopeEvt,
+ TraceReserveMemoryEvt,
+ TraceBlockReserveMemoryEvt,
+ TraceReleaseMemoryEvt,
+ TraceAddStreamEvt,
+ TraceBlockAddStreamEvt,
+ TraceRemoveStreamEvt,
+ TraceAddConnEvt,
+ TraceBlockAddConnEvt,
+ TraceRemoveConnEvt,
+ }
+
+ names := []string{
+ "conn-1",
+ "stream-2",
+ "peer:abc",
+ "system",
+ "transient",
+ "peer:12D3Koo",
+ "protocol:/libp2p/autonat/1.0.0",
+ "protocol:/libp2p/autonat/1.0.0.peer:12D3Koo",
+ "service:libp2p.autonat",
+ "service:libp2p.autonat.peer:12D3Koo",
+ }
+
+ return TraceEvt{
+ Type: typs[rng.Intn(len(typs))],
+ Name: names[rng.Intn(len(names))],
+ DeltaOut: rng.Intn(5),
+ DeltaIn: rng.Intn(5),
+ Delta: int64(rng.Intn(5)),
+ Memory: int64(rng.Intn(10000)),
+ StreamsIn: rng.Intn(100),
+ StreamsOut: rng.Intn(100),
+ ConnsIn: rng.Intn(100),
+ ConnsOut: rng.Intn(100),
+ FD: rng.Intn(100),
+ Time: time.Now().Format(time.RFC3339Nano),
+ }
+}
+
+var registerOnce sync.Once
+
+func BenchmarkMetricsRecording(b *testing.B) {
+ b.ReportAllocs()
+
+ registerOnce.Do(func() {
+ MustRegisterWith(prometheus.DefaultRegisterer)
+ })
+
+ evtCount := 10000
+ evts := make([]TraceEvt, evtCount)
+ rng := rand.New(rand.NewSource(int64(b.N)))
+ for i := 0; i < evtCount; i++ {
+ evts[i] = randomTraceEvt(rng)
+ }
+
+ str, err := NewStatsTraceReporter()
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ str.ConsumeEvent(evts[i%len(evts)])
+ }
+}
+
+func TestNoAllocsNoCover(t *testing.T) {
+ str, err := NewStatsTraceReporter()
+ require.NoError(t, err)
+
+ evtCount := 10_000
+ evts := make([]TraceEvt, 0, evtCount)
+ rng := rand.New(rand.NewSource(1))
+
+ for i := 0; i < evtCount; i++ {
+ evts = append(evts, randomTraceEvt(rng))
+ }
+
+ tagSlice := make([]string, 0, 10)
+ allocs := testing.AllocsPerRun(100, func() {
+ for i := 0; i < evtCount; i++ {
+ str.consumeEventWithLabelSlice(evts[i], &tagSlice)
+ }
+ })
+
+ if allocs > 10 {
+		t.Fatalf("expected at most 10 allocations per run, got %f", allocs)
+ }
+}
diff --git a/p2p/host/resource-manager/obs/obs.go b/p2p/host/resource-manager/obs/obs.go
new file mode 100644
index 0000000000..3484fae4b2
--- /dev/null
+++ b/p2p/host/resource-manager/obs/obs.go
@@ -0,0 +1,18 @@
+// Package obs implements metrics tracing for the resource manager.
+//
+// Deprecated: obs is deprecated; its exported types and methods have
+// moved to the rcmgr package. Use the corresponding identifier in
+// the rcmgr package, for example
+// obs.NewStatsTraceReporter => rcmgr.NewStatsTraceReporter.
+package obs
+
+import (
+ rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
+)
+
+var MustRegisterWith = rcmgr.MustRegisterWith
+
+// StatsTraceReporter reports stats on the resource manager using its traces.
+type StatsTraceReporter = rcmgr.StatsTraceReporter
+
+var NewStatsTraceReporter = rcmgr.NewStatsTraceReporter
diff --git a/p2p/host/resource-manager/rcmgr.go b/p2p/host/resource-manager/rcmgr.go
new file mode 100644
index 0000000000..efacb4547d
--- /dev/null
+++ b/p2p/host/resource-manager/rcmgr.go
@@ -0,0 +1,970 @@
+package rcmgr
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net"
+ "net/netip"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/protocol"
+ "github.com/libp2p/go-libp2p/x/rate"
+
+ logging "github.com/libp2p/go-libp2p/gologshim"
+ "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
+var log = logging.Logger("rcmgr")
+
+type resourceManager struct {
+ limits Limiter
+
+ connLimiter *connLimiter
+ connRateLimiter *rate.Limiter
+ verifySourceAddressRateLimiter *rate.Limiter
+
+ trace *trace
+ metrics *metrics
+ disableMetrics bool
+
+ allowlist *Allowlist
+
+ system *systemScope
+ transient *transientScope
+
+ allowlistedSystem *systemScope
+ allowlistedTransient *transientScope
+
+ cancelCtx context.Context
+ cancel func()
+ wg sync.WaitGroup
+
+ mx sync.Mutex
+ svc map[string]*serviceScope
+ proto map[protocol.ID]*protocolScope
+ peer map[peer.ID]*peerScope
+
+ stickyProto map[protocol.ID]struct{}
+ stickyPeer map[peer.ID]struct{}
+
+ connId, streamId int64
+}
+
+var _ network.ResourceManager = (*resourceManager)(nil)
+
+type systemScope struct {
+ *resourceScope
+}
+
+var _ network.ResourceScope = (*systemScope)(nil)
+
+type transientScope struct {
+ *resourceScope
+
+ system *systemScope
+}
+
+var _ network.ResourceScope = (*transientScope)(nil)
+
+type serviceScope struct {
+ *resourceScope
+
+ service string
+ rcmgr *resourceManager
+
+ peers map[peer.ID]*resourceScope
+}
+
+var _ network.ServiceScope = (*serviceScope)(nil)
+
+type protocolScope struct {
+ *resourceScope
+
+ proto protocol.ID
+ rcmgr *resourceManager
+
+ peers map[peer.ID]*resourceScope
+}
+
+var _ network.ProtocolScope = (*protocolScope)(nil)
+
+type peerScope struct {
+ *resourceScope
+
+ peer peer.ID
+ rcmgr *resourceManager
+}
+
+var _ network.PeerScope = (*peerScope)(nil)
+
+type connectionScope struct {
+ *resourceScope
+
+ dir network.Direction
+ usefd bool
+ isAllowlisted bool
+ rcmgr *resourceManager
+ peer *peerScope
+ endpoint multiaddr.Multiaddr
+ ip netip.Addr
+}
+
+var _ network.ConnScope = (*connectionScope)(nil)
+var _ network.ConnManagementScope = (*connectionScope)(nil)
+
+type streamScope struct {
+ *resourceScope
+
+ dir network.Direction
+ rcmgr *resourceManager
+ peer *peerScope
+ svc *serviceScope
+ proto *protocolScope
+
+ peerProtoScope *resourceScope
+ peerSvcScope *resourceScope
+}
+
+var _ network.StreamScope = (*streamScope)(nil)
+var _ network.StreamManagementScope = (*streamScope)(nil)
+
+type Option func(*resourceManager) error
+
+func NewResourceManager(limits Limiter, opts ...Option) (network.ResourceManager, error) {
+ allowlist := newAllowlist()
+ r := &resourceManager{
+ limits: limits,
+ connLimiter: newConnLimiter(),
+ allowlist: &allowlist,
+ svc: make(map[string]*serviceScope),
+ proto: make(map[protocol.ID]*protocolScope),
+ peer: make(map[peer.ID]*peerScope),
+ connRateLimiter: newConnRateLimiter(),
+ }
+
+ for _, opt := range opts {
+ if err := opt(r); err != nil {
+ return nil, err
+ }
+ }
+
+ registeredConnLimiterPrefixes := make(map[string]struct{})
+ for _, npLimit := range r.connLimiter.networkPrefixLimitV4 {
+ registeredConnLimiterPrefixes[npLimit.Network.String()] = struct{}{}
+ }
+ for _, npLimit := range r.connLimiter.networkPrefixLimitV6 {
+ registeredConnLimiterPrefixes[npLimit.Network.String()] = struct{}{}
+ }
+ for _, network := range allowlist.allowedNetworks {
+ prefix, err := netip.ParsePrefix(network.String())
+ if err != nil {
+ log.Debug("failed to parse prefix from allowlist", "network", network.String(), "err", err)
+ continue
+ }
+ if _, ok := registeredConnLimiterPrefixes[prefix.String()]; !ok {
+ // connlimiter doesn't know about this network. Let's fix that
+ r.connLimiter.addNetworkPrefixLimit(prefix.Addr().Is6(), NetworkPrefixLimit{
+ Network: prefix,
+ ConnCount: r.limits.GetAllowlistedSystemLimits().GetConnTotalLimit(),
+ })
+ }
+ }
+ r.verifySourceAddressRateLimiter = newVerifySourceAddressRateLimiter(r.connLimiter)
+
+ if !r.disableMetrics {
+ var sr TraceReporter
+ sr, err := NewStatsTraceReporter()
+ if err != nil {
+ log.Error("failed to initialise StatsTraceReporter", "err", err)
+ } else {
+ if r.trace == nil {
+ r.trace = &trace{}
+ }
+ found := false
+ for _, rep := range r.trace.reporters {
+ if rep == sr {
+ found = true
+ break
+ }
+ }
+ if !found {
+ r.trace.reporters = append(r.trace.reporters, sr)
+ }
+ }
+ }
+
+ if err := r.trace.Start(limits); err != nil {
+ return nil, err
+ }
+
+ r.system = newSystemScope(limits.GetSystemLimits(), r, "system")
+ r.system.IncRef()
+ r.transient = newTransientScope(limits.GetTransientLimits(), r, "transient", r.system.resourceScope)
+ r.transient.IncRef()
+
+ r.allowlistedSystem = newSystemScope(limits.GetAllowlistedSystemLimits(), r, "allowlistedSystem")
+ r.allowlistedSystem.IncRef()
+ r.allowlistedTransient = newTransientScope(limits.GetAllowlistedTransientLimits(), r, "allowlistedTransient", r.allowlistedSystem.resourceScope)
+ r.allowlistedTransient.IncRef()
+
+ r.cancelCtx, r.cancel = context.WithCancel(context.Background())
+
+ r.wg.Add(1)
+ go r.background()
+
+ return r, nil
+}
+
+func (r *resourceManager) GetAllowlist() *Allowlist {
+ return r.allowlist
+}
+
+// GetAllowlist tries to get the allowlist from the given network.ResourceManager
+// by checking whether its concrete type is a resourceManager.
+// Returns nil if it fails to get the allowlist.
+func GetAllowlist(rcmgr network.ResourceManager) *Allowlist {
+ r, ok := rcmgr.(*resourceManager)
+ if !ok {
+ return nil
+ }
+
+ return r.allowlist
+}
+
+func (r *resourceManager) ViewSystem(f func(network.ResourceScope) error) error {
+ return f(r.system)
+}
+
+func (r *resourceManager) ViewTransient(f func(network.ResourceScope) error) error {
+ return f(r.transient)
+}
+
+func (r *resourceManager) ViewService(srv string, f func(network.ServiceScope) error) error {
+ s := r.getServiceScope(srv)
+ defer s.DecRef()
+
+ return f(s)
+}
+
+func (r *resourceManager) ViewProtocol(proto protocol.ID, f func(network.ProtocolScope) error) error {
+ s := r.getProtocolScope(proto)
+ defer s.DecRef()
+
+ return f(s)
+}
+
+func (r *resourceManager) ViewPeer(p peer.ID, f func(network.PeerScope) error) error {
+ s := r.getPeerScope(p)
+ defer s.DecRef()
+
+ return f(s)
+}
+
+func (r *resourceManager) getServiceScope(svc string) *serviceScope {
+ r.mx.Lock()
+ defer r.mx.Unlock()
+
+ s, ok := r.svc[svc]
+ if !ok {
+ s = newServiceScope(svc, r.limits.GetServiceLimits(svc), r)
+ r.svc[svc] = s
+ }
+
+ s.IncRef()
+ return s
+}
+
+func (r *resourceManager) getProtocolScope(proto protocol.ID) *protocolScope {
+ r.mx.Lock()
+ defer r.mx.Unlock()
+
+ s, ok := r.proto[proto]
+ if !ok {
+ s = newProtocolScope(proto, r.limits.GetProtocolLimits(proto), r)
+ r.proto[proto] = s
+ }
+
+ s.IncRef()
+ return s
+}
+
+func (r *resourceManager) setStickyProtocol(proto protocol.ID) {
+ r.mx.Lock()
+ defer r.mx.Unlock()
+
+ if r.stickyProto == nil {
+ r.stickyProto = make(map[protocol.ID]struct{})
+ }
+ r.stickyProto[proto] = struct{}{}
+}
+
+func (r *resourceManager) getPeerScope(p peer.ID) *peerScope {
+ r.mx.Lock()
+ defer r.mx.Unlock()
+
+ s, ok := r.peer[p]
+ if !ok {
+ s = newPeerScope(p, r.limits.GetPeerLimits(p), r)
+ r.peer[p] = s
+ }
+
+ s.IncRef()
+ return s
+}
+
+func (r *resourceManager) setStickyPeer(p peer.ID) {
+ r.mx.Lock()
+ defer r.mx.Unlock()
+
+ if r.stickyPeer == nil {
+ r.stickyPeer = make(map[peer.ID]struct{})
+ }
+
+ r.stickyPeer[p] = struct{}{}
+}
+
+func (r *resourceManager) nextConnId() int64 {
+ r.mx.Lock()
+ defer r.mx.Unlock()
+
+ r.connId++
+ return r.connId
+}
+
+func (r *resourceManager) nextStreamId() int64 {
+ r.mx.Lock()
+ defer r.mx.Unlock()
+
+ r.streamId++
+ return r.streamId
+}
+
+// VerifySourceAddress tells the transport to verify the peer's IP address before
+// initiating a handshake.
+func (r *resourceManager) VerifySourceAddress(addr net.Addr) bool {
+ if r.verifySourceAddressRateLimiter == nil {
+ return false
+ }
+ ipPort, err := netip.ParseAddrPort(addr.String())
+ if err != nil {
+ return true
+ }
+ return !r.verifySourceAddressRateLimiter.Allow(ipPort.Addr())
+}
+
+// OpenConnectionNoIP is deprecated and will be removed in the next release
+//
+// Deprecated: Use OpenConnection instead
+func (r *resourceManager) OpenConnectionNoIP(dir network.Direction, usefd bool, endpoint multiaddr.Multiaddr) (network.ConnManagementScope, error) {
+ return r.openConnection(dir, usefd, endpoint, netip.Addr{})
+}
+
+func (r *resourceManager) OpenConnection(dir network.Direction, usefd bool, endpoint multiaddr.Multiaddr) (network.ConnManagementScope, error) {
+ ip, err := manet.ToIP(endpoint)
+ if err != nil {
+ // No IP address
+ return r.openConnection(dir, usefd, endpoint, netip.Addr{})
+ }
+
+ ipAddr, ok := netip.AddrFromSlice(ip)
+ if !ok {
+ return nil, fmt.Errorf("failed to convert ip to netip.Addr")
+ }
+ return r.openConnection(dir, usefd, endpoint, ipAddr)
+}
+
+func (r *resourceManager) openConnection(dir network.Direction, usefd bool, endpoint multiaddr.Multiaddr, ip netip.Addr) (network.ConnManagementScope, error) {
+ if !r.connRateLimiter.Allow(ip) {
+ return nil, errors.New("rate limit exceeded")
+ }
+
+ if ip.IsValid() {
+ if ok := r.connLimiter.addConn(ip); !ok {
+ return nil, fmt.Errorf("connections per ip limit exceeded for %s", endpoint)
+ }
+ }
+
+	conn := newConnectionScope(dir, usefd, r.limits.GetConnLimits(), r, endpoint, ip)
+
+ err := conn.AddConn(dir, usefd)
+ if err != nil && ip.IsValid() {
+		// Failed to open the connection; if the endpoint was allowlisted,
+		// try again against the allowlisted scopes.
+ allowed := r.allowlist.Allowed(endpoint)
+ if allowed {
+ conn.Done()
+ conn = newAllowListedConnectionScope(dir, usefd, r.limits.GetConnLimits(), r, endpoint)
+ err = conn.AddConn(dir, usefd)
+ }
+ }
+
+ if err != nil {
+ conn.Done()
+ r.metrics.BlockConn(dir, usefd)
+ return nil, err
+ }
+
+ r.metrics.AllowConn(dir, usefd)
+ return conn, nil
+}
+
+func (r *resourceManager) OpenStream(p peer.ID, dir network.Direction) (network.StreamManagementScope, error) {
+ peer := r.getPeerScope(p)
+ stream := newStreamScope(dir, r.limits.GetStreamLimits(p), peer, r)
+ peer.DecRef() // we have the reference in edges
+
+ err := stream.AddStream(dir)
+ if err != nil {
+ stream.Done()
+ r.metrics.BlockStream(p, dir)
+ return nil, err
+ }
+
+ r.metrics.AllowStream(p, dir)
+ return stream, nil
+}
+
+func (r *resourceManager) Close() error {
+ r.cancel()
+ r.wg.Wait()
+ r.trace.Close()
+
+ return nil
+}
+
+func (r *resourceManager) background() {
+ defer r.wg.Done()
+
+ // periodically garbage collects unused peer and protocol scopes
+ ticker := time.NewTicker(time.Minute)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ r.gc()
+ case <-r.cancelCtx.Done():
+ return
+ }
+ }
+}
+
+func (r *resourceManager) gc() {
+ r.mx.Lock()
+ defer r.mx.Unlock()
+
+ for proto, s := range r.proto {
+ _, sticky := r.stickyProto[proto]
+ if sticky {
+ continue
+ }
+ if s.IsUnused() {
+ s.Done()
+ delete(r.proto, proto)
+ }
+ }
+
+ var deadPeers []peer.ID
+ for p, s := range r.peer {
+ _, sticky := r.stickyPeer[p]
+ if sticky {
+ continue
+ }
+
+ if s.IsUnused() {
+ s.Done()
+ delete(r.peer, p)
+ deadPeers = append(deadPeers, p)
+ }
+ }
+
+ for _, s := range r.svc {
+ s.Lock()
+ for _, p := range deadPeers {
+ ps, ok := s.peers[p]
+ if ok {
+ ps.Done()
+ delete(s.peers, p)
+ }
+ }
+ s.Unlock()
+ }
+
+ for _, s := range r.proto {
+ s.Lock()
+ for _, p := range deadPeers {
+ ps, ok := s.peers[p]
+ if ok {
+ ps.Done()
+ delete(s.peers, p)
+ }
+ }
+ s.Unlock()
+ }
+}
+
+func newSystemScope(limit Limit, rcmgr *resourceManager, name string) *systemScope {
+ return &systemScope{
+ resourceScope: newResourceScope(limit, nil, name, rcmgr.trace, rcmgr.metrics),
+ }
+}
+
+func newTransientScope(limit Limit, rcmgr *resourceManager, name string, systemScope *resourceScope) *transientScope {
+ return &transientScope{
+ resourceScope: newResourceScope(limit,
+ []*resourceScope{systemScope},
+ name, rcmgr.trace, rcmgr.metrics),
+ system: rcmgr.system,
+ }
+}
+
+func newServiceScope(service string, limit Limit, rcmgr *resourceManager) *serviceScope {
+ return &serviceScope{
+ resourceScope: newResourceScope(limit,
+ []*resourceScope{rcmgr.system.resourceScope},
+ fmt.Sprintf("service:%s", service), rcmgr.trace, rcmgr.metrics),
+ service: service,
+ rcmgr: rcmgr,
+ }
+}
+
+func newProtocolScope(proto protocol.ID, limit Limit, rcmgr *resourceManager) *protocolScope {
+ return &protocolScope{
+ resourceScope: newResourceScope(limit,
+ []*resourceScope{rcmgr.system.resourceScope},
+ fmt.Sprintf("protocol:%s", proto), rcmgr.trace, rcmgr.metrics),
+ proto: proto,
+ rcmgr: rcmgr,
+ }
+}
+
+func newPeerScope(p peer.ID, limit Limit, rcmgr *resourceManager) *peerScope {
+ return &peerScope{
+ resourceScope: newResourceScope(limit,
+ []*resourceScope{rcmgr.system.resourceScope},
+ peerScopeName(p), rcmgr.trace, rcmgr.metrics),
+ peer: p,
+ rcmgr: rcmgr,
+ }
+}
+
+func newConnectionScope(dir network.Direction, usefd bool, limit Limit, rcmgr *resourceManager, endpoint multiaddr.Multiaddr, ip netip.Addr) *connectionScope {
+ return &connectionScope{
+ resourceScope: newResourceScope(limit,
+ []*resourceScope{rcmgr.transient.resourceScope, rcmgr.system.resourceScope},
+ connScopeName(rcmgr.nextConnId()), rcmgr.trace, rcmgr.metrics),
+ dir: dir,
+ usefd: usefd,
+ rcmgr: rcmgr,
+ endpoint: endpoint,
+ ip: ip,
+ }
+}
+
+func newAllowListedConnectionScope(dir network.Direction, usefd bool, limit Limit, rcmgr *resourceManager, endpoint multiaddr.Multiaddr) *connectionScope {
+ return &connectionScope{
+ resourceScope: newResourceScope(limit,
+ []*resourceScope{rcmgr.allowlistedTransient.resourceScope, rcmgr.allowlistedSystem.resourceScope},
+ connScopeName(rcmgr.nextConnId()), rcmgr.trace, rcmgr.metrics),
+ dir: dir,
+ usefd: usefd,
+ rcmgr: rcmgr,
+ endpoint: endpoint,
+ isAllowlisted: true,
+ }
+}
+
+func newStreamScope(dir network.Direction, limit Limit, peer *peerScope, rcmgr *resourceManager) *streamScope {
+ return &streamScope{
+ resourceScope: newResourceScope(limit,
+ []*resourceScope{peer.resourceScope, rcmgr.transient.resourceScope, rcmgr.system.resourceScope},
+ streamScopeName(rcmgr.nextStreamId()), rcmgr.trace, rcmgr.metrics),
+ dir: dir,
+ rcmgr: peer.rcmgr,
+ peer: peer,
+ }
+}
+
+func IsSystemScope(name string) bool {
+ return name == "system"
+}
+
+func IsTransientScope(name string) bool {
+ return name == "transient"
+}
+
+func streamScopeName(streamId int64) string {
+ return fmt.Sprintf("stream-%d", streamId)
+}
+
+func IsStreamScope(name string) bool {
+ return strings.HasPrefix(name, "stream-") && !IsSpan(name)
+}
+
+func connScopeName(streamId int64) string {
+ return fmt.Sprintf("conn-%d", streamId)
+}
+
+func IsConnScope(name string) bool {
+ return strings.HasPrefix(name, "conn-") && !IsSpan(name)
+}
+
+func peerScopeName(p peer.ID) string {
+ return fmt.Sprintf("peer:%s", p)
+}
+
+// PeerStrInScopeName returns "" if name is not a peerScopeName. Returns a string to avoid allocating a peer ID object
+func PeerStrInScopeName(name string) string {
+ if !strings.HasPrefix(name, "peer:") || IsSpan(name) {
+ return ""
+ }
+ // Index to avoid allocating a new string
+ peerSplitIdx := strings.Index(name, "peer:")
+ if peerSplitIdx == -1 {
+ return ""
+ }
+ p := (name[peerSplitIdx+len("peer:"):])
+ return p
+}
+
+// ParseProtocolScopeName returns the protocol name if name is a protocolScopeName.
+// Otherwise it returns "".
+func ParseProtocolScopeName(name string) string {
+ if strings.HasPrefix(name, "protocol:") && !IsSpan(name) {
+ if strings.Contains(name, "peer:") {
+ // This is a protocol peer scope
+ return ""
+ }
+
+ // Index to avoid allocating a new string
+ separatorIdx := strings.Index(name, ":")
+ if separatorIdx == -1 {
+ return ""
+ }
+ return name[separatorIdx+1:]
+ }
+ return ""
+}
+
+func (s *serviceScope) Name() string {
+ return s.service
+}
+
+func (s *serviceScope) getPeerScope(p peer.ID) *resourceScope {
+ s.Lock()
+ defer s.Unlock()
+
+ ps, ok := s.peers[p]
+ if ok {
+ ps.IncRef()
+ return ps
+ }
+
+ l := s.rcmgr.limits.GetServicePeerLimits(s.service)
+
+ if s.peers == nil {
+ s.peers = make(map[peer.ID]*resourceScope)
+ }
+
+ ps = newResourceScope(l, nil, fmt.Sprintf("%s.peer:%s", s.name, p), s.rcmgr.trace, s.rcmgr.metrics)
+ s.peers[p] = ps
+
+ ps.IncRef()
+ return ps
+}
+
+func (s *protocolScope) Protocol() protocol.ID {
+ return s.proto
+}
+
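+// getPeerScope returns the per-peer resource scope under this protocol,
+// creating it if necessary. The returned scope's refcount is incremented.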
+func (s *protocolScope) getPeerScope(p peer.ID) *resourceScope {
+ s.Lock()
+ defer s.Unlock()
+
+ ps, ok := s.peers[p]
+ if ok {
+ ps.IncRef()
+ return ps
+ }
+
+ l := s.rcmgr.limits.GetProtocolPeerLimits(s.proto)
+
+ if s.peers == nil {
+ s.peers = make(map[peer.ID]*resourceScope)
+ }
+
+ ps = newResourceScope(l, nil, fmt.Sprintf("%s.peer:%s", s.name, p), s.rcmgr.trace, s.rcmgr.metrics)
+ s.peers[p] = ps
+
+ ps.IncRef()
+ return ps
+}
+
+func (s *peerScope) Peer() peer.ID {
+ return s.peer
+}
+
+func (s *connectionScope) PeerScope() network.PeerScope {
+ s.Lock()
+ defer s.Unlock()
+
+	// avoid the typed-nil != nil footgun: return an untyped nil, not a nil *peerScope
+ if s.peer == nil {
+ return nil
+ }
+
+ return s.peer
+}
+
+func (s *connectionScope) Done() {
+ s.Lock()
+ defer s.Unlock()
+ if s.done {
+ return
+ }
+ if s.ip.IsValid() {
+ s.rcmgr.connLimiter.rmConn(s.ip)
+ }
+ s.resourceScope.doneUnlocked()
+}
+
+// transferAllowedToStandard transfers this connection scope from the
+// allowlist set of scopes to the standard set of scopes. This happens when we
+// first allowlisted the connection due to its IP, but later discovered that
+// the peer ID is not the one we expected.
+func (s *connectionScope) transferAllowedToStandard() (err error) {
+ systemScope := s.rcmgr.system.resourceScope
+ transientScope := s.rcmgr.transient.resourceScope
+
+ stat := s.resourceScope.rc.stat()
+
+ for _, scope := range s.edges {
+ scope.ReleaseForChild(stat)
+ scope.DecRef() // removed from edges
+ }
+ s.edges = nil
+
+ if err := systemScope.ReserveForChild(stat); err != nil {
+ return err
+ }
+ systemScope.IncRef()
+
+ // Undo this if we fail later
+ defer func() {
+ if err != nil {
+ systemScope.ReleaseForChild(stat)
+ systemScope.DecRef()
+ }
+ }()
+
+ if err := transientScope.ReserveForChild(stat); err != nil {
+ return err
+ }
+ transientScope.IncRef()
+
+ // Update edges
+ s.edges = []*resourceScope{
+ systemScope,
+ transientScope,
+ }
+ return nil
+}
+
+func (s *connectionScope) SetPeer(p peer.ID) error {
+ s.Lock()
+ defer s.Unlock()
+
+ if s.peer != nil {
+ return fmt.Errorf("connection scope already attached to a peer")
+ }
+
+ system := s.rcmgr.system
+ transient := s.rcmgr.transient
+
+ if s.isAllowlisted {
+ system = s.rcmgr.allowlistedSystem
+ transient = s.rcmgr.allowlistedTransient
+
+ if !s.rcmgr.allowlist.AllowedPeerAndMultiaddr(p, s.endpoint) {
+ s.isAllowlisted = false
+
+ // This is not an allowed peer + multiaddr combination. We need to
+ // transfer this connection to the general scope. We'll do this first by
+ // transferring the connection to the system and transient scopes, then
+ // continue on with this function. The idea is that a connection
+ // shouldn't get the benefit of evading the transient scope because it
+ // was _almost_ an allowlisted connection.
+ if err := s.transferAllowedToStandard(); err != nil {
+ // Failed to transfer this connection to the standard scopes
+ return err
+ }
+
+ // set the system and transient scopes to the non-allowlisted ones
+ system = s.rcmgr.system
+ transient = s.rcmgr.transient
+ }
+ }
+
+ s.peer = s.rcmgr.getPeerScope(p)
+
+ // juggle resources from transient scope to peer scope
+ stat := s.resourceScope.rc.stat()
+ if err := s.peer.ReserveForChild(stat); err != nil {
+ s.peer.DecRef()
+ s.peer = nil
+ s.rcmgr.metrics.BlockPeer(p)
+ return err
+ }
+
+ transient.ReleaseForChild(stat)
+ transient.DecRef() // removed from edges
+
+ // update edges
+ edges := []*resourceScope{
+ s.peer.resourceScope,
+ system.resourceScope,
+ }
+ s.resourceScope.edges = edges
+
+ s.rcmgr.metrics.AllowPeer(p)
+ return nil
+}
+
+func (s *streamScope) ProtocolScope() network.ProtocolScope {
+ s.Lock()
+ defer s.Unlock()
+
+	// avoid the typed-nil != nil footgun: return an untyped nil, not a nil *protocolScope
+ if s.proto == nil {
+ return nil
+ }
+
+ return s.proto
+}
+
+func (s *streamScope) SetProtocol(proto protocol.ID) error {
+ s.Lock()
+ defer s.Unlock()
+
+ if s.proto != nil {
+ return fmt.Errorf("stream scope already attached to a protocol")
+ }
+
+ s.proto = s.rcmgr.getProtocolScope(proto)
+
+ // juggle resources from transient scope to protocol scope
+ stat := s.resourceScope.rc.stat()
+ if err := s.proto.ReserveForChild(stat); err != nil {
+ s.proto.DecRef()
+ s.proto = nil
+ s.rcmgr.metrics.BlockProtocol(proto)
+ return err
+ }
+
+ s.peerProtoScope = s.proto.getPeerScope(s.peer.peer)
+ if err := s.peerProtoScope.ReserveForChild(stat); err != nil {
+ s.proto.ReleaseForChild(stat)
+ s.proto.DecRef()
+ s.proto = nil
+ s.peerProtoScope.DecRef()
+ s.peerProtoScope = nil
+ s.rcmgr.metrics.BlockProtocolPeer(proto, s.peer.peer)
+ return err
+ }
+
+ s.rcmgr.transient.ReleaseForChild(stat)
+ s.rcmgr.transient.DecRef() // removed from edges
+
+ // update edges
+ edges := []*resourceScope{
+ s.peer.resourceScope,
+ s.peerProtoScope,
+ s.proto.resourceScope,
+ s.rcmgr.system.resourceScope,
+ }
+ s.resourceScope.edges = edges
+
+ s.rcmgr.metrics.AllowProtocol(proto)
+ return nil
+}
+
+func (s *streamScope) ServiceScope() network.ServiceScope {
+ s.Lock()
+ defer s.Unlock()
+
+	// avoid the typed-nil != nil footgun: return an untyped nil, not a nil *serviceScope
+ if s.svc == nil {
+ return nil
+ }
+
+ return s.svc
+}
+
+func (s *streamScope) SetService(svc string) error {
+ s.Lock()
+ defer s.Unlock()
+
+ if s.svc != nil {
+ return fmt.Errorf("stream scope already attached to a service")
+ }
+ if s.proto == nil {
+ return fmt.Errorf("stream scope not attached to a protocol")
+ }
+
+ s.svc = s.rcmgr.getServiceScope(svc)
+
+ // reserve resources in service
+ stat := s.resourceScope.rc.stat()
+ if err := s.svc.ReserveForChild(stat); err != nil {
+ s.svc.DecRef()
+ s.svc = nil
+ s.rcmgr.metrics.BlockService(svc)
+ return err
+ }
+
+ // get the per peer service scope constraint, if any
+ s.peerSvcScope = s.svc.getPeerScope(s.peer.peer)
+ if err := s.peerSvcScope.ReserveForChild(stat); err != nil {
+ s.svc.ReleaseForChild(stat)
+ s.svc.DecRef()
+ s.svc = nil
+ s.peerSvcScope.DecRef()
+ s.peerSvcScope = nil
+ s.rcmgr.metrics.BlockServicePeer(svc, s.peer.peer)
+ return err
+ }
+
+ // update edges
+ edges := []*resourceScope{
+ s.peer.resourceScope,
+ s.peerProtoScope,
+ s.peerSvcScope,
+ s.proto.resourceScope,
+ s.svc.resourceScope,
+ s.rcmgr.system.resourceScope,
+ }
+ s.resourceScope.edges = edges
+
+ s.rcmgr.metrics.AllowService(svc)
+ return nil
+}
+
+func (s *streamScope) PeerScope() network.PeerScope {
+ s.Lock()
+ defer s.Unlock()
+
+	// avoid the typed-nil != nil footgun: return an untyped nil, not a nil *peerScope
+ if s.peer == nil {
+ return nil
+ }
+
+ return s.peer
+}
diff --git a/p2p/host/resource-manager/rcmgr_test.go b/p2p/host/resource-manager/rcmgr_test.go
new file mode 100644
index 0000000000..97756eaf6d
--- /dev/null
+++ b/p2p/host/resource-manager/rcmgr_test.go
@@ -0,0 +1,1175 @@
+package rcmgr
+
+import (
+ "net"
+ "net/netip"
+ "testing"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/protocol"
+ "github.com/libp2p/go-libp2p/core/test"
+ "github.com/libp2p/go-libp2p/x/rate"
+ "github.com/stretchr/testify/require"
+
+ "github.com/multiformats/go-multiaddr"
+)
+
+var dummyMA = multiaddr.StringCast("/ip4/1.2.3.4/tcp/1234")
+
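+// TestResourceManager exercises the scope DAG end to end: it opens
+// connections and streams, attaches them to peers, protocols, and services,
+// and checks the resource accounting and refcounts at every step.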
+func TestResourceManager(t *testing.T) {
+ peerA := peer.ID("A")
+ peerB := peer.ID("B")
+ protoA := protocol.ID("/A")
+ protoB := protocol.ID("/B")
+ svcA := "A.svc"
+ svcB := "B.svc"
+ nmgr, err := NewResourceManager(
+ NewFixedLimiter(ConcreteLimitConfig{
+ system: BaseLimit{
+ Memory: 16384,
+ StreamsInbound: 3,
+ StreamsOutbound: 3,
+ Streams: 6,
+ ConnsInbound: 3,
+ ConnsOutbound: 3,
+ Conns: 6,
+ FD: 2,
+ },
+ transient: BaseLimit{
+ Memory: 4096,
+ StreamsInbound: 1,
+ StreamsOutbound: 1,
+ Streams: 2,
+ ConnsInbound: 1,
+ ConnsOutbound: 1,
+ Conns: 2,
+ FD: 1,
+ },
+ serviceDefault: BaseLimit{
+ Memory: 4096,
+ StreamsInbound: 1,
+ StreamsOutbound: 1,
+ Streams: 2,
+ ConnsInbound: 1,
+ ConnsOutbound: 1,
+ Conns: 2,
+ FD: 1,
+ },
+ servicePeerDefault: BaseLimit{
+ Memory: 4096,
+ StreamsInbound: 5,
+ StreamsOutbound: 5,
+ Streams: 10,
+ },
+ service: map[string]BaseLimit{
+ svcA: {
+ Memory: 8192,
+ StreamsInbound: 2,
+ StreamsOutbound: 2,
+ Streams: 4,
+ ConnsInbound: 2,
+ ConnsOutbound: 2,
+ Conns: 4,
+ FD: 1,
+ },
+ svcB: {
+ Memory: 8192,
+ StreamsInbound: 2,
+ StreamsOutbound: 2,
+ Streams: 4,
+ ConnsInbound: 2,
+ ConnsOutbound: 2,
+ Conns: 4,
+ FD: 1,
+ },
+ },
+ servicePeer: map[string]BaseLimit{
+ svcB: {
+ Memory: 8192,
+ StreamsInbound: 1,
+ StreamsOutbound: 1,
+ Streams: 2,
+ },
+ },
+ protocolDefault: BaseLimit{
+ Memory: 4096,
+ StreamsInbound: 1,
+ StreamsOutbound: 1,
+ Streams: 2,
+ },
+ protocol: map[protocol.ID]BaseLimit{
+ protoA: {
+ Memory: 8192,
+ StreamsInbound: 2,
+ StreamsOutbound: 2,
+ Streams: 2,
+ },
+ },
+ protocolPeer: map[protocol.ID]BaseLimit{
+ protoB: {
+ Memory: 8192,
+ StreamsInbound: 1,
+ StreamsOutbound: 1,
+ Streams: 2,
+ },
+ },
+ peerDefault: BaseLimit{
+ Memory: 4096,
+ StreamsInbound: 1,
+ StreamsOutbound: 1,
+ Streams: 2,
+ ConnsInbound: 1,
+ ConnsOutbound: 1,
+ Conns: 2,
+ FD: 1,
+ },
+ protocolPeerDefault: BaseLimit{
+ Memory: 4096,
+ StreamsInbound: 5,
+ StreamsOutbound: 5,
+ Streams: 10,
+ },
+ peer: map[peer.ID]BaseLimit{
+ peerA: {
+ Memory: 8192,
+ StreamsInbound: 2,
+ StreamsOutbound: 2,
+ Streams: 4,
+ ConnsInbound: 2,
+ ConnsOutbound: 2,
+ Conns: 4,
+ FD: 1,
+ },
+ },
+ conn: BaseLimit{
+ Memory: 4096,
+ ConnsInbound: 1,
+ ConnsOutbound: 1,
+ Conns: 1,
+ FD: 1,
+ },
+ stream: BaseLimit{
+ Memory: 4096,
+ StreamsInbound: 1,
+ StreamsOutbound: 1,
+ Streams: 1,
+ },
+ }),
+ )
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ mgr := nmgr.(*resourceManager)
+ defer mgr.Close()
+
+ checkRefCnt := func(s *resourceScope, count int) {
+ t.Helper()
+ if refCnt := s.refCnt; refCnt != count {
+ t.Fatalf("expected refCnt of %d, got %d", count, refCnt)
+ }
+ }
+ checkSystem := func(check func(s *resourceScope)) {
+ if err := mgr.ViewSystem(func(s network.ResourceScope) error {
+ check(s.(*systemScope).resourceScope)
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+ }
+ checkTransient := func(check func(s *resourceScope)) {
+ if err := mgr.ViewTransient(func(s network.ResourceScope) error {
+ check(s.(*transientScope).resourceScope)
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+ }
+ checkService := func(svc string, check func(s *resourceScope)) {
+ if err := mgr.ViewService(svc, func(s network.ServiceScope) error {
+ check(s.(*serviceScope).resourceScope)
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+ }
+ checkProtocol := func(p protocol.ID, check func(s *resourceScope)) {
+ if err := mgr.ViewProtocol(p, func(s network.ProtocolScope) error {
+ check(s.(*protocolScope).resourceScope)
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+ }
+ checkPeer := func(p peer.ID, check func(s *resourceScope)) {
+ if err := mgr.ViewPeer(p, func(s network.PeerScope) error {
+ check(s.(*peerScope).resourceScope)
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ checkSystem(func(s *resourceScope) {
+ checkRefCnt(s, 2)
+ checkResources(t, &s.rc, network.ScopeStat{})
+ })
+ checkTransient(func(s *resourceScope) {
+ checkRefCnt(s, 1)
+ checkResources(t, &s.rc, network.ScopeStat{})
+ })
+
+ // open an inbound connection, using an fd
+ conn, err := mgr.OpenConnection(network.DirInbound, true, dummyMA)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ checkSystem(func(s *resourceScope) {
+ checkRefCnt(s, 3)
+ checkResources(t, &s.rc, network.ScopeStat{NumConnsInbound: 1, NumFD: 1})
+ })
+ checkTransient(func(s *resourceScope) {
+ checkRefCnt(s, 2)
+ checkResources(t, &s.rc, network.ScopeStat{NumConnsInbound: 1, NumFD: 1})
+ })
+
+	// the connection is transient, so we shouldn't be able to open a second one
+ if _, err := mgr.OpenConnection(network.DirInbound, true, dummyMA); err == nil {
+ t.Fatal("expected OpenConnection to fail")
+ }
+ if _, err := mgr.OpenConnection(network.DirInbound, false, dummyMA); err == nil {
+ t.Fatal("expected OpenConnection to fail")
+ }
+
+ // close it to check resources are reclaimed
+ conn.Done()
+
+ checkSystem(func(s *resourceScope) {
+ checkRefCnt(s, 2)
+ checkResources(t, &s.rc, network.ScopeStat{})
+ })
+ checkTransient(func(s *resourceScope) {
+ checkRefCnt(s, 1)
+ checkResources(t, &s.rc, network.ScopeStat{})
+ })
+
+ // open another inbound connection, using an fd
+ conn1, err := mgr.OpenConnection(network.DirInbound, true, dummyMA)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ checkSystem(func(s *resourceScope) {
+ checkRefCnt(s, 3)
+ checkResources(t, &s.rc, network.ScopeStat{NumConnsInbound: 1, NumFD: 1})
+ })
+ checkTransient(func(s *resourceScope) {
+ checkRefCnt(s, 2)
+ checkResources(t, &s.rc, network.ScopeStat{NumConnsInbound: 1, NumFD: 1})
+ })
+
+	// the connection is not yet attached to a peer, so PeerScope should return nil
+ if conn1.PeerScope() != nil {
+ t.Fatal("peer scope should be nil")
+ }
+
+ // attach to a peer
+ if err := conn1.SetPeer(peerA); err != nil {
+ t.Fatal(err)
+ }
+
+ checkPeer(peerA, func(s *resourceScope) {
+ checkRefCnt(s, 2)
+ checkResources(t, &s.rc, network.ScopeStat{NumConnsInbound: 1, NumFD: 1})
+ })
+ checkSystem(func(s *resourceScope) {
+ checkRefCnt(s, 4)
+ checkResources(t, &s.rc, network.ScopeStat{NumConnsInbound: 1, NumFD: 1})
+ })
+ checkTransient(func(s *resourceScope) {
+ checkRefCnt(s, 1)
+ checkResources(t, &s.rc, network.ScopeStat{})
+ })
+
+ // we should be able to open a second transient connection now
+ conn2, err := mgr.OpenConnection(network.DirInbound, true, dummyMA)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ checkSystem(func(s *resourceScope) {
+ checkRefCnt(s, 5)
+ checkResources(t, &s.rc, network.ScopeStat{NumConnsInbound: 2, NumFD: 2})
+ })
+ checkTransient(func(s *resourceScope) {
+ checkRefCnt(s, 2)
+ checkResources(t, &s.rc, network.ScopeStat{NumConnsInbound: 1, NumFD: 1})
+ })
+
+ // but we shouldn't be able to attach it to the same peer due to the fd limit
+ if err := conn2.SetPeer(peerA); err == nil {
+ t.Fatal("expected SetPeer to fail")
+ }
+
+ checkPeer(peerA, func(s *resourceScope) {
+ checkRefCnt(s, 2)
+ checkResources(t, &s.rc, network.ScopeStat{NumConnsInbound: 1, NumFD: 1})
+ })
+ checkSystem(func(s *resourceScope) {
+ checkRefCnt(s, 5)
+ checkResources(t, &s.rc, network.ScopeStat{NumConnsInbound: 2, NumFD: 2})
+ })
+ checkTransient(func(s *resourceScope) {
+ checkRefCnt(s, 2)
+ checkResources(t, &s.rc, network.ScopeStat{NumConnsInbound: 1, NumFD: 1})
+ })
+
+ // close it and reopen without using an FD -- we should be able to attach now
+ conn2.Done()
+
+ conn2, err = mgr.OpenConnection(network.DirInbound, false, dummyMA)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ checkSystem(func(s *resourceScope) {
+ checkRefCnt(s, 5)
+ checkResources(t, &s.rc, network.ScopeStat{NumConnsInbound: 2, NumFD: 1})
+ })
+ checkTransient(func(s *resourceScope) {
+ checkRefCnt(s, 2)
+ checkResources(t, &s.rc, network.ScopeStat{NumConnsInbound: 1, NumFD: 0})
+ })
+
+ if err := conn2.SetPeer(peerA); err != nil {
+ t.Fatal(err)
+ }
+
+ checkPeer(peerA, func(s *resourceScope) {
+ checkRefCnt(s, 3)
+ checkResources(t, &s.rc, network.ScopeStat{NumConnsInbound: 2, NumFD: 1})
+ })
+ checkSystem(func(s *resourceScope) {
+ checkRefCnt(s, 5)
+ checkResources(t, &s.rc, network.ScopeStat{NumConnsInbound: 2, NumFD: 1})
+ })
+ checkTransient(func(s *resourceScope) {
+ checkRefCnt(s, 1)
+ checkResources(t, &s.rc, network.ScopeStat{})
+ })
+
+ // open a stream
+ stream, err := mgr.OpenStream(peerA, network.DirInbound)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ checkPeer(peerA, func(s *resourceScope) {
+ checkRefCnt(s, 4)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1, NumConnsInbound: 2, NumFD: 1})
+ })
+ checkSystem(func(s *resourceScope) {
+ checkRefCnt(s, 6)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1, NumConnsInbound: 2, NumFD: 1})
+ })
+ checkTransient(func(s *resourceScope) {
+ checkRefCnt(s, 2)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1})
+ })
+
+	// the stream is transient, so we shouldn't be able to open a second one
+ if _, err := mgr.OpenStream(peerA, network.DirInbound); err == nil {
+ t.Fatal("expected OpenStream to fail")
+ }
+
+ // close the stream to check resource reclamation
+ stream.Done()
+
+ checkPeer(peerA, func(s *resourceScope) {
+ checkRefCnt(s, 3)
+ checkResources(t, &s.rc, network.ScopeStat{NumConnsInbound: 2, NumFD: 1})
+ })
+ checkSystem(func(s *resourceScope) {
+ checkRefCnt(s, 5)
+ checkResources(t, &s.rc, network.ScopeStat{NumConnsInbound: 2, NumFD: 1})
+ })
+ checkTransient(func(s *resourceScope) {
+ checkRefCnt(s, 1)
+ checkResources(t, &s.rc, network.ScopeStat{})
+ })
+
+ // open another stream, but this time attach it to a protocol
+ stream1, err := mgr.OpenStream(peerA, network.DirInbound)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ checkPeer(peerA, func(s *resourceScope) {
+ checkRefCnt(s, 4)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1, NumConnsInbound: 2, NumFD: 1})
+ })
+ checkSystem(func(s *resourceScope) {
+ checkRefCnt(s, 6)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1, NumConnsInbound: 2, NumFD: 1})
+ })
+ checkTransient(func(s *resourceScope) {
+ checkRefCnt(s, 2)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1})
+ })
+
+	// the stream is not yet attached to a protocol, so ProtocolScope should return nil
+ if stream1.ProtocolScope() != nil {
+ t.Fatal("protocol scope should be nil")
+ }
+
+ if err := stream1.SetProtocol(protoA); err != nil {
+ t.Fatal(err)
+ }
+
+ checkPeer(peerA, func(s *resourceScope) {
+ checkRefCnt(s, 4)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1, NumConnsInbound: 2, NumFD: 1})
+ })
+ checkProtocol(protoA, func(s *resourceScope) {
+ checkRefCnt(s, 2)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1})
+ })
+ checkSystem(func(s *resourceScope) {
+ checkRefCnt(s, 7)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1, NumConnsInbound: 2, NumFD: 1})
+ })
+ checkTransient(func(s *resourceScope) {
+ checkRefCnt(s, 1)
+ checkResources(t, &s.rc, network.ScopeStat{})
+ })
+
+ // and now we should be able to open another stream and attach it to the protocol
+ stream2, err := mgr.OpenStream(peerA, network.DirInbound)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ checkPeer(peerA, func(s *resourceScope) {
+ checkRefCnt(s, 5)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 2, NumConnsInbound: 2, NumFD: 1})
+ })
+ checkSystem(func(s *resourceScope) {
+ checkRefCnt(s, 8)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 2, NumConnsInbound: 2, NumFD: 1})
+ })
+ checkTransient(func(s *resourceScope) {
+ checkRefCnt(s, 2)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1})
+ })
+
+ if err := stream2.SetProtocol(protoA); err != nil {
+ t.Fatal(err)
+ }
+
+ checkPeer(peerA, func(s *resourceScope) {
+ checkRefCnt(s, 5)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 2, NumConnsInbound: 2, NumFD: 1})
+ })
+ checkProtocol(protoA, func(s *resourceScope) {
+ checkRefCnt(s, 3)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 2})
+ })
+ checkSystem(func(s *resourceScope) {
+ checkRefCnt(s, 8)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 2, NumConnsInbound: 2, NumFD: 1})
+ })
+ checkTransient(func(s *resourceScope) {
+ checkRefCnt(s, 1)
+ checkResources(t, &s.rc, network.ScopeStat{})
+ })
+
+ // open a 3rd stream, and try to attach it to the same protocol
+ stream3, err := mgr.OpenStream(peerB, network.DirInbound)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ checkPeer(peerA, func(s *resourceScope) {
+ checkRefCnt(s, 5)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 2, NumConnsInbound: 2, NumFD: 1})
+ })
+ checkPeer(peerB, func(s *resourceScope) {
+ checkRefCnt(s, 2)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1})
+ })
+ checkSystem(func(s *resourceScope) {
+ checkRefCnt(s, 10)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 3, NumConnsInbound: 2, NumFD: 1})
+ })
+ checkTransient(func(s *resourceScope) {
+ checkRefCnt(s, 2)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1})
+ })
+
+ if err := stream3.SetProtocol(protoA); err == nil {
+ t.Fatal("expected SetProtocol to fail")
+ }
+
+ checkPeer(peerA, func(s *resourceScope) {
+ checkRefCnt(s, 5)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 2, NumConnsInbound: 2, NumFD: 1})
+ })
+ checkPeer(peerB, func(s *resourceScope) {
+ checkRefCnt(s, 2)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1})
+ })
+ checkProtocol(protoA, func(s *resourceScope) {
+ checkRefCnt(s, 3)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 2})
+ })
+ checkSystem(func(s *resourceScope) {
+ checkRefCnt(s, 10)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 3, NumConnsInbound: 2, NumFD: 1})
+ })
+ checkTransient(func(s *resourceScope) {
+ checkRefCnt(s, 2)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1})
+ })
+
+ // but we should be able to set to another protocol
+ if err := stream3.SetProtocol(protoB); err != nil {
+ t.Fatal(err)
+ }
+
+ checkPeer(peerA, func(s *resourceScope) {
+ checkRefCnt(s, 5)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 2, NumConnsInbound: 2, NumFD: 1})
+ })
+ checkPeer(peerB, func(s *resourceScope) {
+ checkRefCnt(s, 2)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1})
+ })
+ checkProtocol(protoA, func(s *resourceScope) {
+ checkRefCnt(s, 3)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 2})
+ })
+ checkProtocol(protoB, func(s *resourceScope) {
+ checkRefCnt(s, 2)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1})
+ })
+ checkSystem(func(s *resourceScope) {
+ checkRefCnt(s, 11)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 3, NumConnsInbound: 2, NumFD: 1})
+ })
+ checkTransient(func(s *resourceScope) {
+ checkRefCnt(s, 1)
+ checkResources(t, &s.rc, network.ScopeStat{})
+ })
+
+	// the stream is not yet attached to a service, so ServiceScope should return nil
+ if stream1.ServiceScope() != nil {
+ t.Fatal("service scope should be nil")
+ }
+
+ // we should be able to attach stream1 and stream2 to svcA, but stream3 should fail due to limit
+ if err := stream1.SetService(svcA); err != nil {
+ t.Fatal(err)
+ }
+
+ checkPeer(peerA, func(s *resourceScope) {
+ checkRefCnt(s, 5)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 2, NumConnsInbound: 2, NumFD: 1})
+ })
+ checkPeer(peerB, func(s *resourceScope) {
+ checkRefCnt(s, 2)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1})
+ })
+ checkService(svcA, func(s *resourceScope) {
+ checkRefCnt(s, 2)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1})
+ })
+ checkProtocol(protoA, func(s *resourceScope) {
+ checkRefCnt(s, 3)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 2})
+ })
+ checkProtocol(protoB, func(s *resourceScope) {
+ checkRefCnt(s, 2)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1})
+ })
+ checkSystem(func(s *resourceScope) {
+ checkRefCnt(s, 12)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 3, NumConnsInbound: 2, NumFD: 1})
+ })
+ checkTransient(func(s *resourceScope) {
+ checkRefCnt(s, 1)
+ checkResources(t, &s.rc, network.ScopeStat{})
+ })
+
+ if err := stream2.SetService(svcA); err != nil {
+ t.Fatal(err)
+ }
+
+ checkPeer(peerA, func(s *resourceScope) {
+ checkRefCnt(s, 5)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 2, NumConnsInbound: 2, NumFD: 1})
+ })
+ checkPeer(peerB, func(s *resourceScope) {
+ checkRefCnt(s, 2)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1})
+ })
+ checkService(svcA, func(s *resourceScope) {
+ checkRefCnt(s, 3)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 2})
+ })
+ checkProtocol(protoA, func(s *resourceScope) {
+ checkRefCnt(s, 3)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 2})
+ })
+ checkProtocol(protoB, func(s *resourceScope) {
+ checkRefCnt(s, 2)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1})
+ })
+ checkSystem(func(s *resourceScope) {
+ checkRefCnt(s, 12)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 3, NumConnsInbound: 2, NumFD: 1})
+ })
+ checkTransient(func(s *resourceScope) {
+ checkRefCnt(s, 1)
+ checkResources(t, &s.rc, network.ScopeStat{})
+ })
+
+ if err := stream3.SetService(svcA); err == nil {
+ t.Fatal("expected SetService to fail")
+ }
+
+ checkPeer(peerA, func(s *resourceScope) {
+ checkRefCnt(s, 5)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 2, NumConnsInbound: 2, NumFD: 1})
+ })
+ checkPeer(peerB, func(s *resourceScope) {
+ checkRefCnt(s, 2)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1})
+ })
+ checkService(svcA, func(s *resourceScope) {
+ checkRefCnt(s, 3)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 2})
+ })
+ checkProtocol(protoA, func(s *resourceScope) {
+ checkRefCnt(s, 3)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 2})
+ })
+ checkProtocol(protoB, func(s *resourceScope) {
+ checkRefCnt(s, 2)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1})
+ })
+ checkSystem(func(s *resourceScope) {
+ checkRefCnt(s, 12)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 3, NumConnsInbound: 2, NumFD: 1})
+ })
+ checkTransient(func(s *resourceScope) {
+ checkRefCnt(s, 1)
+ checkResources(t, &s.rc, network.ScopeStat{})
+ })
+
+ // and now let's reclaim our resources to make sure we can gc unused peer and proto scopes
+ // but first check internal refs
+ mgr.mx.Lock()
+ _, okProtoA := mgr.proto[protoA]
+ _, okProtoB := mgr.proto[protoB]
+ _, okPeerA := mgr.peer[peerA]
+ _, okPeerB := mgr.peer[peerB]
+ mgr.mx.Unlock()
+
+ if !okProtoA {
+ t.Fatal("protocol scope is not stored")
+ }
+ if !okProtoB {
+ t.Fatal("protocol scope is not stored")
+ }
+ if !okPeerA {
+ t.Fatal("peer scope is not stored")
+ }
+ if !okPeerB {
+ t.Fatal("peer scope is not stored")
+ }
+
+ // ok, reclaim
+ stream1.Done()
+ stream2.Done()
+ stream3.Done()
+ conn1.Done()
+ conn2.Done()
+
+ // check everything released
+ checkPeer(peerA, func(s *resourceScope) {
+ checkRefCnt(s, 1)
+ checkResources(t, &s.rc, network.ScopeStat{})
+ })
+ checkPeer(peerB, func(s *resourceScope) {
+ checkRefCnt(s, 1)
+ checkResources(t, &s.rc, network.ScopeStat{})
+ })
+ checkService(svcA, func(s *resourceScope) {
+ checkRefCnt(s, 1)
+ checkResources(t, &s.rc, network.ScopeStat{})
+ })
+ checkProtocol(protoA, func(s *resourceScope) {
+ checkRefCnt(s, 1)
+ checkResources(t, &s.rc, network.ScopeStat{})
+ })
+ checkProtocol(protoB, func(s *resourceScope) {
+ checkRefCnt(s, 1)
+ checkResources(t, &s.rc, network.ScopeStat{})
+ })
+ checkSystem(func(s *resourceScope) {
+ checkRefCnt(s, 7)
+ checkResources(t, &s.rc, network.ScopeStat{})
+ })
+ checkTransient(func(s *resourceScope) {
+ checkRefCnt(s, 1)
+ checkResources(t, &s.rc, network.ScopeStat{})
+ })
+
+ mgr.gc()
+
+ checkSystem(func(s *resourceScope) {
+ checkRefCnt(s, 3)
+ checkResources(t, &s.rc, network.ScopeStat{})
+ })
+ checkTransient(func(s *resourceScope) {
+ checkRefCnt(s, 1)
+ checkResources(t, &s.rc, network.ScopeStat{})
+ })
+
+ mgr.mx.Lock()
+ lenProto := len(mgr.proto)
+ lenPeer := len(mgr.peer)
+ mgr.mx.Unlock()
+
+ if lenProto != 0 {
+ t.Fatal("protocols were not gc'ed")
+ }
+ if lenPeer != 0 {
+		t.Fatal("peers were not gc'ed")
+ }
+
+ // check that per protocol peer scopes work as intended
+ stream1, err = mgr.OpenStream(peerA, network.DirInbound)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ checkPeer(peerA, func(s *resourceScope) {
+ checkRefCnt(s, 2)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1})
+ })
+ checkSystem(func(s *resourceScope) {
+ checkRefCnt(s, 5)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1})
+ })
+ checkTransient(func(s *resourceScope) {
+ checkRefCnt(s, 2)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1})
+ })
+
+ if err := stream1.SetProtocol(protoB); err != nil {
+ t.Fatal(err)
+ }
+
+ checkPeer(peerA, func(s *resourceScope) {
+ checkRefCnt(s, 2)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1})
+ })
+ checkProtocol(protoB, func(s *resourceScope) {
+ checkRefCnt(s, 2)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1})
+ })
+ checkSystem(func(s *resourceScope) {
+ checkRefCnt(s, 6)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1})
+ })
+ checkTransient(func(s *resourceScope) {
+ checkRefCnt(s, 1)
+ checkResources(t, &s.rc, network.ScopeStat{})
+ })
+
+ stream2, err = mgr.OpenStream(peerA, network.DirInbound)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ checkPeer(peerA, func(s *resourceScope) {
+ checkRefCnt(s, 3)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 2})
+ })
+ checkSystem(func(s *resourceScope) {
+ checkRefCnt(s, 7)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 2})
+ })
+ checkTransient(func(s *resourceScope) {
+ checkRefCnt(s, 2)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1})
+ })
+
+ if err := stream2.SetProtocol(protoB); err == nil {
+ t.Fatal("expected SetProtocol to fail")
+ }
+
+ checkPeer(peerA, func(s *resourceScope) {
+ checkRefCnt(s, 3)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 2})
+ })
+ checkSystem(func(s *resourceScope) {
+ checkRefCnt(s, 7)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 2})
+ })
+ checkTransient(func(s *resourceScope) {
+ checkRefCnt(s, 2)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1})
+ })
+ checkProtocol(protoB, func(s *resourceScope) {
+ checkRefCnt(s, 2)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1})
+ })
+
+ stream1.Done()
+ stream2.Done()
+
+ // check that per service peer scopes work as intended
+ stream1, err = mgr.OpenStream(peerA, network.DirInbound)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ checkPeer(peerA, func(s *resourceScope) {
+ checkRefCnt(s, 2)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1})
+ })
+ checkSystem(func(s *resourceScope) {
+ checkRefCnt(s, 6)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1})
+ })
+ checkTransient(func(s *resourceScope) {
+ checkRefCnt(s, 2)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1})
+ })
+
+ if err := stream1.SetProtocol(protoA); err != nil {
+ t.Fatal(err)
+ }
+
+ checkPeer(peerA, func(s *resourceScope) {
+ checkRefCnt(s, 2)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1})
+ })
+ checkProtocol(protoA, func(s *resourceScope) {
+ checkRefCnt(s, 2)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1})
+ })
+ checkSystem(func(s *resourceScope) {
+ checkRefCnt(s, 7)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1})
+ })
+ checkTransient(func(s *resourceScope) {
+ checkRefCnt(s, 1)
+ checkResources(t, &s.rc, network.ScopeStat{})
+ })
+
+ stream2, err = mgr.OpenStream(peerA, network.DirInbound)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ checkPeer(peerA, func(s *resourceScope) {
+ checkRefCnt(s, 3)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 2})
+ })
+ checkSystem(func(s *resourceScope) {
+ checkRefCnt(s, 8)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 2})
+ })
+ checkTransient(func(s *resourceScope) {
+ checkRefCnt(s, 2)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1})
+ })
+
+ if err := stream2.SetProtocol(protoA); err != nil {
+ t.Fatal(err)
+ }
+
+ checkPeer(peerA, func(s *resourceScope) {
+ checkRefCnt(s, 3)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 2})
+ })
+ checkProtocol(protoA, func(s *resourceScope) {
+ checkRefCnt(s, 3)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 2})
+ })
+ checkSystem(func(s *resourceScope) {
+ checkRefCnt(s, 8)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 2})
+ })
+ checkTransient(func(s *resourceScope) {
+ checkRefCnt(s, 1)
+ checkResources(t, &s.rc, network.ScopeStat{})
+ })
+
+ if err := stream1.SetService(svcB); err != nil {
+ t.Fatal(err)
+ }
+
+ checkPeer(peerA, func(s *resourceScope) {
+ checkRefCnt(s, 3)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 2})
+ })
+ checkService(svcB, func(s *resourceScope) {
+ checkRefCnt(s, 2)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1})
+ })
+ checkProtocol(protoA, func(s *resourceScope) {
+ checkRefCnt(s, 3)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 2})
+ })
+ checkSystem(func(s *resourceScope) {
+ checkRefCnt(s, 9)
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 2})
+ })
+ checkTransient(func(s *resourceScope) {
+ checkRefCnt(s, 1)
+ checkResources(t, &s.rc, network.ScopeStat{})
+ })
+
+ // now we should fail to set the service for stream2 to svcB because of the service peer limit
+ if err := stream2.SetService(svcB); err == nil {
+ t.Fatal("expected SetService to fail")
+ }
+
+ // now release resources and check interior gc of per service peer scopes
+ stream1.Done()
+ stream2.Done()
+
+ mgr.gc()
+
+ checkSystem(func(s *resourceScope) {
+ checkRefCnt(s, 4)
+ checkResources(t, &s.rc, network.ScopeStat{})
+ })
+ checkTransient(func(s *resourceScope) {
+ checkRefCnt(s, 1)
+ checkResources(t, &s.rc, network.ScopeStat{})
+ })
+
+ mgr.mx.Lock()
+ lenProto = len(mgr.proto)
+ lenPeer = len(mgr.peer)
+ mgr.mx.Unlock()
+
+ svc := mgr.svc[svcB]
+ svc.Lock()
+ lenSvcPeer := len(svc.peers)
+ svc.Unlock()
+
+ if lenProto != 0 {
+ t.Fatal("protocols were not gc'ed")
+ }
+ if lenPeer != 0 {
+ t.Fatal("peers were not gc'ed")
+ }
+ if lenSvcPeer != 0 {
+ t.Fatal("service peers were not gc'ed")
+ }
+}
+
+func TestResourceManagerWithAllowlist(t *testing.T) {
+ peerA := test.RandPeerIDFatal(t)
+
+ limits := DefaultLimits.AutoScale()
+ limits.system.Conns = 0
+ limits.transient.Conns = 0
+
+ baseLimit := BaseLimit{
+ Conns: 2,
+ ConnsInbound: 2,
+ ConnsOutbound: 1,
+ }
+ baseLimit.Apply(limits.allowlistedSystem)
+ limits.allowlistedSystem = baseLimit
+
+ baseLimit = BaseLimit{
+ Conns: 1,
+ ConnsInbound: 1,
+ ConnsOutbound: 1,
+ }
+ baseLimit.Apply(limits.allowlistedTransient)
+ limits.allowlistedTransient = baseLimit
+
+ rcmgr, err := NewResourceManager(NewFixedLimiter(limits), WithAllowlistedMultiaddrs([]multiaddr.Multiaddr{
+ multiaddr.StringCast("/ip4/1.2.3.4"),
+ multiaddr.StringCast("/ip4/4.3.2.1/p2p/" + peerA.String()),
+ }))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer rcmgr.Close()
+
+ ableToGetAllowlist := GetAllowlist(rcmgr)
+ if ableToGetAllowlist == nil {
+ t.Fatal("Expected to be able to get the allowlist")
+ }
+
+ // A connection comes in from a non-allowlisted ip address
+ _, err = rcmgr.OpenConnection(network.DirInbound, true, multiaddr.StringCast("/ip4/1.2.3.5"))
+ if err == nil {
+ t.Fatalf("Expected this to fail. err=%v", err)
+ }
+
+ // A connection comes in from an allowlisted ip address
+ connScope, err := rcmgr.OpenConnection(network.DirInbound, true, multiaddr.StringCast("/ip4/1.2.3.4"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = connScope.SetPeer(test.RandPeerIDFatal(t))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // A connection comes in that looks like it should be allowlisted, but then has the wrong peer id.
+ connScope, err = rcmgr.OpenConnection(network.DirInbound, true, multiaddr.StringCast("/ip4/4.3.2.1"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = connScope.SetPeer(test.RandPeerIDFatal(t))
+ if err == nil {
+ t.Fatalf("Expected this to fail. err=%v", err)
+ }
+
+ // A connection comes in that looks like it should be allowlisted, and it has the allowlisted peer id
+ connScope, err = rcmgr.OpenConnection(network.DirInbound, true, multiaddr.StringCast("/ip4/4.3.2.1"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = connScope.SetPeer(peerA)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+// TestAllowlistAndConnLimiterPlayNice checks that the connLimiter learns about network prefix limits from the allowlist.
+func TestAllowlistAndConnLimiterPlayNice(t *testing.T) {
+ limits := DefaultLimits.AutoScale()
+ limits.allowlistedSystem.Conns = 8
+ limits.allowlistedSystem.ConnsInbound = 8
+ limits.allowlistedSystem.ConnsOutbound = 8
+ t.Run("IPv4", func(t *testing.T) {
+ rcmgr, err := NewResourceManager(NewFixedLimiter(limits), WithAllowlistedMultiaddrs([]multiaddr.Multiaddr{
+ multiaddr.StringCast("/ip4/1.2.3.0/ipcidr/24"),
+ }), WithNetworkPrefixLimit([]NetworkPrefixLimit{}, []NetworkPrefixLimit{}))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer rcmgr.Close()
+
+ // The connLimiter should have the allowlisted network prefix
+ require.Equal(t, netip.MustParsePrefix("1.2.3.0/24"), rcmgr.(*resourceManager).connLimiter.networkPrefixLimitV4[0].Network)
+
+ // The connLimiter should use the limit from the allowlist
+ require.Equal(t, 8, rcmgr.(*resourceManager).connLimiter.networkPrefixLimitV4[0].ConnCount)
+ })
+ t.Run("IPv6", func(t *testing.T) {
+ rcmgr, err := NewResourceManager(NewFixedLimiter(limits), WithAllowlistedMultiaddrs([]multiaddr.Multiaddr{
+ multiaddr.StringCast("/ip6/1:2:3::/ipcidr/58"),
+ }), WithNetworkPrefixLimit([]NetworkPrefixLimit{}, []NetworkPrefixLimit{}))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer rcmgr.Close()
+
+ // The connLimiter should have the allowlisted network prefix
+ require.Equal(t, netip.MustParsePrefix("1:2:3::/58"), rcmgr.(*resourceManager).connLimiter.networkPrefixLimitV6[0].Network)
+
+ // The connLimiter should use the limit from the allowlist
+ require.Equal(t, 8, rcmgr.(*resourceManager).connLimiter.networkPrefixLimitV6[0].ConnCount)
+ })
+
+ t.Run("Does not override if you set a limit directly", func(t *testing.T) {
+ rcmgr, err := NewResourceManager(NewFixedLimiter(limits), WithAllowlistedMultiaddrs([]multiaddr.Multiaddr{
+ multiaddr.StringCast("/ip4/1.2.3.0/ipcidr/24"),
+ }), WithNetworkPrefixLimit([]NetworkPrefixLimit{
+ {Network: netip.MustParsePrefix("1.2.3.0/24"), ConnCount: 1},
+ }, []NetworkPrefixLimit{}))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer rcmgr.Close()
+
+ // The connLimiter should have it because we set it
+ require.Equal(t, netip.MustParsePrefix("1.2.3.0/24"), rcmgr.(*resourceManager).connLimiter.networkPrefixLimitV4[0].Network)
+ // should only have one network prefix limit
+ require.Equal(t, 1, len(rcmgr.(*resourceManager).connLimiter.networkPrefixLimitV4))
+
+ // The connLimiter should use the limit we defined explicitly
+ require.Equal(t, 1, rcmgr.(*resourceManager).connLimiter.networkPrefixLimitV4[0].ConnCount)
+ })
+}
+
+func TestResourceManagerRateLimiting(t *testing.T) {
+ // Create a resource manager with very low rate limits
+ limits := DefaultLimits.AutoScale()
+ limits.system.Conns = 100 // High enough to not be the limiting factor
+ limits.transient.Conns = 100
+
+ // Create limiters with very low RPS
+ limiter := &rate.Limiter{
+ GlobalLimit: rate.Limit{RPS: 0.00001, Burst: 2},
+ }
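+	// With Burst: 2 and a negligible RPS, the first two connections are
+	// admitted and the third is rate limited.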
+
+ rcmgr, err := NewResourceManager(NewFixedLimiter(limits), WithConnRateLimiters(limiter))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer rcmgr.Close()
+
+ addr := multiaddr.StringCast("/ip4/1.2.3.4")
+
+ connScope, err := rcmgr.OpenConnection(network.DirInbound, true, addr)
+ require.NoError(t, err)
+ connScope.Done()
+
+ connScope, err = rcmgr.OpenConnection(network.DirInbound, true, addr)
+ require.NoError(t, err)
+ connScope.Done()
+
+ _, err = rcmgr.OpenConnection(network.DirInbound, true, addr)
+ require.ErrorContains(t, err, "rate limit exceeded")
+}
+
+func TestVerifySourceAddressRateLimiter(t *testing.T) {
+ limits := DefaultLimits.AutoScale()
+ limits.allowlistedSystem.Conns = 100
+ limits.allowlistedSystem.ConnsInbound = 100
+ limits.allowlistedSystem.ConnsOutbound = 100
+
+ rcmgr, err := NewResourceManager(NewFixedLimiter(limits), WithLimitPerSubnet([]ConnLimitPerSubnet{
+ {PrefixLength: 32, ConnCount: 2},
+ }, []ConnLimitPerSubnet{}))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer rcmgr.Close()
+
+ na1 := &net.UDPAddr{
+ IP: net.ParseIP("1.2.3.4"),
+ Port: 1234,
+ }
+ require.False(t, rcmgr.VerifySourceAddress(na1))
+ require.True(t, rcmgr.VerifySourceAddress(na1))
+
+ na2 := &net.UDPAddr{
+ IP: net.ParseIP("1.2.3.5"),
+ Port: 1234,
+ }
+ require.False(t, rcmgr.VerifySourceAddress(na2))
+ require.True(t, rcmgr.VerifySourceAddress(na2))
+}
diff --git a/p2p/host/resource-manager/scope.go b/p2p/host/resource-manager/scope.go
new file mode 100644
index 0000000000..3233f4cbb2
--- /dev/null
+++ b/p2p/host/resource-manager/scope.go
@@ -0,0 +1,815 @@
+package rcmgr
+
+import (
+ "fmt"
+ "math"
+ "math/big"
+ "strings"
+ "sync"
+
+ "github.com/libp2p/go-libp2p/core/network"
+)
+
+// resources tracks the current state of resource consumption
+type resources struct {
+ limit Limit
+
+ nconnsIn, nconnsOut int
+ nstreamsIn, nstreamsOut int
+ nfd int
+
+ memory int64
+}
+
+// A resourceScope can be a DAG scope, where a downstream node is not allowed to outlive an
+// upstream node (i.e. Done cannot be called on an upstream node before its downstream nodes)
+// and resources are accounted for against a linearized parent set.
+// A resourceScope can be a span scope, where it has a specific owner; span scopes create a tree rooted
+// at the owner (which can be a DAG scope) and can outlive their parents -- this is important because
+// span scopes are the main *user* interface for memory management, and the user may call
+// Done in a span scope after the system has closed the root of the span tree in some background
+// goroutine.
+// If we didn't make this distinction we would have a double release problem in that case.
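+//
+// For example, a freshly opened stream scope is a DAG scope whose linearized
+// parent set is {peer, transient, system}, while BeginSpan on any scope
+// returns a span scope owned by that scope.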
+type resourceScope struct {
+ sync.Mutex
+ done bool
+ refCnt int
+
+ spanID int
+
+ rc resources
+ owner *resourceScope // set in span scopes, which define trees
+ edges []*resourceScope // set in DAG scopes, it's the linearized parent set
+
+ name string // for debugging purposes
+ trace *trace // debug tracing
+ metrics *metrics // metrics collection
+}
+
+var _ network.ResourceScope = (*resourceScope)(nil)
+var _ network.ResourceScopeSpan = (*resourceScope)(nil)
+
+func newResourceScope(limit Limit, edges []*resourceScope, name string, trace *trace, metrics *metrics) *resourceScope {
+ for _, e := range edges {
+ e.IncRef()
+ }
+ r := &resourceScope{
+ rc: resources{limit: limit},
+ edges: edges,
+ name: name,
+ trace: trace,
+ metrics: metrics,
+ }
+ r.trace.CreateScope(name, limit)
+ return r
+}
+
+func newResourceScopeSpan(owner *resourceScope, id int) *resourceScope {
+ r := &resourceScope{
+ rc: resources{limit: owner.rc.limit},
+ owner: owner,
+ name: fmt.Sprintf("%s.span-%d", owner.name, id),
+ trace: owner.trace,
+ metrics: owner.metrics,
+ }
+ r.trace.CreateScope(r.name, r.rc.limit)
+ return r
+}
+
+// IsSpan will return true if this name was created by newResourceScopeSpan
+func IsSpan(name string) bool {
+ return strings.Contains(name, ".span-")
+}
+
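+// addInt64WithOverflow adds a and b, returning ok=false if the sum overflowed.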
+func addInt64WithOverflow(a int64, b int64) (c int64, ok bool) {
+ c = a + b
+ return c, (c > a) == (b > 0)
+}
+
+// mulInt64WithOverflow checks for overflow in multiplying two int64s. See
+// https://groups.google.com/g/golang-nuts/c/h5oSN5t3Au4/m/KaNQREhZh0QJ
+func mulInt64WithOverflow(a, b int64) (c int64, ok bool) {
+ const mostPositive = 1<<63 - 1
+ const mostNegative = -(mostPositive + 1)
+ c = a * b
+ if a == 0 || b == 0 || a == 1 || b == 1 {
+ return c, true
+ }
+ if a == mostNegative || b == mostNegative {
+ return c, false
+ }
+ return c, c/b == a
+}
+
+// Resources implementation
+func (rc *resources) checkMemory(rsvp int64, prio uint8) error {
+ if rsvp < 0 {
+ return fmt.Errorf("can't reserve negative memory. rsvp=%v", rsvp)
+ }
+
+ limit := rc.limit.GetMemoryLimit()
+ if limit == math.MaxInt64 {
+ // Special case where we've set max limits.
+ return nil
+ }
+
+ newmem, addOk := addInt64WithOverflow(rc.memory, rsvp)
+
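+	// The priority scales the effective limit: a reservation with priority prio
+	// may use up to (1+prio)/256 of the memory limit, so prio=255
+	// (network.ReservationPriorityAlways) admits up to the full limit.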
+ threshold, mulOk := mulInt64WithOverflow(1+int64(prio), limit)
+ if !mulOk {
+ thresholdBig := big.NewInt(limit)
+ thresholdBig.Mul(thresholdBig, big.NewInt(1+int64(prio)))
+		thresholdBig.Rsh(thresholdBig, 8) // divide by 256
+		// The result necessarily fits in an int64: we multiplied an int64
+		// (!= MaxInt64) by at most 256 (a uint8 plus 1) and then divided by 256.
+		threshold = thresholdBig.Int64()
+ } else {
+ threshold = threshold / 256
+ }
+
+ if !addOk || newmem > threshold {
+ return &ErrMemoryLimitExceeded{
+ current: rc.memory,
+ attempted: rsvp,
+ limit: limit,
+ priority: prio,
+ err: network.ErrResourceLimitExceeded,
+ }
+ }
+ return nil
+}
+
+func (rc *resources) reserveMemory(size int64, prio uint8) error {
+ if err := rc.checkMemory(size, prio); err != nil {
+ return err
+ }
+
+ rc.memory += size
+ return nil
+}
+
+func (rc *resources) releaseMemory(size int64) {
+ rc.memory -= size
+
+ // sanity check for bugs upstream
+ if rc.memory < 0 {
+ log.Warn("BUG: too much memory released")
+ rc.memory = 0
+ }
+}
+
+func (rc *resources) addStream(dir network.Direction) error {
+ if dir == network.DirInbound {
+ return rc.addStreams(1, 0)
+ }
+ return rc.addStreams(0, 1)
+}
+
+func (rc *resources) addStreams(incount, outcount int) error {
+ if incount > 0 {
+ limit := rc.limit.GetStreamLimit(network.DirInbound)
+ if rc.nstreamsIn+incount > limit {
+ return &ErrStreamOrConnLimitExceeded{
+ current: rc.nstreamsIn,
+ attempted: incount,
+ limit: limit,
+ err: fmt.Errorf("cannot reserve inbound stream: %w", network.ErrResourceLimitExceeded),
+ }
+ }
+ }
+ if outcount > 0 {
+ limit := rc.limit.GetStreamLimit(network.DirOutbound)
+ if rc.nstreamsOut+outcount > limit {
+ return &ErrStreamOrConnLimitExceeded{
+ current: rc.nstreamsOut,
+ attempted: outcount,
+ limit: limit,
+ err: fmt.Errorf("cannot reserve outbound stream: %w", network.ErrResourceLimitExceeded),
+ }
+ }
+ }
+
+ if limit := rc.limit.GetStreamTotalLimit(); rc.nstreamsIn+incount+rc.nstreamsOut+outcount > limit {
+ return &ErrStreamOrConnLimitExceeded{
+ current: rc.nstreamsIn + rc.nstreamsOut,
+ attempted: incount + outcount,
+ limit: limit,
+ err: fmt.Errorf("cannot reserve stream: %w", network.ErrResourceLimitExceeded),
+ }
+ }
+
+ rc.nstreamsIn += incount
+ rc.nstreamsOut += outcount
+ return nil
+}
+
+func (rc *resources) removeStream(dir network.Direction) {
+ if dir == network.DirInbound {
+ rc.removeStreams(1, 0)
+ } else {
+ rc.removeStreams(0, 1)
+ }
+}
+
+func (rc *resources) removeStreams(incount, outcount int) {
+ rc.nstreamsIn -= incount
+ rc.nstreamsOut -= outcount
+
+ if rc.nstreamsIn < 0 {
+ log.Warn("BUG: too many inbound streams released")
+ rc.nstreamsIn = 0
+ }
+ if rc.nstreamsOut < 0 {
+ log.Warn("BUG: too many outbound streams released")
+ rc.nstreamsOut = 0
+ }
+}
+
+func (rc *resources) addConn(dir network.Direction, usefd bool) error {
+ var fd int
+ if usefd {
+ fd = 1
+ }
+
+ if dir == network.DirInbound {
+ return rc.addConns(1, 0, fd)
+ }
+
+ return rc.addConns(0, 1, fd)
+}
+
+func (rc *resources) addConns(incount, outcount, fdcount int) error {
+ if incount > 0 {
+ limit := rc.limit.GetConnLimit(network.DirInbound)
+ if rc.nconnsIn+incount > limit {
+ return &ErrStreamOrConnLimitExceeded{
+ current: rc.nconnsIn,
+ attempted: incount,
+ limit: limit,
+ err: fmt.Errorf("cannot reserve inbound connection: %w", network.ErrResourceLimitExceeded),
+ }
+ }
+ }
+ if outcount > 0 {
+ limit := rc.limit.GetConnLimit(network.DirOutbound)
+ if rc.nconnsOut+outcount > limit {
+ return &ErrStreamOrConnLimitExceeded{
+ current: rc.nconnsOut,
+ attempted: outcount,
+ limit: limit,
+ err: fmt.Errorf("cannot reserve outbound connection: %w", network.ErrResourceLimitExceeded),
+ }
+ }
+ }
+
+ if connLimit := rc.limit.GetConnTotalLimit(); rc.nconnsIn+incount+rc.nconnsOut+outcount > connLimit {
+ return &ErrStreamOrConnLimitExceeded{
+ current: rc.nconnsIn + rc.nconnsOut,
+ attempted: incount + outcount,
+ limit: connLimit,
+ err: fmt.Errorf("cannot reserve connection: %w", network.ErrResourceLimitExceeded),
+ }
+ }
+ if fdcount > 0 {
+ limit := rc.limit.GetFDLimit()
+ if rc.nfd+fdcount > limit {
+ return &ErrStreamOrConnLimitExceeded{
+ current: rc.nfd,
+ attempted: fdcount,
+ limit: limit,
+ err: fmt.Errorf("cannot reserve file descriptor: %w", network.ErrResourceLimitExceeded),
+ }
+ }
+ }
+
+ rc.nconnsIn += incount
+ rc.nconnsOut += outcount
+ rc.nfd += fdcount
+ return nil
+}
+
+func (rc *resources) removeConn(dir network.Direction, usefd bool) {
+ var fd int
+ if usefd {
+ fd = 1
+ }
+
+ if dir == network.DirInbound {
+ rc.removeConns(1, 0, fd)
+ } else {
+ rc.removeConns(0, 1, fd)
+ }
+}
+
+func (rc *resources) removeConns(incount, outcount, fdcount int) {
+ rc.nconnsIn -= incount
+ rc.nconnsOut -= outcount
+ rc.nfd -= fdcount
+
+ if rc.nconnsIn < 0 {
+ log.Warn("BUG: too many inbound connections released")
+ rc.nconnsIn = 0
+ }
+ if rc.nconnsOut < 0 {
+ log.Warn("BUG: too many outbound connections released")
+ rc.nconnsOut = 0
+ }
+ if rc.nfd < 0 {
+ log.Warn("BUG: too many file descriptors released")
+ rc.nfd = 0
+ }
+}
+
+func (rc *resources) stat() network.ScopeStat {
+ return network.ScopeStat{
+ Memory: rc.memory,
+ NumStreamsInbound: rc.nstreamsIn,
+ NumStreamsOutbound: rc.nstreamsOut,
+ NumConnsInbound: rc.nconnsIn,
+ NumConnsOutbound: rc.nconnsOut,
+ NumFD: rc.nfd,
+ }
+}
+
+// resourceScope implementation
+func (s *resourceScope) wrapError(err error) error {
+ return fmt.Errorf("%s: %w", s.name, err)
+}
+
+func (s *resourceScope) ReserveMemory(size int, prio uint8) error {
+ s.Lock()
+ defer s.Unlock()
+
+ if s.done {
+ return s.wrapError(network.ErrResourceScopeClosed)
+ }
+
+ if err := s.rc.reserveMemory(int64(size), prio); err != nil {
+ log.Debug("blocked memory reservation", logValuesMemoryLimit(s.name, "", s.rc.stat(), err)...)
+ s.trace.BlockReserveMemory(s.name, prio, int64(size), s.rc.memory)
+ s.metrics.BlockMemory(size)
+ return s.wrapError(err)
+ }
+
+ if err := s.reserveMemoryForEdges(size, prio); err != nil {
+ s.rc.releaseMemory(int64(size))
+ s.metrics.BlockMemory(size)
+ return s.wrapError(err)
+ }
+
+ s.trace.ReserveMemory(s.name, prio, int64(size), s.rc.memory)
+ s.metrics.AllowMemory(size)
+ return nil
+}
+
+func (s *resourceScope) reserveMemoryForEdges(size int, prio uint8) error {
+ if s.owner != nil {
+ return s.owner.ReserveMemory(size, prio)
+ }
+
+ var reserved int
+ var err error
+ for _, e := range s.edges {
+ var stat network.ScopeStat
+ stat, err = e.ReserveMemoryForChild(int64(size), prio)
+ if err != nil {
+ log.Debug("blocked memory reservation from constraining edge", logValuesMemoryLimit(s.name, e.name, stat, err)...)
+ break
+ }
+
+ reserved++
+ }
+
+ if err != nil {
+ // we failed because of a constraint; undo memory reservations
+ for _, e := range s.edges[:reserved] {
+ e.ReleaseMemoryForChild(int64(size))
+ }
+ }
+
+ return err
+}
+
+func (s *resourceScope) releaseMemoryForEdges(size int) {
+ if s.owner != nil {
+ s.owner.ReleaseMemory(size)
+ return
+ }
+
+ for _, e := range s.edges {
+ e.ReleaseMemoryForChild(int64(size))
+ }
+}
+
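+// ReserveMemoryForChild reserves memory on behalf of a child DAG scope. On
+// failure it returns this scope's current stat so the caller can report which
+// edge constrained the reservation.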
+func (s *resourceScope) ReserveMemoryForChild(size int64, prio uint8) (network.ScopeStat, error) {
+ s.Lock()
+ defer s.Unlock()
+
+ if s.done {
+ return s.rc.stat(), s.wrapError(network.ErrResourceScopeClosed)
+ }
+
+ if err := s.rc.reserveMemory(size, prio); err != nil {
+ s.trace.BlockReserveMemory(s.name, prio, size, s.rc.memory)
+ return s.rc.stat(), s.wrapError(err)
+ }
+
+ s.trace.ReserveMemory(s.name, prio, size, s.rc.memory)
+ return network.ScopeStat{}, nil
+}
+
+func (s *resourceScope) ReleaseMemory(size int) {
+ s.Lock()
+ defer s.Unlock()
+
+ if s.done {
+ return
+ }
+
+ s.rc.releaseMemory(int64(size))
+ s.releaseMemoryForEdges(size)
+ s.trace.ReleaseMemory(s.name, int64(size), s.rc.memory)
+}
+
+func (s *resourceScope) ReleaseMemoryForChild(size int64) {
+ s.Lock()
+ defer s.Unlock()
+
+ if s.done {
+ return
+ }
+
+ s.rc.releaseMemory(size)
+ s.trace.ReleaseMemory(s.name, size, s.rc.memory)
+}
+
+func (s *resourceScope) AddStream(dir network.Direction) error {
+ s.Lock()
+ defer s.Unlock()
+
+ if s.done {
+ return s.wrapError(network.ErrResourceScopeClosed)
+ }
+
+ if err := s.rc.addStream(dir); err != nil {
+ log.Debug("blocked stream", logValuesStreamLimit(s.name, "", dir, s.rc.stat(), err)...)
+ s.trace.BlockAddStream(s.name, dir, s.rc.nstreamsIn, s.rc.nstreamsOut)
+ return s.wrapError(err)
+ }
+
+ if err := s.addStreamForEdges(dir); err != nil {
+ s.rc.removeStream(dir)
+ return s.wrapError(err)
+ }
+
+ s.trace.AddStream(s.name, dir, s.rc.nstreamsIn, s.rc.nstreamsOut)
+ return nil
+}
+
+func (s *resourceScope) addStreamForEdges(dir network.Direction) error {
+ if s.owner != nil {
+ return s.owner.AddStream(dir)
+ }
+
+ var err error
+ var reserved int
+ for _, e := range s.edges {
+ var stat network.ScopeStat
+ stat, err = e.AddStreamForChild(dir)
+ if err != nil {
+ log.Debug("blocked stream from constraining edge", logValuesStreamLimit(s.name, e.name, dir, stat, err)...)
+ break
+ }
+ reserved++
+ }
+
+ if err != nil {
+ for _, e := range s.edges[:reserved] {
+ e.RemoveStreamForChild(dir)
+ }
+ }
+
+ return err
+}
+
+func (s *resourceScope) AddStreamForChild(dir network.Direction) (network.ScopeStat, error) {
+ s.Lock()
+ defer s.Unlock()
+
+ if s.done {
+ return s.rc.stat(), s.wrapError(network.ErrResourceScopeClosed)
+ }
+
+ if err := s.rc.addStream(dir); err != nil {
+ s.trace.BlockAddStream(s.name, dir, s.rc.nstreamsIn, s.rc.nstreamsOut)
+ return s.rc.stat(), s.wrapError(err)
+ }
+
+ s.trace.AddStream(s.name, dir, s.rc.nstreamsIn, s.rc.nstreamsOut)
+ return network.ScopeStat{}, nil
+}
+
+func (s *resourceScope) RemoveStream(dir network.Direction) {
+ s.Lock()
+ defer s.Unlock()
+
+ if s.done {
+ return
+ }
+
+ s.rc.removeStream(dir)
+ s.removeStreamForEdges(dir)
+ s.trace.RemoveStream(s.name, dir, s.rc.nstreamsIn, s.rc.nstreamsOut)
+}
+
+func (s *resourceScope) removeStreamForEdges(dir network.Direction) {
+ if s.owner != nil {
+ s.owner.RemoveStream(dir)
+ return
+ }
+
+ for _, e := range s.edges {
+ e.RemoveStreamForChild(dir)
+ }
+}
+
+func (s *resourceScope) RemoveStreamForChild(dir network.Direction) {
+ s.Lock()
+ defer s.Unlock()
+
+ if s.done {
+ return
+ }
+
+ s.rc.removeStream(dir)
+ s.trace.RemoveStream(s.name, dir, s.rc.nstreamsIn, s.rc.nstreamsOut)
+}
+
+func (s *resourceScope) AddConn(dir network.Direction, usefd bool) error {
+ s.Lock()
+ defer s.Unlock()
+
+ if s.done {
+ return s.wrapError(network.ErrResourceScopeClosed)
+ }
+
+ if err := s.rc.addConn(dir, usefd); err != nil {
+ log.Debug("blocked connection", logValuesConnLimit(s.name, "", dir, usefd, s.rc.stat(), err)...)
+ s.trace.BlockAddConn(s.name, dir, usefd, s.rc.nconnsIn, s.rc.nconnsOut, s.rc.nfd)
+ return s.wrapError(err)
+ }
+
+ if err := s.addConnForEdges(dir, usefd); err != nil {
+ s.rc.removeConn(dir, usefd)
+ return s.wrapError(err)
+ }
+
+ s.trace.AddConn(s.name, dir, usefd, s.rc.nconnsIn, s.rc.nconnsOut, s.rc.nfd)
+ return nil
+}
+
+func (s *resourceScope) addConnForEdges(dir network.Direction, usefd bool) error {
+ if s.owner != nil {
+ return s.owner.AddConn(dir, usefd)
+ }
+
+ var err error
+ var reserved int
+ for _, e := range s.edges {
+ var stat network.ScopeStat
+ stat, err = e.AddConnForChild(dir, usefd)
+ if err != nil {
+ log.Debug("blocked connection from constraining edge", logValuesConnLimit(s.name, e.name, dir, usefd, stat, err)...)
+ break
+ }
+ reserved++
+ }
+
+ if err != nil {
+ for _, e := range s.edges[:reserved] {
+ e.RemoveConnForChild(dir, usefd)
+ }
+ }
+
+ return err
+}
+
+func (s *resourceScope) AddConnForChild(dir network.Direction, usefd bool) (network.ScopeStat, error) {
+ s.Lock()
+ defer s.Unlock()
+
+ if s.done {
+ return s.rc.stat(), s.wrapError(network.ErrResourceScopeClosed)
+ }
+
+ if err := s.rc.addConn(dir, usefd); err != nil {
+ s.trace.BlockAddConn(s.name, dir, usefd, s.rc.nconnsIn, s.rc.nconnsOut, s.rc.nfd)
+ return s.rc.stat(), s.wrapError(err)
+ }
+
+ s.trace.AddConn(s.name, dir, usefd, s.rc.nconnsIn, s.rc.nconnsOut, s.rc.nfd)
+ return network.ScopeStat{}, nil
+}
+
+func (s *resourceScope) RemoveConn(dir network.Direction, usefd bool) {
+ s.Lock()
+ defer s.Unlock()
+
+ if s.done {
+ return
+ }
+
+ s.rc.removeConn(dir, usefd)
+ s.removeConnForEdges(dir, usefd)
+ s.trace.RemoveConn(s.name, dir, usefd, s.rc.nconnsIn, s.rc.nconnsOut, s.rc.nfd)
+}
+
+func (s *resourceScope) removeConnForEdges(dir network.Direction, usefd bool) {
+ if s.owner != nil {
+ s.owner.RemoveConn(dir, usefd)
+ }
+
+ for _, e := range s.edges {
+ e.RemoveConnForChild(dir, usefd)
+ }
+}
+
+func (s *resourceScope) RemoveConnForChild(dir network.Direction, usefd bool) {
+ s.Lock()
+ defer s.Unlock()
+
+ if s.done {
+ return
+ }
+
+ s.rc.removeConn(dir, usefd)
+ s.trace.RemoveConn(s.name, dir, usefd, s.rc.nconnsIn, s.rc.nconnsOut, s.rc.nfd)
+}
+
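+// ReserveForChild charges an entire ScopeStat snapshot against this scope:
+// memory first, then streams, then connections. If a later stage fails, the
+// stages already applied are unwound before the error is returned.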
+func (s *resourceScope) ReserveForChild(st network.ScopeStat) error {
+ s.Lock()
+ defer s.Unlock()
+
+ if s.done {
+ return s.wrapError(network.ErrResourceScopeClosed)
+ }
+
+ if err := s.rc.reserveMemory(st.Memory, network.ReservationPriorityAlways); err != nil {
+ s.trace.BlockReserveMemory(s.name, 255, st.Memory, s.rc.memory) // 255 == network.ReservationPriorityAlways
+ return s.wrapError(err)
+ }
+
+ if err := s.rc.addStreams(st.NumStreamsInbound, st.NumStreamsOutbound); err != nil {
+ s.trace.BlockAddStreams(s.name, st.NumStreamsInbound, st.NumStreamsOutbound, s.rc.nstreamsIn, s.rc.nstreamsOut)
+ s.rc.releaseMemory(st.Memory)
+ return s.wrapError(err)
+ }
+
+ if err := s.rc.addConns(st.NumConnsInbound, st.NumConnsOutbound, st.NumFD); err != nil {
+ s.trace.BlockAddConns(s.name, st.NumConnsInbound, st.NumConnsOutbound, st.NumFD, s.rc.nconnsIn, s.rc.nconnsOut, s.rc.nfd)
+
+ s.rc.releaseMemory(st.Memory)
+ s.rc.removeStreams(st.NumStreamsInbound, st.NumStreamsOutbound)
+ return s.wrapError(err)
+ }
+
+ s.trace.ReserveMemory(s.name, 255, st.Memory, s.rc.memory) // 255 == network.ReservationPriorityAlways
+ s.trace.AddStreams(s.name, st.NumStreamsInbound, st.NumStreamsOutbound, s.rc.nstreamsIn, s.rc.nstreamsOut)
+ s.trace.AddConns(s.name, st.NumConnsInbound, st.NumConnsOutbound, st.NumFD, s.rc.nconnsIn, s.rc.nconnsOut, s.rc.nfd)
+
+ return nil
+}
+
+func (s *resourceScope) ReleaseForChild(st network.ScopeStat) {
+ s.Lock()
+ defer s.Unlock()
+
+ if s.done {
+ return
+ }
+
+ s.rc.releaseMemory(st.Memory)
+ s.rc.removeStreams(st.NumStreamsInbound, st.NumStreamsOutbound)
+ s.rc.removeConns(st.NumConnsInbound, st.NumConnsOutbound, st.NumFD)
+
+ s.trace.ReleaseMemory(s.name, st.Memory, s.rc.memory)
+ s.trace.RemoveStreams(s.name, st.NumStreamsInbound, st.NumStreamsOutbound, s.rc.nstreamsIn, s.rc.nstreamsOut)
+ s.trace.RemoveConns(s.name, st.NumConnsInbound, st.NumConnsOutbound, st.NumFD, s.rc.nconnsIn, s.rc.nconnsOut, s.rc.nfd)
+}
+
+func (s *resourceScope) ReleaseResources(st network.ScopeStat) {
+ s.Lock()
+ defer s.Unlock()
+
+ if s.done {
+ return
+ }
+
+ s.rc.releaseMemory(st.Memory)
+ s.rc.removeStreams(st.NumStreamsInbound, st.NumStreamsOutbound)
+ s.rc.removeConns(st.NumConnsInbound, st.NumConnsOutbound, st.NumFD)
+
+ if s.owner != nil {
+ s.owner.ReleaseResources(st)
+ } else {
+ for _, e := range s.edges {
+ e.ReleaseForChild(st)
+ }
+ }
+
+ s.trace.ReleaseMemory(s.name, st.Memory, s.rc.memory)
+ s.trace.RemoveStreams(s.name, st.NumStreamsInbound, st.NumStreamsOutbound, s.rc.nstreamsIn, s.rc.nstreamsOut)
+ s.trace.RemoveConns(s.name, st.NumConnsInbound, st.NumConnsOutbound, st.NumFD, s.rc.nconnsIn, s.rc.nconnsOut, s.rc.nfd)
+}
+
+func (s *resourceScope) nextSpanID() int {
+ s.spanID++
+ return s.spanID
+}
+
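+// BeginSpan creates a transactional child scope owned by this scope; the
+// span's reservations count against this scope and are released together
+// when the span is Done.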
+func (s *resourceScope) BeginSpan() (network.ResourceScopeSpan, error) {
+ s.Lock()
+ defer s.Unlock()
+
+ if s.done {
+ return nil, s.wrapError(network.ErrResourceScopeClosed)
+ }
+
+ s.refCnt++
+ return newResourceScopeSpan(s, s.nextSpanID()), nil
+}
+
+func (s *resourceScope) Done() {
+ s.Lock()
+ defer s.Unlock()
+
+ s.doneUnlocked()
+}
+
+func (s *resourceScope) doneUnlocked() {
+ if s.done {
+ return
+ }
+ stat := s.rc.stat()
+ if s.owner != nil {
+ s.owner.ReleaseResources(stat)
+ s.owner.DecRef()
+ } else {
+ for _, e := range s.edges {
+ e.ReleaseForChild(stat)
+ e.DecRef()
+ }
+ }
+
+ s.rc.nstreamsIn = 0
+ s.rc.nstreamsOut = 0
+ s.rc.nconnsIn = 0
+ s.rc.nconnsOut = 0
+ s.rc.nfd = 0
+ s.rc.memory = 0
+
+ s.done = true
+
+ s.trace.DestroyScope(s.name)
+}
+
+func (s *resourceScope) Stat() network.ScopeStat {
+ s.Lock()
+ defer s.Unlock()
+
+ return s.rc.stat()
+}
+
+func (s *resourceScope) IncRef() {
+ s.Lock()
+ defer s.Unlock()
+
+ s.refCnt++
+}
+
+func (s *resourceScope) DecRef() {
+ s.Lock()
+ defer s.Unlock()
+
+ s.refCnt--
+}
+
+func (s *resourceScope) IsUnused() bool {
+ s.Lock()
+ defer s.Unlock()
+
+ if s.done {
+ return true
+ }
+
+ if s.refCnt > 0 {
+ return false
+ }
+
+ st := s.rc.stat()
+ return st.NumStreamsInbound == 0 &&
+ st.NumStreamsOutbound == 0 &&
+ st.NumConnsInbound == 0 &&
+ st.NumConnsOutbound == 0 &&
+ st.NumFD == 0
+}
diff --git a/p2p/host/resource-manager/scope_test.go b/p2p/host/resource-manager/scope_test.go
new file mode 100644
index 0000000000..7bf7ff721b
--- /dev/null
+++ b/p2p/host/resource-manager/scope_test.go
@@ -0,0 +1,1274 @@
+package rcmgr
+
+import (
+ "math"
+ "testing"
+ "testing/quick"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/stretchr/testify/require"
+)
+
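+// checkResources asserts that the counters in rc exactly match the expected
+// snapshot st, failing the test on the first mismatch.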
+func checkResources(t *testing.T, rc *resources, st network.ScopeStat) {
+ t.Helper()
+
+ if rc.nconnsIn != st.NumConnsInbound {
+ t.Fatalf("expected %d inbound conns, got %d", st.NumConnsInbound, rc.nconnsIn)
+ }
+ if rc.nconnsOut != st.NumConnsOutbound {
+ t.Fatalf("expected %d outbound conns, got %d", st.NumConnsOutbound, rc.nconnsOut)
+ }
+ if rc.nstreamsIn != st.NumStreamsInbound {
+ t.Fatalf("expected %d inbound streams, got %d", st.NumStreamsInbound, rc.nstreamsIn)
+ }
+ if rc.nstreamsOut != st.NumStreamsOutbound {
+ t.Fatalf("expected %d outbound streams, got %d", st.NumStreamsOutbound, rc.nstreamsOut)
+ }
+ if rc.nfd != st.NumFD {
+ t.Fatalf("expected %d file descriptors, got %d", st.NumFD, rc.nfd)
+ }
+ if rc.memory != st.Memory {
+ t.Fatalf("expected %d reserved bytes of memory, got %d", st.Memory, rc.memory)
+ }
+}
+
+func TestCheckMemory(t *testing.T) {
+ t.Run("overflows", func(t *testing.T) {
+ rc := resources{limit: &BaseLimit{
+ Memory: math.MaxInt64 - 1,
+ StreamsInbound: 1,
+ StreamsOutbound: 1,
+ Streams: 1,
+ ConnsInbound: 1,
+ ConnsOutbound: 1,
+ Conns: 1,
+ FD: 1,
+ }}
+ rc.memory = math.MaxInt64 - 1
+ require.Error(t, rc.checkMemory(2, network.ReservationPriorityAlways))
+
+ rc.memory = 1024
+ require.NoError(t, rc.checkMemory(1, network.ReservationPriorityAlways))
+ })
+
+ t.Run("negative mem", func(t *testing.T) {
+ rc := resources{limit: &BaseLimit{
+ Memory: math.MaxInt64,
+ StreamsInbound: 1,
+ StreamsOutbound: 1,
+ Streams: 1,
+ ConnsInbound: 1,
+ ConnsOutbound: 1,
+ Conns: 1,
+ FD: 1,
+ }}
+ rc.memory = math.MaxInt64
+
+ require.Error(t, rc.checkMemory(-1, network.ReservationPriorityAlways))
+ })
+
+ f := func(limit uint64, res uint64, currentMem uint64, priShift uint8) bool {
+ limit = (limit % math.MaxInt64) + 1
+ if limit < 1024 {
+ // We set the min to 1KiB
+ limit = 1024
+ }
+ currentMem = (currentMem % limit) // We can't have reserved more than our limit
+ res = (res >> 14) // We won't reasonably ever have a reservation > 2^50
+ rc := resources{limit: &BaseLimit{
+ Memory: int64(limit),
+ StreamsInbound: 1,
+ StreamsOutbound: 1,
+ Streams: 1,
+ ConnsInbound: 1,
+ ConnsOutbound: 1,
+ Conns: 1,
+ FD: 1,
+ }}
+ rc.memory = int64(currentMem)
+
+ priShift = (priShift % 9)
+ // Check priorities of the form 2^k - 1 for k = 0..8 (i.e. 0, 1, 3, ..., 255). This keeps the math in the check below exact (and avoids overflows).
+ pri := uint8((1 << priShift) - 1)
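+ // With pri = 2^priShift - 1 the priority threshold works out to
+ // limit >> (8 - priShift), which is exactly what the comparison below checks.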
+
+ err := rc.checkMemory(int64(res), pri)
+ if limit == math.MaxInt64 && err == nil {
+ // Special case: checkMemory treats a MaxInt64 limit as effectively unlimited, so accept a successful reservation regardless of the formula below.
+ return true
+ }
+
+ return (err != nil) == (res+uint64(rc.memory) > (limit >> uint64(8-priShift)))
+ }
+
+ require.NoError(t, quick.Check(f, nil))
+}
+
+func TestResources(t *testing.T) {
+ rc := resources{limit: &BaseLimit{
+ Memory: 4096,
+ StreamsInbound: 1,
+ StreamsOutbound: 1,
+ Streams: 1,
+ ConnsInbound: 1,
+ ConnsOutbound: 1,
+ Conns: 1,
+ FD: 1,
+ }}
+
+ checkResources(t, &rc, network.ScopeStat{})
+
+ // test checkMemory
+ if err := rc.checkMemory(1024, network.ReservationPriorityAlways); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := rc.checkMemory(2048, network.ReservationPriorityAlways); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := rc.checkMemory(3072, network.ReservationPriorityAlways); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := rc.checkMemory(4096, network.ReservationPriorityAlways); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := rc.checkMemory(8192, network.ReservationPriorityAlways); err == nil {
+ t.Fatal("expected memory check to fail")
+ }
+
+ if err := rc.checkMemory(1024, network.ReservationPriorityLow); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := rc.checkMemory(2048, network.ReservationPriorityLow); err == nil {
+ t.Fatal("expected memory check to fail")
+ }
+
+ if err := rc.checkMemory(2048, network.ReservationPriorityMedium); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := rc.checkMemory(3072, network.ReservationPriorityMedium); err == nil {
+ t.Fatal("expected memory check to fail")
+ }
+
+ if err := rc.checkMemory(3072, network.ReservationPriorityHigh); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := rc.checkMemory(3584, network.ReservationPriorityHigh); err == nil {
+ t.Fatal("expected memory check to fail")
+ }
+
+ // test reserveMemory
+ if err := rc.reserveMemory(1024, network.ReservationPriorityAlways); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &rc, network.ScopeStat{Memory: 1024})
+
+ if err := rc.reserveMemory(1024, network.ReservationPriorityAlways); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &rc, network.ScopeStat{Memory: 2048})
+
+ if err := rc.reserveMemory(1024, network.ReservationPriorityAlways); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &rc, network.ScopeStat{Memory: 3072})
+
+ if err := rc.reserveMemory(512, network.ReservationPriorityAlways); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &rc, network.ScopeStat{Memory: 3584})
+
+ if err := rc.reserveMemory(4096, network.ReservationPriorityAlways); err == nil {
+ t.Fatal("expected memory reservation to fail")
+ }
+ checkResources(t, &rc, network.ScopeStat{Memory: 3584})
+
+ rc.releaseMemory(2560)
+ checkResources(t, &rc, network.ScopeStat{Memory: 1024})
+
+ if err := rc.reserveMemory(2048, network.ReservationPriorityAlways); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &rc, network.ScopeStat{Memory: 3072})
+
+ rc.releaseMemory(3072)
+ checkResources(t, &rc, network.ScopeStat{})
+
+ if err := rc.reserveMemory(1024, network.ReservationPriorityLow); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &rc, network.ScopeStat{Memory: 1024})
+
+ if err := rc.reserveMemory(1024, network.ReservationPriorityLow); err == nil {
+ t.Fatal("expected memory check to fail")
+ }
+ checkResources(t, &rc, network.ScopeStat{Memory: 1024})
+
+ if err := rc.reserveMemory(1024, network.ReservationPriorityMedium); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &rc, network.ScopeStat{Memory: 2048})
+
+ if err := rc.reserveMemory(1024, network.ReservationPriorityMedium); err == nil {
+ t.Fatal("expected memory check to fail")
+ }
+ checkResources(t, &rc, network.ScopeStat{Memory: 2048})
+
+ if err := rc.reserveMemory(1024, network.ReservationPriorityHigh); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &rc, network.ScopeStat{Memory: 3072})
+
+ if err := rc.reserveMemory(512, network.ReservationPriorityHigh); err == nil {
+ t.Fatal("expected memory check to fail")
+ }
+ checkResources(t, &rc, network.ScopeStat{Memory: 3072})
+
+ if err := rc.reserveMemory(512, network.ReservationPriorityAlways); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &rc, network.ScopeStat{Memory: 3584})
+
+ rc.releaseMemory(3584)
+ checkResources(t, &rc, network.ScopeStat{})
+
+ // test addStream
+ if err := rc.addStream(network.DirInbound); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &rc, network.ScopeStat{NumStreamsInbound: 1})
+
+ if err := rc.addStream(network.DirInbound); err == nil {
+ t.Fatal("expected addStream to fail")
+ }
+ checkResources(t, &rc, network.ScopeStat{NumStreamsInbound: 1})
+
+ if err := rc.addStream(network.DirOutbound); err == nil {
+ t.Fatal("expected addStream to fail")
+ }
+ checkResources(t, &rc, network.ScopeStat{NumStreamsInbound: 1})
+
+ rc.removeStream(network.DirInbound)
+ checkResources(t, &rc, network.ScopeStat{})
+
+ if err := rc.addStream(network.DirOutbound); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &rc, network.ScopeStat{NumStreamsOutbound: 1})
+
+ if err := rc.addStream(network.DirOutbound); err == nil {
+ t.Fatal("expected addStream to fail")
+ }
+ checkResources(t, &rc, network.ScopeStat{NumStreamsOutbound: 1})
+
+ if err := rc.addStream(network.DirInbound); err == nil {
+ t.Fatal("expected addStream to fail")
+ }
+ checkResources(t, &rc, network.ScopeStat{NumStreamsOutbound: 1})
+
+ rc.removeStream(network.DirOutbound)
+ checkResources(t, &rc, network.ScopeStat{})
+
+ // test addConn
+ if err := rc.addConn(network.DirInbound, false); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &rc, network.ScopeStat{NumConnsInbound: 1})
+
+ if err := rc.addConn(network.DirInbound, false); err == nil {
+ t.Fatal("expected addConn to fail")
+ }
+ checkResources(t, &rc, network.ScopeStat{NumConnsInbound: 1})
+
+ rc.removeConn(network.DirInbound, false)
+ checkResources(t, &rc, network.ScopeStat{})
+
+ if err := rc.addConn(network.DirOutbound, false); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &rc, network.ScopeStat{NumConnsOutbound: 1})
+
+ if err := rc.addConn(network.DirOutbound, false); err == nil {
+ t.Fatal("expected addConn to fail")
+ }
+ checkResources(t, &rc, network.ScopeStat{NumConnsOutbound: 1})
+
+ rc.removeConn(network.DirOutbound, false)
+ checkResources(t, &rc, network.ScopeStat{})
+
+ if err := rc.addConn(network.DirInbound, true); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &rc, network.ScopeStat{NumConnsInbound: 1, NumFD: 1})
+
+ if err := rc.addConn(network.DirOutbound, true); err == nil {
+ t.Fatal("expected addConn to fail")
+ }
+ checkResources(t, &rc, network.ScopeStat{NumConnsInbound: 1, NumFD: 1})
+
+ rc.removeConn(network.DirInbound, true)
+ checkResources(t, &rc, network.ScopeStat{})
+}
+
+func TestResourceScopeSimple(t *testing.T) {
+ s := newResourceScope(
+ &BaseLimit{
+ Memory: 4096,
+ StreamsInbound: 1,
+ StreamsOutbound: 1,
+ Streams: 1,
+ ConnsInbound: 1,
+ ConnsOutbound: 1,
+ Conns: 1,
+ FD: 1,
+ },
+ nil, "test", nil, nil,
+ )
+
+ s.IncRef()
+ if s.refCnt != 1 {
+ t.Fatal("expected refcnt of 1")
+ }
+ s.DecRef()
+ if s.refCnt != 0 {
+ t.Fatal("expected refcnt of 0")
+ }
+
+ testResourceScopeBasic(t, s)
+}
+
+func testResourceScopeBasic(t *testing.T, s *resourceScope) {
+ if err := s.ReserveMemory(2048, network.ReservationPriorityAlways); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &s.rc, network.ScopeStat{Memory: 2048})
+
+ if err := s.ReserveMemory(1024, network.ReservationPriorityAlways); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &s.rc, network.ScopeStat{Memory: 3072})
+
+ if err := s.ReserveMemory(512, network.ReservationPriorityAlways); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &s.rc, network.ScopeStat{Memory: 3584})
+
+ if err := s.ReserveMemory(512, network.ReservationPriorityAlways); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &s.rc, network.ScopeStat{Memory: 4096})
+
+ if err := s.ReserveMemory(1024, network.ReservationPriorityAlways); err == nil {
+ t.Fatal("expected ReserveMemory to fail")
+ }
+ checkResources(t, &s.rc, network.ScopeStat{Memory: 4096})
+
+ s.ReleaseMemory(4096)
+ checkResources(t, &s.rc, network.ScopeStat{})
+
+ if err := s.AddStream(network.DirInbound); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1})
+
+ if err := s.AddStream(network.DirInbound); err == nil {
+ t.Fatal("expected AddStream to fail")
+ }
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1})
+
+ if err := s.AddStream(network.DirOutbound); err == nil {
+ t.Fatal("expected AddStream to fail")
+ }
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsInbound: 1})
+
+ s.RemoveStream(network.DirInbound)
+ checkResources(t, &s.rc, network.ScopeStat{})
+
+ if err := s.AddStream(network.DirOutbound); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsOutbound: 1})
+
+ if err := s.AddStream(network.DirOutbound); err == nil {
+ t.Fatal("expected AddStream to fail")
+ }
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsOutbound: 1})
+
+ if err := s.AddStream(network.DirInbound); err == nil {
+ t.Fatal("expected AddStream to fail")
+ }
+ checkResources(t, &s.rc, network.ScopeStat{NumStreamsOutbound: 1})
+
+ s.RemoveStream(network.DirOutbound)
+ checkResources(t, &s.rc, network.ScopeStat{})
+
+ if err := s.AddConn(network.DirInbound, false); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &s.rc, network.ScopeStat{NumConnsInbound: 1})
+
+ if err := s.AddConn(network.DirInbound, false); err == nil {
+ t.Fatal("expected AddConn to fail")
+ }
+ checkResources(t, &s.rc, network.ScopeStat{NumConnsInbound: 1})
+
+ s.RemoveConn(network.DirInbound, false)
+ checkResources(t, &s.rc, network.ScopeStat{})
+
+ if err := s.AddConn(network.DirOutbound, false); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &s.rc, network.ScopeStat{NumConnsOutbound: 1})
+
+ if err := s.AddConn(network.DirOutbound, false); err == nil {
+ t.Fatal("expected AddConn to fail")
+ }
+ checkResources(t, &s.rc, network.ScopeStat{NumConnsOutbound: 1})
+
+ s.RemoveConn(network.DirOutbound, false)
+ checkResources(t, &s.rc, network.ScopeStat{})
+
+ if err := s.AddConn(network.DirInbound, true); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &s.rc, network.ScopeStat{NumConnsInbound: 1, NumFD: 1})
+
+ if err := s.AddConn(network.DirOutbound, true); err == nil {
+ t.Fatal("expected AddConn to fail")
+ }
+ checkResources(t, &s.rc, network.ScopeStat{NumConnsInbound: 1, NumFD: 1})
+
+ s.RemoveConn(network.DirInbound, true)
+ checkResources(t, &s.rc, network.ScopeStat{})
+}
+
+func TestResourceScopeTxnBasic(t *testing.T) {
+ s := newResourceScope(
+ &BaseLimit{
+ Memory: 4096,
+ StreamsInbound: 1,
+ StreamsOutbound: 1,
+ Streams: 1,
+ ConnsInbound: 1,
+ ConnsOutbound: 1,
+ Conns: 1,
+ FD: 1,
+ },
+ nil, "test", nil, nil,
+ )
+
+ txn, err := s.BeginSpan()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ testResourceScopeBasic(t, txn.(*resourceScope))
+ checkResources(t, &s.rc, network.ScopeStat{})
+
+ // check constraint propagation
+ if err := txn.ReserveMemory(4096, network.ReservationPriorityAlways); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &txn.(*resourceScope).rc, network.ScopeStat{Memory: 4096})
+ checkResources(t, &s.rc, network.ScopeStat{Memory: 4096})
+ txn.Done()
+ checkResources(t, &s.rc, network.ScopeStat{})
+ txn.Done() // idempotent
+ checkResources(t, &s.rc, network.ScopeStat{})
+}
+
+func TestResourceScopeTxnZombie(t *testing.T) {
+ s := newResourceScope(
+ &BaseLimit{
+ Memory: 4096,
+ StreamsInbound: 1,
+ StreamsOutbound: 1,
+ Streams: 1,
+ ConnsInbound: 1,
+ ConnsOutbound: 1,
+ Conns: 1,
+ FD: 1,
+ },
+ nil, "test", nil, nil,
+ )
+
+ txn1, err := s.BeginSpan()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ txn2, err := txn1.BeginSpan()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := txn2.ReserveMemory(4096, network.ReservationPriorityAlways); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &txn2.(*resourceScope).rc, network.ScopeStat{Memory: 4096})
+ checkResources(t, &txn1.(*resourceScope).rc, network.ScopeStat{Memory: 4096})
+ checkResources(t, &s.rc, network.ScopeStat{Memory: 4096})
+
+ txn1.Done()
+ checkResources(t, &s.rc, network.ScopeStat{})
+ if err := txn2.ReserveMemory(4096, network.ReservationPriorityAlways); err == nil {
+ t.Fatal("expected ReserveMemory to fail")
+ }
+
+ txn2.Done()
+ checkResources(t, &s.rc, network.ScopeStat{})
+}
+
+func TestResourceScopeTxnTree(t *testing.T) {
+ s := newResourceScope(
+ &BaseLimit{
+ Memory: 4096,
+ StreamsInbound: 1,
+ StreamsOutbound: 1,
+ Streams: 1,
+ ConnsInbound: 1,
+ ConnsOutbound: 1,
+ Conns: 1,
+ FD: 1,
+ },
+ nil, "test", nil, nil,
+ )
+
+ txn1, err := s.BeginSpan()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ txn2, err := txn1.BeginSpan()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ txn3, err := txn1.BeginSpan()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ txn4, err := txn2.BeginSpan()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ txn5, err := txn2.BeginSpan()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := txn3.ReserveMemory(1024, network.ReservationPriorityAlways); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &txn3.(*resourceScope).rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &txn1.(*resourceScope).rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &s.rc, network.ScopeStat{Memory: 1024})
+
+ if err := txn4.ReserveMemory(1024, network.ReservationPriorityAlways); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &txn4.(*resourceScope).rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &txn3.(*resourceScope).rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &txn2.(*resourceScope).rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &txn1.(*resourceScope).rc, network.ScopeStat{Memory: 2048})
+ checkResources(t, &s.rc, network.ScopeStat{Memory: 2048})
+
+ if err := txn5.ReserveMemory(1024, network.ReservationPriorityAlways); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &txn5.(*resourceScope).rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &txn4.(*resourceScope).rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &txn3.(*resourceScope).rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &txn2.(*resourceScope).rc, network.ScopeStat{Memory: 2048})
+ checkResources(t, &txn1.(*resourceScope).rc, network.ScopeStat{Memory: 3072})
+ checkResources(t, &s.rc, network.ScopeStat{Memory: 3072})
+
+ if err := txn1.ReserveMemory(1024, network.ReservationPriorityAlways); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &txn5.(*resourceScope).rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &txn4.(*resourceScope).rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &txn3.(*resourceScope).rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &txn2.(*resourceScope).rc, network.ScopeStat{Memory: 2048})
+ checkResources(t, &txn1.(*resourceScope).rc, network.ScopeStat{Memory: 4096})
+ checkResources(t, &s.rc, network.ScopeStat{Memory: 4096})
+
+ if err := txn5.ReserveMemory(1024, network.ReservationPriorityAlways); err == nil {
+ t.Fatal("expected ReserveMemory to fail")
+ }
+ if err := txn4.ReserveMemory(1024, network.ReservationPriorityAlways); err == nil {
+ t.Fatal("expected ReserveMemory to fail")
+ }
+ if err := txn3.ReserveMemory(1024, network.ReservationPriorityAlways); err == nil {
+ t.Fatal("expected ReserveMemory to fail")
+ }
+ if err := txn2.ReserveMemory(1024, network.ReservationPriorityAlways); err == nil {
+ t.Fatal("expected ReserveMemory to fail")
+ }
+ checkResources(t, &txn5.(*resourceScope).rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &txn4.(*resourceScope).rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &txn3.(*resourceScope).rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &txn2.(*resourceScope).rc, network.ScopeStat{Memory: 2048})
+ checkResources(t, &txn1.(*resourceScope).rc, network.ScopeStat{Memory: 4096})
+ checkResources(t, &s.rc, network.ScopeStat{Memory: 4096})
+
+ txn1.Done()
+ checkResources(t, &s.rc, network.ScopeStat{})
+}
+
+func TestResourceScopeDAG(t *testing.T) {
+ // A small DAG of scopes
+ // s1
+ // +---> s2
+ // +------------> s5
+ // +----
+ // +---> s3 +. \
+ // | \ -----+-> s4 (a diamond!)
+ // | ------/
+ // \
+ // ------> s6
+ s1 := newResourceScope(
+ &BaseLimit{
+ Memory: 4096,
+ StreamsInbound: 4,
+ StreamsOutbound: 4,
+ Streams: 4,
+ ConnsInbound: 4,
+ ConnsOutbound: 4,
+ Conns: 4,
+ FD: 4,
+ },
+ nil, "test", nil, nil,
+ )
+ s2 := newResourceScope(
+ &BaseLimit{
+ Memory: 2048,
+ StreamsInbound: 2,
+ StreamsOutbound: 2,
+ Streams: 2,
+ ConnsInbound: 2,
+ ConnsOutbound: 2,
+ Conns: 2,
+ FD: 2,
+ },
+ []*resourceScope{s1}, "test", nil, nil,
+ )
+ s3 := newResourceScope(
+ &BaseLimit{
+ Memory: 2048,
+ StreamsInbound: 2,
+ StreamsOutbound: 2,
+ Streams: 2,
+ ConnsInbound: 2,
+ ConnsOutbound: 2,
+ Conns: 2,
+ FD: 2,
+ },
+ []*resourceScope{s1}, "test", nil, nil,
+ )
+ s4 := newResourceScope(
+ &BaseLimit{
+ Memory: 2048,
+ StreamsInbound: 2,
+ StreamsOutbound: 2,
+ Streams: 2,
+ ConnsInbound: 2,
+ ConnsOutbound: 2,
+ Conns: 2,
+ FD: 2,
+ },
+ []*resourceScope{s2, s3, s1}, "test", nil, nil,
+ )
+ s5 := newResourceScope(
+ &BaseLimit{
+ Memory: 2048,
+ StreamsInbound: 2,
+ StreamsOutbound: 2,
+ Streams: 2,
+ ConnsInbound: 2,
+ ConnsOutbound: 2,
+ Conns: 2,
+ FD: 2,
+ },
+ []*resourceScope{s2, s1}, "test", nil, nil,
+ )
+ s6 := newResourceScope(
+ &BaseLimit{
+ Memory: 2048,
+ StreamsInbound: 2,
+ StreamsOutbound: 2,
+ Streams: 2,
+ ConnsInbound: 2,
+ ConnsOutbound: 2,
+ Conns: 2,
+ FD: 2,
+ },
+ []*resourceScope{s3, s1}, "test", nil, nil,
+ )
+
+ if err := s4.ReserveMemory(1024, network.ReservationPriorityAlways); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &s6.rc, network.ScopeStat{})
+ checkResources(t, &s5.rc, network.ScopeStat{})
+ checkResources(t, &s4.rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &s3.rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &s2.rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &s1.rc, network.ScopeStat{Memory: 1024})
+
+ if err := s5.ReserveMemory(1024, network.ReservationPriorityAlways); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &s6.rc, network.ScopeStat{})
+ checkResources(t, &s5.rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &s4.rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &s3.rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &s2.rc, network.ScopeStat{Memory: 2048})
+ checkResources(t, &s1.rc, network.ScopeStat{Memory: 2048})
+
+ if err := s6.ReserveMemory(1024, network.ReservationPriorityAlways); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &s6.rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &s5.rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &s4.rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &s3.rc, network.ScopeStat{Memory: 2048})
+ checkResources(t, &s2.rc, network.ScopeStat{Memory: 2048})
+ checkResources(t, &s1.rc, network.ScopeStat{Memory: 3072})
+
+ if err := s4.ReserveMemory(1024, network.ReservationPriorityAlways); err == nil {
+ t.Fatal("expcted ReserveMemory to fail")
+ }
+ if err := s5.ReserveMemory(1024, network.ReservationPriorityAlways); err == nil {
+ t.Fatal("expcted ReserveMemory to fail")
+ }
+ if err := s6.ReserveMemory(1024, network.ReservationPriorityAlways); err == nil {
+ t.Fatal("expcted ReserveMemory to fail")
+ }
+
+ checkResources(t, &s6.rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &s5.rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &s4.rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &s3.rc, network.ScopeStat{Memory: 2048})
+ checkResources(t, &s2.rc, network.ScopeStat{Memory: 2048})
+ checkResources(t, &s1.rc, network.ScopeStat{Memory: 3072})
+
+ s4.ReleaseMemory(1024)
+ checkResources(t, &s6.rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &s5.rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &s4.rc, network.ScopeStat{})
+ checkResources(t, &s3.rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &s2.rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &s1.rc, network.ScopeStat{Memory: 2048})
+
+ s5.ReleaseMemory(1024)
+ checkResources(t, &s6.rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &s5.rc, network.ScopeStat{})
+ checkResources(t, &s4.rc, network.ScopeStat{})
+ checkResources(t, &s3.rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &s2.rc, network.ScopeStat{})
+ checkResources(t, &s1.rc, network.ScopeStat{Memory: 1024})
+
+ s6.ReleaseMemory(1024)
+ checkResources(t, &s6.rc, network.ScopeStat{})
+ checkResources(t, &s5.rc, network.ScopeStat{})
+ checkResources(t, &s4.rc, network.ScopeStat{})
+ checkResources(t, &s3.rc, network.ScopeStat{})
+ checkResources(t, &s2.rc, network.ScopeStat{})
+ checkResources(t, &s1.rc, network.ScopeStat{})
+
+ if err := s4.AddStream(network.DirInbound); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &s6.rc, network.ScopeStat{})
+ checkResources(t, &s5.rc, network.ScopeStat{})
+ checkResources(t, &s4.rc, network.ScopeStat{NumStreamsInbound: 1})
+ checkResources(t, &s3.rc, network.ScopeStat{NumStreamsInbound: 1})
+ checkResources(t, &s2.rc, network.ScopeStat{NumStreamsInbound: 1})
+ checkResources(t, &s1.rc, network.ScopeStat{NumStreamsInbound: 1})
+
+ if err := s5.AddStream(network.DirInbound); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &s6.rc, network.ScopeStat{})
+ checkResources(t, &s5.rc, network.ScopeStat{NumStreamsInbound: 1})
+ checkResources(t, &s4.rc, network.ScopeStat{NumStreamsInbound: 1})
+ checkResources(t, &s3.rc, network.ScopeStat{NumStreamsInbound: 1})
+ checkResources(t, &s2.rc, network.ScopeStat{NumStreamsInbound: 2})
+ checkResources(t, &s1.rc, network.ScopeStat{NumStreamsInbound: 2})
+
+ if err := s6.AddStream(network.DirInbound); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &s6.rc, network.ScopeStat{NumStreamsInbound: 1})
+ checkResources(t, &s5.rc, network.ScopeStat{NumStreamsInbound: 1})
+ checkResources(t, &s4.rc, network.ScopeStat{NumStreamsInbound: 1})
+ checkResources(t, &s3.rc, network.ScopeStat{NumStreamsInbound: 2})
+ checkResources(t, &s2.rc, network.ScopeStat{NumStreamsInbound: 2})
+ checkResources(t, &s1.rc, network.ScopeStat{NumStreamsInbound: 3})
+
+ if err := s4.AddStream(network.DirInbound); err == nil {
+ t.Fatal("expected AddStream to fail")
+ }
+ if err := s5.AddStream(network.DirInbound); err == nil {
+ t.Fatal("expected AddStream to fail")
+ }
+ if err := s6.AddStream(network.DirInbound); err == nil {
+ t.Fatal("expected AddStream to fail")
+ }
+ checkResources(t, &s6.rc, network.ScopeStat{NumStreamsInbound: 1})
+ checkResources(t, &s5.rc, network.ScopeStat{NumStreamsInbound: 1})
+ checkResources(t, &s4.rc, network.ScopeStat{NumStreamsInbound: 1})
+ checkResources(t, &s3.rc, network.ScopeStat{NumStreamsInbound: 2})
+ checkResources(t, &s2.rc, network.ScopeStat{NumStreamsInbound: 2})
+ checkResources(t, &s1.rc, network.ScopeStat{NumStreamsInbound: 3})
+
+ s4.RemoveStream(network.DirInbound)
+ checkResources(t, &s6.rc, network.ScopeStat{NumStreamsInbound: 1})
+ checkResources(t, &s5.rc, network.ScopeStat{NumStreamsInbound: 1})
+ checkResources(t, &s4.rc, network.ScopeStat{})
+ checkResources(t, &s3.rc, network.ScopeStat{NumStreamsInbound: 1})
+ checkResources(t, &s2.rc, network.ScopeStat{NumStreamsInbound: 1})
+ checkResources(t, &s1.rc, network.ScopeStat{NumStreamsInbound: 2})
+
+ s5.RemoveStream(network.DirInbound)
+ checkResources(t, &s6.rc, network.ScopeStat{NumStreamsInbound: 1})
+ checkResources(t, &s5.rc, network.ScopeStat{})
+ checkResources(t, &s4.rc, network.ScopeStat{})
+ checkResources(t, &s3.rc, network.ScopeStat{NumStreamsInbound: 1})
+ checkResources(t, &s2.rc, network.ScopeStat{})
+ checkResources(t, &s1.rc, network.ScopeStat{NumStreamsInbound: 1})
+
+ s6.RemoveStream(network.DirInbound)
+ checkResources(t, &s6.rc, network.ScopeStat{})
+ checkResources(t, &s5.rc, network.ScopeStat{})
+ checkResources(t, &s4.rc, network.ScopeStat{})
+ checkResources(t, &s3.rc, network.ScopeStat{})
+ checkResources(t, &s2.rc, network.ScopeStat{})
+ checkResources(t, &s1.rc, network.ScopeStat{})
+
+ if err := s4.AddStream(network.DirOutbound); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &s6.rc, network.ScopeStat{})
+ checkResources(t, &s5.rc, network.ScopeStat{})
+ checkResources(t, &s4.rc, network.ScopeStat{NumStreamsOutbound: 1})
+ checkResources(t, &s3.rc, network.ScopeStat{NumStreamsOutbound: 1})
+ checkResources(t, &s2.rc, network.ScopeStat{NumStreamsOutbound: 1})
+ checkResources(t, &s1.rc, network.ScopeStat{NumStreamsOutbound: 1})
+
+ if err := s5.AddStream(network.DirOutbound); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &s6.rc, network.ScopeStat{})
+ checkResources(t, &s5.rc, network.ScopeStat{NumStreamsOutbound: 1})
+ checkResources(t, &s4.rc, network.ScopeStat{NumStreamsOutbound: 1})
+ checkResources(t, &s3.rc, network.ScopeStat{NumStreamsOutbound: 1})
+ checkResources(t, &s2.rc, network.ScopeStat{NumStreamsOutbound: 2})
+ checkResources(t, &s1.rc, network.ScopeStat{NumStreamsOutbound: 2})
+
+ if err := s6.AddStream(network.DirOutbound); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &s6.rc, network.ScopeStat{NumStreamsOutbound: 1})
+ checkResources(t, &s5.rc, network.ScopeStat{NumStreamsOutbound: 1})
+ checkResources(t, &s4.rc, network.ScopeStat{NumStreamsOutbound: 1})
+ checkResources(t, &s3.rc, network.ScopeStat{NumStreamsOutbound: 2})
+ checkResources(t, &s2.rc, network.ScopeStat{NumStreamsOutbound: 2})
+ checkResources(t, &s1.rc, network.ScopeStat{NumStreamsOutbound: 3})
+
+ if err := s4.AddStream(network.DirOutbound); err == nil {
+ t.Fatal("expected AddStream to fail")
+ }
+ if err := s5.AddStream(network.DirOutbound); err == nil {
+ t.Fatal("expected AddStream to fail")
+ }
+ if err := s6.AddStream(network.DirOutbound); err == nil {
+ t.Fatal("expected AddStream to fail")
+ }
+ checkResources(t, &s6.rc, network.ScopeStat{NumStreamsOutbound: 1})
+ checkResources(t, &s5.rc, network.ScopeStat{NumStreamsOutbound: 1})
+ checkResources(t, &s4.rc, network.ScopeStat{NumStreamsOutbound: 1})
+ checkResources(t, &s3.rc, network.ScopeStat{NumStreamsOutbound: 2})
+ checkResources(t, &s2.rc, network.ScopeStat{NumStreamsOutbound: 2})
+ checkResources(t, &s1.rc, network.ScopeStat{NumStreamsOutbound: 3})
+
+ s4.RemoveStream(network.DirOutbound)
+ checkResources(t, &s6.rc, network.ScopeStat{NumStreamsOutbound: 1})
+ checkResources(t, &s5.rc, network.ScopeStat{NumStreamsOutbound: 1})
+ checkResources(t, &s4.rc, network.ScopeStat{})
+ checkResources(t, &s3.rc, network.ScopeStat{NumStreamsOutbound: 1})
+ checkResources(t, &s2.rc, network.ScopeStat{NumStreamsOutbound: 1})
+ checkResources(t, &s1.rc, network.ScopeStat{NumStreamsOutbound: 2})
+
+ s5.RemoveStream(network.DirOutbound)
+ checkResources(t, &s6.rc, network.ScopeStat{NumStreamsOutbound: 1})
+ checkResources(t, &s5.rc, network.ScopeStat{})
+ checkResources(t, &s4.rc, network.ScopeStat{})
+ checkResources(t, &s3.rc, network.ScopeStat{NumStreamsOutbound: 1})
+ checkResources(t, &s2.rc, network.ScopeStat{})
+ checkResources(t, &s1.rc, network.ScopeStat{NumStreamsOutbound: 1})
+
+ s6.RemoveStream(network.DirOutbound)
+ checkResources(t, &s6.rc, network.ScopeStat{})
+ checkResources(t, &s5.rc, network.ScopeStat{})
+ checkResources(t, &s4.rc, network.ScopeStat{})
+ checkResources(t, &s3.rc, network.ScopeStat{})
+ checkResources(t, &s2.rc, network.ScopeStat{})
+ checkResources(t, &s1.rc, network.ScopeStat{})
+
+ if err := s4.AddConn(network.DirInbound, false); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &s6.rc, network.ScopeStat{})
+ checkResources(t, &s5.rc, network.ScopeStat{})
+ checkResources(t, &s4.rc, network.ScopeStat{NumConnsInbound: 1})
+ checkResources(t, &s3.rc, network.ScopeStat{NumConnsInbound: 1})
+ checkResources(t, &s2.rc, network.ScopeStat{NumConnsInbound: 1})
+ checkResources(t, &s1.rc, network.ScopeStat{NumConnsInbound: 1})
+
+ if err := s5.AddConn(network.DirInbound, false); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &s6.rc, network.ScopeStat{})
+ checkResources(t, &s5.rc, network.ScopeStat{NumConnsInbound: 1})
+ checkResources(t, &s4.rc, network.ScopeStat{NumConnsInbound: 1})
+ checkResources(t, &s3.rc, network.ScopeStat{NumConnsInbound: 1})
+ checkResources(t, &s2.rc, network.ScopeStat{NumConnsInbound: 2})
+ checkResources(t, &s1.rc, network.ScopeStat{NumConnsInbound: 2})
+
+ if err := s6.AddConn(network.DirInbound, false); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &s6.rc, network.ScopeStat{NumConnsInbound: 1})
+ checkResources(t, &s5.rc, network.ScopeStat{NumConnsInbound: 1})
+ checkResources(t, &s4.rc, network.ScopeStat{NumConnsInbound: 1})
+ checkResources(t, &s3.rc, network.ScopeStat{NumConnsInbound: 2})
+ checkResources(t, &s2.rc, network.ScopeStat{NumConnsInbound: 2})
+ checkResources(t, &s1.rc, network.ScopeStat{NumConnsInbound: 3})
+
+ if err := s4.AddConn(network.DirInbound, false); err == nil {
+ t.Fatal("expected AddConn to fail")
+ }
+ if err := s5.AddConn(network.DirInbound, false); err == nil {
+ t.Fatal("expected AddConn to fail")
+ }
+ if err := s6.AddConn(network.DirInbound, false); err == nil {
+ t.Fatal("expected AddConn to fail")
+ }
+ checkResources(t, &s6.rc, network.ScopeStat{NumConnsInbound: 1})
+ checkResources(t, &s5.rc, network.ScopeStat{NumConnsInbound: 1})
+ checkResources(t, &s4.rc, network.ScopeStat{NumConnsInbound: 1})
+ checkResources(t, &s3.rc, network.ScopeStat{NumConnsInbound: 2})
+ checkResources(t, &s2.rc, network.ScopeStat{NumConnsInbound: 2})
+ checkResources(t, &s1.rc, network.ScopeStat{NumConnsInbound: 3})
+
+ s4.RemoveConn(network.DirInbound, false)
+ checkResources(t, &s6.rc, network.ScopeStat{NumConnsInbound: 1})
+ checkResources(t, &s5.rc, network.ScopeStat{NumConnsInbound: 1})
+ checkResources(t, &s4.rc, network.ScopeStat{})
+ checkResources(t, &s3.rc, network.ScopeStat{NumConnsInbound: 1})
+ checkResources(t, &s2.rc, network.ScopeStat{NumConnsInbound: 1})
+ checkResources(t, &s1.rc, network.ScopeStat{NumConnsInbound: 2})
+
+ s5.RemoveConn(network.DirInbound, false)
+ checkResources(t, &s6.rc, network.ScopeStat{NumConnsInbound: 1})
+ checkResources(t, &s5.rc, network.ScopeStat{})
+ checkResources(t, &s4.rc, network.ScopeStat{})
+ checkResources(t, &s3.rc, network.ScopeStat{NumConnsInbound: 1})
+ checkResources(t, &s2.rc, network.ScopeStat{})
+ checkResources(t, &s1.rc, network.ScopeStat{NumConnsInbound: 1})
+
+ s6.RemoveConn(network.DirInbound, false)
+ checkResources(t, &s6.rc, network.ScopeStat{})
+ checkResources(t, &s5.rc, network.ScopeStat{})
+ checkResources(t, &s4.rc, network.ScopeStat{})
+ checkResources(t, &s3.rc, network.ScopeStat{})
+ checkResources(t, &s2.rc, network.ScopeStat{})
+ checkResources(t, &s1.rc, network.ScopeStat{})
+
+ if err := s4.AddConn(network.DirOutbound, false); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &s6.rc, network.ScopeStat{})
+ checkResources(t, &s5.rc, network.ScopeStat{})
+ checkResources(t, &s4.rc, network.ScopeStat{NumConnsOutbound: 1})
+ checkResources(t, &s3.rc, network.ScopeStat{NumConnsOutbound: 1})
+ checkResources(t, &s2.rc, network.ScopeStat{NumConnsOutbound: 1})
+ checkResources(t, &s1.rc, network.ScopeStat{NumConnsOutbound: 1})
+
+ if err := s5.AddConn(network.DirOutbound, false); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &s6.rc, network.ScopeStat{})
+ checkResources(t, &s5.rc, network.ScopeStat{NumConnsOutbound: 1})
+ checkResources(t, &s4.rc, network.ScopeStat{NumConnsOutbound: 1})
+ checkResources(t, &s3.rc, network.ScopeStat{NumConnsOutbound: 1})
+ checkResources(t, &s2.rc, network.ScopeStat{NumConnsOutbound: 2})
+ checkResources(t, &s1.rc, network.ScopeStat{NumConnsOutbound: 2})
+
+ if err := s6.AddConn(network.DirOutbound, false); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &s6.rc, network.ScopeStat{NumConnsOutbound: 1})
+ checkResources(t, &s5.rc, network.ScopeStat{NumConnsOutbound: 1})
+ checkResources(t, &s4.rc, network.ScopeStat{NumConnsOutbound: 1})
+ checkResources(t, &s3.rc, network.ScopeStat{NumConnsOutbound: 2})
+ checkResources(t, &s2.rc, network.ScopeStat{NumConnsOutbound: 2})
+ checkResources(t, &s1.rc, network.ScopeStat{NumConnsOutbound: 3})
+
+ if err := s4.AddConn(network.DirOutbound, false); err == nil {
+ t.Fatal("expected AddConn to fail")
+ }
+ if err := s5.AddConn(network.DirOutbound, false); err == nil {
+ t.Fatal("expected AddConn to fail")
+ }
+ if err := s6.AddConn(network.DirOutbound, false); err == nil {
+ t.Fatal("expected AddConn to fail")
+ }
+ checkResources(t, &s6.rc, network.ScopeStat{NumConnsOutbound: 1})
+ checkResources(t, &s5.rc, network.ScopeStat{NumConnsOutbound: 1})
+ checkResources(t, &s4.rc, network.ScopeStat{NumConnsOutbound: 1})
+ checkResources(t, &s3.rc, network.ScopeStat{NumConnsOutbound: 2})
+ checkResources(t, &s2.rc, network.ScopeStat{NumConnsOutbound: 2})
+ checkResources(t, &s1.rc, network.ScopeStat{NumConnsOutbound: 3})
+
+ s4.RemoveConn(network.DirOutbound, false)
+ checkResources(t, &s6.rc, network.ScopeStat{NumConnsOutbound: 1})
+ checkResources(t, &s5.rc, network.ScopeStat{NumConnsOutbound: 1})
+ checkResources(t, &s4.rc, network.ScopeStat{})
+ checkResources(t, &s3.rc, network.ScopeStat{NumConnsOutbound: 1})
+ checkResources(t, &s2.rc, network.ScopeStat{NumConnsOutbound: 1})
+ checkResources(t, &s1.rc, network.ScopeStat{NumConnsOutbound: 2})
+
+ s5.RemoveConn(network.DirOutbound, false)
+ checkResources(t, &s6.rc, network.ScopeStat{NumConnsOutbound: 1})
+ checkResources(t, &s5.rc, network.ScopeStat{})
+ checkResources(t, &s4.rc, network.ScopeStat{})
+ checkResources(t, &s3.rc, network.ScopeStat{NumConnsOutbound: 1})
+ checkResources(t, &s2.rc, network.ScopeStat{})
+ checkResources(t, &s1.rc, network.ScopeStat{NumConnsOutbound: 1})
+
+ s6.RemoveConn(network.DirOutbound, false)
+ checkResources(t, &s6.rc, network.ScopeStat{})
+ checkResources(t, &s5.rc, network.ScopeStat{})
+ checkResources(t, &s4.rc, network.ScopeStat{})
+ checkResources(t, &s3.rc, network.ScopeStat{})
+ checkResources(t, &s2.rc, network.ScopeStat{})
+ checkResources(t, &s1.rc, network.ScopeStat{})
+
+ if err := s4.AddConn(network.DirInbound, true); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &s6.rc, network.ScopeStat{})
+ checkResources(t, &s5.rc, network.ScopeStat{})
+ checkResources(t, &s4.rc, network.ScopeStat{NumConnsInbound: 1, NumFD: 1})
+ checkResources(t, &s3.rc, network.ScopeStat{NumConnsInbound: 1, NumFD: 1})
+ checkResources(t, &s2.rc, network.ScopeStat{NumConnsInbound: 1, NumFD: 1})
+ checkResources(t, &s1.rc, network.ScopeStat{NumConnsInbound: 1, NumFD: 1})
+
+ if err := s5.AddConn(network.DirInbound, true); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &s6.rc, network.ScopeStat{})
+ checkResources(t, &s5.rc, network.ScopeStat{NumConnsInbound: 1, NumFD: 1})
+ checkResources(t, &s4.rc, network.ScopeStat{NumConnsInbound: 1, NumFD: 1})
+ checkResources(t, &s3.rc, network.ScopeStat{NumConnsInbound: 1, NumFD: 1})
+ checkResources(t, &s2.rc, network.ScopeStat{NumConnsInbound: 2, NumFD: 2})
+ checkResources(t, &s1.rc, network.ScopeStat{NumConnsInbound: 2, NumFD: 2})
+
+ if err := s6.AddConn(network.DirInbound, true); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &s6.rc, network.ScopeStat{NumConnsInbound: 1, NumFD: 1})
+ checkResources(t, &s5.rc, network.ScopeStat{NumConnsInbound: 1, NumFD: 1})
+ checkResources(t, &s4.rc, network.ScopeStat{NumConnsInbound: 1, NumFD: 1})
+ checkResources(t, &s3.rc, network.ScopeStat{NumConnsInbound: 2, NumFD: 2})
+ checkResources(t, &s2.rc, network.ScopeStat{NumConnsInbound: 2, NumFD: 2})
+ checkResources(t, &s1.rc, network.ScopeStat{NumConnsInbound: 3, NumFD: 3})
+
+ if err := s4.AddConn(network.DirOutbound, true); err == nil {
+ t.Fatal("expected AddConn to fail")
+ }
+ if err := s5.AddConn(network.DirOutbound, true); err == nil {
+ t.Fatal("expected AddConn to fail")
+ }
+ if err := s6.AddConn(network.DirOutbound, true); err == nil {
+ t.Fatal("expected AddConn to fail")
+ }
+ checkResources(t, &s6.rc, network.ScopeStat{NumConnsInbound: 1, NumFD: 1})
+ checkResources(t, &s5.rc, network.ScopeStat{NumConnsInbound: 1, NumFD: 1})
+ checkResources(t, &s4.rc, network.ScopeStat{NumConnsInbound: 1, NumFD: 1})
+ checkResources(t, &s3.rc, network.ScopeStat{NumConnsInbound: 2, NumFD: 2})
+ checkResources(t, &s2.rc, network.ScopeStat{NumConnsInbound: 2, NumFD: 2})
+ checkResources(t, &s1.rc, network.ScopeStat{NumConnsInbound: 3, NumFD: 3})
+
+ s4.RemoveConn(network.DirInbound, true)
+ checkResources(t, &s6.rc, network.ScopeStat{NumConnsInbound: 1, NumFD: 1})
+ checkResources(t, &s5.rc, network.ScopeStat{NumConnsInbound: 1, NumFD: 1})
+ checkResources(t, &s4.rc, network.ScopeStat{})
+ checkResources(t, &s3.rc, network.ScopeStat{NumConnsInbound: 1, NumFD: 1})
+ checkResources(t, &s2.rc, network.ScopeStat{NumConnsInbound: 1, NumFD: 1})
+ checkResources(t, &s1.rc, network.ScopeStat{NumConnsInbound: 2, NumFD: 2})
+
+ s5.RemoveConn(network.DirInbound, true)
+ checkResources(t, &s6.rc, network.ScopeStat{NumConnsInbound: 1, NumFD: 1})
+ checkResources(t, &s5.rc, network.ScopeStat{})
+ checkResources(t, &s4.rc, network.ScopeStat{})
+ checkResources(t, &s3.rc, network.ScopeStat{NumConnsInbound: 1, NumFD: 1})
+ checkResources(t, &s2.rc, network.ScopeStat{})
+ checkResources(t, &s1.rc, network.ScopeStat{NumConnsInbound: 1, NumFD: 1})
+
+ s6.RemoveConn(network.DirInbound, true)
+ checkResources(t, &s6.rc, network.ScopeStat{})
+ checkResources(t, &s5.rc, network.ScopeStat{})
+ checkResources(t, &s4.rc, network.ScopeStat{})
+ checkResources(t, &s3.rc, network.ScopeStat{})
+ checkResources(t, &s2.rc, network.ScopeStat{})
+ checkResources(t, &s1.rc, network.ScopeStat{})
+}
+
+func TestResourceScopeDAGTxn(t *testing.T) {
+ // A small DAG of scopes
+ // s1
+ // +---> s2
+ // +------------> s5
+ // +----
+ // +---> s3 +. \
+ // | \ -----+-> s4 (a diamond!)
+ // | ------/
+ // \
+ // ------> s6
+ s1 := newResourceScope(
+ &BaseLimit{Memory: 8192},
+ nil, "test", nil, nil,
+ )
+ s2 := newResourceScope(
+ &BaseLimit{Memory: 4096 + 2048},
+ []*resourceScope{s1}, "test", nil, nil,
+ )
+ s3 := newResourceScope(
+ &BaseLimit{Memory: 4096 + 2048},
+ []*resourceScope{s1}, "test", nil, nil,
+ )
+ s4 := newResourceScope(
+ &BaseLimit{Memory: 4096 + 1024},
+ []*resourceScope{s2, s3, s1}, "test", nil, nil,
+ )
+ s5 := newResourceScope(
+ &BaseLimit{Memory: 4096 + 1024},
+ []*resourceScope{s2, s1}, "test", nil, nil,
+ )
+ s6 := newResourceScope(
+ &BaseLimit{Memory: 4096 + 1024},
+ []*resourceScope{s3, s1}, "test", nil, nil,
+ )
+
+ txn4, err := s4.BeginSpan()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ txn5, err := s5.BeginSpan()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ txn6, err := s6.BeginSpan()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := txn4.ReserveMemory(1024, network.ReservationPriorityAlways); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &s4.rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &s3.rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &s2.rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &s1.rc, network.ScopeStat{Memory: 1024})
+
+ if err := txn5.ReserveMemory(1024, network.ReservationPriorityAlways); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &s5.rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &s4.rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &s3.rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &s2.rc, network.ScopeStat{Memory: 2048})
+ checkResources(t, &s1.rc, network.ScopeStat{Memory: 2048})
+
+ if err := txn6.ReserveMemory(1024, network.ReservationPriorityAlways); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &s6.rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &s5.rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &s4.rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &s3.rc, network.ScopeStat{Memory: 2048})
+ checkResources(t, &s2.rc, network.ScopeStat{Memory: 2048})
+ checkResources(t, &s1.rc, network.ScopeStat{Memory: 3072})
+
+ if err := txn4.ReserveMemory(4096, network.ReservationPriorityAlways); err != nil {
+ t.Fatal(err)
+ }
+ checkResources(t, &s6.rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &s5.rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &s4.rc, network.ScopeStat{Memory: 1024 + 4096})
+ checkResources(t, &s3.rc, network.ScopeStat{Memory: 2048 + 4096})
+ checkResources(t, &s2.rc, network.ScopeStat{Memory: 2048 + 4096})
+ checkResources(t, &s1.rc, network.ScopeStat{Memory: 3072 + 4096})
+
+ if err := txn4.ReserveMemory(1024, network.ReservationPriorityAlways); err == nil {
+ t.Fatal("expected ReserveMemory to fail")
+ }
+ if err := txn5.ReserveMemory(1024, network.ReservationPriorityAlways); err == nil {
+ t.Fatal("expected ReserveMemory to fail")
+ }
+ if err := txn6.ReserveMemory(1024, network.ReservationPriorityAlways); err == nil {
+ t.Fatal("expected ReserveMemory to fail")
+ }
+ checkResources(t, &s6.rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &s5.rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &s4.rc, network.ScopeStat{Memory: 1024 + 4096})
+ checkResources(t, &s3.rc, network.ScopeStat{Memory: 2048 + 4096})
+ checkResources(t, &s2.rc, network.ScopeStat{Memory: 2048 + 4096})
+ checkResources(t, &s1.rc, network.ScopeStat{Memory: 3072 + 4096})
+
+ txn4.Done()
+
+ checkResources(t, &s6.rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &s5.rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &s4.rc, network.ScopeStat{})
+ checkResources(t, &s3.rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &s2.rc, network.ScopeStat{Memory: 1024})
+ checkResources(t, &s1.rc, network.ScopeStat{Memory: 2048})
+
+ if err := txn5.ReserveMemory(1024, network.ReservationPriorityAlways); err != nil {
+ t.Fatal(err)
+ }
+ if err := txn6.ReserveMemory(1024, network.ReservationPriorityAlways); err != nil {
+ t.Fatal(err)
+ }
+
+ checkResources(t, &s6.rc, network.ScopeStat{Memory: 2048})
+ checkResources(t, &s5.rc, network.ScopeStat{Memory: 2048})
+ checkResources(t, &s4.rc, network.ScopeStat{})
+ checkResources(t, &s3.rc, network.ScopeStat{Memory: 2048})
+ checkResources(t, &s2.rc, network.ScopeStat{Memory: 2048})
+ checkResources(t, &s1.rc, network.ScopeStat{Memory: 4096})
+
+ txn5.Done()
+ txn6.Done()
+
+ checkResources(t, &s6.rc, network.ScopeStat{})
+ checkResources(t, &s5.rc, network.ScopeStat{})
+ checkResources(t, &s4.rc, network.ScopeStat{})
+ checkResources(t, &s3.rc, network.ScopeStat{})
+ checkResources(t, &s2.rc, network.ScopeStat{})
+ checkResources(t, &s1.rc, network.ScopeStat{})
+}
diff --git a/p2p/host/resource-manager/stats.go b/p2p/host/resource-manager/stats.go
new file mode 100644
index 0000000000..fd0772948a
--- /dev/null
+++ b/p2p/host/resource-manager/stats.go
@@ -0,0 +1,390 @@
+package rcmgr
+
+import (
+ "strings"
+
+ "github.com/libp2p/go-libp2p/p2p/metricshelper"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+const metricNamespace = "libp2p_rcmgr"
+
+var (
+
+ // Conns
+ conns = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "connections",
+ Help: "Number of Connections",
+ }, []string{"dir", "scope"})
+
+ connsInboundSystem = conns.With(prometheus.Labels{"dir": "inbound", "scope": "system"})
+ connsInboundTransient = conns.With(prometheus.Labels{"dir": "inbound", "scope": "transient"})
+ connsOutboundSystem = conns.With(prometheus.Labels{"dir": "outbound", "scope": "system"})
+ connsOutboundTransient = conns.With(prometheus.Labels{"dir": "outbound", "scope": "transient"})
+
+ oneTenThenExpDistributionBuckets = []float64{
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16, 32, 64, 128, 256,
+ }
+
+ // PeerConns
+ peerConns = prometheus.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: metricNamespace,
+ Name: "peer_connections",
+ Buckets: oneTenThenExpDistributionBuckets,
+ Help: "Number of connections this peer has",
+ }, []string{"dir"})
+ peerConnsInbound = peerConns.With(prometheus.Labels{"dir": "inbound"})
+ peerConnsOutbound = peerConns.With(prometheus.Labels{"dir": "outbound"})
+
+ // Lets us build a histogram of our current state. See https://github.com/libp2p/go-libp2p-resource-manager/pull/54#discussion_r911244757 for more information.
+ previousPeerConns = prometheus.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: metricNamespace,
+ Name: "previous_peer_connections",
+ Buckets: oneTenThenExpDistributionBuckets,
+ Help: "Number of connections this peer previously had. This is used to get the current connection number per peer histogram by subtracting this from the peer_connections histogram",
+ }, []string{"dir"})
+ previousPeerConnsInbound = previousPeerConns.With(prometheus.Labels{"dir": "inbound"})
+ previousPeerConnsOutbound = previousPeerConns.With(prometheus.Labels{"dir": "outbound"})
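+ // To read the current per-peer distribution, subtract the two histograms,
+ // e.g. in PromQL (a sketch; exposed names follow from Namespace and Name above):
+ //   libp2p_rcmgr_peer_connections_bucket - libp2p_rcmgr_previous_peer_connections_bucket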
+
+ // Streams
+ streams = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "streams",
+ Help: "Number of Streams",
+ }, []string{"dir", "scope", "protocol"})
+
+ peerStreams = prometheus.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: metricNamespace,
+ Name: "peer_streams",
+ Buckets: oneTenThenExpDistributionBuckets,
+ Help: "Number of streams this peer has",
+ }, []string{"dir"})
+ peerStreamsInbound = peerStreams.With(prometheus.Labels{"dir": "inbound"})
+ peerStreamsOutbound = peerStreams.With(prometheus.Labels{"dir": "outbound"})
+
+ previousPeerStreams = prometheus.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: metricNamespace,
+ Name: "previous_peer_streams",
+ Buckets: oneTenThenExpDistributionBuckets,
+ Help: "Number of streams this peer has",
+ }, []string{"dir"})
+ previousPeerStreamsInbound = previousPeerStreams.With(prometheus.Labels{"dir": "inbound"})
+ previousPeerStreamsOutbound = previousPeerStreams.With(prometheus.Labels{"dir": "outbound"})
+
+ // Memory
+ memoryTotal = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "memory",
+ Help: "Amount of memory reserved as reported to the Resource Manager",
+ }, []string{"scope", "protocol"})
+
+ // PeerMemory
+ peerMemory = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: metricNamespace,
+ Name: "peer_memory",
+ Buckets: memDistribution,
+ Help: "How many peers have reserved this bucket of memory, as reported to the Resource Manager",
+ })
+ previousPeerMemory = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: metricNamespace,
+ Name: "previous_peer_memory",
+ Buckets: memDistribution,
+ Help: "How many peers have previously reserved this bucket of memory, as reported to the Resource Manager",
+ })
+
+ // ConnMemory
+ connMemory = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: metricNamespace,
+ Name: "conn_memory",
+ Buckets: memDistribution,
+ Help: "How many conns have reserved this bucket of memory, as reported to the Resource Manager",
+ })
+ previousConnMemory = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: metricNamespace,
+ Name: "previous_conn_memory",
+ Buckets: memDistribution,
+ Help: "How many conns have previously reserved this bucket of memory, as reported to the Resource Manager",
+ })
+
+ // FDs
+ fds = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "fds",
+ Help: "Number of file descriptors reserved as reported to the Resource Manager",
+ }, []string{"scope"})
+
+ fdsSystem = fds.With(prometheus.Labels{"scope": "system"})
+ fdsTransient = fds.With(prometheus.Labels{"scope": "transient"})
+
+ // Blocked resources
+ blockedResources = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "blocked_resources",
+ Help: "Number of blocked resources",
+ }, []string{"dir", "scope", "resource"})
+)
+
+var (
+ memDistribution = []float64{
+ 1 << 10, // 1KB
+ 4 << 10, // 4KB
+ 32 << 10, // 32KB
+ 1 << 20, // 1MB
+ 32 << 20, // 32MB
+ 256 << 20, // 256MB
+ 512 << 20, // 512MB
+ 1 << 30, // 1GB
+ 2 << 30, // 2GB
+ 4 << 30, // 4GB
+ }
+)
+
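+// MustRegisterWith registers the resource manager's metric collectors with
+// reg, and panics if a registration fails. A typical wiring (sketch):
+//
+//   MustRegisterWith(prometheus.DefaultRegisterer)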
+func MustRegisterWith(reg prometheus.Registerer) {
+ metricshelper.RegisterCollectors(reg,
+ conns,
+ peerConns,
+ previousPeerConns,
+ streams,
+ peerStreams,
+ previousPeerStreams,
+ memoryTotal,
+ peerMemory,
+ previousPeerMemory,
+ connMemory,
+ previousConnMemory,
+ fds,
+ blockedResources,
+ )
+}
+
+func WithMetricsDisabled() Option {
+ return func(r *resourceManager) error {
+ r.disableMetrics = true
+ return nil
+ }
+}
+
+// StatsTraceReporter reports stats on the resource manager using its traces.
+type StatsTraceReporter struct{}
+
+func NewStatsTraceReporter() (StatsTraceReporter, error) {
+ // TODO tell prometheus the system limits
+ return StatsTraceReporter{}, nil
+}
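+
+// A wiring sketch (hedged; NewResourceManager and the WithTraceReporter
+// option are assumed from this package, as used elsewhere in go-libp2p):
+//
+//   str, _ := NewStatsTraceReporter()
+//   rmgr, _ := NewResourceManager(limiter, WithTraceReporter(str))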
+
+func (r StatsTraceReporter) ConsumeEvent(evt TraceEvt) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ r.consumeEventWithLabelSlice(evt, tags)
+}
+
+// Separate func so that we can test that this function does not allocate. The sync.Pool used by ConsumeEvent may allocate.
+func (r StatsTraceReporter) consumeEventWithLabelSlice(evt TraceEvt, tags *[]string) {
+ switch evt.Type {
+ case TraceAddStreamEvt, TraceRemoveStreamEvt:
+ if p := PeerStrInScopeName(evt.Name); p != "" {
+ // Aggregated peer stats. Counts how many peers have N streams open.
+ // Uses two bucket aggregations: one counts how many streams the peer
+ // has now, the other counts the negative value, i.e. how many streams
+ // the peer used to have. To read the data, take the difference
+ // between the two.
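+ // Example: a peer moving from 2 to 3 outbound streams observes a 2 into
+ // previous_peer_streams and a 3 into peer_streams; subtracting the
+ // histograms leaves exactly one peer counted in the 3-streams bucket.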
+
+ oldStreamsOut := int64(evt.StreamsOut - evt.DeltaOut)
+ peerStreamsOut := int64(evt.StreamsOut)
+ if oldStreamsOut != peerStreamsOut {
+ if oldStreamsOut != 0 {
+ previousPeerStreamsOutbound.Observe(float64(oldStreamsOut))
+ }
+ if peerStreamsOut != 0 {
+ peerStreamsOutbound.Observe(float64(peerStreamsOut))
+ }
+ }
+
+ oldStreamsIn := int64(evt.StreamsIn - evt.DeltaIn)
+ peerStreamsIn := int64(evt.StreamsIn)
+ if oldStreamsIn != peerStreamsIn {
+ if oldStreamsIn != 0 {
+ previousPeerStreamsInbound.Observe(float64(oldStreamsIn))
+ }
+ if peerStreamsIn != 0 {
+ peerStreamsInbound.Observe(float64(peerStreamsIn))
+ }
+ }
+ } else {
+ if evt.DeltaOut != 0 {
+ if IsSystemScope(evt.Name) || IsTransientScope(evt.Name) {
+ *tags = (*tags)[:0]
+ *tags = append(*tags, "outbound", evt.Name, "")
+ streams.WithLabelValues(*tags...).Set(float64(evt.StreamsOut))
+ } else if proto := ParseProtocolScopeName(evt.Name); proto != "" {
+ *tags = (*tags)[:0]
+ *tags = append(*tags, "outbound", "protocol", proto)
+ streams.WithLabelValues(*tags...).Set(float64(evt.StreamsOut))
+ } else {
+ // Not measuring service scope, connscope, servicepeer and protocolpeer. Lots of data, and
+ // you can use aggregated peer stats + service stats to infer
+ // this.
+ break
+ }
+ }
+
+ if evt.DeltaIn != 0 {
+ if IsSystemScope(evt.Name) || IsTransientScope(evt.Name) {
+ *tags = (*tags)[:0]
+ *tags = append(*tags, "inbound", evt.Name, "")
+ streams.WithLabelValues(*tags...).Set(float64(evt.StreamsIn))
+ } else if proto := ParseProtocolScopeName(evt.Name); proto != "" {
+ *tags = (*tags)[:0]
+ *tags = append(*tags, "inbound", "protocol", proto)
+ streams.WithLabelValues(*tags...).Set(float64(evt.StreamsIn))
+ } else {
+ // Not measuring service scope, connscope, servicepeer and protocolpeer. Lots of data, and
+ // you can use aggregated peer stats + service stats to infer
+ // this.
+ break
+ }
+ }
+ }
+
+ case TraceAddConnEvt, TraceRemoveConnEvt:
+ if p := PeerStrInScopeName(evt.Name); p != "" {
+ // Aggregated peer stats. Counts how many peers have N connections.
+ // Uses two bucket aggregations: one counts how many connections the
+ // peer has now, the other counts the negative value, i.e. how many
+ // connections the peer used to have. When looking at the data you
+ // take the difference of the two.
+
+ oldConnsOut := int64(evt.ConnsOut - evt.DeltaOut)
+ connsOut := int64(evt.ConnsOut)
+ if oldConnsOut != connsOut {
+ if oldConnsOut != 0 {
+ previousPeerConnsOutbound.Observe(float64(oldConnsOut))
+ }
+ if connsOut != 0 {
+ peerConnsOutbound.Observe(float64(connsOut))
+ }
+ }
+
+ oldConnsIn := int64(evt.ConnsIn - evt.DeltaIn)
+ connsIn := int64(evt.ConnsIn)
+ if oldConnsIn != connsIn {
+ if oldConnsIn != 0 {
+ previousPeerConnsInbound.Observe(float64(oldConnsIn))
+ }
+ if connsIn != 0 {
+ peerConnsInbound.Observe(float64(connsIn))
+ }
+ }
+ } else {
+ if IsConnScope(evt.Name) {
+ // Not measuring per-connection scopes; the data isn't useful.
+ break
+ }
+
+ if IsSystemScope(evt.Name) {
+ connsInboundSystem.Set(float64(evt.ConnsIn))
+ connsOutboundSystem.Set(float64(evt.ConnsOut))
+ } else if IsTransientScope(evt.Name) {
+ connsInboundTransient.Set(float64(evt.ConnsIn))
+ connsOutboundTransient.Set(float64(evt.ConnsOut))
+ }
+
+ // Represents the delta in fds
+ if evt.Delta != 0 {
+ if IsSystemScope(evt.Name) {
+ fdsSystem.Set(float64(evt.FD))
+ } else if IsTransientScope(evt.Name) {
+ fdsTransient.Set(float64(evt.FD))
+ }
+ }
+ }
+
+ case TraceReserveMemoryEvt, TraceReleaseMemoryEvt:
+ if p := PeerStrInScopeName(evt.Name); p != "" {
+ oldMem := evt.Memory - evt.Delta
+ if oldMem != evt.Memory {
+ if oldMem != 0 {
+ previousPeerMemory.Observe(float64(oldMem))
+ }
+ if evt.Memory != 0 {
+ peerMemory.Observe(float64(evt.Memory))
+ }
+ }
+ } else if IsConnScope(evt.Name) {
+ oldMem := evt.Memory - evt.Delta
+ if oldMem != evt.Memory {
+ if oldMem != 0 {
+ previousConnMemory.Observe(float64(oldMem))
+ }
+ if evt.Memory != 0 {
+ connMemory.Observe(float64(evt.Memory))
+ }
+ }
+ } else {
+ if IsSystemScope(evt.Name) || IsTransientScope(evt.Name) {
+ *tags = (*tags)[:0]
+ *tags = append(*tags, evt.Name, "")
+ memoryTotal.WithLabelValues(*tags...).Set(float64(evt.Memory))
+ } else if proto := ParseProtocolScopeName(evt.Name); proto != "" {
+ *tags = (*tags)[:0]
+ *tags = append(*tags, "protocol", proto)
+ memoryTotal.WithLabelValues(*tags...).Set(float64(evt.Memory))
+ } else {
+ // Not measuring connscope, servicepeer and protocolpeer. Lots of data, and
+ // you can use aggregated peer stats + service stats to infer
+ // this.
+ break
+ }
+ }
+
+ case TraceBlockAddConnEvt, TraceBlockAddStreamEvt, TraceBlockReserveMemoryEvt:
+ var resource string
+ if evt.Type == TraceBlockAddConnEvt {
+ resource = "connection"
+ } else if evt.Type == TraceBlockAddStreamEvt {
+ resource = "stream"
+ } else {
+ resource = "memory"
+ }
+
+ scopeName := evt.Name
+ // Only keep the top-level scope name; we don't want the peer id here.
+ // Using indexes and slices to avoid allocating.
+ scopeSplitIdx := strings.IndexByte(scopeName, ':')
+ if scopeSplitIdx != -1 {
+ scopeName = evt.Name[0:scopeSplitIdx]
+ }
+ // Drop the connection or stream id
+ idSplitIdx := strings.IndexByte(scopeName, '-')
+ if idSplitIdx != -1 {
+ scopeName = scopeName[0:idSplitIdx]
+ }
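+ // e.g. "conn-123" becomes "conn", and "peer:<peer-id>" becomes "peer".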
+
+ if evt.DeltaIn != 0 {
+ *tags = (*tags)[:0]
+ *tags = append(*tags, "inbound", scopeName, resource)
+ blockedResources.WithLabelValues(*tags...).Add(float64(evt.DeltaIn))
+ }
+
+ if evt.DeltaOut != 0 {
+ *tags = (*tags)[:0]
+ *tags = append(*tags, "outbound", scopeName, resource)
+ blockedResources.WithLabelValues(*tags...).Add(float64(evt.DeltaOut))
+ }
+
+ if evt.Delta != 0 && resource == "connection" {
+ // This represents fds blocked
+ *tags = (*tags)[:0]
+ *tags = append(*tags, "", scopeName, "fd")
+ blockedResources.WithLabelValues(*tags...).Add(float64(evt.Delta))
+ } else if evt.Delta != 0 {
+ *tags = (*tags)[:0]
+ *tags = append(*tags, "", scopeName, resource)
+ blockedResources.WithLabelValues(*tags...).Add(float64(evt.Delta))
+ }
+ }
+}
diff --git a/p2p/host/resource-manager/stats_test.go b/p2p/host/resource-manager/stats_test.go
new file mode 100644
index 0000000000..b4f1ec996a
--- /dev/null
+++ b/p2p/host/resource-manager/stats_test.go
@@ -0,0 +1,39 @@
+package rcmgr
+
+import (
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var registerOnce sync.Once
+
+func TestTraceReporterStartAndClose(t *testing.T) {
+ rcmgr, err := NewResourceManager(NewFixedLimiter(DefaultLimits.AutoScale()), WithTraceReporter(StatsTraceReporter{}))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer rcmgr.Close()
+}
+
+func TestConsumeEvent(t *testing.T) {
+ evt := TraceEvt{
+ Type: TraceBlockAddStreamEvt,
+ Name: "conn-1",
+ DeltaOut: 1,
+ Time: time.Now().Format(time.RFC3339Nano),
+ }
+
+ registerOnce.Do(func() {
+ MustRegisterWith(prometheus.DefaultRegisterer)
+ })
+
+ str, err := NewStatsTraceReporter()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ str.ConsumeEvent(evt)
+}
diff --git a/p2p/host/resource-manager/sys_not_unix.go b/p2p/host/resource-manager/sys_not_unix.go
new file mode 100644
index 0000000000..7d539c9011
--- /dev/null
+++ b/p2p/host/resource-manager/sys_not_unix.go
@@ -0,0 +1,11 @@
+//go:build !linux && !darwin && !windows
+
+package rcmgr
+
+import "runtime"
+
+// TODO: figure out how to get the number of file descriptors on Windows and other systems
+func getNumFDs() int {
+ log.Warn("cannot determine number of file descriptors", "os", runtime.GOOS)
+ return 0
+}
diff --git a/p2p/host/resource-manager/sys_unix.go b/p2p/host/resource-manager/sys_unix.go
new file mode 100644
index 0000000000..72e9ff276f
--- /dev/null
+++ b/p2p/host/resource-manager/sys_unix.go
@@ -0,0 +1,16 @@
+//go:build linux || darwin
+
+package rcmgr
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+func getNumFDs() int {
+ var l unix.Rlimit
+ if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &l); err != nil {
+ log.Error("failed to get fd limit", "err", err)
+ return 0
+ }
+ return int(l.Cur)
+}
diff --git a/p2p/host/resource-manager/sys_windows.go b/p2p/host/resource-manager/sys_windows.go
new file mode 100644
index 0000000000..7387eb8a3c
--- /dev/null
+++ b/p2p/host/resource-manager/sys_windows.go
@@ -0,0 +1,11 @@
+//go:build windows
+
+package rcmgr
+
+import (
+ "math"
+)
+
+func getNumFDs() int {
+ return math.MaxInt
+}
diff --git a/p2p/host/resource-manager/trace.go b/p2p/host/resource-manager/trace.go
new file mode 100644
index 0000000000..abcecd8e01
--- /dev/null
+++ b/p2p/host/resource-manager/trace.go
@@ -0,0 +1,698 @@
+package rcmgr
+
+import (
+ "compress/gzip"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/network"
+)
+
+type trace struct {
+ path string
+
+ ctx context.Context
+ cancel func()
+ wg sync.WaitGroup
+
+ mx sync.Mutex
+ done bool
+ pendingWrites []interface{}
+ reporters []TraceReporter
+}
+
+type TraceReporter interface {
+ // ConsumeEvent consumes a trace event. This is called synchronously;
+ // implementations should process the event quickly.
+ ConsumeEvent(TraceEvt)
+}
+
+func WithTrace(path string) Option {
+ return func(r *resourceManager) error {
+ if r.trace == nil {
+ r.trace = &trace{path: path}
+ } else {
+ r.trace.path = path
+ }
+ return nil
+ }
+}
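+
+// For example (a sketch; the file name is arbitrary):
+//
+//	mgr, _ := NewResourceManager(limiter, WithTrace("rcmgr.trace.json.gz"))
+//
+// writes newline-delimited JSON events, gzip-compressed, to the given path.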
+
+func WithTraceReporter(reporter TraceReporter) Option {
+ return func(r *resourceManager) error {
+ if r.trace == nil {
+ r.trace = &trace{}
+ }
+ r.trace.reporters = append(r.trace.reporters, reporter)
+ return nil
+ }
+}
+
+type TraceEvtTyp string
+
+const (
+ TraceStartEvt TraceEvtTyp = "start"
+ TraceCreateScopeEvt TraceEvtTyp = "create_scope"
+ TraceDestroyScopeEvt TraceEvtTyp = "destroy_scope"
+ TraceReserveMemoryEvt TraceEvtTyp = "reserve_memory"
+ TraceBlockReserveMemoryEvt TraceEvtTyp = "block_reserve_memory"
+ TraceReleaseMemoryEvt TraceEvtTyp = "release_memory"
+ TraceAddStreamEvt TraceEvtTyp = "add_stream"
+ TraceBlockAddStreamEvt TraceEvtTyp = "block_add_stream"
+ TraceRemoveStreamEvt TraceEvtTyp = "remove_stream"
+ TraceAddConnEvt TraceEvtTyp = "add_conn"
+ TraceBlockAddConnEvt TraceEvtTyp = "block_add_conn"
+ TraceRemoveConnEvt TraceEvtTyp = "remove_conn"
+)
+
+type scopeClass struct {
+ name string
+}
+
+func (s scopeClass) MarshalJSON() ([]byte, error) {
+ name := s.name
+ var span string
+ if idx := strings.Index(name, "span:"); idx > -1 {
+ span = name[idx+5:]
+ name = name[:idx-1]
+ }
+ // System and Transient scope
+ if name == "system" || name == "transient" || name == "allowlistedSystem" || name == "allowlistedTransient" {
+ return json.Marshal(struct {
+ Class string
+ Span string `json:",omitempty"`
+ }{
+ Class: name,
+ Span: span,
+ })
+ }
+ // Connection scope
+ if strings.HasPrefix(name, "conn-") {
+ return json.Marshal(struct {
+ Class string
+ Conn string
+ Span string `json:",omitempty"`
+ }{
+ Class: "conn",
+ Conn: name[5:],
+ Span: span,
+ })
+ }
+ // Stream scope
+ if strings.HasPrefix(name, "stream-") {
+ return json.Marshal(struct {
+ Class string
+ Stream string
+ Span string `json:",omitempty"`
+ }{
+ Class: "stream",
+ Stream: name[7:],
+ Span: span,
+ })
+ }
+ // Peer scope
+ if strings.HasPrefix(name, "peer:") {
+ return json.Marshal(struct {
+ Class string
+ Peer string
+ Span string `json:",omitempty"`
+ }{
+ Class: "peer",
+ Peer: name[5:],
+ Span: span,
+ })
+ }
+
+ if strings.HasPrefix(name, "service:") {
+ if idx := strings.Index(name, "peer:"); idx > 0 { // Peer-Service scope
+ return json.Marshal(struct {
+ Class string
+ Service string
+ Peer string
+ Span string `json:",omitempty"`
+ }{
+ Class: "service-peer",
+ Service: name[8 : idx-1],
+ Peer: name[idx+5:],
+ Span: span,
+ })
+ } else { // Service scope
+ return json.Marshal(struct {
+ Class string
+ Service string
+ Span string `json:",omitempty"`
+ }{
+ Class: "service",
+ Service: name[8:],
+ Span: span,
+ })
+ }
+ }
+
+ if strings.HasPrefix(name, "protocol:") {
+ if idx := strings.Index(name, "peer:"); idx > -1 { // Peer-Protocol scope
+ return json.Marshal(struct {
+ Class string
+ Protocol string
+ Peer string
+ Span string `json:",omitempty"`
+ }{
+ Class: "protocol-peer",
+ Protocol: name[9 : idx-1],
+ Peer: name[idx+5:],
+ Span: span,
+ })
+ } else { // Protocol scope
+ return json.Marshal(struct {
+ Class string
+ Protocol string
+ Span string `json:",omitempty"`
+ }{
+ Class: "protocol",
+ Protocol: name[9:],
+ Span: span,
+ })
+ }
+ }
+
+ return nil, fmt.Errorf("unrecognized scope: %s", name)
+}
+
+type TraceEvt struct {
+ Time string
+ Type TraceEvtTyp
+
+ Scope *scopeClass `json:",omitempty"`
+ Name string `json:",omitempty"`
+
+ Limit interface{} `json:",omitempty"`
+
+ Priority uint8 `json:",omitempty"`
+
+ Delta int64 `json:",omitempty"`
+ DeltaIn int `json:",omitempty"`
+ DeltaOut int `json:",omitempty"`
+
+ Memory int64 `json:",omitempty"`
+
+ StreamsIn int `json:",omitempty"`
+ StreamsOut int `json:",omitempty"`
+
+ ConnsIn int `json:",omitempty"`
+ ConnsOut int `json:",omitempty"`
+
+ FD int `json:",omitempty"`
+}
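+
+// A single encoded trace event looks like (illustrative values):
+//
+//	{"Time":"2006-01-02T15:04:05.999999999Z","Type":"add_stream","Scope":{"Class":"peer","Peer":"12D3Koo..."},"Name":"peer:12D3Koo...","DeltaIn":1,"StreamsIn":1}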
+
+func (t *trace) push(evt TraceEvt) {
+ t.mx.Lock()
+ defer t.mx.Unlock()
+
+ if t.done {
+ return
+ }
+ evt.Time = time.Now().Format(time.RFC3339Nano)
+ if evt.Name != "" {
+ evt.Scope = &scopeClass{name: evt.Name}
+ }
+
+ for _, reporter := range t.reporters {
+ reporter.ConsumeEvent(evt)
+ }
+
+ if t.path != "" {
+ t.pendingWrites = append(t.pendingWrites, evt)
+ }
+}
+
+func (t *trace) backgroundWriter(out io.WriteCloser) {
+ defer t.wg.Done()
+ defer out.Close()
+
+ gzOut := gzip.NewWriter(out)
+ defer gzOut.Close()
+
+ jsonOut := json.NewEncoder(gzOut)
+
+ ticker := time.NewTicker(time.Second)
+ defer ticker.Stop()
+
+ var pend []interface{}
+
+ getEvents := func() {
+ t.mx.Lock()
+ tmp := t.pendingWrites
+ t.pendingWrites = pend[:0]
+ pend = tmp
+ t.mx.Unlock()
+ }
+
+ for {
+ select {
+ case <-ticker.C:
+ getEvents()
+
+ if len(pend) == 0 {
+ continue
+ }
+
+ if err := t.writeEvents(pend, jsonOut); err != nil {
+ log.Warn("error writing rcmgr trace", "err", err)
+ t.mx.Lock()
+ t.done = true
+ t.mx.Unlock()
+ return
+ }
+
+ if err := gzOut.Flush(); err != nil {
+ log.Warn("error flushing rcmgr trace", "err", err)
+ t.mx.Lock()
+ t.done = true
+ t.mx.Unlock()
+ return
+ }
+
+ case <-t.ctx.Done():
+ getEvents()
+
+ if len(pend) == 0 {
+ return
+ }
+
+ if err := t.writeEvents(pend, jsonOut); err != nil {
+ log.Warn("error writing rcmgr trace", "err", err)
+ return
+ }
+
+ if err := gzOut.Flush(); err != nil {
+ log.Warn("error flushing rcmgr trace", "err", err)
+ }
+
+ return
+ }
+ }
+}
+
+func (t *trace) writeEvents(pend []interface{}, jout *json.Encoder) error {
+ for _, e := range pend {
+ if err := jout.Encode(e); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (t *trace) Start(limits Limiter) error {
+ if t == nil {
+ return nil
+ }
+
+ t.ctx, t.cancel = context.WithCancel(context.Background())
+
+ if t.path != "" {
+ out, err := os.OpenFile(t.path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
+ if err != nil {
+ return err
+ }
+
+ t.wg.Add(1)
+ go t.backgroundWriter(out)
+ }
+
+ t.push(TraceEvt{
+ Type: TraceStartEvt,
+ Limit: limits,
+ })
+
+ return nil
+}
+
+func (t *trace) Close() error {
+ if t == nil {
+ return nil
+ }
+
+ t.mx.Lock()
+
+ if t.done {
+ t.mx.Unlock()
+ return nil
+ }
+
+ t.cancel()
+ t.done = true
+ t.mx.Unlock()
+
+ t.wg.Wait()
+ return nil
+}
+
+func (t *trace) CreateScope(scope string, limit Limit) {
+ if t == nil {
+ return
+ }
+
+ t.push(TraceEvt{
+ Type: TraceCreateScopeEvt,
+ Name: scope,
+ Limit: limit,
+ })
+}
+
+func (t *trace) DestroyScope(scope string) {
+ if t == nil {
+ return
+ }
+
+ t.push(TraceEvt{
+ Type: TraceDestroyScopeEvt,
+ Name: scope,
+ })
+}
+
+func (t *trace) ReserveMemory(scope string, prio uint8, size, mem int64) {
+ if t == nil {
+ return
+ }
+
+ if size == 0 {
+ return
+ }
+
+ t.push(TraceEvt{
+ Type: TraceReserveMemoryEvt,
+ Name: scope,
+ Priority: prio,
+ Delta: size,
+ Memory: mem,
+ })
+}
+
+func (t *trace) BlockReserveMemory(scope string, prio uint8, size, mem int64) {
+ if t == nil {
+ return
+ }
+
+ if size == 0 {
+ return
+ }
+
+ t.push(TraceEvt{
+ Type: TraceBlockReserveMemoryEvt,
+ Name: scope,
+ Priority: prio,
+ Delta: size,
+ Memory: mem,
+ })
+}
+
+func (t *trace) ReleaseMemory(scope string, size, mem int64) {
+ if t == nil {
+ return
+ }
+
+ if size == 0 {
+ return
+ }
+
+ t.push(TraceEvt{
+ Type: TraceReleaseMemoryEvt,
+ Name: scope,
+ Delta: -size,
+ Memory: mem,
+ })
+}
+
+func (t *trace) AddStream(scope string, dir network.Direction, nstreamsIn, nstreamsOut int) {
+ if t == nil {
+ return
+ }
+
+ var deltaIn, deltaOut int
+ if dir == network.DirInbound {
+ deltaIn = 1
+ } else {
+ deltaOut = 1
+ }
+
+ t.push(TraceEvt{
+ Type: TraceAddStreamEvt,
+ Name: scope,
+ DeltaIn: deltaIn,
+ DeltaOut: deltaOut,
+ StreamsIn: nstreamsIn,
+ StreamsOut: nstreamsOut,
+ })
+}
+
+func (t *trace) BlockAddStream(scope string, dir network.Direction, nstreamsIn, nstreamsOut int) {
+ if t == nil {
+ return
+ }
+
+ var deltaIn, deltaOut int
+ if dir == network.DirInbound {
+ deltaIn = 1
+ } else {
+ deltaOut = 1
+ }
+
+ t.push(TraceEvt{
+ Type: TraceBlockAddStreamEvt,
+ Name: scope,
+ DeltaIn: deltaIn,
+ DeltaOut: deltaOut,
+ StreamsIn: nstreamsIn,
+ StreamsOut: nstreamsOut,
+ })
+}
+
+func (t *trace) RemoveStream(scope string, dir network.Direction, nstreamsIn, nstreamsOut int) {
+ if t == nil {
+ return
+ }
+
+ var deltaIn, deltaOut int
+ if dir == network.DirInbound {
+ deltaIn = -1
+ } else {
+ deltaOut = -1
+ }
+
+ t.push(TraceEvt{
+ Type: TraceRemoveStreamEvt,
+ Name: scope,
+ DeltaIn: deltaIn,
+ DeltaOut: deltaOut,
+ StreamsIn: nstreamsIn,
+ StreamsOut: nstreamsOut,
+ })
+}
+
+func (t *trace) AddStreams(scope string, deltaIn, deltaOut, nstreamsIn, nstreamsOut int) {
+ if t == nil {
+ return
+ }
+
+ if deltaIn == 0 && deltaOut == 0 {
+ return
+ }
+
+ t.push(TraceEvt{
+ Type: TraceAddStreamEvt,
+ Name: scope,
+ DeltaIn: deltaIn,
+ DeltaOut: deltaOut,
+ StreamsIn: nstreamsIn,
+ StreamsOut: nstreamsOut,
+ })
+}
+
+func (t *trace) BlockAddStreams(scope string, deltaIn, deltaOut, nstreamsIn, nstreamsOut int) {
+ if t == nil {
+ return
+ }
+
+ if deltaIn == 0 && deltaOut == 0 {
+ return
+ }
+
+ t.push(TraceEvt{
+ Type: TraceBlockAddStreamEvt,
+ Name: scope,
+ DeltaIn: deltaIn,
+ DeltaOut: deltaOut,
+ StreamsIn: nstreamsIn,
+ StreamsOut: nstreamsOut,
+ })
+}
+
+func (t *trace) RemoveStreams(scope string, deltaIn, deltaOut, nstreamsIn, nstreamsOut int) {
+ if t == nil {
+ return
+ }
+
+ if deltaIn == 0 && deltaOut == 0 {
+ return
+ }
+
+ t.push(TraceEvt{
+ Type: TraceRemoveStreamEvt,
+ Name: scope,
+ DeltaIn: -deltaIn,
+ DeltaOut: -deltaOut,
+ StreamsIn: nstreamsIn,
+ StreamsOut: nstreamsOut,
+ })
+}
+
+func (t *trace) AddConn(scope string, dir network.Direction, usefd bool, nconnsIn, nconnsOut, nfd int) {
+ if t == nil {
+ return
+ }
+
+ var deltaIn, deltaOut, deltafd int
+ if dir == network.DirInbound {
+ deltaIn = 1
+ } else {
+ deltaOut = 1
+ }
+ if usefd {
+ deltafd = 1
+ }
+
+ t.push(TraceEvt{
+ Type: TraceAddConnEvt,
+ Name: scope,
+ DeltaIn: deltaIn,
+ DeltaOut: deltaOut,
+ Delta: int64(deltafd),
+ ConnsIn: nconnsIn,
+ ConnsOut: nconnsOut,
+ FD: nfd,
+ })
+}
+
+func (t *trace) BlockAddConn(scope string, dir network.Direction, usefd bool, nconnsIn, nconnsOut, nfd int) {
+ if t == nil {
+ return
+ }
+
+ var deltaIn, deltaOut, deltafd int
+ if dir == network.DirInbound {
+ deltaIn = 1
+ } else {
+ deltaOut = 1
+ }
+ if usefd {
+ deltafd = 1
+ }
+
+ t.push(TraceEvt{
+ Type: TraceBlockAddConnEvt,
+ Name: scope,
+ DeltaIn: deltaIn,
+ DeltaOut: deltaOut,
+ Delta: int64(deltafd),
+ ConnsIn: nconnsIn,
+ ConnsOut: nconnsOut,
+ FD: nfd,
+ })
+}
+
+func (t *trace) RemoveConn(scope string, dir network.Direction, usefd bool, nconnsIn, nconnsOut, nfd int) {
+ if t == nil {
+ return
+ }
+
+ var deltaIn, deltaOut, deltafd int
+ if dir == network.DirInbound {
+ deltaIn = -1
+ } else {
+ deltaOut = -1
+ }
+ if usefd {
+ deltafd = -1
+ }
+
+ t.push(TraceEvt{
+ Type: TraceRemoveConnEvt,
+ Name: scope,
+ DeltaIn: deltaIn,
+ DeltaOut: deltaOut,
+ Delta: int64(deltafd),
+ ConnsIn: nconnsIn,
+ ConnsOut: nconnsOut,
+ FD: nfd,
+ })
+}
+
+func (t *trace) AddConns(scope string, deltaIn, deltaOut, deltafd, nconnsIn, nconnsOut, nfd int) {
+ if t == nil {
+ return
+ }
+
+ if deltaIn == 0 && deltaOut == 0 && deltafd == 0 {
+ return
+ }
+
+ t.push(TraceEvt{
+ Type: TraceAddConnEvt,
+ Name: scope,
+ DeltaIn: deltaIn,
+ DeltaOut: deltaOut,
+ Delta: int64(deltafd),
+ ConnsIn: nconnsIn,
+ ConnsOut: nconnsOut,
+ FD: nfd,
+ })
+}
+
+func (t *trace) BlockAddConns(scope string, deltaIn, deltaOut, deltafd, nconnsIn, nconnsOut, nfd int) {
+ if t == nil {
+ return
+ }
+
+ if deltaIn == 0 && deltaOut == 0 && deltafd == 0 {
+ return
+ }
+
+ t.push(TraceEvt{
+ Type: TraceBlockAddConnEvt,
+ Name: scope,
+ DeltaIn: deltaIn,
+ DeltaOut: deltaOut,
+ Delta: int64(deltafd),
+ ConnsIn: nconnsIn,
+ ConnsOut: nconnsOut,
+ FD: nfd,
+ })
+}
+
+func (t *trace) RemoveConns(scope string, deltaIn, deltaOut, deltafd, nconnsIn, nconnsOut, nfd int) {
+ if t == nil {
+ return
+ }
+
+ if deltaIn == 0 && deltaOut == 0 && deltafd == 0 {
+ return
+ }
+
+ t.push(TraceEvt{
+ Type: TraceRemoveConnEvt,
+ Name: scope,
+ DeltaIn: -deltaIn,
+ DeltaOut: -deltaOut,
+ Delta: -int64(deltafd),
+ ConnsIn: nconnsIn,
+ ConnsOut: nconnsOut,
+ FD: nfd,
+ })
+}
diff --git a/p2p/host/routed/routed.go b/p2p/host/routed/routed.go
index df837fc7a9..ebfe02286b 100644
--- a/p2p/host/routed/routed.go
+++ b/p2p/host/routed/routed.go
@@ -5,17 +5,17 @@ import (
"fmt"
"time"
- host "github.com/libp2p/go-libp2p-host"
-
- logging "github.com/ipfs/go-log"
- ifconnmgr "github.com/libp2p/go-libp2p-interface-connmgr"
- lgbl "github.com/libp2p/go-libp2p-loggables"
- inet "github.com/libp2p/go-libp2p-net"
- peer "github.com/libp2p/go-libp2p-peer"
- pstore "github.com/libp2p/go-libp2p-peerstore"
- protocol "github.com/libp2p/go-libp2p-protocol"
+ "github.com/libp2p/go-libp2p/core/connmgr"
+ "github.com/libp2p/go-libp2p/core/event"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/libp2p/go-libp2p/core/protocol"
+
+ logging "github.com/libp2p/go-libp2p/gologshim"
+
ma "github.com/multiformats/go-multiaddr"
- msmux "github.com/multiformats/go-multistream"
)
var log = logging.Logger("routedhost")
@@ -33,7 +33,7 @@ type RoutedHost struct {
}
type Routing interface {
- FindPeer(context.Context, peer.ID) (pstore.PeerInfo, error)
+ FindPeer(context.Context, peer.ID) (peer.AddrInfo, error)
}
func Wrap(h host.Host, r Routing) *RoutedHost {
@@ -45,15 +45,20 @@ func Wrap(h host.Host, r Routing) *RoutedHost {
//
// RoutedHost's Connect differs in that if the host has no addresses for a
// given peer, it will use its routing system to try to find some.
-func (rh *RoutedHost) Connect(ctx context.Context, pi pstore.PeerInfo) error {
- // first, check if we're already connected.
- if len(rh.Network().ConnsToPeer(pi.ID)) > 0 {
- return nil
+func (rh *RoutedHost) Connect(ctx context.Context, pi peer.AddrInfo) error {
+ // first, check if we're already connected unless force direct dial.
+ forceDirect, _ := network.GetForceDirectDial(ctx)
+ canUseLimitedConn, _ := network.GetAllowLimitedConn(ctx)
+ if !forceDirect {
+ connectedness := rh.Network().Connectedness(pi.ID)
+ if connectedness == network.Connected || (canUseLimitedConn && connectedness == network.Limited) {
+ return nil
+ }
}
// if we were given some addresses, keep + use them.
if len(pi.Addrs) > 0 {
- rh.Peerstore().AddAddrs(pi.ID, pi.Addrs, pstore.TempAddrTTL)
+ rh.Peerstore().AddAddrs(pi.ID, pi.Addrs, peerstore.TempAddrTTL)
}
// Check if we have some addresses in our recent memory.
@@ -67,9 +72,76 @@ func (rh *RoutedHost) Connect(ctx context.Context, pi pstore.PeerInfo) error {
}
}
+ // Issue 448: if our address set includes routed specific relay addrs,
+ // we need to make sure the relay's addr itself is in the peerstore or else
+ // we won't be able to dial it.
+ for _, addr := range addrs {
+ if _, err := addr.ValueForProtocol(ma.P_CIRCUIT); err != nil {
+ // not a relay address
+ continue
+ }
+
+ if addr.Protocols()[0].Code != ma.P_P2P {
+ // not a routed relay specific address
+ continue
+ }
+
+ relay, _ := addr.ValueForProtocol(ma.P_P2P)
+ relayID, err := peer.Decode(relay)
+ if err != nil {
+ log.Debug("failed to parse relay ID in address", "relay", relay, "err", err)
+ continue
+ }
+
+ if len(rh.Peerstore().Addrs(relayID)) > 0 {
+ // we already have addrs for this relay
+ continue
+ }
+
+ relayAddrs, err := rh.findPeerAddrs(ctx, relayID)
+ if err != nil {
+ log.Debug("failed to find relay", "relay", relay, "err", err)
+ continue
+ }
+
+ rh.Peerstore().AddAddrs(relayID, relayAddrs, peerstore.TempAddrTTL)
+ }
+
// if we're here, we got some addrs. let's use our wrapped host to connect.
pi.Addrs = addrs
- return rh.host.Connect(ctx, pi)
+ if cerr := rh.host.Connect(ctx, pi); cerr != nil {
+ // We couldn't connect. Let's check if we have the most
+ // up-to-date addresses for the given peer. If there
+ // are addresses we didn't know about previously, we
+ // try to connect again.
+ newAddrs, err := rh.findPeerAddrs(ctx, pi.ID)
+ if err != nil {
+ log.Debug("failed to find more peer addresses", "peer", pi.ID, "err", err)
+ return cerr
+ }
+
+ // Build lookup map
+ lookup := make(map[string]struct{}, len(addrs))
+ for _, addr := range addrs {
+ lookup[string(addr.Bytes())] = struct{}{}
+ }
+
+ // if there's any address that's not in the previous set
+ // of addresses, try to connect again. If all addresses
+ // were known previously, we return the original error.
+ for _, newAddr := range newAddrs {
+ if _, found := lookup[string(newAddr.Bytes())]; found {
+ continue
+ }
+
+ pi.Addrs = newAddrs
+ return rh.host.Connect(ctx, pi)
+ }
+ // No appropriate new address found.
+ // Return the original dial error.
+ return cerr
+ }
+ return nil
}
func (rh *RoutedHost) findPeerAddrs(ctx context.Context, id peer.ID) ([]ma.Multiaddr, error) {
@@ -80,26 +152,22 @@ func (rh *RoutedHost) findPeerAddrs(ctx context.Context, id peer.ID) ([]ma.Multi
if pi.ID != id {
err = fmt.Errorf("routing failure: provided addrs for different peer")
- logRoutingErrDifferentPeers(ctx, id, pi.ID, err)
+ log.Error("got wrong peer",
+ "error", err,
+ "wantedPeer", id,
+ "gotPeer", pi.ID,
+ )
return nil, err
}
return pi.Addrs, nil
}
-func logRoutingErrDifferentPeers(ctx context.Context, wanted, got peer.ID, err error) {
- lm := make(lgbl.DeferredMap)
- lm["error"] = err
- lm["wantedPeer"] = func() interface{} { return wanted.Pretty() }
- lm["gotPeer"] = func() interface{} { return got.Pretty() }
- log.Event(ctx, "routingError", lm)
-}
-
func (rh *RoutedHost) ID() peer.ID {
return rh.host.ID()
}
-func (rh *RoutedHost) Peerstore() pstore.Peerstore {
+func (rh *RoutedHost) Peerstore() peerstore.Peerstore {
return rh.host.Peerstore()
}
@@ -107,19 +175,23 @@ func (rh *RoutedHost) Addrs() []ma.Multiaddr {
return rh.host.Addrs()
}
-func (rh *RoutedHost) Network() inet.Network {
+func (rh *RoutedHost) Network() network.Network {
return rh.host.Network()
}
-func (rh *RoutedHost) Mux() *msmux.MultistreamMuxer {
+func (rh *RoutedHost) Mux() protocol.Switch {
return rh.host.Mux()
}
-func (rh *RoutedHost) SetStreamHandler(pid protocol.ID, handler inet.StreamHandler) {
+func (rh *RoutedHost) EventBus() event.Bus {
+ return rh.host.EventBus()
+}
+
+func (rh *RoutedHost) SetStreamHandler(pid protocol.ID, handler network.StreamHandler) {
rh.host.SetStreamHandler(pid, handler)
}
-func (rh *RoutedHost) SetStreamHandlerMatch(pid protocol.ID, m func(string) bool, handler inet.StreamHandler) {
+func (rh *RoutedHost) SetStreamHandlerMatch(pid protocol.ID, m func(protocol.ID) bool, handler network.StreamHandler) {
rh.host.SetStreamHandlerMatch(pid, m, handler)
}
@@ -127,13 +199,16 @@ func (rh *RoutedHost) RemoveStreamHandler(pid protocol.ID) {
rh.host.RemoveStreamHandler(pid)
}
-func (rh *RoutedHost) NewStream(ctx context.Context, p peer.ID, pids ...protocol.ID) (inet.Stream, error) {
+func (rh *RoutedHost) NewStream(ctx context.Context, p peer.ID, pids ...protocol.ID) (network.Stream, error) {
// Ensure we have a connection, with peer addresses resolved by the routing system (#207)
// It is not sufficient to let the underlying host connect, it will most likely not have
// any addresses for the peer without any prior connections.
- err := rh.Connect(ctx, pstore.PeerInfo{ID: p})
- if err != nil {
- return nil, err
+ // If the caller wants to prevent the host from dialing, it should use the NoDial option.
+ if nodial, _ := network.GetNoDial(ctx); !nodial {
+ err := rh.Connect(ctx, peer.AddrInfo{ID: p})
+ if err != nil {
+ return nil, err
+ }
}
return rh.host.NewStream(ctx, p, pids...)
@@ -142,7 +217,7 @@ func (rh *RoutedHost) Close() error {
// no need to close IpfsRouting. we dont own it.
return rh.host.Close()
}
-func (rh *RoutedHost) ConnManager() ifconnmgr.ConnManager {
+func (rh *RoutedHost) ConnManager() connmgr.ConnManager {
return rh.host.ConnManager()
}
diff --git a/p2p/host/routed/routed_test.go b/p2p/host/routed/routed_test.go
new file mode 100644
index 0000000000..78ada28abb
--- /dev/null
+++ b/p2p/host/routed/routed_test.go
@@ -0,0 +1,92 @@
+package routedhost
+
+import (
+ "context"
+ "testing"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+ basic "github.com/libp2p/go-libp2p/p2p/host/basic"
+ swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
+
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/stretchr/testify/require"
+)
+
+var _ Routing = (*mockRouting)(nil)
+
+type mockRouting struct {
+ callCount int
+ findPeerFn func(ctx context.Context, id peer.ID) (peer.AddrInfo, error)
+}
+
+func (m *mockRouting) FindPeer(ctx context.Context, pid peer.ID) (peer.AddrInfo, error) {
+ m.callCount++
+ return m.findPeerFn(ctx, pid)
+}
+
+func TestRoutedHostConnectToObsoleteAddresses(t *testing.T) {
+ h1, err := basic.NewHost(swarmt.GenSwarm(t), nil)
+ require.NoError(t, err)
+ defer h1.Close()
+ h1.Start()
+
+ h2, err := basic.NewHost(swarmt.GenSwarm(t), nil)
+ require.NoError(t, err)
+ defer h2.Close()
+ h2.Start()
+
+ // assemble the AddrInfo struct to use for the connection attempt
+ pi := peer.AddrInfo{
+ ID: h2.ID(),
+ // Use a wrong multiaddr for host 2, so that the initial connection attempt will fail
+ // (we have obsolete multiaddr information)
+ Addrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/1234")},
+ }
+
+ // Build mock routing module and replace the FindPeer function.
+ // Now, that function will return the correct multiaddrs for host 2
+ // (we have fetched the most up-to-date data from the DHT)
+ mr := &mockRouting{
+ findPeerFn: func(context.Context, peer.ID) (peer.AddrInfo, error) {
+ return peer.AddrInfo{
+ ID: h2.ID(),
+ Addrs: h2.Addrs(),
+ }, nil
+ },
+ }
+
+ // Build routed host
+ rh := Wrap(h1, mr)
+ // Connection establishment should have worked without an error
+ require.NoError(t, rh.Connect(context.Background(), pi))
+ require.Equal(t, 1, mr.callCount, "the mocked FindPeer function should have been called")
+}
+
+func TestRoutedHostConnectFindPeerNoUsefulAddrs(t *testing.T) {
+ h1, err := basic.NewHost(swarmt.GenSwarm(t), nil)
+ require.NoError(t, err)
+ defer h1.Close()
+
+ h2, err := basic.NewHost(swarmt.GenSwarm(t), nil)
+ require.NoError(t, err)
+ defer h2.Close()
+
+ // assemble the AddrInfo struct to use for the connection attempt
+ pi := peer.AddrInfo{
+ ID: h2.ID(),
+ // Use a wrong multiaddr for host 2, so that the initial connection attempt will fail
+ // (we have obsolete multiaddr information)
+ Addrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/1234")},
+ }
+
+ // Build a mock routing module whose FindPeer returns the same obsolete
+ // address, so the routed host cannot learn anything new and the retry fails.
+ mr := &mockRouting{findPeerFn: func(context.Context, peer.ID) (peer.AddrInfo, error) { return pi, nil }}
+
+ // Build routed host
+ rh := Wrap(h1, mr)
+ // Connection establishment should fail, since we didn't provide any useful addresses in FindPeer.
+ require.Error(t, rh.Connect(context.Background(), pi))
+ require.Equal(t, 1, mr.callCount, "the mocked FindPeer function should have been called")
+}
diff --git a/p2p/http/auth/auth.go b/p2p/http/auth/auth.go
new file mode 100644
index 0000000000..6d3db1f4c7
--- /dev/null
+++ b/p2p/http/auth/auth.go
@@ -0,0 +1,11 @@
+package httppeeridauth
+
+import (
+ logging "github.com/libp2p/go-libp2p/gologshim"
+ "github.com/libp2p/go-libp2p/p2p/http/auth/internal/handshake"
+)
+
+const PeerIDAuthScheme = handshake.PeerIDAuthScheme
+const ProtocolID = "/http-peer-id-auth/1.0.0"
+
+var log = logging.Logger("http-peer-id-auth")
diff --git a/p2p/http/auth/auth_test.go b/p2p/http/auth/auth_test.go
new file mode 100644
index 0000000000..54f0bb060d
--- /dev/null
+++ b/p2p/http/auth/auth_test.go
@@ -0,0 +1,296 @@
+package httppeeridauth
+
+import (
+ "bytes"
+ "crypto/rand"
+ "crypto/tls"
+ "fmt"
+ "io"
+ "log/slog"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// TestMutualAuth tests that we can do a mutually authenticated round trip
+func TestMutualAuth(t *testing.T) {
+ originalLogger := log
+ defer func() {
+ log = originalLogger
+ }()
+ // Override to print debug logs
+ log = slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{
+ Level: slog.LevelDebug,
+ }))
+
+ zeroBytes := make([]byte, 64)
+ serverKey, _, err := crypto.GenerateEd25519Key(bytes.NewReader(zeroBytes))
+ require.NoError(t, err)
+
+ type clientTestCase struct {
+ name string
+ clientKeyGen func(t *testing.T) crypto.PrivKey
+ }
+
+ clientTestCases := []clientTestCase{
+ {
+ name: "ED25519",
+ clientKeyGen: func(t *testing.T) crypto.PrivKey {
+ t.Helper()
+ clientKey, _, err := crypto.GenerateEd25519Key(rand.Reader)
+ require.NoError(t, err)
+ return clientKey
+ },
+ },
+ {
+ name: "RSA",
+ clientKeyGen: func(t *testing.T) crypto.PrivKey {
+ t.Helper()
+ clientKey, _, err := crypto.GenerateRSAKeyPair(2048, rand.Reader)
+ require.NoError(t, err)
+ return clientKey
+ },
+ },
+ }
+
+ type serverTestCase struct {
+ name string
+ serverGen func(t *testing.T) (*httptest.Server, *ServerPeerIDAuth)
+ }
+
+ serverTestCases := []serverTestCase{
+ {
+ name: "no TLS",
+ serverGen: func(t *testing.T) (*httptest.Server, *ServerPeerIDAuth) {
+ t.Helper()
+ auth := ServerPeerIDAuth{
+ PrivKey: serverKey,
+ ValidHostnameFn: func(s string) bool {
+ return s == "example.com"
+ },
+ TokenTTL: time.Hour,
+ NoTLS: true,
+ }
+
+ ts := httptest.NewServer(&auth)
+ t.Cleanup(ts.Close)
+ return ts, &auth
+ },
+ },
+ {
+ name: "TLS",
+ serverGen: func(t *testing.T) (*httptest.Server, *ServerPeerIDAuth) {
+ t.Helper()
+ auth := ServerPeerIDAuth{
+ PrivKey: serverKey,
+ ValidHostnameFn: func(s string) bool {
+ return s == "example.com"
+ },
+ TokenTTL: time.Hour,
+ }
+
+ ts := httptest.NewTLSServer(&auth)
+ t.Cleanup(ts.Close)
+ return ts, &auth
+ },
+ },
+ }
+
+ for _, ctc := range clientTestCases {
+ for _, stc := range serverTestCases {
+ t.Run(ctc.name+"+"+stc.name, func(t *testing.T) {
+ ts, server := stc.serverGen(t)
+ client := ts.Client()
+ roundTripper := instrumentedRoundTripper{client.Transport, 0}
+ client.Transport = &roundTripper
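+ // requestsSent reports the number of round trips since the last
+ // call, resetting the counter as a side effect.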
+ requestsSent := func() int {
+ defer func() { roundTripper.timesRoundtripped = 0 }()
+ return roundTripper.timesRoundtripped
+ }
+
+ tlsClientConfig := roundTripper.TLSClientConfig()
+ if tlsClientConfig != nil {
+ // If we're using TLS, we need to set the SNI so that the
+ // server can verify the request Host matches it.
+ tlsClientConfig.ServerName = "example.com"
+ }
+ clientKey := ctc.clientKeyGen(t)
+ clientAuth := ClientPeerIDAuth{PrivKey: clientKey}
+
+ expectedServerID, err := peer.IDFromPrivateKey(serverKey)
+ require.NoError(t, err)
+
+ req, err := http.NewRequest("POST", ts.URL, nil)
+ require.NoError(t, err)
+ req.Host = "example.com"
+ serverID, resp, err := clientAuth.AuthenticatedDo(client, req)
+ require.NoError(t, err)
+ require.Equal(t, expectedServerID, serverID)
+ require.NotZero(t, clientAuth.tm.tokenMap["example.com"])
+ require.Equal(t, http.StatusOK, resp.StatusCode)
+ require.Equal(t, 2, requestsSent())
+
+ // Once more with the auth token
+ req, err = http.NewRequest("POST", ts.URL, nil)
+ require.NoError(t, err)
+ req.Host = "example.com"
+ serverID, resp, err = clientAuth.AuthenticatedDo(client, req)
+ require.NotEmpty(t, req.Header.Get("Authorization"))
+ require.True(t, HasAuthHeader(req))
+ require.NoError(t, err)
+ require.Equal(t, expectedServerID, serverID)
+ require.NotZero(t, clientAuth.tm.tokenMap["example.com"])
+ require.Equal(t, http.StatusOK, resp.StatusCode)
+ require.Equal(t, 1, requestsSent(), "should only send one request since we have a token")
+
+ t.Run("Tokens Expired", func(t *testing.T) {
+ // Clear the auth token on the server side
+ server.TokenTTL = 1 // Small TTL
+ time.Sleep(100 * time.Millisecond)
+ resetServerTokenTTL := sync.OnceFunc(func() {
+ server.TokenTTL = time.Hour
+ })
+
+ req, err := http.NewRequest("POST", ts.URL, nil)
+ require.NoError(t, err)
+ req.Host = "example.com"
+ req.GetBody = func() (io.ReadCloser, error) {
+ resetServerTokenTTL()
+ return nil, nil
+ }
+ serverID, resp, err = clientAuth.AuthenticatedDo(client, req)
+ require.NoError(t, err)
+ require.NotEmpty(t, req.Header.Get("Authorization"))
+ require.Equal(t, http.StatusOK, resp.StatusCode)
+ require.Equal(t, expectedServerID, serverID)
+ require.NotZero(t, clientAuth.tm.tokenMap["example.com"])
+ require.Equal(t, 3, requestsSent(), "should send 3 requests since our token expired")
+ })
+
+ t.Run("Tokens Invalidated", func(t *testing.T) {
+ // Clear the auth token on the server side
+ key := make([]byte, 32)
+ _, err := rand.Read(key)
+ if err != nil {
+ panic(err)
+ }
+ server.hmacPool = newHmacPool(key)
+
+ req, err := http.NewRequest("POST", ts.URL, nil)
+ require.NoError(t, err)
+ req.GetBody = func() (io.ReadCloser, error) {
+ return nil, nil
+ }
+ req.Host = "example.com"
+ serverID, resp, err = clientAuth.AuthenticatedDo(client, req)
+ require.NoError(t, err)
+ require.NotEmpty(t, req.Header.Get("Authorization"))
+ require.Equal(t, http.StatusOK, resp.StatusCode)
+ require.Equal(t, expectedServerID, serverID)
+ require.NotZero(t, clientAuth.tm.tokenMap["example.com"])
+ require.Equal(t, 3, requestsSent(), "should have sent 3 requests since our token was invalidated")
+ })
+ })
+ }
+ }
+}
+
+func TestBodyNotSentDuringRedirect(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ b, err := io.ReadAll(r.Body)
+ assert.NoError(t, err)
+ assert.Empty(t, string(b))
+ if r.URL.Path != "/redirected" {
+ w.Header().Set("Location", "/redirected")
+ w.WriteHeader(http.StatusTemporaryRedirect)
+ return
+ }
+ }))
+ t.Cleanup(ts.Close)
+ client := ts.Client()
+ clientKey, _, _ := crypto.GenerateEd25519Key(rand.Reader)
+ clientAuth := ClientPeerIDAuth{PrivKey: clientKey}
+
+ req, err := http.NewRequest("POST", ts.URL, strings.NewReader("Only for authenticated servers"))
+ require.NoError(t, err)
+ req.Host = "example.com"
+ _, _, err = clientAuth.AuthenticatedDo(client, req)
+ require.ErrorContains(t, err, "signature not set") // server doesn't actually handshake
+}
+
+type instrumentedRoundTripper struct {
+ http.RoundTripper
+ timesRoundtripped int
+}
+
+func (irt *instrumentedRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+ irt.timesRoundtripped++
+ return irt.RoundTripper.RoundTrip(req)
+}
+
+func (irt *instrumentedRoundTripper) TLSClientConfig() *tls.Config {
+ return irt.RoundTripper.(*http.Transport).TLSClientConfig
+}
+
+func TestConcurrentAuth(t *testing.T) {
+ serverKey, _, err := crypto.GenerateEd25519Key(rand.Reader)
+ require.NoError(t, err)
+
+ auth := ServerPeerIDAuth{
+ PrivKey: serverKey,
+ ValidHostnameFn: func(s string) bool {
+ return s == "example.com"
+ },
+ TokenTTL: time.Hour,
+ NoTLS: true,
+ Next: func(_ peer.ID, w http.ResponseWriter, r *http.Request) {
+ reqBody, err := io.ReadAll(r.Body)
+ require.NoError(t, err)
+ _, err = w.Write(reqBody)
+ require.NoError(t, err)
+ },
+ }
+
+ ts := httptest.NewServer(&auth)
+ t.Cleanup(ts.Close)
+
+ wg := sync.WaitGroup{}
+ for i := 0; i < 10; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ clientKey, _, err := crypto.GenerateEd25519Key(rand.Reader)
+ require.NoError(t, err)
+
+ clientAuth := ClientPeerIDAuth{PrivKey: clientKey}
+ reqBody := []byte(fmt.Sprintf("echo %d", i))
+ req, err := http.NewRequest("POST", ts.URL, bytes.NewReader(reqBody))
+ require.NoError(t, err)
+ req.Host = "example.com"
+
+ client := ts.Client()
+ _, resp, err := clientAuth.AuthenticatedDo(client, req)
+ require.NoError(t, err)
+ require.Equal(t, http.StatusOK, resp.StatusCode)
+ respBody, err := io.ReadAll(resp.Body)
+ require.NoError(t, err)
+ require.Equal(t, reqBody, respBody)
+ }()
+ }
+ wg.Wait()
+}
diff --git a/p2p/http/auth/client.go b/p2p/http/auth/client.go
new file mode 100644
index 0000000000..3bef9d6d8b
--- /dev/null
+++ b/p2p/http/auth/client.go
@@ -0,0 +1,211 @@
+package httppeeridauth
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "sync"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/p2p/http/auth/internal/handshake"
+)
+
+type ClientPeerIDAuth struct {
+ PrivKey crypto.PrivKey
+ TokenTTL time.Duration
+
+ tm tokenMap
+}
+
+type clientAsRoundTripper struct {
+ *http.Client
+}
+
+func (c clientAsRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+ return c.Client.Do(req)
+}
+
+// AuthenticatedDo is like http.Client.Do, but it does the libp2p peer ID auth
+// handshake if needed.
+//
+// It is recommended to pass in an http.Request with `GetBody` set, so that this
+// method can retry sending the request in case a previously used token has
+// expired.
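+//
+// A minimal call looks like (sketch; clientKey and req are assumed to exist):
+//
+//	auth := &ClientPeerIDAuth{PrivKey: clientKey, TokenTTL: time.Hour}
+//	serverID, resp, err := auth.AuthenticatedDo(http.DefaultClient, req)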
+func (a *ClientPeerIDAuth) AuthenticatedDo(client *http.Client, req *http.Request) (peer.ID, *http.Response, error) {
+ return a.AuthenticateWithRoundTripper(clientAsRoundTripper{client}, req)
+}
+
+func (a *ClientPeerIDAuth) AuthenticateWithRoundTripper(rt http.RoundTripper, req *http.Request) (peer.ID, *http.Response, error) {
+ hostname := req.Host
+ ti, hasToken := a.tm.get(hostname, a.TokenTTL)
+ handshake := handshake.PeerIDAuthHandshakeClient{
+ Hostname: hostname,
+ PrivKey: a.PrivKey,
+ }
+
+ if hasToken {
+ // We have a token. Attempt to use it, but fall back to the
+ // server-initiated challenge if it fails.
+ p, resp, err := a.doWithToken(rt, req, ti)
+ switch {
+ case err == nil:
+ return p, resp, nil
+ case errors.Is(err, errTokenRejected):
+ // Token was rejected, we need to re-authenticate
+ break
+ default:
+ return "", nil, err
+ }
+
+ // Token didn't work, we need to re-authenticate.
+ // Run the server-initiated handshake
+ req = req.Clone(req.Context())
+ req.Body, err = req.GetBody()
+ if err != nil {
+ return "", nil, err
+ }
+
+ handshake.ParseHeader(resp.Header)
+ } else {
+ // We don't have a token, so we initiate the handshake ourselves.
+ // (When a token is rejected above, the server initiates it instead.)
+ handshake.SetInitiateChallenge()
+ }
+
+ serverPeerID, resp, err := a.runHandshake(rt, req, clearBody(req), &handshake)
+ if err != nil {
+ return "", nil, fmt.Errorf("failed to run handshake: %w", err)
+ }
+ a.tm.set(hostname, tokenInfo{
+ token: handshake.BearerToken(),
+ insertedAt: time.Now(),
+ peerID: serverPeerID,
+ })
+ return serverPeerID, resp, nil
+}
+
+func (a *ClientPeerIDAuth) HasToken(hostname string) bool {
+ _, hasToken := a.tm.get(hostname, a.TokenTTL)
+ return hasToken
+}
+
+func (a *ClientPeerIDAuth) runHandshake(rt http.RoundTripper, req *http.Request, b bodyMeta, hs *handshake.PeerIDAuthHandshakeClient) (peer.ID, *http.Response, error) {
+ maxSteps := 5 // Avoid infinite loops in case of buggy handshake. Shouldn't happen.
+ var resp *http.Response
+
+ err := hs.Run()
+ if err != nil {
+ return "", nil, err
+ }
+
+ sentBody := false
+ for !hs.HandshakeDone() || !sentBody {
+ req = req.Clone(req.Context())
+ hs.AddHeader(req.Header)
+ if hs.ServerAuthenticated() {
+ sentBody = true
+ b.setBody(req)
+ }
+
+ resp, err = rt.RoundTrip(req)
+ if err != nil {
+ return "", nil, err
+ }
+
+ hs.ParseHeader(resp.Header)
+ err = hs.Run()
+ if err != nil {
+ resp.Body.Close()
+ return "", nil, err
+ }
+
+ if maxSteps--; maxSteps == 0 {
+ return "", nil, errors.New("handshake took too many steps")
+ }
+ }
+
+ p, err := hs.PeerID()
+ if err != nil {
+ resp.Body.Close()
+ return "", nil, err
+ }
+ return p, resp, nil
+}
+
+var errTokenRejected = errors.New("token rejected")
+
+func (a *ClientPeerIDAuth) doWithToken(rt http.RoundTripper, req *http.Request, ti tokenInfo) (peer.ID, *http.Response, error) {
+ // Try to make the request with the token
+ req.Header.Set("Authorization", ti.token)
+ resp, err := rt.RoundTrip(req)
+ if err != nil {
+ return "", nil, err
+ }
+ if resp.StatusCode != http.StatusUnauthorized {
+ // our token is still valid
+ return ti.peerID, resp, nil
+ }
+ if req.GetBody == nil {
+ // We can't retry this request even if we wanted to.
+ // Return the response and an error
+ return "", resp, errors.New("expired token. Couldn't run handshake because req.GetBody is nil")
+ }
+ resp.Body.Close()
+
+ return "", resp, errTokenRejected
+}
+
+type bodyMeta struct {
+ body io.ReadCloser
+ contentLength int64
+ getBody func() (io.ReadCloser, error)
+}
+
+func clearBody(req *http.Request) bodyMeta {
+ defer func() {
+ req.Body = nil
+ req.ContentLength = 0
+ req.GetBody = nil
+ }()
+ return bodyMeta{body: req.Body, contentLength: req.ContentLength, getBody: req.GetBody}
+}
+
+func (b *bodyMeta) setBody(req *http.Request) {
+ req.Body = b.body
+ req.ContentLength = b.contentLength
+ req.GetBody = b.getBody
+}
+
+type tokenInfo struct {
+ token string
+ insertedAt time.Time
+ peerID peer.ID
+}
+
+type tokenMap struct {
+ tokenMapMu sync.Mutex
+ tokenMap map[string]tokenInfo
+}
+
+func (tm *tokenMap) get(hostname string, ttl time.Duration) (tokenInfo, bool) {
+ tm.tokenMapMu.Lock()
+ defer tm.tokenMapMu.Unlock()
+
+ ti, ok := tm.tokenMap[hostname]
+ if ok && ttl != 0 && time.Since(ti.insertedAt) > ttl {
+ delete(tm.tokenMap, hostname)
+ return tokenInfo{}, false
+ }
+ return ti, ok
+}
+
+func (tm *tokenMap) set(hostname string, ti tokenInfo) {
+ tm.tokenMapMu.Lock()
+ defer tm.tokenMapMu.Unlock()
+ if tm.tokenMap == nil {
+ tm.tokenMap = make(map[string]tokenInfo)
+ }
+ tm.tokenMap[hostname] = ti
+}
diff --git a/p2p/http/auth/internal/handshake/alloc_test.go b/p2p/http/auth/internal/handshake/alloc_test.go
new file mode 100644
index 0000000000..333bad4f0d
--- /dev/null
+++ b/p2p/http/auth/internal/handshake/alloc_test.go
@@ -0,0 +1,20 @@
+//go:build nocover
+
+package handshake
+
+import "testing"
+
+func TestParsePeerIDAuthSchemeParamsNoAllocNoCover(t *testing.T) {
+ str := []byte(`libp2p-PeerID peer-id="", sig="", public-key="", bearer=""`)
+
+ allocs := testing.AllocsPerRun(1000, func() {
+ p := params{}
+ err := p.parsePeerIDAuthSchemeParams(str)
+ if err != nil {
+ t.Fatal(err)
+ }
+ })
+ if allocs > 0 {
+ t.Fatalf("alloc test failed expected 0 received %0.2f", allocs)
+ }
+}
diff --git a/p2p/http/auth/internal/handshake/client.go b/p2p/http/auth/internal/handshake/client.go
new file mode 100644
index 0000000000..f8d39e9c14
--- /dev/null
+++ b/p2p/http/auth/internal/handshake/client.go
@@ -0,0 +1,247 @@
+package handshake
+
+import (
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/peer"
+)
+
+type peerIDAuthClientState int
+
+const (
+ peerIDAuthClientStateSignChallenge peerIDAuthClientState = iota
+ peerIDAuthClientStateVerifyChallenge
+ peerIDAuthClientStateDone // We have the bearer token, and there's nothing left to do
+
+ // Client initiated handshake
+ peerIDAuthClientInitiateChallenge
+ peerIDAuthClientStateVerifyAndSignChallenge
+ peerIDAuthClientStateWaitingForBearer
+)
+
+type PeerIDAuthHandshakeClient struct {
+ Hostname string
+ PrivKey crypto.PrivKey
+
+ serverPeerID peer.ID
+ serverPubKey crypto.PubKey
+ state peerIDAuthClientState
+ p params
+ hb headerBuilder
+ challengeServer []byte
+ buf [128]byte
+}
+
+var errMissingChallenge = errors.New("missing challenge")
+
+func (h *PeerIDAuthHandshakeClient) SetInitiateChallenge() {
+ h.state = peerIDAuthClientInitiateChallenge
+}
+
+func (h *PeerIDAuthHandshakeClient) ParseHeader(header http.Header) error {
+ if h.state == peerIDAuthClientStateDone || h.state == peerIDAuthClientInitiateChallenge {
+ return nil
+ }
+ h.p = params{}
+
+ var headerVal []byte
+ switch h.state {
+ case peerIDAuthClientStateSignChallenge, peerIDAuthClientStateVerifyAndSignChallenge:
+ headerVal = []byte(header.Get("WWW-Authenticate"))
+ case peerIDAuthClientStateVerifyChallenge, peerIDAuthClientStateWaitingForBearer:
+ headerVal = []byte(header.Get("Authentication-Info"))
+ }
+
+ if len(headerVal) == 0 {
+ return errMissingChallenge
+ }
+
+ err := h.p.parsePeerIDAuthSchemeParams(headerVal)
+ if err != nil {
+ return err
+ }
+
+ if h.serverPubKey == nil && len(h.p.publicKeyB64) > 0 {
+ serverPubKeyBytes, err := base64.URLEncoding.AppendDecode(nil, h.p.publicKeyB64)
+ if err != nil {
+ return err
+ }
+ h.serverPubKey, err = crypto.UnmarshalPublicKey(serverPubKeyBytes)
+ if err != nil {
+ return err
+ }
+ h.serverPeerID, err = peer.IDFromPublicKey(h.serverPubKey)
+ if err != nil {
+ return err
+ }
+ }
+
+ return err
+}
+
+func (h *PeerIDAuthHandshakeClient) Run() error {
+ if h.state == peerIDAuthClientStateDone {
+ return nil
+ }
+
+ h.hb.clear()
+ clientPubKeyBytes, err := crypto.MarshalPublicKey(h.PrivKey.GetPublic())
+ if err != nil {
+ return err
+ }
+ switch h.state {
+ case peerIDAuthClientInitiateChallenge:
+ h.hb.writeScheme(PeerIDAuthScheme)
+ if err := h.addChallengeServerParam(); err != nil {
+ return err
+ }
+ h.hb.writeParamB64(nil, "public-key", clientPubKeyBytes)
+ h.state = peerIDAuthClientStateVerifyAndSignChallenge
+ return nil
+ case peerIDAuthClientStateVerifyAndSignChallenge:
+ if len(h.p.sigB64) == 0 && len(h.p.challengeClient) != 0 {
+ // The server refused a client-initiated handshake, so we need to run the server-initiated handshake
+ h.state = peerIDAuthClientStateSignChallenge
+ return h.Run()
+ }
+ if err := h.verifySig(clientPubKeyBytes); err != nil {
+ return err
+ }
+
+ h.hb.writeScheme(PeerIDAuthScheme)
+ h.hb.writeParam("opaque", h.p.opaqueB64)
+ h.addSigParam()
+ h.state = peerIDAuthClientStateWaitingForBearer
+ return nil
+
+ case peerIDAuthClientStateWaitingForBearer:
+ h.hb.writeScheme(PeerIDAuthScheme)
+ h.hb.writeParam("bearer", h.p.bearerTokenB64)
+ h.state = peerIDAuthClientStateDone
+ return nil
+
+ case peerIDAuthClientStateSignChallenge:
+ if len(h.p.challengeClient) < challengeLen {
+ return errors.New("challenge too short")
+ }
+
+ h.hb.writeScheme(PeerIDAuthScheme)
+ h.hb.writeParamB64(nil, "public-key", clientPubKeyBytes)
+ if err := h.addChallengeServerParam(); err != nil {
+ return err
+ }
+ if err := h.addSigParam(); err != nil {
+ return err
+ }
+ h.hb.writeParam("opaque", h.p.opaqueB64)
+
+ h.state = peerIDAuthClientStateVerifyChallenge
+ return nil
+ case peerIDAuthClientStateVerifyChallenge:
+ if err := h.verifySig(clientPubKeyBytes); err != nil {
+ return err
+ }
+
+ h.hb.writeScheme(PeerIDAuthScheme)
+ h.hb.writeParam("bearer", h.p.bearerTokenB64)
+ h.state = peerIDAuthClientStateDone
+
+ return nil
+ }
+
+ return errors.New("unhandled state")
+}
+
+func (h *PeerIDAuthHandshakeClient) addChallengeServerParam() error {
+ _, err := io.ReadFull(randReader, h.buf[:challengeLen])
+ if err != nil {
+ return err
+ }
+ h.challengeServer = base64.URLEncoding.AppendEncode(nil, h.buf[:challengeLen])
+ clear(h.buf[:challengeLen])
+ h.hb.writeParam("challenge-server", h.challengeServer)
+ return nil
+}
+
+func (h *PeerIDAuthHandshakeClient) verifySig(clientPubKeyBytes []byte) error {
+ if len(h.p.sigB64) == 0 {
+ return errors.New("signature not set")
+ }
+ sig, err := base64.URLEncoding.AppendDecode(nil, h.p.sigB64)
+ if err != nil {
+ return fmt.Errorf("failed to decode signature: %w", err)
+ }
+ err = verifySig(h.serverPubKey, PeerIDAuthScheme, []sigParam{
+ {"challenge-server", h.challengeServer},
+ {"client-public-key", clientPubKeyBytes},
+ {"hostname", []byte(h.Hostname)},
+ }, sig)
+ return err
+}
+
+func (h *PeerIDAuthHandshakeClient) addSigParam() error {
+ if h.serverPubKey == nil {
+ return errors.New("server public key not set")
+ }
+ serverPubKeyBytes, err := crypto.MarshalPublicKey(h.serverPubKey)
+ if err != nil {
+ return err
+ }
+ clientSig, err := sign(h.PrivKey, PeerIDAuthScheme, []sigParam{
+ {"challenge-client", h.p.challengeClient},
+ {"server-public-key", serverPubKeyBytes},
+ {"hostname", []byte(h.Hostname)},
+ })
+ if err != nil {
+ return fmt.Errorf("failed to sign challenge: %w", err)
+ }
+ h.hb.writeParamB64(nil, "sig", clientSig)
+ return nil
+}
+
+// PeerID returns the peer ID of the authenticated server.
+func (h *PeerIDAuthHandshakeClient) PeerID() (peer.ID, error) {
+ switch h.state {
+ case peerIDAuthClientStateDone:
+ case peerIDAuthClientStateWaitingForBearer:
+ default:
+ return "", errors.New("server not authenticated yet")
+ }
+
+ if h.serverPeerID == "" {
+ return "", errors.New("peer ID not set")
+ }
+ return h.serverPeerID, nil
+}
+
+func (h *PeerIDAuthHandshakeClient) AddHeader(hdr http.Header) {
+ hdr.Set("Authorization", h.hb.b.String())
+}
+
+// BearerToken returns the server-given bearer token for the client. Set this on
+// the Authorization header in the client's request.
+func (h *PeerIDAuthHandshakeClient) BearerToken() string {
+ if h.state != peerIDAuthClientStateDone {
+ return ""
+ }
+ return h.hb.b.String()
+}
+
+func (h *PeerIDAuthHandshakeClient) ServerAuthenticated() bool {
+ switch h.state {
+ case peerIDAuthClientStateDone:
+ case peerIDAuthClientStateWaitingForBearer:
+ default:
+ return false
+ }
+
+ return h.serverPeerID != ""
+}
+
+func (h *PeerIDAuthHandshakeClient) HandshakeDone() bool {
+ return h.state == peerIDAuthClientStateDone
+}
diff --git a/p2p/http/auth/internal/handshake/handshake.go b/p2p/http/auth/internal/handshake/handshake.go
new file mode 100644
index 0000000000..1c237ae3a3
--- /dev/null
+++ b/p2p/http/auth/internal/handshake/handshake.go
@@ -0,0 +1,218 @@
+package handshake
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/rand"
+ "encoding/base64"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "slices"
+ "strings"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/crypto"
+
+ pool "github.com/libp2p/go-buffer-pool"
+)
+
+const PeerIDAuthScheme = "libp2p-PeerID"
+const challengeLen = 32
+const maxHeaderSize = 2048
+
+var peerIDAuthSchemeBytes = []byte(PeerIDAuthScheme)
+
+var errTooBig = errors.New("header value too big")
+var errInvalid = errors.New("invalid header value")
+var errNotRan = errors.New("not run yet; call Run() first")
+
+var randReader = rand.Reader // A var so it can be changed in tests
+var nowFn = time.Now // A var so it can be changed in tests
+
+// params represent params passed in via headers. All []byte fields to avoid allocations.
+type params struct {
+ bearerTokenB64 []byte
+ challengeClient []byte
+ challengeServer []byte
+ opaqueB64 []byte
+ publicKeyB64 []byte
+ sigB64 []byte
+}
+
+// parsePeerIDAuthSchemeParams parses the parameters of the PeerID auth scheme
+// from the header string. zero alloc.
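+//
+// For example, the header value
+//
+//	libp2p-PeerID challenge-client="...", public-key="..."
+//
+// fills p.challengeClient and p.publicKeyB64 with the quoted values.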
+func (p *params) parsePeerIDAuthSchemeParams(headerVal []byte) error {
+ if len(headerVal) > maxHeaderSize {
+ return errTooBig
+ }
+ startIdx := bytes.Index(headerVal, peerIDAuthSchemeBytes)
+ if startIdx == -1 {
+ return nil
+ }
+
+ headerVal = headerVal[startIdx+len(PeerIDAuthScheme):]
+ advance, token, err := splitAuthHeaderParams(headerVal, true)
+ for ; err == nil; advance, token, err = splitAuthHeaderParams(headerVal, true) {
+ headerVal = headerVal[advance:]
+ bs := token
+ splitAt := bytes.Index(bs, []byte("="))
+ if splitAt == -1 {
+ return errInvalid
+ }
+ kB := bs[:splitAt]
+ v := bs[splitAt+1:]
+ if len(v) < 2 || v[0] != '"' || v[len(v)-1] != '"' {
+ return errInvalid
+ }
+ v = v[1 : len(v)-1] // drop quotes
+ switch string(kB) {
+ case "bearer":
+ p.bearerTokenB64 = v
+ case "challenge-client":
+ p.challengeClient = v
+ case "challenge-server":
+ p.challengeServer = v
+ case "opaque":
+ p.opaqueB64 = v
+ case "public-key":
+ p.publicKeyB64 = v
+ case "sig":
+ p.sigB64 = v
+ }
+ }
+ if err == bufio.ErrFinalToken {
+ err = nil
+ }
+ return err
+}
+
+func splitAuthHeaderParams(data []byte, atEOF bool) (advance int, token []byte, err error) {
+ if len(data) == 0 && atEOF {
+ return 0, nil, bufio.ErrFinalToken
+ }
+
+ start := 0
+ for start < len(data) && (data[start] == ' ' || data[start] == ',') {
+ // Ignore leading spaces and commas
+ start++
+ }
+ if start == len(data) {
+ // Nothing but separators remained; treat it as the end of input.
+ return len(data), nil, bufio.ErrFinalToken
+ }
+ end := start + 1
+ for end < len(data) && data[end] != ' ' && data[end] != ',' {
+ // Consume until we hit a space or comma
+ end++
+ }
+ token = data[start:end]
+ if !bytes.ContainsAny(token, "=") {
+ // This isn't a param. It's likely the next scheme. We're done
+ return len(data), nil, bufio.ErrFinalToken
+ }
+
+ return end, token, nil
+}
+
+type headerBuilder struct {
+ b strings.Builder
+ pastFirstField bool
+}
+
+func (h *headerBuilder) clear() {
+ h.b.Reset()
+ h.pastFirstField = false
+}
+
+func (h *headerBuilder) writeScheme(scheme string) {
+ h.b.WriteString(scheme)
+ h.b.WriteByte(' ')
+}
+
+func (h *headerBuilder) maybeAddComma() {
+ if !h.pastFirstField {
+ h.pastFirstField = true
+ return
+ }
+ h.b.WriteString(", ")
+}
+
+// writeParamB64 writes a key-value pair to the header, base64-encoding the
+// value first. It uses buf as scratch space.
+func (h *headerBuilder) writeParamB64(buf []byte, key string, val []byte) {
+ if buf == nil {
+ buf = make([]byte, base64.URLEncoding.EncodedLen(len(val)))
+ }
+ encodedVal := base64.URLEncoding.AppendEncode(buf[:0], val)
+ h.writeParam(key, encodedVal)
+}
+
+// writeParam writes a key-value pair to the header. It writes val as-is.
+func (h *headerBuilder) writeParam(key string, val []byte) {
+ if len(val) == 0 {
+ return
+ }
+ h.maybeAddComma()
+
+ h.b.Grow(len(key) + len(`="`) + len(val) + 1)
+ // Not doing fmt.Fprintf here to avoid one allocation
+ h.b.WriteString(key)
+ h.b.WriteString(`="`)
+ h.b.Write(val)
+ h.b.WriteByte('"')
+}
+
+type sigParam struct {
+ k string
+ v []byte
+}
+
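+// verifySig rebuilds the canonical payload from prefix and signedParts (see
+// genDataToSign) and checks sig against it with publicKey.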
+func verifySig(publicKey crypto.PubKey, prefix string, signedParts []sigParam, sig []byte) error {
+ if publicKey == nil {
+ return fmt.Errorf("no public key to verify signature")
+ }
+
+ b := pool.Get(4096)
+ defer pool.Put(b)
+ buf, err := genDataToSign(b[:0], prefix, signedParts)
+ if err != nil {
+ return fmt.Errorf("failed to generate signed data: %w", err)
+ }
+ ok, err := publicKey.Verify(buf, sig)
+ if err != nil {
+ return err
+ }
+ if !ok {
+ return fmt.Errorf("signature verification failed")
+ }
+
+ return nil
+}
+
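+// sign builds the canonical payload from prefix and partsToSign (see
+// genDataToSign) and signs it with privKey.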
+func sign(privKey crypto.PrivKey, prefix string, partsToSign []sigParam) ([]byte, error) {
+ if privKey == nil {
+ return nil, fmt.Errorf("no private key available to sign")
+ }
+ b := pool.Get(4096)
+ defer pool.Put(b)
+ buf, err := genDataToSign(b[:0], prefix, partsToSign)
+ if err != nil {
+ return nil, fmt.Errorf("failed to generate data to sign: %w", err)
+ }
+ return privKey.Sign(buf)
+}
+
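+// genDataToSign appends the canonical payload to sign to buf: the scheme
+// prefix followed by each part as "k=v", sorted by key, each preceded by the
+// uvarint length of that part. As an illustration with hypothetical values,
+// the part hostname=example.com is 20 bytes long, so it is encoded as the
+// byte 0x14 followed by "hostname=example.com".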
+func genDataToSign(buf []byte, prefix string, parts []sigParam) ([]byte, error) {
+ // Sort the parts in lexicographic order
+ slices.SortFunc(parts, func(a, b sigParam) int {
+ return strings.Compare(a.k, b.k)
+ })
+ buf = append(buf, prefix...)
+ for _, p := range parts {
+ buf = binary.AppendUvarint(buf, uint64(len(p.k)+1+len(p.v))) // +1 for '='
+ buf = append(buf, p.k...)
+ buf = append(buf, '=')
+ buf = append(buf, p.v...)
+ }
+ return buf, nil
+}
diff --git a/p2p/http/auth/internal/handshake/handshake_test.go b/p2p/http/auth/internal/handshake/handshake_test.go
new file mode 100644
index 0000000000..ce22cd0b12
--- /dev/null
+++ b/p2p/http/auth/internal/handshake/handshake_test.go
@@ -0,0 +1,652 @@
+package handshake
+
+import (
+ "bytes"
+ "crypto/hmac"
+ "crypto/rand"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/url"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/stretchr/testify/require"
+)
+
+func TestHandshake(t *testing.T) {
+ for _, clientInitiated := range []bool{true, false} {
+ t.Run(fmt.Sprintf("clientInitiated=%t", clientInitiated), func(t *testing.T) {
+ hostname := "example.com"
+ serverPriv, _, _ := crypto.GenerateEd25519Key(rand.Reader)
+ clientPriv, _, _ := crypto.GenerateEd25519Key(rand.Reader)
+
+ serverHandshake := PeerIDAuthHandshakeServer{
+ Hostname: hostname,
+ PrivKey: serverPriv,
+ TokenTTL: time.Hour,
+ Hmac: hmac.New(sha256.New, make([]byte, 32)),
+ }
+
+ clientHandshake := PeerIDAuthHandshakeClient{
+ Hostname: hostname,
+ PrivKey: clientPriv,
+ }
+ if clientInitiated {
+ clientHandshake.state = peerIDAuthClientInitiateChallenge
+ }
+
+ headers := make(http.Header)
+
+ // Start the handshake
+ if !clientInitiated {
+ require.NoError(t, serverHandshake.ParseHeaderVal(nil))
+ require.NoError(t, serverHandshake.Run())
+ serverHandshake.SetHeader(headers)
+ }
+
+ // Server Initiated: Client receives the challenge and signs it. Also sends the challenge-server
+ // Client Initiated: Client forms the challenge and sends it
+ require.NoError(t, clientHandshake.ParseHeader(headers))
+ clear(headers)
+ require.NoError(t, clientHandshake.Run())
+ clientHandshake.AddHeader(headers)
+
+ // Server Initiated: Server receives the sig and verifies it. Also signs the challenge-server (client authenticated)
+ // Client Initiated: Server receives the challenge and signs it. Also sends the challenge-client
+ serverHandshake.Reset()
+ require.NoError(t, serverHandshake.ParseHeaderVal([]byte(headers.Get("Authorization"))))
+ clear(headers)
+ require.NoError(t, serverHandshake.Run())
+ serverHandshake.SetHeader(headers)
+
+ // Server Initiated: Client verifies the sig and sets the bearer token for future requests (server authenticated)
+ // Client Initiated: Client verifies the sig and signs the challenge-client. Sends it along with any application data (server authenticated)
+ require.NoError(t, clientHandshake.ParseHeader(headers))
+ clear(headers)
+ require.NoError(t, clientHandshake.Run())
+ clientHandshake.AddHeader(headers)
+
+ // Server Initiated: Server verifies the bearer token
+ // Client Initiated: Server verifies the sig, sets the bearer token (client authenticated)
+ // and processes any application data
+ serverHandshake.Reset()
+ require.NoError(t, serverHandshake.ParseHeaderVal([]byte(headers.Get("Authorization"))))
+ clear(headers)
+ require.NoError(t, serverHandshake.Run())
+ serverHandshake.SetHeader(headers)
+
+ expectedClientPeerID, _ := peer.IDFromPrivateKey(clientPriv)
+ expectedServerPeerID, _ := peer.IDFromPrivateKey(serverPriv)
+ clientPeerID, err := serverHandshake.PeerID()
+ require.NoError(t, err)
+ require.Equal(t, expectedClientPeerID, clientPeerID)
+
+ serverPeerID, err := clientHandshake.PeerID()
+ require.NoError(t, err)
+ require.Equal(t, expectedServerPeerID, serverPeerID)
+ })
+ }
+}
+
+func TestServerRefusesClientInitiatedHandshake(t *testing.T) {
+ hostname := "example.com"
+ serverPriv, _, _ := crypto.GenerateEd25519Key(rand.Reader)
+ clientPriv, _, _ := crypto.GenerateEd25519Key(rand.Reader)
+
+ serverHandshake := PeerIDAuthHandshakeServer{
+ Hostname: hostname,
+ PrivKey: serverPriv,
+ TokenTTL: time.Hour,
+ Hmac: hmac.New(sha256.New, make([]byte, 32)),
+ }
+
+ clientHandshake := PeerIDAuthHandshakeClient{
+ Hostname: hostname,
+ PrivKey: clientPriv,
+ }
+ clientHandshake.SetInitiateChallenge()
+
+ headers := make(http.Header)
+ // Client initiates the handshake
+ require.NoError(t, clientHandshake.Run())
+ clientHandshake.AddHeader(headers)
+
+ // Server receives the challenge-server, but chooses to reject it (simulating this by not passing the challenge)
+ serverHandshake.Reset()
+ require.NoError(t, serverHandshake.ParseHeaderVal(nil))
+ clear(headers)
+ require.NoError(t, serverHandshake.Run())
+ serverHandshake.SetHeader(headers)
+
+ // Client now runs the server-initiated handshake. Signs challenge-client; sends challenge-server
+ require.NoError(t, clientHandshake.ParseHeader(headers))
+ clear(headers)
+ require.NoError(t, clientHandshake.Run())
+ clientHandshake.AddHeader(headers)
+
+ // Server verifies the challenge-client and signs the challenge-server
+ serverHandshake.Reset()
+ require.NoError(t, serverHandshake.ParseHeaderVal([]byte(headers.Get("Authorization"))))
+ clear(headers)
+ require.NoError(t, serverHandshake.Run())
+ serverHandshake.SetHeader(headers)
+
+ // Client verifies the challenge-server and sets the bearer token
+ require.NoError(t, clientHandshake.ParseHeader(headers))
+ clear(headers)
+ require.NoError(t, clientHandshake.Run())
+ clientHandshake.AddHeader(headers)
+
+ expectedClientPeerID, _ := peer.IDFromPrivateKey(clientPriv)
+ expectedServerPeerID, _ := peer.IDFromPrivateKey(serverPriv)
+ clientPeerID, err := serverHandshake.PeerID()
+ require.NoError(t, err)
+ require.Equal(t, expectedClientPeerID, clientPeerID)
+
+ serverPeerID, err := clientHandshake.PeerID()
+ require.NoError(t, err)
+ require.True(t, clientHandshake.HandshakeDone())
+ require.Equal(t, expectedServerPeerID, serverPeerID)
+}
+
+func BenchmarkServerHandshake(b *testing.B) {
+ clientHeader1 := make(http.Header)
+ clientHeader2 := make(http.Header)
+ headers := make(http.Header)
+
+ hostname := "example.com"
+ serverPriv, _, _ := crypto.GenerateEd25519Key(rand.Reader)
+ clientPriv, _, _ := crypto.GenerateEd25519Key(rand.Reader)
+
+ serverHandshake := PeerIDAuthHandshakeServer{
+ Hostname: hostname,
+ PrivKey: serverPriv,
+ TokenTTL: time.Hour,
+ Hmac: hmac.New(sha256.New, make([]byte, 32)),
+ }
+
+ clientHandshake := PeerIDAuthHandshakeClient{
+ Hostname: hostname,
+ PrivKey: clientPriv,
+ }
+ require.NoError(b, serverHandshake.ParseHeaderVal(nil))
+ require.NoError(b, serverHandshake.Run())
+ serverHandshake.SetHeader(headers)
+
+ // Client receives the challenge and signs it. Also sends the challenge-server
+ require.NoError(b, clientHandshake.ParseHeader(headers))
+ clear(headers)
+ require.NoError(b, clientHandshake.Run())
+ clientHandshake.AddHeader(clientHeader1)
+
+ // Server receives the sig and verifies it. Also signs the challenge-server
+ serverHandshake.Reset()
+ require.NoError(b, serverHandshake.ParseHeaderVal([]byte(clientHeader1.Get("Authorization"))))
+ clear(headers)
+ require.NoError(b, serverHandshake.Run())
+ serverHandshake.SetHeader(headers)
+
+ // Client verifies sig and sets the bearer token for future requests
+ require.NoError(b, clientHandshake.ParseHeader(headers))
+ clear(headers)
+ require.NoError(b, clientHandshake.Run())
+ clientHandshake.AddHeader(clientHeader2)
+
+ // Server verifies the bearer token
+ serverHandshake.Reset()
+ require.NoError(b, serverHandshake.ParseHeaderVal([]byte(clientHeader2.Get("Authorization"))))
+ clear(headers)
+ require.NoError(b, serverHandshake.Run())
+ serverHandshake.SetHeader(headers)
+
+ initialClientAuth := []byte(clientHeader1.Get("Authorization"))
+ bearerClientAuth := []byte(clientHeader2.Get("Authorization"))
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ serverHandshake.Reset()
+ serverHandshake.ParseHeaderVal(nil)
+ serverHandshake.Run()
+
+ serverHandshake.Reset()
+ serverHandshake.ParseHeaderVal(initialClientAuth)
+ serverHandshake.Run()
+
+ serverHandshake.Reset()
+ serverHandshake.ParseHeaderVal(bearerClientAuth)
+ serverHandshake.Run()
+ }
+}
+
+func TestParsePeerIDAuthSchemeParams(t *testing.T) {
+ str := `libp2p-PeerID sig="", public-key="", bearer=""`
+ p := params{}
+ expectedParam := params{
+ sigB64: []byte(``),
+ publicKeyB64: []byte(``),
+ bearerTokenB64: []byte(``),
+ }
+ err := p.parsePeerIDAuthSchemeParams([]byte(str))
+ require.NoError(t, err)
+ require.Equal(t, expectedParam, p)
+}
+
+func BenchmarkParsePeerIDAuthSchemeParams(b *testing.B) {
+ str := []byte(`libp2p-PeerID peer-id="", sig="", public-key="", bearer=""`)
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ p := params{}
+ err := p.parsePeerIDAuthSchemeParams(str)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestHeaderBuilder(t *testing.T) {
+ hb := headerBuilder{}
+ hb.writeScheme(PeerIDAuthScheme)
+ hb.writeParam("peer-id", []byte("foo"))
+ hb.writeParam("challenge-client", []byte("something-else"))
+ hb.writeParam("hostname", []byte("example.com"))
+
+ expected := `libp2p-PeerID peer-id="foo", challenge-client="something-else", hostname="example.com"`
+ require.Equal(t, expected, hb.b.String())
+}
+
+func BenchmarkHeaderBuilder(b *testing.B) {
+ h := headerBuilder{}
+ scratch := make([]byte, 256)
+ scratch = scratch[:0]
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ h.b.Grow(256)
+ h.writeParamB64(scratch, "foo", []byte("bar"))
+ h.clear()
+ }
+}
+
+// Test Vectors
+var zeroBytes = make([]byte, 64)
+var zeroKey, _, _ = crypto.GenerateEd25519Key(bytes.NewReader(zeroBytes))
+
+// Peer ID derived from the zero key
+var zeroID, _ = peer.IDFromPublicKey(zeroKey.GetPublic())
+
+func TestOpaqueStateRoundTrip(t *testing.T) {
+ zeroBytes := [32]byte{}
+
+ // To drop the monotonic clock reading
+ timeAfterUnmarshal := time.Now()
+ b, err := json.Marshal(timeAfterUnmarshal)
+ require.NoError(t, err)
+ require.NoError(t, json.Unmarshal(b, &timeAfterUnmarshal))
+ hmac := hmac.New(sha256.New, zeroBytes[:])
+
+ o := opaqueState{
+ ChallengeClient: "foo-bar",
+ CreatedTime: timeAfterUnmarshal,
+ IsToken: true,
+ PeerID: zeroID,
+ Hostname: "example.com",
+ }
+
+ hmac.Reset()
+ b, err = o.Marshal(hmac, nil)
+ require.NoError(t, err)
+
+ o2 := opaqueState{}
+
+ hmac.Reset()
+ err = o2.Unmarshal(hmac, b)
+ require.NoError(t, err)
+ require.EqualValues(t, o, o2)
+}
+
+func FuzzServerHandshakeNoPanic(f *testing.F) {
+ zeroBytes := [32]byte{}
+ hmac := hmac.New(sha256.New, zeroBytes[:])
+
+ f.Fuzz(func(_ *testing.T, data []byte) {
+ hmac.Reset()
+ h := PeerIDAuthHandshakeServer{
+ Hostname: "example.com",
+ PrivKey: zeroKey,
+ Hmac: hmac,
+ }
+ err := h.ParseHeaderVal(data)
+ if err != nil {
+ return
+ }
+ err = h.Run()
+ if err != nil {
+ return
+ }
+ h.PeerID()
+ })
+}
+
+func BenchmarkOpaqueStateWrite(b *testing.B) {
+ zeroBytes := [32]byte{}
+ hmac := hmac.New(sha256.New, zeroBytes[:])
+ o := opaqueState{
+ ChallengeClient: "foo-bar",
+ CreatedTime: time.Now(),
+ }
+ d := make([]byte, 512)
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ hmac.Reset()
+ _, err := o.Marshal(hmac, d[:0])
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func BenchmarkOpaqueStateRead(b *testing.B) {
+ zeroBytes := [32]byte{}
+ hmac := hmac.New(sha256.New, zeroBytes[:])
+ o := opaqueState{
+ ChallengeClient: "foo-bar",
+ CreatedTime: time.Now(),
+ }
+ d := make([]byte, 256)
+ d, err := o.Marshal(hmac, d[:0])
+ require.NoError(b, err)
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ hmac.Reset()
+ err := o.Unmarshal(hmac, d)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func FuzzParsePeerIDAuthSchemeParamsNoPanic(f *testing.F) {
+ p := params{}
+ // Just check that we don't panic
+ f.Fuzz(func(_ *testing.T, data []byte) {
+ p.parsePeerIDAuthSchemeParams(data)
+ })
+}
+
+type specsExampleParameters struct {
+ hostname string
+ serverPriv crypto.PrivKey
+ serverHmacKey [32]byte
+ clientPriv crypto.PrivKey
+}
+
+func TestSpecsExample(t *testing.T) {
+ originalRandReader := randReader
+ originalNowFn := nowFn
+ randReader = bytes.NewReader(append(
+ bytes.Repeat([]byte{0x11}, 32),
+ bytes.Repeat([]byte{0x33}, 32)...,
+ ))
+ nowFn = func() time.Time {
+ return time.Unix(0, 0)
+ }
+ defer func() {
+ randReader = originalRandReader
+ nowFn = originalNowFn
+ }()
+
+ parameters := specsExampleParameters{
+ hostname: "example.com",
+ }
+ serverPrivBytes, err := hex.AppendDecode(nil, []byte("0801124001010101010101010101010101010101010101010101010101010101010101018a88e3dd7409f195fd52db2d3cba5d72ca6709bf1d94121bf3748801b40f6f5c"))
+ require.NoError(t, err)
+ clientPrivBytes, err := hex.AppendDecode(nil, []byte("0801124002020202020202020202020202020202020202020202020202020202020202028139770ea87d175f56a35466c34c7ecccb8d8a91b4ee37a25df60f5b8fc9b394"))
+ require.NoError(t, err)
+
+ parameters.serverPriv, err = crypto.UnmarshalPrivateKey(serverPrivBytes)
+ require.NoError(t, err)
+
+ parameters.clientPriv, err = crypto.UnmarshalPrivateKey(clientPrivBytes)
+ require.NoError(t, err)
+
+ serverHandshake := PeerIDAuthHandshakeServer{
+ Hostname: parameters.hostname,
+ PrivKey: parameters.serverPriv,
+ TokenTTL: time.Hour,
+ Hmac: hmac.New(sha256.New, parameters.serverHmacKey[:]),
+ }
+
+ clientHandshake := PeerIDAuthHandshakeClient{
+ Hostname: parameters.hostname,
+ PrivKey: parameters.clientPriv,
+ }
+
+ headers := make(http.Header)
+
+ // Start the handshake
+ require.NoError(t, serverHandshake.ParseHeaderVal(nil))
+ require.NoError(t, serverHandshake.Run())
+ serverHandshake.SetHeader(headers)
+ initialWWWAuthenticate := headers.Get("WWW-Authenticate")
+
+ // Client receives the challenge and signs it. Also sends the challenge-server
+ require.NoError(t, clientHandshake.ParseHeader(headers))
+ clear(headers)
+ require.NoError(t, clientHandshake.Run())
+ clientHandshake.AddHeader(headers)
+ clientAuthentication := headers.Get("Authorization")
+
+ // Server receives the sig and verifies it. Also signs the challenge-server
+ serverHandshake.Reset()
+ require.NoError(t, serverHandshake.ParseHeaderVal([]byte(headers.Get("Authorization"))))
+ clear(headers)
+ require.NoError(t, serverHandshake.Run())
+ serverHandshake.SetHeader(headers)
+ serverAuthentication := headers.Get("Authentication-Info")
+
+ // Client verifies sig and sets the bearer token for future requests
+ require.NoError(t, clientHandshake.ParseHeader(headers))
+ clear(headers)
+ require.NoError(t, clientHandshake.Run())
+ clientHandshake.AddHeader(headers)
+ clientBearerToken := headers.Get("Authorization")
+
+ params := params{}
+ params.parsePeerIDAuthSchemeParams([]byte(initialWWWAuthenticate))
+ challengeClient := params.challengeClient
+ params.parsePeerIDAuthSchemeParams([]byte(clientAuthentication))
+ challengeServer := params.challengeServer
+
+ fmt.Println("### Parameters")
+ fmt.Println("| Parameter | Value |")
+ fmt.Println("| --- | --- |")
+ fmt.Printf("| hostname | %s |\n", parameters.hostname)
+ fmt.Printf("| Server Private Key (pb encoded as hex) | %s |\n", hex.EncodeToString(serverPrivBytes))
+ fmt.Printf("| Server HMAC Key (hex) | %s |\n", hex.EncodeToString(parameters.serverHmacKey[:]))
+ fmt.Printf("| Challenge Client | %s |\n", string(challengeClient))
+ fmt.Printf("| Client Private Key (pb encoded as hex) | %s |\n", hex.EncodeToString(clientPrivBytes))
+ fmt.Printf("| Challenge Server | %s |\n", string(challengeServer))
+ fmt.Printf("| \"Now\" time | %s |\n", nowFn())
+ fmt.Println()
+ fmt.Println("### Handshake Diagram")
+
+ fmt.Println("```mermaid")
+ fmt.Printf(`sequenceDiagram
+Client->>Server: Initial request
+Server->>Client: WWW-Authenticate=%s
+Client->>Server: Authorization=%s
+Note left of Server: Server has authenticated Client
+Server->>Client: Authentication-Info=%s
+Note right of Client: Client has authenticated Server
+
+Note over Client: Future requests use the bearer token
+Client->>Server: Authorization=%s
+`, initialWWWAuthenticate, clientAuthentication, serverAuthentication, clientBearerToken)
+ fmt.Println("```")
+}
+
+func TestSpecsClientInitiatedExample(t *testing.T) {
+ originalRandReader := randReader
+ originalNowFn := nowFn
+ randReader = bytes.NewReader(append(
+ bytes.Repeat([]byte{0x33}, 32),
+ bytes.Repeat([]byte{0x11}, 32)...,
+ ))
+ nowFn = func() time.Time {
+ return time.Unix(0, 0)
+ }
+ defer func() {
+ randReader = originalRandReader
+ nowFn = originalNowFn
+ }()
+
+ parameters := specsExampleParameters{
+ hostname: "example.com",
+ }
+ serverPrivBytes, err := hex.AppendDecode(nil, []byte("0801124001010101010101010101010101010101010101010101010101010101010101018a88e3dd7409f195fd52db2d3cba5d72ca6709bf1d94121bf3748801b40f6f5c"))
+ require.NoError(t, err)
+ clientPrivBytes, err := hex.AppendDecode(nil, []byte("0801124002020202020202020202020202020202020202020202020202020202020202028139770ea87d175f56a35466c34c7ecccb8d8a91b4ee37a25df60f5b8fc9b394"))
+ require.NoError(t, err)
+
+ parameters.serverPriv, err = crypto.UnmarshalPrivateKey(serverPrivBytes)
+ require.NoError(t, err)
+
+ parameters.clientPriv, err = crypto.UnmarshalPrivateKey(clientPrivBytes)
+ require.NoError(t, err)
+
+ serverHandshake := PeerIDAuthHandshakeServer{
+ Hostname: parameters.hostname,
+ PrivKey: parameters.serverPriv,
+ TokenTTL: time.Hour,
+ Hmac: hmac.New(sha256.New, parameters.serverHmacKey[:]),
+ }
+
+ clientHandshake := PeerIDAuthHandshakeClient{
+ Hostname: parameters.hostname,
+ PrivKey: parameters.clientPriv,
+ }
+
+ headers := make(http.Header)
+
+ // Start the handshake
+ clientHandshake.SetInitiateChallenge()
+ require.NoError(t, clientHandshake.Run())
+ clientHandshake.AddHeader(headers)
+ clientChallenge := headers.Get("Authorization")
+
+ // Server receives the challenge and signs it. Also sends challenge-client
+ serverHandshake.Reset()
+ require.NoError(t, serverHandshake.ParseHeaderVal([]byte(headers.Get("Authorization"))))
+ clear(headers)
+ require.NoError(t, serverHandshake.Run())
+ serverHandshake.SetHeader(headers)
+ serverAuthentication := headers.Get("WWW-Authenticate")
+ params := params{}
+ params.parsePeerIDAuthSchemeParams([]byte(serverAuthentication))
+ challengeClient := params.challengeClient
+
+ // Client verifies sig and signs the challenge-client
+ require.NoError(t, clientHandshake.ParseHeader(headers))
+ clear(headers)
+ require.NoError(t, clientHandshake.Run())
+ clientHandshake.AddHeader(headers)
+ clientAuthentication := headers.Get("Authorization")
+
+ // Server verifies sig and sets the bearer token
+ serverHandshake.Reset()
+ require.NoError(t, serverHandshake.ParseHeaderVal([]byte(headers.Get("Authorization"))))
+ clear(headers)
+ require.NoError(t, serverHandshake.Run())
+ serverHandshake.SetHeader(headers)
+ serverReplyWithBearer := headers.Get("Authentication-Info")
+
+ params.parsePeerIDAuthSchemeParams([]byte(clientChallenge))
+ challengeServer := params.challengeServer
+
+ fmt.Println("### Parameters")
+ fmt.Println("| Parameter | Value |")
+ fmt.Println("| --- | --- |")
+ fmt.Printf("| hostname | %s |\n", parameters.hostname)
+ fmt.Printf("| Server Private Key (pb encoded as hex) | %s |\n", hex.EncodeToString(serverPrivBytes))
+ fmt.Printf("| Server HMAC Key (hex) | %s |\n", hex.EncodeToString(parameters.serverHmacKey[:]))
+ fmt.Printf("| Challenge Client | %s |\n", string(challengeClient))
+ fmt.Printf("| Client Private Key (pb encoded as hex) | %s |\n", hex.EncodeToString(clientPrivBytes))
+ fmt.Printf("| Challenge Server | %s |\n", string(challengeServer))
+ fmt.Printf("| \"Now\" time | %s |\n", nowFn())
+ fmt.Println()
+ fmt.Println("### Handshake Diagram")
+
+ fmt.Println("```mermaid")
+ fmt.Printf(`sequenceDiagram
+Client->>Server: Authorization=%s
+Server->>Client: WWW-Authenticate=%s
+Note right of Client: Client has authenticated Server
+
+Client->>Server: Authorization=%s
+Note left of Server: Server has authenticated Client
+Server->>Client: Authentication-Info=%s
+Note over Client: Future requests use the bearer token
+`, clientChallenge, serverAuthentication, clientAuthentication, serverReplyWithBearer)
+ fmt.Println("```")
+}
+
+func TestSigningExample(t *testing.T) {
+ serverPrivBytes, err := hex.AppendDecode(nil, []byte("0801124001010101010101010101010101010101010101010101010101010101010101018a88e3dd7409f195fd52db2d3cba5d72ca6709bf1d94121bf3748801b40f6f5c"))
+ require.NoError(t, err)
+ serverPriv, err := crypto.UnmarshalPrivateKey(serverPrivBytes)
+ require.NoError(t, err)
+ clientPrivBytes, err := hex.AppendDecode(nil, []byte("0801124002020202020202020202020202020202020202020202020202020202020202028139770ea87d175f56a35466c34c7ecccb8d8a91b4ee37a25df60f5b8fc9b394"))
+ require.NoError(t, err)
+ clientPriv, err := crypto.UnmarshalPrivateKey(clientPrivBytes)
+ require.NoError(t, err)
+ clientPubKeyBytes, err := crypto.MarshalPublicKey(clientPriv.GetPublic())
+ require.NoError(t, err)
+
+ challenge := "ERERERERERERERERERERERERERERERERERERERERERE="
+
+ hostname := "example.com"
+ dataToSign, err := genDataToSign(nil, PeerIDAuthScheme, []sigParam{
+ {"challenge-server", []byte(challenge)},
+ {"client-public-key", clientPubKeyBytes},
+ {"hostname", []byte(hostname)},
+ })
+ require.NoError(t, err)
+
+ sig, err := sign(serverPriv, PeerIDAuthScheme, []sigParam{
+ {"challenge-server", []byte(challenge)},
+ {"client-public-key", clientPubKeyBytes},
+ {"hostname", []byte(hostname)},
+ })
+ require.NoError(t, err)
+
+ fmt.Println("### Signing Example")
+
+ fmt.Println("| Parameter | Value |")
+ fmt.Println("| --- | --- |")
+ fmt.Printf("| hostname | %s |\n", hostname)
+ fmt.Printf("| Server Private Key (pb encoded as hex) | %s |\n", hex.EncodeToString(serverPrivBytes))
+ fmt.Printf("| challenge-server | %s |\n", string(challenge))
+ fmt.Printf("| Client Public Key (pb encoded as hex) | %s |\n", hex.EncodeToString(clientPubKeyBytes))
+ fmt.Printf("| data to sign ([percent encoded](https://datatracker.ietf.org/doc/html/rfc3986#section-2.1)) | %s |\n", url.PathEscape(string(dataToSign)))
+ fmt.Printf("| data to sign (hex encoded) | %s |\n", hex.EncodeToString(dataToSign))
+ fmt.Printf("| signature (base64 encoded) | %s |\n", base64.URLEncoding.EncodeToString(sig))
+ fmt.Println()
+
+ fmt.Println("Note that the `=` after the libp2p-PeerID scheme is actually the varint length of the challenge-server parameter.")
+}
diff --git a/p2p/http/auth/internal/handshake/server.go b/p2p/http/auth/internal/handshake/server.go
new file mode 100644
index 0000000000..6b84038d93
--- /dev/null
+++ b/p2p/http/auth/internal/handshake/server.go
@@ -0,0 +1,373 @@
+package handshake
+
+import (
+ "crypto/hmac"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "hash"
+ "io"
+ "net/http"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/peer"
+)
+
+var (
+ ErrExpiredChallenge = errors.New("challenge expired")
+ ErrExpiredToken = errors.New("token expired")
+ ErrInvalidHMAC = errors.New("invalid HMAC")
+)
+
+const challengeTTL = 5 * time.Minute
+
+type peerIDAuthServerState int
+
+const (
+ // Server initiated
+ peerIDAuthServerStateChallengeClient peerIDAuthServerState = iota
+ peerIDAuthServerStateVerifyChallenge
+ peerIDAuthServerStateVerifyBearer
+
+ // Client initiated
+ peerIDAuthServerStateSignChallenge
+)
+
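+// opaqueState is the state the server round-trips through the client,
+// authenticated with an HMAC. It is either an outstanding challenge
+// (IsToken is false) or a bearer token (IsToken is true).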
+type opaqueState struct {
+ IsToken bool `json:"is-token,omitempty"`
+ ClientPublicKey []byte `json:"client-public-key,omitempty"`
+ PeerID peer.ID `json:"peer-id,omitempty"`
+ ChallengeClient string `json:"challenge-client,omitempty"`
+ Hostname string `json:"hostname"`
+ CreatedTime time.Time `json:"created-time"`
+}
+
+// Marshal appends the HMAC of the JSON-encoded state, followed by the JSON itself, to b.
+func (o *opaqueState) Marshal(hmac hash.Hash, b []byte) ([]byte, error) {
+ hmac.Reset()
+ fieldsMarshalled, err := json.Marshal(o)
+ if err != nil {
+ return b, err
+ }
+ _, err = hmac.Write(fieldsMarshalled)
+ if err != nil {
+ return b, err
+ }
+ b = hmac.Sum(b)
+ b = append(b, fieldsMarshalled...)
+ return b, nil
+}
+
+func (o *opaqueState) Unmarshal(hmacImpl hash.Hash, d []byte) error {
+ hmacImpl.Reset()
+ if len(d) < hmacImpl.Size() {
+ return ErrInvalidHMAC
+ }
+ hmacVal := d[:hmacImpl.Size()]
+ fields := d[hmacImpl.Size():]
+ _, err := hmacImpl.Write(fields)
+ if err != nil {
+ return err
+ }
+ expectedHmac := hmacImpl.Sum(nil)
+ if !hmac.Equal(hmacVal, expectedHmac) {
+ return ErrInvalidHMAC
+ }
+
+ err = json.Unmarshal(fields, &o)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+type PeerIDAuthHandshakeServer struct {
+ Hostname string
+ PrivKey crypto.PrivKey
+ TokenTTL time.Duration
+ // Hmac is used to authenticate opaque blobs and tokens.
+ Hmac hash.Hash
+
+ ran bool
+ buf [1024]byte
+
+ state peerIDAuthServerState
+ p params
+ hb headerBuilder
+
+ opaque opaqueState
+}
+
+var errInvalidHeader = errors.New("invalid header")
+
+func (h *PeerIDAuthHandshakeServer) Reset() {
+ h.Hmac.Reset()
+ h.ran = false
+ clear(h.buf[:])
+ h.state = 0
+ h.p = params{}
+ h.hb.clear()
+ h.opaque = opaqueState{}
+}
+
+func (h *PeerIDAuthHandshakeServer) ParseHeaderVal(headerVal []byte) error {
+ if len(headerVal) == 0 {
+ // We are in the initial state. Nothing to parse.
+ return nil
+ }
+ err := h.p.parsePeerIDAuthSchemeParams(headerVal)
+ if err != nil {
+ return err
+ }
+ switch {
+ case h.p.sigB64 != nil && h.p.opaqueB64 != nil:
+ h.state = peerIDAuthServerStateVerifyChallenge
+ case h.p.bearerTokenB64 != nil:
+ h.state = peerIDAuthServerStateVerifyBearer
+ case h.p.challengeServer != nil && h.p.publicKeyB64 != nil:
+ h.state = peerIDAuthServerStateSignChallenge
+ default:
+ return errInvalidHeader
+ }
+ return nil
+}
+
+func (h *PeerIDAuthHandshakeServer) Run() error {
+ h.ran = true
+ switch h.state {
+ case peerIDAuthServerStateSignChallenge:
+ h.hb.writeScheme(PeerIDAuthScheme)
+ if err := h.addChallengeClientParam(); err != nil {
+ return err
+ }
+ if err := h.addPublicKeyParam(); err != nil {
+ return err
+ }
+
+ publicKeyBytes, err := base64.URLEncoding.AppendDecode(nil, h.p.publicKeyB64)
+ if err != nil {
+ return err
+ }
+ h.opaque.ClientPublicKey = publicKeyBytes
+ if err := h.addServerSigParam(publicKeyBytes); err != nil {
+ return err
+ }
+ if err := h.addOpaqueParam(); err != nil {
+ return err
+ }
+ case peerIDAuthServerStateChallengeClient:
+ h.hb.writeScheme(PeerIDAuthScheme)
+ if err := h.addChallengeClientParam(); err != nil {
+ return err
+ }
+ if err := h.addPublicKeyParam(); err != nil {
+ return err
+ }
+ if err := h.addOpaqueParam(); err != nil {
+ return err
+ }
+ case peerIDAuthServerStateVerifyChallenge:
+ opaque, err := base64.URLEncoding.AppendDecode(h.buf[:0], h.p.opaqueB64)
+ if err != nil {
+ return err
+ }
+ err = h.opaque.Unmarshal(h.Hmac, opaque)
+ if err != nil {
+ return err
+ }
+
+ if nowFn().After(h.opaque.CreatedTime.Add(challengeTTL)) {
+ return ErrExpiredChallenge
+ }
+ if h.opaque.IsToken {
+ return errors.New("expected challenge, got token")
+ }
+
+ if h.Hostname != h.opaque.Hostname {
+ return errors.New("hostname in opaque mismatch")
+ }
+
+ var publicKeyBytes []byte
+ clientInitiatedHandshake := h.opaque.ClientPublicKey != nil
+
+ if clientInitiatedHandshake {
+ publicKeyBytes = h.opaque.ClientPublicKey
+ } else {
+ if len(h.p.publicKeyB64) == 0 {
+ return errors.New("missing public key")
+ }
+ var err error
+ publicKeyBytes, err = base64.URLEncoding.AppendDecode(nil, h.p.publicKeyB64)
+ if err != nil {
+ return err
+ }
+ }
+ pubKey, err := crypto.UnmarshalPublicKey(publicKeyBytes)
+ if err != nil {
+ return err
+ }
+ if err := h.verifySig(pubKey); err != nil {
+ return err
+ }
+
+ peerID, err := peer.IDFromPublicKey(pubKey)
+ if err != nil {
+ return err
+ }
+
+ // And create a bearer token for the client
+ h.opaque = opaqueState{
+ IsToken: true,
+ PeerID: peerID,
+ Hostname: h.Hostname,
+ CreatedTime: nowFn(),
+ }
+
+ h.hb.writeScheme(PeerIDAuthScheme)
+
+ if !clientInitiatedHandshake {
+ if err := h.addServerSigParam(publicKeyBytes); err != nil {
+ return err
+ }
+ }
+ if err := h.addBearerParam(); err != nil {
+ return err
+ }
+ case peerIDAuthServerStateVerifyBearer:
+ bearerToken, err := base64.URLEncoding.AppendDecode(h.buf[:0], h.p.bearerTokenB64)
+ if err != nil {
+ return err
+ }
+ err = h.opaque.Unmarshal(h.Hmac, bearerToken)
+ if err != nil {
+ return err
+ }
+
+ if !h.opaque.IsToken {
+ return errors.New("expected token, got challenge")
+ }
+
+ if nowFn().After(h.opaque.CreatedTime.Add(h.TokenTTL)) {
+ return ErrExpiredToken
+ }
+
+ return nil
+ default:
+ return errors.New("unhandled state")
+ }
+
+ return nil
+}
+
+func (h *PeerIDAuthHandshakeServer) addChallengeClientParam() error {
+ _, err := io.ReadFull(randReader, h.buf[:challengeLen])
+ if err != nil {
+ return err
+ }
+ encodedChallenge := base64.URLEncoding.AppendEncode(h.buf[challengeLen:challengeLen], h.buf[:challengeLen])
+ h.opaque.ChallengeClient = string(encodedChallenge)
+ h.opaque.Hostname = h.Hostname
+ h.opaque.CreatedTime = nowFn()
+ h.hb.writeParam("challenge-client", encodedChallenge)
+ return nil
+}
+
+func (h *PeerIDAuthHandshakeServer) addOpaqueParam() error {
+ opaqueVal, err := h.opaque.Marshal(h.Hmac, h.buf[:0])
+ if err != nil {
+ return err
+ }
+ h.hb.writeParamB64(h.buf[len(opaqueVal):], "opaque", opaqueVal)
+ return nil
+}
+
+func (h *PeerIDAuthHandshakeServer) addServerSigParam(clientPublicKeyBytes []byte) error {
+ if len(h.p.challengeServer) < challengeLen {
+ return errors.New("challenge too short")
+ }
+ serverSig, err := sign(h.PrivKey, PeerIDAuthScheme, []sigParam{
+ {"challenge-server", h.p.challengeServer},
+ {"client-public-key", clientPublicKeyBytes},
+ {"hostname", []byte(h.Hostname)},
+ })
+ if err != nil {
+ return fmt.Errorf("failed to sign challenge: %w", err)
+ }
+ h.hb.writeParamB64(h.buf[:], "sig", serverSig)
+ return nil
+}
+
+func (h *PeerIDAuthHandshakeServer) addBearerParam() error {
+ bearerToken, err := h.opaque.Marshal(h.Hmac, h.buf[:0])
+ if err != nil {
+ return err
+ }
+ h.hb.writeParamB64(h.buf[len(bearerToken):], "bearer", bearerToken)
+ return nil
+}
+
+func (h *PeerIDAuthHandshakeServer) addPublicKeyParam() error {
+ serverPubKey := h.PrivKey.GetPublic()
+ pubKeyBytes, err := crypto.MarshalPublicKey(serverPubKey)
+ if err != nil {
+ return err
+ }
+ h.hb.writeParamB64(h.buf[:], "public-key", pubKeyBytes)
+ return nil
+}
+
+func (h *PeerIDAuthHandshakeServer) verifySig(clientPubKey crypto.PubKey) error {
+ serverPubKey := h.PrivKey.GetPublic()
+ serverPubKeyBytes, err := crypto.MarshalPublicKey(serverPubKey)
+ if err != nil {
+ return err
+ }
+ sig, err := base64.URLEncoding.AppendDecode(h.buf[:0], h.p.sigB64)
+ if err != nil {
+ return fmt.Errorf("failed to decode signature: %w", err)
+ }
+ err = verifySig(clientPubKey, PeerIDAuthScheme, []sigParam{
+ {k: "challenge-client", v: []byte(h.opaque.ChallengeClient)},
+ {k: "server-public-key", v: serverPubKeyBytes},
+ {k: "hostname", v: []byte(h.Hostname)},
+ }, sig)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// PeerID returns the peer ID of the authenticated client.
+func (h *PeerIDAuthHandshakeServer) PeerID() (peer.ID, error) {
+ if !h.ran {
+ return "", errNotRan
+ }
+ switch h.state {
+ case peerIDAuthServerStateVerifyChallenge:
+ case peerIDAuthServerStateVerifyBearer:
+ default:
+ return "", errors.New("not in proper state")
+ }
+ if h.opaque.PeerID == "" {
+ return "", errors.New("peer ID not set")
+ }
+ return h.opaque.PeerID, nil
+}
+
+func (h *PeerIDAuthHandshakeServer) SetHeader(hdr http.Header) {
+ if !h.ran {
+ return
+ }
+ defer h.hb.clear()
+ switch h.state {
+ case peerIDAuthServerStateChallengeClient, peerIDAuthServerStateSignChallenge:
+ hdr.Set("WWW-Authenticate", h.hb.b.String())
+ case peerIDAuthServerStateVerifyChallenge:
+ hdr.Set("Authentication-Info", h.hb.b.String())
+ case peerIDAuthServerStateVerifyBearer:
+ // For completeness. Nothing to do
+ }
+}
diff --git a/p2p/http/auth/server.go b/p2p/http/auth/server.go
new file mode 100644
index 0000000000..3fb6c0ab59
--- /dev/null
+++ b/p2p/http/auth/server.go
@@ -0,0 +1,169 @@
+package httppeeridauth
+
+import (
+ "crypto/hmac"
+ "crypto/rand"
+ "crypto/sha256"
+ "errors"
+ "hash"
+ "net/http"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/p2p/http/auth/internal/handshake"
+)
+
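+// hmacPool is a pool of HMAC instances keyed with the same secret, letting
+// concurrent requests reuse hash state instead of allocating a new HMAC per
+// request.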
+type hmacPool struct {
+ p sync.Pool
+}
+
+func newHmacPool(key []byte) *hmacPool {
+ return &hmacPool{
+ p: sync.Pool{
+ New: func() any {
+ return hmac.New(sha256.New, key)
+ },
+ },
+ }
+}
+
+func (p *hmacPool) Get() hash.Hash {
+ h := p.p.Get().(hash.Hash)
+ h.Reset()
+ return h
+}
+
+func (p *hmacPool) Put(h hash.Hash) {
+ p.p.Put(h)
+}
+
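+// ServerPeerIDAuth is an http.Handler that runs the libp2p PeerID auth
+// handshake and, on success, calls Next with the authenticated client's peer
+// ID. A minimal usage sketch (privKey and mux are hypothetical values
+// supplied by the caller):
+//
+//	auth := &ServerPeerIDAuth{
+//		PrivKey:  privKey,
+//		TokenTTL: time.Hour,
+//		Next: func(p peer.ID, w http.ResponseWriter, r *http.Request) {
+//			mux.ServeHTTP(w, r)
+//		},
+//	}
+//	_ = http.ListenAndServeTLS(":443", "cert.pem", "key.pem", auth)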
+type ServerPeerIDAuth struct {
+ PrivKey crypto.PrivKey
+ TokenTTL time.Duration
+ Next func(peer peer.ID, w http.ResponseWriter, r *http.Request)
+ // NoTLS is a flag that allows the server to accept requests without a TLS
+ // ServerName. Used when something else is terminating the TLS connection.
+ NoTLS bool
+ // Required when NoTLS is true. The server will only accept requests whose
+ // Host header makes ValidHostnameFn return true.
+ ValidHostnameFn func(hostname string) bool
+
+ HmacKey []byte
+ initHmac sync.Once
+ hmacPool *hmacPool
+}
+
+// ServeHTTP implements the http.Handler interface for PeerIDAuth. It will
+// attempt to authenticate the request using the libp2p peer ID auth
+// scheme. If a Next handler is set, it will be called on authenticated
+// requests.
+func (a *ServerPeerIDAuth) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ a.ServeHTTPWithNextHandler(w, r, a.Next)
+}
+
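+// ServeHTTPWithNextHandler is like ServeHTTP, but calls next instead of
+// a.Next on authenticated requests.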
+func (a *ServerPeerIDAuth) ServeHTTPWithNextHandler(w http.ResponseWriter, r *http.Request, next func(peer.ID, http.ResponseWriter, *http.Request)) {
+ a.initHmac.Do(func() {
+ if a.HmacKey == nil {
+ key := make([]byte, 32)
+ _, err := rand.Read(key)
+ if err != nil {
+ panic(err)
+ }
+ a.HmacKey = key
+ }
+ a.hmacPool = newHmacPool(a.HmacKey)
+ })
+
+ hostname := r.Host
+ if a.NoTLS {
+ if a.ValidHostnameFn == nil {
+ log.Error("No ValidHostnameFn set. Required for NoTLS")
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+ if !a.ValidHostnameFn(hostname) {
+ log.Debug("Unauthorized request for host: hostname returned false for ValidHostnameFn", "hostname", hostname)
+ w.WriteHeader(http.StatusBadRequest)
+ return
+ }
+ } else {
+ if r.TLS == nil {
+ log.Warn("No TLS connection, and NoTLS is false")
+ w.WriteHeader(http.StatusBadRequest)
+ return
+ }
+ if hostname != r.TLS.ServerName {
+ log.Debug("Unauthorized request for host: hostname mismatch", "hostname", hostname, "expected", r.TLS.ServerName)
+ w.WriteHeader(http.StatusBadRequest)
+ return
+ }
+ if a.ValidHostnameFn != nil && !a.ValidHostnameFn(hostname) {
+ log.Debug("Unauthorized request for host: hostname returned false for ValidHostnameFn", "hostname", hostname)
+ w.WriteHeader(http.StatusBadRequest)
+ return
+ }
+ }
+
+ hmac := a.hmacPool.Get()
+ defer a.hmacPool.Put(hmac)
+ hs := handshake.PeerIDAuthHandshakeServer{
+ Hostname: hostname,
+ PrivKey: a.PrivKey,
+ TokenTTL: a.TokenTTL,
+ Hmac: hmac,
+ }
+ err := hs.ParseHeaderVal([]byte(r.Header.Get("Authorization")))
+ if err != nil {
+ log.Debug("Failed to parse header", "err", err)
+ w.WriteHeader(http.StatusBadRequest)
+ return
+ }
+ err = hs.Run()
+ if err != nil {
+ switch {
+ case errors.Is(err, handshake.ErrInvalidHMAC),
+ errors.Is(err, handshake.ErrExpiredChallenge),
+ errors.Is(err, handshake.ErrExpiredToken):
+
+ hmac.Reset()
+ hs := handshake.PeerIDAuthHandshakeServer{
+ Hostname: hostname,
+ PrivKey: a.PrivKey,
+ TokenTTL: a.TokenTTL,
+ Hmac: hmac,
+ }
+ _ = hs.Run() // First run will never err
+ hs.SetHeader(w.Header())
+ w.WriteHeader(http.StatusUnauthorized)
+
+ return
+ }
+
+ log.Debug("Failed to run handshake", "err", err)
+ w.WriteHeader(http.StatusBadRequest)
+ return
+ }
+ hs.SetHeader(w.Header())
+
+ peer, err := hs.PeerID()
+ if err != nil {
+ w.WriteHeader(http.StatusUnauthorized)
+ return
+ }
+
+ if next == nil {
+ w.WriteHeader(http.StatusOK)
+ return
+ }
+ next(peer, w, r)
+}
+
+// HasAuthHeader checks if the HTTP request contains an Authorization header
+// that starts with the PeerIDAuthScheme prefix.
+func HasAuthHeader(r *http.Request) bool {
+ h := r.Header.Get("Authorization")
+ return h != "" && strings.HasPrefix(h, handshake.PeerIDAuthScheme)
+}
diff --git a/p2p/http/example_test.go b/p2p/http/example_test.go
new file mode 100644
index 0000000000..e77b3339b3
--- /dev/null
+++ b/p2p/http/example_test.go
@@ -0,0 +1,466 @@
+package libp2phttp_test
+
+import (
+ "fmt"
+ "io"
+ "log"
+ "net"
+ "net/http"
+ "regexp"
+ "strings"
+ "time"
+
+ "github.com/libp2p/go-libp2p"
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/peer"
+ libp2phttp "github.com/libp2p/go-libp2p/p2p/http"
+ httpauth "github.com/libp2p/go-libp2p/p2p/http/auth"
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+func ExampleHost_authenticatedHTTP() {
+ clientKey, _, err := crypto.GenerateKeyPair(crypto.Ed25519, 0)
+ if err != nil {
+ log.Fatal(err)
+ }
+ client := libp2phttp.Host{
+ ClientPeerIDAuth: &httpauth.ClientPeerIDAuth{
+ TokenTTL: time.Hour,
+ PrivKey: clientKey,
+ },
+ }
+
+ serverKey, _, err := crypto.GenerateKeyPair(crypto.Ed25519, 0)
+ if err != nil {
+ log.Fatal(err)
+ }
+ server := libp2phttp.Host{
+ ServerPeerIDAuth: &httpauth.ServerPeerIDAuth{
+ PrivKey: serverKey,
+ // No TLS for this example. In practice you want to use TLS.
+ NoTLS: true,
+ ValidHostnameFn: func(hostname string) bool {
+ return strings.HasPrefix(hostname, "127.0.0.1")
+ },
+ TokenTTL: time.Hour,
+ },
+ // No TLS for this example. In practice you want to use TLS.
+ InsecureAllowHTTP: true,
+ ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/http")},
+ }
+
+ observedClientID := ""
+ server.SetHTTPHandler("/echo-id", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ observedClientID = libp2phttp.ClientPeerID(r).String()
+ w.WriteHeader(http.StatusOK)
+ }))
+
+ go server.Serve()
+ defer server.Close()
+
+ expectedServerID, err := peer.IDFromPrivateKey(serverKey)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ httpClient := http.Client{Transport: &client}
+ url := fmt.Sprintf("multiaddr:%s/p2p/%s/http-path/echo-id", server.Addrs()[0], expectedServerID)
+ resp, err := httpClient.Get(url)
+ if err != nil {
+ log.Fatal(err)
+ }
+ resp.Body.Close()
+
+ expectedClientID, err := peer.IDFromPrivateKey(clientKey)
+ if err != nil {
+ log.Fatal(err)
+ }
+ if observedClientID != expectedClientID.String() {
+ log.Fatal("observedClientID does not match expectedClientID")
+ }
+
+ observedServerID := libp2phttp.ServerPeerID(resp)
+ if observedServerID != expectedServerID {
+ log.Fatal("observedServerID does not match expectedServerID")
+ }
+
+ fmt.Println("Successfully authenticated HTTP request")
+ // Output: Successfully authenticated HTTP request
+}
+
+func ExampleHost_withAStockGoHTTPClient() {
+ server := libp2phttp.Host{
+ InsecureAllowHTTP: true, // For our example, we'll allow insecure HTTP
+ ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/http")},
+ }
+
+ // A server with a simple echo protocol
+ server.SetHTTPHandler("/echo/1.0.0", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Add("Content-Type", "application/octet-stream")
+ io.Copy(w, r.Body)
+ }))
+ go server.Serve()
+ defer server.Close()
+
+ var serverHTTPPort string
+ var err error
+ for _, a := range server.Addrs() {
+ serverHTTPPort, err = a.ValueForProtocol(ma.P_TCP)
+ if err == nil {
+ break
+ }
+ }
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Make an HTTP request using the Go standard library.
+ resp, err := http.Post("http://127.0.0.1:"+serverHTTPPort+"/echo/1.0.0/", "application/octet-stream", strings.NewReader("Hello HTTP"))
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer resp.Body.Close()
+
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ log.Fatal(err)
+ }
+ fmt.Println(string(body))
+
+ // Output: Hello HTTP
+}
+
+func ExampleHost_listenOnHTTPTransportAndStreams() {
+ serverStreamHost, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/127.0.0.1/udp/0/quic-v1"))
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer serverStreamHost.Close()
+
+ server := libp2phttp.Host{
+ InsecureAllowHTTP: true, // For our example, we'll allow insecure HTTP
+ ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/http")},
+ StreamHost: serverStreamHost,
+ }
+ go server.Serve()
+ defer server.Close()
+
+ for _, a := range server.Addrs() {
+ _, transport := ma.SplitLast(a)
+ fmt.Printf("Server listening on transport: %s\n", transport)
+ }
+ // Output: Server listening on transport: /quic-v1
+ // Server listening on transport: /http
+}
+
+func ExampleHost_overLibp2pStreams() {
+ serverStreamHost, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/127.0.0.1/udp/0/quic-v1"))
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ server := libp2phttp.Host{
+ StreamHost: serverStreamHost,
+ }
+
+ // A server with a simple echo protocol
+ server.SetHTTPHandler("/echo/1.0.0", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Add("Content-Type", "application/octet-stream")
+ io.Copy(w, r.Body)
+ }))
+ go server.Serve()
+ defer server.Close()
+
+ clientStreamHost, err := libp2p.New(libp2p.NoListenAddrs)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ client := libp2phttp.Host{StreamHost: clientStreamHost}
+
+ // Make an HTTP request using the Go standard library, but over libp2p
+ // streams. If the server were listening on an HTTP transport, this could
+ // also make the request over the HTTP transport.
+ httpClient, _ := client.NamespacedClient("/echo/1.0.0", peer.AddrInfo{ID: server.PeerID(), Addrs: server.Addrs()})
+
+ // Only need to Post to "/" because this client is namespaced to the "/echo/1.0.0" protocol.
+ resp, err := httpClient.Post("/", "application/octet-stream", strings.NewReader("Hello HTTP"))
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer resp.Body.Close()
+
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ log.Fatal(err)
+ }
+ fmt.Println(string(body))
+
+ // Output: Hello HTTP
+}
+
+var tcpPortRE = regexp.MustCompile(`/tcp/(\d+)`)
+
+func ExampleHost_Serve() {
+ server := libp2phttp.Host{
+ InsecureAllowHTTP: true, // For our example, we'll allow insecure HTTP
+ ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/http")},
+ }
+
+ go server.Serve()
+ defer server.Close()
+
+ for _, a := range server.Addrs() {
+ s := a.String()
+ addrWithoutSpecificPort := tcpPortRE.ReplaceAllString(s, "/tcp/")
+ fmt.Println(addrWithoutSpecificPort)
+ }
+
+ // Output: /ip4/127.0.0.1/tcp//http
+}
+
+func ExampleHost_SetHTTPHandler() {
+ server := libp2phttp.Host{
+ InsecureAllowHTTP: true, // For our example, we'll allow insecure HTTP
+ ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/http")},
+ }
+
+ server.SetHTTPHandler("/hello/1", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+ w.Header().Add("Content-Type", "text/plain")
+ w.Write([]byte("Hello World"))
+ }))
+
+ go server.Serve()
+ defer server.Close()
+
+ port, err := server.Addrs()[0].ValueForProtocol(ma.P_TCP)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ resp, err := http.Get("http://127.0.0.1:" + port + "/hello/1/")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer resp.Body.Close()
+ respBody, err := io.ReadAll(resp.Body)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ fmt.Println(string(respBody))
+
+ // Output: Hello World
+}
+
+func ExampleHost_SetHTTPHandlerAtPath() {
+ server := libp2phttp.Host{
+ InsecureAllowHTTP: true, // For our example, we'll allow insecure HTTP
+ ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/http")},
+ }
+
+ server.SetHTTPHandlerAtPath("/hello/1", "/other-place/", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+ w.Header().Add("Content-Type", "text/plain")
+ w.Write([]byte("Hello World"))
+ }))
+
+ go server.Serve()
+ defer server.Close()
+
+ port, err := server.Addrs()[0].ValueForProtocol(ma.P_TCP)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ resp, err := http.Get("http://127.0.0.1:" + port + "/other-place/")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer resp.Body.Close()
+ respBody, err := io.ReadAll(resp.Body)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ fmt.Println(string(respBody))
+
+ // Output: Hello World
+}
+
+func ExampleHost_NamespacedClient() {
+ var client libp2phttp.Host
+
+ // Create the server
+ server := libp2phttp.Host{
+ InsecureAllowHTTP: true, // For our example, we'll allow insecure HTTP
+ ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/http")},
+ }
+
+ server.SetHTTPHandlerAtPath("/hello/1", "/other-place/", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+ w.Header().Add("Content-Type", "text/plain")
+ w.Write([]byte("Hello World"))
+ }))
+
+ go server.Serve()
+ defer server.Close()
+
+ // Create an http.Client that is namespaced to this protocol.
+ httpClient, err := client.NamespacedClient("/hello/1", peer.AddrInfo{ID: server.PeerID(), Addrs: server.Addrs()})
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ resp, err := httpClient.Get("/")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer resp.Body.Close()
+ respBody, err := io.ReadAll(resp.Body)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ fmt.Println(string(respBody))
+
+ // Output: Hello World
+}
+
+func ExampleHost_NamespaceRoundTripper() {
+ var client libp2phttp.Host
+
+ // Create the server
+ server := libp2phttp.Host{
+ InsecureAllowHTTP: true, // For our example, we'll allow insecure HTTP
+ ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/http")},
+ }
+
+ server.SetHTTPHandler("/hello/1", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+ w.Header().Add("Content-Type", "text/plain")
+ w.Write([]byte("Hello World"))
+ }))
+
+ go server.Serve()
+ defer server.Close()
+
+ // Create an http.Roundtripper for the server
+ rt, err := client.NewConstrainedRoundTripper(peer.AddrInfo{ID: server.PeerID(), Addrs: server.Addrs()})
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Namespace this roundtripper to a specific protocol
+ rt, err = client.NamespaceRoundTripper(rt, "/hello/1", server.PeerID())
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ resp, err := (&http.Client{Transport: rt}).Get("/")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer resp.Body.Close()
+ respBody, err := io.ReadAll(resp.Body)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ fmt.Println(string(respBody))
+
+ // Output: Hello World
+}
+
+func ExampleHost_NewConstrainedRoundTripper() {
+ var client libp2phttp.Host
+
+ // Create the server
+ server := libp2phttp.Host{
+ InsecureAllowHTTP: true, // For our example, we'll allow insecure HTTP
+ ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/http")},
+ }
+
+ server.SetHTTPHandler("/hello/1", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+ w.Header().Add("Content-Type", "text/plain")
+ w.Write([]byte("Hello World"))
+ }))
+
+ go server.Serve()
+ defer server.Close()
+
+ // Create an http.Roundtripper for the server
+ rt, err := client.NewConstrainedRoundTripper(peer.AddrInfo{ID: server.PeerID(), Addrs: server.Addrs()})
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ resp, err := (&http.Client{Transport: rt}).Get("/hello/1")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer resp.Body.Close()
+ respBody, err := io.ReadAll(resp.Body)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ fmt.Println(string(respBody))
+
+ // Output: Hello World
+}
+
+func ExampleWellKnownHandler() {
+ var h libp2phttp.WellKnownHandler
+ h.AddProtocolMeta("/hello/1", libp2phttp.ProtocolMeta{
+ Path: "/hello-path/",
+ })
+
+ listener, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ defer listener.Close()
+ // Serve the well-known resource. Note that this is handled automatically if you use the libp2phttp.Host.
+ go http.Serve(listener, &h)
+
+ // Get the well-known resource
+ resp, err := http.Get("http://" + listener.Addr().String() + libp2phttp.WellKnownProtocols)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer resp.Body.Close()
+ respBody, err := io.ReadAll(resp.Body)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ fmt.Println(string(respBody))
+ // Output: {"/hello/1":{"path":"/hello-path/"}}
+}
+
+func ExampleHost_RoundTrip() {
+ // Setup server for example
+ server := libp2phttp.Host{
+ InsecureAllowHTTP: true, // For our example, we'll allow insecure HTTP
+ ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/http")},
+ }
+ go server.Serve()
+ defer server.Close()
+ server.SetHTTPHandlerAtPath("/hello/", "/", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+ w.Write([]byte("Hello World"))
+ }))
+
+ // Use the HTTP Host as a RoundTripper
+ httpHost := libp2phttp.Host{}
+ client := http.Client{Transport: &httpHost}
+ resp, err := client.Get("multiaddr:" + server.Addrs()[0].String())
+ if err != nil {
+ log.Fatal(err)
+ }
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ log.Fatal(err)
+ }
+ fmt.Println(string(body))
+ // Output: Hello World
+}
diff --git a/p2p/http/libp2phttp.go b/p2p/http/libp2phttp.go
new file mode 100644
index 0000000000..7931846c02
--- /dev/null
+++ b/p2p/http/libp2phttp.go
@@ -0,0 +1,1242 @@
+// Package libp2phttp provides HTTP semantics over libp2p. It can use a libp2p
+// stream transport or stock HTTP transports. This API is experimental and will
+// likely change soon. Implements [libp2p spec #508](https://github.com/libp2p/specs/pull/508).
+package libp2phttp
+
+import (
+ "bufio"
+ "context"
+ "crypto/tls"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ lru "github.com/hashicorp/golang-lru/v2"
+ host "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/libp2p/go-libp2p/core/protocol"
+ logging "github.com/libp2p/go-libp2p/gologshim"
+ httpauth "github.com/libp2p/go-libp2p/p2p/http/auth"
+ gostream "github.com/libp2p/go-libp2p/p2p/net/gostream"
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+var log = logging.Logger("libp2phttp")
+
+var WellKnownRequestTimeout = 30 * time.Second
+
+const ProtocolIDForMultistreamSelect = "/http/1.1"
+const WellKnownProtocols = "/.well-known/libp2p/protocols"
+
+// LegacyWellKnownProtocols refers to the well-known resource used in an early
+// draft of the libp2p+http spec. Some users have deployed this and need backwards compatibility.
+// Hopefully we can phase this out in the future. Context: https://github.com/libp2p/go-libp2p/pull/2797
+const LegacyWellKnownProtocols = "/.well-known/libp2p"
+
+const peerMetadataLimit = 8 << 10 // 8KB
+const peerMetadataLRUSize = 256 // How many peers' metadata to keep in our LRU cache
+
+// DefaultNewStreamTimeout is the default timeout for establishing a new stream.
+// It is the same value as basic_host.DefaultNegotiationTimeout.
+var DefaultNewStreamTimeout = 10 * time.Second
+
+type clientPeerIDContextKey struct{}
+type serverPeerIDContextKey struct{}
+
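+// ClientPeerID returns the peer ID of the authenticated client for this
+// request, or the empty peer ID if the client did not authenticate.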
+func ClientPeerID(r *http.Request) peer.ID {
+ if id, ok := r.Context().Value(clientPeerIDContextKey{}).(peer.ID); ok {
+ return id
+ }
+ return ""
+}
+
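+// ServerPeerID returns the peer ID of the authenticated server for this
+// response, or the empty peer ID if no server authentication took place.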
+func ServerPeerID(r *http.Response) peer.ID {
+ if id, ok := r.Request.Context().Value(serverPeerIDContextKey{}).(peer.ID); ok {
+ return id
+ }
+ return ""
+}
+
+// ProtocolMeta is metadata about a protocol.
+type ProtocolMeta struct {
+ // Path defines the HTTP Path prefix used for this protocol
+ Path string `json:"path"`
+}
+
+type PeerMeta map[protocol.ID]ProtocolMeta
+
+// WellKnownHandler is an http.Handler that serves the well-known resource
+type WellKnownHandler struct {
+ wellknownMapMu sync.Mutex
+ wellKnownMapping PeerMeta
+ wellKnownCache []byte
+}
+
+// streamHostListen returns a net.Listener that listens on libp2p streams for HTTP/1.1 messages.
+func streamHostListen(streamHost host.Host) (net.Listener, error) {
+ return gostream.Listen(streamHost, ProtocolIDForMultistreamSelect, gostream.IgnoreEOF())
+}
+
+func (h *WellKnownHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ // Check if the request accepts JSON
+ accepts := r.Header.Get("Accept")
+ if accepts != "" && !(strings.Contains(accepts, "application/json") || strings.Contains(accepts, "*/*")) {
+ http.Error(w, "Only application/json is supported", http.StatusNotAcceptable)
+ return
+ }
+
+ if r.Method != http.MethodGet {
+ http.Error(w, "Only GET requests are supported", http.StatusMethodNotAllowed)
+ return
+ }
+
+ // Return a JSON object with the well-known protocols
+ h.wellknownMapMu.Lock()
+ mapping := h.wellKnownCache
+ var err error
+ if mapping == nil {
+ mapping, err = json.Marshal(h.wellKnownMapping)
+ if err == nil {
+ h.wellKnownCache = mapping
+ }
+ }
+ h.wellknownMapMu.Unlock()
+ if err != nil {
+ http.Error(w, "Marshal error", http.StatusInternalServerError)
+ return
+ }
+ w.Header().Add("Content-Type", "application/json")
+ w.Header().Add("Content-Length", strconv.Itoa(len(mapping)))
+ w.Write(mapping)
+}
+
+func (h *WellKnownHandler) AddProtocolMeta(p protocol.ID, protocolMeta ProtocolMeta) {
+ h.wellknownMapMu.Lock()
+ if h.wellKnownMapping == nil {
+ h.wellKnownMapping = make(map[protocol.ID]ProtocolMeta)
+ }
+ h.wellKnownMapping[p] = protocolMeta
+ h.wellKnownCache = nil
+ h.wellknownMapMu.Unlock()
+}
+
+func (h *WellKnownHandler) RemoveProtocolMeta(p protocol.ID) {
+ h.wellknownMapMu.Lock()
+ if h.wellKnownMapping != nil {
+ delete(h.wellKnownMapping, p)
+ }
+ h.wellKnownCache = nil
+ h.wellknownMapMu.Unlock()
+}
+
+// Host is a libp2p host for request/responses with HTTP semantics. This is
+// in contrast to a stream-oriented host like the core host.Host interface. Its
+// zero-value (&Host{}) is usable. Do not copy by value.
+// See examples for usage.
+//
+// Warning, this is experimental. The API will likely change.
+type Host struct {
+ // StreamHost is a stream based libp2p host used to do HTTP over libp2p streams. May be nil
+ StreamHost host.Host
+ // ListenAddrs are the requested addresses to listen on. Multiaddrs must be
+ // valid HTTP(s) multiaddr. Only multiaddrs for an HTTP transport are
+ // supported (must end with /http or /https).
+ ListenAddrs []ma.Multiaddr
+ // TLSConfig is the TLS config for the server to use
+ TLSConfig *tls.Config
+ // InsecureAllowHTTP indicates if the server is allowed to serve unencrypted
+ // HTTP requests over TCP.
+ InsecureAllowHTTP bool
+
+ // ServerPeerIDAuth sets the server's signing key and TTL for
+ // server-provided tokens.
+ ServerPeerIDAuth *httpauth.ServerPeerIDAuth
+ // ClientPeerIDAuth sets the client's signing key and TTL for our stored
+ // tokens.
+ ClientPeerIDAuth *httpauth.ClientPeerIDAuth
+
+ // ServeMux is the http.ServeMux used by the server to serve requests. If
+ // nil, a new serve mux will be created. Users may manually add handlers to
+ // this mux instead of using `SetHTTPHandler`, but if they do, they should
+ // also update the WellKnownHandler's protocol mapping.
+ ServeMux *http.ServeMux
+ initializeServeMux sync.Once
+
+ // DefaultClientRoundTripper is the default http.RoundTripper for clients to
+ // use when making requests over an HTTP transport. This must be an
+ // `*http.Transport` type so that the transport can be cloned and the
+ // `TLSClientConfig` field can be configured. If unset, it will create a new
+ // `http.Transport` on first use.
+ DefaultClientRoundTripper *http.Transport
+
+ // WellKnownHandler is the http handler for the well-known
+ // resource. It is responsible for sharing this node's protocol metadata
+ // with other nodes. Users only need to care about this if they set their
+ // own ServeMux with pre-existing routes. By default, new protocols are
+ // added here when a user calls `SetHTTPHandler` or `SetHTTPHandlerAtPath`.
+ WellKnownHandler WellKnownHandler
+
+ // EnableCompatibilityWithLegacyWellKnownEndpoint allows compatibility with
+ // an older version of the spec that defined the well-known resource as:
+ // .well-known/libp2p.
+ // For servers, this means hosting the well-known resource at both the
+ // legacy and current paths.
+ // For clients it means making two parallel requests and picking the first one that succeeds.
+ //
+ // Long term this should be deprecated once enough users have upgraded to a
+ // newer go-libp2p version and we can remove all this code.
+ EnableCompatibilityWithLegacyWellKnownEndpoint bool
+
+ // peerMetadata is an LRU cache of a peer's well-known protocol map.
+ peerMetadata *lru.Cache[peer.ID, PeerMeta]
+ // createHTTPTransport is used to lazily create the httpTransport in a thread-safe way.
+ createHTTPTransport sync.Once
+ // createDefaultClientRoundTripper is used to lazily create the default
+ // client round tripper in a thread-safe way.
+ createDefaultClientRoundTripper sync.Once
+ httpTransport *httpTransport
+}
+
+type httpTransport struct {
+ listenAddrs []ma.Multiaddr
+ listeners []net.Listener
+ closeListeners chan struct{}
+ waitingForListeners chan struct{}
+}
+
+func newPeerMetadataCache() *lru.Cache[peer.ID, PeerMeta] {
+ peerMetadata, err := lru.New[peer.ID, PeerMeta](peerMetadataLRUSize)
+ if err != nil {
+ // Only happens if size is < 1. We make sure to not do that, so this should never happen.
+ panic(err)
+ }
+ return peerMetadata
+}
+
+func (h *Host) httpTransportInit() {
+ h.createHTTPTransport.Do(func() {
+ h.httpTransport = &httpTransport{
+ closeListeners: make(chan struct{}),
+ waitingForListeners: make(chan struct{}),
+ }
+ })
+}
+
+func (h *Host) serveMuxInit() {
+ h.initializeServeMux.Do(func() {
+ if h.ServeMux == nil {
+ h.ServeMux = http.NewServeMux()
+ }
+ })
+}
+
+// Addrs returns the HTTP transport's listen addresses. It blocks until Serve
+// has finished setting up its listeners.
+func (h *Host) Addrs() []ma.Multiaddr {
+ h.httpTransportInit()
+ <-h.httpTransport.waitingForListeners
+ return h.httpTransport.listenAddrs
+}
+
+// PeerID returns the peer ID of the underlying stream host, or the zero value if there is no stream host.
+func (h *Host) PeerID() peer.ID {
+ if h.StreamHost != nil {
+ return h.StreamHost.ID()
+ }
+ return ""
+}
+
+// ErrNoListeners is returned by Serve when there is nothing to listen on.
+var ErrNoListeners = errors.New("nothing to listen on")
+
+func (h *Host) setupListeners(listenerErrCh chan error) error {
+ for _, addr := range h.ListenAddrs {
+ parsedAddr, err := parseMultiaddr(addr)
+ if err != nil {
+ return err
+ }
+ // resolve the host
+ ipaddr, err := net.ResolveIPAddr("ip", parsedAddr.host)
+ if err != nil {
+ return err
+ }
+
+ host := ipaddr.String()
+ l, err := net.Listen("tcp", host+":"+parsedAddr.port)
+ if err != nil {
+ return err
+ }
+ h.httpTransport.listeners = append(h.httpTransport.listeners, l)
+
+ // get resolved port
+ _, port, err := net.SplitHostPort(l.Addr().String())
+ if err != nil {
+ return err
+ }
+
+ var listenAddr ma.Multiaddr
+ if parsedAddr.useHTTPS && parsedAddr.sni != "" && parsedAddr.sni != host {
+ listenAddr = ma.StringCast(fmt.Sprintf("/ip4/%s/tcp/%s/tls/sni/%s/http", host, port, parsedAddr.sni))
+ } else {
+ scheme := "http"
+ if parsedAddr.useHTTPS {
+ scheme = "https"
+ }
+ listenAddr = ma.StringCast(fmt.Sprintf("/ip4/%s/tcp/%s/%s", host, port, scheme))
+ }
+
+ if parsedAddr.useHTTPS {
+ go func() {
+ srv := http.Server{
+ Handler: maybeDecorateContextWithAuthMiddleware(h.ServerPeerIDAuth, h.ServeMux),
+ TLSConfig: h.TLSConfig,
+ }
+ listenerErrCh <- srv.ServeTLS(l, "", "")
+ }()
+ h.httpTransport.listenAddrs = append(h.httpTransport.listenAddrs, listenAddr)
+ } else if h.InsecureAllowHTTP {
+ go func() {
+ srv := http.Server{
+ Handler: maybeDecorateContextWithAuthMiddleware(h.ServerPeerIDAuth, h.ServeMux),
+ }
+ listenerErrCh <- srv.Serve(l)
+ }()
+ h.httpTransport.listenAddrs = append(h.httpTransport.listenAddrs, listenAddr)
+ } else {
+ // We are not serving insecure HTTP
+ log.Warn("Not serving insecure HTTP. Prefer an HTTPS endpoint.", "addr", listenAddr)
+ }
+ }
+ return nil
+}
+
+// Serve starts the HTTP transport listeners. Always returns a non-nil error.
+// If there are no listeners, returns ErrNoListeners.
+func (h *Host) Serve() error {
+ // assert that each addr contains a /http component
+ for _, addr := range h.ListenAddrs {
+ _, isHTTP := normalizeHTTPMultiaddr(addr)
+ if !isHTTP {
+ return fmt.Errorf("address %s does not contain a /http or /https component", addr)
+ }
+ }
+
+ h.serveMuxInit()
+ h.ServeMux.Handle(WellKnownProtocols, &h.WellKnownHandler)
+ if h.EnableCompatibilityWithLegacyWellKnownEndpoint {
+ h.ServeMux.Handle(LegacyWellKnownProtocols, &h.WellKnownHandler)
+ }
+
+ h.httpTransportInit()
+
+ closedWaitingForListeners := false
+ defer func() {
+ if !closedWaitingForListeners {
+ close(h.httpTransport.waitingForListeners)
+ }
+ }()
+
+ if len(h.ListenAddrs) == 0 && h.StreamHost == nil {
+ return ErrNoListeners
+ }
+
+ h.httpTransport.listeners = make([]net.Listener, 0, len(h.ListenAddrs)+1) // +1 for stream host
+
+ streamHostAddrsCount := 0
+ if h.StreamHost != nil {
+ streamHostAddrsCount = len(h.StreamHost.Addrs())
+ }
+ h.httpTransport.listenAddrs = make([]ma.Multiaddr, 0, len(h.ListenAddrs)+streamHostAddrsCount)
+
+ errCh := make(chan error)
+
+ if h.StreamHost != nil {
+ listener, err := streamHostListen(h.StreamHost)
+ if err != nil {
+ return err
+ }
+ h.httpTransport.listeners = append(h.httpTransport.listeners, listener)
+ h.httpTransport.listenAddrs = append(h.httpTransport.listenAddrs, h.StreamHost.Addrs()...)
+
+ go func() {
+ srv := &http.Server{
+ Handler: connectionCloseHeaderMiddleware(h.ServeMux),
+ ConnContext: func(ctx context.Context, c net.Conn) context.Context {
+ remote := c.RemoteAddr()
+ if remote.Network() == gostream.Network {
+ remoteID, err := peer.Decode(remote.String())
+ if err == nil {
+ return context.WithValue(ctx, clientPeerIDContextKey{}, remoteID)
+ }
+ }
+ return ctx
+ },
+ }
+ errCh <- srv.Serve(listener)
+ }()
+ }
+
+ closeAllListeners := func() {
+ for _, l := range h.httpTransport.listeners {
+ l.Close()
+ }
+ }
+
+ err := h.setupListeners(errCh)
+ if err != nil {
+ closeAllListeners()
+ return err
+ }
+
+ close(h.httpTransport.waitingForListeners)
+ closedWaitingForListeners = true
+
+ if len(h.httpTransport.listeners) == 0 || len(h.httpTransport.listenAddrs) == 0 {
+ closeAllListeners()
+ return ErrNoListeners
+ }
+
+ expectedErrCount := len(h.httpTransport.listeners)
+ select {
+ case <-h.httpTransport.closeListeners:
+ err = http.ErrServerClosed
+ case err = <-errCh:
+ expectedErrCount--
+ }
+
+ // Close all listeners
+ closeAllListeners()
+ for i := 0; i < expectedErrCount; i++ {
+ <-errCh
+ }
+ close(errCh)
+
+ return err
+}
+
+// Close stops the HTTP transport's listeners, causing Serve to return.
+func (h *Host) Close() error {
+ h.httpTransportInit()
+ close(h.httpTransport.closeListeners)
+ return nil
+}
+
+// SetHTTPHandler sets the HTTP handler for a given protocol. Automatically
+// manages the well-known resource mapping.
+// http.StripPrefix is called on the handler, so the handler will be unaware of
+// its prefix path.
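+//
+// For example (a hedged sketch; the protocol ID and echoHandler are
+// illustrative), the following mounts the handler at "/echo/1/", so a request
+// for "/echo/1/x" reaches echoHandler as a request for "/x":
+//
+//	h.SetHTTPHandler("/echo/1", echoHandler)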
+func (h *Host) SetHTTPHandler(p protocol.ID, handler http.Handler) {
+ h.SetHTTPHandlerAtPath(p, string(p), handler)
+}
+
+// SetHTTPHandlerAtPath sets the HTTP handler for a given protocol using the
+// given path. Automatically manages the well-known resource mapping.
+// http.StripPrefix is called on the handler, so the handler will be unaware of
+// its prefix path.
+func (h *Host) SetHTTPHandlerAtPath(p protocol.ID, path string, handler http.Handler) {
+ if path == "" || path[len(path)-1] != '/' {
+ // We are nesting this handler under this path, so it should end with a slash.
+ path += "/"
+ }
+ h.WellKnownHandler.AddProtocolMeta(p, ProtocolMeta{Path: path})
+ h.serveMuxInit()
+ // Do not trim the trailing / from path
+ // This allows us to serve `/a/b` when we mount a handler for `/b` at path `/a`
+ h.ServeMux.Handle(path, http.StripPrefix(strings.TrimSuffix(path, "/"), handler))
+}
+
+// PeerMetadataGetter lets RoundTrippers implement a specific way of caching a peer's protocol mapping.
+type PeerMetadataGetter interface {
+ GetPeerMetadata() (PeerMeta, error)
+}
+
+type streamRoundTripper struct {
+ server peer.ID
+ // if true, we won't add the server's addresses to the peerstore. This
+ // should only be set when creating the struct.
+ skipAddAddrs bool
+ addrsAdded sync.Once
+ serverAddrs []ma.Multiaddr
+ h host.Host
+ httpHost *Host
+}
+
+// streamReadCloser wraps an io.ReadCloser and closes the underlying stream when
+// closed (as well as closing the wrapped ReadCloser). This is necessary because
+// we have two things to close, the body and the stream. The stream isn't closed
+// by the body automatically, as hinted at by the fact that `http.ReadResponse`
+// takes a bufio.Reader.
+type streamReadCloser struct {
+ io.ReadCloser
+ s network.Stream
+}
+
+func (s *streamReadCloser) Close() error {
+ s.s.Close()
+ return s.ReadCloser.Close()
+}
+
+func (rt *streamRoundTripper) GetPeerMetadata() (PeerMeta, error) {
+ ctx := context.Background()
+ ctx, cancel := context.WithDeadline(ctx, time.Now().Add(WellKnownRequestTimeout))
+ defer cancel()
+ return rt.httpHost.getAndStorePeerMetadata(ctx, rt, rt.server)
+}
+
+// RoundTrip implements http.RoundTripper.
+func (rt *streamRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) {
+ // Add the addresses we learned about for this server
+ if !rt.skipAddAddrs {
+ rt.addrsAdded.Do(func() {
+ if len(rt.serverAddrs) > 0 {
+ rt.h.Peerstore().AddAddrs(rt.server, rt.serverAddrs, peerstore.TempAddrTTL)
+ }
+ rt.serverAddrs = nil // may as well clean up
+ })
+ }
+
+ // If the request context's deadline is unset or further out than
+ // DefaultNewStreamTimeout, cap new stream negotiation at
+ // DefaultNewStreamTimeout.
+ newStreamCtx := r.Context()
+ if deadline, ok := newStreamCtx.Deadline(); !ok || deadline.After(time.Now().Add(DefaultNewStreamTimeout)) {
+ var cancel context.CancelFunc
+ newStreamCtx, cancel = context.WithTimeout(context.Background(), DefaultNewStreamTimeout)
+ defer cancel()
+ }
+
+ s, err := rt.h.NewStream(newStreamCtx, rt.server, ProtocolIDForMultistreamSelect)
+ if err != nil {
+ return nil, err
+ }
+
+ // Write connection: close header to ensure the stream is closed after the response
+ r.Header.Add("connection", "close")
+
+ go func() {
+ defer s.CloseWrite()
+ r.Write(s)
+ if r.Body != nil {
+ r.Body.Close()
+ }
+ }()
+
+ if deadline, ok := r.Context().Deadline(); ok {
+ s.SetReadDeadline(deadline)
+ }
+
+ resp, err := http.ReadResponse(bufio.NewReader(s), r)
+ if err != nil {
+ s.Close()
+ return nil, err
+ }
+ resp.Body = &streamReadCloser{resp.Body, s}
+
+ if r.URL.Scheme == "multiaddr" {
+ // This was a multiaddr uri, we may need to convert relative URI
+ // references to absolute multiaddr ones so that the next request
+ // knows how to reach the endpoint.
+ locationHeader := resp.Header.Get("Location")
+ if locationHeader != "" {
+ u, err := locationHeaderToMultiaddrURI(r.URL, locationHeader)
+ if err != nil {
+ return nil, fmt.Errorf("failed to convert location header (%s) from request (%s) to multiaddr uri: %w", locationHeader, r.URL, err)
+ }
+ // Update the location header to be an absolute multiaddr uri
+ resp.Header.Set("Location", u.String())
+ }
+ }
+
+ ctxWithServerID := context.WithValue(r.Context(), serverPeerIDContextKey{}, rt.server)
+ resp.Request = resp.Request.WithContext(ctxWithServerID)
+ return resp, nil
+}
+
+// locationHeaderToMultiaddrURI takes our original URL and the response's Location header
+// and, if the location header is relative, turns it into an absolute multiaddr uri.
+// Refer to https://www.rfc-editor.org/rfc/rfc3986#section-4.2 for the
+// definition of a Relative Reference.
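+//
+// For example (illustrative values): an original URL of
+// multiaddr:/dns/example.com/tcp/80/http/http-path/old with a Location header
+// of "/new" resolves to multiaddr:/dns/example.com/tcp/80/http/http-path/new.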
+func locationHeaderToMultiaddrURI(original *url.URL, locationHeader string) (*url.URL, error) {
+ if locationHeader == "" {
+ return nil, errors.New("location header is empty")
+ }
+ if strings.HasPrefix(locationHeader, "//") {
+ // This is a network path reference. We don't support these.
+ return nil, errors.New("network path reference not supported")
+ }
+
+ firstSegment := strings.SplitN(locationHeader, "/", 2)[0]
+ if strings.Contains(firstSegment, ":") {
+ // This location contains a scheme, so it's an absolute uri.
+ return url.Parse(locationHeader)
+ }
+
+ // It's a relative reference. We need to resolve it against the original URL.
+ if original.Scheme != "multiaddr" {
+ return nil, errors.New("original uri is not a multiaddr")
+ }
+
+ // Parse the original multiaddr
+ originalStr := original.RawPath
+ if originalStr == "" {
+ originalStr = original.Path
+ }
+ originalMa, err := ma.NewMultiaddr(originalStr)
+ if err != nil {
+ return nil, fmt.Errorf("original uri is not a valid multiaddr: %w", err)
+ }
+
+ // Get the target http path
+ var targetHTTPPath string
+ for _, c := range originalMa {
+ if c.Protocol().Code == ma.P_HTTP_PATH {
+ targetHTTPPath = string(c.RawValue())
+ break
+ }
+ }
+
+ // Resolve reference from targetURL and relativeURL
+ targetURL := url.URL{Path: targetHTTPPath}
+ relativeURL := url.URL{Path: locationHeader}
+ resolved := targetURL.ResolveReference(&relativeURL)
+
+ resolvedHTTPPath := resolved.Path
+ if len(resolvedHTTPPath) > 0 && resolvedHTTPPath[0] == '/' {
+ resolvedHTTPPath = resolvedHTTPPath[1:] // trim leading slash. It's implied by the http-path component
+ }
+
+ resolvedHTTPPathComponent, err := ma.NewComponent("http-path", resolvedHTTPPath)
+ if err != nil {
+ return nil, fmt.Errorf("relative path is not a valid http-path: %w", err)
+ }
+
+ withoutPath, afterAndIncludingPath := ma.SplitFunc(originalMa, func(c ma.Component) bool {
+ return c.Protocol().Code == ma.P_HTTP_PATH
+ })
+ withNewPath := withoutPath.AppendComponent(resolvedHTTPPathComponent)
+ if len(afterAndIncludingPath) > 1 {
+ // Include after path since it may include other parts
+ withNewPath = append(withNewPath, afterAndIncludingPath[1:]...)
+ }
+ return url.Parse("multiaddr:" + withNewPath.String())
+}
+
+// roundTripperForSpecificServer is an http.RoundTripper that targets a
+// specific server, while still reusing the underlying RoundTripper for the
+// requests. The underlying RoundTripper MUST be an HTTP Transport.
+type roundTripperForSpecificServer struct {
+ http.RoundTripper
+ ownRoundtripper bool
+ httpHost *Host
+ server peer.ID
+ targetServerAddr string
+ sni string
+ scheme string
+ cachedProtos PeerMeta
+}
+
+func (rt *roundTripperForSpecificServer) GetPeerMetadata() (PeerMeta, error) {
+ // Do we already have the peer's protocol mapping?
+ if rt.cachedProtos != nil {
+ return rt.cachedProtos, nil
+ }
+
+ // if the underlying roundtripper implements GetPeerMetadata, use that
+ if g, ok := rt.RoundTripper.(PeerMetadataGetter); ok {
+ wk, err := g.GetPeerMetadata()
+ if err == nil {
+ rt.cachedProtos = wk
+ return wk, nil
+ }
+ }
+
+ ctx := context.Background()
+ ctx, cancel := context.WithDeadline(ctx, time.Now().Add(WellKnownRequestTimeout))
+ defer cancel()
+ wk, err := rt.httpHost.getAndStorePeerMetadata(ctx, rt, rt.server)
+ if err == nil {
+ rt.cachedProtos = wk
+ return wk, nil
+ }
+ return wk, err
+}
+
+// RoundTrip implements http.RoundTripper.
+func (rt *roundTripperForSpecificServer) RoundTrip(r *http.Request) (*http.Response, error) {
+ if (r.URL.Scheme != "" && r.URL.Scheme != rt.scheme) || (r.URL.Host != "" && r.URL.Host != rt.targetServerAddr) {
+ return nil, fmt.Errorf("this transport is only for requests to %s://%s", rt.scheme, rt.targetServerAddr)
+ }
+ r.URL.Scheme = rt.scheme
+ r.URL.Host = rt.targetServerAddr
+ r.Host = rt.sni
+ return rt.RoundTripper.RoundTrip(r)
+}
+
+func (rt *roundTripperForSpecificServer) CloseIdleConnections() {
+ if rt.ownRoundtripper {
+ // Safe to close idle connections, since we own the RoundTripper. We
+ // aren't closing other's idle connections.
+ type closeIdler interface {
+ CloseIdleConnections()
+ }
+ if tr, ok := rt.RoundTripper.(closeIdler); ok {
+ tr.CloseIdleConnections()
+ }
+ }
+ // Otherwise this is a no-op: we don't want users thinking they are closing
+ // idle connections for just this server when in fact they would be closing
+ // all idle connections.
+}
+
+// namespacedRoundTripper is a round tripper that prefixes all requests with a
+// given path prefix. It is used to namespace requests to a specific protocol.
+type namespacedRoundTripper struct {
+ http.RoundTripper
+ protocolPrefix string
+ protocolPrefixRaw string
+}
+
+func (rt *namespacedRoundTripper) GetPeerMetadata() (PeerMeta, error) {
+ if g, ok := rt.RoundTripper.(PeerMetadataGetter); ok {
+ return g.GetPeerMetadata()
+ }
+
+ return nil, fmt.Errorf("can not get peer protocol map. Inner roundtripper does not implement GetPeerMetadata")
+}
+
+// RoundTrip implements http.RoundTripper.
+func (rt *namespacedRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) {
+ if !strings.HasPrefix(r.URL.Path, rt.protocolPrefix) {
+ r.URL.Path = rt.protocolPrefix + r.URL.Path
+ }
+ if !strings.HasPrefix(r.URL.RawPath, rt.protocolPrefixRaw) {
+ r.URL.RawPath = rt.protocolPrefixRaw + r.URL.Path
+ }
+
+ return rt.RoundTripper.RoundTrip(r)
+}
+
+// NamespaceRoundTripper returns an http.RoundTripper that is scoped to the given protocol on the given server.
+func (h *Host) NamespaceRoundTripper(roundtripper http.RoundTripper, p protocol.ID, server peer.ID) (*namespacedRoundTripper, error) {
+ ctx := context.Background()
+ ctx, cancel := context.WithDeadline(ctx, time.Now().Add(WellKnownRequestTimeout))
+ defer cancel()
+ protos, err := h.getAndStorePeerMetadata(ctx, roundtripper, server)
+ if err != nil {
+ return &namespacedRoundTripper{}, err
+ }
+
+ v, ok := protos[p]
+ if !ok {
+ return &namespacedRoundTripper{}, fmt.Errorf("no protocol %s for server %s", p, server)
+ }
+
+ path := v.Path
+ if path[len(path)-1] == '/' {
+ // Trim the trailing slash, since it's common to make requests starting with a leading forward slash for the path
+ path = path[:len(path)-1]
+ }
+
+ u, err := url.Parse(path)
+ if err != nil {
+ return &namespacedRoundTripper{}, fmt.Errorf("invalid path %s for protocol %s for server %s", v.Path, p, server)
+ }
+
+ return &namespacedRoundTripper{
+ RoundTripper: roundtripper,
+ protocolPrefix: u.Path,
+ protocolPrefixRaw: u.RawPath,
+ }, nil
+}
+
+// NamespacedClient returns an http.Client that is scoped to the given protocol
+// on the given server. It creates a new RoundTripper for each call. If you are
+// creating many namespaced clients, consider creating a round tripper directly
+// and namespacing the round tripper yourself, then creating clients from the
+// namespaced round tripper.
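+//
+// A hedged usage sketch (the protocol ID and server info are illustrative):
+//
+//	client, err := h.NamespacedClient("/echo/1", peer.AddrInfo{ID: serverID, Addrs: serverAddrs})
+//	if err != nil {
+//		// handle error
+//	}
+//	resp, err := client.Get("/") // rewritten to the protocol's path prefix on the server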
+func (h *Host) NamespacedClient(p protocol.ID, server peer.AddrInfo, opts ...RoundTripperOption) (http.Client, error) {
+ rt, err := h.NewConstrainedRoundTripper(server, opts...)
+ if err != nil {
+ return http.Client{}, err
+ }
+
+ nrt, err := h.NamespaceRoundTripper(rt, p, server.ID)
+ if err != nil {
+ return http.Client{}, err
+ }
+
+ return http.Client{Transport: nrt}, nil
+}
+
+func (h *Host) initDefaultRT() {
+ h.createDefaultClientRoundTripper.Do(func() {
+ if h.DefaultClientRoundTripper == nil {
+ tr, ok := http.DefaultTransport.(*http.Transport)
+ if ok {
+ h.DefaultClientRoundTripper = tr
+ } else {
+ h.DefaultClientRoundTripper = &http.Transport{}
+ }
+ }
+ })
+}
+
+// RoundTrip implements http.RoundTripper for the HTTP Host.
+// This allows you to use the Host as a Transport for an http.Client.
+// See the example for idiomatic usage.
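+//
+// A hedged sketch (the server multiaddr is illustrative):
+//
+//	client := http.Client{Transport: h} // h is a *Host
+//	resp, err := client.Get("multiaddr:/ip4/127.0.0.1/tcp/8080/http")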
+func (h *Host) RoundTrip(r *http.Request) (*http.Response, error) {
+ switch r.URL.Scheme {
+ case "http", "https":
+ h.initDefaultRT()
+ if r.Host == "" {
+ r.Host = r.URL.Host
+ }
+ if h.ClientPeerIDAuth != nil && h.ClientPeerIDAuth.HasToken(r.Host) {
+ serverID, resp, err := h.ClientPeerIDAuth.AuthenticateWithRoundTripper(h.DefaultClientRoundTripper, r)
+ if err != nil {
+ return nil, err
+ }
+ ctxWithServerID := context.WithValue(r.Context(), serverPeerIDContextKey{}, serverID)
+ resp.Request = resp.Request.WithContext(ctxWithServerID)
+ return resp, nil
+ }
+ return h.DefaultClientRoundTripper.RoundTrip(r)
+ case "multiaddr":
+ break
+ default:
+ return nil, fmt.Errorf("unsupported scheme %s", r.URL.Scheme)
+ }
+
+ addr, err := ma.NewMultiaddr(r.URL.String()[len("multiaddr:"):])
+ if err != nil {
+ return nil, err
+ }
+ addr, isHTTP := normalizeHTTPMultiaddr(addr)
+ parsed, err := parseMultiaddr(addr)
+ if err != nil {
+ return nil, err
+ }
+
+ if isHTTP {
+ scheme := "http"
+ if parsed.useHTTPS {
+ scheme = "https"
+ }
+ u := url.URL{
+ Scheme: scheme,
+ Host: parsed.host + ":" + parsed.port,
+ Path: parsed.httpPath,
+ }
+ r.URL = &u
+
+ h.initDefaultRT()
+ rt := h.DefaultClientRoundTripper
+ sni := parsed.sni
+ if sni == "" {
+ sni = parsed.host
+ }
+
+ if sni != parsed.host {
+ // We have a different host and SNI (e.g. using an IP address but specifying a SNI)
+ // We need to make our own transport to support this.
+ //
+ // TODO: if we end up using this code path a lot, we could maintain
+ // a pool of these transports. For now though, it's here for
+ // completeness, but I don't expect us to hit it often.
+ rt = rt.Clone()
+ rt.TLSClientConfig.ServerName = parsed.sni
+ }
+
+ if parsed.peer != "" {
+ // The peer ID is present. We are making an authenticated request
+ if h.ClientPeerIDAuth == nil {
+ return nil, fmt.Errorf("can not authenticate server. Host.ClientPeerIDAuth field is not set")
+ }
+
+ if r.Host == "" {
+ // Missing a host header. Default to what we parsed earlier
+ r.Host = u.Host
+ }
+
+ serverID, resp, err := h.ClientPeerIDAuth.AuthenticateWithRoundTripper(rt, r)
+ if err != nil {
+ return nil, err
+ }
+
+ if serverID != parsed.peer {
+ resp.Body.Close()
+ return nil, fmt.Errorf("authenticated server ID does not match expected server ID")
+ }
+
+ ctxWithServerID := context.WithValue(r.Context(), serverPeerIDContextKey{}, serverID)
+ resp.Request = resp.Request.WithContext(ctxWithServerID)
+
+ return resp, nil
+ }
+
+ return rt.RoundTrip(r)
+ }
+
+ if h.StreamHost == nil {
+ return nil, fmt.Errorf("can not do HTTP over streams. Missing StreamHost")
+ }
+
+ if parsed.peer == "" {
+ return nil, fmt.Errorf("no peer ID in multiaddr")
+ }
+ withoutHTTPPath, _ := ma.SplitFunc(addr, func(c ma.Component) bool {
+ return c.Protocol().Code == ma.P_HTTP_PATH
+ })
+ h.StreamHost.Peerstore().AddAddrs(parsed.peer, []ma.Multiaddr{withoutHTTPPath}, peerstore.TempAddrTTL)
+
+ // Set the Opaque field to the http-path so that the HTTP request only makes
+ // a reference to that path and not the whole multiaddr uri
+ r.URL.Opaque = parsed.httpPath
+ if r.Host == "" {
+ // Fill in the host if it's not already set
+ r.Host = parsed.host + ":" + parsed.port
+ }
+ srt := streamRoundTripper{
+ server: parsed.peer,
+ skipAddAddrs: true,
+ httpHost: h,
+ h: h.StreamHost,
+ }
+ return srt.RoundTrip(r)
+}
+
+// NewConstrainedRoundTripper returns an http.RoundTripper that can fulfill an HTTP
+// request to the given server. It may use an HTTP transport or a stream based
+// transport. It is valid to pass an empty server.ID.
+// If there are multiple addresses for the server, it will pick the best
+// transport (stream vs standard HTTP) using the following rules:
+// - If PreferHTTPTransport is set, use the HTTP transport.
+// - If ServerMustAuthenticatePeerID is set, use the stream transport, as the
+// HTTP transport does not do peer id auth yet.
+// - If we already have a connection on a stream transport, use that.
+// - Otherwise, if we have both, use the HTTP transport.
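+//
+// A hedged usage sketch (server info is illustrative):
+//
+//	rt, err := h.NewConstrainedRoundTripper(peer.AddrInfo{ID: serverID, Addrs: serverAddrs}, PreferHTTPTransport)
+//	if err != nil {
+//		// handle error
+//	}
+//	client := &http.Client{Transport: rt}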
+func (h *Host) NewConstrainedRoundTripper(server peer.AddrInfo, opts ...RoundTripperOption) (http.RoundTripper, error) {
+ options := roundTripperOpts{}
+ for _, o := range opts {
+ options = o(options)
+ }
+
+ if options.serverMustAuthenticatePeerID && server.ID == "" {
+ return nil, fmt.Errorf("server must authenticate peer ID, but no peer ID provided")
+ }
+
+ httpAddrs := make([]ma.Multiaddr, 0, 1) // The common case of a single http address
+ nonHTTPAddrs := make([]ma.Multiaddr, 0, len(server.Addrs))
+
+ firstAddrIsHTTP := false
+
+ for i, addr := range server.Addrs {
+ addr, isHTTP := normalizeHTTPMultiaddr(addr)
+ if isHTTP {
+ if i == 0 {
+ firstAddrIsHTTP = true
+ }
+ httpAddrs = append(httpAddrs, addr)
+ } else {
+ nonHTTPAddrs = append(nonHTTPAddrs, addr)
+ }
+ }
+
+ // Do we have an existing connection to this peer?
+ existingStreamConn := false
+ if server.ID != "" && h.StreamHost != nil {
+ existingStreamConn = len(h.StreamHost.Network().ConnsToPeer(server.ID)) > 0
+ }
+
+ // Currently the HTTP transport can not authenticate peer IDs.
+ if !options.serverMustAuthenticatePeerID && len(httpAddrs) > 0 && (options.preferHTTPTransport || (firstAddrIsHTTP && !existingStreamConn)) {
+ parsed, err := parseMultiaddr(httpAddrs[0])
+ if err != nil {
+ return nil, err
+ }
+ scheme := "http"
+ if parsed.useHTTPS {
+ scheme = "https"
+ }
+
+ h.initDefaultRT()
+ rt := h.DefaultClientRoundTripper
+ ownRoundtripper := false
+ if parsed.sni != parsed.host {
+ // We have a different host and SNI (e.g. using an IP address but specifying a SNI)
+ // We need to make our own transport to support this.
+ rt = rt.Clone()
+ rt.TLSClientConfig.ServerName = parsed.sni
+ ownRoundtripper = true
+ }
+
+ return &roundTripperForSpecificServer{
+ RoundTripper: rt,
+ ownRoundtripper: ownRoundtripper,
+ httpHost: h,
+ server: server.ID,
+ targetServerAddr: parsed.host + ":" + parsed.port,
+ sni: parsed.sni,
+ scheme: scheme,
+ }, nil
+ }
+
+ // Otherwise use a stream based transport
+ if h.StreamHost == nil {
+ return nil, fmt.Errorf("can not use the HTTP transport (either no address or PeerID auth is required), and no stream host provided")
+ }
+ if !existingStreamConn {
+ if server.ID == "" {
+ return nil, fmt.Errorf("can not use the HTTP transport, and no server peer ID provided")
+ }
+ }
+
+ return &streamRoundTripper{h: h.StreamHost, server: server.ID, serverAddrs: nonHTTPAddrs, httpHost: h}, nil
+}
+
+type explodedMultiaddr struct {
+ useHTTPS bool
+ host string
+ port string
+ sni string
+ httpPath string
+ peer peer.ID
+}
+
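+// parseMultiaddr explodes a multiaddr into the parts needed to make an HTTP
+// request. For example (an illustrative address), parsing
+// /ip4/127.0.0.1/tcp/8080/tls/sni/example.com/http/http-path/foo yields
+// host "127.0.0.1", port "8080", useHTTPS true, sni "example.com", and
+// httpPath "/foo".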
+func parseMultiaddr(addr ma.Multiaddr) (explodedMultiaddr, error) {
+ out := explodedMultiaddr{}
+ var err error
+ ma.ForEach(addr, func(c ma.Component) bool {
+ switch c.Protocol().Code {
+ case ma.P_IP4, ma.P_IP6, ma.P_DNS, ma.P_DNS4, ma.P_DNS6:
+ out.host = c.Value()
+ case ma.P_TCP, ma.P_UDP:
+ out.port = c.Value()
+ case ma.P_TLS, ma.P_HTTPS:
+ out.useHTTPS = true
+ case ma.P_SNI:
+ out.sni = c.Value()
+ case ma.P_HTTP_PATH:
+ out.httpPath, err = url.QueryUnescape(c.Value())
+ if err == nil && len(out.httpPath) > 0 && out.httpPath[0] != '/' {
+ out.httpPath = "/" + out.httpPath
+ }
+ case ma.P_P2P:
+ out.peer, err = peer.Decode(c.Value())
+ }
+
+ // stop if there is an error, otherwise iterate over all components in case this is a circuit address
+ return err == nil
+ })
+
+ if out.useHTTPS && out.sni == "" {
+ out.sni = out.host
+ }
+
+ if out.httpPath == "" {
+ out.httpPath = "/"
+ }
+ return out, err
+}
+
+var httpComponent, _ = ma.NewComponent("http", "")
+var tlsComponent, _ = ma.NewComponent("tls", "")
+
+// normalizeHTTPMultiaddr converts an https multiaddr to a tls/http one.
+// Returns a bool indicating if the input multiaddr has an http (or https) component.
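+// For example, /dns/example.com/tcp/443/https is normalized to
+// /dns/example.com/tcp/443/tls/http.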
+func normalizeHTTPMultiaddr(addr ma.Multiaddr) (ma.Multiaddr, bool) {
+ isHTTPMultiaddr := false
+ beforeHTTPS, afterIncludingHTTPS := ma.SplitFunc(addr, func(c ma.Component) bool {
+ if c.Protocol().Code == ma.P_HTTP {
+ isHTTPMultiaddr = true
+ }
+
+ if c.Protocol().Code == ma.P_HTTPS {
+ isHTTPMultiaddr = true
+ return true
+ }
+ return false
+ })
+ if beforeHTTPS == nil || !isHTTPMultiaddr {
+ return addr, false
+ }
+
+ if afterIncludingHTTPS == nil {
+ // No HTTPS component, just return the original
+ return addr, isHTTPMultiaddr
+ }
+
+ _, afterHTTPS := ma.SplitFirst(afterIncludingHTTPS)
+ if afterHTTPS == nil {
+ return beforeHTTPS.AppendComponent(tlsComponent, httpComponent), isHTTPMultiaddr
+ }
+
+ t := beforeHTTPS.AppendComponent(tlsComponent, httpComponent)
+ t = append(t, afterHTTPS...)
+ return t, isHTTPMultiaddr
+}
+
+// getAndStorePeerMetadata fetches the peer's well-known protocol mapping and
+// returns it. The mapping is only cached if the server's peer ID is
+// provided.
+func (h *Host) getAndStorePeerMetadata(ctx context.Context, roundtripper http.RoundTripper, server peer.ID) (PeerMeta, error) {
+ if h.peerMetadata == nil {
+ h.peerMetadata = newPeerMetadataCache()
+ }
+ if meta, ok := h.peerMetadata.Get(server); server != "" && ok {
+ return meta, nil
+ }
+
+ var meta PeerMeta
+ var err error
+ if h.EnableCompatibilityWithLegacyWellKnownEndpoint {
+ type metaAndErr struct {
+ m PeerMeta
+ err error
+ }
+ legacyRespCh := make(chan metaAndErr, 1)
+ wellKnownRespCh := make(chan metaAndErr, 1)
+ ctx, cancel := context.WithCancel(ctx)
+ go func() {
+ meta, err := requestPeerMeta(ctx, roundtripper, LegacyWellKnownProtocols)
+ legacyRespCh <- metaAndErr{meta, err}
+ }()
+ go func() {
+ meta, err := requestPeerMeta(ctx, roundtripper, WellKnownProtocols)
+ wellKnownRespCh <- metaAndErr{meta, err}
+ }()
+ select {
+ case resp := <-legacyRespCh:
+ if resp.err != nil {
+ resp = <-wellKnownRespCh
+ }
+ meta, err = resp.m, resp.err
+ case resp := <-wellKnownRespCh:
+ if resp.err != nil {
+ legacyResp := <-legacyRespCh
+ if legacyResp.err != nil {
+ // If both endpoints error, return the error from the well
+ // known resource (not the legacy well known resource)
+ meta, err = resp.m, resp.err
+ } else {
+ meta, err = legacyResp.m, legacyResp.err
+ }
+ } else {
+ meta, err = resp.m, resp.err
+ }
+ }
+ cancel()
+ } else {
+ meta, err = requestPeerMeta(ctx, roundtripper, WellKnownProtocols)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ if server != "" {
+ h.peerMetadata.Add(server, meta)
+ }
+
+ return meta, nil
+}
+
+func requestPeerMeta(ctx context.Context, roundtripper http.RoundTripper, wellKnownResource string) (PeerMeta, error) {
+ req, err := http.NewRequestWithContext(ctx, "GET", wellKnownResource, nil)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Accept", "application/json")
+
+ client := http.Client{Transport: roundtripper}
+ resp, err := client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("unexpected status code: %d", resp.StatusCode)
+ }
+
+ meta := PeerMeta{}
+ err = json.NewDecoder(&io.LimitedReader{
+ R: resp.Body,
+ N: peerMetadataLimit,
+ }).Decode(&meta)
+ if err != nil {
+ return nil, err
+ }
+
+ return meta, nil
+}
+
+// SetPeerMetadata adds a peer's protocol metadata to the http host. Useful if
+// you have out-of-band knowledge of a peer's protocol mapping.
+func (h *Host) SetPeerMetadata(server peer.ID, meta PeerMeta) {
+ if h.peerMetadata == nil {
+ h.peerMetadata = newPeerMetadataCache()
+ }
+ h.peerMetadata.Add(server, meta)
+}
+
+// AddPeerMetadata merges the given peer's protocol metadata into the http
+// host. Useful if you have out-of-band knowledge of a peer's protocol
+// mapping.
+func (h *Host) AddPeerMetadata(server peer.ID, meta PeerMeta) {
+ if h.peerMetadata == nil {
+ h.peerMetadata = newPeerMetadataCache()
+ }
+ origMeta, ok := h.peerMetadata.Get(server)
+ if !ok {
+ h.peerMetadata.Add(server, meta)
+ return
+ }
+ for proto, m := range meta {
+ origMeta[proto] = m
+ }
+ h.peerMetadata.Add(server, origMeta)
+}
+
+// GetPeerMetadata gets a peer's cached protocol metadata from the http host.
+func (h *Host) GetPeerMetadata(server peer.ID) (PeerMeta, bool) {
+ if h.peerMetadata == nil {
+ return nil, false
+ }
+ return h.peerMetadata.Get(server)
+}
+
+// RemovePeerMetadata removes a peer's protocol metadata from the http host
+func (h *Host) RemovePeerMetadata(server peer.ID) {
+ if h.peerMetadata == nil {
+ return
+ }
+ h.peerMetadata.Remove(server)
+}
+
+func connectionCloseHeaderMiddleware(next http.Handler) http.Handler {
+ // Sets connection: close. It's preferable to not reuse streams for HTTP.
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Connection", "close")
+ next.ServeHTTP(w, r)
+ })
+}
+
+// maybeDecorateContextWithAuthMiddleware decorates the request context with
+// the authenticated client's peer ID if serverAuth is provided.
+func maybeDecorateContextWithAuthMiddleware(serverAuth *httpauth.ServerPeerIDAuth, next http.Handler) http.Handler {
+ if next == nil {
+ return nil
+ }
+ if serverAuth == nil {
+ return next
+ }
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if httpauth.HasAuthHeader(r) {
+ serverAuth.ServeHTTPWithNextHandler(w, r, func(p peer.ID, w http.ResponseWriter, r *http.Request) {
+ r = r.WithContext(context.WithValue(r.Context(), clientPeerIDContextKey{}, p))
+ next.ServeHTTP(w, r)
+ })
+ return
+ }
+ // No auth header present; serve the request without an authenticated
+ // client peer ID in the context.
+ next.ServeHTTP(w, r)
+ })
+}
diff --git a/p2p/http/libp2phttp_test.go b/p2p/http/libp2phttp_test.go
new file mode 100644
index 0000000000..5737c28ce3
--- /dev/null
+++ b/p2p/http/libp2phttp_test.go
@@ -0,0 +1,1148 @@
+package libp2phttp_test
+
+import (
+ "bytes"
+ "context"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/tls"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/json"
+ "fmt"
+ "io"
+ "math/big"
+ "net"
+ "net/http"
+ "net/netip"
+ "net/url"
+ "os"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p"
+ "github.com/libp2p/go-libp2p/core/crypto"
+ host "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/peer"
+ libp2phttp "github.com/libp2p/go-libp2p/p2p/http"
+ httpauth "github.com/libp2p/go-libp2p/p2p/http/auth"
+ httpping "github.com/libp2p/go-libp2p/p2p/http/ping"
+ libp2pquic "github.com/libp2p/go-libp2p/p2p/transport/quic"
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestHTTPOverStreams(t *testing.T) {
+ serverHost, err := libp2p.New(
+ libp2p.ListenAddrStrings("/ip4/127.0.0.1/udp/0/quic-v1"),
+ )
+ require.NoError(t, err)
+
+ httpHost := libp2phttp.Host{StreamHost: serverHost}
+
+ httpHost.SetHTTPHandler("/hello", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+ w.Write([]byte("hello"))
+ }))
+
+ // Start server
+ go httpHost.Serve()
+ defer httpHost.Close()
+
+ // Start client
+ clientHost, err := libp2p.New(libp2p.NoListenAddrs)
+ require.NoError(t, err)
+ clientHost.Connect(context.Background(), peer.AddrInfo{
+ ID: serverHost.ID(),
+ Addrs: serverHost.Addrs(),
+ })
+
+ clientRT, err := (&libp2phttp.Host{StreamHost: clientHost}).NewConstrainedRoundTripper(peer.AddrInfo{ID: serverHost.ID()})
+ require.NoError(t, err)
+
+ client := &http.Client{Transport: clientRT}
+
+ resp, err := client.Get("/hello")
+ require.NoError(t, err)
+ defer resp.Body.Close()
+
+ body, err := io.ReadAll(resp.Body)
+ require.NoError(t, err)
+
+ require.Equal(t, "hello", string(body))
+}
+
+func TestHTTPOverStreamsSendsConnectionClose(t *testing.T) {
+ serverHost, err := libp2p.New(
+ libp2p.ListenAddrStrings("/ip4/127.0.0.1/udp/0/quic-v1"),
+ )
+ require.NoError(t, err)
+
+ httpHost := libp2phttp.Host{StreamHost: serverHost}
+
+ connectionHeaderVal := make(chan string, 1)
+ httpHost.SetHTTPHandlerAtPath("/hello", "/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte("hello"))
+ connectionHeaderVal <- r.Header.Get("Connection")
+ }))
+
+ // Start server
+ go httpHost.Serve()
+ defer httpHost.Close()
+
+ // Start client
+ clientHost, err := libp2p.New(libp2p.NoListenAddrs)
+ require.NoError(t, err)
+ clientHost.Connect(context.Background(), peer.AddrInfo{
+ ID: serverHost.ID(),
+ Addrs: serverHost.Addrs(),
+ })
+ clientHttpHost := libp2phttp.Host{StreamHost: clientHost}
+ rt, err := clientHttpHost.NewConstrainedRoundTripper(peer.AddrInfo{ID: serverHost.ID()})
+ require.NoError(t, err)
+ client := &http.Client{Transport: rt}
+ _, err = client.Get("/")
+ require.NoError(t, err)
+
+ select {
+ case val := <-connectionHeaderVal:
+ require.Equal(t, "close", strings.ToLower(val))
+ case <-time.After(5 * time.Second):
+ t.Fatal("timeout waiting for connection header")
+ }
+}
+
+func TestHTTPOverStreamsContextAndClientTimeout(t *testing.T) {
+ const clientTimeout = 200 * time.Millisecond
+
+ serverHost, err := libp2p.New(
+ libp2p.ListenAddrStrings("/ip4/127.0.0.1/udp/0/quic-v1"),
+ )
+ require.NoError(t, err)
+
+ httpHost := libp2phttp.Host{StreamHost: serverHost}
+ httpHost.SetHTTPHandler("/hello/", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+ time.Sleep(2 * clientTimeout)
+ w.Write([]byte("hello"))
+ }))
+
+ // Start server
+ go httpHost.Serve()
+ defer httpHost.Close()
+
+ // Start client
+ clientHost, err := libp2p.New(libp2p.NoListenAddrs)
+ require.NoError(t, err)
+ clientHost.Connect(context.Background(), peer.AddrInfo{
+ ID: serverHost.ID(),
+ Addrs: serverHost.Addrs(),
+ })
+
+ clientRT, err := (&libp2phttp.Host{StreamHost: clientHost}).NewConstrainedRoundTripper(peer.AddrInfo{ID: serverHost.ID()})
+ require.NoError(t, err)
+
+ ctx, cancel := context.WithTimeout(context.Background(), clientTimeout)
+ defer cancel()
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, "/hello/", nil)
+ require.NoError(t, err)
+
+ client := &http.Client{Transport: clientRT}
+ _, err = client.Do(req)
+ require.Error(t, err)
+ require.ErrorIs(t, err, os.ErrDeadlineExceeded)
+ t.Log("OK, deadline exceeded waiting for response as expected")
+
+ // Make another request, this time using http.Client.Timeout.
+ clientRT, err = (&libp2phttp.Host{StreamHost: clientHost}).NewConstrainedRoundTripper(peer.AddrInfo{ID: serverHost.ID()})
+ require.NoError(t, err)
+
+ client = &http.Client{
+ Transport: clientRT,
+ Timeout: clientTimeout,
+ }
+
+ _, err = client.Get("/hello/")
+ require.Error(t, err)
+ var uerr *url.Error
+ require.ErrorAs(t, err, &uerr)
+ require.True(t, uerr.Timeout())
+ t.Log("OK, timed out waiting for response as expected")
+}
+
+func TestHTTPOverStreamsReturnsConnectionClose(t *testing.T) {
+ serverHost, err := libp2p.New(
+ libp2p.ListenAddrStrings("/ip4/127.0.0.1/udp/0/quic-v1"),
+ )
+ require.NoError(t, err)
+
+ httpHost := libp2phttp.Host{StreamHost: serverHost}
+
+ httpHost.SetHTTPHandlerAtPath("/hello", "/", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+ w.Write([]byte("hello"))
+ }))
+
+ // Start server
+ go httpHost.Serve()
+ defer httpHost.Close()
+
+ // Start client
+ clientHost, err := libp2p.New(libp2p.NoListenAddrs)
+ require.NoError(t, err)
+ clientHost.Connect(context.Background(), peer.AddrInfo{
+ ID: serverHost.ID(),
+ Addrs: serverHost.Addrs(),
+ })
+
+ s, err := clientHost.NewStream(context.Background(), serverHost.ID(), libp2phttp.ProtocolIDForMultistreamSelect)
+ require.NoError(t, err)
+ _, err = s.Write([]byte("GET / HTTP/1.1\r\nHost: \r\n\r\n"))
+ require.NoError(t, err)
+
+ out := make([]byte, 1024)
+ n, err := s.Read(out)
+ if err != io.EOF {
+ require.NoError(t, err)
+ }
+
+ require.Contains(t, strings.ToLower(string(out[:n])), "connection: close")
+}
+
+func TestRoundTrippers(t *testing.T) {
+ serverHost, err := libp2p.New(
+ libp2p.ListenAddrStrings("/ip4/127.0.0.1/udp/0/quic-v1"),
+ )
+ require.NoError(t, err)
+
+ httpHost := libp2phttp.Host{
+ InsecureAllowHTTP: true,
+ StreamHost: serverHost,
+ ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/http")},
+ }
+
+ httpHost.SetHTTPHandler("/hello", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+ w.Write([]byte("hello"))
+ }))
+
+ // Start the server (listens on both libp2p streams and HTTP)
+ go httpHost.Serve()
+ defer httpHost.Close()
+
+ serverMultiaddrs := httpHost.Addrs()
+ serverHTTPAddr := serverMultiaddrs[1]
+
+ testCases := []struct {
+ name string
+ setupRoundTripper func(t *testing.T, clientStreamHost host.Host, clientHTTPHost *libp2phttp.Host) http.RoundTripper
+ expectStreamRoundTripper bool
+ }{
+ {
+ name: "HTTP preferred",
+ setupRoundTripper: func(t *testing.T, _ host.Host, clientHTTPHost *libp2phttp.Host) http.RoundTripper {
+ rt, err := clientHTTPHost.NewConstrainedRoundTripper(peer.AddrInfo{
+ ID: serverHost.ID(),
+ Addrs: serverMultiaddrs,
+ }, libp2phttp.PreferHTTPTransport)
+ require.NoError(t, err)
+ return rt
+ },
+ },
+ {
+ name: "HTTP first",
+ setupRoundTripper: func(t *testing.T, _ host.Host, clientHTTPHost *libp2phttp.Host) http.RoundTripper {
+ rt, err := clientHTTPHost.NewConstrainedRoundTripper(peer.AddrInfo{
+ ID: serverHost.ID(),
+ Addrs: []ma.Multiaddr{serverHTTPAddr, serverHost.Addrs()[0]},
+ })
+ require.NoError(t, err)
+ return rt
+ },
+ },
+ {
+ name: "No HTTP transport",
+ setupRoundTripper: func(t *testing.T, _ host.Host, clientHTTPHost *libp2phttp.Host) http.RoundTripper {
+ rt, err := clientHTTPHost.NewConstrainedRoundTripper(peer.AddrInfo{
+ ID: serverHost.ID(),
+ Addrs: []ma.Multiaddr{serverHost.Addrs()[0]},
+ })
+ require.NoError(t, err)
+ return rt
+ },
+ expectStreamRoundTripper: true,
+ },
+ {
+ name: "Stream transport first",
+ setupRoundTripper: func(t *testing.T, _ host.Host, clientHTTPHost *libp2phttp.Host) http.RoundTripper {
+ rt, err := clientHTTPHost.NewConstrainedRoundTripper(peer.AddrInfo{
+ ID: serverHost.ID(),
+ Addrs: []ma.Multiaddr{serverHost.Addrs()[0], serverHTTPAddr},
+ })
+ require.NoError(t, err)
+ return rt
+ },
+ expectStreamRoundTripper: true,
+ },
+ {
+ name: "Existing stream transport connection",
+ setupRoundTripper: func(t *testing.T, clientStreamHost host.Host, clientHTTPHost *libp2phttp.Host) http.RoundTripper {
+ clientStreamHost.Connect(context.Background(), peer.AddrInfo{
+ ID: serverHost.ID(),
+ Addrs: serverHost.Addrs(),
+ })
+ rt, err := clientHTTPHost.NewConstrainedRoundTripper(peer.AddrInfo{
+ ID: serverHost.ID(),
+ Addrs: []ma.Multiaddr{serverHTTPAddr, serverHost.Addrs()[0]},
+ })
+ require.NoError(t, err)
+ return rt
+ },
+ expectStreamRoundTripper: true,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ // Start client
+ clientStreamHost, err := libp2p.New(libp2p.NoListenAddrs)
+ require.NoError(t, err)
+ defer clientStreamHost.Close()
+
+ clientHttpHost := &libp2phttp.Host{StreamHost: clientStreamHost}
+
+ rt := tc.setupRoundTripper(t, clientStreamHost, clientHttpHost)
+ if tc.expectStreamRoundTripper {
+ // Hack to get the private type of this roundtripper
+ typ := reflect.TypeOf(rt).String()
+ require.Contains(t, typ, "streamRoundTripper", "Expected stream based round tripper")
+ }
+
+ for _, tc := range []bool{true, false} {
+ name := ""
+ if tc {
+ name = "with namespaced roundtripper"
+ }
+ t.Run(name, func(t *testing.T) {
+ var resp *http.Response
+ var err error
+ if tc {
+ var h libp2phttp.Host
+ require.NoError(t, err)
+ nrt, err := h.NamespaceRoundTripper(rt, "/hello", serverHost.ID())
+ require.NoError(t, err)
+ client := &http.Client{Transport: nrt}
+ resp, err = client.Get("/")
+ require.NoError(t, err)
+ } else {
+ client := &http.Client{Transport: rt}
+ resp, err = client.Get("/hello/")
+ require.NoError(t, err)
+ }
+ defer resp.Body.Close()
+
+ body, err := io.ReadAll(resp.Body)
+ require.NoError(t, err)
+ require.Equal(t, "hello", string(body))
+ })
+ }
+
+ // Read the well-known resource
+ wk, err := rt.(libp2phttp.PeerMetadataGetter).GetPeerMetadata()
+ require.NoError(t, err)
+
+ expectedMap := make(libp2phttp.PeerMeta)
+ expectedMap["/hello"] = libp2phttp.ProtocolMeta{Path: "/hello/"}
+ require.Equal(t, expectedMap, wk)
+ })
+ }
+}
+
+func TestPlainOldHTTPServer(t *testing.T) {
+ mux := http.NewServeMux()
+ wk := libp2phttp.WellKnownHandler{}
+ mux.Handle(libp2phttp.WellKnownProtocols, &wk)
+
+ mux.Handle("/ping/", httpping.Ping{})
+ wk.AddProtocolMeta(httpping.PingProtocolID, libp2phttp.ProtocolMeta{Path: "/ping/"})
+
+ server := &http.Server{Addr: "127.0.0.1:0", Handler: mux}
+
+ l, err := net.Listen("tcp", server.Addr)
+ require.NoError(t, err)
+
+ go server.Serve(l)
+ defer server.Close()
+
+ // That's all for the server, now the client:
+
+ serverAddrParts := strings.Split(l.Addr().String(), ":")
+
+ testCases := []struct {
+ name string
+ do func(*testing.T, *http.Request) (*http.Response, error)
+ getWellKnown func(*testing.T) (libp2phttp.PeerMeta, error)
+ }{
+ {
+ name: "using libp2phttp",
+ do: func(t *testing.T, request *http.Request) (*http.Response, error) {
+ var clientHttpHost libp2phttp.Host
+ rt, err := clientHttpHost.NewConstrainedRoundTripper(peer.AddrInfo{Addrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/" + serverAddrParts[1] + "/http")}})
+ require.NoError(t, err)
+
+ client := &http.Client{Transport: rt}
+ return client.Do(request)
+ },
+ getWellKnown: func(t *testing.T) (libp2phttp.PeerMeta, error) {
+ var clientHttpHost libp2phttp.Host
+ rt, err := clientHttpHost.NewConstrainedRoundTripper(peer.AddrInfo{Addrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/" + serverAddrParts[1] + "/http")}})
+ require.NoError(t, err)
+ return rt.(libp2phttp.PeerMetadataGetter).GetPeerMetadata()
+ },
+ },
+ {
+ name: "using stock http client",
+ do: func(_ *testing.T, request *http.Request) (*http.Response, error) {
+ request.URL.Scheme = "http"
+ request.URL.Host = l.Addr().String()
+ request.Host = l.Addr().String()
+
+ client := http.Client{}
+ return client.Do(request)
+ },
+ getWellKnown: func(t *testing.T) (libp2phttp.PeerMeta, error) {
+ client := http.Client{}
+ resp, err := client.Get("http://" + l.Addr().String() + libp2phttp.WellKnownProtocols)
+ require.NoError(t, err)
+
+ b, err := io.ReadAll(resp.Body)
+ require.NoError(t, err)
+
+ var out libp2phttp.PeerMeta
+ err = json.Unmarshal(b, &out)
+ return out, err
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ body := [32]byte{}
+ _, err = rand.Reader.Read(body[:])
+ require.NoError(t, err)
+ req, err := http.NewRequest(http.MethodPost, "/ping/", bytes.NewReader(body[:]))
+ require.NoError(t, err)
+ resp, err := tc.do(t, req)
+ require.NoError(t, err)
+ require.Equal(t, http.StatusOK, resp.StatusCode)
+ rBody := [32]byte{}
+ _, err = io.ReadFull(resp.Body, rBody[:])
+ require.NoError(t, err)
+ require.Equal(t, body, rBody)
+
+ // Make sure we can get the well known resource
+ protoMap, err := tc.getWellKnown(t)
+ require.NoError(t, err)
+
+ expectedMap := make(libp2phttp.PeerMeta)
+ expectedMap[httpping.PingProtocolID] = libp2phttp.ProtocolMeta{Path: "/ping/"}
+ require.Equal(t, expectedMap, protoMap)
+ })
+ }
+}
+
+func TestHostZeroValue(t *testing.T) {
+ server := libp2phttp.Host{
+ InsecureAllowHTTP: true,
+ ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/http")},
+ }
+ server.SetHTTPHandler("/hello", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.Write([]byte("hello")) }))
+ go func() {
+ server.Serve()
+ }()
+ defer server.Close()
+
+ c := libp2phttp.Host{}
+ client, err := c.NamespacedClient("/hello", peer.AddrInfo{Addrs: server.Addrs()})
+ require.NoError(t, err)
+ resp, err := client.Get("/")
+ require.NoError(t, err)
+ defer resp.Body.Close()
+ body, err := io.ReadAll(resp.Body)
+ require.NoError(t, err)
+
+ require.Equal(t, "hello", string(body), "expected response from server")
+}
+
+func TestHTTPS(t *testing.T) {
+ server := libp2phttp.Host{
+ TLSConfig: selfSignedTLSConfig(t),
+ ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/https")},
+ }
+ server.SetHTTPHandler(httpping.PingProtocolID, httpping.Ping{})
+ go func() {
+ server.Serve()
+ }()
+ defer server.Close()
+
+ clientTransport := http.DefaultTransport.(*http.Transport).Clone()
+ clientTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
+ client := libp2phttp.Host{
+ DefaultClientRoundTripper: clientTransport,
+ }
+ httpClient, err := client.NamespacedClient(httpping.PingProtocolID, peer.AddrInfo{Addrs: server.Addrs()})
+ require.NoError(t, err)
+ err = httpping.SendPing(httpClient)
+ require.NoError(t, err)
+}
+
+func selfSignedTLSConfig(t *testing.T) *tls.Config {
+ t.Helper()
+ priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ require.NoError(t, err)
+
+ notBefore := time.Now()
+ notAfter := notBefore.Add(365 * 24 * time.Hour)
+
+ serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
+ serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
+ require.NoError(t, err)
+
+ certTemplate := x509.Certificate{
+ SerialNumber: serialNumber,
+ Subject: pkix.Name{
+ Organization: []string{"Test"},
+ },
+ NotBefore: notBefore,
+ NotAfter: notAfter,
+ KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
+ ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
+ BasicConstraintsValid: true,
+ }
+
+ derBytes, err := x509.CreateCertificate(rand.Reader, &certTemplate, &certTemplate, &priv.PublicKey, priv)
+ require.NoError(t, err)
+
+ cert := tls.Certificate{
+ Certificate: [][]byte{derBytes},
+ PrivateKey: priv,
+ }
+
+ tlsConfig := &tls.Config{
+ Certificates: []tls.Certificate{cert},
+ }
+ return tlsConfig
+}
+
+func TestCustomServeMux(t *testing.T) {
+ serveMux := http.NewServeMux()
+ serveMux.Handle("/ping/", httpping.Ping{})
+
+ server := libp2phttp.Host{
+ ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/http")},
+ ServeMux: serveMux,
+ InsecureAllowHTTP: true,
+ }
+ server.WellKnownHandler.AddProtocolMeta(httpping.PingProtocolID, libp2phttp.ProtocolMeta{Path: "/ping/"})
+ go func() {
+ server.Serve()
+ }()
+ defer server.Close()
+
+ addrs := server.Addrs()
+ require.Len(t, addrs, 1)
+ var clientHttpHost libp2phttp.Host
+ rt, err := clientHttpHost.NewConstrainedRoundTripper(peer.AddrInfo{Addrs: addrs}, libp2phttp.PreferHTTPTransport)
+ require.NoError(t, err)
+
+ client := &http.Client{Transport: rt}
+ body := [32]byte{}
+ req, _ := http.NewRequest(http.MethodPost, "/ping/", bytes.NewReader(body[:]))
+ resp, err := client.Do(req)
+ require.NoError(t, err)
+ require.Equal(t, 200, resp.StatusCode)
+}
+
+func TestSetHandlerAtPath(t *testing.T) {
+ hf := func(w http.ResponseWriter, _ *http.Request) {
+ w.Header().Add("Content-Type", "text/plain")
+ w.Write([]byte("Hello World"))
+ }
+ tests := []struct {
+ prefix, rest string
+ paths200 []string
+ paths404 []string
+ }{
+ {
+ prefix: "/",
+ rest: "/",
+ paths200: []string{"/", "/a/", "/b", "/a/b"},
+ },
+ {
+ prefix: "/a",
+ rest: "/b/",
+ paths200: []string{"/a/b/", "///a///b/", "/a/b/c"},
+ // Not being able to serve /a/b when handling /a/b/ is a rather annoying
+ // limitation of the http.StripPrefix mechanism. This happens because /a/b
+ // is redirected to /b/, as the prefix /a is stripped when the redirect
+ // happens.
+ },
+ {
+ prefix: "/",
+ rest: "/b/",
+ paths200: []string{"/b", "/b/c", "/b/c/"},
+ paths404: []string{"/", "/a/b"},
+ },
+ }
+ for i, tc := range tests {
+ t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+ nestedMx := http.NewServeMux()
+ nestedMx.HandleFunc(tc.rest, hf)
+ server := libp2phttp.Host{
+ ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/http")},
+ InsecureAllowHTTP: true,
+ }
+ server.SetHTTPHandlerAtPath("test", tc.prefix, nestedMx)
+ go func() {
+ server.Serve()
+ }()
+ defer server.Close()
+ addrs := server.Addrs()
+ require.Len(t, addrs, 1)
+ port, err := addrs[0].ValueForProtocol(ma.P_TCP)
+ require.NoError(t, err)
+ httpAddr := fmt.Sprintf("http://127.0.0.1:%s", port)
+ for _, p := range tc.paths200 {
+ resp, err := http.Get(httpAddr + p)
+ require.NoError(t, err)
+ require.Equal(t, 200, resp.StatusCode, "path:%s", p)
+ resp.Body.Close()
+ }
+ for _, p := range tc.paths404 {
+ resp, _ := http.Get(httpAddr + p)
+ require.Equal(t, 404, resp.StatusCode, "path:%s", p)
+ resp.Body.Close()
+ }
+ })
+ }
+}
+
+func TestServerLegacyWellKnownResource(t *testing.T) {
+ mkHTTPServer := func(wellKnown string) ma.Multiaddr {
+ mux := http.NewServeMux()
+ wk := libp2phttp.WellKnownHandler{}
+ mux.Handle(wellKnown, &wk)
+
+ mux.Handle("/ping/", httpping.Ping{})
+ wk.AddProtocolMeta(httpping.PingProtocolID, libp2phttp.ProtocolMeta{Path: "/ping/"})
+
+ server := &http.Server{Addr: "127.0.0.1:0", Handler: mux}
+
+ l, err := net.Listen("tcp", server.Addr)
+ require.NoError(t, err)
+
+ go server.Serve(l)
+ t.Cleanup(func() { server.Close() })
+ addrPort, err := netip.ParseAddrPort(l.Addr().String())
+ require.NoError(t, err)
+ return ma.StringCast(fmt.Sprintf("/ip4/%s/tcp/%d/http", addrPort.Addr().String(), addrPort.Port()))
+ }
+
+ mkServerlibp2phttp := func(enableLegacyWellKnown bool) ma.Multiaddr {
+ server := libp2phttp.Host{
+ EnableCompatibilityWithLegacyWellKnownEndpoint: enableLegacyWellKnown,
+ ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/http")},
+ InsecureAllowHTTP: true,
+ }
+ server.SetHTTPHandler(httpping.PingProtocolID, httpping.Ping{})
+ go server.Serve()
+ t.Cleanup(func() { server.Close() })
+ return server.Addrs()[0]
+ }
+
+ type testCase struct {
+ name string
+ client libp2phttp.Host
+ serverAddr ma.Multiaddr
+ expectErr bool
+ }
+
+ var testCases = []testCase{
+ {
+ name: "legacy server, client with compat",
+ client: libp2phttp.Host{EnableCompatibilityWithLegacyWellKnownEndpoint: true},
+ serverAddr: mkHTTPServer(libp2phttp.LegacyWellKnownProtocols),
+ },
+ {
+ name: "up-to-date http server, client with compat",
+ client: libp2phttp.Host{EnableCompatibilityWithLegacyWellKnownEndpoint: true},
+ serverAddr: mkHTTPServer(libp2phttp.WellKnownProtocols),
+ },
+ {
+ name: "up-to-date http server, client without compat",
+ client: libp2phttp.Host{},
+ serverAddr: mkHTTPServer(libp2phttp.WellKnownProtocols),
+ },
+ {
+ name: "libp2phttp server with compat, client with compat",
+ client: libp2phttp.Host{EnableCompatibilityWithLegacyWellKnownEndpoint: true},
+ serverAddr: mkServerlibp2phttp(true),
+ },
+ {
+ name: "libp2phttp server without compat, client with compat",
+ client: libp2phttp.Host{EnableCompatibilityWithLegacyWellKnownEndpoint: true},
+ serverAddr: mkServerlibp2phttp(false),
+ },
+ {
+ name: "libp2phttp server with compat, client without compat",
+ client: libp2phttp.Host{},
+ serverAddr: mkServerlibp2phttp(true),
+ },
+ {
+ name: "legacy server, client without compat",
+ client: libp2phttp.Host{},
+ serverAddr: mkHTTPServer(libp2phttp.LegacyWellKnownProtocols),
+ expectErr: true,
+ },
+ }
+
+ for i := range testCases {
+ tc := &testCases[i] // to not copy the lock in libp2phttp.Host
+ t.Run(tc.name, func(t *testing.T) {
+ if tc.expectErr {
+ _, err := tc.client.NamespacedClient(httpping.PingProtocolID, peer.AddrInfo{Addrs: []ma.Multiaddr{tc.serverAddr}})
+ require.Error(t, err)
+ return
+ }
+ httpClient, err := tc.client.NamespacedClient(httpping.PingProtocolID, peer.AddrInfo{Addrs: []ma.Multiaddr{tc.serverAddr}})
+ require.NoError(t, err)
+
+ err = httpping.SendPing(httpClient)
+ require.NoError(t, err)
+ })
+ }
+
+}
+
+func TestResponseWriterShouldNotHaveCancelledContext(t *testing.T) {
+ h, err := libp2p.New()
+ require.NoError(t, err)
+ defer h.Close()
+ httpHost := libp2phttp.Host{StreamHost: h}
+ go httpHost.Serve()
+ defer httpHost.Close()
+
+ closeNotifyCh := make(chan bool, 1)
+ httpHost.SetHTTPHandlerAtPath("/test", "/", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+ // Legacy code uses this to check if the connection was closed
+ //lint:ignore SA1019 This is a test to assert we do the right thing since Go HTTP stdlib depends on this.
+ ch := w.(http.CloseNotifier).CloseNotify()
+ select {
+ case <-ch:
+ closeNotifyCh <- true
+ case <-time.After(100 * time.Millisecond):
+ closeNotifyCh <- false
+ }
+ w.WriteHeader(http.StatusOK)
+ }))
+
+ clientH, err := libp2p.New()
+ require.NoError(t, err)
+ defer clientH.Close()
+ clientHost := libp2phttp.Host{StreamHost: clientH}
+
+ rt, err := clientHost.NewConstrainedRoundTripper(peer.AddrInfo{ID: h.ID(), Addrs: h.Addrs()})
+ require.NoError(t, err)
+ httpClient := &http.Client{Transport: rt}
+ _, err = httpClient.Get("/")
+ require.NoError(t, err)
+
+ require.False(t, <-closeNotifyCh)
+}
+
+func TestHTTPHostAsRoundTripper(t *testing.T) {
+ serverHost, err := libp2p.New(
+ libp2p.ListenAddrStrings("/ip4/127.0.0.1/udp/0/quic-v1"),
+ )
+ require.NoError(t, err)
+
+ serverHttpHost := libp2phttp.Host{
+ InsecureAllowHTTP: true,
+ StreamHost: serverHost,
+ ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/http")},
+ }
+
+ serverHttpHost.SetHTTPHandlerAtPath("/hello", "/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.URL.Path != "/" {
+ w.WriteHeader(http.StatusNotFound)
+ return
+ }
+ w.Write([]byte("hello"))
+ }))
+
+ // Different protocol.ID and mounted at a different path
+ serverHttpHost.SetHTTPHandlerAtPath("/hello-again", "/hello2", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+ w.Write([]byte("hello"))
+ }))
+
+ go serverHttpHost.Serve()
+ defer serverHttpHost.Close()
+
+ httpPathSuffix := "/http-path/hello2"
+ var testCases []string
+ for _, a := range serverHttpHost.Addrs() {
+ if _, err := a.ValueForProtocol(ma.P_HTTP); err == nil {
+ testCases = append(testCases, "multiaddr:"+a.String())
+ testCases = append(testCases, "multiaddr:"+a.String()+httpPathSuffix)
+ serverPort, err := a.ValueForProtocol(ma.P_TCP)
+ require.NoError(t, err)
+ testCases = append(testCases, "http://127.0.0.1:"+serverPort)
+ } else {
+ testCases = append(testCases, "multiaddr:"+a.String()+"/p2p/"+serverHost.ID().String())
+ testCases = append(testCases, "multiaddr:"+a.String()+"/p2p/"+serverHost.ID().String()+httpPathSuffix)
+ }
+ }
+
+ clientStreamHost, err := libp2p.New()
+ require.NoError(t, err)
+ defer clientStreamHost.Close()
+
+ clientHttpHost := libp2phttp.Host{StreamHost: clientStreamHost}
+ client := http.Client{Transport: &clientHttpHost}
+ for _, tc := range testCases {
+ t.Run(tc, func(t *testing.T) {
+ resp, err := client.Get(tc)
+ require.NoError(t, err)
+ require.Equal(t, http.StatusOK, resp.StatusCode)
+ defer resp.Body.Close()
+ body, err := io.ReadAll(resp.Body)
+ require.NoError(t, err)
+ require.Equal(t, "hello", string(body))
+ })
+ }
+}
+
+func TestHTTPHostAsRoundTripperFailsWhenNoStreamHostPresent(t *testing.T) {
+ clientHttpHost := libp2phttp.Host{}
+ client := http.Client{Transport: &clientHttpHost}
+
+ _, err := client.Get("multiaddr:/ip4/127.0.0.1/udp/1111/quic-v1")
+ // Fails because we don't have a stream host available to make the request
+ require.Error(t, err)
+ require.ErrorContains(t, err, "Missing StreamHost")
+}
+
+// TestRedirects tests a client being redirected through multiple HTTP redirects
+func TestRedirects(t *testing.T) {
+ serverHost, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/127.0.0.1/udp/0/quic-v1"))
+ require.NoError(t, err)
+ serverHttpHost := libp2phttp.Host{
+ StreamHost: serverHost,
+ InsecureAllowHTTP: true,
+ ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/http")},
+ }
+ go serverHttpHost.Serve()
+ defer serverHttpHost.Close()
+
+ serverHttpHost.SetHTTPHandlerAtPath("/redirect-1/0.0.1", "/a", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+ w.Header().Set("Location", "/b/")
+ w.WriteHeader(http.StatusMovedPermanently)
+ }))
+
+ serverHttpHost.SetHTTPHandlerAtPath("/redirect-2/0.0.1", "/b", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+ w.Header().Set("Location", "/c/")
+ w.WriteHeader(http.StatusMovedPermanently)
+ }))
+
+ serverHttpHost.SetHTTPHandlerAtPath("/redirect-3/0.0.1", "/c", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+ w.Header().Set("Location", "/d/")
+ w.WriteHeader(http.StatusMovedPermanently)
+ }))
+
+ serverHttpHost.SetHTTPHandlerAtPath("/redirect-4/0.0.1", "/d", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+ w.Write([]byte("hello"))
+ }))
+
+ serverHttpHost.SetHTTPHandlerAtPath("/redirect-1/0.0.1", "/foo/bar/", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+ w.Header().Set("Location", "../baz/")
+ w.WriteHeader(http.StatusMovedPermanently)
+ }))
+ serverHttpHost.SetHTTPHandlerAtPath("/redirect-1/0.0.1", "/foo/baz/", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+ w.Write([]byte("hello"))
+ }))
+
+ clientStreamHost, err := libp2p.New(libp2p.NoListenAddrs, libp2p.Transport(libp2pquic.NewTransport))
+ require.NoError(t, err)
+ client := http.Client{Transport: &libp2phttp.Host{StreamHost: clientStreamHost}}
+
+ type testCase struct {
+ initialURI string
+ expectedURI string
+ }
+ var testCases []testCase
+ for _, a := range serverHttpHost.Addrs() {
+ if _, err := a.ValueForProtocol(ma.P_HTTP); err == nil {
+ port, err := a.ValueForProtocol(ma.P_TCP)
+ require.NoError(t, err)
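+ // The /http-path/ component is itself percent-encoded within the
+ // multiaddr, so "a%2f" decodes to the request path "/a/".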
+ u := fmt.Sprintf("multiaddr:%s/http-path/a%%2f", a)
+ f := fmt.Sprintf("http://127.0.0.1:%s/d/", port)
+ testCases = append(testCases, testCase{u, f})
+
+ u = fmt.Sprintf("multiaddr:%s/http-path/foo%%2Fbar", a)
+ f = fmt.Sprintf("http://127.0.0.1:%s/foo/baz/", port)
+ testCases = append(testCases, testCase{u, f})
+ } else {
+ u := fmt.Sprintf("multiaddr:%s/p2p/%s/http-path/a%%2f", a, serverHost.ID())
+ f := fmt.Sprintf("multiaddr:%s/p2p/%s/http-path/d%%2F", a, serverHost.ID())
+ testCases = append(testCases, testCase{u, f})
+
+ u = fmt.Sprintf("multiaddr:%s/p2p/%s/http-path/foo%%2Fbar", a, serverHost.ID())
+ f = fmt.Sprintf("multiaddr:%s/p2p/%s/http-path/foo%%2Fbaz%%2F", a, serverHost.ID())
+ testCases = append(testCases, testCase{u, f})
+ }
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.initialURI, func(t *testing.T) {
+ resp, err := client.Get(tc.initialURI)
+ require.NoError(t, err)
+ defer resp.Body.Close()
+ require.Equal(t, http.StatusOK, resp.StatusCode)
+ body, err := io.ReadAll(resp.Body)
+ require.NoError(t, err)
+ require.Equal(t, "hello", string(body))
+
+ finalReqURL := *resp.Request.URL
+ finalReqURL.Opaque = "" // Clear the opaque so we can compare the URI
+ require.Equal(t, tc.expectedURI, finalReqURL.String())
+ })
+ }
+}
+
+// TestMultiaddrURIRedirect tests that we can redirect using a multiaddr URI,
+// redirecting from the HTTP transport to the stream-based transport.
+func TestMultiaddrURIRedirect(t *testing.T) {
+ serverHost, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/127.0.0.1/udp/0/quic-v1"))
+ require.NoError(t, err)
+ serverHttpHost := libp2phttp.Host{
+ StreamHost: serverHost,
+ InsecureAllowHTTP: true,
+ ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/http")},
+ }
+ go serverHttpHost.Serve()
+ defer serverHttpHost.Close()
+
+ var httpMultiaddr ma.Multiaddr
+ var streamMultiaddr ma.Multiaddr
+ for _, a := range serverHttpHost.Addrs() {
+ if _, err := a.ValueForProtocol(ma.P_HTTP); err == nil {
+ httpMultiaddr = a
+ } else {
+ streamMultiaddr = a
+ }
+ }
+ require.NotNil(t, httpMultiaddr)
+ require.NotNil(t, streamMultiaddr)
+
+ // Redirect to a whole other transport!
+ serverHttpHost.SetHTTPHandlerAtPath("/redirect-1/0.0.1", "/a", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+ w.Header().Set("Location", fmt.Sprintf("multiaddr:%s/p2p/%s/http-path/b", streamMultiaddr, serverHost.ID()))
+ w.WriteHeader(http.StatusMovedPermanently)
+ }))
+
+ serverHttpHost.SetHTTPHandlerAtPath("/redirect-2/0.0.1", "/b", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+ w.WriteHeader(http.StatusOK)
+ }))
+
+ clientStreamHost, err := libp2p.New(libp2p.NoListenAddrs, libp2p.Transport(libp2pquic.NewTransport))
+ require.NoError(t, err)
+ client := http.Client{Transport: &libp2phttp.Host{StreamHost: clientStreamHost}}
+
+ resp, err := client.Get(fmt.Sprintf("multiaddr:%s/http-path/a", httpMultiaddr))
+ require.NoError(t, err)
+ require.Equal(t, http.StatusOK, resp.StatusCode)
+ require.True(t, strings.HasPrefix(resp.Request.URL.RawPath, streamMultiaddr.String()), "expected redirect to stream transport")
+}
+
+func TestImpliedHostIsSet(t *testing.T) {
+ serverHost, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/127.0.0.1/udp/0/quic-v1"))
+ require.NoError(t, err)
+ serverHttpHost := libp2phttp.Host{
+ StreamHost: serverHost,
+ InsecureAllowHTTP: true,
+ ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/http")},
+ }
+ go serverHttpHost.Serve()
+ defer serverHttpHost.Close()
+
+ serverHttpHost.SetHTTPHandlerAtPath("/hi", "/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if strings.HasPrefix(r.Host, "localhost") && r.URL.Path == "/" {
+ w.WriteHeader(http.StatusOK)
+ return
+ }
+ w.WriteHeader(http.StatusNotFound)
+ }))
+
+ clientStreamHost, err := libp2p.New(libp2p.NoListenAddrs, libp2p.Transport(libp2pquic.NewTransport))
+ require.NoError(t, err)
+ client := http.Client{Transport: &libp2phttp.Host{StreamHost: clientStreamHost}}
+
+ type testCase struct {
+ uri string
+ }
+ var testCases []testCase
+ for _, a := range serverHttpHost.Addrs() {
+ if _, err := a.ValueForProtocol(ma.P_HTTP); err == nil {
+ port, err := a.ValueForProtocol(ma.P_TCP)
+ require.NoError(t, err)
+ u := fmt.Sprintf("multiaddr:/dns/localhost/tcp/%s/http", port)
+ testCases = append(testCases, testCase{u})
+ } else {
+ port, err := a.ValueForProtocol(ma.P_UDP)
+ require.NoError(t, err)
+ u := fmt.Sprintf("multiaddr:/dns/localhost/udp/%s/quic-v1/p2p/%s", port, serverHost.ID())
+ testCases = append(testCases, testCase{u})
+ }
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.uri, func(t *testing.T) {
+ resp, err := client.Get(tc.uri)
+ require.NoError(t, err)
+ defer resp.Body.Close()
+ require.Equal(t, http.StatusOK, resp.StatusCode)
+ })
+ }
+}
+
+func TestErrServerClosed(t *testing.T) {
+ server := libp2phttp.Host{
+ InsecureAllowHTTP: true,
+ ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/http")},
+ }
+
+ done := make(chan struct{})
+ go func() {
+ err := server.Serve()
+ assert.Equal(t, http.ErrServerClosed, err)
+ close(done)
+ }()
+
+ server.Close()
+ <-done
+}
+
+func TestHTTPOverStreamsGetClientID(t *testing.T) {
+ serverHost, err := libp2p.New(
+ libp2p.ListenAddrStrings("/ip4/127.0.0.1/udp/0/quic-v1"),
+ )
+ require.NoError(t, err)
+
+ httpHost := libp2phttp.Host{StreamHost: serverHost}
+
+ httpHost.SetHTTPHandler("/echo-id", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ clientID := libp2phttp.ClientPeerID(r)
+ w.Write([]byte(clientID.String()))
+ }))
+
+ // Start server
+ go httpHost.Serve()
+ defer httpHost.Close()
+
+ // Start client
+ clientHost, err := libp2p.New(libp2p.NoListenAddrs)
+ require.NoError(t, err)
+ err = clientHost.Connect(context.Background(), peer.AddrInfo{
+ ID: serverHost.ID(),
+ Addrs: serverHost.Addrs(),
+ })
+ require.NoError(t, err)
+
+ client := http.Client{
+ Transport: &libp2phttp.Host{StreamHost: clientHost},
+ }
+
+ resp, err := client.Get("multiaddr:" + serverHost.Addrs()[0].String() + "/p2p/" + serverHost.ID().String() + "/http-path/echo-id")
+ require.NoError(t, err)
+ defer resp.Body.Close()
+
+ body, err := io.ReadAll(resp.Body)
+ require.NoError(t, err)
+
+ require.Equal(t, clientHost.ID().String(), string(body))
+}
+
+func TestAuthenticatedRequest(t *testing.T) {
+ serverSK, _, err := crypto.GenerateEd25519Key(rand.Reader)
+ require.NoError(t, err)
+ serverID, err := peer.IDFromPrivateKey(serverSK)
+ require.NoError(t, err)
+
+ serverStreamHost, err := libp2p.New(
+ libp2p.Identity(serverSK),
+ libp2p.ListenAddrStrings("/ip4/127.0.0.1/udp/0/quic-v1"),
+ libp2p.Transport(libp2pquic.NewTransport),
+ )
+ require.NoError(t, err)
+
+ server := libp2phttp.Host{
+ InsecureAllowHTTP: true,
+ StreamHost: serverStreamHost,
+ ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/http")},
+ ServerPeerIDAuth: &httpauth.ServerPeerIDAuth{
+ TokenTTL: time.Hour,
+ PrivKey: serverSK,
+ NoTLS: true,
+ ValidHostnameFn: func(hostname string) bool {
+ return strings.HasPrefix(hostname, "127.0.0.1")
+ },
+ },
+ }
+ server.SetHTTPHandler("/echo-id", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ clientID := libp2phttp.ClientPeerID(r)
+ w.Write([]byte(clientID.String()))
+ }))
+
+ go server.Serve()
+
+ clientSK, _, err := crypto.GenerateEd25519Key(rand.Reader)
+ require.NoError(t, err)
+
+ clientStreamHost, err := libp2p.New(
+ libp2p.Identity(clientSK),
+ libp2p.NoListenAddrs,
+ libp2p.Transport(libp2pquic.NewTransport))
+ require.NoError(t, err)
+
+ client := &http.Client{
+ Transport: &libp2phttp.Host{
+ StreamHost: clientStreamHost,
+ ClientPeerIDAuth: &httpauth.ClientPeerIDAuth{
+ TokenTTL: time.Hour,
+ PrivKey: clientSK,
+ },
+ },
+ }
+
+ clientID, err := peer.IDFromPrivateKey(clientSK)
+ require.NoError(t, err)
+
+ for _, serverAddr := range server.Addrs() {
+ _, tpt := ma.SplitLast(serverAddr)
+ t.Run(tpt.String(), func(t *testing.T) {
+ url := fmt.Sprintf("multiaddr:%s/p2p/%s/http-path/echo-id", serverAddr, serverID)
+ t.Log("Making a GET request to:", url)
+ resp, err := client.Get(url)
+ require.NoError(t, err)
+
+ observedServerID := libp2phttp.ServerPeerID(resp)
+ require.Equal(t, serverID, observedServerID)
+
+ body, err := io.ReadAll(resp.Body)
+ require.NoError(t, err)
+
+ require.Equal(t, clientID.String(), string(body))
+ })
+ }
+}
diff --git a/p2p/http/options.go b/p2p/http/options.go
new file mode 100644
index 0000000000..0062e59319
--- /dev/null
+++ b/p2p/http/options.go
@@ -0,0 +1,24 @@
+package libp2phttp
+
+type RoundTripperOption func(o roundTripperOpts) roundTripperOpts
+
+type roundTripperOpts struct {
+ preferHTTPTransport bool
+ serverMustAuthenticatePeerID bool
+}
+
+// PreferHTTPTransport tells the roundtripper constructor to prefer using an
+// HTTP transport (as opposed to a libp2p stream transport). Useful, for
+// example, if you want to attempt to leverage HTTP caching.
+func PreferHTTPTransport(o roundTripperOpts) roundTripperOpts {
+ o.preferHTTPTransport = true
+ return o
+}
+
+// ServerMustAuthenticatePeerID tells the roundtripper constructor that we MUST
+// authenticate the Server's PeerID. Note: this currently means we cannot use a
+// native HTTP transport (HTTP peer ID authentication is not yet implemented: https://github.com/libp2p/specs/pull/564).
+func ServerMustAuthenticatePeerID(o roundTripperOpts) roundTripperOpts {
+ o.serverMustAuthenticatePeerID = true
+ return o
+}
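+
+// A minimal usage sketch (illustrative; it assumes a client libp2phttp.Host
+// named clientHost and a server's peer.AddrInfo named serverInfo, and that
+// the roundtripper constructor accepts these options variadically):
+//
+//	rt, err := clientHost.NewConstrainedRoundTripper(serverInfo, ServerMustAuthenticatePeerID)
+//	if err != nil {
+//		// handle the error
+//	}
+//	httpClient := &http.Client{Transport: rt}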
diff --git a/p2p/http/ping/ping.go b/p2p/http/ping/ping.go
new file mode 100644
index 0000000000..2c2ad80fbf
--- /dev/null
+++ b/p2p/http/ping/ping.go
@@ -0,0 +1,67 @@
+package httpping
+
+import (
+ "bytes"
+ "crypto/rand"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "strconv"
+)
+
+const pingSize = 32
+const PingProtocolID = "/http-ping/1"
+
+type Ping struct{}
+
+var _ http.Handler = Ping{}
+
+// ServeHTTP implements http.Handler.
+func (Ping) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ body := [pingSize]byte{}
+ _, err := io.ReadFull(r.Body, body[:])
+ if err != nil {
+ w.WriteHeader(http.StatusBadRequest)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/octet-stream")
+ w.Header().Set("Content-Length", strconv.Itoa(pingSize))
+ w.Write(body[:])
+}
+
+// SendPing sends a ping request over HTTP. The provided client should be namespaced to the Ping protocol.
+func SendPing(client http.Client) error {
+ body := [pingSize]byte{}
+ _, err := io.ReadFull(rand.Reader, body[:])
+ if err != nil {
+ return err
+ }
+ req, err := http.NewRequest("POST", "/", bytes.NewReader(body[:]))
+ if err != nil {
+ return err
+ }
+ req.Header.Set("Content-Type", "application/octet-stream")
+ req.Header.Set("Content-Length", strconv.Itoa(pingSize))
+ resp, err := client.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("unexpected status code: %d", resp.StatusCode)
+ }
+
+ rBody := [pingSize]byte{}
+ _, err = io.ReadFull(resp.Body, rBody[:])
+ if err != nil {
+ return err
+ }
+
+ if !bytes.Equal(body[:], rBody[:]) {
+ return errors.New("ping body mismatch")
+ }
+ return nil
+}
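+
+// A minimal usage sketch (illustrative; server and client are hypothetical
+// libp2phttp.Hosts and serverInfo is the server's peer.AddrInfo):
+//
+//	server.SetHTTPHandler(PingProtocolID, Ping{})
+//	httpClient, err := client.NamespacedClient(PingProtocolID, serverInfo)
+//	if err != nil {
+//		// handle the error
+//	}
+//	err = SendPing(httpClient)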
diff --git a/p2p/metricshelper/conn.go b/p2p/metricshelper/conn.go
new file mode 100644
index 0000000000..28050eb2f4
--- /dev/null
+++ b/p2p/metricshelper/conn.go
@@ -0,0 +1,37 @@
+package metricshelper
+
+import ma "github.com/multiformats/go-multiaddr"
+
+var transports = [...]int{ma.P_CIRCUIT, ma.P_WEBRTC, ma.P_WEBRTC_DIRECT, ma.P_WEBTRANSPORT, ma.P_QUIC, ma.P_QUIC_V1, ma.P_WSS, ma.P_WS, ma.P_TCP}
+
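+// GetTransport returns the name of the transport protocol in a (e.g. "tcp",
+// "quic-v1", "webtransport"), searching components from the end, or "other"
+// if no known transport protocol is found.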
+func GetTransport(a ma.Multiaddr) string {
+ if a == nil {
+ return "other"
+ }
+ for i := len(a) - 1; i >= 0; i-- {
+ p := a[i].Protocol()
+ for _, t := range transports {
+ if p.Code == t {
+ return p.Name
+ }
+ }
+ }
+ return "other"
+}
+
+func GetIPVersion(addr ma.Multiaddr) string {
+ version := "unknown"
+ if addr == nil {
+ return version
+ }
+ ma.ForEach(addr, func(c ma.Component) bool {
+ switch c.Protocol().Code {
+ case ma.P_IP4, ma.P_DNS4:
+ version = "ip4"
+ case ma.P_IP6, ma.P_DNS6:
+ version = "ip6"
+ }
+ return false
+ })
+ return version
+}
diff --git a/p2p/metricshelper/conn_test.go b/p2p/metricshelper/conn_test.go
new file mode 100644
index 0000000000..0650bfec27
--- /dev/null
+++ b/p2p/metricshelper/conn_test.go
@@ -0,0 +1,68 @@
+package metricshelper
+
+import (
+ "fmt"
+ "testing"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+func TestGetTransport(t *testing.T) {
+ cases := []struct {
+ addr ma.Multiaddr
+ result string
+ }{
+ {
+ addr: ma.StringCast("/ip4/1.1.1.1/tcp/1"),
+ result: "tcp",
+ },
+ {
+ addr: ma.StringCast("/ip4/1.1.1.1/udp/10"),
+ result: "other",
+ },
+ {
+ addr: nil,
+ result: "other",
+ },
+ }
+ for i, tc := range cases {
+ t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+ got := GetTransport(tc.addr)
+ if got != tc.result {
+ t.Fatalf("invalid transport for %s\ngot:%v\nwant:%v", tc.addr, got, tc.result)
+ }
+ })
+ }
+}
+
+func TestIPVersion(t *testing.T) {
+ cases := []struct {
+ addr ma.Multiaddr
+ result string
+ }{
+ {
+ addr: ma.StringCast("/ip4/1.1.1.1/tcp/1"),
+ result: "ip4",
+ },
+ {
+ addr: ma.StringCast("/ip4/1.1.1.1/udp/10"),
+ result: "ip4",
+ },
+ {
+ addr: nil,
+ result: "unknown",
+ },
+ {
+ addr: ma.StringCast("/dns/hello.world/tcp/10"),
+ result: "unknown",
+ },
+ }
+ for i, tc := range cases {
+ t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+ got := GetIPVersion(tc.addr)
+ if got != tc.result {
+ t.Fatalf("invalid ip version for %s\ngot:%v\nwant:%v", tc.addr, got, tc.result)
+ }
+ })
+ }
+}
diff --git a/p2p/metricshelper/dir.go b/p2p/metricshelper/dir.go
new file mode 100644
index 0000000000..2f89b951c6
--- /dev/null
+++ b/p2p/metricshelper/dir.go
@@ -0,0 +1,14 @@
+package metricshelper
+
+import "github.com/libp2p/go-libp2p/core/network"
+
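+// GetDirection returns a metrics-friendly label ("inbound", "outbound", or
+// "unknown") for the given connection direction.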
+func GetDirection(dir network.Direction) string {
+ switch dir {
+ case network.DirOutbound:
+ return "outbound"
+ case network.DirInbound:
+ return "inbound"
+ default:
+ return "unknown"
+ }
+}
diff --git a/p2p/metricshelper/pool.go b/p2p/metricshelper/pool.go
new file mode 100644
index 0000000000..3290ed5a03
--- /dev/null
+++ b/p2p/metricshelper/pool.go
@@ -0,0 +1,26 @@
+package metricshelper
+
+import (
+ "fmt"
+ "sync"
+)
+
+const capacity = 8
+
+var stringPool = sync.Pool{New: func() any {
+ s := make([]string, 0, capacity)
+ return &s
+}}
+
+func GetStringSlice() *[]string {
+ s := stringPool.Get().(*[]string)
+ *s = (*s)[:0]
+ return s
+}
+
+func PutStringSlice(s *[]string) {
+ if c := cap(*s); c < capacity {
+ panic(fmt.Sprintf("expected a string slice with capacity 8 or greater, got %d", c))
+ }
+ stringPool.Put(s)
+}
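+
+// A minimal usage sketch (illustrative):
+//
+//	tags := GetStringSlice()
+//	defer PutStringSlice(tags)
+//	*tags = append(*tags, "inbound", "tcp")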
diff --git a/p2p/metricshelper/pool_test.go b/p2p/metricshelper/pool_test.go
new file mode 100644
index 0000000000..85021e5599
--- /dev/null
+++ b/p2p/metricshelper/pool_test.go
@@ -0,0 +1,21 @@
+package metricshelper
+
+import (
+ "math/rand"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestStringSlicePool(t *testing.T) {
+ for i := 0; i < 1e5; i++ {
+ s := GetStringSlice()
+ require.Empty(t, *s)
+ require.Equal(t, 8, cap(*s))
+ *s = append(*s, "foo")
+ *s = append(*s, "bar")
+ if rand.Int()%3 == 0 {
+ PutStringSlice(s)
+ }
+ }
+}
diff --git a/p2p/metricshelper/registerer.go b/p2p/metricshelper/registerer.go
new file mode 100644
index 0000000000..99027c0db2
--- /dev/null
+++ b/p2p/metricshelper/registerer.go
@@ -0,0 +1,20 @@
+package metricshelper
+
+import (
+ "errors"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// RegisterCollectors registers the collectors with reg, ignoring
+// re-registration errors, and panics on any other error.
+func RegisterCollectors(reg prometheus.Registerer, collectors ...prometheus.Collector) {
+ for _, c := range collectors {
+ err := reg.Register(c)
+ if err != nil {
+ if ok := errors.As(err, &prometheus.AlreadyRegisteredError{}); !ok {
+ panic(err)
+ }
+ }
+ }
+}
diff --git a/p2p/metricshelper/registerer_test.go b/p2p/metricshelper/registerer_test.go
new file mode 100644
index 0000000000..b4c550376e
--- /dev/null
+++ b/p2p/metricshelper/registerer_test.go
@@ -0,0 +1,32 @@
+package metricshelper
+
+import (
+ "testing"
+
+ "github.com/prometheus/client_golang/prometheus"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestRegisterCollectors(t *testing.T) {
+ reg := prometheus.NewRegistry()
+ c1 := prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Name: "counter",
+ },
+ )
+ c2 := prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: "test",
+ Name: "gauge",
+ },
+ )
+ // c3 == c1
+ c3 := prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Name: "counter",
+ },
+ )
+ require.NotPanics(t, func() { RegisterCollectors(reg, c1, c2) })
+ require.NotPanics(t, func() { RegisterCollectors(reg, c3) }, "should not panic on duplicate registration")
+}
diff --git a/p2p/muxer/testsuite/mux.go b/p2p/muxer/testsuite/mux.go
new file mode 100644
index 0000000000..93d24785ea
--- /dev/null
+++ b/p2p/muxer/testsuite/mux.go
@@ -0,0 +1,661 @@
+package mux
+
+import (
+ "bytes"
+ "context"
+ crand "crypto/rand"
+ "errors"
+ "fmt"
+ "io"
+ mrand "math/rand"
+ "net"
+ "reflect"
+ "runtime"
+ "runtime/debug"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p-testing/ci"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+
+ "github.com/stretchr/testify/require"
+)
+
+var randomness []byte
+var Subtests map[string]TransportTest
+
+func init() {
+ // read 1MB of randomness
+ randomness = make([]byte, 1<<20)
+ if _, err := crand.Read(randomness); err != nil {
+ panic(err)
+ }
+
+ Subtests = make(map[string]TransportTest)
+ for _, f := range subtests {
+ Subtests[getFunctionName(f)] = f
+ }
+}
+
+func getFunctionName(i interface{}) string {
+ return runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name()
+}
+
+type peerScope struct {
+ mx sync.Mutex
+ memory int
+}
+
+func (p *peerScope) ReserveMemory(size int, _ uint8) error {
+ p.mx.Lock()
+ p.memory += size
+ p.mx.Unlock()
+ return nil
+}
+
+func (p *peerScope) ReleaseMemory(size int) {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ if p.memory < size {
+ panic(fmt.Sprintf("tried to release too much memory: %d (current: %d)", size, p.memory))
+ }
+ p.memory -= size
+}
+
+// Check checks that we don't have any more reserved memory.
+func (p *peerScope) Check(t *testing.T) {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ require.Zero(t, p.memory, "expected all reserved memory to have been released")
+}
+
+type peerScopeSpan struct {
+ peerScope
+}
+
+func (p *peerScopeSpan) Done() {
+ p.mx.Lock()
+ defer p.mx.Unlock()
+ p.memory = 0
+}
+
+func (p *peerScope) Stat() network.ScopeStat { return network.ScopeStat{} }
+func (p *peerScope) BeginSpan() (network.ResourceScopeSpan, error) { return &peerScopeSpan{}, nil }
+func (p *peerScope) Peer() peer.ID { panic("implement me") }
+
+var _ network.PeerScope = &peerScope{}
+
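+// Options configures a stress test: the multiplexer under test and how many
+// connections, streams per connection, and messages per stream to exercise.
+// (msgMin and msgMax are set by the subtests, but SubtestStress currently
+// sends fixed-size messages.)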
+type Options struct {
+ tr network.Multiplexer
+ connNum int
+ streamNum int
+ msgNum int
+ msgMin int
+ msgMax int
+}
+
+func randBuf(size int) []byte {
+ n := len(randomness) - size
+ if size < 1 {
+ panic(fmt.Errorf("requested too large buffer (%d). max is %d", size, len(randomness)))
+ }
+
+ start := mrand.Intn(n)
+ return randomness[start : start+size]
+}
+
+func checkErr(t *testing.T, err error) {
+ if err != nil {
+ debug.PrintStack()
+ t.Fatal(err)
+ }
+}
+
+func echoStream(s network.MuxedStream) {
+ defer s.Close()
+ io.Copy(s, s) // echo everything
+}
+
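+// GoServe accepts connections on l, wraps each with tr as the server side,
+// and echoes every inbound stream. Calling the returned function marks the
+// listener as closed, so a subsequent Accept error is treated as a normal
+// shutdown rather than a test failure.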
+func GoServe(t *testing.T, tr network.Multiplexer, l net.Listener) (done func()) {
+ closed := make(chan struct{}, 1)
+
+ go func() {
+ for {
+ c1, err := l.Accept()
+ if err != nil {
+ select {
+ case <-closed:
+ return // closed naturally.
+ default:
+ checkErr(t, err)
+ }
+ }
+
+ sc1, err := tr.NewConn(c1, true, nil)
+ checkErr(t, err)
+ go func() {
+ for {
+ str, err := sc1.AcceptStream()
+ if err != nil {
+ break
+ }
+ go echoStream(str)
+ }
+ }()
+ }
+ }()
+
+ return func() {
+ closed <- struct{}{}
+ }
+}
+
+func SubtestSimpleWrite(t *testing.T, tr network.Multiplexer) {
+ l, err := net.Listen("tcp", "localhost:0")
+ checkErr(t, err)
+ done := GoServe(t, tr, l)
+ defer done()
+
+ nc1, err := net.Dial("tcp", l.Addr().String())
+ checkErr(t, err)
+ defer nc1.Close()
+
+ scope := &peerScope{}
+ c1, err := tr.NewConn(nc1, false, scope)
+ checkErr(t, err)
+ defer func() {
+ c1.Close()
+ scope.Check(t)
+ }()
+
+ // serve the outgoing conn, because some muxers assume
+ // that we _always_ call serve. (this is an error?)
+ go c1.AcceptStream()
+
+ s1, err := c1.OpenStream(context.Background())
+ checkErr(t, err)
+ defer s1.Close()
+
+ buf1 := randBuf(4096)
+ _, err = s1.Write(buf1)
+ checkErr(t, err)
+
+ buf2 := make([]byte, len(buf1))
+ _, err = io.ReadFull(s1, buf2)
+ checkErr(t, err)
+
+ require.Equal(t, buf1, buf2)
+}
+
+func SubtestStress(t *testing.T, opt Options) {
+ msgsize := 1 << 11
+ errs := make(chan error) // don't block anything.
+
+ rateLimitN := 5000 // max of 5k funcs, because -race has 8k max.
+ rateLimitChan := make(chan struct{}, rateLimitN)
+ for i := 0; i < rateLimitN; i++ {
+ rateLimitChan <- struct{}{}
+ }
+
+ rateLimit := func(f func()) {
+ <-rateLimitChan
+ f()
+ rateLimitChan <- struct{}{}
+ }
+
+ writeStream := func(s network.MuxedStream, bufs chan<- []byte) {
+ for i := 0; i < opt.msgNum; i++ {
+ buf := randBuf(msgsize)
+ bufs <- buf
+ if _, err := s.Write(buf); err != nil {
+ errs <- fmt.Errorf("s.Write(buf): %s", err)
+ continue
+ }
+ }
+ }
+
+ readStream := func(s network.MuxedStream, bufs <-chan []byte) {
+ buf2 := make([]byte, msgsize)
+ for buf1 := range bufs {
+ if _, err := io.ReadFull(s, buf2); err != nil {
+ errs <- fmt.Errorf("io.ReadFull(s, buf2): %s", err)
+ continue
+ }
+ if !bytes.Equal(buf1, buf2) {
+ errs <- fmt.Errorf("buffers not equal (%x != %x)", buf1[:3], buf2[:3])
+ }
+ }
+ }
+
+ openStreamAndRW := func(c network.MuxedConn) {
+ s, err := c.OpenStream(context.Background())
+ if err != nil {
+ errs <- fmt.Errorf("failed to create NewStream: %s", err)
+ return
+ }
+
+ bufs := make(chan []byte, opt.msgNum)
+ go func() {
+ writeStream(s, bufs)
+ close(bufs)
+ }()
+
+ readStream(s, bufs)
+ s.Close()
+ }
+
+ openConnAndRW := func() {
+ l, err := net.Listen("tcp", "localhost:0")
+ checkErr(t, err)
+ done := GoServe(t, opt.tr, l)
+ defer done()
+
+ nla := l.Addr()
+ nc, err := net.Dial(nla.Network(), nla.String())
+ if err != nil {
+ t.Fatal(fmt.Errorf("net.Dial(%s, %s): %s", nla.Network(), nla.String(), err))
+ }
+
+ scope := &peerScope{}
+ c, err := opt.tr.NewConn(nc, false, scope)
+ if err != nil {
+ t.Fatal(fmt.Errorf("a.AddConn(%s <--> %s): %s", nc.LocalAddr(), nc.RemoteAddr(), err))
+ return
+ }
+
+ // serve the outgoing conn, because some muxers assume
+ // that we _always_ call serve. (this is an error?)
+ go func() {
+ for {
+ str, err := c.AcceptStream()
+ if err != nil {
+ break
+ }
+ go echoStream(str)
+ }
+ }()
+
+ var wg sync.WaitGroup
+ for i := 0; i < opt.streamNum; i++ {
+ wg.Add(1)
+ go rateLimit(func() {
+ defer wg.Done()
+ openStreamAndRW(c)
+ })
+ }
+ wg.Wait()
+ c.Close()
+ scope.Check(t)
+ }
+
+ openConnsAndRW := func() {
+ var wg sync.WaitGroup
+ for i := 0; i < opt.connNum; i++ {
+ wg.Add(1)
+ go rateLimit(func() {
+ defer wg.Done()
+ openConnAndRW()
+ })
+ }
+ wg.Wait()
+ }
+
+ go func() {
+ openConnsAndRW()
+ close(errs) // done
+ }()
+
+ for err := range errs {
+ t.Error(err)
+ }
+}
+
+func tcpPipe(t *testing.T) (net.Conn, net.Conn) {
+ list, err := net.Listen("tcp", "0.0.0.0:0")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ con1, err := net.Dial("tcp", list.Addr().String())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ con2, err := list.Accept()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ return con1, con2
+}
+
+func SubtestStreamOpenStress(t *testing.T, tr network.Multiplexer) {
+ wg := new(sync.WaitGroup)
+
+ a, b := tcpPipe(t)
+ defer a.Close()
+ defer b.Close()
+
+ defer wg.Wait()
+
+ wg.Add(1)
+ count := 10000
+ workers := 5
+ go func() {
+ defer wg.Done()
+ muxa, err := tr.NewConn(a, true, nil)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ stress := func() {
+ defer wg.Done()
+ for i := 0; i < count; i++ {
+ s, err := muxa.OpenStream(context.Background())
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ err = s.CloseWrite()
+ if err != nil {
+ t.Error(err)
+ }
+ n, err := s.Read([]byte{0})
+ if n != 0 {
+ t.Error("expected to read no bytes")
+ }
+ if err != io.EOF {
+ t.Errorf("expected an EOF, got %s", err)
+ }
+ }
+ }
+
+ for i := 0; i < workers; i++ {
+ wg.Add(1)
+ go stress()
+ }
+ }()
+
+ scope := &peerScope{}
+ muxb, err := tr.NewConn(b, false, scope)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ muxb.Close()
+ scope.Check(t)
+ }()
+
+ time.Sleep(time.Millisecond * 50)
+
+ wg.Add(1)
+ recv := make(chan struct{}, count*workers)
+ go func() {
+ defer wg.Done()
+ for i := 0; i < count*workers; i++ {
+ str, err := muxb.AcceptStream()
+ if err != nil {
+ break
+ }
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ str.Close()
+ select {
+ case recv <- struct{}{}:
+ default:
+ t.Error("too many stream")
+ }
+ }()
+ }
+ }()
+
+ timeout := time.Second * 10
+ if ci.IsRunning() {
+ timeout *= 10
+ }
+
+ limit := time.After(timeout)
+ for i := 0; i < count*workers; i++ {
+ select {
+ case <-recv:
+ case <-limit:
+ t.Fatal("timed out receiving streams")
+ }
+ }
+
+ wg.Wait()
+}
+
+func SubtestStreamReset(t *testing.T, tr network.Multiplexer) {
+ wg := new(sync.WaitGroup)
+ defer wg.Wait()
+
+ a, b := tcpPipe(t)
+ defer a.Close()
+ defer b.Close()
+
+ wg.Add(1)
+ scopea := &peerScope{}
+ muxa, err := tr.NewConn(a, true, scopea)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ defer func() {
+ muxa.Close()
+ scopea.Check(t)
+ }()
+
+ go func() {
+ defer wg.Done()
+ s, err := muxa.OpenStream(context.Background())
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ time.Sleep(time.Millisecond * 50)
+
+ _, err = s.Write([]byte("foo"))
+ if !errors.Is(err, network.ErrReset) {
+ t.Error("should have been stream reset")
+ }
+ s.Close()
+ }()
+
+ scopeb := &peerScope{}
+ muxb, err := tr.NewConn(b, false, scopeb)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ muxb.Close()
+ scopeb.Check(t)
+ }()
+
+ str, err := muxb.AcceptStream()
+ checkErr(t, err)
+ str.Reset()
+
+ wg.Wait()
+}
+
+// check that Close also closes the underlying net.Conn
+func SubtestWriteAfterClose(t *testing.T, tr network.Multiplexer) {
+ a, b := tcpPipe(t)
+
+ scopea := &peerScope{}
+ muxa, err := tr.NewConn(a, true, scopea)
+ checkErr(t, err)
+
+ scopeb := &peerScope{}
+ muxb, err := tr.NewConn(b, false, scopeb)
+ checkErr(t, err)
+
+ checkErr(t, muxa.Close())
+ scopea.Check(t)
+ checkErr(t, muxb.Close())
+ scopeb.Check(t)
+
+ // make sure the underlying net.Conn was closed
+ if _, err := a.Write([]byte("foobar")); err == nil || !strings.Contains(err.Error(), "use of closed network connection") {
+ t.Fatal("write should have failed")
+ }
+ if _, err := b.Write([]byte("foobar")); err == nil || !strings.Contains(err.Error(), "use of closed network connection") {
+ t.Fatal("write should have failed")
+ }
+}
+
+func SubtestStreamLeftOpen(t *testing.T, tr network.Multiplexer) {
+ a, b := tcpPipe(t)
+
+ const numStreams = 10
+ const dataLen = 50 * 1024
+
+ scopea := &peerScope{}
+ muxa, err := tr.NewConn(a, true, scopea)
+ checkErr(t, err)
+
+ scopeb := &peerScope{}
+ muxb, err := tr.NewConn(b, false, scopeb)
+ checkErr(t, err)
+
+ var wg sync.WaitGroup
+ wg.Add(1 + numStreams)
+ go func() {
+ defer wg.Done()
+ for i := 0; i < numStreams; i++ {
+ stra, err := muxa.OpenStream(context.Background())
+ checkErr(t, err)
+ go func() {
+ defer wg.Done()
+ _, err = stra.Write(randBuf(dataLen))
+ checkErr(t, err)
+ // do NOT close or reset the stream
+ }()
+ }
+ }()
+
+ wg.Add(1 + numStreams)
+ go func() {
+ defer wg.Done()
+ for i := 0; i < numStreams; i++ {
+ str, err := muxb.AcceptStream()
+ checkErr(t, err)
+ go func() {
+ defer wg.Done()
+ _, err = io.ReadFull(str, make([]byte, dataLen))
+ checkErr(t, err)
+ }()
+ }
+ }()
+
+ // Now we have a bunch of open streams.
+ // Make sure that their memory is returned when we close the connection.
+ wg.Wait()
+
+ muxa.Close()
+ scopea.Check(t)
+ muxb.Close()
+ scopeb.Check(t)
+}
+
+func SubtestStress1Conn1Stream1Msg(t *testing.T, tr network.Multiplexer) {
+ SubtestStress(t, Options{
+ tr: tr,
+ connNum: 1,
+ streamNum: 1,
+ msgNum: 1,
+ msgMax: 100,
+ msgMin: 100,
+ })
+}
+
+func SubtestStress1Conn1Stream100Msg(t *testing.T, tr network.Multiplexer) {
+ SubtestStress(t, Options{
+ tr: tr,
+ connNum: 1,
+ streamNum: 1,
+ msgNum: 100,
+ msgMax: 100,
+ msgMin: 100,
+ })
+}
+
+func SubtestStress1Conn100Stream100Msg(t *testing.T, tr network.Multiplexer) {
+ SubtestStress(t, Options{
+ tr: tr,
+ connNum: 1,
+ streamNum: 100,
+ msgNum: 100,
+ msgMax: 100,
+ msgMin: 100,
+ })
+}
+
+func SubtestStress10Conn10Stream50Msg(t *testing.T, tr network.Multiplexer) {
+ SubtestStress(t, Options{
+ tr: tr,
+ connNum: 10,
+ streamNum: 10,
+ msgNum: 50,
+ msgMax: 100,
+ msgMin: 100,
+ })
+}
+
+func SubtestStress1Conn1000Stream10Msg(t *testing.T, tr network.Multiplexer) {
+ SubtestStress(t, Options{
+ tr: tr,
+ connNum: 1,
+ streamNum: 1000,
+ msgNum: 10,
+ msgMax: 100,
+ msgMin: 100,
+ })
+}
+
+func SubtestStress1Conn100Stream100Msg10MB(t *testing.T, tr network.Multiplexer) {
+ SubtestStress(t, Options{
+ tr: tr,
+ connNum: 1,
+ streamNum: 100,
+ msgNum: 100,
+ msgMax: 10000,
+ msgMin: 1000,
+ })
+}
+
+// subtests lists all the subtests run by SubtestAll
+var subtests = []TransportTest{
+ SubtestSimpleWrite,
+ SubtestWriteAfterClose,
+ SubtestStress1Conn1Stream1Msg,
+ SubtestStress1Conn1Stream100Msg,
+ SubtestStress1Conn100Stream100Msg,
+ SubtestStress10Conn10Stream50Msg,
+ SubtestStress1Conn1000Stream10Msg,
+ SubtestStress1Conn100Stream100Msg10MB,
+ SubtestStreamOpenStress,
+ SubtestStreamReset,
+ SubtestStreamLeftOpen,
+}
+
+// SubtestAll runs all the stream multiplexer tests against the target
+// transport.
+func SubtestAll(t *testing.T, tr network.Multiplexer) {
+ for name, f := range Subtests {
+ t.Run(name, func(t *testing.T) {
+ f(t, tr)
+ })
+ }
+}
+
+// TransportTest is a stream multiplex transport test case
+type TransportTest func(t *testing.T, tr network.Multiplexer)
diff --git a/p2p/muxer/yamux/conn.go b/p2p/muxer/yamux/conn.go
new file mode 100644
index 0000000000..54a856e58c
--- /dev/null
+++ b/p2p/muxer/yamux/conn.go
@@ -0,0 +1,53 @@
+package yamux
+
+import (
+ "context"
+
+ "github.com/libp2p/go-libp2p/core/network"
+
+ "github.com/libp2p/go-yamux/v5"
+)
+
+// conn implements mux.MuxedConn over yamux.Session.
+type conn yamux.Session
+
+var _ network.MuxedConn = &conn{}
+
+// NewMuxedConn constructs a new MuxedConn from a yamux.Session.
+func NewMuxedConn(m *yamux.Session) network.MuxedConn {
+ return (*conn)(m)
+}
+
+// Close closes the underlying yamux session.
+func (c *conn) Close() error {
+ return c.yamux().Close()
+}
+
+func (c *conn) CloseWithError(errCode network.ConnErrorCode) error {
+ return c.yamux().CloseWithError(uint32(errCode))
+}
+
+// IsClosed checks if yamux.Session is in closed state.
+func (c *conn) IsClosed() bool {
+ return c.yamux().IsClosed()
+}
+
+// OpenStream creates a new stream.
+func (c *conn) OpenStream(ctx context.Context) (network.MuxedStream, error) {
+ s, err := c.yamux().OpenStream(ctx)
+ if err != nil {
+ return nil, parseError(err)
+ }
+
+ return (*stream)(s), nil
+}
+
+// AcceptStream accepts a stream opened by the other side.
+func (c *conn) AcceptStream() (network.MuxedStream, error) {
+ s, err := c.yamux().AcceptStream()
+ return (*stream)(s), parseError(err)
+}
+
+func (c *conn) yamux() *yamux.Session {
+ return (*yamux.Session)(c)
+}
diff --git a/p2p/muxer/yamux/stream.go b/p2p/muxer/yamux/stream.go
new file mode 100644
index 0000000000..450bdec479
--- /dev/null
+++ b/p2p/muxer/yamux/stream.go
@@ -0,0 +1,80 @@
+package yamux
+
+import (
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/network"
+
+ "github.com/libp2p/go-yamux/v5"
+)
+
+// stream implements mux.MuxedStream over yamux.Stream.
+type stream yamux.Stream
+
+var _ network.MuxedStream = &stream{}
+
+func parseError(err error) error {
+ if err == nil {
+ return err
+ }
+ se := &yamux.StreamError{}
+ if errors.As(err, &se) {
+ return &network.StreamError{Remote: se.Remote, ErrorCode: network.StreamErrorCode(se.ErrorCode), TransportError: err}
+ }
+ ce := &yamux.GoAwayError{}
+ if errors.As(err, &ce) {
+ return &network.ConnError{Remote: ce.Remote, ErrorCode: network.ConnErrorCode(ce.ErrorCode), TransportError: err}
+ }
+ if errors.Is(err, yamux.ErrStreamReset) {
+ return fmt.Errorf("%w: %w", network.ErrReset, err)
+ }
+ return err
+}
+
+func (s *stream) Read(b []byte) (n int, err error) {
+ n, err = s.yamux().Read(b)
+ return n, parseError(err)
+}
+
+func (s *stream) Write(b []byte) (n int, err error) {
+ n, err = s.yamux().Write(b)
+ return n, parseError(err)
+}
+
+func (s *stream) Close() error {
+ return s.yamux().Close()
+}
+
+func (s *stream) Reset() error {
+ return s.yamux().Reset()
+}
+
+func (s *stream) ResetWithError(errCode network.StreamErrorCode) error {
+ return s.yamux().ResetWithError(uint32(errCode))
+}
+
+func (s *stream) CloseRead() error {
+ return s.yamux().CloseRead()
+}
+
+func (s *stream) CloseWrite() error {
+ return s.yamux().CloseWrite()
+}
+
+func (s *stream) SetDeadline(t time.Time) error {
+ return s.yamux().SetDeadline(t)
+}
+
+func (s *stream) SetReadDeadline(t time.Time) error {
+ return s.yamux().SetReadDeadline(t)
+}
+
+func (s *stream) SetWriteDeadline(t time.Time) error {
+ return s.yamux().SetWriteDeadline(t)
+}
+
+func (s *stream) yamux() *yamux.Stream {
+ return (*yamux.Stream)(s)
+}
diff --git a/p2p/muxer/yamux/transport.go b/p2p/muxer/yamux/transport.go
new file mode 100644
index 0000000000..8350abdd86
--- /dev/null
+++ b/p2p/muxer/yamux/transport.go
@@ -0,0 +1,63 @@
+package yamux
+
+import (
+ "io"
+ "math"
+ "net"
+
+ "github.com/libp2p/go-libp2p/core/network"
+
+ "github.com/libp2p/go-yamux/v5"
+)
+
+var DefaultTransport *Transport
+
+const ID = "/yamux/1.0.0"
+
+func init() {
+ config := yamux.DefaultConfig()
+ // We've bumped this to 16MiB as this critically limits throughput.
+ //
+ // 1MiB means a best case of 10MiB/s (83.89Mbps) on a connection with
+ // 100ms latency. The default gave us 2.4MiB *best case* which was
+ // totally unacceptable.
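+ // (A single stream's best-case throughput is roughly window/RTT:
+ // 1 MiB / 100 ms ≈ 10 MiB/s, so a 16 MiB window allows ≈ 160 MiB/s.)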
+ config.MaxStreamWindowSize = uint32(16 * 1024 * 1024)
+ // don't spam
+ config.LogOutput = io.Discard
+ // We always run over a security transport that buffers internally
+ // (i.e., uses a block cipher).
+ config.ReadBufSize = 0
+ // Effectively disable the incoming streams limit.
+ // This is now dynamically limited by the resource manager.
+ config.MaxIncomingStreams = math.MaxUint32
+ DefaultTransport = (*Transport)(config)
+}
+
+// Transport is a mux.Multiplexer that constructs
+// yamux-backed muxed connections.
+type Transport yamux.Config
+
+var _ network.Multiplexer = &Transport{}
+
+func (t *Transport) NewConn(nc net.Conn, isServer bool, scope network.PeerScope) (network.MuxedConn, error) {
+ var newSpan func() (yamux.MemoryManager, error)
+ if scope != nil {
+ newSpan = func() (yamux.MemoryManager, error) { return scope.BeginSpan() }
+ }
+
+ var s *yamux.Session
+ var err error
+ if isServer {
+ s, err = yamux.Server(nc, t.Config(), newSpan)
+ } else {
+ s, err = yamux.Client(nc, t.Config(), newSpan)
+ }
+ if err != nil {
+ return nil, err
+ }
+ return NewMuxedConn(s), nil
+}
+
+func (t *Transport) Config() *yamux.Config {
+ return (*yamux.Config)(t)
+}
diff --git a/p2p/muxer/yamux/transport_test.go b/p2p/muxer/yamux/transport_test.go
new file mode 100644
index 0000000000..4a68ac9cb5
--- /dev/null
+++ b/p2p/muxer/yamux/transport_test.go
@@ -0,0 +1,15 @@
+package yamux
+
+import (
+ "testing"
+
+ tmux "github.com/libp2p/go-libp2p/p2p/muxer/testsuite"
+)
+
+func TestDefaultTransport(t *testing.T) {
+ // Yamux doesn't have any backpressure when it comes to opening streams.
+ // If the peer opens too many streams, those are just reset.
+ delete(tmux.Subtests, "github.com/libp2p/go-libp2p-testing/suites/mux.SubtestStress1Conn1000Stream10Msg")
+
+ tmux.SubtestAll(t, DefaultTransport)
+}
diff --git a/p2p/net/README.md b/p2p/net/README.md
index 23e054b222..a7c9062a3c 100644
--- a/p2p/net/README.md
+++ b/p2p/net/README.md
@@ -4,9 +4,9 @@ The IPFS Network package handles all of the peer-to-peer networking. It connects
- `Conn` - a connection to a single Peer
- `MultiConn` - a set of connections to a single Peer
- - `SecureConn` - an encrypted (tls-like) connection
+ - `SecureConn` - an encrypted (TLS-like) connection
- `Swarm` - holds connections to Peers, multiplexes from/to each `MultiConn`
-- `Muxer` - multiplexes between `Services` and `Swarm`. Handles `Requet/Reply`.
+- `Muxer` - multiplexes between `Services` and `Swarm`. Handles `Request/Reply`.
- `Service` - connects between an outside client service and Network.
- `Handler` - the client service part that handles requests
diff --git a/p2p/net/conngater/conngater.go b/p2p/net/conngater/conngater.go
new file mode 100644
index 0000000000..697eadad5b
--- /dev/null
+++ b/p2p/net/conngater/conngater.go
@@ -0,0 +1,352 @@
+package conngater
+
+import (
+ "context"
+ "net"
+ "sync"
+
+ "github.com/libp2p/go-libp2p/core/connmgr"
+ "github.com/libp2p/go-libp2p/core/control"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+
+ "github.com/ipfs/go-datastore"
+ "github.com/ipfs/go-datastore/namespace"
+ "github.com/ipfs/go-datastore/query"
+ logging "github.com/libp2p/go-libp2p/gologshim"
+)
+
+// BasicConnectionGater implements a connection gater that allows the application to perform
+// access control on incoming and outgoing connections.
+type BasicConnectionGater struct {
+ sync.RWMutex
+
+ blockedPeers map[peer.ID]struct{}
+ blockedAddrs map[string]struct{}
+ blockedSubnets map[string]*net.IPNet
+
+ ds datastore.Datastore
+}
+
+var log = logging.Logger("net/conngater")
+
+const (
+ ns = "/libp2p/net/conngater"
+ keyPeer = "/peer/"
+ keyAddr = "/addr/"
+ keySubnet = "/subnet/"
+)
+
+// NewBasicConnectionGater creates a new connection gater.
+// The ds argument is an optional datastore (may be nil) used to persist the
+// connection gater's filters.
+func NewBasicConnectionGater(ds datastore.Datastore) (*BasicConnectionGater, error) {
+ cg := &BasicConnectionGater{
+ blockedPeers: make(map[peer.ID]struct{}),
+ blockedAddrs: make(map[string]struct{}),
+ blockedSubnets: make(map[string]*net.IPNet),
+ }
+
+ if ds != nil {
+ cg.ds = namespace.Wrap(ds, datastore.NewKey(ns))
+ err := cg.loadRules(context.Background())
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return cg, nil
+}
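+
+// A minimal usage sketch (illustrative; it assumes the libp2p.ConnectionGater
+// host option and a hypothetical peer.ID named badPeer):
+//
+//	cg, err := NewBasicConnectionGater(nil) // nil datastore: rules are not persisted
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = cg.BlockPeer(badPeer)
+//	host, err := libp2p.New(libp2p.ConnectionGater(cg))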
+
+func (cg *BasicConnectionGater) loadRules(ctx context.Context) error {
+ // load blocked peers
+ res, err := cg.ds.Query(ctx, query.Query{Prefix: keyPeer})
+ if err != nil {
+ log.Error("error querying datastore for blocked peers", "err", err)
+ return err
+ }
+
+ for r := range res.Next() {
+ if r.Error != nil {
+ log.Error("query result error", "err", r.Error)
+ return r.Error
+ }
+
+ p := peer.ID(r.Entry.Value)
+ cg.blockedPeers[p] = struct{}{}
+ }
+
+ // load blocked addrs
+ res, err = cg.ds.Query(ctx, query.Query{Prefix: keyAddr})
+ if err != nil {
+ log.Error("error querying datastore for blocked addrs", "err", err)
+ return err
+ }
+
+ for r := range res.Next() {
+ if r.Error != nil {
+ log.Error("query result error", "err", r.Error)
+ return r.Error
+ }
+
+ ip := net.IP(r.Entry.Value)
+ cg.blockedAddrs[ip.String()] = struct{}{}
+ }
+
+ // load blocked subnets
+ res, err = cg.ds.Query(ctx, query.Query{Prefix: keySubnet})
+ if err != nil {
+ log.Error("error querying datastore for blocked subnets", "err", err)
+ return err
+ }
+
+ for r := range res.Next() {
+ if r.Error != nil {
+ log.Error("query result error", "err", r.Error)
+ return r.Error
+ }
+
+ ipnetStr := string(r.Entry.Value)
+ _, ipnet, err := net.ParseCIDR(ipnetStr)
+ if err != nil {
+ log.Error("error parsing CIDR subnet", "err", err)
+ return err
+ }
+ cg.blockedSubnets[ipnetStr] = ipnet
+ }
+
+ return nil
+}
+
+// BlockPeer adds a peer to the set of blocked peers.
+// Note: active connections to the peer are not automatically closed.
+func (cg *BasicConnectionGater) BlockPeer(p peer.ID) error {
+ if cg.ds != nil {
+ err := cg.ds.Put(context.Background(), datastore.NewKey(keyPeer+p.String()), []byte(p))
+ if err != nil {
+ log.Error("error writing blocked peer to datastore", "err", err)
+ return err
+ }
+ }
+
+ cg.Lock()
+ defer cg.Unlock()
+ cg.blockedPeers[p] = struct{}{}
+
+ return nil
+}
+
+// UnblockPeer removes a peer from the set of blocked peers
+func (cg *BasicConnectionGater) UnblockPeer(p peer.ID) error {
+ if cg.ds != nil {
+ err := cg.ds.Delete(context.Background(), datastore.NewKey(keyPeer+p.String()))
+ if err != nil {
+ log.Error("error deleting blocked peer from datastore", "err", err)
+ return err
+ }
+ }
+
+ cg.Lock()
+ defer cg.Unlock()
+
+ delete(cg.blockedPeers, p)
+
+ return nil
+}
+
+// ListBlockedPeers returns the list of blocked peers
+func (cg *BasicConnectionGater) ListBlockedPeers() []peer.ID {
+ cg.RLock()
+ defer cg.RUnlock()
+
+ result := make([]peer.ID, 0, len(cg.blockedPeers))
+ for p := range cg.blockedPeers {
+ result = append(result, p)
+ }
+
+ return result
+}
+
+// BlockAddr adds an IP address to the set of blocked addresses.
+// Note: active connections to the IP address are not automatically closed.
+func (cg *BasicConnectionGater) BlockAddr(ip net.IP) error {
+ if cg.ds != nil {
+ err := cg.ds.Put(context.Background(), datastore.NewKey(keyAddr+ip.String()), []byte(ip))
+ if err != nil {
+ log.Error("error writing blocked addr to datastore", "err", err)
+ return err
+ }
+ }
+
+ cg.Lock()
+ defer cg.Unlock()
+
+ cg.blockedAddrs[ip.String()] = struct{}{}
+
+ return nil
+}
+
+// UnblockAddr removes an IP address from the set of blocked addresses
+func (cg *BasicConnectionGater) UnblockAddr(ip net.IP) error {
+ if cg.ds != nil {
+ err := cg.ds.Delete(context.Background(), datastore.NewKey(keyAddr+ip.String()))
+ if err != nil {
+ log.Error("error deleting blocked addr from datastore", "err", err)
+ return err
+ }
+ }
+
+ cg.Lock()
+ defer cg.Unlock()
+
+ delete(cg.blockedAddrs, ip.String())
+
+ return nil
+}
+
+// ListBlockedAddrs returns the list of blocked IP addresses
+func (cg *BasicConnectionGater) ListBlockedAddrs() []net.IP {
+ cg.RLock()
+ defer cg.RUnlock()
+
+ result := make([]net.IP, 0, len(cg.blockedAddrs))
+ for ipStr := range cg.blockedAddrs {
+ ip := net.ParseIP(ipStr)
+ result = append(result, ip)
+ }
+
+ return result
+}
+
+// BlockSubnet adds an IP subnet to the set of blocked subnets.
+// Note: active connections to the IP subnet are not automatically closed.
+func (cg *BasicConnectionGater) BlockSubnet(ipnet *net.IPNet) error {
+ if cg.ds != nil {
+ err := cg.ds.Put(context.Background(), datastore.NewKey(keySubnet+ipnet.String()), []byte(ipnet.String()))
+ if err != nil {
+ log.Error("error writing blocked addr to datastore", "err", err)
+ return err
+ }
+ }
+
+ cg.Lock()
+ defer cg.Unlock()
+
+ cg.blockedSubnets[ipnet.String()] = ipnet
+
+ return nil
+}
+
+// UnblockSubnet removes an IP subnet from the set of blocked subnets
+func (cg *BasicConnectionGater) UnblockSubnet(ipnet *net.IPNet) error {
+ if cg.ds != nil {
+ err := cg.ds.Delete(context.Background(), datastore.NewKey(keySubnet+ipnet.String()))
+ if err != nil {
+ log.Error("error deleting blocked subnet from datastore", "err", err)
+ return err
+ }
+ }
+
+ cg.Lock()
+ defer cg.Unlock()
+
+ delete(cg.blockedSubnets, ipnet.String())
+
+ return nil
+}
+
+// ListBlockedSubnets returns the list of blocked IP subnets
+func (cg *BasicConnectionGater) ListBlockedSubnets() []*net.IPNet {
+ cg.RLock()
+ defer cg.RUnlock()
+
+ result := make([]*net.IPNet, 0, len(cg.blockedSubnets))
+ for _, ipnet := range cg.blockedSubnets {
+ result = append(result, ipnet)
+ }
+
+ return result
+}
+
+// ConnectionGater interface
+var _ connmgr.ConnectionGater = (*BasicConnectionGater)(nil)
+
+func (cg *BasicConnectionGater) InterceptPeerDial(p peer.ID) (allow bool) {
+ cg.RLock()
+ defer cg.RUnlock()
+
+ _, block := cg.blockedPeers[p]
+ return !block
+}
+
+func (cg *BasicConnectionGater) InterceptAddrDial(_ peer.ID, a ma.Multiaddr) (allow bool) {
+ // we have already filtered blocked peers in InterceptPeerDial, so we just check the IP
+ cg.RLock()
+ defer cg.RUnlock()
+
+ ip, err := manet.ToIP(a)
+ if err != nil {
+ log.Warn("error converting multiaddr to IP addr", "err", err)
+ return true
+ }
+
+ _, block := cg.blockedAddrs[ip.String()]
+ if block {
+ return false
+ }
+
+ for _, ipnet := range cg.blockedSubnets {
+ if ipnet.Contains(ip) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (cg *BasicConnectionGater) InterceptAccept(cma network.ConnMultiaddrs) (allow bool) {
+ cg.RLock()
+ defer cg.RUnlock()
+
+ a := cma.RemoteMultiaddr()
+
+ ip, err := manet.ToIP(a)
+ if err != nil {
+ log.Warn("error converting multiaddr to IP addr", "err", err)
+ return true
+ }
+
+ _, block := cg.blockedAddrs[ip.String()]
+ if block {
+ return false
+ }
+
+ for _, ipnet := range cg.blockedSubnets {
+ if ipnet.Contains(ip) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (cg *BasicConnectionGater) InterceptSecured(dir network.Direction, p peer.ID, _ network.ConnMultiaddrs) (allow bool) {
+ if dir == network.DirOutbound {
+ // we have already filtered those in InterceptPeerDial/InterceptAddrDial
+ return true
+ }
+
+ // we have already filtered addrs in InterceptAccept, so we just check the peer ID
+ cg.RLock()
+ defer cg.RUnlock()
+
+ _, block := cg.blockedPeers[p]
+ return !block
+}
+
+func (cg *BasicConnectionGater) InterceptUpgraded(network.Conn) (allow bool, reason control.DisconnectReason) {
+ return true, 0
+}
diff --git a/p2p/net/conngater/conngater_test.go b/p2p/net/conngater/conngater_test.go
new file mode 100644
index 0000000000..d60b5e4515
--- /dev/null
+++ b/p2p/net/conngater/conngater_test.go
@@ -0,0 +1,374 @@
+package conngater
+
+import (
+ "net"
+ "testing"
+
+ "github.com/ipfs/go-datastore"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+func TestConnectionGater(t *testing.T) {
+ ds := datastore.NewMapDatastore()
+
+ peerA := peer.ID("A")
+ peerB := peer.ID("B")
+
+ ip1 := net.ParseIP("1.2.3.4")
+
+ _, ipNet1, err := net.ParseCIDR("1.2.3.0/24")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cg, err := NewBasicConnectionGater(ds)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // test peer blocking
+ allow := cg.InterceptPeerDial(peerA)
+ if !allow {
+ t.Fatal("expected gater to allow peerA")
+ }
+
+ allow = cg.InterceptPeerDial(peerB)
+ if !allow {
+ t.Fatal("expected gater to allow peerB")
+ }
+
+ allow = cg.InterceptSecured(network.DirInbound, peerA, &mockConnMultiaddrs{local: nil, remote: nil})
+ if !allow {
+ t.Fatal("expected gater to allow peerA")
+ }
+
+ allow = cg.InterceptSecured(network.DirInbound, peerB, &mockConnMultiaddrs{local: nil, remote: nil})
+ if !allow {
+ t.Fatal("expected gater to allow peerB")
+ }
+
+ err = cg.BlockPeer(peerA)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ allow = cg.InterceptPeerDial(peerA)
+ if allow {
+ t.Fatal("expected gater to deny peerA")
+ }
+
+ allow = cg.InterceptPeerDial(peerB)
+ if !allow {
+ t.Fatal("expected gater to allow peerB")
+ }
+
+ allow = cg.InterceptSecured(network.DirInbound, peerA, &mockConnMultiaddrs{local: nil, remote: nil})
+ if allow {
+ t.Fatal("expected gater to deny peerA")
+ }
+
+ allow = cg.InterceptSecured(network.DirInbound, peerB, &mockConnMultiaddrs{local: nil, remote: nil})
+ if !allow {
+ t.Fatal("expected gater to allow peerB")
+ }
+
+ // test addr and subnet blocking
+ allow = cg.InterceptAddrDial(peerB, ma.StringCast("/ip4/1.2.3.4/tcp/1234"))
+ if !allow {
+ t.Fatal("expected gater to allow peerB in 1.2.3.4")
+ }
+
+ allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: ma.StringCast("/ip4/1.2.3.4/tcp/1234")})
+ if !allow {
+ t.Fatal("expected gater to allow peerB in 1.2.3.4")
+ }
+
+ allow = cg.InterceptAddrDial(peerB, ma.StringCast("/ip4/1.2.3.5/tcp/1234"))
+ if !allow {
+ t.Fatal("expected gater to allow peerB in 1.2.3.5")
+ }
+
+ allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: ma.StringCast("/ip4/1.2.3.5/tcp/1234")})
+ if !allow {
+ t.Fatal("expected gater to allow peerB in 1.2.3.5")
+ }
+
+ allow = cg.InterceptAddrDial(peerB, ma.StringCast("/ip4/2.3.4.5/tcp/1234"))
+ if !allow {
+ t.Fatal("expected gater to allow peerB in 2.3.4.5")
+ }
+
+ allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: ma.StringCast("/ip4/2.3.4.5/tcp/1234")})
+ if !allow {
+ t.Fatal("expected gater to allow peerB in 2.3.4.5")
+ }
+
+ err = cg.BlockAddr(ip1)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ allow = cg.InterceptAddrDial(peerB, ma.StringCast("/ip4/1.2.3.4/tcp/1234"))
+ if allow {
+ t.Fatal("expected gater to deny peerB in 1.2.3.4")
+ }
+
+ allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: ma.StringCast("/ip4/1.2.3.4/tcp/1234")})
+ if allow {
+ t.Fatal("expected gater to deny peerB in 1.2.3.4")
+ }
+
+ allow = cg.InterceptAddrDial(peerB, ma.StringCast("/ip4/1.2.3.5/tcp/1234"))
+ if !allow {
+ t.Fatal("expected gater to allow peerB in 1.2.3.5")
+ }
+
+ allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: ma.StringCast("/ip4/1.2.3.5/tcp/1234")})
+ if !allow {
+ t.Fatal("expected gater to allow peerB in 1.2.3.5")
+ }
+
+ allow = cg.InterceptAddrDial(peerB, ma.StringCast("/ip4/2.3.4.5/tcp/1234"))
+ if !allow {
+ t.Fatal("expected gater to allow peerB in 2.3.4.5")
+ }
+
+ allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: ma.StringCast("/ip4/2.3.4.5/tcp/1234")})
+ if !allow {
+ t.Fatal("expected gater to allow peerB in 2.3.4.5")
+ }
+
+ err = cg.BlockSubnet(ipNet1)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ allow = cg.InterceptAddrDial(peerB, ma.StringCast("/ip4/1.2.3.5/tcp/1234"))
+ if allow {
+ t.Fatal("expected gater to deny peerB in 1.2.3.5")
+ }
+
+ allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: ma.StringCast("/ip4/1.2.3.5/tcp/1234")})
+ if allow {
+ t.Fatal("expected gater to deny peerB in 1.2.3.5")
+ }
+
+ allow = cg.InterceptAddrDial(peerB, ma.StringCast("/ip4/2.3.4.5/tcp/1234"))
+ if !allow {
+ t.Fatal("expected gater to allow peerB in 2.3.4.5")
+ }
+
+ allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: ma.StringCast("/ip4/2.3.4.5/tcp/1234")})
+ if !allow {
+ t.Fatal("expected gater to allow peerB in 2.3.4.5")
+ }
+
+ // make a new gater reusing the datastore to test persistence
+ cg, err = NewBasicConnectionGater(ds)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // test the list methods while at it
+ blockedPeers := cg.ListBlockedPeers()
+ if len(blockedPeers) != 1 {
+ t.Fatalf("expected 1 blocked peer, but got %d", len(blockedPeers))
+ }
+
+ blockedAddrs := cg.ListBlockedAddrs()
+ if len(blockedAddrs) != 1 {
+ t.Fatalf("expected 1 blocked addr, but got %d", len(blockedAddrs))
+ }
+
+ blockedSubnets := cg.ListBlockedSubnets()
+ if len(blockedSubnets) != 1 {
+ t.Fatalf("expected 1 blocked subnet, but got %d", len(blockedSubnets))
+ }
+
+ allow = cg.InterceptPeerDial(peerA)
+ if allow {
+ t.Fatal("expected gater to deny peerA")
+ }
+
+ allow = cg.InterceptPeerDial(peerB)
+ if !allow {
+ t.Fatal("expected gater to allow peerB")
+ }
+
+ allow = cg.InterceptSecured(network.DirInbound, peerA, &mockConnMultiaddrs{local: nil, remote: nil})
+ if allow {
+ t.Fatal("expected gater to deny peerA")
+ }
+
+ allow = cg.InterceptSecured(network.DirInbound, peerB, &mockConnMultiaddrs{local: nil, remote: nil})
+ if !allow {
+ t.Fatal("expected gater to allow peerB")
+ }
+
+ allow = cg.InterceptAddrDial(peerB, ma.StringCast("/ip4/1.2.3.4/tcp/1234"))
+ if allow {
+ t.Fatal("expected gater to deny peerB in 1.2.3.4")
+ }
+
+ allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: ma.StringCast("/ip4/1.2.3.4/tcp/1234")})
+ if allow {
+ t.Fatal("expected gater to deny peerB in 1.2.3.4")
+ }
+
+ allow = cg.InterceptAddrDial(peerB, ma.StringCast("/ip4/1.2.3.5/tcp/1234"))
+ if allow {
+ t.Fatal("expected gater to deny peerB in 1.2.3.5")
+ }
+
+ allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: ma.StringCast("/ip4/1.2.3.5/tcp/1234")})
+ if allow {
+ t.Fatal("expected gater to deny peerB in 1.2.3.5")
+ }
+
+ allow = cg.InterceptAddrDial(peerB, ma.StringCast("/ip4/2.3.4.5/tcp/1234"))
+ if !allow {
+ t.Fatal("expected gater to allow peerB in 2.3.4.5")
+ }
+
+ allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: ma.StringCast("/ip4/2.3.4.5/tcp/1234")})
+ if !allow {
+ t.Fatal("expected gater to allow peerB in 2.3.4.5")
+ }
+
+	// undo the blocks to verify that unblocking works
+ err = cg.UnblockPeer(peerA)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = cg.UnblockAddr(ip1)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = cg.UnblockSubnet(ipNet1)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ allow = cg.InterceptPeerDial(peerA)
+ if !allow {
+ t.Fatal("expected gater to allow peerA")
+ }
+
+ allow = cg.InterceptPeerDial(peerB)
+ if !allow {
+ t.Fatal("expected gater to allow peerB")
+ }
+
+ allow = cg.InterceptSecured(network.DirInbound, peerA, &mockConnMultiaddrs{local: nil, remote: nil})
+ if !allow {
+ t.Fatal("expected gater to allow peerA")
+ }
+
+ allow = cg.InterceptSecured(network.DirInbound, peerB, &mockConnMultiaddrs{local: nil, remote: nil})
+ if !allow {
+ t.Fatal("expected gater to allow peerB")
+ }
+
+ allow = cg.InterceptAddrDial(peerB, ma.StringCast("/ip4/1.2.3.4/tcp/1234"))
+ if !allow {
+ t.Fatal("expected gater to allow peerB in 1.2.3.4")
+ }
+
+ allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: ma.StringCast("/ip4/1.2.3.4/tcp/1234")})
+ if !allow {
+ t.Fatal("expected gater to allow peerB in 1.2.3.4")
+ }
+
+ allow = cg.InterceptAddrDial(peerB, ma.StringCast("/ip4/1.2.3.5/tcp/1234"))
+ if !allow {
+ t.Fatal("expected gater to allow peerB in 1.2.3.5")
+ }
+
+ allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: ma.StringCast("/ip4/1.2.3.5/tcp/1234")})
+ if !allow {
+ t.Fatal("expected gater to allow peerB in 1.2.3.5")
+ }
+
+ allow = cg.InterceptAddrDial(peerB, ma.StringCast("/ip4/2.3.4.5/tcp/1234"))
+ if !allow {
+ t.Fatal("expected gater to allow peerB in 2.3.4.5")
+ }
+
+ allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: ma.StringCast("/ip4/2.3.4.5/tcp/1234")})
+ if !allow {
+ t.Fatal("expected gater to allow peerB in 2.3.4.5")
+ }
+
+ // make a new gater reusing the datastore to test persistence of unblocks
+ cg, err = NewBasicConnectionGater(ds)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ allow = cg.InterceptPeerDial(peerA)
+ if !allow {
+ t.Fatal("expected gater to allow peerA")
+ }
+
+ allow = cg.InterceptPeerDial(peerB)
+ if !allow {
+ t.Fatal("expected gater to allow peerB")
+ }
+
+ allow = cg.InterceptSecured(network.DirInbound, peerA, &mockConnMultiaddrs{local: nil, remote: nil})
+ if !allow {
+ t.Fatal("expected gater to allow peerA")
+ }
+
+ allow = cg.InterceptSecured(network.DirInbound, peerB, &mockConnMultiaddrs{local: nil, remote: nil})
+ if !allow {
+ t.Fatal("expected gater to allow peerB")
+ }
+
+ allow = cg.InterceptAddrDial(peerB, ma.StringCast("/ip4/1.2.3.4/tcp/1234"))
+ if !allow {
+ t.Fatal("expected gater to allow peerB in 1.2.3.4")
+ }
+
+ allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: ma.StringCast("/ip4/1.2.3.4/tcp/1234")})
+ if !allow {
+ t.Fatal("expected gater to allow peerB in 1.2.3.4")
+ }
+
+ allow = cg.InterceptAddrDial(peerB, ma.StringCast("/ip4/1.2.3.5/tcp/1234"))
+ if !allow {
+ t.Fatal("expected gater to allow peerB in 1.2.3.5")
+ }
+
+ allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: ma.StringCast("/ip4/1.2.3.5/tcp/1234")})
+ if !allow {
+ t.Fatal("expected gater to allow peerB in 1.2.3.5")
+ }
+
+ allow = cg.InterceptAddrDial(peerB, ma.StringCast("/ip4/2.3.4.5/tcp/1234"))
+ if !allow {
+ t.Fatal("expected gater to allow peerB in 2.3.4.5")
+ }
+
+ allow = cg.InterceptAccept(&mockConnMultiaddrs{local: nil, remote: ma.StringCast("/ip4/2.3.4.5/tcp/1234")})
+ if !allow {
+ t.Fatal("expected gater to allow peerB in 2.3.4.5")
+ }
+}
+
+type mockConnMultiaddrs struct {
+ local, remote ma.Multiaddr
+}
+
+func (cma *mockConnMultiaddrs) LocalMultiaddr() ma.Multiaddr {
+ return cma.local
+}
+
+func (cma *mockConnMultiaddrs) RemoteMultiaddr() ma.Multiaddr {
+ return cma.remote
+}
diff --git a/p2p/net/connmgr/bench_test.go b/p2p/net/connmgr/bench_test.go
new file mode 100644
index 0000000000..83442f9162
--- /dev/null
+++ b/p2p/net/connmgr/bench_test.go
@@ -0,0 +1,54 @@
+package connmgr
+
+import (
+ "math/rand"
+ "sync"
+ "testing"
+
+ "github.com/libp2p/go-libp2p/core/network"
+
+ "github.com/stretchr/testify/require"
+)
+
+func randomConns(tb testing.TB) (c [5000]network.Conn) {
+ for i := range c {
+ c[i] = randConn(tb, nil)
+ }
+ return c
+}
+
+func BenchmarkLockContention(b *testing.B) {
+ conns := randomConns(b)
+ cm, err := NewConnManager(1000, 1000, WithGracePeriod(0))
+ require.NoError(b, err)
+ not := cm.Notifee()
+
+ kill := make(chan struct{})
+ var wg sync.WaitGroup
+
+ for i := 0; i < 16; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case <-kill:
+ return
+ default:
+ cm.TagPeer(conns[rand.Intn(len(conns))].RemotePeer(), "another-tag", 1)
+ }
+ }
+ }()
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ rc := conns[rand.Intn(len(conns))]
+ not.Connected(nil, rc)
+ cm.TagPeer(rc.RemotePeer(), "tag", 100)
+ cm.UntagPeer(rc.RemotePeer(), "tag")
+ not.Disconnected(nil, rc)
+ }
+ close(kill)
+ wg.Wait()
+}
diff --git a/p2p/net/connmgr/connmgr.go b/p2p/net/connmgr/connmgr.go
new file mode 100644
index 0000000000..3b302e31dd
--- /dev/null
+++ b/p2p/net/connmgr/connmgr.go
@@ -0,0 +1,733 @@
+package connmgr
+
+import (
+ "context"
+ "fmt"
+ "sort"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/benbjohnson/clock"
+ "github.com/libp2p/go-libp2p/core/connmgr"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+
+ logging "github.com/libp2p/go-libp2p/gologshim"
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+var log = logging.Logger("connmgr")
+
+// BasicConnMgr is a ConnManager that trims connections whenever the count exceeds the
+// high watermark. New connections are given a grace period before they become subject
+// to trimming. Trims are automatically run on demand, but only if more than 10 seconds
+// have elapsed since the previous trim. Furthermore, trims can be explicitly
+// requested through the public interface of this struct (see TrimOpenConns).
+//
+// See configuration parameters in NewConnManager.
+type BasicConnMgr struct {
+ *decayer
+
+ clock clock.Clock
+
+ cfg *config
+ segments segments
+
+ plk sync.RWMutex
+ protected map[peer.ID]map[string]struct{}
+
+	// trimMutex ensures that only a single trim is in progress at a time
+ trimMutex sync.Mutex
+ connCount atomic.Int32
+	// trimCount is accessed atomically, mimicking the implementation of a sync.Once.
+	// Take care to keep it 64-bit aligned when modifying this struct.
+ trimCount uint64
+
+ lastTrimMu sync.RWMutex
+ lastTrim time.Time
+
+ refCount sync.WaitGroup
+ ctx context.Context
+ cancel func()
+ unregisterMemoryWatcher func()
+}
+
+var (
+ _ connmgr.ConnManager = (*BasicConnMgr)(nil)
+ _ connmgr.Decayer = (*BasicConnMgr)(nil)
+)
+
+type segment struct {
+ sync.Mutex
+ peers map[peer.ID]*peerInfo
+}
+
+type segments struct {
+ // bucketsMu is used to prevent deadlocks when concurrent processes try to
+ // grab multiple segment locks at once. If you need multiple segment locks
+ // at once, you should grab this lock first. You may release this lock once
+ // you have the segment locks.
+ bucketsMu sync.Mutex
+ buckets [256]*segment
+}
+
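+// get returns the segment for a peer, bucketing on the last byte of the peer
+// ID so that peers are spread across 256 independently locked segments.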
+func (ss *segments) get(p peer.ID) *segment {
+ return ss.buckets[p[len(p)-1]]
+}
+
+func (ss *segments) countPeers() (count int) {
+ for _, seg := range ss.buckets {
+ seg.Lock()
+ count += len(seg.peers)
+ seg.Unlock()
+ }
+ return count
+}
+
+func (s *segment) tagInfoFor(p peer.ID, now time.Time) *peerInfo {
+ pi, ok := s.peers[p]
+ if ok {
+ return pi
+ }
+	// create a temporary entry to buffer early tags before the Connected notification arrives.
+ pi = &peerInfo{
+ id: p,
+ firstSeen: now, // this timestamp will be updated when the first Connected notification arrives.
+ temp: true,
+ tags: make(map[string]int),
+ decaying: make(map[*decayingTag]*connmgr.DecayingValue),
+ conns: make(map[network.Conn]time.Time),
+ }
+ s.peers[p] = pi
+ return pi
+}
+
+// NewConnManager creates a new BasicConnMgr with the provided params:
+// low and hi are watermarks governing the number of connections that'll be maintained.
+// When the peer count exceeds the 'high watermark', peers are pruned (and their
+// connections terminated) until only 'low watermark' peers remain.
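+//
+// A minimal construction sketch (the watermark values here are illustrative,
+// not recommendations):
+//
+//	cm, err := NewConnManager(100, 400, WithGracePeriod(time.Minute))
+//	if err != nil {
+//		// handle error
+//	}
+//	defer cm.Close()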
+func NewConnManager(low, hi int, opts ...Option) (*BasicConnMgr, error) {
+ cfg := &config{
+ highWater: hi,
+ lowWater: low,
+ gracePeriod: time.Minute,
+ silencePeriod: 10 * time.Second,
+ clock: clock.New(),
+ }
+ for _, o := range opts {
+ if err := o(cfg); err != nil {
+ return nil, err
+ }
+ }
+
+ if cfg.decayer == nil {
+ // Set the default decayer config.
+ cfg.decayer = (&DecayerCfg{}).WithDefaults()
+ }
+
+ cm := &BasicConnMgr{
+ cfg: cfg,
+ clock: cfg.clock,
+ protected: make(map[peer.ID]map[string]struct{}, 16),
+ segments: segments{},
+ }
+
+ for i := range cm.segments.buckets {
+ cm.segments.buckets[i] = &segment{
+ peers: make(map[peer.ID]*peerInfo),
+ }
+ }
+
+ cm.ctx, cm.cancel = context.WithCancel(context.Background())
+
+ decay, _ := NewDecayer(cfg.decayer, cm)
+ cm.decayer = decay
+
+ cm.refCount.Add(1)
+ go cm.background()
+ return cm, nil
+}
+
+// ForceTrim trims connections down to the low watermark, ignoring the silence period,
+// grace period, and protected status. It prioritizes closing unprotected connections.
+// If, after closing all unprotected connections, the count still exceeds the low
+// watermark, it closes protected connections as well.
+func (cm *BasicConnMgr) ForceTrim() {
+ connCount := int(cm.connCount.Load())
+ target := connCount - cm.cfg.lowWater
+	if target < 0 {
+		log.Warn("Low on memory, but we only have a few connections", "num", connCount, "low_watermark", cm.cfg.lowWater)
+		return
+	}
+	log.Warn("Low on memory. Closing connections.", "count", target)
+
+ cm.trimMutex.Lock()
+ defer atomic.AddUint64(&cm.trimCount, 1)
+ defer cm.trimMutex.Unlock()
+
+ // Trim connections without paying attention to the silence period.
+ for _, c := range cm.getConnsToCloseEmergency(target) {
+ log.Info("low on memory. closing conn", "peer", c.RemotePeer())
+
+ c.CloseWithError(network.ConnGarbageCollected)
+ }
+
+ // finally, update the last trim time.
+ cm.lastTrimMu.Lock()
+ cm.lastTrim = cm.clock.Now()
+ cm.lastTrimMu.Unlock()
+}
+
+func (cm *BasicConnMgr) Close() error {
+ cm.cancel()
+ if cm.unregisterMemoryWatcher != nil {
+ cm.unregisterMemoryWatcher()
+ }
+ if err := cm.decayer.Close(); err != nil {
+ return err
+ }
+ cm.refCount.Wait()
+ return nil
+}
+
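+// Protect marks a peer as protected under the given tag. Protected peers are
+// exempt from regular trims; ForceTrim may still close their connections as a
+// last resort.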
+func (cm *BasicConnMgr) Protect(id peer.ID, tag string) {
+ cm.plk.Lock()
+ defer cm.plk.Unlock()
+
+ tags, ok := cm.protected[id]
+ if !ok {
+ tags = make(map[string]struct{}, 2)
+ cm.protected[id] = tags
+ }
+ tags[tag] = struct{}{}
+}
+
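+// Unprotect removes the given protection tag from a peer. It reports whether
+// the peer remains protected under other tags.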
+func (cm *BasicConnMgr) Unprotect(id peer.ID, tag string) (protected bool) {
+ cm.plk.Lock()
+ defer cm.plk.Unlock()
+
+ tags, ok := cm.protected[id]
+ if !ok {
+ return false
+ }
+ if delete(tags, tag); len(tags) == 0 {
+ delete(cm.protected, id)
+ return false
+ }
+ return true
+}
+
+func (cm *BasicConnMgr) IsProtected(id peer.ID, tag string) (protected bool) {
+ cm.plk.Lock()
+ defer cm.plk.Unlock()
+
+ tags, ok := cm.protected[id]
+ if !ok {
+ return false
+ }
+
+ if tag == "" {
+ return true
+ }
+
+ _, protected = tags[tag]
+ return protected
+}
+
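+// CheckLimit returns an error if the configured high watermark exceeds the
+// system connection limit reported by the given limiter.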
+func (cm *BasicConnMgr) CheckLimit(systemLimit connmgr.GetConnLimiter) error {
+ if cm.cfg.highWater > systemLimit.GetConnLimit() {
+ return fmt.Errorf(
+ "conn manager high watermark limit: %d, exceeds the system connection limit of: %d",
+ cm.cfg.highWater,
+ systemLimit.GetConnLimit(),
+ )
+ }
+ return nil
+}
+
+// peerInfo stores metadata for a given peer.
+type peerInfo struct {
+ id peer.ID
+ tags map[string]int // value for each tag
+ decaying map[*decayingTag]*connmgr.DecayingValue // decaying tags
+
+ value int // cached sum of all tag values
+ temp bool // this is a temporary entry holding early tags, and awaiting connections
+
+ conns map[network.Conn]time.Time // start time of each connection
+
+ firstSeen time.Time // timestamp when we began tracking this peer.
+}
+
+type peerInfos []*peerInfo
+
+// SortByValueAndStreams sorts peerInfos by their value and stream count. It
+// will sort peers with no streams before those with streams (all else being
+// equal). If `sortByMoreStreams` is true, it will sort peers with more streams
+// before those with fewer streams. This is useful to prioritize freeing memory.
+func (p peerInfos) SortByValueAndStreams(segments *segments, sortByMoreStreams bool) {
+ sort.Slice(p, func(i, j int) bool {
+ left, right := p[i], p[j]
+
+ // Grab this lock so that we can grab both segment locks below without deadlocking.
+ segments.bucketsMu.Lock()
+
+ // lock this to protect from concurrent modifications from connect/disconnect events
+ leftSegment := segments.get(left.id)
+ leftSegment.Lock()
+ defer leftSegment.Unlock()
+
+ rightSegment := segments.get(right.id)
+ if leftSegment != rightSegment {
+			// These two peers are not in the same segment; take the second segment's lock as well
+ rightSegment.Lock()
+ defer rightSegment.Unlock()
+ }
+ segments.bucketsMu.Unlock()
+
+ // temporary peers are preferred for pruning.
+ if left.temp != right.temp {
+ return left.temp
+ }
+ // otherwise, compare by value.
+ if left.value != right.value {
+ return left.value < right.value
+ }
+ incomingAndStreams := func(m map[network.Conn]time.Time) (incoming bool, numStreams int) {
+ for c := range m {
+ stat := c.Stat()
+ if stat.Direction == network.DirInbound {
+ incoming = true
+ }
+ numStreams += stat.NumStreams
+ }
+ return
+ }
+ leftIncoming, leftStreams := incomingAndStreams(left.conns)
+ rightIncoming, rightStreams := incomingAndStreams(right.conns)
+ // prefer closing inactive connections (no streams open)
+ if rightStreams != leftStreams && (leftStreams == 0 || rightStreams == 0) {
+ return leftStreams < rightStreams
+ }
+ // incoming connections are preferred for pruning
+ if leftIncoming != rightIncoming {
+ return leftIncoming
+ }
+
+ if sortByMoreStreams {
+ // prune connections with a higher number of streams first
+ return rightStreams < leftStreams
+ } else {
+ return leftStreams < rightStreams
+ }
+ })
+}
+
+// TrimOpenConns closes the connections of as many peers as needed to make the peer count
+// equal the low watermark. Peers are sorted in ascending order based on their total value,
+// pruning those peers with the lowest scores first, as long as they are not within their
+// grace period.
+//
+// This function blocks until a trim is completed. If a trim is underway, a new
+// one won't be started, and instead it'll wait until that one is completed before
+// returning.
+func (cm *BasicConnMgr) TrimOpenConns(_ context.Context) {
+ // TODO: error return value so we can cleanly signal we are aborting because:
+ // (a) there's another trim in progress, or (b) the silence period is in effect.
+
+ cm.doTrim()
+}
+
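+// background periodically wakes up (paced by the silence period when one is
+// configured, otherwise by half the grace period) and trims once the
+// connection count has reached the high watermark.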
+func (cm *BasicConnMgr) background() {
+ defer cm.refCount.Done()
+
+ interval := cm.cfg.gracePeriod / 2
+ if cm.cfg.silencePeriod != 0 {
+ interval = cm.cfg.silencePeriod
+ }
+
+ ticker := cm.clock.Ticker(interval)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ if cm.connCount.Load() < int32(cm.cfg.highWater) {
+ // Below high water, skip.
+ continue
+ }
+ case <-cm.ctx.Done():
+ return
+ }
+ cm.trim()
+ }
+}
+
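+// doTrim ensures that concurrent callers share a single trim: a caller that
+// arrives while a trim is underway blocks until it completes and then returns
+// without trimming again, mirroring the sync.Once pattern via trimCount.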
+func (cm *BasicConnMgr) doTrim() {
+ // This logic is mimicking the implementation of sync.Once in the standard library.
+ count := atomic.LoadUint64(&cm.trimCount)
+ cm.trimMutex.Lock()
+ defer cm.trimMutex.Unlock()
+ if count == atomic.LoadUint64(&cm.trimCount) {
+ cm.trim()
+ cm.lastTrimMu.Lock()
+ cm.lastTrim = cm.clock.Now()
+ cm.lastTrimMu.Unlock()
+ atomic.AddUint64(&cm.trimCount, 1)
+ }
+}
+
+// trim closes the connections chosen by getConnsToClose. Callers are responsible
+// for enforcing the silence period and the single-trim invariant (see doTrim).
+func (cm *BasicConnMgr) trim() {
+ // do the actual trim.
+ for _, c := range cm.getConnsToClose() {
+ log.Debug("closing conn", "peer", c.RemotePeer())
+ c.CloseWithError(network.ConnGarbageCollected)
+ }
+}
+
+func (cm *BasicConnMgr) getConnsToCloseEmergency(target int) []network.Conn {
+ candidates := make(peerInfos, 0, cm.segments.countPeers())
+
+ cm.plk.RLock()
+ for _, s := range cm.segments.buckets {
+ s.Lock()
+ for id, inf := range s.peers {
+ if _, ok := cm.protected[id]; ok {
+ // skip over protected peer.
+ continue
+ }
+ candidates = append(candidates, inf)
+ }
+ s.Unlock()
+ }
+ cm.plk.RUnlock()
+
+ // Sort peers according to their value.
+ candidates.SortByValueAndStreams(&cm.segments, true)
+
+ selected := make([]network.Conn, 0, target+10)
+ for _, inf := range candidates {
+ if target <= 0 {
+ break
+ }
+ s := cm.segments.get(inf.id)
+ s.Lock()
+ for c := range inf.conns {
+ selected = append(selected, c)
+ }
+ target -= len(inf.conns)
+ s.Unlock()
+ }
+ if len(selected) >= target {
+ // We found enough connections that were not protected.
+ return selected
+ }
+
+ // We didn't find enough unprotected connections.
+ // We have no choice but to kill some protected connections.
+ candidates = candidates[:0]
+ cm.plk.RLock()
+ for _, s := range cm.segments.buckets {
+ s.Lock()
+ for _, inf := range s.peers {
+ candidates = append(candidates, inf)
+ }
+ s.Unlock()
+ }
+ cm.plk.RUnlock()
+
+ candidates.SortByValueAndStreams(&cm.segments, true)
+ for _, inf := range candidates {
+ if target <= 0 {
+ break
+ }
+ // lock this to protect from concurrent modifications from connect/disconnect events
+ s := cm.segments.get(inf.id)
+ s.Lock()
+ for c := range inf.conns {
+ selected = append(selected, c)
+ }
+ target -= len(inf.conns)
+ s.Unlock()
+ }
+ return selected
+}
+
+// getConnsToClose runs the heuristics described in TrimOpenConns and returns the
+// connections to close.
+func (cm *BasicConnMgr) getConnsToClose() []network.Conn {
+ if cm.cfg.lowWater == 0 || cm.cfg.highWater == 0 {
+ // disabled
+ return nil
+ }
+
+ if int(cm.connCount.Load()) <= cm.cfg.lowWater {
+ log.Info("open connection count below limit")
+ return nil
+ }
+
+ candidates := make(peerInfos, 0, cm.segments.countPeers())
+ var ncandidates int
+ gracePeriodStart := cm.clock.Now().Add(-cm.cfg.gracePeriod)
+
+ cm.plk.RLock()
+ for _, s := range cm.segments.buckets {
+ s.Lock()
+ for id, inf := range s.peers {
+ if _, ok := cm.protected[id]; ok {
+ // skip over protected peer.
+ continue
+ }
+ if inf.firstSeen.After(gracePeriodStart) {
+ // skip peers in the grace period.
+ continue
+ }
+			// note that we're appending a pointer here; the entry (including inf.conns)
+			// may still be mutated concurrently, so segment locks are re-taken before reading it below.
+ candidates = append(candidates, inf)
+ ncandidates += len(inf.conns)
+ }
+ s.Unlock()
+ }
+ cm.plk.RUnlock()
+
+ if ncandidates < cm.cfg.lowWater {
+ log.Info("open connection count above limit but too many are in the grace period")
+ // We have too many connections but fewer than lowWater
+ // connections out of the grace period.
+ //
+ // If we trimmed now, we'd kill potentially useful connections.
+ return nil
+ }
+
+ // Sort peers according to their value.
+ candidates.SortByValueAndStreams(&cm.segments, false)
+
+ target := ncandidates - cm.cfg.lowWater
+
+	// slightly overallocate because we may have more than one conn per peer
+ selected := make([]network.Conn, 0, target+10)
+
+ for _, inf := range candidates {
+ if target <= 0 {
+ break
+ }
+
+ // lock this to protect from concurrent modifications from connect/disconnect events
+ s := cm.segments.get(inf.id)
+ s.Lock()
+ if len(inf.conns) == 0 && inf.temp {
+ // handle temporary entries for early tags -- this entry has gone past the grace period
+ // and still holds no connections, so prune it.
+ delete(s.peers, inf.id)
+ } else {
+ for c := range inf.conns {
+ selected = append(selected, c)
+ }
+ target -= len(inf.conns)
+ }
+ s.Unlock()
+ }
+
+ return selected
+}
+
+// GetTagInfo is called to fetch the tag information associated with a given
+// peer; nil is returned if p refers to an unknown peer.
+func (cm *BasicConnMgr) GetTagInfo(p peer.ID) *connmgr.TagInfo {
+ s := cm.segments.get(p)
+ s.Lock()
+ defer s.Unlock()
+
+ pi, ok := s.peers[p]
+ if !ok {
+ return nil
+ }
+
+ out := &connmgr.TagInfo{
+ FirstSeen: pi.firstSeen,
+ Value: pi.value,
+ Tags: make(map[string]int),
+ Conns: make(map[string]time.Time),
+ }
+
+ for t, v := range pi.tags {
+ out.Tags[t] = v
+ }
+ for t, v := range pi.decaying {
+ out.Tags[t.name] = v.Value
+ }
+ for c, t := range pi.conns {
+ out.Conns[c.RemoteMultiaddr().String()] = t
+ }
+
+ return out
+}
+
+// TagPeer is called to associate a string and integer with a given peer.
+func (cm *BasicConnMgr) TagPeer(p peer.ID, tag string, val int) {
+ s := cm.segments.get(p)
+ s.Lock()
+ defer s.Unlock()
+
+ pi := s.tagInfoFor(p, cm.clock.Now())
+
+ // Update the total value of the peer.
+ pi.value += val - pi.tags[tag]
+ pi.tags[tag] = val
+}
+
+// UntagPeer is called to disassociate a string and integer from a given peer.
+func (cm *BasicConnMgr) UntagPeer(p peer.ID, tag string) {
+ s := cm.segments.get(p)
+ s.Lock()
+ defer s.Unlock()
+
+ pi, ok := s.peers[p]
+ if !ok {
+ log.Debug("tried to remove tag from untracked peer", "peer", p, "tag", tag)
+ return
+ }
+
+ // Update the total value of the peer.
+ pi.value -= pi.tags[tag]
+ delete(pi.tags, tag)
+}
+
+// UpsertTag is called to insert/update a peer tag
+func (cm *BasicConnMgr) UpsertTag(p peer.ID, tag string, upsert func(int) int) {
+ s := cm.segments.get(p)
+ s.Lock()
+ defer s.Unlock()
+
+ pi := s.tagInfoFor(p, cm.clock.Now())
+
+ oldval := pi.tags[tag]
+ newval := upsert(oldval)
+ pi.value += newval - oldval
+ pi.tags[tag] = newval
+}
+
+// CMInfo holds the configuration for BasicConnMgr, as well as status data.
+type CMInfo struct {
+ // The low watermark, as described in NewConnManager.
+ LowWater int
+
+ // The high watermark, as described in NewConnManager.
+ HighWater int
+
+ // The timestamp when the last trim was triggered.
+ LastTrim time.Time
+
+ // The configured grace period, as described in NewConnManager.
+ GracePeriod time.Duration
+
+ // The current connection count.
+ ConnCount int
+}
+
+// GetInfo returns the configuration and status data for this connection manager.
+func (cm *BasicConnMgr) GetInfo() CMInfo {
+ cm.lastTrimMu.RLock()
+ lastTrim := cm.lastTrim
+ cm.lastTrimMu.RUnlock()
+
+ return CMInfo{
+ HighWater: cm.cfg.highWater,
+ LowWater: cm.cfg.lowWater,
+ LastTrim: lastTrim,
+ GracePeriod: cm.cfg.gracePeriod,
+ ConnCount: int(cm.connCount.Load()),
+ }
+}
+
+// Notifee returns a sink through which Notifiers can inform the BasicConnMgr when
+// events occur. Currently, the notifee only reacts to connection events
+// {Connected, Disconnected}.
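+//
+// A typical wiring sketch, assuming net is a network.Network:
+//
+//	net.Notify(cm.Notifee())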
+func (cm *BasicConnMgr) Notifee() network.Notifiee {
+ return (*cmNotifee)(cm)
+}
+
+type cmNotifee BasicConnMgr
+
+func (nn *cmNotifee) cm() *BasicConnMgr {
+ return (*BasicConnMgr)(nn)
+}
+
+// Connected is called by notifiers to inform that a new connection has been established.
+// The notifee updates the BasicConnMgr to start tracking the connection. If the new connection
+// count exceeds the high watermark, a trim may be triggered.
+func (nn *cmNotifee) Connected(_ network.Network, c network.Conn) {
+ cm := nn.cm()
+
+ p := c.RemotePeer()
+ s := cm.segments.get(p)
+ s.Lock()
+ defer s.Unlock()
+
+ id := c.RemotePeer()
+ pinfo, ok := s.peers[id]
+ if !ok {
+ pinfo = &peerInfo{
+ id: id,
+ firstSeen: cm.clock.Now(),
+ tags: make(map[string]int),
+ decaying: make(map[*decayingTag]*connmgr.DecayingValue),
+ conns: make(map[network.Conn]time.Time),
+ }
+ s.peers[id] = pinfo
+ } else if pinfo.temp {
+ // we had created a temporary entry for this peer to buffer early tags before the
+ // Connected notification arrived: flip the temporary flag, and update the firstSeen
+ // timestamp to the real one.
+ pinfo.temp = false
+ pinfo.firstSeen = cm.clock.Now()
+ }
+
+ _, ok = pinfo.conns[c]
+ if ok {
+ log.Error("received connected notification for conn we are already tracking", "peer", p)
+ return
+ }
+
+ pinfo.conns[c] = cm.clock.Now()
+ cm.connCount.Add(1)
+}
+
+// Disconnected is called by notifiers to inform that an existing connection has been closed or terminated.
+// The notifee updates the BasicConnMgr accordingly to stop tracking the connection, and performs housekeeping.
+func (nn *cmNotifee) Disconnected(_ network.Network, c network.Conn) {
+ cm := nn.cm()
+
+ p := c.RemotePeer()
+ s := cm.segments.get(p)
+ s.Lock()
+ defer s.Unlock()
+
+ cinf, ok := s.peers[p]
+ if !ok {
+ log.Error("received disconnected notification for peer we are not tracking", "peer", p)
+ return
+ }
+
+ _, ok = cinf.conns[c]
+ if !ok {
+ log.Error("received disconnected notification for conn we are not tracking", "peer", p)
+ return
+ }
+
+ delete(cinf.conns, c)
+ if len(cinf.conns) == 0 {
+ delete(s.peers, p)
+ }
+ cm.connCount.Add(-1)
+}
+
+// Listen is no-op in this implementation.
+func (nn *cmNotifee) Listen(_ network.Network, _ ma.Multiaddr) {}
+
+// ListenClose is no-op in this implementation.
+func (nn *cmNotifee) ListenClose(_ network.Network, _ ma.Multiaddr) {}
diff --git a/p2p/net/connmgr/connmgr_test.go b/p2p/net/connmgr/connmgr_test.go
new file mode 100644
index 0000000000..e8e61914dd
--- /dev/null
+++ b/p2p/net/connmgr/connmgr_test.go
@@ -0,0 +1,1076 @@
+package connmgr
+
+import (
+ "context"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/benbjohnson/clock"
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ tu "github.com/libp2p/go-libp2p/core/test"
+
+ swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
+
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/stretchr/testify/require"
+)
+
+type tconn struct {
+ network.Conn
+
+ peer peer.ID
+ closed uint32 // to be used atomically. Closed if 1
+ disconnectNotify func(net network.Network, conn network.Conn)
+}
+
+func (c *tconn) Close() error {
+ atomic.StoreUint32(&c.closed, 1)
+ if c.disconnectNotify != nil {
+ c.disconnectNotify(nil, c)
+ }
+ return nil
+}
+
+func (c *tconn) CloseWithError(_ network.ConnErrorCode) error {
+ atomic.StoreUint32(&c.closed, 1)
+ if c.disconnectNotify != nil {
+ c.disconnectNotify(nil, c)
+ }
+ return nil
+}
+
+func (c *tconn) isClosed() bool {
+ return atomic.LoadUint32(&c.closed) == 1
+}
+
+func (c *tconn) RemotePeer() peer.ID {
+ return c.peer
+}
+
+func (c *tconn) Stat() network.ConnStats {
+ return network.ConnStats{
+ Stats: network.Stats{
+ Direction: network.DirOutbound,
+ },
+ NumStreams: 1,
+ }
+}
+
+func (c *tconn) RemoteMultiaddr() ma.Multiaddr {
+ addr, err := ma.NewMultiaddr("/ip4/127.0.0.1/udp/1234")
+ if err != nil {
+ panic("cannot create multiaddr")
+ }
+ return addr
+}
+
+func randConn(t testing.TB, discNotify func(network.Network, network.Conn)) network.Conn {
+ pid := tu.RandPeerIDFatal(t)
+ return &tconn{peer: pid, disconnectNotify: discNotify}
+}
+
+// Make sure multiple trim calls block.
+func TestTrimBlocks(t *testing.T) {
+ cm, err := NewConnManager(200, 300, WithGracePeriod(0))
+ require.NoError(t, err)
+ defer cm.Close()
+
+ cm.lastTrimMu.RLock()
+
+ doneCh := make(chan struct{}, 2)
+ go func() {
+ cm.TrimOpenConns(context.Background())
+ doneCh <- struct{}{}
+ }()
+ go func() {
+ cm.TrimOpenConns(context.Background())
+ doneCh <- struct{}{}
+ }()
+ time.Sleep(time.Millisecond)
+ select {
+ case <-doneCh:
+ cm.lastTrimMu.RUnlock()
+ t.Fatal("expected trim to block")
+ default:
+ cm.lastTrimMu.RUnlock()
+ }
+ <-doneCh
+ <-doneCh
+}
+
+// Make sure trim returns when closed.
+func TestTrimClosed(t *testing.T) {
+ cm, err := NewConnManager(200, 300, WithGracePeriod(0))
+ require.NoError(t, err)
+ require.NoError(t, cm.Close())
+ cm.TrimOpenConns(context.Background())
+}
+
+// Make sure joining an existing trim works.
+func TestTrimJoin(t *testing.T) {
+ cm, err := NewConnManager(200, 300, WithGracePeriod(0))
+ require.NoError(t, err)
+ defer cm.Close()
+
+ cm.lastTrimMu.RLock()
+ var wg sync.WaitGroup
+ wg.Add(3)
+ go func() {
+ defer wg.Done()
+ cm.TrimOpenConns(context.Background())
+ }()
+ time.Sleep(time.Millisecond)
+ go func() {
+ defer wg.Done()
+ cm.TrimOpenConns(context.Background())
+ }()
+ go func() {
+ defer wg.Done()
+ cm.TrimOpenConns(context.Background())
+ }()
+ time.Sleep(time.Millisecond)
+ cm.lastTrimMu.RUnlock()
+ wg.Wait()
+}
+
+func TestConnTrimming(t *testing.T) {
+ cm, err := NewConnManager(200, 300, WithGracePeriod(0))
+ require.NoError(t, err)
+ defer cm.Close()
+ not := cm.Notifee()
+
+ var conns []network.Conn
+ for i := 0; i < 300; i++ {
+ rc := randConn(t, nil)
+ conns = append(conns, rc)
+ not.Connected(nil, rc)
+ }
+
+ for _, c := range conns {
+ if c.(*tconn).isClosed() {
+ t.Fatal("nothing should be closed yet")
+ }
+ }
+
+ for i := 0; i < 100; i++ {
+ cm.TagPeer(conns[i].RemotePeer(), "foo", 10)
+ }
+
+ cm.TagPeer(conns[299].RemotePeer(), "badfoo", -5)
+
+ cm.TrimOpenConns(context.Background())
+
+ for i := 0; i < 100; i++ {
+ c := conns[i]
+ if c.(*tconn).isClosed() {
+ t.Fatal("these shouldnt be closed")
+ }
+ }
+
+ if !conns[299].(*tconn).isClosed() {
+ t.Fatal("conn with bad tag should have gotten closed")
+ }
+}
+
+func TestConnsToClose(t *testing.T) {
+ addConns := func(cm *BasicConnMgr, n int) {
+ not := cm.Notifee()
+ for i := 0; i < n; i++ {
+ conn := randConn(t, nil)
+ not.Connected(nil, conn)
+ }
+ }
+
+ t.Run("below hi limit", func(t *testing.T) {
+ cm, err := NewConnManager(0, 10, WithGracePeriod(0))
+ require.NoError(t, err)
+ defer cm.Close()
+ addConns(cm, 5)
+ require.Empty(t, cm.getConnsToClose())
+ })
+
+ t.Run("below low limit", func(t *testing.T) {
+ cm, err := NewConnManager(10, 0, WithGracePeriod(0))
+ require.NoError(t, err)
+ defer cm.Close()
+ addConns(cm, 5)
+ require.Empty(t, cm.getConnsToClose())
+ })
+
+ t.Run("below low and hi limit", func(t *testing.T) {
+ cm, err := NewConnManager(1, 1, WithGracePeriod(0))
+ require.NoError(t, err)
+ defer cm.Close()
+ addConns(cm, 1)
+ require.Empty(t, cm.getConnsToClose())
+ })
+
+ t.Run("within silence period", func(t *testing.T) {
+ cm, err := NewConnManager(1, 1, WithGracePeriod(10*time.Minute))
+ require.NoError(t, err)
+ defer cm.Close()
+ addConns(cm, 1)
+ require.Empty(t, cm.getConnsToClose())
+ })
+}
+
+func TestGetTagInfo(t *testing.T) {
+ start := time.Now()
+ cm, err := NewConnManager(1, 1, WithGracePeriod(10*time.Minute))
+ require.NoError(t, err)
+ defer cm.Close()
+
+ not := cm.Notifee()
+ conn := randConn(t, nil)
+ not.Connected(nil, conn)
+ end := time.Now()
+
+ other := tu.RandPeerIDFatal(t)
+ tag := cm.GetTagInfo(other)
+ if tag != nil {
+ t.Fatal("expected no tag")
+ }
+
+ tag = cm.GetTagInfo(conn.RemotePeer())
+ if tag == nil {
+ t.Fatal("expected tag")
+ }
+ if tag.FirstSeen.Before(start) || tag.FirstSeen.After(end) {
+ t.Fatal("expected first seen time")
+ }
+ if tag.Value != 0 {
+ t.Fatal("expected zero value")
+ }
+ if len(tag.Tags) != 0 {
+ t.Fatal("expected no tags")
+ }
+ if len(tag.Conns) != 1 {
+ t.Fatal("expected one connection")
+ }
+ for s, tm := range tag.Conns {
+ if s != conn.RemoteMultiaddr().String() {
+ t.Fatal("unexpected multiaddr")
+ }
+ if tm.Before(start) || tm.After(end) {
+ t.Fatal("unexpected connection time")
+ }
+ }
+
+ cm.TagPeer(conn.RemotePeer(), "tag", 5)
+ tag = cm.GetTagInfo(conn.RemotePeer())
+ if tag == nil {
+ t.Fatal("expected tag")
+ }
+ if tag.FirstSeen.Before(start) || tag.FirstSeen.After(end) {
+ t.Fatal("expected first seen time")
+ }
+ if tag.Value != 5 {
+ t.Fatal("expected five value")
+ }
+ if len(tag.Tags) != 1 {
+ t.Fatal("expected no tags")
+ }
+ for tString, v := range tag.Tags {
+ if tString != "tag" || v != 5 {
+ t.Fatal("expected tag value")
+ }
+ }
+ if len(tag.Conns) != 1 {
+ t.Fatal("expected one connection")
+ }
+ for s, tm := range tag.Conns {
+ if s != conn.RemoteMultiaddr().String() {
+ t.Fatal("unexpected multiaddr")
+ }
+ if tm.Before(start) || tm.After(end) {
+ t.Fatal("unexpected connection time")
+ }
+ }
+}
+
+func TestTagPeerNonExistent(t *testing.T) {
+ cm, err := NewConnManager(1, 1, WithGracePeriod(10*time.Minute))
+ require.NoError(t, err)
+ defer cm.Close()
+
+ id := tu.RandPeerIDFatal(t)
+ cm.TagPeer(id, "test", 1)
+
+ if !cm.segments.get(id).peers[id].temp {
+ t.Fatal("expected 1 temporary entry")
+ }
+}
+
+func TestUntagPeer(t *testing.T) {
+ cm, err := NewConnManager(1, 1, WithGracePeriod(10*time.Minute))
+ require.NoError(t, err)
+ defer cm.Close()
+ not := cm.Notifee()
+
+ conn := randConn(t, nil)
+ not.Connected(nil, conn)
+ rp := conn.RemotePeer()
+ cm.TagPeer(rp, "tag", 5)
+ cm.TagPeer(rp, "tag two", 5)
+
+ id := tu.RandPeerIDFatal(t)
+ cm.UntagPeer(id, "test")
+ if len(cm.segments.get(rp).peers[rp].tags) != 2 {
+ t.Fatal("expected tags to be uneffected")
+ }
+
+ cm.UntagPeer(conn.RemotePeer(), "test")
+ if len(cm.segments.get(rp).peers[rp].tags) != 2 {
+ t.Fatal("expected tags to be uneffected")
+ }
+
+ cm.UntagPeer(conn.RemotePeer(), "tag")
+ if len(cm.segments.get(rp).peers[rp].tags) != 1 {
+ t.Fatal("expected tag to be removed")
+ }
+ if cm.segments.get(rp).peers[rp].value != 5 {
+ t.Fatal("expected aggreagte tag value to be 5")
+ }
+}
+
+func TestGetInfo(t *testing.T) {
+ start := time.Now()
+ const gp = 10 * time.Minute
+ cm, err := NewConnManager(1, 5, WithGracePeriod(gp))
+ require.NoError(t, err)
+ defer cm.Close()
+ not := cm.Notifee()
+ conn := randConn(t, nil)
+ not.Connected(nil, conn)
+ cm.TrimOpenConns(context.Background())
+ end := time.Now()
+
+ info := cm.GetInfo()
+ if info.HighWater != 5 {
+ t.Fatal("expected highwater to be 5")
+ }
+ if info.LowWater != 1 {
+ t.Fatal("expected highwater to be 1")
+ }
+ if info.LastTrim.Before(start) || info.LastTrim.After(end) {
+ t.Fatal("unexpected last trim time")
+ }
+ if info.GracePeriod != gp {
+ t.Fatal("unexpected grace period")
+ }
+ if info.ConnCount != 1 {
+ t.Fatal("unexpected number of connections")
+ }
+}
+
+func TestDoubleConnection(t *testing.T) {
+ const gp = 10 * time.Minute
+ cm, err := NewConnManager(1, 5, WithGracePeriod(gp))
+ require.NoError(t, err)
+ defer cm.Close()
+ not := cm.Notifee()
+ conn := randConn(t, nil)
+ not.Connected(nil, conn)
+ cm.TagPeer(conn.RemotePeer(), "foo", 10)
+ not.Connected(nil, conn)
+ if cm.connCount.Load() != 1 {
+ t.Fatal("unexpected number of connections")
+ }
+ if cm.segments.get(conn.RemotePeer()).peers[conn.RemotePeer()].value != 10 {
+ t.Fatal("unexpected peer value")
+ }
+}
+
+func TestDisconnected(t *testing.T) {
+ const gp = 10 * time.Minute
+ cm, err := NewConnManager(1, 5, WithGracePeriod(gp))
+ require.NoError(t, err)
+ defer cm.Close()
+ not := cm.Notifee()
+ conn := randConn(t, nil)
+ not.Connected(nil, conn)
+ cm.TagPeer(conn.RemotePeer(), "foo", 10)
+
+ not.Disconnected(nil, randConn(t, nil))
+ if cm.connCount.Load() != 1 {
+ t.Fatal("unexpected number of connections")
+ }
+ if cm.segments.get(conn.RemotePeer()).peers[conn.RemotePeer()].value != 10 {
+ t.Fatal("unexpected peer value")
+ }
+
+ not.Disconnected(nil, &tconn{peer: conn.RemotePeer()})
+ if cm.connCount.Load() != 1 {
+ t.Fatal("unexpected number of connections")
+ }
+ if cm.segments.get(conn.RemotePeer()).peers[conn.RemotePeer()].value != 10 {
+ t.Fatal("unexpected peer value")
+ }
+
+ not.Disconnected(nil, conn)
+ if cm.connCount.Load() != 0 {
+ t.Fatal("unexpected number of connections")
+ }
+ if cm.segments.countPeers() != 0 {
+ t.Fatal("unexpected number of peers")
+ }
+}
+
+func TestGracePeriod(t *testing.T) {
+ const gp = 100 * time.Millisecond
+ mockClock := clock.NewMock()
+ cm, err := NewConnManager(10, 20, WithGracePeriod(gp), WithSilencePeriod(time.Hour), WithClock(mockClock))
+ require.NoError(t, err)
+ defer cm.Close()
+
+ not := cm.Notifee()
+
+ var conns []network.Conn
+
+	// Add a connection and wait for the grace period to elapse.
+ {
+ rc := randConn(t, not.Disconnected)
+ conns = append(conns, rc)
+ not.Connected(nil, rc)
+
+ mockClock.Add(2 * gp)
+
+ if rc.(*tconn).isClosed() {
+ t.Fatal("expected conn to remain open")
+ }
+ }
+
+ // quickly add 30 connections (sending us above the high watermark)
+ for i := 0; i < 30; i++ {
+ rc := randConn(t, not.Disconnected)
+ conns = append(conns, rc)
+ not.Connected(nil, rc)
+ }
+
+ cm.TrimOpenConns(context.Background())
+
+ for _, c := range conns {
+ if c.(*tconn).isClosed() {
+ t.Fatal("expected no conns to be closed")
+ }
+ }
+
+ mockClock.Add(200 * time.Millisecond)
+
+ cm.TrimOpenConns(context.Background())
+
+ closed := 0
+ for _, c := range conns {
+ if c.(*tconn).isClosed() {
+ closed++
+ }
+ }
+
+ if closed != 21 {
+ t.Fatal("expected to have closed 21 connections")
+ }
+}
+
+// see https://github.com/libp2p/go-libp2p-connmgr/issues/23
+func TestQuickBurstRespectsSilencePeriod(t *testing.T) {
+ mockClock := clock.NewMock()
+ cm, err := NewConnManager(10, 20, WithGracePeriod(0), WithClock(mockClock))
+ require.NoError(t, err)
+ defer cm.Close()
+ not := cm.Notifee()
+
+ var conns []network.Conn
+
+ // quickly produce 30 connections (sending us above the high watermark)
+ for i := 0; i < 30; i++ {
+ rc := randConn(t, not.Disconnected)
+ conns = append(conns, rc)
+ not.Connected(nil, rc)
+ }
+
+ // wait for a few seconds
+ mockClock.Add(3 * time.Second)
+
+ // only the first trim is allowed in; make sure we close at most 20 connections, not all of them.
+ var closed int
+ for _, c := range conns {
+ if c.(*tconn).isClosed() {
+ closed++
+ }
+ }
+ if closed > 20 {
+ t.Fatalf("should have closed at most 20 connections, closed: %d", closed)
+ }
+ if total := closed + int(cm.connCount.Load()); total != 30 {
+ t.Fatalf("expected closed connections + open conn count to equal 30, value: %d", total)
+ }
+}
+
+func TestPeerProtectionSingleTag(t *testing.T) {
+ cm, err := NewConnManager(19, 20, WithGracePeriod(0), WithSilencePeriod(time.Hour))
+ require.NoError(t, err)
+ defer cm.Close()
+ not := cm.Notifee()
+
+ var conns []network.Conn
+ addConn := func(value int) {
+ rc := randConn(t, not.Disconnected)
+ conns = append(conns, rc)
+ not.Connected(nil, rc)
+ cm.TagPeer(rc.RemotePeer(), "test", value)
+ }
+
+ // produce 20 connections with unique peers.
+ for i := 0; i < 20; i++ {
+ addConn(20)
+ }
+
+ // protect the first 5 peers.
+ protected := make([]network.Conn, 0, 5)
+ for _, c := range conns[0:5] {
+ cm.Protect(c.RemotePeer(), "global")
+ protected = append(protected, c)
+ // tag them negatively to make them preferred for pruning.
+ cm.TagPeer(c.RemotePeer(), "test", -100)
+ }
+
+	// add 1 more conn; this shouldn't send us over the limit, as protected conns don't count
+ addConn(20)
+
+ time.Sleep(100 * time.Millisecond)
+ cm.TrimOpenConns(context.Background())
+
+ for _, c := range conns {
+ if c.(*tconn).isClosed() {
+ t.Error("connection was closed by connection manager")
+ }
+ }
+
+	// add 5 more connections, sending the connection manager overboard.
+ for i := 0; i < 5; i++ {
+ addConn(20)
+ }
+
+ cm.TrimOpenConns(context.Background())
+
+ for _, c := range protected {
+ if c.(*tconn).isClosed() {
+ t.Error("protected connection was closed by connection manager")
+ }
+ }
+
+ closed := 0
+ for _, c := range conns {
+ if c.(*tconn).isClosed() {
+ closed++
+ }
+ }
+ if closed != 2 {
+ t.Errorf("expected 2 connection to be closed, found %d", closed)
+ }
+
+ // unprotect the first peer.
+ cm.Unprotect(protected[0].RemotePeer(), "global")
+
+ // add 2 more connections, sending the connection manager overboard again.
+ for i := 0; i < 2; i++ {
+ addConn(20)
+ }
+
+ cm.TrimOpenConns(context.Background())
+
+ if !protected[0].(*tconn).isClosed() {
+ t.Error("unprotected connection was kept open by connection manager")
+ }
+ for _, c := range protected[1:] {
+ if c.(*tconn).isClosed() {
+ t.Error("protected connection was closed by connection manager")
+ }
+ }
+}
+
+func TestPeerProtectionMultipleTags(t *testing.T) {
+ cm, err := NewConnManager(19, 20, WithGracePeriod(0), WithSilencePeriod(time.Hour))
+ require.NoError(t, err)
+ defer cm.Close()
+ not := cm.Notifee()
+
+ // produce 20 connections with unique peers.
+ var conns []network.Conn
+ for i := 0; i < 20; i++ {
+ rc := randConn(t, not.Disconnected)
+ conns = append(conns, rc)
+ not.Connected(nil, rc)
+ cm.TagPeer(rc.RemotePeer(), "test", 20)
+ }
+
+ // protect the first 5 peers under two tags.
+ protected := make([]network.Conn, 0, 5)
+ for _, c := range conns[0:5] {
+ cm.Protect(c.RemotePeer(), "tag1")
+ cm.Protect(c.RemotePeer(), "tag2")
+ protected = append(protected, c)
+ // tag them negatively to make them preferred for pruning.
+ cm.TagPeer(c.RemotePeer(), "test", -100)
+ }
+
+ // add one more connection, sending the connection manager overboard.
+ not.Connected(nil, randConn(t, not.Disconnected))
+
+ cm.TrimOpenConns(context.Background())
+
+ for _, c := range protected {
+ if c.(*tconn).isClosed() {
+ t.Error("protected connection was closed by connection manager")
+ }
+ }
+
+ // remove the protection from one tag.
+ for _, c := range protected {
+ if !cm.Unprotect(c.RemotePeer(), "tag1") {
+ t.Error("peer should still be protected")
+ }
+ }
+
+ // add 2 more connections, sending the connection manager overboard again.
+ for i := 0; i < 2; i++ {
+ rc := randConn(t, not.Disconnected)
+ not.Connected(nil, rc)
+ cm.TagPeer(rc.RemotePeer(), "test", 20)
+ }
+
+ cm.TrimOpenConns(context.Background())
+
+ // connections should still remain open, as they were protected.
+ for _, c := range protected[0:] {
+ if c.(*tconn).isClosed() {
+ t.Error("protected connection was closed by connection manager")
+ }
+ }
+
+ // unprotect the first peer entirely.
+ cm.Unprotect(protected[0].RemotePeer(), "tag2")
+
+ // add 2 more connections, sending the connection manager overboard again.
+ for i := 0; i < 2; i++ {
+ rc := randConn(t, not.Disconnected)
+ not.Connected(nil, rc)
+ cm.TagPeer(rc.RemotePeer(), "test", 20)
+ }
+
+ cm.TrimOpenConns(context.Background())
+
+ if !protected[0].(*tconn).isClosed() {
+ t.Error("unprotected connection was kept open by connection manager")
+ }
+ for _, c := range protected[1:] {
+ if c.(*tconn).isClosed() {
+ t.Error("protected connection was closed by connection manager")
+ }
+ }
+}
+
+func TestPeerProtectionIdempotent(t *testing.T) {
+ cm, err := NewConnManager(10, 20, WithGracePeriod(0), WithSilencePeriod(time.Hour))
+ require.NoError(t, err)
+ defer cm.Close()
+
+ id, _ := tu.RandPeerID()
+ cm.Protect(id, "global")
+ cm.Protect(id, "global")
+ cm.Protect(id, "global")
+ cm.Protect(id, "global")
+
+ if len(cm.protected[id]) > 1 {
+ t.Error("expected peer to be protected only once")
+ }
+
+ if !cm.Unprotect(id, "unused") {
+ t.Error("expected peer to continue to be protected")
+ }
+
+ if !cm.Unprotect(id, "unused2") {
+ t.Error("expected peer to continue to be protected")
+ }
+
+ if cm.Unprotect(id, "global") {
+ t.Error("expected peer to be unprotected")
+ }
+
+ if len(cm.protected) > 0 {
+ t.Error("expected no protections")
+ }
+}
+
+func TestUpsertTag(t *testing.T) {
+ cm, err := NewConnManager(1, 1, WithGracePeriod(10*time.Minute))
+ require.NoError(t, err)
+ defer cm.Close()
+ not := cm.Notifee()
+ conn := randConn(t, nil)
+ rp := conn.RemotePeer()
+
+ // this is an early tag, before the Connected notification arrived.
+ cm.UpsertTag(rp, "tag", func(v int) int { return v + 1 })
+ if len(cm.segments.get(rp).peers[rp].tags) != 1 {
+ t.Fatal("expected a tag")
+ }
+ if cm.segments.get(rp).peers[rp].value != 1 {
+ t.Fatal("expected a tag value of 1")
+ }
+
+ // now let's notify the connection.
+ not.Connected(nil, conn)
+
+ cm.UpsertTag(rp, "tag", func(v int) int { return v + 1 })
+ if len(cm.segments.get(rp).peers[rp].tags) != 1 {
+ t.Fatal("expected a tag")
+ }
+ if cm.segments.get(rp).peers[rp].value != 2 {
+ t.Fatal("expected a tag value of 2")
+ }
+
+ cm.UpsertTag(rp, "tag", func(v int) int { return v - 1 })
+ if len(cm.segments.get(rp).peers[rp].tags) != 1 {
+ t.Fatal("expected a tag")
+ }
+ if cm.segments.get(rp).peers[rp].value != 1 {
+ t.Fatal("expected a tag value of 1")
+ }
+}
+
+func TestTemporaryEntriesClearedFirst(t *testing.T) {
+ cm, err := NewConnManager(1, 1, WithGracePeriod(0))
+ require.NoError(t, err)
+
+ id := tu.RandPeerIDFatal(t)
+ cm.TagPeer(id, "test", 20)
+
+ if cm.GetTagInfo(id).Value != 20 {
+ t.Fatal("expected an early tag with value 20")
+ }
+
+ not := cm.Notifee()
+ conn1, conn2 := randConn(t, nil), randConn(t, nil)
+ not.Connected(nil, conn1)
+ not.Connected(nil, conn2)
+
+ cm.TrimOpenConns(context.Background())
+ if cm.GetTagInfo(id) != nil {
+ t.Fatal("expected no temporary tags after trimming")
+ }
+}
+
+func TestTemporaryEntryConvertedOnConnection(t *testing.T) {
+ cm, err := NewConnManager(1, 1, WithGracePeriod(0))
+ require.NoError(t, err)
+ defer cm.Close()
+
+ conn := randConn(t, nil)
+ cm.TagPeer(conn.RemotePeer(), "test", 20)
+
+ ti := cm.segments.get(conn.RemotePeer()).peers[conn.RemotePeer()]
+
+ if ti.value != 20 || !ti.temp {
+ t.Fatal("expected a temporary tag with value 20")
+ }
+
+ not := cm.Notifee()
+ not.Connected(nil, conn)
+
+ if ti.value != 20 || ti.temp {
+ t.Fatal("expected a non-temporary tag with value 20")
+ }
+}
+
+// see https://github.com/libp2p/go-libp2p-connmgr/issues/82
+func TestConcurrentCleanupAndTagging(t *testing.T) {
+ cm, err := NewConnManager(1, 1, WithGracePeriod(0), WithSilencePeriod(time.Millisecond))
+ require.NoError(t, err)
+ defer cm.Close()
+
+ for i := 0; i < 1000; i++ {
+ conn := randConn(t, nil)
+ cm.TagPeer(conn.RemotePeer(), "test", 20)
+ }
+}
+
+type mockConn struct {
+ stats network.ConnStats
+}
+
+func (m mockConn) Close() error { panic("implement me") }
+func (m mockConn) CloseWithError(_ network.ConnErrorCode) error { panic("implement me") }
+func (m mockConn) LocalPeer() peer.ID { panic("implement me") }
+func (m mockConn) RemotePeer() peer.ID { panic("implement me") }
+func (m mockConn) RemotePublicKey() crypto.PubKey { panic("implement me") }
+func (m mockConn) LocalMultiaddr() ma.Multiaddr { panic("implement me") }
+func (m mockConn) RemoteMultiaddr() ma.Multiaddr { panic("implement me") }
+func (m mockConn) Stat() network.ConnStats { return m.stats }
+func (m mockConn) ID() string { panic("implement me") }
+func (m mockConn) IsClosed() bool { panic("implement me") }
+func (m mockConn) NewStream(_ context.Context) (network.Stream, error) { panic("implement me") }
+func (m mockConn) GetStreams() []network.Stream { panic("implement me") }
+func (m mockConn) Scope() network.ConnScope { panic("implement me") }
+func (m mockConn) ConnState() network.ConnectionState { return network.ConnectionState{} }
+
+func makeSegmentsWithPeerInfos(peerInfos peerInfos) *segments {
+ var s = func() *segments {
+ ret := segments{}
+ for i := range ret.buckets {
+ ret.buckets[i] = &segment{
+ peers: make(map[peer.ID]*peerInfo),
+ }
+ }
+ return &ret
+ }()
+
+ for _, pi := range peerInfos {
+ segment := s.get(pi.id)
+ segment.Lock()
+ segment.peers[pi.id] = pi
+ segment.Unlock()
+ }
+
+ return s
+}
+
+func TestPeerInfoSorting(t *testing.T) {
+ t.Run("starts with temporary connections", func(t *testing.T) {
+ p1 := &peerInfo{id: peer.ID("peer1")}
+ p2 := &peerInfo{id: peer.ID("peer2"), temp: true}
+ pis := peerInfos{p1, p2}
+ pis.SortByValueAndStreams(makeSegmentsWithPeerInfos(pis), false)
+ require.Equal(t, peerInfos{p2, p1}, pis)
+ })
+
+ t.Run("starts with low-value connections", func(t *testing.T) {
+ p1 := &peerInfo{id: peer.ID("peer1"), value: 40}
+ p2 := &peerInfo{id: peer.ID("peer2"), value: 20}
+ pis := peerInfos{p1, p2}
+ pis.SortByValueAndStreams(makeSegmentsWithPeerInfos(pis), false)
+ require.Equal(t, peerInfos{p2, p1}, pis)
+ })
+
+ t.Run("prefer peers with no streams", func(t *testing.T) {
+ p1 := &peerInfo{id: peer.ID("peer1"),
+ conns: map[network.Conn]time.Time{
+ &mockConn{stats: network.ConnStats{NumStreams: 0}}: time.Now(),
+ },
+ }
+ p2 := &peerInfo{id: peer.ID("peer2"),
+ conns: map[network.Conn]time.Time{
+ &mockConn{stats: network.ConnStats{NumStreams: 1}}: time.Now(),
+ },
+ }
+ pis := peerInfos{p2, p1}
+ pis.SortByValueAndStreams(makeSegmentsWithPeerInfos(pis), false)
+ require.Equal(t, peerInfos{p1, p2}, pis)
+ })
+
+ t.Run("in a memory emergency, starts with incoming connections and higher streams", func(t *testing.T) {
+ incoming := network.ConnStats{}
+ incoming.Direction = network.DirInbound
+ outgoing := network.ConnStats{}
+ outgoing.Direction = network.DirOutbound
+
+ outgoingSomeStreams := network.ConnStats{Stats: network.Stats{Direction: network.DirOutbound}, NumStreams: 1}
+ outgoingMoreStreams := network.ConnStats{Stats: network.Stats{Direction: network.DirOutbound}, NumStreams: 2}
+ p1 := &peerInfo{
+ id: peer.ID("peer1"),
+ conns: map[network.Conn]time.Time{
+ &mockConn{stats: outgoingSomeStreams}: time.Now(),
+ },
+ }
+ p2 := &peerInfo{
+ id: peer.ID("peer2"),
+ conns: map[network.Conn]time.Time{
+ &mockConn{stats: outgoingSomeStreams}: time.Now(),
+ &mockConn{stats: incoming}: time.Now(),
+ },
+ }
+ p3 := &peerInfo{
+ id: peer.ID("peer3"),
+ conns: map[network.Conn]time.Time{
+ &mockConn{stats: outgoing}: time.Now(),
+ &mockConn{stats: incoming}: time.Now(),
+ },
+ }
+ p4 := &peerInfo{
+ id: peer.ID("peer4"),
+ conns: map[network.Conn]time.Time{
+ &mockConn{stats: outgoingMoreStreams}: time.Now(),
+ &mockConn{stats: incoming}: time.Now(),
+ },
+ }
+ pis := peerInfos{p1, p2, p3, p4}
+ pis.SortByValueAndStreams(makeSegmentsWithPeerInfos(pis), true)
+ // p3 is first because it is inactive (no streams).
+		// p4 is second because it has the most streams and we prioritize killing
+ // connections with the higher number of streams.
+ require.Equal(t, peerInfos{p3, p4, p2, p1}, pis)
+ })
+
+ t.Run("in a memory emergency, starts with connections that have many streams", func(t *testing.T) {
+ p1 := &peerInfo{
+ id: peer.ID("peer1"),
+ conns: map[network.Conn]time.Time{
+ &mockConn{stats: network.ConnStats{NumStreams: 100}}: time.Now(),
+ },
+ }
+ p2 := &peerInfo{
+ id: peer.ID("peer2"),
+ conns: map[network.Conn]time.Time{
+ &mockConn{stats: network.ConnStats{NumStreams: 80}}: time.Now(),
+ &mockConn{stats: network.ConnStats{NumStreams: 40}}: time.Now(),
+ },
+ }
+ pis := peerInfos{p1, p2}
+ pis.SortByValueAndStreams(makeSegmentsWithPeerInfos(pis), true)
+ require.Equal(t, peerInfos{p2, p1}, pis)
+ })
+}
+
+func TestSafeConcurrency(t *testing.T) {
+ t.Run("Safe Concurrency", func(t *testing.T) {
+ cl := clock.NewMock()
+
+ p1 := &peerInfo{id: peer.ID("peer1"), conns: map[network.Conn]time.Time{}}
+ p2 := &peerInfo{id: peer.ID("peer2"), conns: map[network.Conn]time.Time{}}
+ pis := peerInfos{p1, p2}
+
+ ss := makeSegmentsWithPeerInfos(pis)
+
+ const runs = 10
+ const concurrency = 10
+ var wg sync.WaitGroup
+ for i := 0; i < concurrency; i++ {
+ wg.Add(1)
+ go func() {
+ // add conns. This mimics new connection events
+ pis := peerInfos{p1, p2}
+ for i := 0; i < runs; i++ {
+ pi := pis[i%len(pis)]
+ s := ss.get(pi.id)
+ s.Lock()
+ s.peers[pi.id].conns[randConn(t, nil)] = cl.Now()
+ s.Unlock()
+ }
+ wg.Done()
+ }()
+
+ wg.Add(1)
+ go func() {
+ pis := peerInfos{p1, p2}
+ for i := 0; i < runs; i++ {
+ pis.SortByValueAndStreams(ss, false)
+ }
+ wg.Done()
+ }()
+ }
+
+ wg.Wait()
+ })
+}
+
+func TestCheckLimit(t *testing.T) {
+ low, hi := 1, 2
+ cm, err := NewConnManager(low, hi)
+ require.NoError(t, err)
+
+ err = cm.CheckLimit(testLimitGetter{hi + 1})
+ require.NoError(t, err)
+ err = cm.CheckLimit(testLimitGetter{hi})
+ require.NoError(t, err)
+ err = cm.CheckLimit(testLimitGetter{hi - 1})
+ require.Error(t, err)
+}
+
+type testLimitGetter struct {
+ limit int
+}
+
+func (g testLimitGetter) GetConnLimit() int {
+ return g.limit
+}
+
+func TestErrorCode(t *testing.T) {
+ sw1, sw2, sw3 := swarmt.GenSwarm(t), swarmt.GenSwarm(t), swarmt.GenSwarm(t)
+ defer sw1.Close()
+ defer sw2.Close()
+ defer sw3.Close()
+
+ cm, err := NewConnManager(1, 1, WithGracePeriod(0), WithSilencePeriod(10))
+ require.NoError(t, err)
+ defer cm.Close()
+
+ sw1.Peerstore().AddAddrs(sw2.LocalPeer(), sw2.ListenAddresses(), peerstore.PermanentAddrTTL)
+ sw1.Peerstore().AddAddrs(sw3.LocalPeer(), sw3.ListenAddresses(), peerstore.PermanentAddrTTL)
+
+ c12, err := sw1.DialPeer(context.Background(), sw2.LocalPeer())
+ require.NoError(t, err)
+
+ var c21 network.Conn
+ require.Eventually(t, func() bool {
+ conns := sw2.ConnsToPeer(sw1.LocalPeer())
+ if len(conns) == 0 {
+ return false
+ }
+ c21 = conns[0]
+ return true
+ }, 10*time.Second, 100*time.Millisecond)
+
+ c13, err := sw1.DialPeer(context.Background(), sw3.LocalPeer())
+ require.NoError(t, err)
+
+ var c31 network.Conn
+ require.Eventually(t, func() bool {
+ conns := sw3.ConnsToPeer(sw1.LocalPeer())
+ if len(conns) == 0 {
+ return false
+ }
+ c31 = conns[0]
+ return true
+ }, 10*time.Second, 100*time.Millisecond)
+
+ not := cm.Notifee()
+ not.Connected(sw1, c12)
+ not.Connected(sw1, c13)
+
+ cm.TrimOpenConns(context.Background())
+
+ require.True(t, c12.IsClosed() || c13.IsClosed())
+ var c, cr network.Conn
+ if c12.IsClosed() {
+ c = c12
+ require.Eventually(t, func() bool {
+ conns := sw2.ConnsToPeer(sw1.LocalPeer())
+ if len(conns) == 0 {
+ cr = c21
+ return true
+ }
+ return false
+ }, 5*time.Second, 100*time.Millisecond)
+ } else {
+ c = c13
+ require.Eventually(t, func() bool {
+ conns := sw3.ConnsToPeer(sw1.LocalPeer())
+ if len(conns) == 0 {
+ cr = c31
+ return true
+ }
+ return false
+ }, 5*time.Second, 100*time.Millisecond)
+ }
+
+ _, err = c.NewStream(context.Background())
+ require.ErrorIs(t, err, &network.ConnError{ErrorCode: network.ConnGarbageCollected, Remote: false})
+
+ _, err = cr.NewStream(context.Background())
+ require.ErrorIs(t, err, &network.ConnError{ErrorCode: network.ConnGarbageCollected, Remote: true})
+}
diff --git a/p2p/net/connmgr/decay.go b/p2p/net/connmgr/decay.go
new file mode 100644
index 0000000000..9610a25c5f
--- /dev/null
+++ b/p2p/net/connmgr/decay.go
@@ -0,0 +1,358 @@
+package connmgr
+
+import (
+ "fmt"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/connmgr"
+ "github.com/libp2p/go-libp2p/core/peer"
+
+ "github.com/benbjohnson/clock"
+)
+
+// DefaultResolution is the default resolution of the decay tracker.
+var DefaultResolution = 1 * time.Minute
+
+// bumpCmd represents a bump command.
+type bumpCmd struct {
+ peer peer.ID
+ tag *decayingTag
+ delta int
+}
+
+// removeCmd represents a tag removal command.
+type removeCmd struct {
+ peer peer.ID
+ tag *decayingTag
+}
+
+// decayer tracks and manages all decaying tags and their values.
+type decayer struct {
+ cfg *DecayerCfg
+ mgr *BasicConnMgr
+ clock clock.Clock // for testing.
+
+ tagsMu sync.Mutex
+ knownTags map[string]*decayingTag
+
+ // lastTick stores the last time the decayer ticked. Guarded by atomic.
+ lastTick atomic.Pointer[time.Time]
+
+ // bumpTagCh queues bump commands to be processed by the loop.
+ bumpTagCh chan bumpCmd
+ removeTagCh chan removeCmd
+ closeTagCh chan *decayingTag
+
+	// shutdown signalling.
+ closeCh chan struct{}
+ doneCh chan struct{}
+ err error
+}
+
+var _ connmgr.Decayer = (*decayer)(nil)
+
+// DecayerCfg is the configuration object for the Decayer.
+type DecayerCfg struct {
+ Resolution time.Duration
+ Clock clock.Clock
+}
+
+// WithDefaults writes the default values on this DecayerCfg instance,
+// and returns itself for chainability.
+//
+// cfg := (&DecayerCfg{}).WithDefaults()
+// cfg.Resolution = 30 * time.Second
+// t := NewDecayer(cfg, cm)
+func (cfg *DecayerCfg) WithDefaults() *DecayerCfg {
+ cfg.Resolution = DefaultResolution
+ return cfg
+}
+
+// NewDecayer creates a new decaying tag registry.
+func NewDecayer(cfg *DecayerCfg, mgr *BasicConnMgr) (*decayer, error) {
+ // use real time if the Clock in the config is nil.
+ if cfg.Clock == nil {
+ cfg.Clock = clock.New()
+ }
+
+ d := &decayer{
+ cfg: cfg,
+ mgr: mgr,
+ clock: cfg.Clock,
+ knownTags: make(map[string]*decayingTag),
+ bumpTagCh: make(chan bumpCmd, 128),
+ removeTagCh: make(chan removeCmd, 128),
+ closeTagCh: make(chan *decayingTag, 128),
+ closeCh: make(chan struct{}),
+ doneCh: make(chan struct{}),
+ }
+
+ now := d.clock.Now()
+ d.lastTick.Store(&now)
+
+ // kick things off.
+ go d.process()
+
+ return d, nil
+}
+
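+// RegisterDecayingTag creates and registers a new decaying tag with the given
+// name, decay interval, decay function and bump function. A minimal sketch of
+// registering and bumping a tag, assuming the DecayFixed and BumpSumUnbounded
+// preset helpers from the core connmgr package (the tag name, interval and
+// peerID are illustrative):
+//
+//	tag, err := cm.RegisterDecayingTag("usefulness", time.Minute,
+//		connmgr.DecayFixed(1),      // subtract 1 on every interval
+//		connmgr.BumpSumUnbounded()) // bumps add their delta to the value
+//	if err != nil {
+//		// handle error
+//	}
+//	tag.Bump(peerID, 10)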
+func (d *decayer) RegisterDecayingTag(name string, interval time.Duration, decayFn connmgr.DecayFn, bumpFn connmgr.BumpFn) (connmgr.DecayingTag, error) {
+ d.tagsMu.Lock()
+ defer d.tagsMu.Unlock()
+
+ if _, ok := d.knownTags[name]; ok {
+ return nil, fmt.Errorf("decaying tag with name %s already exists", name)
+ }
+
+ if interval < d.cfg.Resolution {
+ log.Warn("decay interval was lower than tracker's resolution; overridden to resolution",
+ "name", name,
+ "interval", interval,
+ "resolution", d.cfg.Resolution)
+ interval = d.cfg.Resolution
+ }
+
+ if interval%d.cfg.Resolution != 0 {
+ log.Warn("decay interval for tag is not a multiple of tracker's resolution; some precision may be lost",
+ "tag", name, "interval", interval, "resolution", d.cfg.Resolution)
+ }
+
+ lastTick := d.lastTick.Load()
+ tag := &decayingTag{
+ trkr: d,
+ name: name,
+ interval: interval,
+ nextTick: lastTick.Add(interval),
+ decayFn: decayFn,
+ bumpFn: bumpFn,
+ }
+
+ d.knownTags[name] = tag
+ return tag, nil
+}
+
+// Close closes the Decayer. It is idempotent.
+func (d *decayer) Close() error {
+ select {
+ case <-d.doneCh:
+ return d.err
+ default:
+ }
+
+ close(d.closeCh)
+ <-d.doneCh
+ return d.err
+}
+
+// process is the heart of the tracker. It performs the following duties:
+//
+// 1. Manages decay.
+// 2. Applies score bumps.
+// 3. Yields when closed.
+func (d *decayer) process() {
+ defer close(d.doneCh)
+
+ ticker := d.clock.Ticker(d.cfg.Resolution)
+ defer ticker.Stop()
+
+ var (
+ bmp bumpCmd
+ visit = make(map[*decayingTag]struct{})
+ )
+
+ for {
+ select {
+ case <-ticker.C:
+ now := d.clock.Now()
+ d.lastTick.Store(&now)
+
+ d.tagsMu.Lock()
+ for _, tag := range d.knownTags {
+ if tag.nextTick.After(now) {
+ // skip the tag.
+ continue
+ }
+ // Mark the tag to be updated in this round.
+ visit[tag] = struct{}{}
+ }
+ d.tagsMu.Unlock()
+
+ // Visit each peer, and decay tags that need to be decayed.
+ for _, s := range d.mgr.segments.buckets {
+ s.Lock()
+
+ // Entered a segment that contains peers. Process each peer.
+ for _, p := range s.peers {
+ for tag, v := range p.decaying {
+ if _, ok := visit[tag]; !ok {
+ // skip this tag.
+ continue
+ }
+
+ // this value needs to be visited.
+ var delta int
+ if after, rm := tag.decayFn(*v); rm {
+ // delete the value and move on to the next tag.
+ delta -= v.Value
+ delete(p.decaying, tag)
+ } else {
+ // accumulate the delta, and apply the changes.
+ delta += after - v.Value
+ v.Value, v.LastVisit = after, now
+ }
+ p.value += delta
+ }
+ }
+
+ s.Unlock()
+ }
+
+ // Reset each tag's next visit round, and clear the visited set.
+ for tag := range visit {
+ tag.nextTick = tag.nextTick.Add(tag.interval)
+ delete(visit, tag)
+ }
+
+ case bmp = <-d.bumpTagCh:
+ var (
+ now = d.clock.Now()
+ peer, tag = bmp.peer, bmp.tag
+ )
+
+ s := d.mgr.segments.get(peer)
+ s.Lock()
+
+ p := s.tagInfoFor(peer, d.clock.Now())
+ v, ok := p.decaying[tag]
+ if !ok {
+ v = &connmgr.DecayingValue{
+ Tag: tag,
+ Peer: peer,
+ LastVisit: now,
+ Added: now,
+ Value: 0,
+ }
+ p.decaying[tag] = v
+ }
+
+ prev := v.Value
+ v.Value, v.LastVisit = v.Tag.(*decayingTag).bumpFn(*v, bmp.delta), now
+ p.value += v.Value - prev
+
+ s.Unlock()
+
+ case rm := <-d.removeTagCh:
+ s := d.mgr.segments.get(rm.peer)
+ s.Lock()
+
+ p := s.tagInfoFor(rm.peer, d.clock.Now())
+ v, ok := p.decaying[rm.tag]
+ if !ok {
+ s.Unlock()
+ continue
+ }
+ p.value -= v.Value
+ delete(p.decaying, rm.tag)
+ s.Unlock()
+
+ case t := <-d.closeTagCh:
+ // Stop tracking the tag.
+ d.tagsMu.Lock()
+ delete(d.knownTags, t.name)
+ d.tagsMu.Unlock()
+
+ // Remove the tag from all peers that had it in the connmgr.
+ for _, s := range d.mgr.segments.buckets {
+ // visit all segments, and attempt to remove the tag from all the peers it stores.
+ s.Lock()
+ for _, p := range s.peers {
+ if dt, ok := p.decaying[t]; ok {
+ // decrease the value of the tagInfo, and delete the tag.
+ p.value -= dt.Value
+ delete(p.decaying, t)
+ }
+ }
+ s.Unlock()
+ }
+
+ case <-d.closeCh:
+ return
+ }
+ }
+}
+
+// decayingTag represents a decaying tag, with an associated decay interval, a
+// decay function, and a bump function.
+type decayingTag struct {
+ trkr *decayer
+ name string
+ interval time.Duration
+ nextTick time.Time
+ decayFn connmgr.DecayFn
+ bumpFn connmgr.BumpFn
+
+ // closed marks this tag as closed, so that if it's bumped after being
+ // closed, we can return an error.
+ closed atomic.Bool
+}
+
+var _ connmgr.DecayingTag = (*decayingTag)(nil)
+
+func (t *decayingTag) Name() string {
+ return t.name
+}
+
+func (t *decayingTag) Interval() time.Duration {
+ return t.interval
+}
+
+// Bump bumps a tag for this peer.
+func (t *decayingTag) Bump(p peer.ID, delta int) error {
+ if t.closed.Load() {
+ return fmt.Errorf("decaying tag %s had been closed; no further bumps are accepted", t.name)
+ }
+
+ bmp := bumpCmd{peer: p, tag: t, delta: delta}
+
+ select {
+ case t.trkr.bumpTagCh <- bmp:
+ return nil
+ default:
+ return fmt.Errorf(
+ "unable to bump decaying tag for peer %s, tag %s, delta %d; queue full (len=%d)",
+ p, t.name, delta, len(t.trkr.bumpTagCh))
+ }
+}
+
+func (t *decayingTag) Remove(p peer.ID) error {
+ if t.closed.Load() {
+ return fmt.Errorf("decaying tag %s had been closed; no further removals are accepted", t.name)
+ }
+
+ rm := removeCmd{peer: p, tag: t}
+
+ select {
+ case t.trkr.removeTagCh <- rm:
+ return nil
+ default:
+ return fmt.Errorf(
+ "unable to remove decaying tag for peer %s, tag %s; queue full (len=%d)",
+ p, t.name, len(t.trkr.removeTagCh))
+ }
+}
+
+func (t *decayingTag) Close() error {
+ if !t.closed.CompareAndSwap(false, true) {
+ log.Warn("duplicate decaying tag closure; skipping", "tag", t.name)
+ return nil
+ }
+
+ select {
+ case t.trkr.closeTagCh <- t:
+ return nil
+ default:
+ return fmt.Errorf("unable to close decaying tag %s; queue full (len=%d)", t.name, len(t.trkr.closeTagCh))
+ }
+}
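For orientation, here is a minimal sketch (not part of the patch) of how the decaying-tag API added in `decay.go` is driven from the outside. It only uses APIs visible in this diff; the tag name, watermarks, and deltas are illustrative placeholders:

```go
package main

import (
	"time"

	coreconnmgr "github.com/libp2p/go-libp2p/core/connmgr"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/p2p/net/connmgr"
)

func main() {
	// Enable the decayer when constructing the connection manager.
	cfg := (&connmgr.DecayerCfg{}).WithDefaults()
	cm, err := connmgr.NewConnManager(100, 400,
		connmgr.WithGracePeriod(time.Minute),
		connmgr.DecayerConfig(cfg))
	if err != nil {
		panic(err)
	}
	defer cm.Close()

	// The decayer is reached through the capability check in core/connmgr.
	decayer, ok := coreconnmgr.SupportsDecay(cm)
	if !ok {
		panic("connmgr does not support decay")
	}

	// A tag that loses 1 point per minute and caps the total at 100.
	tag, err := decayer.RegisterDecayingTag("usefulness", time.Minute,
		coreconnmgr.DecayFixed(1), coreconnmgr.BumpSumBounded(0, 100))
	if err != nil {
		panic(err)
	}

	var p peer.ID // placeholder: a peer observed doing something useful
	_ = tag.Bump(p, 10) // enqueued; applied asynchronously by the process loop
}
```

Note that bumps are queued on `bumpTagCh` and applied by the `process()` loop, which is why `Bump` can fail with a queue-full error rather than block.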
diff --git a/p2p/net/connmgr/decay_test.go b/p2p/net/connmgr/decay_test.go
new file mode 100644
index 0000000000..d51ca298c2
--- /dev/null
+++ b/p2p/net/connmgr/decay_test.go
@@ -0,0 +1,338 @@
+package connmgr
+
+import (
+ "os"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/connmgr"
+ "github.com/libp2p/go-libp2p/core/peer"
+ tu "github.com/libp2p/go-libp2p/core/test"
+
+ "github.com/benbjohnson/clock"
+ "github.com/stretchr/testify/require"
+)
+
+const TestResolution = 50 * time.Millisecond
+
+func waitForTag(t *testing.T, mgr *BasicConnMgr, id peer.ID) {
+ t.Helper()
+ require.Eventually(t, func() bool { return mgr.GetTagInfo(id) != nil }, 500*time.Millisecond, 10*time.Millisecond)
+}
+
+func TestDecayExpire(t *testing.T) {
+ id := tu.RandPeerIDFatal(t)
+ mgr, decay, mockClock := testDecayTracker(t)
+
+ tag, err := decay.RegisterDecayingTag("pop", 250*time.Millisecond, connmgr.DecayExpireWhenInactive(1*time.Second), connmgr.BumpSumUnbounded())
+ require.NoError(t, err)
+ require.NoError(t, tag.Bump(id, 10))
+
+ waitForTag(t, mgr, id)
+ require.Equal(t, 10, mgr.GetTagInfo(id).Value)
+
+ mockClock.Add(250 * time.Millisecond)
+ mockClock.Add(250 * time.Millisecond)
+ mockClock.Add(250 * time.Millisecond)
+ mockClock.Add(250 * time.Millisecond)
+
+ eventuallyEqual(t, func() int { return mgr.GetTagInfo(id).Value }, 0)
+}
+
+func TestMultipleBumps(t *testing.T) {
+ id := tu.RandPeerIDFatal(t)
+ mgr, decay, _ := testDecayTracker(t)
+
+ tag, err := decay.RegisterDecayingTag("pop", 250*time.Millisecond, connmgr.DecayExpireWhenInactive(1*time.Second), connmgr.BumpSumBounded(10, 20))
+ require.NoError(t, err)
+
+ require.NoError(t, tag.Bump(id, 5))
+
+ waitForTag(t, mgr, id)
+ require.Equal(t, 10, mgr.GetTagInfo(id).Value)
+
+ require.NoError(t, tag.Bump(id, 100))
+ require.Eventually(t, func() bool { return mgr.GetTagInfo(id).Value == 20 }, 100*time.Millisecond, 10*time.Millisecond, "expected tag value to be capped at 20")
+}
+
+func TestMultipleTagsNoDecay(t *testing.T) {
+ id := tu.RandPeerIDFatal(t)
+ mgr, decay, _ := testDecayTracker(t)
+
+ tag1, err := decay.RegisterDecayingTag("beep", 250*time.Millisecond, connmgr.DecayNone(), connmgr.BumpSumBounded(0, 100))
+ require.NoError(t, err)
+ tag2, err := decay.RegisterDecayingTag("bop", 250*time.Millisecond, connmgr.DecayNone(), connmgr.BumpSumBounded(0, 100))
+ require.NoError(t, err)
+ tag3, err := decay.RegisterDecayingTag("foo", 250*time.Millisecond, connmgr.DecayNone(), connmgr.BumpSumBounded(0, 100))
+ require.NoError(t, err)
+
+ _ = tag1.Bump(id, 100)
+ _ = tag2.Bump(id, 100)
+ _ = tag3.Bump(id, 100)
+ _ = tag1.Bump(id, 100)
+ _ = tag2.Bump(id, 100)
+ _ = tag3.Bump(id, 100)
+
+ waitForTag(t, mgr, id)
+
+ // all tags are upper-bounded, so the score must be 300
+ ti := mgr.GetTagInfo(id)
+ require.Equal(t, 300, ti.Value)
+
+ for _, s := range []string{"beep", "bop", "foo"} {
+ if v, ok := ti.Tags[s]; !ok || v != 100 {
+ t.Fatalf("expected tag %s to be 100; was = %d", s, v)
+ }
+ }
+}
+
+func TestCustomFunctions(t *testing.T) {
+ id := tu.RandPeerIDFatal(t)
+ mgr, decay, mockClock := testDecayTracker(t)
+
+ tag1, err := decay.RegisterDecayingTag("beep", 250*time.Millisecond, connmgr.DecayFixed(10), connmgr.BumpSumUnbounded())
+ require.NoError(t, err)
+ tag2, err := decay.RegisterDecayingTag("bop", 100*time.Millisecond, connmgr.DecayFixed(5), connmgr.BumpSumUnbounded())
+ require.NoError(t, err)
+ tag3, err := decay.RegisterDecayingTag("foo", 50*time.Millisecond, connmgr.DecayFixed(1), connmgr.BumpSumUnbounded())
+ require.NoError(t, err)
+
+ _ = tag1.Bump(id, 1000)
+ _ = tag2.Bump(id, 1000)
+ _ = tag3.Bump(id, 1000)
+
+ waitForTag(t, mgr, id)
+
+ // no decay has occurred yet, so score must be 3000.
+ require.Equal(t, 3000, mgr.GetTagInfo(id).Value)
+
+ // only tag3 should tick.
+ mockClock.Add(50 * time.Millisecond)
+ eventuallyEqual(t, func() int { return mgr.GetTagInfo(id).Value }, 2999)
+
+ // tag3 will tick thrice, tag2 will tick twice.
+ mockClock.Add(150 * time.Millisecond)
+ eventuallyEqual(t, func() int { return mgr.GetTagInfo(id).Value }, 2986)
+
+ // tag3 will tick once, tag1 will tick once.
+ mockClock.Add(50 * time.Millisecond)
+ eventuallyEqual(t, func() int { return mgr.GetTagInfo(id).Value }, 2975)
+}
+
+func TestMultiplePeers(t *testing.T) {
+ ids := []peer.ID{tu.RandPeerIDFatal(t), tu.RandPeerIDFatal(t), tu.RandPeerIDFatal(t)}
+ mgr, decay, mockClock := testDecayTracker(t)
+
+ tag1, err := decay.RegisterDecayingTag("beep", 250*time.Millisecond, connmgr.DecayFixed(10), connmgr.BumpSumUnbounded())
+ require.NoError(t, err)
+ tag2, err := decay.RegisterDecayingTag("bop", 100*time.Millisecond, connmgr.DecayFixed(5), connmgr.BumpSumUnbounded())
+ require.NoError(t, err)
+ tag3, err := decay.RegisterDecayingTag("foo", 50*time.Millisecond, connmgr.DecayFixed(1), connmgr.BumpSumUnbounded())
+ require.NoError(t, err)
+
+ _ = tag1.Bump(ids[0], 1000)
+ _ = tag2.Bump(ids[0], 1000)
+ _ = tag3.Bump(ids[0], 1000)
+
+ _ = tag1.Bump(ids[1], 500)
+ _ = tag2.Bump(ids[1], 500)
+ _ = tag3.Bump(ids[1], 500)
+
+ _ = tag1.Bump(ids[2], 100)
+ _ = tag2.Bump(ids[2], 100)
+ _ = tag3.Bump(ids[2], 100)
+
+ // allow the background goroutine to process bumps.
+ waitFor := 100 * time.Millisecond
+ tick := 10 * time.Millisecond
+ if os.Getenv("CI") != "" {
+ waitFor *= 10
+ tick *= 10
+ }
+
+ require.Eventually(t, func() bool {
+ return mgr.GetTagInfo(ids[0]) != nil && mgr.GetTagInfo(ids[1]) != nil && mgr.GetTagInfo(ids[2]) != nil
+ }, waitFor, tick)
+
+ mockClock.Add(3 * time.Second)
+
+ waitFor = 500 * time.Millisecond
+ if os.Getenv("CI") != "" {
+ waitFor *= 10
+ }
+
+ require.Eventually(t, func() bool { return mgr.GetTagInfo(ids[0]).Value == 2670 }, waitFor, tick)
+ require.Equal(t, 1170, mgr.GetTagInfo(ids[1]).Value)
+ require.Equal(t, 40, mgr.GetTagInfo(ids[2]).Value)
+}
+
+func eventuallyEqual(t *testing.T, f func() int, val int) {
+ t.Helper()
+ require.Eventually(t, func() bool {
+ v := f()
+ if v == val {
+ return true
+ }
+ t.Log("f() was", v, "expected", val, "retrying...")
+ return false
+ }, 1*time.Second, 10*time.Millisecond)
+}
+
+func TestLinearDecayOverwrite(t *testing.T) {
+ id := tu.RandPeerIDFatal(t)
+ mgr, decay, mockClock := testDecayTracker(t)
+
+ tag1, err := decay.RegisterDecayingTag("beep", 250*time.Millisecond, connmgr.DecayLinear(0.5), connmgr.BumpOverwrite())
+ require.NoError(t, err)
+
+ _ = tag1.Bump(id, 1000)
+ waitForTag(t, mgr, id)
+
+ eventuallyEqual(t, func() int { return mgr.GetTagInfo(id).Value }, 1000)
+ mockClock.Add(250 * time.Millisecond)
+ eventuallyEqual(t, func() int { return mgr.GetTagInfo(id).Value }, 500)
+
+ mockClock.Add(250 * time.Millisecond)
+ eventuallyEqual(t, func() int { return mgr.GetTagInfo(id).Value }, 250)
+
+ _ = tag1.Bump(id, 1000)
+ eventuallyEqual(t, func() int { return mgr.GetTagInfo(id).Value }, 1000)
+}
+
+func TestResolutionMisaligned(t *testing.T) {
+ var (
+ id = tu.RandPeerIDFatal(t)
+ mgr, decay, mockClock = testDecayTracker(t)
+ require = require.New(t)
+ )
+
+ tag1, err := decay.RegisterDecayingTag("beep", time.Duration(float64(TestResolution)*1.4), connmgr.DecayFixed(1), connmgr.BumpOverwrite())
+ require.NoError(err)
+
+ tag2, err := decay.RegisterDecayingTag("bop", time.Duration(float64(TestResolution)*2.4), connmgr.DecayFixed(1), connmgr.BumpOverwrite())
+ require.NoError(err)
+
+ _ = tag1.Bump(id, 1000)
+ _ = tag2.Bump(id, 1000)
+ // allow the background goroutine to process bumps.
+ <-time.After(500 * time.Millisecond)
+
+ // first tick.
+ mockClock.Add(TestResolution)
+ eventuallyEqual(t, func() int { return mgr.GetTagInfo(id).Tags["beep"] }, 1000)
+ require.Equal(1000, mgr.GetTagInfo(id).Tags["bop"])
+
+ // next tick; tag1 would've ticked.
+ mockClock.Add(TestResolution)
+ eventuallyEqual(t, func() int { return mgr.GetTagInfo(id).Tags["beep"] }, 999)
+ require.Equal(1000, mgr.GetTagInfo(id).Tags["bop"])
+
+ // next tick; tag1 would've ticked twice, tag2 once.
+ mockClock.Add(TestResolution)
+ eventuallyEqual(t, func() int { return mgr.GetTagInfo(id).Tags["beep"] }, 998)
+ require.Equal(999, mgr.GetTagInfo(id).Tags["bop"])
+
+ require.Equal(1997, mgr.GetTagInfo(id).Value)
+}
+
+func TestTagRemoval(t *testing.T) {
+ id1, id2 := tu.RandPeerIDFatal(t), tu.RandPeerIDFatal(t)
+ mgr, decay, mockClock := testDecayTracker(t)
+
+ tag1, err := decay.RegisterDecayingTag("beep", TestResolution, connmgr.DecayFixed(1), connmgr.BumpOverwrite())
+ require.NoError(t, err)
+
+ tag2, err := decay.RegisterDecayingTag("bop", TestResolution, connmgr.DecayFixed(1), connmgr.BumpOverwrite())
+ require.NoError(t, err)
+
+ // id1 has both tags; id2 only has the first tag.
+ _ = tag1.Bump(id1, 1000)
+ _ = tag2.Bump(id1, 1000)
+ _ = tag1.Bump(id2, 1000)
+
+ waitForTag(t, mgr, id1)
+ waitForTag(t, mgr, id2)
+
+ // first tick.
+ mockClock.Add(TestResolution)
+ eventuallyEqual(t, func() int { return mgr.GetTagInfo(id1).Tags["beep"] }, 999)
+ require.Equal(t, 999, mgr.GetTagInfo(id1).Tags["bop"])
+ require.Equal(t, 999, mgr.GetTagInfo(id2).Tags["beep"])
+
+ require.Equal(t, 999*2, mgr.GetTagInfo(id1).Value)
+ require.Equal(t, 999, mgr.GetTagInfo(id2).Value)
+
+ // remove tag1 from p1.
+ require.NoError(t, tag1.Remove(id1))
+
+ // next tick. both peers only have 1 tag, both at 998 value.
+ mockClock.Add(TestResolution)
+ require.Eventually(t, func() bool { return mgr.GetTagInfo(id1).Tags["beep"] == 0 }, 500*time.Millisecond, 10*time.Millisecond)
+ require.Equal(t, 998, mgr.GetTagInfo(id1).Tags["bop"])
+ require.Equal(t, 998, mgr.GetTagInfo(id2).Tags["beep"])
+
+ require.Equal(t, 998, mgr.GetTagInfo(id1).Value)
+ require.Equal(t, 998, mgr.GetTagInfo(id2).Value)
+
+ // remove tag1 from p1 again; no error.
+ require.NoError(t, tag1.Remove(id1))
+}
+
+func TestTagClosure(t *testing.T) {
+ id := tu.RandPeerIDFatal(t)
+ mgr, decay, mockClock := testDecayTracker(t)
+
+ tag1, err := decay.RegisterDecayingTag("beep", TestResolution, connmgr.DecayFixed(1), connmgr.BumpOverwrite())
+ require.NoError(t, err)
+ tag2, err := decay.RegisterDecayingTag("bop", TestResolution, connmgr.DecayFixed(1), connmgr.BumpOverwrite())
+ require.NoError(t, err)
+
+ _ = tag1.Bump(id, 1000)
+ _ = tag2.Bump(id, 1000)
+ waitForTag(t, mgr, id)
+
+ // nothing has happened.
+ mockClock.Add(TestResolution)
+ eventuallyEqual(t, func() int { return mgr.GetTagInfo(id).Tags["beep"] }, 999)
+ require.Equal(t, 999, mgr.GetTagInfo(id).Tags["bop"])
+ require.Equal(t, 999*2, mgr.GetTagInfo(id).Value)
+
+ // next tick; tag1 would've ticked.
+ mockClock.Add(TestResolution)
+ eventuallyEqual(t, func() int { return mgr.GetTagInfo(id).Tags["beep"] }, 998)
+ require.Equal(t, 998, mgr.GetTagInfo(id).Tags["bop"])
+ require.Equal(t, 998*2, mgr.GetTagInfo(id).Value)
+
+ // close the tag.
+ require.NoError(t, tag1.Close())
+
+ // allow the background goroutine to process the closure.
+ require.Eventually(t, func() bool { return mgr.GetTagInfo(id).Value == 998 }, 500*time.Millisecond, 10*time.Millisecond)
+
+ // a second closure should not error.
+ require.NoError(t, tag1.Close())
+
+ // bumping a tag after it's been closed should error.
+ require.Error(t, tag1.Bump(id, 5))
+}
+
+func testDecayTracker(tb testing.TB) (*BasicConnMgr, connmgr.Decayer, *clock.Mock) {
+ mockClock := clock.NewMock()
+ cfg := &DecayerCfg{
+ Resolution: TestResolution,
+ Clock: mockClock,
+ }
+
+ mgr, err := NewConnManager(10, 10, WithGracePeriod(time.Second), DecayerConfig(cfg))
+ require.NoError(tb, err)
+ decay, ok := connmgr.SupportsDecay(mgr)
+ if !ok {
+ tb.Fatalf("connmgr does not support decay")
+ }
+ tb.Cleanup(func() {
+ mgr.Close()
+ decay.Close()
+ })
+
+ return mgr, decay, mockClock
+}
diff --git a/p2p/net/connmgr/options.go b/p2p/net/connmgr/options.go
new file mode 100644
index 0000000000..3766e341c7
--- /dev/null
+++ b/p2p/net/connmgr/options.go
@@ -0,0 +1,63 @@
+package connmgr
+
+import (
+ "errors"
+ "time"
+
+ "github.com/benbjohnson/clock"
+)
+
+// config is the configuration struct for the basic connection manager.
+type config struct {
+ highWater int
+ lowWater int
+ gracePeriod time.Duration
+ silencePeriod time.Duration
+ decayer *DecayerCfg
+ clock clock.Clock
+}
+
+// Option represents an option for the basic connection manager.
+type Option func(*config) error
+
+// DecayerConfig applies a configuration for the decayer.
+func DecayerConfig(opts *DecayerCfg) Option {
+ return func(cfg *config) error {
+ cfg.decayer = opts
+ return nil
+ }
+}
+
+// WithClock sets the internal clock implementation.
+func WithClock(c clock.Clock) Option {
+ return func(cfg *config) error {
+ cfg.clock = c
+ return nil
+ }
+}
+
+// WithGracePeriod sets the grace period.
+// The grace period is the time a newly opened connection is given before it becomes
+// subject to pruning.
+func WithGracePeriod(p time.Duration) Option {
+ return func(cfg *config) error {
+ if p < 0 {
+ return errors.New("grace period must be non-negative")
+ }
+ cfg.gracePeriod = p
+ return nil
+ }
+}
+
+// WithSilencePeriod sets the silence period.
+// The connection manager will perform a cleanup once per silence period
+// if the number of connections surpasses the high watermark.
+func WithSilencePeriod(p time.Duration) Option {
+ return func(cfg *config) error {
+ if p <= 0 {
+ return errors.New("silence period must be non-zero")
+ }
+ cfg.silencePeriod = p
+ return nil
+ }
+}
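A short sketch (not part of the patch) of the options added in `options.go`, combined with the mock clock that the tests in this patch already use; the watermarks and durations are arbitrary:

```go
package connmgr_test

import (
	"testing"
	"time"

	"github.com/benbjohnson/clock"
	"github.com/libp2p/go-libp2p/p2p/net/connmgr"
)

func TestOptionsSketch(t *testing.T) {
	mock := clock.NewMock()
	cm, err := connmgr.NewConnManager(10, 20,
		connmgr.WithGracePeriod(30*time.Second),   // new conns immune to pruning for 30s
		connmgr.WithSilencePeriod(10*time.Second), // prune at most once per 10s
		connmgr.WithClock(mock),                   // deterministic time in tests
	)
	if err != nil {
		t.Fatal(err)
	}
	defer cm.Close()

	mock.Add(time.Minute) // advance virtual time without sleeping
}
```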
diff --git a/p2p/net/gostream/addr.go b/p2p/net/gostream/addr.go
new file mode 100644
index 0000000000..49d844f675
--- /dev/null
+++ b/p2p/net/gostream/addr.go
@@ -0,0 +1,14 @@
+package gostream
+
+import "github.com/libp2p/go-libp2p/core/peer"
+
+// addr implements net.Addr and holds a libp2p peer ID.
+type addr struct{ id peer.ID }
+
+// Network returns the name of the network that this address belongs to
+// (libp2p).
+func (a *addr) Network() string { return Network }
+
+// String returns the peer ID of this address in string form
+// (B58-encoded).
+func (a *addr) String() string { return a.id.String() }
diff --git a/p2p/net/gostream/conn.go b/p2p/net/gostream/conn.go
new file mode 100644
index 0000000000..6959b6cbe0
--- /dev/null
+++ b/p2p/net/gostream/conn.go
@@ -0,0 +1,53 @@
+package gostream
+
+import (
+ "context"
+ "io"
+ "net"
+
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/protocol"
+)
+
+// conn is an implementation of net.Conn which wraps
+// libp2p streams.
+type conn struct {
+ network.Stream
+ ignoreEOF bool
+}
+
+func (c *conn) Read(b []byte) (int, error) {
+ n, err := c.Stream.Read(b)
+ if err != nil && c.ignoreEOF && err == io.EOF {
+ return n, nil
+ }
+ return n, err
+}
+
+// newConn creates a conn given a libp2p stream
+func newConn(s network.Stream, ignoreEOF bool) net.Conn {
+ return &conn{s, ignoreEOF}
+}
+
+// LocalAddr returns the local network address.
+func (c *conn) LocalAddr() net.Addr {
+ return &addr{c.Stream.Conn().LocalPeer()}
+}
+
+// RemoteAddr returns the remote network address.
+func (c *conn) RemoteAddr() net.Addr {
+ return &addr{c.Stream.Conn().RemotePeer()}
+}
+
+// Dial opens a stream to the destination peer (identified by its
+// peer ID) using the given host and returns it as a standard net.Conn.
+func Dial(ctx context.Context, h host.Host, pid peer.ID, tag protocol.ID) (net.Conn, error) {
+ s, err := h.NewStream(ctx, pid, tag)
+ if err != nil {
+ return nil, err
+ }
+ return newConn(s, false), nil
+}
diff --git a/p2p/net/gostream/gostream.go b/p2p/net/gostream/gostream.go
new file mode 100644
index 0000000000..a15125be32
--- /dev/null
+++ b/p2p/net/gostream/gostream.go
@@ -0,0 +1,19 @@
+// Package gostream allows replacing the standard net stack in Go
+// with [LibP2P](https://github.com/libp2p/libp2p) streams.
+//
+// Given a libp2p.Host, gostream provides Dial() and Listen() methods which
+// return implementations of net.Conn and net.Listener.
+//
+// Instead of the regular "host:port" addressing, `gostream` uses a Peer ID,
+// and rather than a raw TCP connection, gostream will use libp2p's net.Stream.
+// This means your connections will take advantage of LibP2P's multiple routes,
+// NAT traversal and stream multiplexing.
+//
+// Note that LibP2P hosts cannot dial themselves, so there is no possibility
+// of using the same Host as server and as client.
+package gostream
+
+// Network is the "net.Addr.Network()" name returned by
+// addresses used by gostream connections. In turn, the "net.Addr.String()" will
+// be a peer ID.
+var Network = "libp2p"
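The package doc is abstract, so here is a compact end-to-end sketch (not part of the patch; error handling elided, and the protocol ID is a placeholder). It assumes `libp2p.New()` with no options produces a host with default listen addresses:

```go
package main

import (
	"bufio"
	"context"
	"fmt"

	"github.com/libp2p/go-libp2p"
	"github.com/libp2p/go-libp2p/core/peerstore"
	"github.com/libp2p/go-libp2p/p2p/net/gostream"
)

func main() {
	server, _ := libp2p.New()
	client, _ := libp2p.New()
	defer server.Close()
	defer client.Close()

	// The client needs the server's addresses before dialing.
	client.Peerstore().AddAddrs(server.ID(), server.Addrs(), peerstore.PermanentAddrTTL)

	l, _ := gostream.Listen(server, "/echo/1.0.0")
	defer l.Close()
	go func() {
		c, _ := l.Accept()
		defer c.Close()
		msg, _ := bufio.NewReader(c).ReadString('\n')
		c.Write([]byte(msg)) // echo the line back
	}()

	c, _ := gostream.Dial(context.Background(), client, server.ID(), "/echo/1.0.0")
	defer c.Close()
	c.Write([]byte("hello\n"))
	reply, _ := bufio.NewReader(c).ReadString('\n')
	fmt.Print(reply) // prints "hello\n"
}
```

Two hosts are required because, as the package doc notes, a libp2p host cannot dial itself.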
diff --git a/p2p/net/gostream/gostream_test.go b/p2p/net/gostream/gostream_test.go
new file mode 100644
index 0000000000..c024cabc8b
--- /dev/null
+++ b/p2p/net/gostream/gostream_test.go
@@ -0,0 +1,141 @@
+package gostream
+
+import (
+ "bufio"
+ "context"
+ "io"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/libp2p/go-libp2p/core/protocol"
+ "github.com/multiformats/go-multiaddr"
+)
+
+// newHost builds a libp2p host with a randomly generated key pair,
+// listening on the given multiaddr.
+func newHost(t *testing.T, listen multiaddr.Multiaddr) host.Host {
+ h, err := libp2p.New(
+ libp2p.ListenAddrs(listen),
+ )
+ if err != nil {
+ t.Fatal(err)
+ }
+ return h
+}
+
+func TestServerClient(t *testing.T) {
+ m1, _ := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/10000")
+ m2, _ := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/10001")
+ srvHost := newHost(t, m1)
+ clientHost := newHost(t, m2)
+ defer srvHost.Close()
+ defer clientHost.Close()
+
+ srvHost.Peerstore().AddAddrs(clientHost.ID(), clientHost.Addrs(), peerstore.PermanentAddrTTL)
+ clientHost.Peerstore().AddAddrs(srvHost.ID(), srvHost.Addrs(), peerstore.PermanentAddrTTL)
+
+ var tag protocol.ID = "/testitytest"
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ done := make(chan struct{})
+ go func() {
+ defer close(done)
+ listener, err := Listen(srvHost, tag)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ defer listener.Close()
+
+ if listener.Addr().String() != srvHost.ID().String() {
+ t.Error("bad listener address")
+ return
+ }
+
+ servConn, err := listener.Accept()
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ defer servConn.Close()
+
+ reader := bufio.NewReader(servConn)
+ for {
+ msg, err := reader.ReadString('\n')
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ if msg != "is libp2p awesome?\n" {
+ t.Errorf("Bad incoming message: %s", msg)
+ return
+ }
+
+ _, err = servConn.Write([]byte("yes it is\n"))
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ }
+ }()
+
+ clientConn, err := Dial(ctx, clientHost, srvHost.ID(), tag)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if clientConn.LocalAddr().String() != clientHost.ID().String() {
+ t.Fatal("Bad LocalAddr")
+ }
+
+ if clientConn.RemoteAddr().String() != srvHost.ID().String() {
+ t.Fatal("Bad RemoteAddr")
+ }
+
+ if clientConn.LocalAddr().Network() != Network {
+ t.Fatal("Bad Network()")
+ }
+
+ err = clientConn.SetDeadline(time.Now().Add(time.Second))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = clientConn.SetReadDeadline(time.Now().Add(time.Second))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = clientConn.SetWriteDeadline(time.Now().Add(time.Second))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = clientConn.Write([]byte("is libp2p awesome?\n"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ reader := bufio.NewReader(clientConn)
+ resp, err := reader.ReadString('\n')
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if resp != "yes it is\n" {
+ t.Errorf("Bad response: %s", resp)
+ }
+
+ err = clientConn.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+ <-done
+}
diff --git a/p2p/net/gostream/listener.go b/p2p/net/gostream/listener.go
new file mode 100644
index 0000000000..f1146b0617
--- /dev/null
+++ b/p2p/net/gostream/listener.go
@@ -0,0 +1,89 @@
+package gostream
+
+import (
+ "context"
+ "net"
+
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/protocol"
+)
+
+// listener is an implementation of net.Listener which handles
+// http-tagged streams from a libp2p connection.
+// A listener can be built with Listen()
+type listener struct {
+ host host.Host
+ ctx context.Context
+ tag protocol.ID
+ cancel func()
+ streamCh chan network.Stream
+ // ignoreEOF tells the listener to return conns that ignore EOF errors.
+ // This is necessary because the HTTP server's default ResponseWriter
+ // considers a connection closed once it reads EOF, but on libp2p streams
+ // it is fine to read EOF and still be able to write.
+ ignoreEOF bool
+}
+
+// Accept returns the next connection to this listener.
+// It blocks if there are no connections. Under the hood,
+// connections are libp2p streams.
+func (l *listener) Accept() (net.Conn, error) {
+ select {
+ case s := <-l.streamCh:
+ return newConn(s, l.ignoreEOF), nil
+ case <-l.ctx.Done():
+ return nil, l.ctx.Err()
+ }
+}
+
+// Close terminates this listener. It will no longer handle any
+// incoming streams.
+func (l *listener) Close() error {
+ l.cancel()
+ l.host.RemoveStreamHandler(l.tag)
+ return nil
+}
+
+// Addr returns the address for this listener, which is its libp2p Peer ID.
+func (l *listener) Addr() net.Addr {
+ return &addr{l.host.ID()}
+}
+
+// Listen provides a standard net.Listener ready to accept "connections".
+// Under the hood, these connections are libp2p streams tagged with the
+// given protocol.ID.
+func Listen(h host.Host, tag protocol.ID, opts ...ListenerOption) (net.Listener, error) {
+ ctx, cancel := context.WithCancel(context.Background())
+
+ l := &listener{
+ host: h,
+ ctx: ctx,
+ cancel: cancel,
+ tag: tag,
+ streamCh: make(chan network.Stream),
+ }
+ for _, opt := range opts {
+ if err := opt(l); err != nil {
+ return nil, err
+ }
+ }
+
+ h.SetStreamHandler(tag, func(s network.Stream) {
+ select {
+ case l.streamCh <- s:
+ case <-ctx.Done():
+ s.Reset()
+ }
+ })
+
+ return l, nil
+}
+
+type ListenerOption func(*listener) error
+
+func IgnoreEOF() ListenerOption {
+ return func(l *listener) error {
+ l.ignoreEOF = true
+ return nil
+ }
+}
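The `IgnoreEOF` option exists for exactly the HTTP case described in the `ignoreEOF` comment above. A hypothetical helper (not part of the patch; the protocol ID is a placeholder) showing the intended pairing with `http.Serve`:

```go
package main

import (
	"net/http"

	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/p2p/net/gostream"
)

// serveHTTP serves an http.Handler over libp2p streams. Without IgnoreEOF,
// the HTTP server would treat a stream as dead once the request body hits
// EOF, even though the stream can still be written to.
func serveHTTP(h host.Host, handler http.Handler) error {
	l, err := gostream.Listen(h, "/http/1.0.0", gostream.IgnoreEOF())
	if err != nil {
		return err
	}
	defer l.Close()
	return http.Serve(l, handler)
}
```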
diff --git a/p2p/net/mock/complement.go b/p2p/net/mock/complement.go
new file mode 100644
index 0000000000..d0162ca420
--- /dev/null
+++ b/p2p/net/mock/complement.go
@@ -0,0 +1,17 @@
+package mocknet
+
+import (
+ "github.com/libp2p/go-libp2p/core/network"
+)
+
+// StreamComplement returns the other end of the given stream. This function
+// panics when passed a non-mocknet stream.
+func StreamComplement(s network.Stream) network.Stream {
+ return s.(*stream).rstream
+}
+
+// ConnComplement returns the other end of the given connection. This function
+// panics when passed a non-mocknet connection.
+func ConnComplement(c network.Conn) network.Conn {
+ return c.(*conn).rconn
+}
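`StreamComplement` and `ConnComplement` are test conveniences. A sketch (not part of the patch) of how a test can jump to the remote end of a mocknet connection, assuming two fully connected peers:

```go
package mocknet_test

import (
	"context"
	"testing"

	mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
	"github.com/stretchr/testify/require"
)

func TestComplementSketch(t *testing.T) {
	mn, err := mocknet.FullMeshConnected(2)
	require.NoError(t, err)
	defer mn.Close()

	nets := mn.Nets()
	conns := nets[0].ConnsToPeer(nets[1].LocalPeer())
	require.NotEmpty(t, conns)
	c := conns[0]

	// The same connection as seen from the other peer.
	rc := mocknet.ConnComplement(c)
	require.Equal(t, c.LocalPeer(), rc.RemotePeer())

	s, err := c.NewStream(context.Background())
	require.NoError(t, err)
	// The remote end of the stream we just opened.
	rs := mocknet.StreamComplement(s)
	require.Equal(t, s.Conn().LocalPeer(), rs.Conn().RemotePeer())
}
```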
diff --git a/p2p/net/mock/interface.go b/p2p/net/mock/interface.go
index e62fc4d718..acb2563500 100644
--- a/p2p/net/mock/interface.go
+++ b/p2p/net/mock/interface.go
@@ -1,43 +1,53 @@
// Package mocknet provides a mock net.Network to test with.
//
-// - a Mocknet has many inet.Networks
+// - a Mocknet has many network.Networks
// - a Mocknet has many Links
-// - a Link joins two inet.Networks
-// - inet.Conns and inet.Streams are created by inet.Networks
+// - a Link joins two network.Networks
+// - network.Conns and network.Streams are created by network.Networks
package mocknet
import (
"io"
"time"
- host "github.com/libp2p/go-libp2p-host"
+ "github.com/libp2p/go-libp2p/core/connmgr"
+ ic "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
- ic "github.com/libp2p/go-libp2p-crypto"
- inet "github.com/libp2p/go-libp2p-net"
- peer "github.com/libp2p/go-libp2p-peer"
- pstore "github.com/libp2p/go-libp2p-peerstore"
ma "github.com/multiformats/go-multiaddr"
)
-type Mocknet interface {
+type PeerOptions struct {
+ // ps is the Peerstore to use when adding a peer. If nil, a default peerstore will be created.
+ ps peerstore.Peerstore
+
+ // gater is the ConnectionGater to use when adding a peer. If nil, no connection gater will be used.
+ gater connmgr.ConnectionGater
+}
- // GenPeer generates a peer and its inet.Network in the Mocknet
+type Mocknet interface {
+ // GenPeer generates a peer and its network.Network in the Mocknet
GenPeer() (host.Host, error)
+ GenPeerWithOptions(PeerOptions) (host.Host, error)
// AddPeer adds an existing peer. we need both a privkey and addr.
// ID is derived from PrivKey
AddPeer(ic.PrivKey, ma.Multiaddr) (host.Host, error)
- AddPeerWithPeerstore(peer.ID, pstore.Peerstore) (host.Host, error)
+ AddPeerWithPeerstore(peer.ID, peerstore.Peerstore) (host.Host, error)
+ AddPeerWithOptions(peer.ID, PeerOptions) (host.Host, error)
// retrieve things (with randomized iteration order)
Peers() []peer.ID
- Net(peer.ID) inet.Network
- Nets() []inet.Network
+ Net(peer.ID) network.Network
+ Nets() []network.Network
Host(peer.ID) host.Host
Hosts() []host.Host
Links() LinkMap
LinksBetweenPeers(a, b peer.ID) []Link
- LinksBetweenNets(a, b inet.Network) []Link
+ LinksBetweenNets(a, b network.Network) []Link
// Links are the **ability to connect**.
// think of Links as the physical medium.
@@ -45,24 +55,26 @@ type Mocknet interface {
// (this makes it possible to test dial failures, and
// things like relaying traffic)
LinkPeers(peer.ID, peer.ID) (Link, error)
- LinkNets(inet.Network, inet.Network) (Link, error)
+ LinkNets(network.Network, network.Network) (Link, error)
Unlink(Link) error
UnlinkPeers(peer.ID, peer.ID) error
- UnlinkNets(inet.Network, inet.Network) error
+ UnlinkNets(network.Network, network.Network) error
// LinkDefaults are the default options that govern links
- // if they do not have thier own option set.
+ // if they do not have their own option set.
SetLinkDefaults(LinkOptions)
LinkDefaults() LinkOptions
// Connections are the usual. Connecting means Dialing.
// **to succeed, peers must be linked beforehand**
- ConnectPeers(peer.ID, peer.ID) (inet.Conn, error)
- ConnectNets(inet.Network, inet.Network) (inet.Conn, error)
+ ConnectPeers(peer.ID, peer.ID) (network.Conn, error)
+ ConnectNets(network.Network, network.Network) (network.Conn, error)
DisconnectPeers(peer.ID, peer.ID) error
- DisconnectNets(inet.Network, inet.Network) error
+ DisconnectNets(network.Network, network.Network) error
LinkAll() error
ConnectAllButSelf() error
+
+ io.Closer
}
// LinkOptions are used to change aspects of the links.
@@ -79,7 +91,7 @@ type LinkOptions struct {
// connect. This allows constructing topologies where specific
// nodes cannot talk to each other directly. :)
type Link interface {
- Networks() []inet.Network
+ Networks() []network.Network
Peers() []peer.ID
SetOptions(LinkOptions)
@@ -96,7 +108,7 @@ type LinkMap map[string]map[string]map[Link]struct{}
type Printer interface {
// MocknetLinks shows the entire Mocknet's link table :)
MocknetLinks(mn Mocknet)
- NetworkConns(ni inet.Network)
+ NetworkConns(ni network.Network)
}
// PrinterTo returns a Printer ready to write to w.
diff --git a/p2p/net/mock/mock.go b/p2p/net/mock/mock.go
index 8760b4d489..db1c7d3851 100644
--- a/p2p/net/mock/mock.go
+++ b/p2p/net/mock/mock.go
@@ -1,16 +1,14 @@
package mocknet
import (
- "context"
-
- logging "github.com/ipfs/go-log"
+ logging "github.com/libp2p/go-libp2p/gologshim"
)
var log = logging.Logger("mocknet")
// WithNPeers constructs a Mocknet with N peers.
-func WithNPeers(ctx context.Context, n int) (Mocknet, error) {
- m := New(ctx)
+func WithNPeers(n int) (Mocknet, error) {
+ m := New()
for i := 0; i < n; i++ {
if _, err := m.GenPeer(); err != nil {
return nil, err
@@ -22,8 +20,8 @@ func WithNPeers(ctx context.Context, n int) (Mocknet, error) {
// FullMeshLinked constructs a Mocknet with full mesh of Links.
// This means that all the peers **can** connect to each other
// (not that they already are connected. you can use m.ConnectAll())
-func FullMeshLinked(ctx context.Context, n int) (Mocknet, error) {
- m, err := WithNPeers(ctx, n)
+func FullMeshLinked(n int) (Mocknet, error) {
+ m, err := WithNPeers(n)
if err != nil {
return nil, err
}
@@ -31,27 +29,20 @@ func FullMeshLinked(ctx context.Context, n int) (Mocknet, error) {
if err := m.LinkAll(); err != nil {
return nil, err
}
-
return m, nil
}
// FullMeshConnected constructs a Mocknet with full mesh of Connections.
// This means that all the peers have dialed and are ready to talk to
// each other.
-func FullMeshConnected(ctx context.Context, n int) (Mocknet, error) {
- m, err := FullMeshLinked(ctx, n)
+func FullMeshConnected(n int) (Mocknet, error) {
+ m, err := FullMeshLinked(n)
if err != nil {
return nil, err
}
- nets := m.Nets()
- for _, n1 := range nets {
- for _, n2 := range nets {
- if _, err := m.ConnectNets(n1, n2); err != nil {
- return nil, err
- }
- }
+ if err := m.ConnectAllButSelf(); err != nil {
+ return nil, err
}
-
return m, nil
}
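With the `goprocess` plumbing gone, the mocknet constructors no longer take a context; callers close the mocknet directly. A sketch (not part of the patch):

```go
package main

import (
	"fmt"

	mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
)

func main() {
	// Three peers, fully linked and dialed. Note: no context argument anymore.
	mn, err := mocknet.FullMeshConnected(3)
	if err != nil {
		panic(err)
	}
	defer mn.Close() // Mocknet now implements io.Closer

	for _, h := range mn.Hosts() {
		fmt.Println(h.ID())
	}
}
```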
diff --git a/p2p/net/mock/mock_conn.go b/p2p/net/mock/mock_conn.go
index fcb634624c..36c60f53fc 100644
--- a/p2p/net/mock/mock_conn.go
+++ b/p2p/net/mock/mock_conn.go
@@ -2,19 +2,28 @@ package mocknet
import (
"container/list"
+ "context"
+ "strconv"
"sync"
+ "sync/atomic"
- process "github.com/jbenet/goprocess"
- ic "github.com/libp2p/go-libp2p-crypto"
- inet "github.com/libp2p/go-libp2p-net"
- peer "github.com/libp2p/go-libp2p-peer"
+ ic "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
)
+var connCounter atomic.Int64
+
// conn represents one side's perspective of a
// live connection between two peers.
// it goes over a particular link.
type conn struct {
+ notifLk sync.Mutex
+
+ id int64
+
local peer.ID
remote peer.ID
@@ -28,46 +37,68 @@ type conn struct {
link *link
rconn *conn // counterpart
streams list.List
- proc process.Process
+ stat network.ConnStats
+
+ closeOnce sync.Once
+
+ isClosed atomic.Bool
sync.RWMutex
}
-func newConn(ln, rn *peernet, l *link) *conn {
+func newConn(ln, rn *peernet, l *link, dir network.Direction) *conn {
c := &conn{net: ln, link: l}
c.local = ln.peer
c.remote = rn.peer
+ c.stat.Direction = dir
+ c.id = connCounter.Add(1)
c.localAddr = ln.ps.Addrs(ln.peer)[0]
- c.remoteAddr = rn.ps.Addrs(rn.peer)[0]
+ for _, a := range rn.ps.Addrs(rn.peer) {
+ if !manet.IsIPUnspecified(a) {
+ c.remoteAddr = a
+ break
+ }
+ }
+ if c.remoteAddr == nil {
+ c.remoteAddr = rn.ps.Addrs(rn.peer)[0]
+ }
c.localPrivKey = ln.ps.PrivKey(ln.peer)
c.remotePubKey = rn.ps.PubKey(rn.peer)
-
- c.proc = process.WithTeardown(c.teardown)
return c
}
+func (c *conn) IsClosed() bool {
+ return c.isClosed.Load()
+}
+
+func (c *conn) ID() string {
+ return strconv.FormatInt(c.id, 10)
+}
+
func (c *conn) Close() error {
- return c.proc.Close()
+ c.closeOnce.Do(func() {
+ c.isClosed.Store(true)
+ go c.rconn.Close()
+ c.teardown()
+ })
+ return nil
}
-func (c *conn) teardown() error {
+func (c *conn) teardown() {
for _, s := range c.allStreams() {
s.Reset()
}
+
c.net.removeConn(c)
- c.net.notifyAll(func(n inet.Notifiee) {
- n.Disconnected(c.net, c)
- })
- return nil
}
func (c *conn) addStream(s *stream) {
c.Lock()
+ defer c.Unlock()
s.conn = c
c.streams.PushBack(s)
- c.Unlock()
}
func (c *conn) removeStream(s *stream) {
@@ -81,11 +112,11 @@ func (c *conn) removeStream(s *stream) {
}
}
-func (c *conn) allStreams() []inet.Stream {
+func (c *conn) allStreams() []network.Stream {
c.RLock()
defer c.RUnlock()
- strs := make([]inet.Stream, 0, c.streams.Len())
+ strs := make([]network.Stream, 0, c.streams.Len())
for e := c.streams.Front(); e != nil; e = e.Next() {
s := e.Value.(*stream)
strs = append(strs, s)
@@ -96,34 +127,24 @@ func (c *conn) allStreams() []inet.Stream {
func (c *conn) remoteOpenedStream(s *stream) {
c.addStream(s)
c.net.handleNewStream(s)
- c.net.notifyAll(func(n inet.Notifiee) {
- n.OpenedStream(c.net, s)
- })
}
func (c *conn) openStream() *stream {
- sl, sr := c.link.newStreamPair()
+ sl, sr := newStreamPair()
+ go c.rconn.remoteOpenedStream(sr)
c.addStream(sl)
- c.net.notifyAll(func(n inet.Notifiee) {
- n.OpenedStream(c.net, sl)
- })
- c.rconn.remoteOpenedStream(sr)
return sl
}
-func (c *conn) NewStream() (inet.Stream, error) {
- log.Debugf("Conn.NewStreamWithProtocol: %s --> %s", c.local, c.remote)
+func (c *conn) NewStream(context.Context) (network.Stream, error) {
+ log.Debug("Conn.NewStreamWithProtocol", "source_peer", c.local, "destination_peer", c.remote)
s := c.openStream()
return s, nil
}
-func (c *conn) GetStreams() ([]inet.Stream, error) {
- var out []inet.Stream
- for e := c.streams.Front(); e != nil; e = e.Next() {
- out = append(out, e.Value.(*stream))
- }
- return out, nil
+func (c *conn) GetStreams() []network.Stream {
+ return c.allStreams()
}
// LocalMultiaddr is the Multiaddr on this side
@@ -136,11 +157,6 @@ func (c *conn) LocalPeer() peer.ID {
return c.local
}
-// LocalPrivateKey is the private key of the peer on our side.
-func (c *conn) LocalPrivateKey() ic.PrivKey {
- return c.localPrivKey
-}
-
// RemoteMultiaddr is the Multiaddr on the remote side
func (c *conn) RemoteMultiaddr() ma.Multiaddr {
return c.remoteAddr
@@ -155,3 +171,21 @@ func (c *conn) RemotePeer() peer.ID {
func (c *conn) RemotePublicKey() ic.PubKey {
return c.remotePubKey
}
+
+// ConnState of security connection. Empty if not supported.
+func (c *conn) ConnState() network.ConnectionState {
+ return network.ConnectionState{}
+}
+
+// Stat returns metadata about the connection
+func (c *conn) Stat() network.ConnStats {
+ return c.stat
+}
+
+func (c *conn) Scope() network.ConnScope {
+ return &network.NullScope{}
+}
+
+func (c *conn) CloseWithError(_ network.ConnErrorCode) error {
+ return c.Close()
+}
diff --git a/p2p/net/mock/mock_link.go b/p2p/net/mock/mock_link.go
index d4d605310d..47bf9a3f13 100644
--- a/p2p/net/mock/mock_link.go
+++ b/p2p/net/mock/mock_link.go
@@ -1,22 +1,20 @@
package mocknet
import (
- // "fmt"
- "io"
"sync"
"time"
- inet "github.com/libp2p/go-libp2p-net"
- peer "github.com/libp2p/go-libp2p-peer"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
)
// link implements mocknet.Link
-// and, for simplicity, inet.Conn
+// and, for simplicity, network.Conn
type link struct {
mock *mocknet
nets []*peernet
opts LinkOptions
- ratelimiter *ratelimiter
+ ratelimiter *RateLimiter
// this could have addresses on both sides.
sync.RWMutex
@@ -25,7 +23,7 @@ type link struct {
func newLink(mn *mocknet, opts LinkOptions) *link {
l := &link{mock: mn,
opts: opts,
- ratelimiter: NewRatelimiter(opts.Bandwidth)}
+ ratelimiter: NewRateLimiter(opts.Bandwidth)}
return l
}
@@ -33,31 +31,22 @@ func (l *link) newConnPair(dialer *peernet) (*conn, *conn) {
l.RLock()
defer l.RUnlock()
- c1 := newConn(l.nets[0], l.nets[1], l)
- c2 := newConn(l.nets[1], l.nets[0], l)
- c1.rconn = c2
- c2.rconn = c1
-
- if dialer == c1.net {
- return c1, c2
+ target := l.nets[0]
+ if target == dialer {
+ target = l.nets[1]
}
- return c2, c1
-}
-
-func (l *link) newStreamPair() (*stream, *stream) {
- ra, wb := io.Pipe()
- rb, wa := io.Pipe()
-
- sa := NewStream(wa, ra)
- sb := NewStream(wb, rb)
- return sa, sb
+ dc := newConn(dialer, target, l, network.DirOutbound)
+ tc := newConn(target, dialer, l, network.DirInbound)
+ dc.rconn = tc
+ tc.rconn = dc
+ return dc, tc
}
-func (l *link) Networks() []inet.Network {
+func (l *link) Networks() []network.Network {
l.RLock()
defer l.RUnlock()
- cp := make([]inet.Network, len(l.nets))
+ cp := make([]network.Network, len(l.nets))
for i, n := range l.nets {
cp[i] = n
}
@@ -76,15 +65,21 @@ func (l *link) Peers() []peer.ID {
}
func (l *link) SetOptions(o LinkOptions) {
+ l.Lock()
+ defer l.Unlock()
l.opts = o
l.ratelimiter.UpdateBandwidth(l.opts.Bandwidth)
}
func (l *link) Options() LinkOptions {
+ l.RLock()
+ defer l.RUnlock()
return l.opts
}
func (l *link) GetLatency() time.Duration {
+ l.RLock()
+ defer l.RUnlock()
return l.opts.Latency
}
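Since `SetOptions`/`Options` are now lock-protected, it is safe to tune a live link. A sketch (not part of the patch; the bandwidth figure is an arbitrary input to the link's rate limiter):

```go
package main

import (
	"time"

	mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
)

func main() {
	mn := mocknet.New()
	defer mn.Close()

	h1, _ := mn.GenPeer()
	h2, _ := mn.GenPeer()

	// Defaults applied to links created from here on.
	mn.SetLinkDefaults(mocknet.LinkOptions{Latency: 10 * time.Millisecond})

	l, _ := mn.LinkPeers(h1.ID(), h2.ID())
	// Per-link overrides; safe even while the link carries traffic.
	l.SetOptions(mocknet.LinkOptions{
		Latency:   50 * time.Millisecond,
		Bandwidth: 1 << 20, // fed to the link's RateLimiter
	})

	if _, err := mn.ConnectPeers(h1.ID(), h2.ID()); err != nil {
		panic(err)
	}
}
```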
diff --git a/p2p/net/mock/mock_net.go b/p2p/net/mock/mock_net.go
index 898292af8e..43294d4a54 100644
--- a/p2p/net/mock/mock_net.go
+++ b/p2p/net/mock/mock_net.go
@@ -2,28 +2,32 @@ package mocknet
import (
"context"
+ "crypto/rand"
"fmt"
+ "net"
"sort"
"sync"
- host "github.com/libp2p/go-libp2p-host"
+ ic "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
bhost "github.com/libp2p/go-libp2p/p2p/host/basic"
+ "github.com/libp2p/go-libp2p/p2p/host/eventbus"
+ "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem"
- "github.com/jbenet/goprocess"
- goprocessctx "github.com/jbenet/goprocess/context"
- ic "github.com/libp2p/go-libp2p-crypto"
- inet "github.com/libp2p/go-libp2p-net"
- p2putil "github.com/libp2p/go-libp2p-netutil"
- peer "github.com/libp2p/go-libp2p-peer"
- pstore "github.com/libp2p/go-libp2p-peerstore"
- testutil "github.com/libp2p/go-testutil"
ma "github.com/multiformats/go-multiaddr"
)
+// IP6 range that gets blackholed (in case our traffic ever makes it out onto
+// the internet).
+var blackholeIP6 = net.ParseIP("100::")
+
// mocknet implements mocknet.Mocknet
type mocknet struct {
nets map[peer.ID]*peernet
- hosts map[peer.ID]*bhost.BasicHost
+ hosts map[peer.ID]host.Host
// links make it possible to connect two peers.
// think of links as the physical medium.
@@ -33,30 +37,73 @@ type mocknet struct {
linkDefaults LinkOptions
- proc goprocess.Process // for Context closing
- ctx context.Context
+ ctxCancel context.CancelFunc
+ ctx context.Context
sync.Mutex
}
-func New(ctx context.Context) Mocknet {
- return &mocknet{
+func New() Mocknet {
+ mn := &mocknet{
nets: map[peer.ID]*peernet{},
- hosts: map[peer.ID]*bhost.BasicHost{},
+ hosts: map[peer.ID]host.Host{},
links: map[peer.ID]map[peer.ID]map[*link]struct{}{},
- proc: goprocessctx.WithContext(ctx),
- ctx: ctx,
}
+ mn.ctx, mn.ctxCancel = context.WithCancel(context.Background())
+ return mn
+}
+
+func (mn *mocknet) Close() error {
+ mn.ctxCancel()
+ for _, h := range mn.hosts {
+ h.Close()
+ }
+ for _, n := range mn.nets {
+ n.Close()
+ }
+ return nil
}
func (mn *mocknet) GenPeer() (host.Host, error) {
- sk, err := p2putil.RandTestBogusPrivateKey()
+ return mn.GenPeerWithOptions(PeerOptions{})
+}
+
+func (mn *mocknet) GenPeerWithOptions(opts PeerOptions) (host.Host, error) {
+ if err := mn.addDefaults(&opts); err != nil {
+ return nil, err
+ }
+ sk, _, err := ic.GenerateECDSAKeyPair(rand.Reader)
if err != nil {
return nil, err
}
+ id, err := peer.IDFromPrivateKey(sk)
+ if err != nil {
+ return nil, err
+ }
+ suffix := id
+ if len(id) > 8 {
+ suffix = id[len(id)-8:]
+ }
+ ip := append(net.IP{}, blackholeIP6...)
+ copy(ip[net.IPv6len-len(suffix):], suffix)
+ a, err := ma.NewMultiaddr(fmt.Sprintf("/ip6/%s/tcp/4242", ip))
+ if err != nil {
+ return nil, fmt.Errorf("failed to create test multiaddr: %s", err)
+ }
- a := testutil.RandLocalTCPAddress()
-
- h, err := mn.AddPeer(sk, a)
+ var ps peerstore.Peerstore
+ if opts.ps == nil {
+ ps, err = pstoremem.NewPeerstore()
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ ps = opts.ps
+ }
+ p, err := mn.updatePeerstore(sk, a, ps)
+ if err != nil {
+ return nil, err
+ }
+ h, err := mn.AddPeerWithOptions(p, opts)
if err != nil {
return nil, err
}
@@ -65,35 +112,43 @@ func (mn *mocknet) GenPeer() (host.Host, error) {
}
func (mn *mocknet) AddPeer(k ic.PrivKey, a ma.Multiaddr) (host.Host, error) {
- p, err := peer.IDFromPublicKey(k.GetPublic())
+ ps, err := pstoremem.NewPeerstore()
+ if err != nil {
+ return nil, err
+ }
+ p, err := mn.updatePeerstore(k, a, ps)
if err != nil {
return nil, err
}
-
- ps := pstore.NewPeerstore()
- ps.AddAddr(p, a, pstore.PermanentAddrTTL)
- ps.AddPrivKey(p, k)
- ps.AddPubKey(p, k.GetPublic())
return mn.AddPeerWithPeerstore(p, ps)
}
-func (mn *mocknet) AddPeerWithPeerstore(p peer.ID, ps pstore.Peerstore) (host.Host, error) {
- n, err := newPeernet(mn.ctx, mn, p, ps)
+func (mn *mocknet) AddPeerWithPeerstore(p peer.ID, ps peerstore.Peerstore) (host.Host, error) {
+ return mn.AddPeerWithOptions(p, PeerOptions{ps: ps})
+}
+
+func (mn *mocknet) AddPeerWithOptions(p peer.ID, opts PeerOptions) (host.Host, error) {
+ bus := eventbus.NewBus()
+ if err := mn.addDefaults(&opts); err != nil {
+ return nil, err
+ }
+ n, err := newPeernet(mn, p, opts, bus)
if err != nil {
return nil, err
}
- opts := &bhost.HostOpts{
- NegotiationTimeout: -1,
+ hostOpts := &bhost.HostOpts{
+ NegotiationTimeout: -1,
+ DisableSignedPeerRecord: true,
+ EventBus: bus,
}
- h, err := bhost.NewHost(mn.ctx, n, opts)
+ h, err := bhost.NewHost(n, hostOpts)
if err != nil {
return nil, err
}
-
- mn.proc.AddChild(n.proc)
+ h.Start()
mn.Lock()
mn.nets[n.peer] = n
@@ -102,6 +157,35 @@ func (mn *mocknet) AddPeerWithPeerstore(p peer.ID, ps pstore.Peerstore) (host.Ho
return h, nil
}
+func (mn *mocknet) addDefaults(opts *PeerOptions) error {
+ if opts.ps == nil {
+ ps, err := pstoremem.NewPeerstore()
+ if err != nil {
+ return err
+ }
+ opts.ps = ps
+ }
+ return nil
+}
+
+func (mn *mocknet) updatePeerstore(k ic.PrivKey, a ma.Multiaddr, ps peerstore.Peerstore) (peer.ID, error) {
+ p, err := peer.IDFromPublicKey(k.GetPublic())
+ if err != nil {
+ return "", err
+ }
+
+ ps.AddAddr(p, a, peerstore.PermanentAddrTTL)
+ err = ps.AddPrivKey(p, k)
+ if err != nil {
+ return "", err
+ }
+ err = ps.AddPubKey(p, k.GetPublic())
+ if err != nil {
+ return "", err
+ }
+ return p, nil
+}
+
func (mn *mocknet) Peers() []peer.ID {
mn.Lock()
defer mn.Unlock()
@@ -121,7 +205,7 @@ func (mn *mocknet) Host(pid peer.ID) host.Host {
return host
}
-func (mn *mocknet) Net(pid peer.ID) inet.Network {
+func (mn *mocknet) Net(pid peer.ID) network.Network {
mn.Lock()
n := mn.nets[pid]
mn.Unlock()
@@ -141,11 +225,11 @@ func (mn *mocknet) Hosts() []host.Host {
return cp
}
-func (mn *mocknet) Nets() []inet.Network {
+func (mn *mocknet) Nets() []network.Network {
mn.Lock()
defer mn.Unlock()
- cp := make([]inet.Network, 0, len(mn.nets))
+ cp := make([]network.Network, 0, len(mn.nets))
for _, n := range mn.nets {
cp = append(cp, n)
}
@@ -203,22 +287,22 @@ func (mn *mocknet) LinkPeers(p1, p2 peer.ID) (Link, error) {
return mn.LinkNets(n1, n2)
}
-func (mn *mocknet) validate(n inet.Network) (*peernet, error) {
+func (mn *mocknet) validate(n network.Network) (*peernet, error) {
// WARNING: assumes locks acquired
nr, ok := n.(*peernet)
if !ok {
- return nil, fmt.Errorf("Network not supported (use mock package nets only)")
+ return nil, fmt.Errorf("network not supported (use mock package nets only)")
}
if _, found := mn.nets[nr.peer]; !found {
- return nil, fmt.Errorf("Network not on mocknet. is it from another mocknet?")
+ return nil, fmt.Errorf("network not on mocknet. is it from another mocknet?")
}
return nr, nil
}
-func (mn *mocknet) LinkNets(n1, n2 inet.Network) (Link, error) {
+func (mn *mocknet) LinkNets(n1, n2 network.Network) (Link, error) {
mn.Lock()
n1r, err1 := mn.validate(n1)
n2r, err2 := mn.validate(n2)
@@ -263,11 +347,11 @@ func (mn *mocknet) UnlinkPeers(p1, p2 peer.ID) error {
return nil
}
-func (mn *mocknet) UnlinkNets(n1, n2 inet.Network) error {
+func (mn *mocknet) UnlinkNets(n1, n2 network.Network) error {
return mn.UnlinkPeers(n1.LocalPeer(), n2.LocalPeer())
}
-// get from the links map. and lazily contruct.
+// get from the links map. and lazily construct.
func (mn *mocknet) linksMapGet(p1, p2 peer.ID) map[*link]struct{} {
l1, found := mn.links[p1]
@@ -320,11 +404,11 @@ func (mn *mocknet) ConnectAllButSelf() error {
return nil
}
-func (mn *mocknet) ConnectPeers(a, b peer.ID) (inet.Conn, error) {
+func (mn *mocknet) ConnectPeers(a, b peer.ID) (network.Conn, error) {
return mn.Net(a).DialPeer(mn.ctx, b)
}
-func (mn *mocknet) ConnectNets(a, b inet.Network) (inet.Conn, error) {
+func (mn *mocknet) ConnectNets(a, b network.Network) (network.Conn, error) {
return a.DialPeer(mn.ctx, b.LocalPeer())
}
@@ -332,7 +416,7 @@ func (mn *mocknet) DisconnectPeers(p1, p2 peer.ID) error {
return mn.Net(p1).ClosePeer(p2)
}
-func (mn *mocknet) DisconnectNets(n1, n2 inet.Network) error {
+func (mn *mocknet) DisconnectNets(n1, n2 network.Network) error {
return n1.ClosePeer(n2.LocalPeer())
}
@@ -348,7 +432,7 @@ func (mn *mocknet) LinksBetweenPeers(p1, p2 peer.ID) []Link {
return cp
}
-func (mn *mocknet) LinksBetweenNets(n1, n2 inet.Network) []Link {
+func (mn *mocknet) LinksBetweenNets(n1, n2 network.Network) []Link {
return mn.LinksBetweenPeers(n1.LocalPeer(), n2.LocalPeer())
}
@@ -365,7 +449,7 @@ func (mn *mocknet) LinkDefaults() LinkOptions {
}
// netSlice for sorting by peer
-type netSlice []inet.Network
+type netSlice []network.Network
func (es netSlice) Len() int { return len(es) }
func (es netSlice) Swap(i, j int) { es[i], es[j] = es[j], es[i] }
diff --git a/p2p/net/mock/mock_notif_test.go b/p2p/net/mock/mock_notif_test.go
index 3521d6f869..713e0a5d82 100644
--- a/p2p/net/mock/mock_notif_test.go
+++ b/p2p/net/mock/mock_notif_test.go
@@ -2,31 +2,36 @@ package mocknet
import (
"context"
+ "sync"
"testing"
"time"
- inet "github.com/libp2p/go-libp2p-net"
- peer "github.com/libp2p/go-libp2p-peer"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+
ma "github.com/multiformats/go-multiaddr"
+ "github.com/stretchr/testify/require"
)
func TestNotifications(t *testing.T) {
const swarmSize = 5
+ const timeout = 10 * time.Second
- mn, err := FullMeshLinked(context.Background(), swarmSize)
+ mn, err := FullMeshLinked(swarmSize)
if err != nil {
t.Fatal(err)
}
-
- timeout := 10 * time.Second
+ defer mn.Close()
// signup notifs
nets := mn.Nets()
- notifiees := make([]*netNotifiee, len(nets))
- for i, pn := range nets {
- n := newNetNotifiee(swarmSize)
+ notifiees := make(map[peer.ID]*netNotifiee, len(nets))
+ for _, pn := range nets {
+ defer pn.Close()
+
+ n := newNetNotifiee(t, swarmSize)
pn.Notify(n)
- notifiees[i] = n
+ notifiees[pn.LocalPeer()] = n
}
// connect all but self
@@ -35,16 +40,16 @@ func TestNotifications(t *testing.T) {
}
// test everyone got the correct connection opened calls
- for i, s := range nets {
- n := notifiees[i]
- notifs := make(map[peer.ID][]inet.Conn)
- for j, s2 := range nets {
- if i == j {
+ for _, s1 := range nets {
+ n := notifiees[s1.LocalPeer()]
+ notifs := make(map[peer.ID][]network.Conn)
+ for _, s2 := range nets {
+ if s2 == s1 {
continue
}
// this feels a little sketchy, but it's probably okay
- for len(s.ConnsToPeer(s2.LocalPeer())) != len(notifs[s2.LocalPeer()]) {
+ for len(s1.ConnsToPeer(s2.LocalPeer())) != len(notifs[s2.LocalPeer()]) {
select {
case c := <-n.connected:
nfp := notifs[c.RemotePeer()]
@@ -56,7 +61,7 @@ func TestNotifications(t *testing.T) {
}
for p, cons := range notifs {
- expect := s.ConnsToPeer(p)
+ expect := s1.ConnsToPeer(p)
if len(expect) != len(cons) {
t.Fatal("got different number of connections")
}
@@ -77,100 +82,45 @@ func TestNotifications(t *testing.T) {
}
}
- complement := func(c inet.Conn) (inet.Network, *netNotifiee, *conn) {
- for i, s := range nets {
- for _, c2 := range s.Conns() {
- if c2.(*conn).rconn == c {
- return s, notifiees[i], c2.(*conn)
- }
- }
- }
- t.Fatal("complementary conn not found", c)
- return nil, nil, nil
- }
-
- testOCStream := func(n *netNotifiee, s inet.Stream) {
- var s2 inet.Stream
- select {
- case s2 = <-n.openedStream:
- t.Log("got notif for opened stream")
- case <-time.After(timeout):
- t.Fatal("timeout")
- }
- if s != nil && s != s2 {
- t.Fatalf("got incorrect stream %p %p", s, s2)
- }
-
- select {
- case s2 = <-n.closedStream:
- t.Log("got notif for closed stream")
- case <-time.After(timeout):
- t.Fatal("timeout")
- }
- if s != nil && s != s2 {
- t.Fatalf("got incorrect stream %p %p", s, s2)
- }
- }
-
+ acceptedStream := make(chan struct{}, 1000)
for _, s := range nets {
- s.SetStreamHandler(func(s inet.Stream) {
+ s.SetStreamHandler(func(s network.Stream) {
+ acceptedStream <- struct{}{}
s.Close()
})
}
- // there's one stream per conn that we need to drain....
- // unsure where these are coming from
- for i := range nets {
- n := notifiees[i]
- for j := 0; j < len(nets)-1; j++ {
- testOCStream(n, nil)
- }
- }
-
- streams := make(chan inet.Stream)
+ // Make sure we've received at last one stream per conn.
for _, s := range nets {
- s.SetStreamHandler(func(s inet.Stream) {
- streams <- s
- s.Close()
- })
- }
-
- // open a streams in each conn
- for i, s := range nets {
conns := s.Conns()
for _, c := range conns {
- _, n2, c2 := complement(c)
- st1, err := c.NewStream()
+ st1, err := c.NewStream(context.Background())
if err != nil {
t.Error(err)
- } else {
- t.Logf("%s %s <--%p--> %s %s", c.LocalPeer(), c.LocalMultiaddr(), st1, c.RemotePeer(), c.RemoteMultiaddr())
- // st1.Write([]byte("hello"))
- st1.Close()
- st2 := <-streams
- t.Logf("%s %s <--%p--> %s %s", c2.LocalPeer(), c2.LocalMultiaddr(), st2, c2.RemotePeer(), c2.RemoteMultiaddr())
- testOCStream(notifiees[i], st1)
- testOCStream(n2, st2)
+ continue
}
+ t.Logf("%s %s <--%p--> %s %s", c.LocalPeer(), c.LocalMultiaddr(), st1, c.RemotePeer(), c.RemoteMultiaddr())
+ st1.Close()
}
}
// close conns
- for i, s := range nets {
- n := notifiees[i]
- for _, c := range s.Conns() {
- _, n2, c2 := complement(c)
- c.(*conn).Close()
- c2.Close()
-
- var c3, c4 inet.Conn
+ for _, s1 := range nets {
+ n1 := notifiees[s1.LocalPeer()]
+ for _, c1 := range s1.Conns() {
+ c2 := ConnComplement(c1)
+
+ n2 := notifiees[c2.LocalPeer()]
+ c1.Close()
+
+ var c3, c4 network.Conn
select {
- case c3 = <-n.disconnected:
+ case c3 = <-n1.disconnected:
case <-time.After(timeout):
t.Fatal("timeout")
}
- if c != c3 {
- t.Fatal("got incorrect conn", c, c3)
+ if c1 != c3 {
+ t.Fatal("got incorrect conn", c1, c3)
}
select {
@@ -179,47 +129,77 @@ func TestNotifications(t *testing.T) {
t.Fatal("timeout")
}
if c2 != c4 {
- t.Fatal("got incorrect conn", c, c2)
+ t.Fatal("got incorrect conn", c1, c2)
}
}
}
+
+ for _, n1 := range notifiees {
+ // Avoid holding this lock while waiting, otherwise we can deadlock.
+ streamStateCopy := map[network.Stream]chan struct{}{}
+ n1.streamState.Lock()
+ for str, ch := range n1.streamState.m {
+ streamStateCopy[str] = ch
+ }
+ n1.streamState.Unlock()
+
+ for str1, ch1 := range streamStateCopy {
+ <-ch1
+ str2 := StreamComplement(str1)
+ n2 := notifiees[str1.Conn().RemotePeer()]
+
+ // make sure the OpenedStream notification was processed first
+ var ch2 chan struct{}
+ require.Eventually(t, func() bool {
+ n2.streamState.Lock()
+ defer n2.streamState.Unlock()
+ ch, ok := n2.streamState.m[str2]
+ if ok {
+ ch2 = ch
+ }
+ return ok
+ }, time.Second, 10*time.Millisecond)
+
+ <-ch2
+ }
+ }
}
type netNotifiee struct {
+ t *testing.T
+
listen chan ma.Multiaddr
listenClose chan ma.Multiaddr
- connected chan inet.Conn
- disconnected chan inet.Conn
- openedStream chan inet.Stream
- closedStream chan inet.Stream
+ connected chan network.Conn
+ disconnected chan network.Conn
+
+ streamState struct {
+ sync.Mutex
+ m map[network.Stream]chan struct{}
+ }
}
-func newNetNotifiee(buffer int) *netNotifiee {
- return &netNotifiee{
- listen: make(chan ma.Multiaddr, buffer),
- listenClose: make(chan ma.Multiaddr, buffer),
- connected: make(chan inet.Conn, buffer),
- disconnected: make(chan inet.Conn, buffer),
- openedStream: make(chan inet.Stream, buffer),
- closedStream: make(chan inet.Stream, buffer),
+func newNetNotifiee(t *testing.T, buffer int) *netNotifiee {
+ nn := &netNotifiee{
+ t: t,
+ listen: make(chan ma.Multiaddr, 1),
+ listenClose: make(chan ma.Multiaddr, 1),
+ connected: make(chan network.Conn, buffer*2),
+ disconnected: make(chan network.Conn, buffer*2),
}
+ nn.streamState.m = make(map[network.Stream]chan struct{})
+ return nn
}
-func (nn *netNotifiee) Listen(n inet.Network, a ma.Multiaddr) {
+func (nn *netNotifiee) Listen(_ network.Network, a ma.Multiaddr) {
nn.listen <- a
}
-func (nn *netNotifiee) ListenClose(n inet.Network, a ma.Multiaddr) {
+func (nn *netNotifiee) ListenClose(_ network.Network, a ma.Multiaddr) {
nn.listenClose <- a
}
-func (nn *netNotifiee) Connected(n inet.Network, v inet.Conn) {
+func (nn *netNotifiee) Connected(_ network.Network, v network.Conn) {
nn.connected <- v
}
-func (nn *netNotifiee) Disconnected(n inet.Network, v inet.Conn) {
+func (nn *netNotifiee) Disconnected(_ network.Network, v network.Conn) {
nn.disconnected <- v
}
-func (nn *netNotifiee) OpenedStream(n inet.Network, v inet.Stream) {
- nn.openedStream <- v
-}
-func (nn *netNotifiee) ClosedStream(n inet.Network, v inet.Stream) {
- nn.closedStream <- v
-}
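
The rewritten test implements network.Notifiee by hand; callers that only care about a few signals can register a network.NotifyBundle instead and fill in just the callbacks they need. A minimal sketch against the core/network and mocknet APIs appearing in this diff (the printed messages are illustrative):

```go
package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p/core/network"
	mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
	ma "github.com/multiformats/go-multiaddr"
)

func main() {
	mn := mocknet.New()
	defer mn.Close()

	h, err := mn.GenPeer() // one mock host; its Network() is a peernet
	if err != nil {
		panic(err)
	}

	// NotifyBundle invokes only the callbacks that are non-nil; the rest
	// of the Notifiee interface is satisfied by no-ops.
	h.Network().Notify(&network.NotifyBundle{
		ListenF: func(_ network.Network, a ma.Multiaddr) {
			fmt.Println("listening on", a)
		},
		ConnectedF: func(_ network.Network, c network.Conn) {
			fmt.Println("connected to", c.RemotePeer())
		},
		DisconnectedF: func(_ network.Network, c network.Conn) {
			fmt.Println("disconnected from", c.RemotePeer())
		},
	})
}
```
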
diff --git a/p2p/net/mock/mock_peernet.go b/p2p/net/mock/mock_peernet.go
index 42361ac34a..07d4d56c30 100644
--- a/p2p/net/mock/mock_peernet.go
+++ b/p2p/net/mock/mock_peernet.go
@@ -1,25 +1,27 @@
package mocknet
import (
+ "bytes"
"context"
"fmt"
"math/rand"
"sync"
- "github.com/jbenet/goprocess"
- goprocessctx "github.com/jbenet/goprocess/context"
- inet "github.com/libp2p/go-libp2p-net"
- peer "github.com/libp2p/go-libp2p-peer"
- pstore "github.com/libp2p/go-libp2p-peerstore"
+ "github.com/libp2p/go-libp2p/core/connmgr"
+ "github.com/libp2p/go-libp2p/core/event"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
ma "github.com/multiformats/go-multiaddr"
)
-// peernet implements inet.Network
+// peernet implements network.Network
type peernet struct {
mocknet *mocknet // parent
- peer peer.ID
- ps pstore.Peerstore
+ peer peer.ID
+ ps peerstore.Peerstore
+ emitter event.Emitter
// conns are actual live connections between peers.
// many conns could run over each link.
@@ -27,42 +29,48 @@ type peernet struct {
connsByPeer map[peer.ID]map[*conn]struct{}
connsByLink map[*link]map[*conn]struct{}
- // implement inet.Network
- streamHandler inet.StreamHandler
- connHandler inet.ConnHandler
+ // connection gater to check before dialing or accepting connections. May be nil to allow all.
+ gater connmgr.ConnectionGater
+
+ // implement network.Network
+ streamHandler network.StreamHandler
notifmu sync.Mutex
- notifs map[inet.Notifiee]struct{}
+ notifs map[network.Notifiee]struct{}
- proc goprocess.Process
sync.RWMutex
}
// newPeernet constructs a new peernet
-func newPeernet(ctx context.Context, m *mocknet, p peer.ID, ps pstore.Peerstore) (*peernet, error) {
+func newPeernet(m *mocknet, p peer.ID, opts PeerOptions, bus event.Bus) (*peernet, error) {
+ emitter, err := bus.Emitter(&event.EvtPeerConnectednessChanged{})
+ if err != nil {
+ return nil, err
+ }
n := &peernet{
mocknet: m,
peer: p,
- ps: ps,
+ ps: opts.ps,
+ gater: opts.gater,
+ emitter: emitter,
connsByPeer: map[peer.ID]map[*conn]struct{}{},
connsByLink: map[*link]map[*conn]struct{}{},
- notifs: make(map[inet.Notifiee]struct{}),
+ notifs: make(map[network.Notifiee]struct{}),
}
- n.proc = goprocessctx.WithContextAndTeardown(ctx, n.teardown)
return n, nil
}
-func (pn *peernet) teardown() error {
-
+func (pn *peernet) Close() error {
// close the connections
for _, c := range pn.allConns() {
c.Close()
}
- return nil
+ pn.emitter.Close()
+ return pn.ps.Close()
}
// allConns returns all the connections between this peer and others
@@ -78,12 +86,7 @@ func (pn *peernet) allConns() []*conn {
return cs
}
-// Close calls the ContextCloser func
-func (pn *peernet) Close() error {
- return pn.proc.Close()
-}
-
-func (pn *peernet) Peerstore() pstore.Peerstore {
+func (pn *peernet) Peerstore() peerstore.Peerstore {
return pn.ps
}
@@ -92,7 +95,7 @@ func (pn *peernet) String() string {
}
// handleNewStream is an internal function to trigger the client's handler
-func (pn *peernet) handleNewStream(s inet.Stream) {
+func (pn *peernet) handleNewStream(s network.Stream) {
pn.RLock()
handler := pn.streamHandler
pn.RUnlock()
@@ -101,23 +104,17 @@ func (pn *peernet) handleNewStream(s inet.Stream) {
}
}
-// handleNewConn is an internal function to trigger the client's handler
-func (pn *peernet) handleNewConn(c inet.Conn) {
- pn.RLock()
- handler := pn.connHandler
- pn.RUnlock()
- if handler != nil {
- go handler(c)
- }
-}
-
// DialPeer attempts to establish a connection to a given peer.
// Respects the context.
-func (pn *peernet) DialPeer(ctx context.Context, p peer.ID) (inet.Conn, error) {
+func (pn *peernet) DialPeer(_ context.Context, p peer.ID) (network.Conn, error) {
return pn.connect(p)
}
func (pn *peernet) connect(p peer.ID) (*conn, error) {
+ if p == pn.peer {
+ return nil, fmt.Errorf("attempted to dial self %s", p)
+ }
+
// first, check if we already have live connections
pn.RLock()
cs, found := pn.connsByPeer[p]
@@ -132,7 +129,11 @@ func (pn *peernet) connect(p peer.ID) (*conn, error) {
}
pn.RUnlock()
- log.Debugf("%s (newly) dialing %s", pn.peer, p)
+ if pn.gater != nil && !pn.gater.InterceptPeerDial(p) {
+ log.Debug("gater disallowed outbound connection to peer", "peer", p)
+ return nil, fmt.Errorf("%v connection gater disallowed connection to %v", pn.peer, p)
+ }
+ log.Debug("(newly) dialing peer", "source_peer", pn.peer, "destination_peer", p)
// ok, must create a new connection. we need a link
links := pn.mocknet.LinksBetweenPeers(pn.peer, p)
@@ -145,58 +146,112 @@ func (pn *peernet) connect(p peer.ID) (*conn, error) {
// links (network interfaces) and select properly
l := links[rand.Intn(len(links))]
- log.Debugf("%s dialing %s openingConn", pn.peer, p)
+ log.Debug("dialing peer openingConn", "source_peer", pn.peer, "destination_peer", p)
// create a new connection with link
- c := pn.openConn(p, l.(*link))
- return c, nil
+ return pn.openConn(p, l.(*link))
}
-func (pn *peernet) openConn(r peer.ID, l *link) *conn {
+func (pn *peernet) openConn(_ peer.ID, l *link) (*conn, error) {
lc, rc := l.newConnPair(pn)
- log.Debugf("%s opening connection to %s", pn.LocalPeer(), lc.RemotePeer())
+ addConnPair(pn, rc.net, lc, rc)
+ log.Debug("opening connection", "source_peer", pn.LocalPeer(), "destination_peer", lc.RemotePeer())
+ abort := func() {
+ _ = lc.Close()
+ _ = rc.Close()
+ }
+ if pn.gater != nil && !pn.gater.InterceptAddrDial(lc.remote, lc.remoteAddr) {
+ abort()
+ return nil, fmt.Errorf("%v rejected dial to %v on addr %v", lc.local, lc.remote, lc.remoteAddr)
+ }
+ if rc.net.gater != nil && !rc.net.gater.InterceptAccept(rc) {
+ abort()
+ return nil, fmt.Errorf("%v rejected connection from %v", rc.local, rc.remote)
+ }
+ if err := checkSecureAndUpgrade(network.DirOutbound, pn.gater, lc); err != nil {
+ abort()
+ return nil, err
+ }
+ if err := checkSecureAndUpgrade(network.DirInbound, rc.net.gater, rc); err != nil {
+ abort()
+ return nil, err
+ }
+
+ go rc.net.remoteOpenedConn(rc)
pn.addConn(lc)
- pn.notifyAll(func(n inet.Notifiee) {
- n.Connected(pn, lc)
- })
- rc.net.remoteOpenedConn(rc)
- return lc
+ return lc, nil
+}
+
+func checkSecureAndUpgrade(dir network.Direction, gater connmgr.ConnectionGater, c *conn) error {
+ if gater == nil {
+ return nil
+ }
+ if !gater.InterceptSecured(dir, c.remote, c) {
+ return fmt.Errorf("%v rejected secure handshake with %v", c.local, c.remote)
+ }
+ allow, _ := gater.InterceptUpgraded(c)
+ if !allow {
+ return fmt.Errorf("%v rejected upgrade with %v", c.local, c.remote)
+ }
+ return nil
+}
+
+// addConnPair adds a connection to both peernets at the same time.
+// It must be followed by pn1.addConn(c1) and pn2.addConn(c2).
+func addConnPair(pn1, pn2 *peernet, c1, c2 *conn) {
+ var l1, l2 = pn1, pn2 // peernets in lock order
+ // compare peer IDs as raw bytes; this matches lexicographic string order
+ if bytes.Compare([]byte(l1.LocalPeer()), []byte(l2.LocalPeer())) > 0 {
+ l1, l2 = l2, l1
+ }
+
+ l1.Lock()
+ l2.Lock()
+
+ add := func(pn *peernet, c *conn) {
+ _, found := pn.connsByPeer[c.RemotePeer()]
+ if !found {
+ pn.connsByPeer[c.RemotePeer()] = map[*conn]struct{}{}
+ }
+ pn.connsByPeer[c.RemotePeer()][c] = struct{}{}
+
+ _, found = pn.connsByLink[c.link]
+ if !found {
+ pn.connsByLink[c.link] = map[*conn]struct{}{}
+ }
+ pn.connsByLink[c.link][c] = struct{}{}
+ }
+ add(pn1, c1)
+ add(pn2, c2)
+
+ c1.notifLk.Lock()
+ c2.notifLk.Lock()
+ l2.Unlock()
+ l1.Unlock()
}
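
addConnPair above avoids an AB/BA deadlock by always acquiring the two peernet locks in a canonical order derived from the peer IDs. The same pattern in isolation, as a sketch (lockPair and the byte-slice IDs are illustrative, not mocknet API):

```go
package main

import (
	"bytes"
	"sync"
)

// lockPair locks two mutexes identified by comparable byte-slice IDs in one
// canonical order. Two goroutines locking the same pair from opposite ends
// then acquire in the same order, so they cannot deadlock.
func lockPair(idA, idB []byte, a, b *sync.Mutex) (unlock func()) {
	if bytes.Compare(idA, idB) > 0 {
		a, b = b, a // swap so the smaller ID's lock is taken first
	}
	a.Lock()
	b.Lock()
	return func() {
		b.Unlock() // release in reverse acquisition order
		a.Unlock()
	}
}

func main() {
	var m1, m2 sync.Mutex
	unlock := lockPair([]byte("peerA"), []byte("peerB"), &m1, &m2)
	defer unlock()
}
```
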
func (pn *peernet) remoteOpenedConn(c *conn) {
- log.Debugf("%s accepting connection from %s", pn.LocalPeer(), c.RemotePeer())
+ log.Debug("accepting connection", "source_peer", pn.LocalPeer(), "destination_peer", c.RemotePeer())
pn.addConn(c)
- pn.handleNewConn(c)
- pn.notifyAll(func(n inet.Notifiee) {
- n.Connected(pn, c)
- })
}
// addConn constructs and adds a connection
// to given remote peer over given link
func (pn *peernet) addConn(c *conn) {
- pn.Lock()
- defer pn.Unlock()
+ defer c.notifLk.Unlock()
- cs, found := pn.connsByPeer[c.RemotePeer()]
- if !found {
- cs = map[*conn]struct{}{}
- pn.connsByPeer[c.RemotePeer()] = cs
- }
- pn.connsByPeer[c.RemotePeer()][c] = struct{}{}
+ pn.notifyAll(func(n network.Notifiee) {
+ n.Connected(pn, c)
+ })
- cs, found = pn.connsByLink[c.link]
- if !found {
- cs = map[*conn]struct{}{}
- pn.connsByLink[c.link] = cs
- }
- pn.connsByLink[c.link][c] = struct{}{}
+ pn.emitter.Emit(event.EvtPeerConnectednessChanged{
+ Peer: c.remote,
+ Connectedness: network.Connected,
+ })
}
// removeConn removes a given conn
func (pn *peernet) removeConn(c *conn) {
pn.Lock()
- defer pn.Unlock()
-
cs, found := pn.connsByLink[c.link]
if !found || len(cs) < 1 {
panic(fmt.Sprintf("attempting to remove a conn that doesnt exist %p", c.link))
@@ -208,11 +263,22 @@ func (pn *peernet) removeConn(c *conn) {
panic(fmt.Sprintf("attempting to remove a conn that doesnt exist %v", c.remote))
}
delete(cs, c)
-}
+ pn.Unlock()
-// Process returns the network's Process
-func (pn *peernet) Process() goprocess.Process {
- return pn.proc
+ // notify asynchronously to mimic Swarm
+ // FIXME: IIRC, we wanted to make notify for Close synchronous
+ go func() {
+ c.notifLk.Lock()
+ defer c.notifLk.Unlock()
+ pn.notifyAll(func(n network.Notifiee) {
+ n.Disconnected(c.net, c)
+ })
+ }()
+
+ c.net.emitter.Emit(event.EvtPeerConnectednessChanged{
+ Peer: c.remote,
+ Connectedness: network.NotConnected,
+ })
}
// LocalPeer the network's LocalPeer
@@ -236,11 +302,11 @@ func (pn *peernet) Peers() []peer.ID {
}
// Conns returns all the connections of this peer
-func (pn *peernet) Conns() []inet.Conn {
+func (pn *peernet) Conns() []network.Conn {
pn.RLock()
defer pn.RUnlock()
- out := make([]inet.Conn, 0, len(pn.connsByPeer))
+ out := make([]network.Conn, 0, len(pn.connsByPeer))
for _, cs := range pn.connsByPeer {
for c := range cs {
out = append(out, c)
@@ -249,7 +315,7 @@ func (pn *peernet) Conns() []inet.Conn {
return out
}
-func (pn *peernet) ConnsToPeer(p peer.ID) []inet.Conn {
+func (pn *peernet) ConnsToPeer(p peer.ID) []network.Conn {
pn.RLock()
defer pn.RUnlock()
@@ -258,7 +324,7 @@ func (pn *peernet) ConnsToPeer(p peer.ID) []inet.Conn {
return nil
}
- var cs2 []inet.Conn
+ cs2 := make([]network.Conn, 0, len(cs))
for c := range cs {
cs2 = append(cs2, c)
}
@@ -274,7 +340,7 @@ func (pn *peernet) ClosePeer(p peer.ID) error {
return nil
}
- var conns []*conn
+ conns := make([]*conn, 0, len(cs))
for c := range cs {
conns = append(conns, c)
}
@@ -294,7 +360,12 @@ func (pn *peernet) BandwidthTotals() (in uint64, out uint64) {
// Listen tells the network to start listening on given multiaddrs.
func (pn *peernet) Listen(addrs ...ma.Multiaddr) error {
- pn.Peerstore().AddAddrs(pn.LocalPeer(), addrs, pstore.PermanentAddrTTL)
+ pn.Peerstore().AddAddrs(pn.LocalPeer(), addrs, peerstore.PermanentAddrTTL)
+ for _, a := range addrs {
+ pn.notifyAll(func(n network.Notifiee) {
+ n.Listen(pn, a)
+ })
+ }
return nil
}
@@ -312,86 +383,63 @@ func (pn *peernet) InterfaceListenAddresses() ([]ma.Multiaddr, error) {
// Connectedness returns a state signaling connection capabilities
// For now only returns Connecter || NotConnected. Expand into more later.
-func (pn *peernet) Connectedness(p peer.ID) inet.Connectedness {
+func (pn *peernet) Connectedness(p peer.ID) network.Connectedness {
pn.Lock()
defer pn.Unlock()
cs, found := pn.connsByPeer[p]
if found && len(cs) > 0 {
- return inet.Connected
+ return network.Connected
}
- return inet.NotConnected
+ return network.NotConnected
}
// NewStream returns a new stream to given peer p.
// If there is no connection to p, attempts to create one.
-func (pn *peernet) NewStream(ctx context.Context, p peer.ID) (inet.Stream, error) {
- pn.Lock()
- cs, found := pn.connsByPeer[p]
- if !found || len(cs) < 1 {
- pn.Unlock()
- return nil, fmt.Errorf("no connection to peer")
- }
-
- // if many conns are found, how do we select? for now, randomly...
- // this would be an interesting place to test logic that can measure
- // links (network interfaces) and select properly
- n := rand.Intn(len(cs))
- var c *conn
- for c = range cs {
- if n == 0 {
- break
- }
- n--
+func (pn *peernet) NewStream(ctx context.Context, p peer.ID) (network.Stream, error) {
+ c, err := pn.DialPeer(ctx, p)
+ if err != nil {
+ return nil, err
}
- pn.Unlock()
-
- return c.NewStream()
+ return c.NewStream(ctx)
}
// SetStreamHandler sets the new stream handler on the Network.
-// This operation is threadsafe.
-func (pn *peernet) SetStreamHandler(h inet.StreamHandler) {
+// This operation is thread-safe.
+func (pn *peernet) SetStreamHandler(h network.StreamHandler) {
pn.Lock()
pn.streamHandler = h
pn.Unlock()
}
-// SetConnHandler sets the new conn handler on the Network.
-// This operation is threadsafe.
-func (pn *peernet) SetConnHandler(h inet.ConnHandler) {
- pn.Lock()
- pn.connHandler = h
- pn.Unlock()
-}
-
// Notify signs up Notifiee to receive signals when events happen
-func (pn *peernet) Notify(f inet.Notifiee) {
+func (pn *peernet) Notify(f network.Notifiee) {
pn.notifmu.Lock()
pn.notifs[f] = struct{}{}
pn.notifmu.Unlock()
}
-// StopNotify unregisters Notifiee fromr receiving signals
-func (pn *peernet) StopNotify(f inet.Notifiee) {
+// StopNotify unregisters Notifiee from receiving signals
+func (pn *peernet) StopNotify(f network.Notifiee) {
pn.notifmu.Lock()
delete(pn.notifs, f)
pn.notifmu.Unlock()
}
// notifyAll runs the notification function on all Notifiees
-func (pn *peernet) notifyAll(notification func(f inet.Notifiee)) {
+func (pn *peernet) notifyAll(notification func(f network.Notifiee)) {
pn.notifmu.Lock()
- var wg sync.WaitGroup
+ // notify synchronously to mimic Swarm
for n := range pn.notifs {
- // make sure we dont block
- // and they dont block each other.
- wg.Add(1)
- go func(n inet.Notifiee) {
- defer wg.Done()
- notification(n)
- }(n)
- }
- wg.Wait()
+ notification(n)
+ }
pn.notifmu.Unlock()
}
+
+func (pn *peernet) ResourceManager() network.ResourceManager {
+ return &network.NullResourceManager{}
+}
+
+func (pn *peernet) CanDial(_ peer.ID, _ ma.Multiaddr) bool {
+ return true
+}
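
The gating hooks in openConn fire in a fixed order: InterceptPeerDial and InterceptAddrDial on the dialer, InterceptAccept on the listener, then InterceptSecured and InterceptUpgraded on both sides. A minimal connmgr.ConnectionGater that blocks a single peer at every stage might look like the sketch below (denyPeerGater and the sample ID are illustrative; p2p/net/conngater, used in the tests, is a full implementation):

```go
package main

import (
	"github.com/libp2p/go-libp2p/core/connmgr"
	"github.com/libp2p/go-libp2p/core/control"
	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
	ma "github.com/multiformats/go-multiaddr"
)

// denyPeerGater blocks one peer at every intercept point the mock peernet
// consults: InterceptPeerDial before dialing, InterceptAddrDial per address,
// InterceptAccept on the inbound side, then InterceptSecured and
// InterceptUpgraded after the (mock) handshake.
type denyPeerGater struct{ blocked peer.ID }

var _ connmgr.ConnectionGater = (*denyPeerGater)(nil)

func (g *denyPeerGater) InterceptPeerDial(p peer.ID) bool { return p != g.blocked }

func (g *denyPeerGater) InterceptAddrDial(p peer.ID, _ ma.Multiaddr) bool {
	return p != g.blocked
}

func (g *denyPeerGater) InterceptAccept(network.ConnMultiaddrs) bool { return true }

func (g *denyPeerGater) InterceptSecured(_ network.Direction, p peer.ID, _ network.ConnMultiaddrs) bool {
	return p != g.blocked
}

func (g *denyPeerGater) InterceptUpgraded(network.Conn) (bool, control.DisconnectReason) {
	return true, 0
}

func main() {
	var g connmgr.ConnectionGater = &denyPeerGater{blocked: peer.ID("not-a-real-id")} // illustrative ID
	_ = g
}
```
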
diff --git a/p2p/net/mock/mock_printer.go b/p2p/net/mock/mock_printer.go
index 4990811a37..33e1b965ff 100644
--- a/p2p/net/mock/mock_printer.go
+++ b/p2p/net/mock/mock_printer.go
@@ -4,8 +4,8 @@ import (
"fmt"
"io"
- inet "github.com/libp2p/go-libp2p-net"
- peer "github.com/libp2p/go-libp2p-peer"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
)
// separate object so our interfaces are separate :)
@@ -26,7 +26,7 @@ func (p *printer) MocknetLinks(mn Mocknet) {
fmt.Fprintf(p.w, "\n")
}
-func (p *printer) NetworkConns(ni inet.Network) {
+func (p *printer) NetworkConns(ni network.Network) {
fmt.Fprintf(p.w, "%s connected to:\n", ni.LocalPeer())
for _, c := range ni.Conns() {
diff --git a/p2p/net/mock/mock_stream.go b/p2p/net/mock/mock_stream.go
index 79a3834c0a..ed518b7ea8 100644
--- a/p2p/net/mock/mock_stream.go
+++ b/p2p/net/mock/mock_stream.go
@@ -5,125 +5,184 @@ import (
"errors"
"io"
"net"
+ "strconv"
+ "sync/atomic"
"time"
- inet "github.com/libp2p/go-libp2p-net"
- protocol "github.com/libp2p/go-libp2p-protocol"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/protocol"
)
-// stream implements inet.Stream
+var streamCounter atomic.Int64
+
+// stream implements network.Stream
type stream struct {
+ rstream *stream
+ conn *conn
+ id int64
+
write *io.PipeWriter
read *io.PipeReader
- conn *conn
toDeliver chan *transportObject
reset chan struct{}
close chan struct{}
closed chan struct{}
- state error
+ writeErr error
- protocol protocol.ID
+ protocol atomic.Pointer[protocol.ID]
+ stat network.Stats
}
-var ErrReset error = errors.New("stream reset")
-var ErrClosed error = errors.New("stream closed")
+var ErrClosed = errors.New("stream closed")
type transportObject struct {
msg []byte
arrivalTime time.Time
}
-func NewStream(w *io.PipeWriter, r *io.PipeReader) *stream {
+func newStreamPair() (*stream, *stream) {
+ ra, wb := io.Pipe()
+ rb, wa := io.Pipe()
+
+ sa := newStream(wa, ra, network.DirOutbound)
+ sb := newStream(wb, rb, network.DirInbound)
+ sa.rstream = sb
+ sb.rstream = sa
+ return sa, sb
+}
+
+func newStream(w *io.PipeWriter, r *io.PipeReader, dir network.Direction) *stream {
s := &stream{
read: r,
write: w,
+ id: streamCounter.Add(1),
reset: make(chan struct{}, 1),
close: make(chan struct{}, 1),
closed: make(chan struct{}),
toDeliver: make(chan *transportObject),
+ stat: network.Stats{Direction: dir},
}
go s.transport()
return s
}
-// How to handle errors with writes?
+// How to handle errors with writes?
func (s *stream) Write(p []byte) (n int, err error) {
l := s.conn.link
delay := l.GetLatency() + l.RateLimit(len(p))
t := time.Now().Add(delay)
+
+ // Copy it.
+ cpy := make([]byte, len(p))
+ copy(cpy, p)
+
select {
case <-s.closed: // bail out if we're closing.
- return 0, s.state
- case s.toDeliver <- &transportObject{msg: p, arrivalTime: t}:
+ return 0, s.writeErr
+ case s.toDeliver <- &transportObject{msg: cpy, arrivalTime: t}:
}
return len(p), nil
}
+func (s *stream) ID() string {
+ return strconv.FormatInt(s.id, 10)
+}
+
func (s *stream) Protocol() protocol.ID {
- return s.protocol
+ p := s.protocol.Load()
+ if p == nil {
+ return ""
+ }
+ return *p
}
-func (s *stream) SetProtocol(proto protocol.ID) {
- s.protocol = proto
+func (s *stream) Stat() network.Stats {
+ return s.stat
}
-func (s *stream) Close() error {
+func (s *stream) SetProtocol(proto protocol.ID) error {
+ s.protocol.Store(&proto)
+ return nil
+}
+
+func (s *stream) CloseWrite() error {
select {
case s.close <- struct{}{}:
default:
}
<-s.closed
- if s.state != ErrClosed {
- return s.state
+ if s.writeErr != ErrClosed {
+ return s.writeErr
}
return nil
}
+func (s *stream) CloseRead() error {
+ return s.read.CloseWithError(ErrClosed)
+}
+
+func (s *stream) Close() error {
+ _ = s.CloseRead()
+ return s.CloseWrite()
+}
+
func (s *stream) Reset() error {
- // Cancel any pending writes.
- s.write.Close()
+ // Cancel any pending reads/writes with an error.
+ s.write.CloseWithError(network.ErrReset)
+ s.read.CloseWithError(network.ErrReset)
select {
case s.reset <- struct{}{}:
default:
}
<-s.closed
- if s.state != ErrReset {
- return s.state
+
+ // No meaningful error case here.
+ return nil
+}
+
+// ResetWithError resets the stream. It ignores the provided error code.
+// TODO: Implement error code support.
+func (s *stream) ResetWithError(_ network.StreamErrorCode) error {
+ // Cancel any pending reads/writes with an error.
+
+ s.write.CloseWithError(network.ErrReset)
+ s.read.CloseWithError(network.ErrReset)
+
+ select {
+ case s.reset <- struct{}{}:
+ default:
}
+ <-s.closed
+
+ // No meaningful error case here.
return nil
}
func (s *stream) teardown() {
- s.write.Close()
-
// at this point, no streams are writing.
s.conn.removeStream(s)
// Mark as closed.
close(s.closed)
-
- s.conn.net.notifyAll(func(n inet.Notifiee) {
- n.ClosedStream(s.conn.net, s)
- })
}
-func (s *stream) Conn() inet.Conn {
+func (s *stream) Conn() network.Conn {
return s.conn
}
-func (s *stream) SetDeadline(t time.Time) error {
+func (s *stream) SetDeadline(_ time.Time) error {
return &net.OpError{Op: "set", Net: "pipe", Source: nil, Addr: nil, Err: errors.New("deadline not supported")}
}
-func (s *stream) SetReadDeadline(t time.Time) error {
+func (s *stream) SetReadDeadline(_ time.Time) error {
return &net.OpError{Op: "set", Net: "pipe", Source: nil, Addr: nil, Err: errors.New("deadline not supported")}
}
-func (s *stream) SetWriteDeadline(t time.Time) error {
+func (s *stream) SetWriteDeadline(_ time.Time) error {
return &net.OpError{Op: "set", Net: "pipe", Source: nil, Addr: nil, Err: errors.New("deadline not supported")}
}
@@ -151,20 +210,21 @@ func (s *stream) transport() {
// writeBuf writes the contents of buf through to the s.Writer.
// done only when arrival time makes sense.
- drainBuf := func() {
+ drainBuf := func() error {
if buf.Len() > 0 {
_, err := s.write.Write(buf.Bytes())
if err != nil {
- return
+ return err
}
buf.Reset()
}
+ return nil
}
// deliverOrWait is a helper func that processes
// an incoming packet. it waits until the arrival time,
// and then writes things out.
- deliverOrWait := func(o *transportObject) {
+ deliverOrWait := func(o *transportObject) error {
buffered := len(o.msg) + buf.Len()
// Yes, we can end up extending a timer multiple times if we
@@ -178,7 +238,7 @@ func (s *stream) transport() {
default:
}
}
- delay := o.arrivalTime.Sub(time.Now())
+ delay := time.Until(o.arrivalTime)
if delay >= 0 {
timer.Reset(delay)
} else {
@@ -189,43 +249,68 @@ func (s *stream) transport() {
select {
case <-timer.C:
case <-s.reset:
- s.reset <- struct{}{}
- return
+ select {
+ case s.reset <- struct{}{}:
+ default:
+ }
+ return network.ErrReset
+ }
+ if err := drainBuf(); err != nil {
+ return err
}
- drainBuf()
// write this message.
_, err := s.write.Write(o.msg)
if err != nil {
- log.Error("mock_stream", err)
+ return err
}
} else {
buf.Write(o.msg)
}
+ return nil
}
for {
// Reset takes precedent.
select {
case <-s.reset:
- s.state = ErrReset
- s.read.CloseWithError(ErrReset)
+ s.writeErr = network.ErrReset
return
default:
}
select {
case <-s.reset:
- s.state = ErrReset
- s.read.CloseWithError(ErrReset)
+ s.writeErr = network.ErrReset
return
case <-s.close:
- s.state = ErrClosed
- drainBuf()
+ if err := drainBuf(); err != nil {
+ s.cancelWrite(err)
+ return
+ }
+ s.writeErr = s.write.Close()
+ if s.writeErr == nil {
+ s.writeErr = ErrClosed
+ }
return
case o := <-s.toDeliver:
- deliverOrWait(o)
+ if err := deliverOrWait(o); err != nil {
+ s.cancelWrite(err)
+ return
+ }
case <-timer.C: // ok, due to write it out.
- drainBuf()
+ if err := drainBuf(); err != nil {
+ s.cancelWrite(err)
+ return
+ }
}
}
}
+
+func (s *stream) Scope() network.StreamScope {
+ return &network.NullScope{}
+}
+
+func (s *stream) cancelWrite(err error) {
+ s.write.CloseWithError(err)
+ s.writeErr = err
+}
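
newStreamPair builds one bidirectional stream from two unidirectional io.Pipe halves by cross-wiring readers and writers. The same wiring in isolation, as a standalone sketch (duplex and newDuplexPair are illustrative names):

```go
package main

import (
	"fmt"
	"io"
)

// duplex glues one pipe reader and one pipe writer into a single
// bidirectional endpoint.
type duplex struct {
	*io.PipeReader
	*io.PipeWriter
}

// newDuplexPair returns two endpoints: whatever a writes, b reads, and
// vice versa.
func newDuplexPair() (a, b duplex) {
	ra, wb := io.Pipe() // b writes -> a reads
	rb, wa := io.Pipe() // a writes -> b reads
	return duplex{ra, wa}, duplex{rb, wb}
}

func main() {
	a, b := newDuplexPair()
	go func() {
		a.Write([]byte("ping"))
		a.PipeWriter.Close() // EOF for b's reader
	}()
	msg, _ := io.ReadAll(b)
	fmt.Printf("%s\n", msg) // ping
}
```
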
diff --git a/p2p/net/mock/mock_test.go b/p2p/net/mock/mock_test.go
index 09f480c8ee..d34c0728ab 100644
--- a/p2p/net/mock/mock_test.go
+++ b/p2p/net/mock/mock_test.go
@@ -3,65 +3,89 @@ package mocknet
import (
"bytes"
"context"
+ "crypto/rand"
+ "fmt"
"io"
"math"
- "math/rand"
"sync"
"testing"
"time"
- detectrace "github.com/ipfs/go-detect-race"
- inet "github.com/libp2p/go-libp2p-net"
- peer "github.com/libp2p/go-libp2p-peer"
- protocol "github.com/libp2p/go-libp2p-protocol"
- testutil "github.com/libp2p/go-testutil"
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/event"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/protocol"
+ "github.com/libp2p/go-libp2p/p2p/net/conngater"
+ manet "github.com/multiformats/go-multiaddr/net"
+
+ "github.com/libp2p/go-libp2p-testing/ci"
+ tetc "github.com/libp2p/go-libp2p-testing/etc"
+ "github.com/libp2p/go-libp2p-testing/race"
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/stretchr/testify/require"
)
-func randPeer(t *testing.T) peer.ID {
- p, err := testutil.RandPeerID()
- if err != nil {
- t.Fatal(err)
- }
- return p
+var lastPort = struct {
+ port int
+ sync.Mutex
+}{}
+
+// randLocalTCPAddress returns a random multiaddr. It suppresses errors
+// for nice composability, so do check that the address isn't nil.
+//
+// NOTE: for real network tests, use a :0 address so the kernel
+// assigns an unused TCP port; otherwise you may get clashes.
+func randLocalTCPAddress() ma.Multiaddr {
+ // Chances are it will work out, but it might fail if the port is in use.
+ // Most ports above 10000 aren't used by long-running processes, so
+ // collisions should be rare. (Maybe there should be a range of "loopback"
+ // ports guaranteed to be open for the process but only able to talk to self.)
+
+ lastPort.Lock()
+ if lastPort.port == 0 {
+ lastPort.port = 10000 + tetc.SeededRand.Intn(50000)
+ }
+ port := lastPort.port
+ lastPort.port++
+ lastPort.Unlock()
+
+ addr := fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", port)
+ maddr, _ := ma.NewMultiaddr(addr)
+ return maddr
}
func TestNetworkSetup(t *testing.T) {
-
ctx := context.Background()
- sk1, _, err := testutil.RandTestKeyPair(512)
- if err != nil {
- t.Fatal(t)
- }
- sk2, _, err := testutil.RandTestKeyPair(512)
- if err != nil {
- t.Fatal(t)
- }
- sk3, _, err := testutil.RandTestKeyPair(512)
- if err != nil {
- t.Fatal(t)
- }
- mn := New(ctx)
- // peers := []peer.ID{p1, p2, p3}
+ priv1, _, err := crypto.GenerateEd25519Key(rand.Reader)
+ require.NoError(t, err)
+ priv2, _, err := crypto.GenerateEd25519Key(rand.Reader)
+ require.NoError(t, err)
+ priv3, _, err := crypto.GenerateEd25519Key(rand.Reader)
+ require.NoError(t, err)
+ mn := New()
+ defer mn.Close()
// add peers to mock net
- a1 := testutil.RandLocalTCPAddress()
- a2 := testutil.RandLocalTCPAddress()
- a3 := testutil.RandLocalTCPAddress()
+ a1 := randLocalTCPAddress()
+ a2 := randLocalTCPAddress()
+ a3 := randLocalTCPAddress()
- h1, err := mn.AddPeer(sk1, a1)
+ h1, err := mn.AddPeer(priv1, a1)
if err != nil {
t.Fatal(err)
}
p1 := h1.ID()
- h2, err := mn.AddPeer(sk2, a2)
+ h2, err := mn.AddPeer(priv2, a2)
if err != nil {
t.Fatal(err)
}
p2 := h2.ID()
- h3, err := mn.AddPeer(sk3, a3)
+ h3, err := mn.AddPeer(priv3, a3)
if err != nil {
t.Fatal(err)
}
@@ -197,8 +221,18 @@ func TestNetworkSetup(t *testing.T) {
t.Error(err)
}
- if len(n2.Conns()) != 1 || len(n3.Conns()) != 1 {
- t.Errorf("should have (1,1) conn. Got: (%d, %d)", len(n2.Conns()), len(n3.Conns()))
+ // the initiator should immediately see the conn
+ if len(n2.Conns()) != 1 {
+ t.Errorf("should have 1 conn on initiator. Got: %d", len(n2.Conns()))
+ }
+
+ // wait for the receiver to see the conn.
+ for i := 0; i < 10 && len(n3.Conns()) == 0; i++ {
+ time.Sleep(time.Duration(10*i) * time.Millisecond)
+ }
+
+ if len(n3.Conns()) != 1 {
+ t.Errorf("should have 1 conn on reciever. Got: %d", len(n3.Conns()))
}
// p := PrinterTo(os.Stdout)
@@ -225,14 +259,14 @@ func TestNetworkSetup(t *testing.T) {
t.Error("should not be able to connect")
}
- // connect p1->p1 (should work)
- if _, err := n1.DialPeer(ctx, p1); err != nil {
- t.Error("p1 should be able to dial self.", err)
+ // connect p1->p1 (should fail)
+ if _, err := n1.DialPeer(ctx, p1); err == nil {
+ t.Error("p1 shouldn't be able to dial self")
}
// and a stream too
- if _, err := n1.NewStream(ctx, p1); err != nil {
- t.Error(err)
+ if _, err := n1.NewStream(ctx, p1); err == nil {
+ t.Error("p1 shouldn't be able to dial self")
}
// connect p1->p2
@@ -273,12 +307,13 @@ func TestNetworkSetup(t *testing.T) {
func TestStreams(t *testing.T) {
ctx := context.Background()
- mn, err := FullMeshConnected(context.Background(), 3)
+ mn, err := FullMeshConnected(3)
if err != nil {
t.Fatal(err)
}
+ defer mn.Close()
- handler := func(s inet.Stream) {
+ handler := func(s network.Stream) {
b := make([]byte, 4)
if _, err := io.ReadFull(s, b); err != nil {
panic(err)
@@ -315,104 +350,18 @@ func TestStreams(t *testing.T) {
}
-func makePinger(st string, n int) func(inet.Stream) {
- return func(s inet.Stream) {
- go func() {
- defer s.Close()
-
- for i := 0; i < n; i++ {
- b := make([]byte, 4+len(st))
- if _, err := s.Write([]byte("ping" + st)); err != nil {
- panic(err)
- }
- if _, err := io.ReadFull(s, b); err != nil {
- panic(err)
- }
- if !bytes.Equal(b, []byte("pong"+st)) {
- panic("bytes mismatch")
- }
- }
- }()
- }
-}
-
-func makePonger(st string) func(inet.Stream) {
- return func(s inet.Stream) {
- go func() {
- defer s.Close()
-
- for {
- b := make([]byte, 4+len(st))
- if _, err := io.ReadFull(s, b); err != nil {
- if err == io.EOF {
- return
- }
- panic(err)
- }
- if !bytes.Equal(b, []byte("ping"+st)) {
- panic("bytes mismatch")
- }
- if _, err := s.Write([]byte("pong" + st)); err != nil {
- panic(err)
- }
- }
- }()
- }
-}
-
-func TestStreamsStress(t *testing.T) {
- ctx := context.Background()
- nnodes := 100
- if detectrace.WithRace() {
- nnodes = 50
- }
-
- mn, err := FullMeshConnected(context.Background(), nnodes)
- if err != nil {
- t.Fatal(err)
- }
-
- hosts := mn.Hosts()
- for _, h := range hosts {
- ponger := makePonger(string(protocol.TestingID))
- h.SetStreamHandler(protocol.TestingID, ponger)
- }
-
- var wg sync.WaitGroup
- for i := 0; i < 1000; i++ {
- wg.Add(1)
- go func(i int) {
- defer wg.Done()
- from := rand.Intn(len(hosts))
- to := rand.Intn(len(hosts))
- s, err := hosts[from].NewStream(ctx, hosts[to].ID(), protocol.TestingID)
- if err != nil {
- log.Debugf("%d (%s) %d (%s)", from, hosts[from], to, hosts[to])
- panic(err)
- }
-
- log.Infof("%d start pinging", i)
- makePinger("pingpong", rand.Intn(100))(s)
- log.Infof("%d done pinging", i)
- }(i)
- }
-
- wg.Wait()
-}
-
func TestAdding(t *testing.T) {
+ mn := New()
+ defer mn.Close()
- mn := New(context.Background())
-
- peers := []peer.ID{}
+ var peers []peer.ID
for i := 0; i < 3; i++ {
- sk, _, err := testutil.RandTestKeyPair(512)
+ priv, _, err := crypto.GenerateEd25519Key(rand.Reader)
if err != nil {
t.Fatal(err)
}
-
- a := testutil.RandLocalTCPAddress()
- h, err := mn.AddPeer(sk, a)
+ a := randLocalTCPAddress()
+ h, err := mn.AddPeer(priv, a)
if err != nil {
t.Fatal(err)
}
@@ -437,7 +386,7 @@ func TestAdding(t *testing.T) {
if h2 == nil {
t.Fatalf("no host for %s", p2)
}
- h2.SetStreamHandler(protocol.TestingID, func(s inet.Stream) {
+ h2.SetStreamHandler(protocol.TestingID, func(s network.Stream) {
defer s.Close()
b := make([]byte, 4)
@@ -484,38 +433,42 @@ func TestAdding(t *testing.T) {
}
func TestRateLimiting(t *testing.T) {
- rl := NewRatelimiter(10)
+ if ci.IsRunning() {
+ t.Skip("buggy in CI")
+ }
+
+ rl := NewRateLimiter(10)
- if !within(rl.Limit(10), time.Duration(float32(time.Second)), time.Millisecond/10) {
- t.Fail()
+ if !within(rl.Limit(10), time.Duration(float32(time.Second)), time.Millisecond) {
+ t.Fatal()
}
if !within(rl.Limit(10), time.Duration(float32(time.Second*2)), time.Millisecond) {
- t.Fail()
+ t.Fatal()
}
if !within(rl.Limit(10), time.Duration(float32(time.Second*3)), time.Millisecond) {
- t.Fail()
+ t.Fatal()
}
if within(rl.Limit(10), time.Duration(float32(time.Second*3)), time.Millisecond) {
- t.Fail()
+ t.Fatal()
}
rl.UpdateBandwidth(50)
- if !within(rl.Limit(75), time.Duration(float32(time.Second)*1.5), time.Millisecond/10) {
- t.Fail()
+ if !within(rl.Limit(75), time.Duration(float32(time.Second)*1.5), time.Millisecond) {
+ t.Fatal()
}
- if within(rl.Limit(75), time.Duration(float32(time.Second)*1.5), time.Millisecond/10) {
- t.Fail()
+ if within(rl.Limit(75), time.Duration(float32(time.Second)*1.5), time.Millisecond) {
+ t.Fatal()
}
rl.UpdateBandwidth(100)
- if !within(rl.Limit(1), time.Duration(time.Millisecond*10), time.Millisecond/10) {
- t.Fail()
+ if !within(rl.Limit(1), time.Millisecond*10, time.Millisecond) {
+ t.Fatal()
}
- if within(rl.Limit(1), time.Duration(time.Millisecond*10), time.Millisecond/10) {
- t.Fail()
+ if within(rl.Limit(1), time.Millisecond*10, time.Millisecond) {
+ t.Fatal()
}
}
@@ -524,22 +477,23 @@ func within(t1 time.Duration, t2 time.Duration, tolerance time.Duration) bool {
}
func TestLimitedStreams(t *testing.T) {
- mn, err := FullMeshConnected(context.Background(), 2)
+ mn, err := FullMeshConnected(2)
if err != nil {
t.Fatal(err)
}
+ defer mn.Close()
var wg sync.WaitGroup
messages := 4
messageSize := 500
- handler := func(s inet.Stream) {
+ handler := func(s network.Stream) {
b := make([]byte, messageSize)
for i := 0; i < messages; i++ {
if _, err := io.ReadFull(s, b); err != nil {
- log.Fatal(err)
+ t.Fatal(err)
}
if !bytes.Equal(b[:4], []byte("ping")) {
- log.Fatal("bytes mismatch")
+ t.Fatal("bytes mismatch")
}
wg.Done()
}
@@ -578,7 +532,220 @@ func TestLimitedStreams(t *testing.T) {
}
wg.Wait()
- if !within(time.Since(before), time.Duration(time.Second*2), time.Second/3) {
+ if !within(time.Since(before), time.Second*5/2, time.Second) {
t.Fatal("Expected 2ish seconds but got ", time.Since(before))
}
}
+func TestFuzzManyPeers(t *testing.T) {
+ peerCount := 500
+ if race.WithRace() {
+ peerCount = 100
+ }
+ for i := 0; i < peerCount; i++ {
+ mn, err := FullMeshConnected(2)
+ if err != nil {
+ t.Fatal(err)
+ }
+ mn.Close()
+ }
+}
+
+func TestStreamsWithLatency(t *testing.T) {
+ latency := time.Millisecond * 500
+
+ mn, err := WithNPeers(2)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer mn.Close()
+
+ // configure the Mocknet with some latency and link/connect its peers
+ mn.SetLinkDefaults(LinkOptions{Latency: latency})
+ mn.LinkAll()
+ mn.ConnectAllButSelf()
+
+ msg := []byte("ping")
+ mln := len(msg)
+
+ var wg sync.WaitGroup
+
+ // we'll write once to a single stream
+ wg.Add(1)
+
+ handler := func(s network.Stream) {
+ b := make([]byte, mln)
+
+ if _, err := io.ReadFull(s, b); err != nil {
+ t.Fatal(err)
+ }
+
+ wg.Done()
+ s.Close()
+ }
+
+ mn.Hosts()[0].SetStreamHandler(protocol.TestingID, handler)
+ mn.Hosts()[1].SetStreamHandler(protocol.TestingID, handler)
+
+ s, err := mn.Hosts()[0].NewStream(context.Background(), mn.Hosts()[1].ID(), protocol.TestingID)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // writing to the stream will be subject to our configured latency
+ checkpoint := time.Now()
+ if _, err := s.Write(msg); err != nil {
+ t.Fatal(err)
+ }
+ wg.Wait()
+
+ delta := time.Since(checkpoint)
+ tolerance := time.Second
+ if !within(delta, latency, tolerance) {
+ t.Fatalf("Expected write to take ~%s (+/- %s), but took %s", latency.String(), tolerance.String(), delta.String())
+ }
+}
+
+func TestEventBus(t *testing.T) {
+ const peers = 2
+
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+ defer cancel()
+
+ mn, err := FullMeshLinked(peers)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer mn.Close()
+
+ sub0, err := mn.Hosts()[0].EventBus().Subscribe(new(event.EvtPeerConnectednessChanged))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer sub0.Close()
+ sub1, err := mn.Hosts()[1].EventBus().Subscribe(new(event.EvtPeerConnectednessChanged))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer sub1.Close()
+
+ id0, id1 := mn.Hosts()[0].ID(), mn.Hosts()[1].ID()
+
+ _, err = mn.ConnectPeers(id0, id1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ for range make([]int, peers) {
+ select {
+ case evt := <-sub0.Out():
+ evnt := evt.(event.EvtPeerConnectednessChanged)
+ if evnt.Peer != id1 {
+ t.Fatal("wrong remote peer")
+ }
+ if evnt.Connectedness != network.Connected {
+ t.Fatal("wrong connectedness type")
+ }
+ case evt := <-sub1.Out():
+ evnt := evt.(event.EvtPeerConnectednessChanged)
+ if evnt.Peer != id0 {
+ t.Fatal("wrong remote peer")
+ }
+ if evnt.Connectedness != network.Connected {
+ t.Fatal("wrong connectedness type")
+ }
+ case <-ctx.Done():
+ t.Fatal("didn't get connectedness events in time")
+ }
+ }
+
+ err = mn.DisconnectPeers(id0, id1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ for range make([]int, peers) {
+ select {
+ case evt := <-sub0.Out():
+ evnt := evt.(event.EvtPeerConnectednessChanged)
+ if evnt.Peer != id1 {
+ t.Fatal("wrong remote peer")
+ }
+ if evnt.Connectedness != network.NotConnected {
+ t.Fatal("wrong connectedness type")
+ }
+ case evt := <-sub1.Out():
+ evnt := evt.(event.EvtPeerConnectednessChanged)
+ if evnt.Peer != id0 {
+ t.Fatal("wrong remote peer")
+ }
+ if evnt.Connectedness != network.NotConnected {
+ t.Fatal("wrong connectedness type")
+ }
+ case <-ctx.Done():
+ t.Fatal("didn't get connectedness events in time")
+ }
+ }
+}
+
+func TestBlockByPeerID(t *testing.T) {
+ m, gater1, host1, _, host2 := WithConnectionGaters(t)
+
+ err := gater1.BlockPeer(host2.ID())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = m.ConnectPeers(host1.ID(), host2.ID())
+ if err == nil {
+ t.Fatal("Should have blocked connection to banned peer")
+ }
+
+ _, err = m.ConnectPeers(host2.ID(), host1.ID())
+ if err == nil {
+ t.Fatal("Should have blocked connection from banned peer")
+ }
+}
+
+func TestBlockByIP(t *testing.T) {
+ m, gater1, host1, _, host2 := WithConnectionGaters(t)
+
+ ip, err := manet.ToIP(host2.Addrs()[0])
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = gater1.BlockAddr(ip)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = m.ConnectPeers(host1.ID(), host2.ID())
+ if err == nil {
+ t.Fatal("Should have blocked connection to banned IP")
+ }
+
+ _, err = m.ConnectPeers(host2.ID(), host1.ID())
+ if err == nil {
+ t.Fatal("Should have blocked connection from banned IP")
+ }
+}
+
+func WithConnectionGaters(t *testing.T) (Mocknet, *conngater.BasicConnectionGater, host.Host, *conngater.BasicConnectionGater, host.Host) {
+ m := New()
+ addPeer := func() (*conngater.BasicConnectionGater, host.Host) {
+ gater, err := conngater.NewBasicConnectionGater(nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ h, err := m.GenPeerWithOptions(PeerOptions{gater: gater})
+ if err != nil {
+ t.Fatal(err)
+ }
+ return gater, h
+ }
+ gater1, host1 := addPeer()
+ gater2, host2 := addPeer()
+
+ err := m.LinkAll()
+ if err != nil {
+ t.Fatal(err)
+ }
+ return m, gater1, host1, gater2, host2
+}
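
Put together, the refactored helpers give the typical mocknet flow: build a connected in-memory mesh, install a stream handler, open a stream, talk. A minimal end-to-end sketch against the post-refactor API (the /echo protocol ID is illustrative):

```go
package main

import (
	"context"
	"fmt"
	"io"

	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/protocol"
	mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
)

func main() {
	// Two hosts, fully linked and connected, with no real sockets involved.
	mn, err := mocknet.FullMeshConnected(2)
	if err != nil {
		panic(err)
	}
	defer mn.Close()

	const pid = protocol.ID("/echo/1.0.0")
	mn.Hosts()[1].SetStreamHandler(pid, func(s network.Stream) {
		defer s.Close()
		io.Copy(s, s) // echo until the client closes its write side
	})

	s, err := mn.Hosts()[0].NewStream(context.Background(), mn.Hosts()[1].ID(), pid)
	if err != nil {
		panic(err)
	}
	s.Write([]byte("hello"))
	s.CloseWrite() // signal EOF to the echo handler

	reply, _ := io.ReadAll(s)
	fmt.Printf("%s\n", reply) // hello
}
```
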
diff --git a/p2p/net/mock/ratelimiter.go b/p2p/net/mock/ratelimiter.go
index 0b21c9a4d9..889b249983 100644
--- a/p2p/net/mock/ratelimiter.go
+++ b/p2p/net/mock/ratelimiter.go
@@ -5,9 +5,9 @@ import (
"time"
)
-// A ratelimiter is used by a link to determine how long to wait before sending
-// data given a bandwidth cap.
-type ratelimiter struct {
+// A RateLimiter is used by a link to determine how long to wait before sending
+// data given a bandwidth cap.
+type RateLimiter struct {
lock sync.Mutex
bandwidth float64 // bytes per nanosecond
allowance float64 // in bytes
@@ -17,11 +17,11 @@ type ratelimiter struct {
duration time.Duration // total delay introduced due to rate limiting
}
-// Creates a new ratelimiter with bandwidth (in bytes/sec)
-func NewRatelimiter(bandwidth float64) *ratelimiter {
+// Creates a new RateLimiter with bandwidth (in bytes/sec)
+func NewRateLimiter(bandwidth float64) *RateLimiter {
// convert bandwidth to bytes per nanosecond
b := bandwidth / float64(time.Second)
- return &ratelimiter{
+ return &RateLimiter{
bandwidth: b,
allowance: 0,
maxAllowance: bandwidth,
@@ -29,8 +29,8 @@ func NewRatelimiter(bandwidth float64) *ratelimiter {
}
}
-// Changes bandwidth of a ratelimiter and resets its allowance
-func (r *ratelimiter) UpdateBandwidth(bandwidth float64) {
+// Changes bandwidth of a RateLimiter and resets its allowance
+func (r *RateLimiter) UpdateBandwidth(bandwidth float64) {
r.lock.Lock()
defer r.lock.Unlock()
// Convert bandwidth from bytes/second to bytes/nanosecond
@@ -42,8 +42,8 @@ func (r *ratelimiter) UpdateBandwidth(bandwidth float64) {
r.lastUpdate = time.Now()
}
-// Returns how long to wait before sending data with length 'dataSize' bytes
-func (r *ratelimiter) Limit(dataSize int) time.Duration {
+// Returns how long to wait before sending data with length 'dataSize' bytes
+func (r *RateLimiter) Limit(dataSize int) time.Duration {
r.lock.Lock()
defer r.lock.Unlock()
// update time
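
For the newly exported RateLimiter, delay is cumulative: every Limit call draws down the allowance and returns how long the caller should wait, and UpdateBandwidth resets the allowance. A short usage sketch mirroring the behavior TestRateLimiting asserts:

```go
package main

import (
	"fmt"

	mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
)

func main() {
	// 10 bytes/second of simulated bandwidth: sending 10 bytes from a
	// cold start costs ~1s, and the debt accumulates across calls.
	rl := mocknet.NewRateLimiter(10)
	fmt.Println(rl.Limit(10)) // ~1s
	fmt.Println(rl.Limit(10)) // ~2s (cumulative)

	rl.UpdateBandwidth(100) // resets the allowance as well
	fmt.Println(rl.Limit(1)) // ~10ms
}
```
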
diff --git a/p2p/net/nat/internal/nat/LICENSE b/p2p/net/nat/internal/nat/LICENSE
new file mode 100644
index 0000000000..37ec93a14f
--- /dev/null
+++ b/p2p/net/nat/internal/nat/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/p2p/net/nat/internal/nat/README.md b/p2p/net/nat/internal/nat/README.md
new file mode 100644
index 0000000000..a856a83072
--- /dev/null
+++ b/p2p/net/nat/internal/nat/README.md
@@ -0,0 +1 @@
+Originally forked from: [fd/go-nat](https://github.com/fd/go-nat).
diff --git a/p2p/net/nat/internal/nat/nat.go b/p2p/net/nat/internal/nat/nat.go
new file mode 100644
index 0000000000..bf4517202d
--- /dev/null
+++ b/p2p/net/nat/internal/nat/nat.go
@@ -0,0 +1,187 @@
+// Package nat implements NAT handling facilities
+package nat
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "math"
+ "math/rand"
+ "net"
+ "strings"
+ "time"
+
+ logging "github.com/libp2p/go-libp2p/gologshim"
+ "github.com/libp2p/go-netroute"
+)
+
+var log = logging.Logger("internal/nat")
+
+var ErrNoExternalAddress = errors.New("no external address")
+var ErrNoInternalAddress = errors.New("no internal address")
+
+type ErrNoNATFound struct {
+ Errs []error
+}
+
+func (e ErrNoNATFound) Unwrap() []error {
+ return e.Errs
+}
+
+func (e ErrNoNATFound) Error() string {
+ errStrs := make([]string, 0, len(e.Errs))
+ for _, err := range e.Errs {
+ errStrs = append(errStrs, err.Error())
+ }
+ return fmt.Sprintf("no NAT found: [%s]", strings.Join(errStrs, "; "))
+}
+
+// NAT is an interface for a NAT traversal mechanism. The protocol argument
+// to AddPortMapping and DeletePortMapping is either "udp" or "tcp".
+type NAT interface {
+ // Type returns the kind of NAT port mapping service that is used
+ Type() string
+
+ // GetDeviceAddress returns the internal address of the gateway device.
+ GetDeviceAddress() (addr net.IP, err error)
+
+ // GetExternalAddress returns the external address of the gateway device.
+ GetExternalAddress() (addr net.IP, err error)
+
+ // GetInternalAddress returns the address of the local host.
+ GetInternalAddress() (addr net.IP, err error)
+
+ // AddPortMapping maps a port on the local host to an external port.
+ AddPortMapping(ctx context.Context, protocol string, internalPort int, description string, timeout time.Duration) (mappedExternalPort int, err error)
+
+ // DeletePortMapping removes a port mapping.
+ DeletePortMapping(ctx context.Context, protocol string, internalPort int) (err error)
+}
+
+// discoverNATs returns all NATs discovered in the network.
+func discoverNATs(ctx context.Context) ([]NAT, []error) {
+ type natsAndErrs struct {
+ nats []NAT
+ errs []error
+ }
+ upnpCh := make(chan natsAndErrs)
+ pmpCh := make(chan natsAndErrs)
+
+ go func() {
+ defer close(upnpCh)
+
+ // We do these UPNP queries sequentially because some routers will fail to handle parallel requests.
+ nats, errs := discoverUPNP_IG1(ctx)
+
+ // Do IG2 after IG1 so that its NAT devices will appear as "better" when we
+ // find the best NAT to return below.
+ n, e := discoverUPNP_IG2(ctx)
+ nats = append(nats, n...)
+ errs = append(errs, e...)
+
+ if len(nats) == 0 {
+ // We don't have a NAT. We should try querying all devices over
+ // SSDP to find an InternetGatewayDevice. This shouldn't be necessary for
+ // a well-behaved router.
+ n, e = discoverUPNP_GenIGDev(ctx)
+ nats = append(nats, n...)
+ errs = append(errs, e...)
+ }
+
+ select {
+ case upnpCh <- natsAndErrs{nats, errs}:
+ case <-ctx.Done():
+ }
+ }()
+
+ go func() {
+ defer close(pmpCh)
+ nat, err := discoverNATPMP(ctx)
+ var nats []NAT
+ var errs []error
+ if err != nil {
+ errs = append(errs, err)
+ } else {
+ nats = append(nats, nat)
+ }
+ select {
+ case pmpCh <- natsAndErrs{nats, errs}:
+ case <-ctx.Done():
+ }
+ }()
+
+ var nats []NAT
+ var errs []error
+
+ for upnpCh != nil || pmpCh != nil {
+ select {
+ case res := <-pmpCh:
+ pmpCh = nil
+ nats = append(nats, res.nats...)
+ errs = append(errs, res.errs...)
+ case res := <-upnpCh:
+ upnpCh = nil
+ nats = append(nats, res.nats...)
+ errs = append(errs, res.errs...)
+ case <-ctx.Done():
+ errs = append(errs, ctx.Err())
+ return nats, errs
+ }
+ }
+ return nats, errs
+}
+
+// DiscoverGateway attempts to find a gateway device.
+func DiscoverGateway(ctx context.Context) (NAT, error) {
+ nats, errs := discoverNATs(ctx)
+
+ switch len(nats) {
+ case 0:
+ return nil, ErrNoNATFound{Errs: errs}
+ case 1:
+ if len(errs) > 0 {
+ log.Debug("NAT found, but some potentially unrelated errors occurred", "errors", errs)
+ }
+
+ return nats[0], nil
+ }
+ gw, _ := getDefaultGateway()
+ bestNAT := nats[0]
+ natGw, _ := bestNAT.GetDeviceAddress()
+ bestNATIsGw := gw != nil && natGw.Equal(gw)
+ // 1. Prefer gateways discovered _last_. This is an OK heuristic for
+ // discovering the most-upstream (furthest) NAT.
+ // 2. Prefer gateways that actually match our known gateway address.
+ // Some relays like to claim to be NATs even if they aren't.
+ for _, nat := range nats[1:] {
+ natGw, _ := nat.GetDeviceAddress()
+ natIsGw := gw != nil && natGw.Equal(gw)
+
+ if bestNATIsGw && !natIsGw {
+ continue
+ }
+
+ bestNATIsGw = natIsGw
+ bestNAT = nat
+ }
+
+ if len(errs) > 0 {
+ log.Debug("NAT found, but some potentially unrelated errors occurred", "errors", errs)
+ }
+ return bestNAT, nil
+}
+
+var random = rand.New(rand.NewSource(time.Now().UnixNano()))
+
+func randomPort() int {
+ return random.Intn(math.MaxUint16-10000) + 10000
+}
+
+func getDefaultGateway() (net.IP, error) {
+ router, err := netroute.New()
+ if err != nil {
+ return nil, err
+ }
+
+ _, ip, _, err := router.Route(net.IPv4zero)
+ return ip, err
+}
diff --git a/p2p/net/nat/internal/nat/natpmp.go b/p2p/net/nat/internal/nat/natpmp.go
new file mode 100644
index 0000000000..bffc0a99f2
--- /dev/null
+++ b/p2p/net/nat/internal/nat/natpmp.go
@@ -0,0 +1,138 @@
+package nat
+
+import (
+ "context"
+ "net"
+ "time"
+
+ natpmp "github.com/jackpal/go-nat-pmp"
+)
+
+var (
+ _ NAT = (*natpmpNAT)(nil)
+)
+
+func discoverNATPMP(ctx context.Context) (NAT, error) {
+ ip, err := getDefaultGateway()
+ if err != nil {
+ return nil, err
+ }
+
+ clientCh := make(chan *natpmp.Client, 1)
+ errCh := make(chan error, 1)
+
+ // We can't cancel the natpmp library, but we can at least still return
+ // on context cancellation by putting this in a goroutine
+ go func() {
+ client, err := discoverNATPMPWithAddr(ctx, ip)
+ if err != nil {
+ errCh <- err
+ return
+ }
+ clientCh <- client
+ }()
+
+ select {
+ case client := <-clientCh:
+ return &natpmpNAT{client, ip, make(map[int]int)}, nil
+ case err := <-errCh:
+ return nil, err
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ }
+}
+
+func discoverNATPMPWithAddr(ctx context.Context, ip net.IP) (*natpmp.Client, error) {
+ var client *natpmp.Client
+ if deadline, ok := ctx.Deadline(); ok {
+ client = natpmp.NewClientWithTimeout(ip, time.Until(deadline))
+ } else {
+ client = natpmp.NewClient(ip)
+ }
+ _, err := client.GetExternalAddress()
+ if err != nil {
+ return nil, err
+ }
+ return client, nil
+}
+
+type natpmpNAT struct {
+ c *natpmp.Client
+ gateway net.IP
+ ports map[int]int
+}
+
+func (n *natpmpNAT) GetDeviceAddress() (addr net.IP, err error) {
+ return n.gateway, nil
+}
+
+func (n *natpmpNAT) GetInternalAddress() (addr net.IP, err error) {
+ ifaces, err := net.Interfaces()
+ if err != nil {
+ return nil, err
+ }
+
+ for _, iface := range ifaces {
+ addrs, err := iface.Addrs()
+ if err != nil {
+ return nil, err
+ }
+
+ for _, addr := range addrs {
+ switch x := addr.(type) {
+ case *net.IPNet:
+ if x.Contains(n.gateway) {
+ return x.IP, nil
+ }
+ }
+ }
+ }
+
+ return nil, ErrNoInternalAddress
+}
+
+func (n *natpmpNAT) GetExternalAddress() (addr net.IP, err error) {
+ res, err := n.c.GetExternalAddress()
+ if err != nil {
+ return nil, err
+ }
+
+ d := res.ExternalIPAddress
+ return net.IPv4(d[0], d[1], d[2], d[3]), nil
+}
+
+func (n *natpmpNAT) AddPortMapping(_ context.Context, protocol string, internalPort int, _ string, timeout time.Duration) (int, error) {
+ var (
+ err error
+ )
+
+ timeoutInSeconds := int(timeout / time.Second)
+
+ if externalPort := n.ports[internalPort]; externalPort > 0 {
+ _, err = n.c.AddPortMapping(protocol, internalPort, externalPort, timeoutInSeconds)
+ if err == nil {
+ n.ports[internalPort] = externalPort
+ return externalPort, nil
+ }
+ }
+
+ for i := 0; i < 3; i++ {
+ externalPort := randomPort()
+ _, err = n.c.AddPortMapping(protocol, internalPort, externalPort, timeoutInSeconds)
+ if err == nil {
+ n.ports[internalPort] = externalPort
+ return externalPort, nil
+ }
+ }
+
+ return 0, err
+}
+
+func (n *natpmpNAT) DeletePortMapping(_ context.Context, _ string, internalPort int) (err error) {
+ delete(n.ports, internalPort)
+ return nil
+}
+
+func (n *natpmpNAT) Type() string {
+ return "NAT-PMP"
+}
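
To make the mapping lifecycle concrete, here is a hedged sketch against the `NAT` interface this file implements (same package; `mapOnce` is a hypothetical helper, the port, description, and lease duration are arbitrary, and `fmt`/`time` imports are assumed):

```go
// mapOnce is a hypothetical helper, not part of this patch.
func mapOnce(ctx context.Context, gw NAT) error {
	// For NAT-PMP, AddPortMapping picks a random external port and retries
	// up to three times; the lease must be renewed before it expires.
	extPort, err := gw.AddPortMapping(ctx, "tcp", 4001, "libp2p", time.Minute)
	if err != nil {
		return err
	}
	defer gw.DeletePortMapping(ctx, "tcp", 4001)

	extIP, err := gw.GetExternalAddress()
	if err != nil {
		return err
	}
	fmt.Printf("mapped %s:%d -> :4001\n", extIP, extPort)
	return nil
}
```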
diff --git a/p2p/net/nat/internal/nat/upnp.go b/p2p/net/nat/internal/nat/upnp.go
new file mode 100644
index 0000000000..13d898e58d
--- /dev/null
+++ b/p2p/net/nat/internal/nat/upnp.go
@@ -0,0 +1,256 @@
+package nat
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "net/url"
+ "strings"
+ "time"
+
+ "github.com/huin/goupnp"
+ "github.com/huin/goupnp/dcps/internetgateway1"
+ "github.com/huin/goupnp/dcps/internetgateway2"
+
+ "github.com/koron/go-ssdp"
+)
+
+var _ NAT = (*upnp_NAT)(nil)
+
+func discoverUPNP_IG1(ctx context.Context) ([]NAT, []error) {
+ return discoverSearchTarget(ctx, internetgateway1.URN_WANConnectionDevice_1)
+}
+
+func discoverUPNP_IG2(ctx context.Context) ([]NAT, []error) {
+ return discoverSearchTarget(ctx, internetgateway2.URN_WANConnectionDevice_2)
+}
+
+func discoverSearchTarget(ctx context.Context, target string) (nats []NAT, errs []error) {
+ // find devices
+ devs, err := goupnp.DiscoverDevicesCtx(ctx, target)
+ if err != nil {
+ errs = append(errs, err)
+ return
+ }
+
+ for _, dev := range devs {
+ if dev.Err != nil {
+ errs = append(errs, dev.Err)
+ continue
+ }
+ dev.Root.Device.VisitServices(serviceVisitor(ctx, dev.Root, &nats, &errs))
+ }
+ return
+}
+
+// discoverUPNP_GenIGDev is a fallback for routers that fail to respond to our
+// targeted SSDP queries. It will query all devices and try to find any
+// InternetGatewayDevice.
+func discoverUPNP_GenIGDev(ctx context.Context) (nats []NAT, errs []error) {
+ DeviceList, err := ssdp.Search(ssdp.All, 5, "")
+ if err != nil {
+ errs = append(errs, err)
+ return
+ }
+
+ // Limit the number of InternetGateways we'll query. Normally we'd only
+ // expect 1 or 2, but in case of a weird network we also don't want to do
+ // too much work.
+ const maxIGDevs = 3
+ foundIGDevs := 0
+ for _, Service := range DeviceList {
+ if !strings.Contains(Service.Type, "InternetGatewayDevice") {
+ continue
+ }
+ if foundIGDevs >= maxIGDevs {
+ log.Debug("found more than maxIGDevs UPnP devices, stopping search")
+ break
+ }
+ foundIGDevs++
+
+ DeviceURL, err := url.Parse(Service.Location)
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ RootDevice, err := goupnp.DeviceByURLCtx(ctx, DeviceURL)
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+
+ RootDevice.Device.VisitServices(serviceVisitor(ctx, RootDevice, &nats, &errs))
+ }
+ return
+}
+
+// serviceVisitor is a visitor function that visits all services of a root
+// device and collects NATs.
+//
+// It works on InternetGateway V1 and V2 devices. On V1 devices, V2 services
+// should not be encountered; the visitor records an error if one is found.
+func serviceVisitor(ctx context.Context, rootDevice *goupnp.RootDevice, outNats *[]NAT, outErrs *[]error) func(srv *goupnp.Service) {
+ return func(srv *goupnp.Service) {
+ if ctx.Err() != nil {
+ return
+ }
+ switch srv.ServiceType {
+ case internetgateway2.URN_WANIPConnection_1:
+ client := &internetgateway2.WANIPConnection1{ServiceClient: goupnp.ServiceClient{
+ SOAPClient: srv.NewSOAPClient(),
+ RootDevice: rootDevice,
+ Service: srv,
+ }}
+ _, isNat, err := client.GetNATRSIPStatusCtx(ctx)
+ if err != nil {
+ *outErrs = append(*outErrs, err)
+ } else if isNat {
+ *outNats = append(*outNats, &upnp_NAT{client, make(map[int]int), "UPNP (IP1)", rootDevice})
+ }
+
+ case internetgateway2.URN_WANIPConnection_2:
+ if rootDevice.Device.DeviceType == internetgateway2.URN_WANConnectionDevice_1 {
+ *outErrs = append(*outErrs, fmt.Errorf("found V2 service on V1 device"))
+ return
+ }
+ client := &internetgateway2.WANIPConnection2{ServiceClient: goupnp.ServiceClient{
+ SOAPClient: srv.NewSOAPClient(),
+ RootDevice: rootDevice,
+ Service: srv,
+ }}
+ _, isNat, err := client.GetNATRSIPStatusCtx(ctx)
+ if err != nil {
+ *outErrs = append(*outErrs, err)
+ } else if isNat {
+ *outNats = append(*outNats, &upnp_NAT{client, make(map[int]int), "UPNP (IP2)", rootDevice})
+ }
+
+ case internetgateway2.URN_WANPPPConnection_1:
+ client := &internetgateway2.WANPPPConnection1{ServiceClient: goupnp.ServiceClient{
+ SOAPClient: srv.NewSOAPClient(),
+ RootDevice: rootDevice,
+ Service: srv,
+ }}
+ _, isNat, err := client.GetNATRSIPStatusCtx(ctx)
+ if err != nil {
+ *outErrs = append(*outErrs, err)
+ } else if isNat {
+ *outNats = append(*outNats, &upnp_NAT{client, make(map[int]int), "UPNP (PPP1)", rootDevice})
+ }
+ }
+ }
+}
+
+type upnp_NAT_Client interface {
+ GetExternalIPAddress() (string, error)
+ AddPortMappingCtx(context.Context, string, uint16, string, uint16, string, bool, string, uint32) error
+ DeletePortMappingCtx(context.Context, string, uint16, string) error
+}
+
+type upnp_NAT struct {
+ c upnp_NAT_Client
+ ports map[int]int
+ typ string
+ rootDevice *goupnp.RootDevice
+}
+
+func (u *upnp_NAT) GetExternalAddress() (addr net.IP, err error) {
+ ipString, err := u.c.GetExternalIPAddress()
+ if err != nil {
+ return nil, err
+ }
+
+ ip := net.ParseIP(ipString)
+ if ip == nil {
+ return nil, ErrNoExternalAddress
+ }
+
+ return ip, nil
+}
+
+func mapProtocol(s string) string {
+ switch s {
+ case "udp":
+ return "UDP"
+ case "tcp":
+ return "TCP"
+ default:
+ panic("invalid protocol: " + s)
+ }
+}
+
+func (u *upnp_NAT) AddPortMapping(ctx context.Context, protocol string, internalPort int, description string, timeout time.Duration) (int, error) {
+ ip, err := u.GetInternalAddress()
+ if err != nil {
+		return 0, err
+ }
+
+ timeoutInSeconds := uint32(timeout / time.Second)
+
+ if externalPort := u.ports[internalPort]; externalPort > 0 {
+ err = u.c.AddPortMappingCtx(ctx, "", uint16(externalPort), mapProtocol(protocol), uint16(internalPort), ip.String(), true, description, timeoutInSeconds)
+ if err == nil {
+ return externalPort, nil
+ }
+ }
+
+ for i := 0; i < 3; i++ {
+ externalPort := randomPort()
+ err = u.c.AddPortMappingCtx(ctx, "", uint16(externalPort), mapProtocol(protocol), uint16(internalPort), ip.String(), true, description, timeoutInSeconds)
+ if err == nil {
+ u.ports[internalPort] = externalPort
+ return externalPort, nil
+ }
+ }
+
+ return 0, err
+}
+
+func (u *upnp_NAT) DeletePortMapping(ctx context.Context, protocol string, internalPort int) error {
+ if externalPort := u.ports[internalPort]; externalPort > 0 {
+ delete(u.ports, internalPort)
+ return u.c.DeletePortMappingCtx(ctx, "", uint16(externalPort), mapProtocol(protocol))
+ }
+
+ return nil
+}
+
+func (u *upnp_NAT) GetDeviceAddress() (net.IP, error) {
+ addr, err := net.ResolveUDPAddr("udp4", u.rootDevice.URLBase.Host)
+ if err != nil {
+ return nil, err
+ }
+
+ return addr.IP, nil
+}
+
+func (u *upnp_NAT) GetInternalAddress() (net.IP, error) {
+ devAddr, err := u.GetDeviceAddress()
+ if err != nil {
+ return nil, err
+ }
+
+ ifaces, err := net.Interfaces()
+ if err != nil {
+ return nil, err
+ }
+
+ for _, iface := range ifaces {
+ addrs, err := iface.Addrs()
+ if err != nil {
+ return nil, err
+ }
+
+ for _, addr := range addrs {
+ switch x := addr.(type) {
+ case *net.IPNet:
+ if x.Contains(devAddr) {
+ return x.IP, nil
+ }
+ }
+ }
+ }
+
+ return nil, ErrNoInternalAddress
+}
+
+func (n *upnp_NAT) Type() string { return n.typ }
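
One observation: `GetInternalAddress` here and its counterpart in natpmp.go run the same interface scan. A hedged sketch of a shared helper both could call (`localAddrFor` is a hypothetical name):

```go
// localAddrFor returns the IP of the local interface whose subnet
// contains target, mirroring the loop duplicated in both files.
func localAddrFor(target net.IP) (net.IP, error) {
	ifaces, err := net.Interfaces()
	if err != nil {
		return nil, err
	}
	for _, iface := range ifaces {
		addrs, err := iface.Addrs()
		if err != nil {
			return nil, err
		}
		for _, addr := range addrs {
			if ipnet, ok := addr.(*net.IPNet); ok && ipnet.Contains(target) {
				return ipnet.IP, nil
			}
		}
	}
	return nil, ErrNoInternalAddress
}
```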
diff --git a/p2p/net/nat/mock_nat_test.go b/p2p/net/nat/mock_nat_test.go
new file mode 100644
index 0000000000..ed8b6a1a61
--- /dev/null
+++ b/p2p/net/nat/mock_nat_test.go
@@ -0,0 +1,131 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: github.com/libp2p/go-libp2p/p2p/net/nat/internal/nat (interfaces: NAT)
+//
+// Generated by this command:
+//
+// mockgen -package nat -destination mock_nat_test.go github.com/libp2p/go-libp2p/p2p/net/nat/internal/nat NAT
+//
+
+// Package nat is a generated GoMock package.
+package nat
+
+import (
+ context "context"
+ net "net"
+ reflect "reflect"
+ time "time"
+
+ gomock "go.uber.org/mock/gomock"
+)
+
+// MockNAT is a mock of NAT interface.
+type MockNAT struct {
+ ctrl *gomock.Controller
+ recorder *MockNATMockRecorder
+ isgomock struct{}
+}
+
+// MockNATMockRecorder is the mock recorder for MockNAT.
+type MockNATMockRecorder struct {
+ mock *MockNAT
+}
+
+// NewMockNAT creates a new mock instance.
+func NewMockNAT(ctrl *gomock.Controller) *MockNAT {
+ mock := &MockNAT{ctrl: ctrl}
+ mock.recorder = &MockNATMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockNAT) EXPECT() *MockNATMockRecorder {
+ return m.recorder
+}
+
+// AddPortMapping mocks base method.
+func (m *MockNAT) AddPortMapping(ctx context.Context, protocol string, internalPort int, description string, timeout time.Duration) (int, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "AddPortMapping", ctx, protocol, internalPort, description, timeout)
+ ret0, _ := ret[0].(int)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// AddPortMapping indicates an expected call of AddPortMapping.
+func (mr *MockNATMockRecorder) AddPortMapping(ctx, protocol, internalPort, description, timeout any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddPortMapping", reflect.TypeOf((*MockNAT)(nil).AddPortMapping), ctx, protocol, internalPort, description, timeout)
+}
+
+// DeletePortMapping mocks base method.
+func (m *MockNAT) DeletePortMapping(ctx context.Context, protocol string, internalPort int) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeletePortMapping", ctx, protocol, internalPort)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// DeletePortMapping indicates an expected call of DeletePortMapping.
+func (mr *MockNATMockRecorder) DeletePortMapping(ctx, protocol, internalPort any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePortMapping", reflect.TypeOf((*MockNAT)(nil).DeletePortMapping), ctx, protocol, internalPort)
+}
+
+// GetDeviceAddress mocks base method.
+func (m *MockNAT) GetDeviceAddress() (net.IP, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetDeviceAddress")
+ ret0, _ := ret[0].(net.IP)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetDeviceAddress indicates an expected call of GetDeviceAddress.
+func (mr *MockNATMockRecorder) GetDeviceAddress() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeviceAddress", reflect.TypeOf((*MockNAT)(nil).GetDeviceAddress))
+}
+
+// GetExternalAddress mocks base method.
+func (m *MockNAT) GetExternalAddress() (net.IP, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetExternalAddress")
+ ret0, _ := ret[0].(net.IP)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetExternalAddress indicates an expected call of GetExternalAddress.
+func (mr *MockNATMockRecorder) GetExternalAddress() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetExternalAddress", reflect.TypeOf((*MockNAT)(nil).GetExternalAddress))
+}
+
+// GetInternalAddress mocks base method.
+func (m *MockNAT) GetInternalAddress() (net.IP, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetInternalAddress")
+ ret0, _ := ret[0].(net.IP)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetInternalAddress indicates an expected call of GetInternalAddress.
+func (mr *MockNATMockRecorder) GetInternalAddress() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetInternalAddress", reflect.TypeOf((*MockNAT)(nil).GetInternalAddress))
+}
+
+// Type mocks base method.
+func (m *MockNAT) Type() string {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Type")
+ ret0, _ := ret[0].(string)
+ return ret0
+}
+
+// Type indicates an expected call of Type.
+func (mr *MockNATMockRecorder) Type() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Type", reflect.TypeOf((*MockNAT)(nil).Type))
+}
diff --git a/p2p/net/nat/nat.go b/p2p/net/nat/nat.go
new file mode 100644
index 0000000000..ccb9d7571b
--- /dev/null
+++ b/p2p/net/nat/nat.go
@@ -0,0 +1,261 @@
+package nat
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net/netip"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ logging "github.com/libp2p/go-libp2p/gologshim"
+
+ "github.com/libp2p/go-libp2p/p2p/net/nat/internal/nat"
+)
+
+// ErrNoMapping signals no mapping exists for an address
+var ErrNoMapping = errors.New("mapping not established")
+
+var log = logging.Logger("nat")
+
+// MappingDuration is the default port mapping duration.
+// Port mappings are renewed every (MappingDuration / 3).
+const MappingDuration = time.Minute
+
+// CacheTime is the time a mapping will cache an external address for
+const CacheTime = 15 * time.Second
+
+type entry struct {
+ protocol string
+ port int
+}
+
+// so we can mock it in tests
+var discoverGateway = nat.DiscoverGateway
+
+// DiscoverNAT looks for a NAT device in the network and returns an object that can manage port mappings.
+func DiscoverNAT(ctx context.Context) (*NAT, error) {
+ natInstance, err := discoverGateway(ctx)
+ if err != nil {
+ return nil, err
+ }
+ var extAddr netip.Addr
+ extIP, err := natInstance.GetExternalAddress()
+ if err == nil {
+ extAddr, _ = netip.AddrFromSlice(extIP)
+ }
+
+ // Log the device addr.
+ addr, err := natInstance.GetDeviceAddress()
+ if err != nil {
+ log.Debug("DiscoverGateway address error", "err", err)
+ } else {
+ log.Debug("DiscoverGateway address", "address", addr)
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+ nat := &NAT{
+ nat: natInstance,
+ mappings: make(map[entry]int),
+ ctx: ctx,
+ ctxCancel: cancel,
+ }
+ nat.extAddr.Store(&extAddr)
+ nat.refCount.Add(1)
+ go func() {
+ defer nat.refCount.Done()
+ nat.background()
+ }()
+ return nat, nil
+}
+
+// NAT is an object that manages address port mappings in
+// NATs (Network Address Translators). It is a long-running
+// service that will periodically renew port mappings,
+// and keep an up-to-date list of all the external addresses.
+type NAT struct {
+ natmu sync.Mutex
+ nat nat.NAT
+ // External IP of the NAT. Will be renewed periodically (every CacheTime).
+ extAddr atomic.Pointer[netip.Addr]
+
+ refCount sync.WaitGroup
+ ctx context.Context
+ ctxCancel context.CancelFunc
+
+ mappingmu sync.RWMutex // guards mappings
+ closed bool
+ mappings map[entry]int
+}
+
+// Close shuts down all port mappings. NAT can no longer be used.
+func (nat *NAT) Close() error {
+ nat.mappingmu.Lock()
+ nat.closed = true
+ nat.mappingmu.Unlock()
+
+ nat.ctxCancel()
+ nat.refCount.Wait()
+ return nil
+}
+
+func (nat *NAT) GetMapping(protocol string, port int) (addr netip.AddrPort, found bool) {
+ nat.mappingmu.Lock()
+ defer nat.mappingmu.Unlock()
+
+ if !nat.extAddr.Load().IsValid() {
+ return netip.AddrPort{}, false
+ }
+ extPort, found := nat.mappings[entry{protocol: protocol, port: port}]
+ // The mapping may have an invalid port.
+ if !found || extPort == 0 {
+ return netip.AddrPort{}, false
+ }
+ return netip.AddrPortFrom(*nat.extAddr.Load(), uint16(extPort)), true
+}
+
+// AddMapping attempts to construct a mapping on protocol and internal port.
+// It blocks until a mapping is established. Once added, it periodically renews the mapping.
+//
+// May not succeed, and mappings may change over time;
+// NAT devices may not respect our port requests, and may even lie.
+func (nat *NAT) AddMapping(ctx context.Context, protocol string, port int) error {
+ switch protocol {
+ case "tcp", "udp":
+ default:
+ return fmt.Errorf("invalid protocol: %s", protocol)
+ }
+
+ nat.mappingmu.Lock()
+ defer nat.mappingmu.Unlock()
+
+ if nat.closed {
+ return errors.New("closed")
+ }
+
+ // do it once synchronously, so first mapping is done right away, and before exiting,
+ // allowing users -- in the optimistic case -- to use results right after.
+ extPort := nat.establishMapping(ctx, protocol, port)
+	// Don't validate the mapping here; the refresh loop retries based on this map,
+	// so we can try getting a port again in case it succeeds later. In the worst
+	// case, this is one extra LAN request every few minutes.
+ nat.mappings[entry{protocol: protocol, port: port}] = extPort
+ return nil
+}
+
+// RemoveMapping removes a port mapping.
+// It blocks until the NAT has removed the mapping.
+func (nat *NAT) RemoveMapping(ctx context.Context, protocol string, port int) error {
+ nat.mappingmu.Lock()
+ defer nat.mappingmu.Unlock()
+
+ switch protocol {
+ case "tcp", "udp":
+ e := entry{protocol: protocol, port: port}
+ if _, ok := nat.mappings[e]; ok {
+ delete(nat.mappings, e)
+ return nat.nat.DeletePortMapping(ctx, protocol, port)
+ }
+ return errors.New("unknown mapping")
+ default:
+ return fmt.Errorf("invalid protocol: %s", protocol)
+ }
+}
+
+func (nat *NAT) background() {
+ const mappingUpdate = MappingDuration / 3
+
+ now := time.Now()
+ nextMappingUpdate := now.Add(mappingUpdate)
+ nextAddrUpdate := now.Add(CacheTime)
+
+ t := time.NewTimer(minTime(nextMappingUpdate, nextAddrUpdate).Sub(now)) // don't use a ticker here. We don't know how long establishing the mappings takes.
+ defer t.Stop()
+
+ var in []entry
+ var out []int // port numbers
+ for {
+ select {
+ case now := <-t.C:
+ if now.After(nextMappingUpdate) {
+ in = in[:0]
+ out = out[:0]
+ nat.mappingmu.Lock()
+ for e := range nat.mappings {
+ in = append(in, e)
+ }
+ nat.mappingmu.Unlock()
+ // Establishing the mapping involves network requests.
+ // Don't hold the mutex, just save the ports.
+ for _, e := range in {
+ out = append(out, nat.establishMapping(nat.ctx, e.protocol, e.port))
+ }
+ nat.mappingmu.Lock()
+ for i, p := range in {
+ if _, ok := nat.mappings[p]; !ok {
+ continue // entry might have been deleted
+ }
+ nat.mappings[p] = out[i]
+ }
+ nat.mappingmu.Unlock()
+ nextMappingUpdate = time.Now().Add(mappingUpdate)
+ }
+ if now.After(nextAddrUpdate) {
+ var extAddr netip.Addr
+ extIP, err := nat.nat.GetExternalAddress()
+ if err == nil {
+ extAddr, _ = netip.AddrFromSlice(extIP)
+ }
+ nat.extAddr.Store(&extAddr)
+ nextAddrUpdate = time.Now().Add(CacheTime)
+ }
+ t.Reset(time.Until(minTime(nextAddrUpdate, nextMappingUpdate)))
+ case <-nat.ctx.Done():
+ nat.mappingmu.Lock()
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ for e := range nat.mappings {
+ delete(nat.mappings, e)
+ nat.nat.DeletePortMapping(ctx, e.protocol, e.port)
+ }
+ nat.mappingmu.Unlock()
+ return
+ }
+ }
+}
+
+func (nat *NAT) establishMapping(ctx context.Context, protocol string, internalPort int) (externalPort int) {
+ log.Debug("Attempting port map", "protocol", protocol, "internal_port", internalPort)
+ const comment = "libp2p"
+
+ nat.natmu.Lock()
+ var err error
+ externalPort, err = nat.nat.AddPortMapping(ctx, protocol, internalPort, comment, MappingDuration)
+ if err != nil {
+		// Some hardware does not support mappings with a timeout, so try a permanent mapping
+ externalPort, err = nat.nat.AddPortMapping(ctx, protocol, internalPort, comment, 0)
+ }
+ nat.natmu.Unlock()
+
+ if err != nil || externalPort == 0 {
+ if err != nil {
+ log.Warn("NAT port mapping failed", "protocol", protocol, "internal_port", internalPort, "err", err)
+ } else {
+ log.Warn("NAT port mapping failed", "protocol", protocol, "internal_port", internalPort, "external_port", 0)
+ }
+ // we do not close if the mapping failed,
+ // because it may work again next time.
+ return 0
+ }
+
+ log.Debug("NAT Mapping", "external_port", externalPort, "internal_port", internalPort, "protocol", protocol)
+ return externalPort
+}
+
+func minTime(a, b time.Time) time.Time {
+ if a.Before(b) {
+ return a
+ }
+ return b
+}
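
An end-to-end sketch of the exported API in this file, under the assumption that the caller owns the lifecycle; the port and timeout are arbitrary:

```go
package main

import (
	"context"
	"fmt"
	"time"

	libp2pnat "github.com/libp2p/go-libp2p/p2p/net/nat"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	n, err := libp2pnat.DiscoverNAT(ctx)
	if err != nil {
		fmt.Println("no NAT:", err)
		return
	}
	defer n.Close() // unmaps all ports and stops the refresh loop

	// Establishes the mapping synchronously; the background loop then
	// renews it every MappingDuration/3.
	if err := n.AddMapping(ctx, "tcp", 4001); err != nil {
		fmt.Println("mapping failed:", err)
		return
	}
	if ap, ok := n.GetMapping("tcp", 4001); ok {
		fmt.Println("externally reachable at", ap)
	}
}
```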
diff --git a/p2p/net/nat/nat_test.go b/p2p/net/nat/nat_test.go
new file mode 100644
index 0000000000..9e19f8ea6a
--- /dev/null
+++ b/p2p/net/nat/nat_test.go
@@ -0,0 +1,84 @@
+package nat
+
+import (
+ "context"
+ "errors"
+ "net"
+ "net/netip"
+ "testing"
+
+ "github.com/libp2p/go-libp2p/p2p/net/nat/internal/nat"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/mock/gomock"
+)
+
+//go:generate sh -c "go run go.uber.org/mock/mockgen -package nat -destination mock_nat_test.go github.com/libp2p/go-libp2p/p2p/net/nat/internal/nat NAT"
+
+func setupMockNAT(t *testing.T) (mockNAT *MockNAT, reset func()) {
+ t.Helper()
+ ctrl := gomock.NewController(t)
+ mockNAT = NewMockNAT(ctrl)
+ mockNAT.EXPECT().GetDeviceAddress().Return(nil, errors.New("nope")) // is only used for logging
+ origDiscoverGateway := discoverGateway
+ discoverGateway = func(_ context.Context) (nat.NAT, error) { return mockNAT, nil }
+ return mockNAT, func() {
+ discoverGateway = origDiscoverGateway
+ ctrl.Finish()
+ }
+}
+
+func TestAddMapping(t *testing.T) {
+ mockNAT, reset := setupMockNAT(t)
+ defer reset()
+
+ mockNAT.EXPECT().GetExternalAddress().Return(net.IPv4(1, 2, 3, 4), nil)
+ nat, err := DiscoverNAT(context.Background())
+ require.NoError(t, err)
+
+ mockNAT.EXPECT().AddPortMapping(gomock.Any(), "tcp", 10000, gomock.Any(), MappingDuration).Return(1234, nil)
+ require.NoError(t, nat.AddMapping(context.Background(), "tcp", 10000))
+
+ _, found := nat.GetMapping("tcp", 9999)
+ require.False(t, found, "didn't expect a port mapping for unmapped port")
+ _, found = nat.GetMapping("udp", 10000)
+ require.False(t, found, "didn't expect a port mapping for unmapped protocol")
+ mapped, found := nat.GetMapping("tcp", 10000)
+ require.True(t, found, "expected port mapping")
+ addr, _ := netip.AddrFromSlice(net.IPv4(1, 2, 3, 4))
+ require.Equal(t, netip.AddrPortFrom(addr, 1234), mapped)
+}
+
+func TestRemoveMapping(t *testing.T) {
+ mockNAT, reset := setupMockNAT(t)
+ defer reset()
+
+ mockNAT.EXPECT().GetExternalAddress().Return(net.IPv4(1, 2, 3, 4), nil)
+ nat, err := DiscoverNAT(context.Background())
+ require.NoError(t, err)
+ mockNAT.EXPECT().AddPortMapping(gomock.Any(), "tcp", 10000, gomock.Any(), MappingDuration).Return(1234, nil)
+ require.NoError(t, nat.AddMapping(context.Background(), "tcp", 10000))
+ _, found := nat.GetMapping("tcp", 10000)
+ require.True(t, found, "expected port mapping")
+
+ require.Error(t, nat.RemoveMapping(context.Background(), "tcp", 9999), "expected error for unknown mapping")
+ mockNAT.EXPECT().DeletePortMapping(gomock.Any(), "tcp", 10000)
+ require.NoError(t, nat.RemoveMapping(context.Background(), "tcp", 10000))
+
+ _, found = nat.GetMapping("tcp", 10000)
+ require.False(t, found, "didn't expect port mapping for deleted mapping")
+}
+
+func TestAddMappingInvalidPort(t *testing.T) {
+ mockNAT, reset := setupMockNAT(t)
+ defer reset()
+
+ mockNAT.EXPECT().GetExternalAddress().Return(net.IPv4(1, 2, 3, 4), nil)
+ nat, err := DiscoverNAT(context.Background())
+ require.NoError(t, err)
+
+ mockNAT.EXPECT().AddPortMapping(gomock.Any(), "tcp", 10000, gomock.Any(), MappingDuration).Return(0, nil)
+ require.NoError(t, nat.AddMapping(context.Background(), "tcp", 10000))
+
+ _, found := nat.GetMapping("tcp", 10000)
+ require.False(t, found, "didn't expect a port mapping for invalid nat-ed port")
+}
diff --git a/p2p/net/pnet/protector.go b/p2p/net/pnet/protector.go
new file mode 100644
index 0000000000..643e904d29
--- /dev/null
+++ b/p2p/net/pnet/protector.go
@@ -0,0 +1,18 @@
+package pnet
+
+import (
+ "errors"
+ "net"
+
+ ipnet "github.com/libp2p/go-libp2p/core/pnet"
+)
+
+// NewProtectedConn creates a new protected connection
+func NewProtectedConn(psk ipnet.PSK, conn net.Conn) (net.Conn, error) {
+ if len(psk) != 32 {
+ return nil, errors.New("expected 32 byte PSK")
+ }
+ var p [32]byte
+ copy(p[:], psk)
+ return newPSKConn(&p, conn)
+}
diff --git a/p2p/net/pnet/psk_conn.go b/p2p/net/pnet/psk_conn.go
new file mode 100644
index 0000000000..b36d434904
--- /dev/null
+++ b/p2p/net/pnet/psk_conn.go
@@ -0,0 +1,84 @@
+package pnet
+
+import (
+ "crypto/cipher"
+ "crypto/rand"
+ "fmt"
+ "io"
+ "net"
+
+ "github.com/libp2p/go-libp2p/core/pnet"
+
+ "github.com/davidlazar/go-crypto/salsa20"
+ pool "github.com/libp2p/go-buffer-pool"
+)
+
+// We use a buffer pool because the caller needs their slice back,
+// so we can't run the XOR cipher in place.
+var (
+ errShortNonce = pnet.NewError("could not read full nonce")
+ errInsecureNil = pnet.NewError("insecure is nil")
+	errPSKNil      = pnet.NewError("pre-shared key is nil")
+)
+
+type pskConn struct {
+ net.Conn
+ psk *[32]byte
+
+ writeS20 cipher.Stream
+ readS20 cipher.Stream
+}
+
+func (c *pskConn) Read(out []byte) (int, error) {
+ if c.readS20 == nil {
+ nonce := make([]byte, 24)
+ _, err := io.ReadFull(c.Conn, nonce)
+ if err != nil {
+ return 0, fmt.Errorf("%w: %w", errShortNonce, err)
+ }
+ c.readS20 = salsa20.New(c.psk, nonce)
+ }
+
+	n, err := c.Conn.Read(out) // read ciphertext into out
+ if n > 0 {
+ c.readS20.XORKeyStream(out[:n], out[:n]) // decrypt to out buffer
+ }
+ return n, err
+}
+
+func (c *pskConn) Write(in []byte) (int, error) {
+ if c.writeS20 == nil {
+ nonce := make([]byte, 24)
+ _, err := rand.Read(nonce)
+ if err != nil {
+ return 0, err
+ }
+ _, err = c.Conn.Write(nonce)
+ if err != nil {
+ return 0, err
+ }
+
+ c.writeS20 = salsa20.New(c.psk, nonce)
+ }
+ out := pool.Get(len(in))
+ defer pool.Put(out)
+
+ c.writeS20.XORKeyStream(out, in) // encrypt
+
+ return c.Conn.Write(out) // send
+}
+
+var _ net.Conn = (*pskConn)(nil)
+
+func newPSKConn(psk *[32]byte, insecure net.Conn) (net.Conn, error) {
+ if insecure == nil {
+ return nil, errInsecureNil
+ }
+ if psk == nil {
+ return nil, errPSKNil
+ }
+ return &pskConn{
+ Conn: insecure,
+ psk: psk,
+ }, nil
+}
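
To make the wire format explicit: each direction independently sends a cleartext 24-byte nonce, then XORs its payload with the XSalsa20 keystream derived from the PSK and that nonce. A self-contained sketch of one direction, using the same salsa20 helper imported above:

```go
package main

import (
	"crypto/rand"
	"fmt"

	"github.com/davidlazar/go-crypto/salsa20"
)

func main() {
	var psk [32]byte // the 32-byte pre-shared key both sides hold

	// Writer: pick a fresh nonce, send it in the clear, then stream-XOR.
	nonce := make([]byte, 24)
	if _, err := rand.Read(nonce); err != nil {
		panic(err)
	}
	enc := salsa20.New(&psk, nonce)

	plaintext := []byte("hello")
	ciphertext := make([]byte, len(plaintext))
	enc.XORKeyStream(ciphertext, plaintext)

	// Reader: consume the nonce first, then decrypt with the same stream.
	dec := salsa20.New(&psk, nonce)
	out := make([]byte, len(ciphertext))
	dec.XORKeyStream(out, ciphertext)
	fmt.Printf("%s\n", out) // hello
}
```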
diff --git a/p2p/net/pnet/psk_conn_test.go b/p2p/net/pnet/psk_conn_test.go
new file mode 100644
index 0000000000..b331065624
--- /dev/null
+++ b/p2p/net/pnet/psk_conn_test.go
@@ -0,0 +1,90 @@
+package pnet
+
+import (
+ "bytes"
+ "context"
+ "crypto/rand"
+ "net"
+ "testing"
+)
+
+func setupPSKConns(_ context.Context, t *testing.T) (net.Conn, net.Conn) {
+	testPSK := make([]byte, 32) // null bytes are as good a test key as any other
+ conn1, conn2 := net.Pipe()
+
+ psk1, err := NewProtectedConn(testPSK, conn1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ psk2, err := NewProtectedConn(testPSK, conn2)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return psk1, psk2
+}
+
+func TestPSKSimpleMessages(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.TODO())
+ defer cancel()
+
+ psk1, psk2 := setupPSKConns(ctx, t)
+ msg1 := []byte("hello world")
+ out1 := make([]byte, len(msg1))
+
+ wch := make(chan error)
+ go func() {
+ _, err := psk1.Write(msg1)
+ wch <- err
+ }()
+ n, err := psk2.Read(out1)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = <-wch
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if n != len(out1) {
+ t.Fatalf("expected to read %d bytes, read: %d", len(out1), n)
+ }
+
+ if !bytes.Equal(msg1, out1) {
+ t.Fatalf("input and output are not the same")
+ }
+}
+
+func TestPSKFragmentation(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.TODO())
+ defer cancel()
+
+ psk1, psk2 := setupPSKConns(ctx, t)
+
+ in := make([]byte, 1000)
+ if _, err := rand.Read(in); err != nil {
+ t.Fatal(err)
+ }
+
+ out := make([]byte, 100)
+
+ wch := make(chan error)
+ go func() {
+ _, err := psk1.Write(in)
+ wch <- err
+ }()
+
+ for i := 0; i < 10; i++ {
+ if _, err := psk2.Read(out); err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(in[:100], out) {
+ t.Fatalf("input and output are not the same")
+ }
+ in = in[100:]
+ }
+
+ if err := <-wch; err != nil {
+ t.Fatal(err)
+ }
+}
diff --git a/p2p/net/reuseport/dial.go b/p2p/net/reuseport/dial.go
new file mode 100644
index 0000000000..e162813d51
--- /dev/null
+++ b/p2p/net/reuseport/dial.go
@@ -0,0 +1,62 @@
+package reuseport
+
+import (
+ "context"
+
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
+// Dial dials the given multiaddr, reusing ports we're currently listening on if
+// possible.
+//
+// Dial attempts to be smart about choosing the source port. For example, if
+// we're dialing a loopback address and we're listening on one or more loopback
+// ports, Dial will randomly choose one of the loopback ports and addresses and
+// reuse it.
+func (t *Transport) Dial(raddr ma.Multiaddr) (manet.Conn, error) {
+ return t.DialContext(context.Background(), raddr)
+}
+
+// DialContext is like Dial but takes a context.
+func (t *Transport) DialContext(ctx context.Context, raddr ma.Multiaddr) (manet.Conn, error) {
+ network, addr, err := manet.DialArgs(raddr)
+ if err != nil {
+ return nil, err
+ }
+ var d *dialer
+ switch network {
+ case "tcp4":
+ d = t.v4.getDialer(network)
+ case "tcp6":
+ d = t.v6.getDialer(network)
+ default:
+ return nil, ErrWrongProto
+ }
+ conn, err := d.DialContext(ctx, network, addr)
+ if err != nil {
+ return nil, err
+ }
+ maconn, err := manet.WrapNetConn(conn)
+ if err != nil {
+ conn.Close()
+ return nil, err
+ }
+ return maconn, nil
+}
+
+func (n *network) getDialer(_ string) *dialer {
+ n.mu.RLock()
+ d := n.dialer
+ n.mu.RUnlock()
+ if d == nil {
+ n.mu.Lock()
+ defer n.mu.Unlock()
+
+ if n.dialer == nil {
+ n.dialer = newDialer(n.listeners)
+ }
+ d = n.dialer
+ }
+ return d
+}
diff --git a/p2p/net/reuseport/dialer.go b/p2p/net/reuseport/dialer.go
new file mode 100644
index 0000000000..ec3769a7ac
--- /dev/null
+++ b/p2p/net/reuseport/dialer.go
@@ -0,0 +1,114 @@
+package reuseport
+
+import (
+ "context"
+ "fmt"
+ "math/rand"
+ "net"
+
+ "github.com/libp2p/go-netroute"
+)
+
+type dialer struct {
+	// All addresses that are _not_ loopback or unspecified (0.0.0.0 or ::).
+ specific []*net.TCPAddr
+ // All loopback addresses (127.*.*.*, ::1).
+ loopback []*net.TCPAddr
+ // Unspecified addresses (0.0.0.0, ::)
+ unspecified []*net.TCPAddr
+}
+
+func (d *dialer) Dial(network, addr string) (net.Conn, error) {
+ return d.DialContext(context.Background(), network, addr)
+}
+
+func randAddr(addrs []*net.TCPAddr) *net.TCPAddr {
+ if len(addrs) > 0 {
+ return addrs[rand.Intn(len(addrs))]
+ }
+ return nil
+}
+
+// DialContext dials a target addr.
+//
+// In-order:
+//
+// 1. If we're _explicitly_ listening on the preferred source address for the destination address
+// (per the system's routes), we'll use that listener's port as the source port.
+// 2. If we're listening on one or more _unspecified_ addresses (zero address), we'll pick a source
+//    port from one of these listeners.
+// 3. Otherwise, we'll let the system pick the source port.
+func (d *dialer) DialContext(ctx context.Context, network, addr string) (net.Conn, error) {
+ // We only check this case if the user is listening on a specific address (loopback or
+ // otherwise). Generally, users will listen on the "unspecified" address (0.0.0.0 or ::) and
+ // we can skip this section.
+ //
+ // This lets us avoid resolving the address twice, in most cases.
+ if len(d.specific) > 0 || len(d.loopback) > 0 {
+ tcpAddr, err := net.ResolveTCPAddr(network, addr)
+ if err != nil {
+ return nil, err
+ }
+ ip := tcpAddr.IP
+ if !ip.IsLoopback() && !ip.IsGlobalUnicast() {
+ return nil, fmt.Errorf("undialable IP: %s", ip)
+ }
+
+ // If we're listening on some specific address and that specific address happens to
+ // be the preferred source address for the target destination address, we try to
+ // dial with that address/port.
+ //
+ // We skip this check if we _aren't_ listening on any specific addresses, because
+ // checking routing tables can be expensive and users rarely listen on specific IP
+ // addresses.
+ if len(d.specific) > 0 {
+ if router, err := netroute.New(); err == nil {
+ if _, _, preferredSrc, err := router.Route(ip); err == nil {
+ for _, optAddr := range d.specific {
+ if optAddr.IP.Equal(preferredSrc) {
+ return reuseDial(ctx, optAddr, network, addr)
+ }
+ }
+ }
+ }
+ }
+
+ // Otherwise, if we are listening on a loopback address and the destination is also
+ // a loopback address, use the port from our loopback listener.
+ if len(d.loopback) > 0 && ip.IsLoopback() {
+ return reuseDial(ctx, randAddr(d.loopback), network, addr)
+ }
+ }
+
+	// If we're listening on any unspecified addresses, use a randomly chosen port from one of
+ // these listeners.
+ if len(d.unspecified) > 0 {
+ return reuseDial(ctx, randAddr(d.unspecified), network, addr)
+ }
+
+ // Finally, just pick a random port.
+ var dialer net.Dialer
+ return dialer.DialContext(ctx, network, addr)
+}
+
+func newDialer(listeners map[*listener]struct{}) *dialer {
+ specific := make([]*net.TCPAddr, 0)
+ loopback := make([]*net.TCPAddr, 0)
+ unspecified := make([]*net.TCPAddr, 0)
+
+ for l := range listeners {
+ addr := l.Addr().(*net.TCPAddr)
+ if addr.IP.IsLoopback() {
+ loopback = append(loopback, addr)
+ } else if addr.IP.IsUnspecified() {
+ unspecified = append(unspecified, addr)
+ } else {
+ specific = append(specific, addr)
+ }
+ }
+ return &dialer{
+ specific: specific,
+ loopback: loopback,
+ unspecified: unspecified,
+ }
+}
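
For illustration, the bucketing rule `newDialer` applies, reduced to stdlib checks (192.0.2.10 is a documentation address standing in for a real interface IP):

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	for _, s := range []string{"127.0.0.1", "::1", "0.0.0.0", "::", "192.0.2.10"} {
		ip := net.ParseIP(s)
		switch {
		case ip.IsLoopback():
			fmt.Println(s, "-> loopback bucket")
		case ip.IsUnspecified():
			fmt.Println(s, "-> unspecified bucket")
		default:
			fmt.Println(s, "-> specific bucket")
		}
	}
}
```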
diff --git a/p2p/net/reuseport/listen.go b/p2p/net/reuseport/listen.go
new file mode 100644
index 0000000000..4388b35c8e
--- /dev/null
+++ b/p2p/net/reuseport/listen.go
@@ -0,0 +1,80 @@
+package reuseport
+
+import (
+ "net"
+
+ "github.com/libp2p/go-reuseport"
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
+type listener struct {
+ manet.Listener
+ network *network
+}
+
+func (l *listener) Close() error {
+ l.network.mu.Lock()
+ delete(l.network.listeners, l)
+ l.network.dialer = nil
+ l.network.mu.Unlock()
+ return l.Listener.Close()
+}
+
+// Listen listens on the given multiaddr.
+//
+// If reuseport is supported, it will be enabled for this listener and future
+// dials from this transport may reuse the port.
+//
+// Note: You can listen on the same multiaddr as many times as you want
+// (although only *one* listener will end up handling the inbound connection).
+func (t *Transport) Listen(laddr ma.Multiaddr) (manet.Listener, error) {
+ nw, naddr, err := manet.DialArgs(laddr)
+ if err != nil {
+ return nil, err
+ }
+ var n *network
+ switch nw {
+ case "tcp4":
+ n = &t.v4
+ case "tcp6":
+ n = &t.v6
+ default:
+ return nil, ErrWrongProto
+ }
+
+ if !reuseport.Available() {
+ return manet.Listen(laddr)
+ }
+ nl, err := reuseport.Listen(nw, naddr)
+ if err != nil {
+ return manet.Listen(laddr)
+ }
+
+ if _, ok := nl.Addr().(*net.TCPAddr); !ok {
+ nl.Close()
+ return nil, ErrWrongProto
+ }
+
+ malist, err := manet.WrapNetListener(nl)
+ if err != nil {
+ nl.Close()
+ return nil, err
+ }
+
+ list := &listener{
+ Listener: malist,
+ network: n,
+ }
+
+ n.mu.Lock()
+ defer n.mu.Unlock()
+
+ if n.listeners == nil {
+ n.listeners = make(map[*listener]struct{})
+ }
+ n.listeners[list] = struct{}{}
+ n.dialer = nil
+
+ return list, nil
+}
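
A hedged usage sketch for the listener side; `ma.StringCast` is used as elsewhere in this patch, and the address is arbitrary:

```go
package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p/p2p/net/reuseport"
	ma "github.com/multiformats/go-multiaddr"
)

func main() {
	var tr reuseport.Transport // the zero value is ready to use

	l, err := tr.Listen(ma.StringCast("/ip4/0.0.0.0/tcp/0"))
	if err != nil {
		fmt.Println("listen failed:", err)
		return
	}
	defer l.Close()

	// Dials from this transport may now reuse the listener's port.
	fmt.Println("listening on", l.Multiaddr())
}
```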
diff --git a/p2p/net/reuseport/reuseport.go b/p2p/net/reuseport/reuseport.go
new file mode 100644
index 0000000000..dbb67bb149
--- /dev/null
+++ b/p2p/net/reuseport/reuseport.go
@@ -0,0 +1,35 @@
+package reuseport
+
+import (
+ "context"
+ "net"
+
+ "github.com/libp2p/go-reuseport"
+)
+
+var fallbackDialer net.Dialer
+
+// reuseDial dials from laddr using reuseport, then redials normally if that fails.
+func reuseDial(ctx context.Context, laddr *net.TCPAddr, network, raddr string) (con net.Conn, err error) {
+ if laddr == nil {
+ return fallbackDialer.DialContext(ctx, network, raddr)
+ }
+
+ d := net.Dialer{
+ LocalAddr: laddr,
+ Control: reuseport.Control,
+ }
+
+ con, err = d.DialContext(ctx, network, raddr)
+ if err == nil {
+ return con, nil
+ }
+
+ if reuseErrShouldRetry(err) && ctx.Err() == nil {
+ // We could have an existing socket open or we could have one
+ // stuck in TIME-WAIT.
+ log.Debug("failed to reuse port, will try again with a random port", "err", err)
+ con, err = fallbackDialer.DialContext(ctx, network, raddr)
+ }
+ return con, err
+}
diff --git a/p2p/net/reuseport/reuseport_plan9.go b/p2p/net/reuseport/reuseport_plan9.go
new file mode 100644
index 0000000000..c40eb2e8be
--- /dev/null
+++ b/p2p/net/reuseport/reuseport_plan9.go
@@ -0,0 +1,44 @@
+package reuseport
+
+import (
+ "net"
+ "os"
+)
+
+const (
+ EADDRINUSE = "address in use"
+ ECONNREFUSED = "connection refused"
+)
+
+// reuseErrShouldRetry diagnoses whether to retry after a reuse error.
+// If we failed to bind, we should retry. If bind worked and this is a
+// real dial error (remote end didn't answer), we should not retry.
+func reuseErrShouldRetry(err error) bool {
+ if err == nil {
+ return false // hey, it worked! no need to retry.
+ }
+
+ // if it's a network timeout error, it's a legitimate failure.
+ if nerr, ok := err.(net.Error); ok && nerr.Timeout() {
+ return false
+ }
+
+ e, ok := err.(*net.OpError)
+ if !ok {
+ return true
+ }
+
+ e1, ok := e.Err.(*os.PathError)
+ if !ok {
+ return true
+ }
+
+ switch e1.Err.Error() {
+ case EADDRINUSE:
+ return true
+ case ECONNREFUSED:
+ return false
+ default:
+ return true // optimistically default to retry.
+ }
+}
diff --git a/p2p/net/reuseport/reuseport_posix.go b/p2p/net/reuseport/reuseport_posix.go
new file mode 100644
index 0000000000..0b5771bb7d
--- /dev/null
+++ b/p2p/net/reuseport/reuseport_posix.go
@@ -0,0 +1,36 @@
+//go:build !plan9
+
+package reuseport
+
+import (
+ "net"
+ "syscall"
+)
+
+// reuseErrShouldRetry diagnoses whether to retry after a reuse error.
+// If we failed to bind, we should retry. If bind worked and this is a
+// real dial error (remote end didn't answer), we should not retry.
+func reuseErrShouldRetry(err error) bool {
+ if err == nil {
+ return false // hey, it worked! no need to retry.
+ }
+
+ // if it's a network timeout error, it's a legitimate failure.
+ if nerr, ok := err.(net.Error); ok && nerr.Timeout() {
+ return false
+ }
+
+ errno, ok := err.(syscall.Errno)
+ if !ok { // not an errno? who knows what this is. retry.
+ return true
+ }
+
+ switch errno {
+ case syscall.EADDRINUSE, syscall.EADDRNOTAVAIL:
+ return true // failure to bind. retry.
+ case syscall.ECONNREFUSED:
+ return false // real dial error
+ default:
+ return true // optimistically default to retry.
+ }
+}
diff --git a/p2p/net/reuseport/reuseport_test.go b/p2p/net/reuseport/reuseport_test.go
new file mode 100644
index 0000000000..21e1dcb190
--- /dev/null
+++ b/p2p/net/reuseport/reuseport_test.go
@@ -0,0 +1,50 @@
+//go:build !plan9
+
+package reuseport
+
+import (
+ "net"
+ "syscall"
+ "testing"
+)
+
+type netTimeoutErr struct {
+ timeout bool
+}
+
+func (e netTimeoutErr) Error() string {
+ return ""
+}
+
+func (e netTimeoutErr) Timeout() bool {
+ return e.timeout
+}
+
+func (e netTimeoutErr) Temporary() bool {
+ panic("not checked")
+}
+
+func TestReuseError(t *testing.T) {
+ var nte1 net.Error = &netTimeoutErr{true}
+ var nte2 net.Error = &netTimeoutErr{false}
+
+ cases := map[error]bool{
+ nil: false,
+ syscall.EADDRINUSE: true,
+ syscall.EADDRNOTAVAIL: true,
+ syscall.ECONNREFUSED: false,
+
+ nte1: false,
+		nte2: true, // this one's a little weird... we should check neterror.Temporary() too
+
+ // test 'default' to true
+ syscall.EBUSY: true,
+ }
+
+ for k, v := range cases {
+ if reuseErrShouldRetry(k) != v {
+ t.Fatalf("expected %t for %#v", v, k)
+ }
+ }
+
+}
diff --git a/p2p/net/reuseport/transport.go b/p2p/net/reuseport/transport.go
new file mode 100644
index 0000000000..cb589f0aa4
--- /dev/null
+++ b/p2p/net/reuseport/transport.go
@@ -0,0 +1,35 @@
+// Package reuseport provides a basic transport for automatically (and intelligently) reusing TCP ports.
+//
+// To use, construct a new Transport and configure listeners with tr.Listen(...).
+// When dialing (tr.Dial(...)), the transport will attempt to reuse the ports it's currently listening on,
+// choosing the best one depending on the destination address.
+//
+// It is recommended to set SO_LINGER to 0 for all connections, otherwise
+// reusing the port may fail when re-dialing a recently closed connection.
+// See https://hea-www.harvard.edu/~fine/Tech/addrinuse.html for details.
+package reuseport
+
+import (
+ "errors"
+ "sync"
+
+ logging "github.com/libp2p/go-libp2p/gologshim"
+)
+
+var log = logging.Logger("reuseport-transport")
+
+// ErrWrongProto is returned when dialing a protocol other than tcp.
+var ErrWrongProto = errors.New("can only dial TCP over IPv4 or IPv6")
+
+// Transport is a TCP reuse transport that reuses listener ports.
+// The zero value is safe to use.
+type Transport struct {
+ v4 network
+ v6 network
+}
+
+type network struct {
+ mu sync.RWMutex
+ listeners map[*listener]struct{}
+ dialer *dialer
+}
diff --git a/p2p/net/reuseport/transport_test.go b/p2p/net/reuseport/transport_test.go
new file mode 100644
index 0000000000..c46d3a1f0f
--- /dev/null
+++ b/p2p/net/reuseport/transport_test.go
@@ -0,0 +1,277 @@
+package reuseport
+
+import (
+ "context"
+ "net"
+ "runtime"
+ "testing"
+ "time"
+
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
+var loopbackV4, _ = ma.NewMultiaddr("/ip4/127.0.0.1/tcp/0")
+var loopbackV6, _ = ma.NewMultiaddr("/ip6/::1/tcp/0")
+var unspecV6, _ = ma.NewMultiaddr("/ip6/::/tcp/0")
+var unspecV4, _ = ma.NewMultiaddr("/ip4/0.0.0.0/tcp/0")
+
+var globalV4 ma.Multiaddr
+var globalV6 ma.Multiaddr
+
+func init() {
+ addrs, err := manet.InterfaceMultiaddrs()
+ if err != nil {
+ return
+ }
+ for _, addr := range addrs {
+ if !manet.IsIP6LinkLocal(addr) && !manet.IsIPLoopback(addr) {
+ tcp, _ := ma.NewMultiaddr("/tcp/0")
+ switch addr.Protocols()[0].Code {
+ case ma.P_IP4:
+ if globalV4 == nil {
+ globalV4 = addr.Encapsulate(tcp)
+ }
+ case ma.P_IP6:
+ if globalV6 == nil {
+ globalV6 = addr.Encapsulate(tcp)
+ }
+ }
+ }
+ }
+}
+
+func setLingerZero(c manet.Conn) {
+ if runtime.GOOS == "darwin" {
+ c.(interface{ SetLinger(int) error }).SetLinger(0)
+ }
+}
+
+func acceptOne(t *testing.T, listener manet.Listener) <-chan manet.Conn {
+ t.Helper()
+ done := make(chan manet.Conn, 1)
+ go func() {
+ defer close(done)
+ c, err := listener.Accept()
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ setLingerZero(c)
+ done <- c
+ }()
+ return done
+}
+
+func dialOne(t *testing.T, tr *Transport, listener manet.Listener, expected ...int) int {
+ t.Helper()
+
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+
+ connChan := acceptOne(t, listener)
+ c, err := tr.DialContext(ctx, listener.Multiaddr())
+ if err != nil {
+ t.Fatal(err)
+ }
+ setLingerZero(c)
+ port := c.LocalAddr().(*net.TCPAddr).Port
+ serverConn := <-connChan
+ serverConn.Close()
+ c.Close()
+ if len(expected) == 0 {
+ return port
+ }
+ for _, p := range expected {
+ if p == port {
+ return port
+ }
+ }
+ t.Errorf("dialed %s from %v. expected to dial from port %v", listener.Multiaddr(), c.LocalAddr(), expected)
+ return 0
+}
+
+func TestNoneAndSingle(t *testing.T) {
+ var trA Transport
+ var trB Transport
+ listenerA, err := trA.Listen(loopbackV4)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer listenerA.Close()
+
+ dialOne(t, &trB, listenerA)
+
+ listenerB, err := trB.Listen(loopbackV4)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer listenerB.Close()
+
+ dialOne(t, &trB, listenerA, listenerB.Addr().(*net.TCPAddr).Port)
+}
+
+func TestTwoLocal(t *testing.T) {
+ var trA Transport
+ var trB Transport
+ listenerA, err := trA.Listen(loopbackV4)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer listenerA.Close()
+
+ listenerB1, err := trB.Listen(loopbackV4)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer listenerB1.Close()
+
+ listenerB2, err := trB.Listen(loopbackV4)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer listenerB2.Close()
+
+ dialOne(t, &trB, listenerA,
+ listenerB1.Addr().(*net.TCPAddr).Port,
+ listenerB2.Addr().(*net.TCPAddr).Port)
+}
+
+func TestGlobalPreferenceV4(t *testing.T) {
+ if globalV4 == nil {
+ t.Skip("no global IPv4 addresses configured")
+ return
+ }
+ t.Logf("when listening on %v, should prefer %v over %v", loopbackV4, loopbackV4, globalV4)
+ testPrefer(t, loopbackV4, loopbackV4, globalV4)
+ t.Logf("when listening on %v, should prefer %v over %v", loopbackV4, unspecV4, globalV4)
+ testPrefer(t, loopbackV4, unspecV4, globalV4)
+ t.Logf("when listening on %v, should prefer %v over %v", globalV4, unspecV4, loopbackV4)
+ testPrefer(t, globalV4, unspecV4, loopbackV4)
+}
+
+func TestGlobalPreferenceV6(t *testing.T) {
+ if globalV6 == nil {
+ t.Skip("no global IPv6 addresses configured")
+ return
+ }
+ testPrefer(t, loopbackV6, loopbackV6, globalV6)
+ testPrefer(t, loopbackV6, unspecV6, globalV6)
+
+ testPrefer(t, globalV6, unspecV6, loopbackV6)
+}
+
+func TestLoopbackPreference(t *testing.T) {
+ testPrefer(t, loopbackV4, loopbackV4, unspecV4)
+ testPrefer(t, loopbackV6, loopbackV6, unspecV6)
+}
+
+func testPrefer(t *testing.T, listen, prefer, avoid ma.Multiaddr) {
+ var trA Transport
+ var trB Transport
+ listenerA, err := trA.Listen(listen)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer listenerA.Close()
+
+ listenerB1, err := trB.Listen(avoid)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer listenerB1.Close()
+
+ listenerB2, err := trB.Listen(prefer)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer listenerB2.Close()
+
+ dialOne(t, &trB, listenerA, listenerB2.Addr().(*net.TCPAddr).Port)
+}
+
+func TestV6V4(t *testing.T) {
+ if runtime.GOOS == "darwin" {
+ t.Skip("This test is failing on OSX: https://github.com/libp2p/go-reuseport-transport/issues/40")
+ }
+ testUseFirst(t, loopbackV4, loopbackV4, loopbackV6)
+ testUseFirst(t, loopbackV6, loopbackV6, loopbackV4)
+}
+
+func TestGlobalToGlobal(t *testing.T) {
+ if globalV4 == nil {
+ t.Skip("no globalV4 addresses configured")
+ return
+ }
+ testUseFirst(t, globalV4, globalV4, loopbackV4)
+ testUseFirst(t, globalV6, globalV6, loopbackV6)
+}
+
+func testUseFirst(t *testing.T, _, _, _ ma.Multiaddr) {
+ var trA Transport
+ var trB Transport
+ listenerA, err := trA.Listen(globalV4)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer listenerA.Close()
+
+ listenerB1, err := trB.Listen(loopbackV4)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer listenerB1.Close()
+
+ // It works (random port)
+ dialOne(t, &trB, listenerA)
+
+ listenerB2, err := trB.Listen(globalV4)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer listenerB2.Close()
+
+ // Uses globalV4 port.
+ dialOne(t, &trB, listenerA, listenerB2.Addr().(*net.TCPAddr).Port)
+
+ // Closing the listener should reset the dialer.
+ listenerB2.Close()
+
+ // It still works.
+ dialOne(t, &trB, listenerA)
+}
+
+func TestDuplicateGlobal(t *testing.T) {
+ if globalV4 == nil {
+ t.Skip("no globalV4 addresses configured")
+ return
+ }
+
+ var trA Transport
+ var trB Transport
+ listenerA, err := trA.Listen(globalV4)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer listenerA.Close()
+
+ listenerB1, err := trB.Listen(globalV4)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer listenerB1.Close()
+
+ listenerB2, err := trB.Listen(globalV4)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer listenerB2.Close()
+
+ // Check which port we're using
+ port := dialOne(t, &trB, listenerA)
+
+ // Check consistency
+ for i := 0; i < 10; i++ {
+ dialOne(t, &trB, listenerA, port)
+ }
+}
diff --git a/p2p/net/swarm/black_hole_detector.go b/p2p/net/swarm/black_hole_detector.go
new file mode 100644
index 0000000000..0e006b9c73
--- /dev/null
+++ b/p2p/net/swarm/black_hole_detector.go
@@ -0,0 +1,271 @@
+package swarm
+
+import (
+ "fmt"
+ "sync"
+
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
+type BlackHoleState int
+
+const (
+ blackHoleStateProbing BlackHoleState = iota
+ blackHoleStateAllowed
+ blackHoleStateBlocked
+)
+
+func (st BlackHoleState) String() string {
+ switch st {
+ case blackHoleStateProbing:
+ return "Probing"
+ case blackHoleStateAllowed:
+ return "Allowed"
+ case blackHoleStateBlocked:
+ return "Blocked"
+ default:
+ return fmt.Sprintf("Unknown %d", st)
+ }
+}
+
+// BlackHoleSuccessCounter provides black hole filtering for dials. This filter should be used in
+// concert with a UDP or IPv6 address filter to detect UDP or IPv6 black holes. In a black-holed
+// environment, dial requests are refused: requests are blocked if the number of successes in the
+// last N dials is less than MinSuccesses.
+// If a request succeeds in Blocked state, the filter state is reset and N subsequent requests are
+// allowed before reevaluating black hole state. Dials cancelled because some other concurrent dial
+// succeeded are counted as failures. A sufficiently large N prevents false negatives in such cases.
+type BlackHoleSuccessCounter struct {
+	// N is
+	// 1. the minimum number of completed dials required before evaluating black hole state, and
+	// 2. the interval, in requests, at which we probe the black hole state while blocked.
+ N int
+	// MinSuccesses is the minimum number of successes required in the last N dials
+	// to consider ourselves not blocked.
+ MinSuccesses int
+ // Name for the detector.
+ Name string
+
+ mu sync.Mutex
+	// requests counts the number of dial requests to peers. We handle requests at a
+	// peer level and record results at the individual address dial level.
+	requests int
+	// dialResults holds the outcomes of the last N dials. A successful dial is true.
+	dialResults []bool
+	// successes is the count of successful dials in dialResults
+ successes int
+ // state is the current state of the detector
+ state BlackHoleState
+}
+
+// RecordResult records the outcome of a dial. A successful dial in Blocked state will change the
+// state of the filter to Probing. A failed dial only blocks subsequent requests if the number of
+// successes over the last N outcomes drops below the filter's MinSuccesses.
+func (b *BlackHoleSuccessCounter) RecordResult(success bool) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+
+ if b.state == blackHoleStateBlocked && success {
+ // If the call succeeds in a blocked state we reset to allowed.
+ // This is better than slowly accumulating values till we cross the minSuccessFraction
+ // threshold since a black hole is a binary property.
+ b.reset()
+ return
+ }
+
+ if success {
+ b.successes++
+ }
+ b.dialResults = append(b.dialResults, success)
+
+ if len(b.dialResults) > b.N {
+ if b.dialResults[0] {
+ b.successes--
+ }
+ b.dialResults = b.dialResults[1:]
+ }
+
+ b.updateState()
+}
+
+// HandleRequest returns the result of applying the black hole filter for the request.
+func (b *BlackHoleSuccessCounter) HandleRequest() BlackHoleState {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+
+ b.requests++
+
+ if b.state == blackHoleStateAllowed {
+ return blackHoleStateAllowed
+ } else if b.state == blackHoleStateProbing || b.requests%b.N == 0 {
+ return blackHoleStateProbing
+ } else {
+ return blackHoleStateBlocked
+ }
+}
+
+func (b *BlackHoleSuccessCounter) reset() {
+ b.successes = 0
+ b.dialResults = b.dialResults[:0]
+ b.requests = 0
+ b.updateState()
+}
+
+func (b *BlackHoleSuccessCounter) updateState() {
+ st := b.state
+
+ if len(b.dialResults) < b.N {
+ b.state = blackHoleStateProbing
+ } else if b.successes >= b.MinSuccesses {
+ b.state = blackHoleStateAllowed
+ } else {
+ b.state = blackHoleStateBlocked
+ }
+
+ if st != b.state {
+ log.Debug("blackHoleDetector state changed", "name", b.Name, "from", st, "to", b.state)
+ }
+}
+
+func (b *BlackHoleSuccessCounter) State() BlackHoleState {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+
+ return b.state
+}
+
+type blackHoleInfo struct {
+ name string
+ state BlackHoleState
+ nextProbeAfter int
+ successFraction float64
+}
+
+func (b *BlackHoleSuccessCounter) info() blackHoleInfo {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+
+ nextProbeAfter := 0
+ if b.state == blackHoleStateBlocked {
+ nextProbeAfter = b.N - (b.requests % b.N)
+ }
+
+ successFraction := 0.0
+ if len(b.dialResults) > 0 {
+ successFraction = float64(b.successes) / float64(len(b.dialResults))
+ }
+
+ return blackHoleInfo{
+ name: b.Name,
+ state: b.state,
+ nextProbeAfter: nextProbeAfter,
+ successFraction: successFraction,
+ }
+}
+
+// blackHoleDetector provides UDP and IPv6 black hole detection using a `BlackHoleSuccessCounter` for each.
+// For details of the black hole detection logic see `BlackHoleSuccessCounter`.
+// In Read Only mode, the detector doesn't update the state of the underlying filters and refuses requests
+// when black hole state is unknown. This is useful for Swarms made specifically for services like
+// AutoNAT where we care about accurately reporting the reachability of a peer.
+//
+// Black hole filtering is done at a peer dial level to ensure that periodic probes to detect change
+// of the black hole state are actually dialed and are not skipped because of dial prioritisation
+// logic.
+type blackHoleDetector struct {
+ udp, ipv6 *BlackHoleSuccessCounter
+ mt MetricsTracer
+ readOnly bool
+}
+
+// FilterAddrs filters the peer's addresses removing black holed addresses
+func (d *blackHoleDetector) FilterAddrs(addrs []ma.Multiaddr) (valid []ma.Multiaddr, blackHoled []ma.Multiaddr) {
+ hasUDP, hasIPv6 := false, false
+ for _, a := range addrs {
+ if !manet.IsPublicAddr(a) {
+ continue
+ }
+ if isProtocolAddr(a, ma.P_UDP) {
+ hasUDP = true
+ }
+ if isProtocolAddr(a, ma.P_IP6) {
+ hasIPv6 = true
+ }
+ }
+
+ udpRes := blackHoleStateAllowed
+ if d.udp != nil && hasUDP {
+ udpRes = d.getFilterState(d.udp)
+ d.trackMetrics(d.udp)
+ }
+
+ ipv6Res := blackHoleStateAllowed
+ if d.ipv6 != nil && hasIPv6 {
+ ipv6Res = d.getFilterState(d.ipv6)
+ d.trackMetrics(d.ipv6)
+ }
+
+ blackHoled = make([]ma.Multiaddr, 0, len(addrs))
+ return ma.FilterAddrs(
+ addrs,
+ func(a ma.Multiaddr) bool {
+ if !manet.IsPublicAddr(a) {
+ return true
+ }
+ // allow all UDP addresses while probing irrespective of IPv6 black hole state
+ if udpRes == blackHoleStateProbing && isProtocolAddr(a, ma.P_UDP) {
+ return true
+ }
+ // allow all IPv6 addresses while probing irrespective of UDP black hole state
+ if ipv6Res == blackHoleStateProbing && isProtocolAddr(a, ma.P_IP6) {
+ return true
+ }
+
+ if udpRes == blackHoleStateBlocked && isProtocolAddr(a, ma.P_UDP) {
+ blackHoled = append(blackHoled, a)
+ return false
+ }
+ if ipv6Res == blackHoleStateBlocked && isProtocolAddr(a, ma.P_IP6) {
+ blackHoled = append(blackHoled, a)
+ return false
+ }
+ return true
+ },
+ ), blackHoled
+}
+
+// RecordResult updates the state of the relevant BlackHoleSuccessCounters for addr
+func (d *blackHoleDetector) RecordResult(addr ma.Multiaddr, success bool) {
+ if d.readOnly || !manet.IsPublicAddr(addr) {
+ return
+ }
+ if d.udp != nil && isProtocolAddr(addr, ma.P_UDP) {
+ d.udp.RecordResult(success)
+ d.trackMetrics(d.udp)
+ }
+ if d.ipv6 != nil && isProtocolAddr(addr, ma.P_IP6) {
+ d.ipv6.RecordResult(success)
+ d.trackMetrics(d.ipv6)
+ }
+}
+
+func (d *blackHoleDetector) getFilterState(f *BlackHoleSuccessCounter) BlackHoleState {
+ if d.readOnly {
+ if f.State() != blackHoleStateAllowed {
+ return blackHoleStateBlocked
+ }
+ return blackHoleStateAllowed
+ }
+ return f.HandleRequest()
+}
+
+func (d *blackHoleDetector) trackMetrics(f *BlackHoleSuccessCounter) {
+ if d.readOnly || d.mt == nil {
+ return
+ }
+ // Track metrics only in non readOnly state
+ info := f.info()
+ d.mt.UpdatedBlackHoleSuccessCounter(info.name, info.state, info.nextProbeAfter, info.successFraction)
+}
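
A hedged walk through the counter's state machine, as it would run inside this package; `sketchCounter` is a hypothetical illustration, the N and MinSuccesses values are arbitrary, and an `fmt` import is assumed:

```go
// sketchCounter is a hypothetical illustration, not part of this patch.
func sketchCounter() {
	c := &BlackHoleSuccessCounter{N: 10, MinSuccesses: 2, Name: "udp"}

	// The first N completed dials are all probes.
	for i := 0; i < 10; i++ {
		c.HandleRequest() // returns Probing while the window is unfilled
		c.RecordResult(false)
	}

	// Zero successes in the window: requests are now blocked,
	// except that every Nth request is let through as a probe.
	fmt.Println(c.State()) // Blocked

	// A single success while blocked resets the window,
	// so the next N requests probe again.
	c.RecordResult(true)
	fmt.Println(c.State()) // Probing
}
```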
diff --git a/p2p/net/swarm/black_hole_detector_test.go b/p2p/net/swarm/black_hole_detector_test.go
new file mode 100644
index 0000000000..1d59eb544c
--- /dev/null
+++ b/p2p/net/swarm/black_hole_detector_test.go
@@ -0,0 +1,244 @@
+package swarm
+
+import (
+ "fmt"
+ "testing"
+
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/stretchr/testify/require"
+)
+
+func TestBlackHoleSuccessCounterReset(t *testing.T) {
+ n := 10
+ bhf := &BlackHoleSuccessCounter{N: n, MinSuccesses: 2, Name: "test"}
+ // calls up to n should be probing
+ for i := 1; i <= n; i++ {
+ if bhf.HandleRequest() != blackHoleStateProbing {
+ t.Fatalf("expected calls up to n to be probes")
+ }
+ if bhf.State() != blackHoleStateProbing {
+ t.Fatalf("expected state to be probing got %s", bhf.State())
+ }
+ bhf.RecordResult(false)
+ }
+
+ // after threshold calls every nth call should be a probe
+ for i := n + 1; i < 42; i++ {
+ result := bhf.HandleRequest()
+ if (i%n == 0 && result != blackHoleStateProbing) || (i%n != 0 && result != blackHoleStateBlocked) {
+ t.Fatalf("expected every nth dial to be a probe")
+ }
+ if bhf.State() != blackHoleStateBlocked {
+ t.Fatalf("expected state to be blocked, got %s", bhf.State())
+ }
+ }
+
+ bhf.RecordResult(true)
+ // check if calls up to n are probes again
+ for i := 0; i < n; i++ {
+ if bhf.HandleRequest() != blackHoleStateProbing {
+ t.Fatalf("expected black hole detector state to reset after success")
+ }
+ if bhf.State() != blackHoleStateProbing {
+ t.Fatalf("expected state to be probing got %s", bhf.State())
+ }
+ bhf.RecordResult(false)
+ }
+
+ // next call should be blocked
+ if bhf.HandleRequest() != blackHoleStateBlocked {
+ t.Fatalf("expected dial to be blocked")
+ }
+ if bhf.State() != blackHoleStateBlocked {
+ t.Fatalf("expected state to be blocked, got %s", bhf.State())
+ }
+}
+
+func TestBlackHoleSuccessCounterSuccessFraction(t *testing.T) {
+ n := 10
+ tests := []struct {
+ minSuccesses, successes int
+ result BlackHoleState
+ }{
+ {minSuccesses: 5, successes: 5, result: blackHoleStateAllowed},
+ {minSuccesses: 3, successes: 3, result: blackHoleStateAllowed},
+ {minSuccesses: 5, successes: 4, result: blackHoleStateBlocked},
+ {minSuccesses: 5, successes: 7, result: blackHoleStateAllowed},
+ {minSuccesses: 3, successes: 1, result: blackHoleStateBlocked},
+ {minSuccesses: 0, successes: 0, result: blackHoleStateAllowed},
+ {minSuccesses: 10, successes: 10, result: blackHoleStateAllowed},
+ }
+ for i, tc := range tests {
+ t.Run(fmt.Sprintf("case-%d", i), func(t *testing.T) {
+ bhf := BlackHoleSuccessCounter{N: n, MinSuccesses: tc.minSuccesses}
+ for i := 0; i < tc.successes; i++ {
+ bhf.RecordResult(true)
+ }
+ for i := 0; i < n-tc.successes; i++ {
+ bhf.RecordResult(false)
+ }
+ got := bhf.HandleRequest()
+ if got != tc.result {
+ t.Fatalf("expected %d got %d", tc.result, got)
+ }
+ })
+ }
+}
+
+func TestBlackHoleDetectorInApplicableAddress(t *testing.T) {
+ udpF := &BlackHoleSuccessCounter{N: 10, MinSuccesses: 5}
+ ipv6F := &BlackHoleSuccessCounter{N: 10, MinSuccesses: 5}
+ bhd := &blackHoleDetector{udp: udpF, ipv6: ipv6F}
+ addrs := []ma.Multiaddr{
+ ma.StringCast("/ip4/1.2.3.4/tcp/1234"),
+ ma.StringCast("/ip4/1.2.3.4/tcp/1233"),
+ ma.StringCast("/ip6/::1/udp/1234/quic-v1"),
+ ma.StringCast("/ip4/192.168.1.5/udp/1234/quic-v1"),
+ }
+ for i := 0; i < 1000; i++ {
+ filteredAddrs, _ := bhd.FilterAddrs(addrs)
+ require.ElementsMatch(t, addrs, filteredAddrs)
+ for j := 0; j < len(addrs); j++ {
+ bhd.RecordResult(addrs[j], false)
+ }
+ }
+}
+
+func TestBlackHoleDetectorUDPDisabled(t *testing.T) {
+ ipv6F := &BlackHoleSuccessCounter{N: 10, MinSuccesses: 5}
+ bhd := &blackHoleDetector{ipv6: ipv6F}
+ publicAddr := ma.StringCast("/ip4/1.2.3.4/udp/1234/quic-v1")
+ privAddr := ma.StringCast("/ip4/192.168.1.5/udp/1234/quic-v1")
+ for i := 0; i < 100; i++ {
+ bhd.RecordResult(publicAddr, false)
+ }
+ wantAddrs := []ma.Multiaddr{publicAddr, privAddr}
+ wantRemovedAddrs := make([]ma.Multiaddr, 0)
+
+ gotAddrs, gotRemovedAddrs := bhd.FilterAddrs(wantAddrs)
+ require.ElementsMatch(t, wantAddrs, gotAddrs)
+ require.ElementsMatch(t, wantRemovedAddrs, gotRemovedAddrs)
+}
+
+func TestBlackHoleDetectorIPv6Disabled(t *testing.T) {
+ udpF := &BlackHoleSuccessCounter{N: 10, MinSuccesses: 5}
+ bhd := &blackHoleDetector{udp: udpF}
+ publicAddr := ma.StringCast("/ip6/2001::1/tcp/1234")
+ privAddr := ma.StringCast("/ip6/::1/tcp/1234")
+ for i := 0; i < 100; i++ {
+ bhd.RecordResult(publicAddr, false)
+ }
+
+ wantAddrs := []ma.Multiaddr{publicAddr, privAddr}
+ wantRemovedAddrs := make([]ma.Multiaddr, 0)
+
+ gotAddrs, gotRemovedAddrs := bhd.FilterAddrs(wantAddrs)
+ require.ElementsMatch(t, wantAddrs, gotAddrs)
+ require.ElementsMatch(t, wantRemovedAddrs, gotRemovedAddrs)
+}
+
+func TestBlackHoleDetectorProbes(t *testing.T) {
+ bhd := &blackHoleDetector{
+ udp: &BlackHoleSuccessCounter{N: 2, MinSuccesses: 1, Name: "udp"},
+ ipv6: &BlackHoleSuccessCounter{N: 3, MinSuccesses: 1, Name: "ipv6"},
+ }
+ udp6Addr := ma.StringCast("/ip6/2001::1/udp/1234/quic-v1")
+ addrs := []ma.Multiaddr{udp6Addr}
+ for i := 0; i < 3; i++ {
+ bhd.RecordResult(udp6Addr, false)
+ }
+ for i := 1; i < 100; i++ {
+ filteredAddrs, _ := bhd.FilterAddrs(addrs)
+ if i%2 == 0 || i%3 == 0 {
+ if len(filteredAddrs) == 0 {
+ t.Fatalf("expected probe to be allowed irrespective of the state of other black hole filter")
+ }
+ } else {
+ if len(filteredAddrs) != 0 {
+ t.Fatalf("expected dial to be blocked %s", filteredAddrs)
+ }
+ }
+ }
+}
+
+func TestBlackHoleDetectorAddrFiltering(t *testing.T) {
+ udp6Pub := ma.StringCast("/ip6/2001::1/udp/1234/quic-v1")
+ udp6Pri := ma.StringCast("/ip6/::1/udp/1234/quic-v1")
+ udp4Pub := ma.StringCast("/ip4/1.2.3.4/udp/1234/quic-v1")
+ udp4Pri := ma.StringCast("/ip4/192.168.1.5/udp/1234/quic-v1")
+ tcp6Pub := ma.StringCast("/ip6/2001::1/tcp/1234/quic-v1")
+ tcp6Pri := ma.StringCast("/ip6/::1/tcp/1234/quic-v1")
+ tcp4Pub := ma.StringCast("/ip4/1.2.3.4/tcp/1234/quic-v1")
+ tcp4Pri := ma.StringCast("/ip4/192.168.1.5/tcp/1234/quic-v1")
+
+ makeBHD := func(udpBlocked, ipv6Blocked bool) *blackHoleDetector {
+ bhd := &blackHoleDetector{
+ udp: &BlackHoleSuccessCounter{N: 100, MinSuccesses: 10, Name: "udp"},
+ ipv6: &BlackHoleSuccessCounter{N: 100, MinSuccesses: 10, Name: "ipv6"},
+ }
+ for i := 0; i < 100; i++ {
+ bhd.RecordResult(udp4Pub, !udpBlocked)
+ }
+ for i := 0; i < 100; i++ {
+ bhd.RecordResult(tcp6Pub, !ipv6Blocked)
+ }
+ return bhd
+ }
+
+ allInput := []ma.Multiaddr{udp6Pub, udp6Pri, udp4Pub, udp4Pri, tcp6Pub, tcp6Pri,
+ tcp4Pub, tcp4Pri}
+
+ udpBlockedOutput := []ma.Multiaddr{udp6Pri, udp4Pri, tcp6Pub, tcp6Pri, tcp4Pub, tcp4Pri}
+ udpPublicAddrs := []ma.Multiaddr{udp6Pub, udp4Pub}
+ bhd := makeBHD(true, false)
+ gotAddrs, gotRemovedAddrs := bhd.FilterAddrs(allInput)
+ require.ElementsMatch(t, udpBlockedOutput, gotAddrs)
+ require.ElementsMatch(t, udpPublicAddrs, gotRemovedAddrs)
+
+ ip6BlockedOutput := []ma.Multiaddr{udp6Pri, udp4Pub, udp4Pri, tcp6Pri, tcp4Pub, tcp4Pri}
+ ip6PublicAddrs := []ma.Multiaddr{udp6Pub, tcp6Pub}
+ bhd = makeBHD(false, true)
+ gotAddrs, gotRemovedAddrs = bhd.FilterAddrs(allInput)
+ require.ElementsMatch(t, ip6BlockedOutput, gotAddrs)
+ require.ElementsMatch(t, ip6PublicAddrs, gotRemovedAddrs)
+
+ bothBlockedOutput := []ma.Multiaddr{udp6Pri, udp4Pri, tcp6Pri, tcp4Pub, tcp4Pri}
+ bothPublicAddrs := []ma.Multiaddr{udp6Pub, tcp6Pub, udp4Pub}
+ bhd = makeBHD(true, true)
+ gotAddrs, gotRemovedAddrs = bhd.FilterAddrs(allInput)
+ require.ElementsMatch(t, bothBlockedOutput, gotAddrs)
+ require.ElementsMatch(t, bothPublicAddrs, gotRemovedAddrs)
+}
+
+func TestBlackHoleDetectorReadOnlyMode(t *testing.T) {
+ udpF := &BlackHoleSuccessCounter{N: 10, MinSuccesses: 5}
+ ipv6F := &BlackHoleSuccessCounter{N: 10, MinSuccesses: 5}
+ bhd := &blackHoleDetector{udp: udpF, ipv6: ipv6F, readOnly: true}
+ publicAddr := ma.StringCast("/ip4/1.2.3.4/udp/1234/quic-v1")
+ privAddr := ma.StringCast("/ip6/::1/tcp/1234")
+ for i := 0; i < 100; i++ {
+ bhd.RecordResult(publicAddr, true)
+ }
+ allAddr := []ma.Multiaddr{privAddr, publicAddr}
+ // public addr filtered because state is probing
+ wantAddrs := []ma.Multiaddr{privAddr}
+ wantRemovedAddrs := []ma.Multiaddr{publicAddr}
+
+ gotAddrs, gotRemovedAddrs := bhd.FilterAddrs(allAddr)
+ require.ElementsMatch(t, wantAddrs, gotAddrs)
+ require.ElementsMatch(t, wantRemovedAddrs, gotRemovedAddrs)
+
+ // a non readonly shared state black hole detector
+ nbhd := &blackHoleDetector{udp: bhd.udp, ipv6: bhd.ipv6, readOnly: false}
+ for i := 0; i < 100; i++ {
+ nbhd.RecordResult(publicAddr, true)
+ }
+ // no addresses filtered because state is allowed
+ wantAddrs = []ma.Multiaddr{privAddr, publicAddr}
+ wantRemovedAddrs = []ma.Multiaddr{}
+
+ gotAddrs, gotRemovedAddrs = bhd.FilterAddrs(allAddr)
+ require.ElementsMatch(t, wantAddrs, gotAddrs)
+ require.ElementsMatch(t, wantRemovedAddrs, gotRemovedAddrs)
+}
diff --git a/p2p/net/swarm/clock.go b/p2p/net/swarm/clock.go
new file mode 100644
index 0000000000..6b63ac9c87
--- /dev/null
+++ b/p2p/net/swarm/clock.go
@@ -0,0 +1,49 @@
+package swarm
+
+import "time"
+
+// InstantTimer is a timer that triggers at some instant rather than after some duration
+type InstantTimer interface {
+ Reset(d time.Time) bool
+ Stop() bool
+ Ch() <-chan time.Time
+}
+
+// Clock is a clock that can create timers that trigger at some
+// instant rather than after some duration
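+//
+// A sketch of intended use (illustrative; a fake clock used in tests can
+// implement the same interface):
+//
+//	timer := clock.InstantTimer(clock.Now().Add(time.Second))
+//	defer timer.Stop()
+//	<-timer.Ch() // fires at the requested instant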
+type Clock interface {
+ Now() time.Time
+ Since(t time.Time) time.Duration
+ InstantTimer(when time.Time) InstantTimer
+}
+
+type RealTimer struct{ t *time.Timer }
+
+var _ InstantTimer = (*RealTimer)(nil)
+
+func (t RealTimer) Ch() <-chan time.Time {
+ return t.t.C
+}
+
+func (t RealTimer) Reset(d time.Time) bool {
+ return t.t.Reset(time.Until(d))
+}
+
+func (t RealTimer) Stop() bool {
+ return t.t.Stop()
+}
+
+type RealClock struct{}
+
+var _ Clock = RealClock{}
+
+func (RealClock) Now() time.Time {
+ return time.Now()
+}
+func (RealClock) Since(t time.Time) time.Duration {
+ return time.Since(t)
+}
+func (RealClock) InstantTimer(when time.Time) InstantTimer {
+ t := time.NewTimer(time.Until(when))
+ return &RealTimer{t}
+}
diff --git a/p2p/net/swarm/connectedness_event_emitter.go b/p2p/net/swarm/connectedness_event_emitter.go
new file mode 100644
index 0000000000..07db583fc9
--- /dev/null
+++ b/p2p/net/swarm/connectedness_event_emitter.go
@@ -0,0 +1,143 @@
+package swarm
+
+import (
+ "context"
+ "sync"
+
+ "github.com/libp2p/go-libp2p/core/event"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+)
+
+// connectednessEventEmitter emits PeerConnectednessChanged events.
+// We ensure that, for any peer we have connected to, we always send at least one NotConnected
+// event after the peer disconnects. This is because peers can observe a connection before they
+// are notified of it by a PeerConnectednessChanged event.
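+//
+// A rough usage sketch (illustrative):
+//
+//	c := newConnectednessEventEmitter(s.Connectedness, emitter)
+//	c.AddConn(p)    // after a connection to p has been added
+//	c.RemoveConn(p) // after a connection to p has been closed
+//	c.Close()       // drains pending notifications before returning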
+type connectednessEventEmitter struct {
+ mx sync.RWMutex
+ // newConns is the channel that holds the peerIDs we recently connected to
+ newConns chan peer.ID
+ removeConnsMx sync.Mutex
+ // removeConns is a slice of peerIDs we have recently closed connections to
+ removeConns []peer.ID
+ // lastEvent is the last connectedness event sent for a particular peer.
+ lastEvent map[peer.ID]network.Connectedness
+ // connectedness is the function that gives the peer's current connectedness state
+ connectedness func(peer.ID) network.Connectedness
+ // emitter is the PeerConnectednessChanged event emitter
+ emitter event.Emitter
+ wg sync.WaitGroup
+ removeConnNotif chan struct{}
+ ctx context.Context
+ cancel context.CancelFunc
+}
+
+func newConnectednessEventEmitter(connectedness func(peer.ID) network.Connectedness, emitter event.Emitter) *connectednessEventEmitter {
+ ctx, cancel := context.WithCancel(context.Background())
+ c := &connectednessEventEmitter{
+ newConns: make(chan peer.ID, 32),
+ lastEvent: make(map[peer.ID]network.Connectedness),
+ removeConnNotif: make(chan struct{}, 1),
+ connectedness: connectedness,
+ emitter: emitter,
+ ctx: ctx,
+ cancel: cancel,
+ }
+ c.wg.Add(1)
+ go c.runEmitter()
+ return c
+}
+
+func (c *connectednessEventEmitter) AddConn(p peer.ID) {
+ c.mx.RLock()
+ defer c.mx.RUnlock()
+ if c.ctx.Err() != nil {
+ return
+ }
+
+ c.newConns <- p
+}
+
+func (c *connectednessEventEmitter) RemoveConn(p peer.ID) {
+ c.mx.RLock()
+ defer c.mx.RUnlock()
+ if c.ctx.Err() != nil {
+ return
+ }
+
+ c.removeConnsMx.Lock()
+ // This queue is roughly bounded by the total number of added connections we
+ // have. If consumers of connectedness events are slow, we apply
+ // backpressure to AddConn operations.
+ //
+ // We purposefully don't block/backpressure here to avoid deadlocks, since it's
+ // reasonable for a consumer of the event to want to remove a connection.
+ c.removeConns = append(c.removeConns, p)
+
+ c.removeConnsMx.Unlock()
+
+ select {
+ case c.removeConnNotif <- struct{}{}:
+ default:
+ }
+}
+
+func (c *connectednessEventEmitter) Close() {
+ c.cancel()
+ c.wg.Wait()
+}
+
+func (c *connectednessEventEmitter) runEmitter() {
+ defer c.wg.Done()
+ for {
+ select {
+ case p := <-c.newConns:
+ c.notifyPeer(p, true)
+ case <-c.removeConnNotif:
+ c.sendConnRemovedNotifications()
+ case <-c.ctx.Done():
+ c.mx.Lock() // Wait for all pending AddConn & RemoveConn operations to complete
+ defer c.mx.Unlock()
+ for {
+ select {
+ case p := <-c.newConns:
+ c.notifyPeer(p, true)
+ case <-c.removeConnNotif:
+ c.sendConnRemovedNotifications()
+ default:
+ return
+ }
+ }
+ }
+ }
+}
+
+// notifyPeer sends the peer connectedness event using the emitter.
+// Use forceNotConnectedEvent = true to send a NotConnected event even if
+// no Connected event was sent for this peer.
+// In case a peer disconnects before we have sent the Connected event, we still
+// send the NotConnected event, because a connection to the peer can be observed
+// in such cases.
+func (c *connectednessEventEmitter) notifyPeer(p peer.ID, forceNotConnectedEvent bool) {
+ oldState := c.lastEvent[p]
+ c.lastEvent[p] = c.connectedness(p)
+ if c.lastEvent[p] == network.NotConnected {
+ delete(c.lastEvent, p)
+ }
+ if (forceNotConnectedEvent && c.lastEvent[p] == network.NotConnected) || c.lastEvent[p] != oldState {
+ c.emitter.Emit(event.EvtPeerConnectednessChanged{
+ Peer: p,
+ Connectedness: c.lastEvent[p],
+ })
+ }
+}
+
+func (c *connectednessEventEmitter) sendConnRemovedNotifications() {
+ c.removeConnsMx.Lock()
+ removeConns := c.removeConns
+ c.removeConns = nil
+ c.removeConnsMx.Unlock()
+ for _, p := range removeConns {
+ c.notifyPeer(p, false)
+ }
+}
diff --git a/p2p/net/swarm/dial_error.go b/p2p/net/swarm/dial_error.go
new file mode 100644
index 0000000000..faa1047d54
--- /dev/null
+++ b/p2p/net/swarm/dial_error.go
@@ -0,0 +1,82 @@
+package swarm
+
+import (
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+// maxDialDialErrors is the maximum number of dial errors we record
+const maxDialDialErrors = 16
+
+// DialError is the error type returned when dialing.
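+// It unwraps to its Cause and to each recorded TransportError, so callers can
+// inspect individual address failures with errors.Is / errors.As, for example
+// (illustrative, from a caller's perspective):
+//
+//	var de *swarm.DialError
+//	if errors.As(err, &de) {
+//		for _, te := range de.DialErrors {
+//			fmt.Println(te.Address, te.Cause)
+//		}
+//	}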
+type DialError struct {
+ Peer peer.ID
+ DialErrors []TransportError
+ Cause error
+ Skipped int
+}
+
+func (e *DialError) Timeout() bool {
+ return os.IsTimeout(e.Cause)
+}
+
+func (e *DialError) recordErr(addr ma.Multiaddr, err error) {
+ if len(e.DialErrors) >= maxDialDialErrors {
+ e.Skipped++
+ return
+ }
+ e.DialErrors = append(e.DialErrors, TransportError{Address: addr, Cause: err})
+}
+
+func (e *DialError) Error() string {
+ var builder strings.Builder
+ fmt.Fprintf(&builder, "failed to dial %s:", e.Peer)
+ if e.Cause != nil {
+ fmt.Fprintf(&builder, " %s", e.Cause)
+ }
+ for _, te := range e.DialErrors {
+ fmt.Fprintf(&builder, "\n * [%s] %s", te.Address, te.Cause)
+ }
+ if e.Skipped > 0 {
+ fmt.Fprintf(&builder, "\n ... skipping %d errors ...", e.Skipped)
+ }
+ return builder.String()
+}
+
+func (e *DialError) Unwrap() []error {
+ if e == nil {
+ return nil
+ }
+
+ errs := make([]error, 0, len(e.DialErrors)+1)
+ if e.Cause != nil {
+ errs = append(errs, e.Cause)
+ }
+ for i := 0; i < len(e.DialErrors); i++ {
+ errs = append(errs, &e.DialErrors[i])
+ }
+ return errs
+}
+
+var _ error = (*DialError)(nil)
+
+// TransportError is the error returned when dialing a specific address.
+type TransportError struct {
+ Address ma.Multiaddr
+ Cause error
+}
+
+func (e *TransportError) Error() string {
+ return fmt.Sprintf("failed to dial %s: %s", e.Address, e.Cause)
+}
+
+func (e *TransportError) Unwrap() error {
+ return e.Cause
+}
+
+var _ error = (*TransportError)(nil)
diff --git a/p2p/net/swarm/dial_error_test.go b/p2p/net/swarm/dial_error_test.go
new file mode 100644
index 0000000000..3231d6f010
--- /dev/null
+++ b/p2p/net/swarm/dial_error_test.go
@@ -0,0 +1,51 @@
+package swarm
+
+import (
+ "net"
+ "os"
+ "testing"
+
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/stretchr/testify/require"
+)
+
+func TestTransportError(t *testing.T) {
+ aa := ma.StringCast("/ip4/1.2.3.4/tcp/1234")
+ te := &TransportError{Address: aa, Cause: ErrDialBackoff}
+ require.ErrorIs(t, te, ErrDialBackoff, "TransportError should implement Unwrap")
+}
+
+func TestDialError(t *testing.T) {
+ de := &DialError{Peer: "pid", Cause: ErrGaterDisallowedConnection}
+ require.ErrorIs(t, de, ErrGaterDisallowedConnection,
+ "DialError Unwrap should handle DialError.Cause")
+ require.ErrorIs(t, de, de, "DialError Unwrap should handle match to self")
+
+ aa := ma.StringCast("/ip4/1.2.3.4/tcp/1234")
+ ab := ma.StringCast("/ip6/1::1/udp/1234/quic-v1")
+ de = &DialError{
+ Peer: "pid",
+ DialErrors: []TransportError{
+ {Address: aa, Cause: ErrDialBackoff}, {Address: ab, Cause: ErrNoTransport},
+ },
+ }
+ require.ErrorIs(t, de, ErrDialBackoff, "DialError.Unwrap should traverse TransportErrors")
+ require.ErrorIs(t, de, ErrNoTransport, "DialError.Unwrap should traverse TransportErrors")
+
+ de = &DialError{
+ Peer: "pid",
+ DialErrors: []TransportError{{Address: ab, Cause: ErrNoTransport},
+ // wrapped error 2 levels deep
+ {Address: aa, Cause: &net.OpError{
+ Op: "write",
+ Net: "tcp",
+ Err: &os.SyscallError{
+ Syscall: "connect",
+ Err: os.ErrPermission,
+ },
+ }},
+ },
+ }
+ require.ErrorIs(t, de, os.ErrPermission, "DialError.Unwrap should traverse TransportErrors")
+}
diff --git a/p2p/net/swarm/dial_ranker.go b/p2p/net/swarm/dial_ranker.go
new file mode 100644
index 0000000000..154a0344a1
--- /dev/null
+++ b/p2p/net/swarm/dial_ranker.go
@@ -0,0 +1,283 @@
+package swarm
+
+import (
+ "sort"
+ "strconv"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
+// The 250ms value is from the Happy Eyeballs RFC 8305. It is a rough estimate of 1 RTT.
+const (
+ // duration by which TCP dials are delayed relative to the last QUIC dial
+ PublicTCPDelay = 250 * time.Millisecond
+ PrivateTCPDelay = 30 * time.Millisecond
+
+ // duration by which QUIC dials are delayed relative to previous QUIC dial
+ PublicQUICDelay = 250 * time.Millisecond
+ PrivateQUICDelay = 30 * time.Millisecond
+
+ // RelayDelay is the duration by which relay dials are delayed relative to direct addresses
+ RelayDelay = 500 * time.Millisecond
+
+ // delay for other transport addresses. This will apply to /webrtc-direct.
+ PublicOtherDelay = 1 * time.Second
+ PrivateOtherDelay = 100 * time.Millisecond
+)
+
+// NoDelayDialRanker ranks addresses with no delay. This is useful for simultaneous connect requests.
+func NoDelayDialRanker(addrs []ma.Multiaddr) []network.AddrDelay {
+ return getAddrDelay(addrs, 0, 0, 0, 0)
+}
+
+// DefaultDialRanker determines the ranking of outgoing connection attempts.
+//
+// Addresses are grouped into three distinct groups:
+//
+// - private addresses (localhost and local networks (RFC 1918))
+// - public addresses
+// - relay addresses
+//
+// Within each group, the addresses are ranked according to the ranking logic described below.
+// We then dial addresses according to this ranking, with short timeouts applied between dial attempts.
+// This ranking logic dramatically reduces the number of simultaneous dial attempts, while introducing
+// no additional latency in the vast majority of cases.
+//
+// Private and public address groups are dialed in parallel.
+// Dialing relay addresses is delayed by 500 ms if we have any non-relay alternatives.
+//
+// Within each group (private, public, relay addresses) we apply the following ranking logic:
+//
+// 1. If both IPv6 QUIC and IPv4 QUIC addresses are present, we do a Happy Eyeballs RFC 8305 style ranking.
+// First dial the IPv6 QUIC address with the lowest port. After this we dial the IPv4 QUIC address with
+// the lowest port delayed by 250ms (PublicQUICDelay) for public addresses, and 30ms (PrivateQUICDelay)
+// for local addresses. After this we dial all the rest of the addresses delayed by 250ms (PublicQUICDelay)
+// for public addresses, and 30ms (PrivateQUICDelay) for local addresses.
+// 2. If only one of QUIC IPv6 or QUIC IPv4 addresses are present, dial the QUIC address with the lowest port
+// first. After this we dial the rest of the QUIC addresses delayed by 250ms (PublicQUICDelay) for public
+// addresses, and 30ms (PrivateQUICDelay) for local addresses.
+// 3. If a QUIC or WebTransport address is present, TCP addresses dials are delayed relative to the last QUIC dial:
+// We prefer to end up with a QUIC connection. For public addresses, the delay introduced is 250ms (PublicTCPDelay),
+// and for private addresses 30ms (PrivateTCPDelay).
+// 4. For TCP addresses we follow a strategy similar to QUIC, with an optimisation for handling the long TCP
+// handshake time described in point 6. If both IPv6 TCP and IPv4 TCP addresses are present, we do a Happy Eyeballs
+// style ranking. First dial the IPv6 TCP address with the lowest port. After this, dial the IPv4 TCP address
+// with the lowest port delayed by 250ms (PublicTCPDelay) for public addresses, and 30ms (PrivateTCPDelay)
+// for local addresses. After this we dial all the rest of the addresses delayed by 250ms (PublicTCPDelay) for
+// public addresses, and 30ms (PrivateTCPDelay) for local addresses.
+// 5. If only one of TCP IPv6 or TCP IPv4 addresses are present, dial the TCP address with the lowest port
+// first. After this we dial the rest of the TCP addresses delayed by 250ms (PublicTCPDelay) for public
+// addresses, and 30ms (PrivateTCPDelay) for local addresses.
+// 6. When a TCP socket is connected and awaiting security and muxer upgrade, we stop new dials for 2*PublicTCPDelay
+// to allow for the upgrade to complete.
+// 7. WebRTC Direct, and other IP transport addresses are dialed 1 second after the last QUIC or TCP dial.
+// We only ever need to dial these if the peer doesn't have any other transport available, in which
+// case these are dialed immediately.
+//
+// We dial lowest ports first as they are more likely to be the listen port.
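+//
+// For example (illustrative): for a peer with one public QUIC IPv6 address, one
+// public QUIC IPv4 address, and one relay address, the dials are scheduled at
+// 0ms (QUIC IPv6), 250ms (QUIC IPv4), and 500ms (the relay, after RelayDelay).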
+func DefaultDialRanker(addrs []ma.Multiaddr) []network.AddrDelay {
+ relay, addrs := filterAddrs(addrs, isRelayAddr)
+ pvt, addrs := filterAddrs(addrs, manet.IsPrivateAddr)
+ public, addrs := filterAddrs(addrs, func(a ma.Multiaddr) bool { return isProtocolAddr(a, ma.P_IP4) || isProtocolAddr(a, ma.P_IP6) })
+
+ var relayOffset time.Duration
+ if len(public) > 0 {
+ // if there is a public direct address available delay relay dials
+ relayOffset = RelayDelay
+ }
+
+ res := make([]network.AddrDelay, 0, len(addrs))
+ res = append(res, getAddrDelay(pvt, PrivateTCPDelay, PrivateQUICDelay, PrivateOtherDelay, 0)...)
+ res = append(res, getAddrDelay(public, PublicTCPDelay, PublicQUICDelay, PublicOtherDelay, 0)...)
+ res = append(res, getAddrDelay(relay, PublicTCPDelay, PublicQUICDelay, PublicOtherDelay, relayOffset)...)
+ var maxDelay time.Duration
+ if len(res) > 0 {
+ maxDelay = res[len(res)-1].Delay
+ }
+
+ for i := 0; i < len(addrs); i++ {
+ res = append(res, network.AddrDelay{Addr: addrs[i], Delay: maxDelay + PublicOtherDelay})
+ }
+
+ return res
+}
+
+// getAddrDelay ranks a group of addresses according to the ranking logic explained in the
+// documentation for DefaultDialRanker.
+// offset is used to delay all addresses by a fixed duration. This is useful for delaying all relay
+// addresses relative to direct addresses.
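+//
+// For example (illustrative): given one public QUIC IPv6, one public QUIC IPv4,
+// and one public TCP address, the computed delays are 0, quicDelay (250ms), and
+// quicDelay+tcpDelay (500ms) respectively.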
+func getAddrDelay(addrs []ma.Multiaddr, tcpDelay time.Duration, quicDelay time.Duration,
+ otherDelay time.Duration, offset time.Duration) []network.AddrDelay {
+ if len(addrs) == 0 {
+ return nil
+ }
+
+ sort.Slice(addrs, func(i, j int) bool { return score(addrs[i]) < score(addrs[j]) })
+
+ // addrs is now sorted by (Transport, IPVersion). Reorder addrs for happy eyeballs dialing.
+ // For QUIC and TCP, if we have both IPv6 and IPv4 addresses, move the
+ // highest priority IPv4 address to the second position.
+ happyEyeballsQUIC := false
+ happyEyeballsTCP := false
+ // tcpStartIdx is the index of the first TCP Address
+ var tcpStartIdx int
+ {
+ i := 0
+ // If the first QUIC address is IPv6 move the first QUIC IPv4 address to second position
+ if isQUICAddr(addrs[0]) && isProtocolAddr(addrs[0], ma.P_IP6) {
+ for j := 1; j < len(addrs); j++ {
+ if isQUICAddr(addrs[j]) && isProtocolAddr(addrs[j], ma.P_IP4) {
+ // The first IPv4 address is at position j
+ // Move the jth element to position 1, shifting the affected elements
+ if j > 1 {
+ a := addrs[j]
+ copy(addrs[2:], addrs[1:j])
+ addrs[1] = a
+ }
+ happyEyeballsQUIC = true
+ i = j + 1
+ break
+ }
+ }
+ }
+
+ for tcpStartIdx = i; tcpStartIdx < len(addrs); tcpStartIdx++ {
+ if isProtocolAddr(addrs[tcpStartIdx], ma.P_TCP) {
+ break
+ }
+ }
+
+ // If the first TCP address is IPv6 move the first TCP IPv4 address to second position
+ if tcpStartIdx < len(addrs) && isProtocolAddr(addrs[tcpStartIdx], ma.P_IP6) {
+ for j := tcpStartIdx + 1; j < len(addrs); j++ {
+ if isProtocolAddr(addrs[j], ma.P_TCP) && isProtocolAddr(addrs[j], ma.P_IP4) {
+ // First TCP IPv4 address is at position j, move it to position tcpStartIdx+1
+ // which is the second priority TCP address
+ if j > tcpStartIdx+1 {
+ a := addrs[j]
+ copy(addrs[tcpStartIdx+2:], addrs[tcpStartIdx+1:j])
+ addrs[tcpStartIdx+1] = a
+ }
+ happyEyeballsTCP = true
+ break
+ }
+ }
+ }
+ }
+
+ res := make([]network.AddrDelay, 0, len(addrs))
+ var tcpFirstDialDelay time.Duration
+ var lastQUICOrTCPDelay time.Duration
+ for i, addr := range addrs {
+ var delay time.Duration
+ switch {
+ case isQUICAddr(addr):
+ // We dial an IPv6 address, then after quicDelay an IPv4
+ // address, then after a further quicDelay we dial the rest of the addresses.
+ if i == 1 {
+ delay = quicDelay
+ }
+ if i > 1 {
+ // If we have happy eyeballs for QUIC, dials after the second position
+ // will be delayed by 2*quicDelay
+ if happyEyeballsQUIC {
+ delay = 2 * quicDelay
+ } else {
+ delay = quicDelay
+ }
+ }
+ lastQUICOrTCPDelay = delay
+ tcpFirstDialDelay = delay + tcpDelay
+ case isProtocolAddr(addr, ma.P_TCP):
+ // We dial an IPv6 address, then after tcpDelay an IPv4
+ // address, then after a further tcpDelay we dial the rest of the addresses.
+ if i == tcpStartIdx+1 {
+ delay = tcpDelay
+ }
+ if i > tcpStartIdx+1 {
+ // If we have happy eyeballs for TCP, dials after the second position
+ // will be delayed by 2*tcpDelay
+ if happyEyeballsTCP {
+ delay = 2 * tcpDelay
+ } else {
+ delay = tcpDelay
+ }
+ }
+ delay += tcpFirstDialDelay
+ lastQUICOrTCPDelay = delay
+ // if it's neither a QUIC, WebTransport, nor TCP (incl. WebSocket) address
+ default:
+ delay = lastQUICOrTCPDelay + otherDelay
+ }
+ res = append(res, network.AddrDelay{Addr: addr, Delay: offset + delay})
+ }
+ return res
+}
+
+// score scores a multiaddress for dialing delay. Lower is better.
+// The lower 16 bits of the result are the port. Low ports are ranked higher because they're
+// more likely to be listen addresses.
+// The addresses are ranked as:
+// QUICv1 IPv6 > QUICdraft29 IPv6 > QUICv1 IPv4 > QUICdraft29 IPv4 >
+// WebTransport IPv6 > WebTransport IPv4 > TCP IPv6 > TCP IPv4
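+//
+// For example (illustrative): /ip4/1.2.3.4/udp/4001/quic-v1 scores
+// (1 << 18) + 4001, while /ip6/::1/udp/4001/quic-v1 scores just 4001, so the
+// IPv6 QUICv1 address ranks first.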
+func score(a ma.Multiaddr) int {
+ ip4Weight := 0
+ if isProtocolAddr(a, ma.P_IP4) {
+ ip4Weight = 1 << 18
+ }
+
+ if _, err := a.ValueForProtocol(ma.P_WEBTRANSPORT); err == nil {
+ p, _ := a.ValueForProtocol(ma.P_UDP)
+ pi, _ := strconv.Atoi(p)
+ return ip4Weight + (1 << 19) + pi
+ }
+ if _, err := a.ValueForProtocol(ma.P_QUIC); err == nil {
+ p, _ := a.ValueForProtocol(ma.P_UDP)
+ pi, _ := strconv.Atoi(p)
+ return ip4Weight + pi + (1 << 17)
+ }
+ if _, err := a.ValueForProtocol(ma.P_QUIC_V1); err == nil {
+ p, _ := a.ValueForProtocol(ma.P_UDP)
+ pi, _ := strconv.Atoi(p)
+ return ip4Weight + pi
+ }
+ if p, err := a.ValueForProtocol(ma.P_TCP); err == nil {
+ pi, _ := strconv.Atoi(p)
+ return ip4Weight + pi + (1 << 20)
+ }
+ if _, err := a.ValueForProtocol(ma.P_WEBRTC_DIRECT); err == nil {
+ return 1 << 21
+ }
+ return (1 << 30)
+}
+
+func isProtocolAddr(a ma.Multiaddr, p int) bool {
+ found := false
+ ma.ForEach(a, func(c ma.Component) bool {
+ if c.Protocol().Code == p {
+ found = true
+ return false
+ }
+ return true
+ })
+ return found
+}
+
+func isQUICAddr(a ma.Multiaddr) bool {
+ return isProtocolAddr(a, ma.P_QUIC) || isProtocolAddr(a, ma.P_QUIC_V1)
+}
+
+// filterAddrs partitions an address slice in place: filtered contains the addresses for
+// which f is true, rest the remaining ones. Both returned slices alias the input slice.
+func filterAddrs(addrs []ma.Multiaddr, f func(a ma.Multiaddr) bool) (filtered, rest []ma.Multiaddr) {
+ j := 0
+ for i := 0; i < len(addrs); i++ {
+ if f(addrs[i]) {
+ addrs[i], addrs[j] = addrs[j], addrs[i]
+ j++
+ }
+ }
+ return addrs[:j], addrs[j:]
+}
diff --git a/p2p/net/swarm/dial_ranker_test.go b/p2p/net/swarm/dial_ranker_test.go
new file mode 100644
index 0000000000..2f9a506c08
--- /dev/null
+++ b/p2p/net/swarm/dial_ranker_test.go
@@ -0,0 +1,357 @@
+package swarm
+
+import (
+ "fmt"
+ "sort"
+ "testing"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/test"
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+func sortAddrDelays(addrDelays []network.AddrDelay) {
+ sort.Slice(addrDelays, func(i, j int) bool {
+ if addrDelays[i].Delay == addrDelays[j].Delay {
+ return addrDelays[i].Addr.String() < addrDelays[j].Addr.String()
+ }
+ return addrDelays[i].Delay < addrDelays[j].Delay
+ })
+}
+
+func TestNoDelayDialRanker(t *testing.T) {
+ q1 := ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1")
+ q1v1 := ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1")
+ wt1 := ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1/webtransport/")
+ q2 := ma.StringCast("/ip4/1.2.3.4/udp/2/quic-v1")
+ q2v1 := ma.StringCast("/ip4/1.2.3.4/udp/2/quic-v1")
+ q3 := ma.StringCast("/ip4/1.2.3.4/udp/3/quic-v1")
+ q3v1 := ma.StringCast("/ip4/1.2.3.4/udp/3/quic-v1")
+ q4 := ma.StringCast("/ip4/1.2.3.4/udp/4/quic-v1")
+ t1 := ma.StringCast("/ip4/1.2.3.5/tcp/1/")
+ wrtc1 := ma.StringCast("/ip4/1.1.1.1/udp/1/webrtc-direct")
+
+ testCase := []struct {
+ name string
+ addrs []ma.Multiaddr
+ output []network.AddrDelay
+ }{
+ {
+ name: "quic+webtransport filtered when quicv1",
+ addrs: []ma.Multiaddr{q1, q2, q3, q4, q1v1, q2v1, q3v1, wt1, t1, wrtc1},
+ output: []network.AddrDelay{
+ {Addr: q1, Delay: 0},
+ {Addr: q2, Delay: 0},
+ {Addr: q3, Delay: 0},
+ {Addr: q4, Delay: 0},
+ {Addr: q1v1, Delay: 0},
+ {Addr: q2v1, Delay: 0},
+ {Addr: q3v1, Delay: 0},
+ {Addr: wt1, Delay: 0},
+ {Addr: t1, Delay: 0},
+ {Addr: wrtc1, Delay: 0},
+ },
+ },
+ }
+ for _, tc := range testCase {
+ t.Run(tc.name, func(t *testing.T) {
+ res := NoDelayDialRanker(tc.addrs)
+ if len(res) != len(tc.output) {
+ log.Error("expected output mismatch", "expected", tc.output, "got", res)
+ t.Errorf("expected elems: %d got: %d", len(tc.output), len(res))
+ }
+ sortAddrDelays(res)
+ sortAddrDelays(tc.output)
+ for i := 0; i < len(tc.output); i++ {
+ if !tc.output[i].Addr.Equal(res[i].Addr) || tc.output[i].Delay != res[i].Delay {
+ t.Fatalf("expected %+v got %+v", tc.output, res)
+ }
+ }
+ })
+ }
+}
+
+func TestDelayRankerQUICDelay(t *testing.T) {
+ q1v1 := ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1")
+ wt1 := ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1/webtransport/")
+ q2v1 := ma.StringCast("/ip4/1.2.3.4/udp/2/quic-v1")
+ q3v1 := ma.StringCast("/ip4/1.2.3.4/udp/3/quic-v1")
+
+ q1v16 := ma.StringCast("/ip6/1::2/udp/1/quic-v1")
+ q2v16 := ma.StringCast("/ip6/1::2/udp/2/quic-v1")
+ q3v16 := ma.StringCast("/ip6/1::2/udp/3/quic-v1")
+
+ testCase := []struct {
+ name string
+ addrs []ma.Multiaddr
+ output []network.AddrDelay
+ }{
+ {
+ name: "quic-ipv4",
+ addrs: []ma.Multiaddr{q1v1, q2v1, q3v1},
+ output: []network.AddrDelay{
+ {Addr: q1v1, Delay: 0},
+ {Addr: q2v1, Delay: PublicQUICDelay},
+ {Addr: q3v1, Delay: PublicQUICDelay},
+ },
+ },
+ {
+ name: "quic-ipv6",
+ addrs: []ma.Multiaddr{q1v16, q2v16, q3v16},
+ output: []network.AddrDelay{
+ {Addr: q1v16, Delay: 0},
+ {Addr: q2v16, Delay: PublicQUICDelay},
+ {Addr: q3v16, Delay: PublicQUICDelay},
+ },
+ },
+ {
+ name: "quic-ip4-ip6",
+ addrs: []ma.Multiaddr{q1v16, q2v1},
+ output: []network.AddrDelay{
+ {Addr: q1v16, Delay: 0},
+ {Addr: q2v1, Delay: PublicQUICDelay},
+ },
+ },
+ {
+ name: "quic-quic-v1-webtransport",
+ addrs: []ma.Multiaddr{q1v16, q1v1, q2v1, q3v1, wt1},
+ output: []network.AddrDelay{
+ {Addr: q1v16, Delay: 0},
+ {Addr: q1v1, Delay: PublicQUICDelay},
+ {Addr: q2v1, Delay: 2 * PublicQUICDelay},
+ {Addr: q3v1, Delay: 2 * PublicQUICDelay},
+ {Addr: wt1, Delay: 2 * PublicQUICDelay},
+ },
+ },
+ {
+ name: "wt-ranking",
+ addrs: []ma.Multiaddr{q1v16, q2v16, q3v16, wt1},
+ output: []network.AddrDelay{
+ {Addr: q1v16, Delay: 0},
+ {Addr: wt1, Delay: PublicQUICDelay},
+ {Addr: q2v16, Delay: 2 * PublicQUICDelay},
+ {Addr: q3v16, Delay: 2 * PublicQUICDelay},
+ },
+ },
+ }
+ for _, tc := range testCase {
+ t.Run(tc.name, func(t *testing.T) {
+ res := DefaultDialRanker(tc.addrs)
+ if len(res) != len(tc.output) {
+ log.Error("expected output mismatch", "expected", tc.output, "got", res)
+ t.Errorf("expected elems: %d got: %d", len(tc.output), len(res))
+ }
+ sortAddrDelays(res)
+ sortAddrDelays(tc.output)
+ for i := 0; i < len(tc.output); i++ {
+ if !tc.output[i].Addr.Equal(res[i].Addr) || tc.output[i].Delay != res[i].Delay {
+ t.Fatalf("expected %+v got %+v", tc.output, res)
+ }
+ }
+ })
+ }
+}
+
+func TestDelayRankerTCPDelay(t *testing.T) {
+ q1v1 := ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1")
+ q2v1 := ma.StringCast("/ip4/1.2.3.4/udp/2/quic-v1")
+
+ q1v16 := ma.StringCast("/ip6/1::2/udp/1/quic-v1")
+ q2v16 := ma.StringCast("/ip6/1::2/udp/2/quic-v1")
+ q3v16 := ma.StringCast("/ip6/1::2/udp/3/quic-v1")
+
+ t1 := ma.StringCast("/ip4/1.2.3.5/tcp/1/")
+ t1v6 := ma.StringCast("/ip6/1::2/tcp/1")
+ t2 := ma.StringCast("/ip4/1.2.3.4/tcp/2")
+ t3 := ma.StringCast("/ip4/1.2.3.4/tcp/3")
+
+ testCase := []struct {
+ name string
+ addrs []ma.Multiaddr
+ output []network.AddrDelay
+ }{
+ {
+ name: "quic-with-tcp-ip6-ip4",
+ addrs: []ma.Multiaddr{q1v1, q1v16, q2v16, q3v16, q2v1, t1, t1v6, t2, t3},
+ output: []network.AddrDelay{
+ {Addr: q1v16, Delay: 0},
+ {Addr: q1v1, Delay: PublicQUICDelay},
+ {Addr: q2v16, Delay: 2 * PublicQUICDelay},
+ {Addr: q3v16, Delay: 2 * PublicQUICDelay},
+ {Addr: q2v1, Delay: 2 * PublicQUICDelay},
+ {Addr: t1v6, Delay: 3 * PublicQUICDelay},
+ {Addr: t1, Delay: 4 * PublicQUICDelay},
+ {Addr: t2, Delay: 5 * PublicQUICDelay},
+ {Addr: t3, Delay: 5 * PublicQUICDelay},
+ },
+ },
+ {
+ name: "quic-ip4-with-tcp",
+ addrs: []ma.Multiaddr{q1v1, t2, t1v6, t1},
+ output: []network.AddrDelay{
+ {Addr: q1v1, Delay: 0},
+ {Addr: t1v6, Delay: PublicTCPDelay},
+ {Addr: t1, Delay: 2 * PublicTCPDelay},
+ {Addr: t2, Delay: 3 * PublicTCPDelay},
+ },
+ },
+ {
+ name: "quic-ip4-with-tcp-ipv4",
+ addrs: []ma.Multiaddr{q1v1, t2, t3, t1},
+ output: []network.AddrDelay{
+ {Addr: q1v1, Delay: 0},
+ {Addr: t1, Delay: PublicTCPDelay},
+ {Addr: t2, Delay: 2 * PublicTCPDelay},
+ {Addr: t3, Delay: 2 * PublicTCPDelay},
+ },
+ },
+ {
+ name: "quic-ip4-with-two-tcp",
+ addrs: []ma.Multiaddr{q1v1, t1v6, t2},
+ output: []network.AddrDelay{
+ {Addr: q1v1, Delay: 0},
+ {Addr: t1v6, Delay: PublicTCPDelay},
+ {Addr: t2, Delay: 2 * PublicTCPDelay},
+ },
+ },
+ {
+ name: "tcp-ip4-ip6",
+ addrs: []ma.Multiaddr{t1, t2, t1v6, t3},
+ output: []network.AddrDelay{
+ {Addr: t1v6, Delay: 0},
+ {Addr: t1, Delay: PublicTCPDelay},
+ {Addr: t2, Delay: 2 * PublicTCPDelay},
+ {Addr: t3, Delay: 2 * PublicTCPDelay},
+ },
+ },
+ {
+ name: "empty",
+ addrs: []ma.Multiaddr{},
+ output: []network.AddrDelay{},
+ },
+ }
+ for _, tc := range testCase {
+ t.Run(tc.name, func(t *testing.T) {
+ res := DefaultDialRanker(tc.addrs)
+ if len(res) != len(tc.output) {
+ log.Error("expected output mismatch", "expected", tc.output, "got", res)
+ t.Errorf("expected elems: %d got: %d", len(tc.output), len(res))
+ }
+ sortAddrDelays(res)
+ sortAddrDelays(tc.output)
+ for i := 0; i < len(tc.output); i++ {
+ if !tc.output[i].Addr.Equal(res[i].Addr) || tc.output[i].Delay != res[i].Delay {
+ t.Fatalf("expected %+v got %+v", tc.output, res)
+ }
+ }
+ })
+ }
+}
+
+func TestDelayRankerRelay(t *testing.T) {
+ q1 := ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1")
+ q2 := ma.StringCast("/ip4/1.2.3.4/udp/2/quic-v1")
+
+ pid := test.RandPeerIDFatal(t)
+ r1 := ma.StringCast(fmt.Sprintf("/ip4/1.2.3.4/tcp/1/p2p-circuit/p2p/%s", pid))
+ r2 := ma.StringCast(fmt.Sprintf("/ip4/1.2.3.4/udp/1/quic/p2p-circuit/p2p/%s", pid))
+
+ testCase := []struct {
+ name string
+ addrs []ma.Multiaddr
+ output []network.AddrDelay
+ }{
+ {
+ name: "relay address delayed",
+ addrs: []ma.Multiaddr{q1, q2, r1, r2},
+ output: []network.AddrDelay{
+ {Addr: q1, Delay: 0},
+ {Addr: q2, Delay: PublicQUICDelay},
+ {Addr: r2, Delay: RelayDelay},
+ {Addr: r1, Delay: PublicTCPDelay + RelayDelay},
+ },
+ },
+ }
+ for _, tc := range testCase {
+ t.Run(tc.name, func(t *testing.T) {
+ res := DefaultDialRanker(tc.addrs)
+ if len(res) != len(tc.output) {
+ log.Error("expected output mismatch", "expected", tc.output, "got", res)
+ t.Errorf("expected elems: %d got: %d", len(tc.output), len(res))
+ }
+ sortAddrDelays(res)
+ sortAddrDelays(tc.output)
+ for i := 0; i < len(tc.output); i++ {
+ if !tc.output[i].Addr.Equal(res[i].Addr) || tc.output[i].Delay != res[i].Delay {
+ t.Fatalf("expected %+v got %+v", tc.output, res)
+ }
+ }
+ })
+ }
+}
+
+func TestDelayRankerOtherTransportDelay(t *testing.T) {
+ q1v1 := ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1")
+ q1v16 := ma.StringCast("/ip6/1::2/udp/1/quic-v1")
+ t1 := ma.StringCast("/ip4/1.2.3.5/tcp/1/")
+ t1v6 := ma.StringCast("/ip6/1::2/tcp/1")
+ wrtc1 := ma.StringCast("/ip4/1.2.3.4/udp/1/webrtc-direct")
+ wrtc1v6 := ma.StringCast("/ip6/1::2/udp/1/webrtc-direct")
+ onion1 := ma.StringCast("/onion3/vww6ybal4bd7szmgncyruucpgfkqahzddi37ktceo3ah7ngmcopnpyyd:1234")
+ onlyIP := ma.StringCast("/ip4/1.2.3.4/")
+ testCase := []struct {
+ name string
+ addrs []ma.Multiaddr
+ output []network.AddrDelay
+ }{
+ {
+ name: "quic-with-other",
+ addrs: []ma.Multiaddr{q1v1, q1v16, wrtc1, wrtc1v6, onion1, onlyIP},
+ output: []network.AddrDelay{
+ {Addr: q1v16, Delay: 0},
+ {Addr: q1v1, Delay: PublicQUICDelay},
+ {Addr: wrtc1, Delay: PublicQUICDelay + PublicOtherDelay},
+ {Addr: wrtc1v6, Delay: PublicQUICDelay + PublicOtherDelay},
+ {Addr: onlyIP, Delay: PublicQUICDelay + PublicOtherDelay},
+ {Addr: onion1, Delay: PublicQUICDelay + 2*PublicOtherDelay},
+ },
+ },
+ {
+ name: "quic-and-tcp-with-other",
+ addrs: []ma.Multiaddr{q1v1, t1, t1v6, wrtc1, wrtc1v6, onion1, onlyIP},
+ output: []network.AddrDelay{
+ {Addr: q1v1, Delay: 0},
+ {Addr: t1v6, Delay: PublicQUICDelay},
+ {Addr: t1, Delay: 2 * PublicQUICDelay},
+ {Addr: wrtc1, Delay: 2*PublicQUICDelay + PublicOtherDelay},
+ {Addr: wrtc1v6, Delay: 2*PublicQUICDelay + PublicOtherDelay},
+ {Addr: onlyIP, Delay: 2*PublicQUICDelay + PublicOtherDelay},
+ {Addr: onion1, Delay: 2*PublicQUICDelay + 2*PublicOtherDelay},
+ },
+ },
+ {
+ name: "only-non-ip-addr",
+ addrs: []ma.Multiaddr{onion1},
+ output: []network.AddrDelay{
+ {Addr: onion1, Delay: PublicOtherDelay},
+ },
+ },
+ }
+ for _, tc := range testCase {
+ t.Run(tc.name, func(t *testing.T) {
+ res := DefaultDialRanker(tc.addrs)
+ if len(res) != len(tc.output) {
+ log.Error("expected output mismatch", "expected", tc.output, "got", res)
+ t.Errorf("expected elems: %d got: %d", len(tc.output), len(res))
+ return
+ }
+ sortAddrDelays(res)
+ sortAddrDelays(tc.output)
+ for i := 0; i < len(tc.output); i++ {
+ if !tc.output[i].Addr.Equal(res[i].Addr) || tc.output[i].Delay != res[i].Delay {
+ t.Fatalf("expected %+v got %+v", tc.output, res)
+ }
+ }
+ })
+ }
+}
diff --git a/p2p/net/swarm/dial_sync.go b/p2p/net/swarm/dial_sync.go
new file mode 100644
index 0000000000..3cc8547281
--- /dev/null
+++ b/p2p/net/swarm/dial_sync.go
@@ -0,0 +1,115 @@
+package swarm
+
+import (
+ "context"
+ "errors"
+ "sync"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+)
+
+// dialWorkerFunc is used by dialSync to spawn a new dial worker
+type dialWorkerFunc func(peer.ID, <-chan dialRequest)
+
+// errConcurrentDialSuccessful is used to signal that a concurrent dial succeeded
+var errConcurrentDialSuccessful = errors.New("concurrent dial successful")
+
+// newDialSync constructs a new dialSync
+func newDialSync(worker dialWorkerFunc) *dialSync {
+ return &dialSync{
+ dials: make(map[peer.ID]*activeDial),
+ dialWorker: worker,
+ }
+}
+
+// dialSync is a dial synchronization helper that ensures that at most one dial
+// to any given peer is active at any given time.
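+//
+// A rough usage sketch (illustrative; worker is any dialWorkerFunc):
+//
+//	ds := newDialSync(worker)
+//	conn, err := ds.Dial(ctx, p) // concurrent callers for p share one dial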
+type dialSync struct {
+ mutex sync.Mutex
+ dials map[peer.ID]*activeDial
+ dialWorker dialWorkerFunc
+}
+
+type activeDial struct {
+ refCnt int
+
+ ctx context.Context
+ cancelCause func(error)
+
+ reqch chan dialRequest
+}
+
+func (ad *activeDial) dial(ctx context.Context) (*Conn, error) {
+ dialCtx := ad.ctx
+
+ if forceDirect, reason := network.GetForceDirectDial(ctx); forceDirect {
+ dialCtx = network.WithForceDirectDial(dialCtx, reason)
+ }
+ if simConnect, isClient, reason := network.GetSimultaneousConnect(ctx); simConnect {
+ dialCtx = network.WithSimultaneousConnect(dialCtx, isClient, reason)
+ }
+
+ resch := make(chan dialResponse, 1)
+ select {
+ case ad.reqch <- dialRequest{ctx: dialCtx, resch: resch}:
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ }
+
+ select {
+ case res := <-resch:
+ return res.conn, res.err
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ }
+}
+
+func (ds *dialSync) getActiveDial(p peer.ID) (*activeDial, error) {
+ ds.mutex.Lock()
+ defer ds.mutex.Unlock()
+
+ actd, ok := ds.dials[p]
+ if !ok {
+ // This code intentionally uses the background context. Otherwise, if the first call
+ // to Dial is canceled, subsequent dial calls will also be canceled.
+ ctx, cancel := context.WithCancelCause(context.Background())
+ actd = &activeDial{
+ ctx: ctx,
+ cancelCause: cancel,
+ reqch: make(chan dialRequest),
+ }
+ go ds.dialWorker(p, actd.reqch)
+ ds.dials[p] = actd
+ }
+ // increase ref count before dropping mutex
+ actd.refCnt++
+ return actd, nil
+}
+
+// Dial initiates a dial to the given peer if none is in progress,
+// and then waits for the dial to that peer to complete.
+func (ds *dialSync) Dial(ctx context.Context, p peer.ID) (*Conn, error) {
+ ad, err := ds.getActiveDial(p)
+ if err != nil {
+ return nil, err
+ }
+
+ conn, err := ad.dial(ctx)
+
+ ds.mutex.Lock()
+ defer ds.mutex.Unlock()
+
+ ad.refCnt--
+ if ad.refCnt == 0 {
+ if err == nil {
+ ad.cancelCause(errConcurrentDialSuccessful)
+ } else {
+ ad.cancelCause(err)
+ }
+ close(ad.reqch)
+ delete(ds.dials, p)
+ }
+
+ return conn, err
+}
diff --git a/p2p/net/swarm/dial_sync_test.go b/p2p/net/swarm/dial_sync_test.go
new file mode 100644
index 0000000000..e44e33176e
--- /dev/null
+++ b/p2p/net/swarm/dial_sync_test.go
@@ -0,0 +1,228 @@
+package swarm
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+
+ "github.com/stretchr/testify/require"
+)
+
+func getMockDialFunc() (dialWorkerFunc, func(), context.Context, <-chan struct{}) {
+ dfcalls := make(chan struct{}, 512) // buffer it large enough that we won't care
+ dialctx, cancel := context.WithCancel(context.Background())
+ ch := make(chan struct{})
+ f := func(_ peer.ID, reqch <-chan dialRequest) {
+ defer cancel()
+ dfcalls <- struct{}{}
+ go func() {
+ for req := range reqch {
+ <-ch
+ req.resch <- dialResponse{conn: new(Conn)}
+ }
+ }()
+ }
+
+ var once sync.Once
+ return f, func() { once.Do(func() { close(ch) }) }, dialctx, dfcalls
+}
+
+func TestBasicDialSync(t *testing.T) {
+ df, done, _, callsch := getMockDialFunc()
+ dsync := newDialSync(df)
+ p := peer.ID("testpeer")
+
+ finished := make(chan struct{}, 2)
+ go func() {
+ if _, err := dsync.Dial(context.Background(), p); err != nil {
+ t.Error(err)
+ }
+ finished <- struct{}{}
+ }()
+
+ go func() {
+ if _, err := dsync.Dial(context.Background(), p); err != nil {
+ t.Error(err)
+ }
+ finished <- struct{}{}
+ }()
+
+ // short sleep just to make sure we've moved around in the scheduler
+ time.Sleep(time.Millisecond * 20)
+ done()
+
+ <-finished
+ <-finished
+
+ if len(callsch) > 1 {
+ t.Fatal("should only have called dial func once!")
+ }
+}
+
+func TestDialSyncCancel(t *testing.T) {
+ df, done, _, dcall := getMockDialFunc()
+
+ dsync := newDialSync(df)
+
+ p := peer.ID("testpeer")
+
+ ctx1, cancel1 := context.WithCancel(context.Background())
+
+ finished := make(chan struct{})
+ go func() {
+ _, err := dsync.Dial(ctx1, p)
+ if err != ctx1.Err() {
+ t.Error("should have gotten context error")
+ }
+ finished <- struct{}{}
+ }()
+
+ // make sure the above makes it through the wait code first
+ select {
+ case <-dcall:
+ case <-time.After(time.Second):
+ t.Fatal("timed out waiting for dial to start")
+ }
+
+ // Add a second dialwait in so two actors are waiting on the same dial
+ go func() {
+ _, err := dsync.Dial(context.Background(), p)
+ if err != nil {
+ t.Error(err)
+ }
+ finished <- struct{}{}
+ }()
+
+ time.Sleep(time.Millisecond * 20)
+
+ // cancel the first dialwait, it should not affect the second at all
+ cancel1()
+ select {
+ case <-finished:
+ case <-time.After(time.Second):
+ t.Fatal("timed out waiting for wait to exit")
+ }
+
+ // short sleep just to make sure we've moved around in the scheduler
+ time.Sleep(time.Millisecond * 20)
+ done()
+
+ <-finished
+}
+
+func TestDialSyncAllCancel(t *testing.T) {
+ df, done, dctx, _ := getMockDialFunc()
+
+ dsync := newDialSync(df)
+ p := peer.ID("testpeer")
+ ctx, cancel := context.WithCancel(context.Background())
+
+ finished := make(chan struct{})
+ go func() {
+ if _, err := dsync.Dial(ctx, p); err != ctx.Err() {
+ t.Error("should have gotten context error")
+ }
+ finished <- struct{}{}
+ }()
+
+ // Add a second dialwait in so two actors are waiting on the same dial
+ go func() {
+ if _, err := dsync.Dial(ctx, p); err != ctx.Err() {
+ t.Error("should have gotten context error")
+ }
+ finished <- struct{}{}
+ }()
+
+ cancel()
+ for i := 0; i < 2; i++ {
+ select {
+ case <-finished:
+ case <-time.After(time.Second):
+ t.Fatal("timed out waiting for wait to exit")
+ }
+ }
+
+ // the dial should have exited now
+ select {
+ case <-dctx.Done():
+ case <-time.After(time.Second):
+ t.Fatal("timed out waiting for dial to return")
+ }
+
+ // should be able to successfully dial that peer again
+ done()
+ if _, err := dsync.Dial(context.Background(), p); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestFailFirst(t *testing.T) {
+ var handledFirst atomic.Bool
+ dialErr := fmt.Errorf("gophers ate the modem")
+ f := func(_ peer.ID, reqch <-chan dialRequest) {
+ go func() {
+ for {
+ req, ok := <-reqch
+ if !ok {
+ return
+ }
+
+ if handledFirst.CompareAndSwap(false, true) {
+ req.resch <- dialResponse{err: dialErr}
+ } else {
+ req.resch <- dialResponse{conn: new(Conn)}
+ }
+ }
+ }()
+ }
+
+ ds := newDialSync(f)
+ p := peer.ID("testing")
+
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+ defer cancel()
+
+ _, err := ds.Dial(ctx, p)
+ require.ErrorIs(t, err, dialErr, "expected gophers to have eaten the modem")
+
+ c, err := ds.Dial(ctx, p)
+ require.NoError(t, err)
+ require.NotNil(t, c, "should have gotten a 'real' conn back")
+}
+
+func TestStressActiveDial(_ *testing.T) {
+ ds := newDialSync(func(_ peer.ID, reqch <-chan dialRequest) {
+ go func() {
+ for {
+ req, ok := <-reqch
+ if !ok {
+ return
+ }
+ req.resch <- dialResponse{}
+ }
+ }()
+ })
+
+ wg := sync.WaitGroup{}
+
+ pid := peer.ID("foo")
+
+ makeDials := func() {
+ for i := 0; i < 10000; i++ {
+ ds.Dial(context.Background(), pid)
+ }
+ wg.Done()
+ }
+
+ for i := 0; i < 100; i++ {
+ wg.Add(1)
+ go makeDials()
+ }
+
+ wg.Wait()
+}
diff --git a/p2p/net/swarm/dial_test.go b/p2p/net/swarm/dial_test.go
new file mode 100644
index 0000000000..101ce99126
--- /dev/null
+++ b/p2p/net/swarm/dial_test.go
@@ -0,0 +1,653 @@
+package swarm_test
+
+import (
+ "context"
+ "net"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ testutil "github.com/libp2p/go-libp2p/core/test"
+ "github.com/libp2p/go-libp2p/p2p/net/swarm"
+ swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
+
+ "github.com/libp2p/go-libp2p-testing/ci"
+
+ ma "github.com/multiformats/go-multiaddr"
+ madns "github.com/multiformats/go-multiaddr-dns"
+ manet "github.com/multiformats/go-multiaddr/net"
+ "github.com/stretchr/testify/require"
+)
+
+func closeSwarms(swarms []*swarm.Swarm) {
+ for _, s := range swarms {
+ s.Close()
+ }
+}
+
+func TestBasicDialPeer(t *testing.T) {
+ swarms := makeSwarms(t, 2)
+ defer closeSwarms(swarms)
+ s1 := swarms[0]
+ s2 := swarms[1]
+
+ s1.Peerstore().AddAddrs(s2.LocalPeer(), s2.ListenAddresses(), peerstore.PermanentAddrTTL)
+
+ c, err := s1.DialPeer(context.Background(), s2.LocalPeer())
+ require.NoError(t, err)
+
+ s, err := c.NewStream(context.Background())
+ require.NoError(t, err)
+ s.Close()
+}
+
+func TestBasicDialPeerWithResolver(t *testing.T) {
+ mockResolver := madns.MockResolver{IP: make(map[string][]net.IPAddr)}
+ ipaddr, err := net.ResolveIPAddr("ip4", "127.0.0.1")
+ require.NoError(t, err)
+ mockResolver.IP["example.com"] = []net.IPAddr{*ipaddr}
+ resolver, err := madns.NewResolver(madns.WithDomainResolver("example.com", &mockResolver))
+ require.NoError(t, err)
+
+ swarms := makeSwarms(t, 2, swarmt.WithSwarmOpts(swarm.WithMultiaddrResolver(swarm.ResolverFromMaDNS{resolver})))
+ defer closeSwarms(swarms)
+ s1 := swarms[0]
+ s2 := swarms[1]
+
+ // Change the multiaddr from /ip4/127.0.0.1/... to /dns4/example.com/... so
+ // that the resolver has to resolve it
+ var s2Addrs []ma.Multiaddr
+ for _, a := range s2.ListenAddresses() {
+ _, rest := ma.SplitFunc(a, func(c ma.Component) bool {
+ return c.Protocol().Code == ma.P_TCP || c.Protocol().Code == ma.P_UDP
+ },
+ )
+ if rest != nil {
+ s2Addrs = append(s2Addrs, ma.StringCast("/dns4/example.com").Encapsulate(rest))
+ }
+ }
+
+ s1.Peerstore().AddAddrs(s2.LocalPeer(), s2Addrs, peerstore.PermanentAddrTTL)
+
+ c, err := s1.DialPeer(context.Background(), s2.LocalPeer())
+ require.NoError(t, err)
+
+ s, err := c.NewStream(context.Background())
+ require.NoError(t, err)
+ s.Close()
+}
+
+func TestDialWithNoListeners(t *testing.T) {
+ s1 := makeDialOnlySwarm(t)
+ swarms := makeSwarms(t, 1)
+ defer closeSwarms(swarms)
+ s2 := swarms[0]
+
+ s1.Peerstore().AddAddrs(s2.LocalPeer(), s2.ListenAddresses(), peerstore.PermanentAddrTTL)
+
+ c, err := s1.DialPeer(context.Background(), s2.LocalPeer())
+ require.NoError(t, err)
+
+ s, err := c.NewStream(context.Background())
+ require.NoError(t, err)
+ s.Close()
+}
+
+func acceptAndHang(l net.Listener) {
+ conns := make([]net.Conn, 0, 10)
+ for {
+ c, err := l.Accept()
+ if err != nil {
+ break
+ }
+ if c != nil {
+ conns = append(conns, c)
+ }
+ }
+ for _, c := range conns {
+ c.Close()
+ }
+}
+
+func TestSimultDials(t *testing.T) {
+ ctx := context.Background()
+ swarms := makeSwarms(t, 2, swarmt.OptDisableReuseport)
+ defer closeSwarms(swarms)
+
+ // connect everyone
+ {
+ var wg sync.WaitGroup
+ errs := make(chan error, 20) // 2 connect calls in each of the 10 for-loop iterations
+ connect := func(s *swarm.Swarm, dst peer.ID, addr ma.Multiaddr) {
+ // copy for other peer
+ log.Debug("TestSimultOpen: connecting", "local", s.LocalPeer(), "remote", dst, "addr", addr)
+ s.Peerstore().AddAddr(dst, addr, peerstore.TempAddrTTL)
+ if _, err := s.DialPeer(ctx, dst); err != nil {
+ errs <- err
+ }
+ wg.Done()
+ }
+
+ ifaceAddrs0, err := swarms[0].InterfaceListenAddresses()
+ if err != nil {
+ t.Fatal(err)
+ }
+ ifaceAddrs1, err := swarms[1].InterfaceListenAddresses()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ log.Info("Connecting swarms simultaneously.")
+ for i := 0; i < 10; i++ { // connect 10x for each.
+ wg.Add(2)
+ go connect(swarms[0], swarms[1].LocalPeer(), ifaceAddrs1[0])
+ go connect(swarms[1], swarms[0].LocalPeer(), ifaceAddrs0[0])
+ }
+ wg.Wait()
+ close(errs)
+
+ for err := range errs {
+ if err != nil {
+ t.Fatal("error swarm dialing to peer", err)
+ }
+ }
+ }
+
+ // should still just have 1, at most 2 connections :)
+ c01l := len(swarms[0].ConnsToPeer(swarms[1].LocalPeer()))
+ if c01l > 2 {
+ t.Error("0->1 has", c01l)
+ }
+ c10l := len(swarms[1].ConnsToPeer(swarms[0].LocalPeer()))
+ if c10l > 2 {
+ t.Error("1->0 has", c10l)
+ }
+}
+
+func newSilentPeer(t *testing.T) (peer.ID, ma.Multiaddr, net.Listener) {
+ dst := testutil.RandPeerIDFatal(t)
+ lst, err := net.Listen("tcp4", "localhost:0")
+ if err != nil {
+ t.Fatal(err)
+ }
+ addr, err := manet.FromNetAddr(lst.Addr())
+ if err != nil {
+ t.Fatal(err)
+ }
+ addrs, err := manet.ResolveUnspecifiedAddresses([]ma.Multiaddr{addr}, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ t.Log("new silent peer:", dst, addrs[0])
+ return dst, addrs[0], lst
+}
+
+func TestDialWait(t *testing.T) {
+ const dialTimeout = 5 * time.Second
+
+ swarms := makeSwarms(t, 1, swarmt.WithSwarmOpts(swarm.WithDialTimeout(dialTimeout)))
+ s1 := swarms[0]
+ defer s1.Close()
+
+ // dial to a non-existent peer.
+ s2p, s2addr, s2l := newSilentPeer(t)
+ go acceptAndHang(s2l)
+ defer s2l.Close()
+ s1.Peerstore().AddAddr(s2p, s2addr, peerstore.PermanentAddrTTL)
+
+ before := time.Now()
+ if c, err := s1.DialPeer(context.Background(), s2p); err == nil {
+ defer c.Close()
+ t.Fatal("error swarm dialing to unknown peer worked...", err)
+ } else {
+ t.Log("correctly got error:", err)
+ }
+ duration := time.Since(before)
+
+ if duration < dialTimeout*swarm.DialAttempts {
+ t.Error("< dialTimeout * DialAttempts not being respected", duration, dialTimeout*swarm.DialAttempts)
+ }
+ if duration > 2*dialTimeout*swarm.DialAttempts {
+ t.Error("> 2*dialTimeout * DialAttempts not being respected", duration, 2*dialTimeout*swarm.DialAttempts)
+ }
+
+ if !s1.Backoff().Backoff(s2p, s2addr) {
+ t.Error("s2 should now be on backoff")
+ }
+}
+
+func TestDialBackoff(t *testing.T) {
+ if ci.IsRunning() {
+ t.Skip("travis will never have fun with this test")
+ }
+ const dialTimeout = 100 * time.Millisecond
+
+ ctx := context.Background()
+ swarms := makeSwarms(t, 2, swarmt.WithSwarmOpts(swarm.WithDialTimeout(dialTimeout)))
+ defer closeSwarms(swarms)
+ s1 := swarms[0]
+ s2 := swarms[1]
+
+ s2addrs, err := s2.InterfaceListenAddresses()
+ require.NoError(t, err)
+ s1.Peerstore().AddAddrs(s2.LocalPeer(), s2addrs, peerstore.PermanentAddrTTL)
+
+ // dial to a non-existent peer.
+ s3p, s3addr, s3l := newSilentPeer(t)
+ go acceptAndHang(s3l)
+ defer s3l.Close()
+ s1.Peerstore().AddAddr(s3p, s3addr, peerstore.PermanentAddrTTL)
+
+ // in this test we will:
+ // 1) dial 10x to each node.
+ // 2) all dials should hang
+ // 3) s1->s2 should succeed.
+ // 4) s1->s3 should not (and should place s3 on backoff)
+ // 5) disconnect entirely
+ // 6) dial 10x to each node again
+ // 7) s3 dials should all return immediately (except 1)
+ // 8) s2 dials should all hang, and succeed
+ // 9) last s3 dial ends, unsuccessful
+
+ dialOnlineNode := func(dst peer.ID, times int) <-chan bool {
+ ch := make(chan bool)
+ for i := 0; i < times; i++ {
+ go func() {
+ if _, err := s1.DialPeer(ctx, dst); err != nil {
+ t.Error("error dialing", dst, err)
+ ch <- false
+ } else {
+ ch <- true
+ }
+ }()
+ }
+ return ch
+ }
+
+ dialOfflineNode := func(dst peer.ID, times int) <-chan bool {
+ ch := make(chan bool)
+ for i := 0; i < times; i++ {
+ go func() {
+ if c, err := s1.DialPeer(ctx, dst); err != nil {
+ ch <- false
+ } else {
+ t.Error("succeeded in dialing", dst)
+ ch <- true
+ c.Close()
+ }
+ }()
+ }
+ return ch
+ }
+
+ {
+ // 1) dial 10x to each node.
+ N := 10
+ s2done := dialOnlineNode(s2.LocalPeer(), N)
+ s3done := dialOfflineNode(s3p, N)
+
+ // when all dials should be done by:
+ dialTimeout1x := time.After(dialTimeout)
+ dialTimeout10Ax := time.After(dialTimeout * 2 * 10) // DialAttempts * 10
+
+ // 2) all dials should hang
+ select {
+ case <-s2done:
+ t.Error("s2 should not happen immediately")
+ case <-s3done:
+ t.Error("s3 should not happen yet")
+ case <-time.After(time.Millisecond):
+ // s2 may finish very quickly, so let's get out.
+ }
+
+ // 3) s1->s2 should succeed.
+ for i := 0; i < N; i++ {
+ select {
+ case r := <-s2done:
+ if !r {
+ t.Error("s2 should not fail")
+ }
+ case <-s3done:
+ t.Error("s3 should not happen yet")
+ case <-dialTimeout1x:
+ t.Error("s2 took too long")
+ }
+ }
+
+ select {
+ case <-s2done:
+ t.Error("s2 should have no more")
+ case <-s3done:
+ t.Error("s3 should not happen yet")
+ case <-dialTimeout1x: // let it pass
+ }
+
+ // 4) s1->s3 should not (and should place s3 on backoff)
+ // N-1 should finish before dialTimeout1x * 2
+ for i := 0; i < N; i++ {
+ select {
+ case <-s2done:
+ t.Error("s2 should have no more")
+ case r := <-s3done:
+ if r {
+ t.Error("s3 should not succeed")
+ }
+ case <-(dialTimeout1x):
+ if i < (N - 1) {
+ t.Fatal("s3 took too long")
+ }
+ t.Log("dialTimeout1x * 1.3 hit for last peer")
+ case <-dialTimeout10Ax:
+ t.Fatal("s3 took too long")
+ }
+ }
+
+ // check backoff state
+ if s1.Backoff().Backoff(s2.LocalPeer(), s2addrs[0]) {
+ t.Error("s2 should not be on backoff")
+ }
+ if !s1.Backoff().Backoff(s3p, s3addr) {
+ t.Error("s3 should be on backoff")
+ }
+
+ // 5) disconnect entirely
+
+ for _, c := range s1.Conns() {
+ c.Close()
+ }
+ for i := 0; i < 100 && len(s1.Conns()) > 0; i++ {
+ <-time.After(time.Millisecond)
+ }
+ if len(s1.Conns()) > 0 {
+ t.Fatal("s1 conns must exit")
+ }
+ }
+
+ {
+ // 6) dial 10x to each node again
+ N := 10
+ s2done := dialOnlineNode(s2.LocalPeer(), N)
+ s3done := dialOfflineNode(s3p, N)
+
+ // when all dials should be done by:
+ dialTimeout1x := time.After(dialTimeout)
+ dialTimeout10Ax := time.After(dialTimeout * 2 * 10) // DialAttempts * 10
+
+ // 7) s3 dials should all return immediately (except 1)
+ for i := 0; i < N-1; i++ {
+ select {
+ case <-s2done:
+ t.Error("s2 should not succeed yet")
+ case r := <-s3done:
+ if r {
+ t.Error("s3 should not succeed")
+ }
+ case <-dialTimeout1x:
+ t.Fatal("s3 took too long")
+ }
+ }
+
+ // 8) s2 dials should all hang, and succeed
+ for i := 0; i < N; i++ {
+ select {
+ case r := <-s2done:
+ if !r {
+ t.Error("s2 should succeed")
+ }
+ // case <-s3done:
+ case <-(dialTimeout1x):
+ t.Fatal("s3 took too long")
+ }
+ }
+
+ // 9) the last s3 should return, failed.
+ select {
+ case <-s2done:
+ t.Error("s2 should have no more")
+ case r := <-s3done:
+ if r {
+ t.Error("s3 should not succeed")
+ }
+ case <-dialTimeout10Ax:
+ t.Fatal("s3 took too long")
+ }
+
+ // check backoff state (the same)
+ if s1.Backoff().Backoff(s2.LocalPeer(), s2addrs[0]) {
+ t.Error("s2 should not be on backoff")
+ }
+ if !s1.Backoff().Backoff(s3p, s3addr) {
+ t.Error("s3 should be on backoff")
+ }
+ }
+}
+
+func TestDialBackoffClears(t *testing.T) {
+ const dialTimeout = 3 * time.Second
+ swarms := makeSwarms(t, 2, swarmt.WithSwarmOpts(swarm.WithDialTimeout(dialTimeout)))
+ defer closeSwarms(swarms)
+ s1 := swarms[0]
+ s2 := swarms[1]
+
+ // use another address first, one that accepts connections and hangs on them
+ _, s2bad, s2l := newSilentPeer(t)
+ go acceptAndHang(s2l)
+ defer s2l.Close()
+
+ // phase 1 -- dial to non-operational addresses
+ s1.Peerstore().AddAddr(s2.LocalPeer(), s2bad, peerstore.PermanentAddrTTL)
+
+ before := time.Now()
+ _, err := s1.DialPeer(context.Background(), s2.LocalPeer())
+ require.Error(t, err, "dialing to broken addr worked...")
+ duration := time.Since(before)
+
+ if duration < dialTimeout*swarm.DialAttempts {
+ t.Error("< dialTimeout * DialAttempts not being respected", duration, dialTimeout*swarm.DialAttempts)
+ }
+ if duration > 2*dialTimeout*swarm.DialAttempts {
+ t.Error("> 2*dialTimeout * DialAttempts not being respected", duration, 2*dialTimeout*swarm.DialAttempts)
+ }
+ require.True(t, s1.Backoff().Backoff(s2.LocalPeer(), s2bad), "s2 should now be on backoff")
+
+ // phase 2 -- add the working address. dial should succeed.
+ ifaceAddrs1, err := s2.InterfaceListenAddresses()
+ require.NoError(t, err)
+ s1.Peerstore().AddAddrs(s2.LocalPeer(), ifaceAddrs1, peerstore.PermanentAddrTTL)
+
+ // backoffs are per address, not peer
+ c, err := s1.DialPeer(context.Background(), s2.LocalPeer())
+ require.NoError(t, err)
+ defer c.Close()
+ require.False(t, s1.Backoff().Backoff(s2.LocalPeer(), s2bad), "s2 should no longer be on backoff")
+}
+
+func TestDialPeerFailed(t *testing.T) {
+ swarms := makeSwarms(t, 2, swarmt.WithSwarmOpts(swarm.WithDialTimeout(100*time.Millisecond)))
+ defer closeSwarms(swarms)
+ testedSwarm, targetSwarm := swarms[0], swarms[1]
+
+ const expectedErrorsCount = 5
+ for i := 0; i < expectedErrorsCount; i++ {
+ _, silentPeerAddress, silentPeerListener := newSilentPeer(t)
+ go acceptAndHang(silentPeerListener)
+ defer silentPeerListener.Close()
+
+ testedSwarm.Peerstore().AddAddr(targetSwarm.LocalPeer(), silentPeerAddress, peerstore.PermanentAddrTTL)
+ }
+
+ _, err := testedSwarm.DialPeer(context.Background(), targetSwarm.LocalPeer())
+ require.Error(t, err)
+
+ // dial_test.go:508: correctly get a combined error: failed to dial PEER: all dials failed
+ // * [/ip4/127.0.0.1/tcp/46485] failed to negotiate security protocol: context deadline exceeded
+ // * [/ip4/127.0.0.1/tcp/34881] failed to negotiate security protocol: context deadline exceeded
+ // ...
+
+ dialErr, ok := err.(*swarm.DialError)
+ if !ok {
+ t.Fatalf("expected *DialError, got %T", err)
+ }
+
+ if len(dialErr.DialErrors) != expectedErrorsCount {
+ t.Errorf("expected %d errors, got %d", expectedErrorsCount, len(dialErr.DialErrors))
+ }
+}
+
+func TestDialExistingConnection(t *testing.T) {
+ swarms := makeSwarms(t, 2)
+ defer closeSwarms(swarms)
+ s1 := swarms[0]
+ s2 := swarms[1]
+
+ // Only use one of the addresses here.
+ // Otherwise, we might dial TCP and QUIC simultaneously here, and end up with two connections,
+ // if the handshake latencies line up exactly.
+ s1.Peerstore().AddAddrs(s2.LocalPeer(), s2.ListenAddresses()[:1], peerstore.PermanentAddrTTL)
+
+ c1, err := s1.DialPeer(context.Background(), s2.LocalPeer())
+ require.NoError(t, err)
+
+ c2, err := s1.DialPeer(context.Background(), s2.LocalPeer())
+ require.NoError(t, err)
+
+ // can't use require.Equal here, as this does a deep comparison
+ if c1 != c2 {
+ t.Fatalf("expecting the same connection from both dials, got %s <-> %s vs %s <-> %s", c1.LocalMultiaddr(), c1.RemoteMultiaddr(), c2.LocalMultiaddr(), c2.RemoteMultiaddr())
+ }
+}
+
+func newSilentListener(t *testing.T) ([]ma.Multiaddr, net.Listener) {
+ lst, err := net.Listen("tcp4", "localhost:0")
+ if err != nil {
+ t.Fatal(err)
+ }
+ addr, err := manet.FromNetAddr(lst.Addr())
+ if err != nil {
+ t.Fatal(err)
+ }
+ addrs, err := manet.ResolveUnspecifiedAddresses([]ma.Multiaddr{addr}, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return addrs, lst
+}
+
+func TestDialSimultaneousJoin(t *testing.T) {
+ const dialTimeout = 3 * time.Second
+
+ swarms := makeSwarms(t, 2, swarmt.WithSwarmOpts(swarm.WithDialTimeout(dialTimeout)))
+ defer closeSwarms(swarms)
+ s1 := swarms[0]
+ s2 := swarms[1]
+
+ s2silentAddrs, s2silentListener := newSilentListener(t)
+ go acceptAndHang(s2silentListener)
+
+ connch := make(chan network.Conn, 512)
+ errs := make(chan error, 2)
+
+ // start a dial to s2 through the silent addr
+ go func() {
+ s1.Peerstore().AddAddrs(s2.LocalPeer(), s2silentAddrs, peerstore.PermanentAddrTTL)
+
+ c, err := s1.DialPeer(context.Background(), s2.LocalPeer())
+ if err != nil {
+ errs <- err
+ connch <- nil
+ return
+ }
+
+ t.Logf("first dial succeeded; conn: %+v", c)
+
+ connch <- c
+ errs <- nil
+ }()
+
+ // wait a bit for the dial to take hold
+ time.Sleep(100 * time.Millisecond)
+
+ // start a second dial to s2 that uses the real s2 addrs
+ go func() {
+ s2addrs, err := s2.InterfaceListenAddresses()
+ if err != nil {
+ errs <- err
+ return
+ }
+ s1.Peerstore().AddAddrs(s2.LocalPeer(), s2addrs[:1], peerstore.PermanentAddrTTL)
+
+ c, err := s1.DialPeer(context.Background(), s2.LocalPeer())
+ if err != nil {
+ errs <- err
+ connch <- nil
+ return
+ }
+
+ t.Logf("second dial succeeded; conn: %+v", c)
+
+ connch <- c
+ errs <- nil
+ }()
+
+ // wait for the second dial to finish
+ c2 := <-connch
+
+ // start a third dial to s2, this should get the existing connection from the successful dial
+ go func() {
+ c, err := s1.DialPeer(context.Background(), s2.LocalPeer())
+ if err != nil {
+ errs <- err
+ connch <- nil
+ return
+ }
+
+ t.Logf("third dial succeeded; conn: %+v", c)
+
+ connch <- c
+ errs <- nil
+ }()
+
+ c3 := <-connch
+
+ // raise any errors from the previous goroutines
+ for i := 0; i < 3; i++ {
+ require.NoError(t, <-errs)
+ }
+
+ if c2 != c3 {
+ t.Fatal("expected c2 and c3 to be the same")
+ }
+
+ // next, the first dial to s2, using the silent addr, should time out; at that point the dial
+ // will error, but the last-chance check will see the existing connection and return it
+ select {
+ case c1 := <-connch:
+ if c1 != c2 {
+ t.Fatal("expected c1 and c2 to be the same")
+ }
+ case <-time.After(2 * dialTimeout):
+ t.Fatal("no connection from first dial")
+ }
+}
+
+func TestDialSelf(t *testing.T) {
+ swarms := makeSwarms(t, 2)
+ defer closeSwarms(swarms)
+ s1 := swarms[0]
+
+ _, err := s1.DialPeer(context.Background(), s1.LocalPeer())
+ require.ErrorIs(t, err, swarm.ErrDialToSelf, "expected error from self dial")
+}
+
+func TestDialQUICDraft29(t *testing.T) {
+ s := makeDialOnlySwarm(t)
+ id := testutil.RandPeerIDFatal(t)
+ s.Peerstore().AddAddr(id, ma.StringCast("/ip4/127.0.0.1/udp/1234/quic"), time.Hour)
+ _, err := s.DialPeer(context.Background(), id)
+ require.ErrorIs(t, err, swarm.ErrQUICDraft29)
+ require.ErrorIs(t, err, swarm.ErrNoTransport)
+}
diff --git a/p2p/net/swarm/dial_worker.go b/p2p/net/swarm/dial_worker.go
new file mode 100644
index 0000000000..88017325a1
--- /dev/null
+++ b/p2p/net/swarm/dial_worker.go
@@ -0,0 +1,504 @@
+package swarm
+
+import (
+ "context"
+ "math"
+ "sync"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ tpt "github.com/libp2p/go-libp2p/core/transport"
+
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
+// dialRequest is the structure used to request dials to the peer associated with a
+// worker loop.
+type dialRequest struct {
+ // ctx is the context that may be used for the request.
+ // If another concurrent request is made, any of the concurrent requests' contexts may be used for
+ // dials to the peer's addresses.
+ // The ctx of a simultaneous connect request has higher priority than that of a normal request.
+ ctx context.Context
+ // resch is the channel used to send the response for this query
+ resch chan dialResponse
+}
+
+// dialResponse is the response sent to dialRequests on the request's resch channel
+type dialResponse struct {
+ // conn is the connection to the peer on success
+ conn *Conn
+ // err is the error in dialing the peer
+ // nil on connection success
+ err error
+}
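+
+// A typical exchange with a worker, sketched here for illustration only (this
+// mirrors how the tests in dial_worker_test.go drive the loop; it is not an
+// exported API):
+//
+//	resch := make(chan dialResponse, 1)
+//	reqch <- dialRequest{ctx: ctx, resch: resch}
+//	res := <-resch
+//	if res.err != nil {
+//		// all dials failed; res.err wraps the per-address errors
+//	}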
+
+// pendRequest is used to track progress on a dialRequest.
+type pendRequest struct {
+ // req is the original dialRequest
+ req dialRequest
+ // err comprises errors of all failed dials
+ err *DialError
+ // addrs are the addresses on which we are waiting for pending dials.
+ // At the time of creation addrs is initialised to all the addresses of the peer. On a failed dial,
+ // the addr is removed from the map and err is updated. On a successful dial, the dialRequest is
+ // completed and a response is sent with the connection.
+}
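+
+// For illustration: a pendRequest for a peer with addresses A and B starts
+// with addrs = {A, B}. If the dial to A fails, A is removed from addrs and the
+// failure is recorded in err; if the dial to B then succeeds, the request
+// completes and the connection is sent on req.resch.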
+
+// addrDial tracks dials to a particular multiaddress.
+type addrDial struct {
+ // addr is the address dialed
+ addr ma.Multiaddr
+ // ctx is the context used for dialing the address
+ ctx context.Context
+ // conn is the established connection on success
+ conn *Conn
+ // err is the error from dialing the address
+ err error
+ // dialed indicates whether we have triggered the dial to the address
+ dialed bool
+ // createdAt is the time this struct was created
+ createdAt time.Time
+ // dialRankingDelay is the delay in dialing this address introduced by the ranking logic
+ dialRankingDelay time.Duration
+ // expectedTCPUpgradeTime is the expected time by which security upgrade will complete
+ expectedTCPUpgradeTime time.Time
+}
+
+// dialWorker synchronises concurrent dials to a peer. It ensures that we make at most one dial to a
+// peer's address
+type dialWorker struct {
+ s *Swarm
+ peer peer.ID
+ // reqch is used to send dial requests to the worker. close reqch to end the worker loop
+ reqch <-chan dialRequest
+ // pendingRequests is the set of pending dial requests
+ pendingRequests map[*pendRequest]struct{}
+ // trackedDials tracks dials to the peer's addresses. An entry here is used to ensure that
+ // we dial an address at most once
+ trackedDials map[string]*addrDial
+ // resch is used to receive responses for dials to the peer's addresses.
+ resch chan tpt.DialUpdate
+
+ connected bool // true when a connection has been successfully established
+
+ // for testing
+ wg sync.WaitGroup
+ cl Clock
+}
+
+func newDialWorker(s *Swarm, p peer.ID, reqch <-chan dialRequest, cl Clock) *dialWorker {
+ if cl == nil {
+ cl = RealClock{}
+ }
+ return &dialWorker{
+ s: s,
+ peer: p,
+ reqch: reqch,
+ pendingRequests: make(map[*pendRequest]struct{}),
+ trackedDials: make(map[string]*addrDial),
+ resch: make(chan tpt.DialUpdate),
+ cl: cl,
+ }
+}
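+
+// Illustrative lifecycle of a worker, following the pattern used by the tests
+// in dial_worker_test.go (a sketch, not an exported API):
+//
+//	reqch := make(chan dialRequest)
+//	w := newDialWorker(s, p, reqch, nil) // a nil clock falls back to RealClock
+//	go w.loop()
+//	// send dialRequests on reqch; each response arrives on the request's resch
+//	close(reqch) // closing reqch ends the loop
+//	w.wg.Wait()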
+
+// loop implements the core dial worker loop. Requests are received on w.reqch.
+// The loop exits when w.reqch is closed.
+func (w *dialWorker) loop() {
+ w.wg.Add(1)
+ defer w.wg.Done()
+ defer w.s.limiter.clearAllPeerDials(w.peer)
+
+ // dq is used to pace dials to different addresses of the peer
+ dq := newDialQueue()
+ // dialsInFlight is the number of dials in flight.
+ dialsInFlight := 0
+
+ startTime := w.cl.Now()
+ // dialTimer is the timer used to trigger dials
+ dialTimer := w.cl.InstantTimer(startTime.Add(math.MaxInt64))
+ defer dialTimer.Stop()
+
+ timerRunning := true
+ // scheduleNextDial updates the timer for triggering the next dial
+ scheduleNextDial := func() {
+ if timerRunning && !dialTimer.Stop() {
+ <-dialTimer.Ch()
+ }
+ timerRunning = false
+ if dq.Len() > 0 {
+ if dialsInFlight == 0 && !w.connected {
+ // if there are no dials in flight, trigger the next dials immediately
+ dialTimer.Reset(startTime)
+ } else {
+ resetTime := startTime.Add(dq.top().Delay)
+ for _, ad := range w.trackedDials {
+ if !ad.expectedTCPUpgradeTime.IsZero() && ad.expectedTCPUpgradeTime.After(resetTime) {
+ resetTime = ad.expectedTCPUpgradeTime
+ }
+ }
+ dialTimer.Reset(resetTime)
+ }
+ timerRunning = true
+ }
+ }
+
+ // totalDials is used to track number of dials made by this worker for metrics
+ totalDials := 0
+loop:
+ for {
+ // The loop has three parts
+ // 1. Input requests are received on w.reqch. If a suitable connection is not available we create
+ // a pendRequest object to track the dialRequest and add the addresses to dq.
+ // 2. Addresses from the dialQueue are dialed at appropriate time intervals depending on delay logic.
+ // We are notified of the completion of these dials on w.resch.
+ // 3. Responses for dials are received on w.resch. On receiving a response, we update the pendRequests
+ // interested in dials on this address.
+
+ select {
+ case req, ok := <-w.reqch:
+ if !ok {
+ if w.s.metricsTracer != nil {
+ w.s.metricsTracer.DialCompleted(w.connected, totalDials, time.Since(startTime))
+ }
+ return
+ }
+ // We have received a new request. If we do not have a suitable connection,
+ // track this dialRequest with a pendRequest.
+ // Enqueue the peer's addresses relevant to this request in dq and
+ // track dials to the addresses relevant to this request.
+
+ c := w.s.bestAcceptableConnToPeer(req.ctx, w.peer)
+ if c != nil {
+ req.resch <- dialResponse{conn: c}
+ continue loop
+ }
+
+ addrs, addrErrs, err := w.s.addrsForDial(req.ctx, w.peer)
+ if err != nil {
+ req.resch <- dialResponse{
+ err: &DialError{
+ Peer: w.peer,
+ DialErrors: addrErrs,
+ Cause: err,
+ }}
+ continue loop
+ }
+
+ // get the delays to dial these addrs from the swarm's dialRanker
+ simConnect, _, _ := network.GetSimultaneousConnect(req.ctx)
+ addrRanking := w.rankAddrs(addrs, simConnect)
+ addrDelay := make(map[string]time.Duration, len(addrRanking))
+
+ // create the pending request object
+ pr := &pendRequest{
+ req: req,
+ addrs: make(map[string]struct{}, len(addrRanking)),
+ err: &DialError{Peer: w.peer, DialErrors: addrErrs},
+ }
+ for _, adelay := range addrRanking {
+ pr.addrs[string(adelay.Addr.Bytes())] = struct{}{}
+ addrDelay[string(adelay.Addr.Bytes())] = adelay.Delay
+ }
+
+ // Check if dials to any of the addrs have completed already
+ // If they have errored, record the error in pr. If they have succeeded,
+ // respond with the connection.
+ // If they are pending, add them to tojoin.
+ // If we haven't seen any of the addresses before, add them to todial.
+ var todial []ma.Multiaddr
+ var tojoin []*addrDial
+
+ for _, adelay := range addrRanking {
+ ad, ok := w.trackedDials[string(adelay.Addr.Bytes())]
+ if !ok {
+ todial = append(todial, adelay.Addr)
+ continue
+ }
+
+ if ad.conn != nil {
+ // dial to this addr was successful, complete the request
+ req.resch <- dialResponse{conn: ad.conn}
+ continue loop
+ }
+
+ if ad.err != nil {
+ // dial to this addr errored, accumulate the error
+ pr.err.recordErr(ad.addr, ad.err)
+ delete(pr.addrs, string(ad.addr.Bytes()))
+ continue
+ }
+
+ // dial is still pending, add to the join list
+ tojoin = append(tojoin, ad)
+ }
+
+ if len(todial) == 0 && len(tojoin) == 0 {
+ // all addrs applicable to this request have been dialed; we must have errored
+ pr.err.Cause = ErrAllDialsFailed
+ req.resch <- dialResponse{err: pr.err}
+ continue loop
+ }
+
+ // The request has some pending or new dials
+ w.pendingRequests[pr] = struct{}{}
+
+ for _, ad := range tojoin {
+ if !ad.dialed {
+ // we haven't dialed this address. update the ad.ctx to have simultaneous connect values
+ // set correctly
+ if simConnect, isClient, reason := network.GetSimultaneousConnect(req.ctx); simConnect {
+ if simConnect, _, _ := network.GetSimultaneousConnect(ad.ctx); !simConnect {
+ ad.ctx = network.WithSimultaneousConnect(ad.ctx, isClient, reason)
+ // update the element in dq to use the simultaneous connect delay.
+ dq.UpdateOrAdd(network.AddrDelay{
+ Addr: ad.addr,
+ Delay: addrDelay[string(ad.addr.Bytes())],
+ })
+ }
+ }
+ }
+ }
+
+ if len(todial) > 0 {
+ now := time.Now()
+ // these are new addresses, track them and add them to dq
+ for _, a := range todial {
+ w.trackedDials[string(a.Bytes())] = &addrDial{
+ addr: a,
+ ctx: req.ctx,
+ createdAt: now,
+ }
+ dq.Add(network.AddrDelay{Addr: a, Delay: addrDelay[string(a.Bytes())]})
+ }
+ }
+ // setup dialTimer for updates to dq
+ scheduleNextDial()
+
+ case <-dialTimer.Ch():
+ // It's time to dial the next batch of addresses.
+ // We don't check the delay of the addresses received from the queue here
+ // because if the timer triggered before the delay, it means that all
+ // the inflight dials have errored and we should dial the next batch of
+ // addresses
+ now := time.Now()
+ for _, adelay := range dq.NextBatch() {
+ // spawn the dial
+ ad, ok := w.trackedDials[string(adelay.Addr.Bytes())]
+ if !ok {
+ log.Error("SWARM BUG: no entry for address in trackedDials", "addr", adelay.Addr)
+ continue
+ }
+ ad.dialed = true
+ ad.dialRankingDelay = now.Sub(ad.createdAt)
+ err := w.s.dialNextAddr(ad.ctx, w.peer, ad.addr, w.resch)
+ if err != nil {
+ // Errored without attempting a dial. This happens in case of
+ // backoff or black hole.
+ w.dispatchError(ad, err)
+ } else {
+ // the dial was spawned successfully. update inflight dials
+ dialsInFlight++
+ totalDials++
+ }
+ }
+ timerRunning = false
+ // schedule more dials
+ scheduleNextDial()
+
+ case res := <-w.resch:
+ // A dial to an address has completed.
+ // Update all requests waiting on this address. On success, complete the request.
+ // On error, record the error
+
+ ad, ok := w.trackedDials[string(res.Addr.Bytes())]
+ if !ok {
+ log.Error("SWARM BUG: no entry for address in trackedDials", "addr", res.Addr)
+ if res.Conn != nil {
+ res.Conn.Close()
+ }
+ dialsInFlight--
+ continue
+ }
+
+ // TCP Connection has been established. Wait for connection upgrade on this address
+ // before making new dials.
+ if res.Kind == tpt.UpdateKindHandshakeProgressed {
+ // Only wait for public addresses to complete dialing since private dials
+ // are quick anyway
+ if manet.IsPublicAddr(res.Addr) {
+ ad.expectedTCPUpgradeTime = w.cl.Now().Add(PublicTCPDelay)
+ }
+ scheduleNextDial()
+ continue
+ }
+ dialsInFlight--
+ ad.expectedTCPUpgradeTime = time.Time{}
+ if res.Conn != nil {
+ // we got a connection, add it to the swarm
+ conn, err := w.s.addConn(res.Conn, network.DirOutbound)
+ if err != nil {
+ // oops no, we failed to add it to the swarm
+ res.Conn.Close()
+ w.dispatchError(ad, err)
+ continue loop
+ }
+
+ for pr := range w.pendingRequests {
+ if _, ok := pr.addrs[string(ad.addr.Bytes())]; ok {
+ pr.req.resch <- dialResponse{conn: conn}
+ delete(w.pendingRequests, pr)
+ }
+ }
+
+ ad.conn = conn
+ if !w.connected {
+ w.connected = true
+ if w.s.metricsTracer != nil {
+ w.s.metricsTracer.DialRankingDelay(ad.dialRankingDelay)
+ }
+ }
+
+ continue loop
+ }
+
+ // it must be an error -- add backoff if applicable and dispatch
+ // ErrDialRefusedBlackHole shouldn't end up here, just a safety check
+ if res.Err != ErrDialRefusedBlackHole && res.Err != context.Canceled && !w.connected {
+ // we only add backoff if there has not been a successful connection
+ // for consistency with the old dialer behavior.
+ w.s.backf.AddBackoff(w.peer, res.Addr)
+ } else if res.Err == ErrDialRefusedBlackHole {
+ log.Error("SWARM BUG: unexpected ErrDialRefusedBlackHole while dialing peer to addr",
+ "peer", w.peer, "addr", res.Addr)
+ }
+
+ w.dispatchError(ad, res.Err)
+ // Only schedule next dial on error.
+ // If we scheduleNextDial on success, we will end up making one dial more than
+ // required because the final successful dial will spawn one more dial
+ scheduleNextDial()
+ }
+ }
+}
+
+// dispatchError dispatches an error to a specific addr dial
+func (w *dialWorker) dispatchError(ad *addrDial, err error) {
+ ad.err = err
+ for pr := range w.pendingRequests {
+ // accumulate the error
+ if _, ok := pr.addrs[string(ad.addr.Bytes())]; ok {
+ pr.err.recordErr(ad.addr, err)
+ delete(pr.addrs, string(ad.addr.Bytes()))
+ if len(pr.addrs) == 0 {
+ // all addrs have errored, dispatch dial error
+ // but first do one last check in case an acceptable connection has landed from
+ // a simultaneous dial that started later and added new acceptable addrs
+ c := w.s.bestAcceptableConnToPeer(pr.req.ctx, w.peer)
+ if c != nil {
+ pr.req.resch <- dialResponse{conn: c}
+ } else {
+ pr.err.Cause = ErrAllDialsFailed
+ pr.req.resch <- dialResponse{err: pr.err}
+ }
+ delete(w.pendingRequests, pr)
+ }
+ }
+ }
+
+ // if it was a backoff, clear the address dial so that it doesn't inhibit new dial requests.
+ // this is necessary to support active listen scenarios, where a new dial comes in while
+ // another dial is in progress, and needs to do a direct connection without inhibitions from
+ // dial backoff.
+ if err == ErrDialBackoff {
+ delete(w.trackedDials, string(ad.addr.Bytes()))
+ }
+}
+
+// rankAddrs ranks addresses for dialing. If it's a simConnect request, we
+// dial all addresses immediately without any delay.
+func (w *dialWorker) rankAddrs(addrs []ma.Multiaddr, isSimConnect bool) []network.AddrDelay {
+ if isSimConnect {
+ return NoDelayDialRanker(addrs)
+ }
+ return w.s.dialRanker(addrs)
+}
+
+// dialQueue is a priority queue used to schedule dials
+type dialQueue struct {
+ // q contains dials ordered by delay
+ q []network.AddrDelay
+}
+
+// newDialQueue returns a new dialQueue
+func newDialQueue() *dialQueue {
+ return &dialQueue{
+ q: make([]network.AddrDelay, 0, 16),
+ }
+}
+
+// Add adds a new element to the dialQueue. To update an element use UpdateOrAdd.
+func (dq *dialQueue) Add(adelay network.AddrDelay) {
+ for i := dq.Len() - 1; i >= 0; i-- {
+ if dq.q[i].Delay <= adelay.Delay {
+ // insert at pos i+1
+ dq.q = append(dq.q, network.AddrDelay{}) // extend the slice
+ copy(dq.q[i+2:], dq.q[i+1:])
+ dq.q[i+1] = adelay
+ return
+ }
+ }
+ // insert at position 0
+ dq.q = append(dq.q, network.AddrDelay{}) // extend the slice
+ copy(dq.q[1:], dq.q[0:])
+ dq.q[0] = adelay
+}
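+
+// For illustration: starting from an empty queue, Add-ing delays 3, 1, 2
+// yields the order [1 2 3]. Adding another element with delay 2 places it
+// after the existing delay-2 element, since the scan runs from the back and
+// inserts after the last element with a smaller or equal delay.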
+
+// UpdateOrAdd updates the element with address adelay.Addr to the new delay.
+// Useful when hole punching.
+func (dq *dialQueue) UpdateOrAdd(adelay network.AddrDelay) {
+ for i := 0; i < dq.Len(); i++ {
+ if dq.q[i].Addr.Equal(adelay.Addr) {
+ if dq.q[i].Delay == adelay.Delay {
+ // existing element is the same. nothing to do
+ return
+ }
+ // remove the element
+ copy(dq.q[i:], dq.q[i+1:])
+ dq.q = dq.q[:len(dq.q)-1]
+ }
+ }
+ dq.Add(adelay)
+}
+
+// NextBatch returns all the elements in the queue with the highest priority
+func (dq *dialQueue) NextBatch() []network.AddrDelay {
+ if dq.Len() == 0 {
+ return nil
+ }
+
+ // i is the index of the first element of the next delay class, or dq.Len() if all elements share the top delay
+ var i int
+ for i = 0; i < dq.Len(); i++ {
+ if dq.q[i].Delay != dq.q[0].Delay {
+ break
+ }
+ }
+ res := dq.q[:i]
+ dq.q = dq.q[i:]
+ return res
+}
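+
+// For illustration: with queued delays [1 1 2 3], NextBatch returns both
+// delay-1 elements and leaves [2 3]; the next call returns the delay-2
+// element, then the delay-3 element, and finally nil once the queue is empty.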
+
+// top returns the top element of the queue
+func (dq *dialQueue) top() network.AddrDelay {
+ return dq.q[0]
+}
+
+// Len returns the number of elements in the queue
+func (dq *dialQueue) Len() int {
+ return len(dq.q)
+}
diff --git a/p2p/net/swarm/dial_worker_test.go b/p2p/net/swarm/dial_worker_test.go
new file mode 100644
index 0000000000..3935c79767
--- /dev/null
+++ b/p2p/net/swarm/dial_worker_test.go
@@ -0,0 +1,1196 @@
+package swarm
+
+import (
+ "context"
+ "crypto/rand"
+ "errors"
+ "fmt"
+ "math"
+ mrand "math/rand"
+ "reflect"
+ "sort"
+ "sync"
+ "testing"
+ "testing/quick"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/libp2p/go-libp2p/core/sec"
+ "github.com/libp2p/go-libp2p/core/sec/insecure"
+ "github.com/libp2p/go-libp2p/core/test"
+ "github.com/libp2p/go-libp2p/core/transport"
+ "github.com/libp2p/go-libp2p/p2p/host/eventbus"
+ "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem"
+ "github.com/libp2p/go-libp2p/p2p/muxer/yamux"
+ tptu "github.com/libp2p/go-libp2p/p2p/net/upgrader"
+ libp2pquic "github.com/libp2p/go-libp2p/p2p/transport/quic"
+ "github.com/libp2p/go-libp2p/p2p/transport/quicreuse"
+ "github.com/libp2p/go-libp2p/p2p/transport/tcp"
+
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+ "github.com/quic-go/quic-go"
+ "github.com/stretchr/testify/require"
+)
+
+type mockClock struct {
+ *test.MockClock
+}
+
+func (m *mockClock) InstantTimer(when time.Time) InstantTimer {
+ return m.MockClock.InstantTimer(when)
+}
+
+func newMockClock() *mockClock {
+ return &mockClock{test.NewMockClock()}
+}
+
+func newPeer(t *testing.T) (crypto.PrivKey, peer.ID) {
+ priv, _, err := crypto.GenerateEd25519Key(rand.Reader)
+ require.NoError(t, err)
+ id, err := peer.IDFromPrivateKey(priv)
+ require.NoError(t, err)
+ return priv, id
+}
+
+func makeSwarm(t *testing.T) *Swarm {
+ s := makeSwarmWithNoListenAddrs(t, WithDialTimeout(1*time.Second))
+ if err := s.Listen(ma.StringCast("/ip4/127.0.0.1/tcp/0")); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := s.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1")); err != nil {
+ t.Fatal(err)
+ }
+
+ return s
+}
+
+func makeSwarmWithNoListenAddrs(t *testing.T, opts ...Option) *Swarm {
+ priv, id := newPeer(t)
+
+ ps, err := pstoremem.NewPeerstore()
+ require.NoError(t, err)
+ ps.AddPubKey(id, priv.GetPublic())
+ ps.AddPrivKey(id, priv)
+ t.Cleanup(func() { ps.Close() })
+
+ s, err := NewSwarm(id, ps, eventbus.NewBus(), opts...)
+ require.NoError(t, err)
+
+ upgrader := makeUpgrader(t, s)
+ var tcpOpts []tcp.Option
+ tcpOpts = append(tcpOpts, tcp.DisableReuseport())
+ tcpTransport, err := tcp.NewTCPTransport(upgrader, nil, nil, tcpOpts...)
+ require.NoError(t, err)
+ if err := s.AddTransport(tcpTransport); err != nil {
+ t.Fatal(err)
+ }
+ reuse, err := quicreuse.NewConnManager(quic.StatelessResetKey{}, quic.TokenGeneratorKey{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ quicTransport, err := libp2pquic.NewTransport(priv, reuse, nil, nil, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := s.AddTransport(quicTransport); err != nil {
+ t.Fatal(err)
+ }
+ return s
+}
+
+func makeUpgrader(t *testing.T, n *Swarm) transport.Upgrader {
+ id := n.LocalPeer()
+ pk := n.Peerstore().PrivKey(id)
+ st := insecure.NewWithIdentity(insecure.ID, id, pk)
+
+ u, err := tptu.New([]sec.SecureTransport{st}, []tptu.StreamMuxer{{ID: yamux.ID, Muxer: yamux.DefaultTransport}}, nil, nil, nil)
+ require.NoError(t, err)
+ return u
+}
+
+// makeTCPListener listens on the TCP address a. On accepting a connection it notifies recvCh. Sending a message to
+// the returned channel ch will close an accepted connection.
+func makeTCPListener(t *testing.T, a ma.Multiaddr, recvCh chan struct{}) (list manet.Listener, ch chan struct{}) {
+ t.Helper()
+ list, err := manet.Listen(a)
+ if err != nil {
+ t.Fatal(err)
+ }
+ ch = make(chan struct{})
+ go func() {
+ for {
+ c, err := list.Accept()
+ if err != nil {
+ break
+ }
+ recvCh <- struct{}{}
+ <-ch
+ err = c.Close()
+ if err != nil {
+ t.Error(err)
+ }
+ }
+ }()
+ return list, ch
+}
+
+func TestDialWorkerLoopBasic(t *testing.T) {
+ s1 := makeSwarm(t)
+ s2 := makeSwarm(t)
+ defer s1.Close()
+ defer s2.Close()
+
+ // Only pass in a single address here, otherwise we might end up with a TCP and QUIC connection dialed.
+ s1.Peerstore().AddAddrs(s2.LocalPeer(), []ma.Multiaddr{s2.ListenAddresses()[0]}, peerstore.PermanentAddrTTL)
+
+ reqch := make(chan dialRequest)
+ resch := make(chan dialResponse)
+ worker := newDialWorker(s1, s2.LocalPeer(), reqch, nil)
+ go worker.loop()
+
+ var conn *Conn
+ reqch <- dialRequest{ctx: context.Background(), resch: resch}
+ select {
+ case res := <-resch:
+ require.NoError(t, res.err)
+ conn = res.conn
+ case <-time.After(10 * time.Second):
+ t.Fatal("dial didn't complete")
+ }
+
+ s, err := conn.NewStream(context.Background())
+ require.NoError(t, err)
+ s.Close()
+
+ var conn2 *Conn
+ reqch <- dialRequest{ctx: context.Background(), resch: resch}
+ select {
+ case res := <-resch:
+ require.NoError(t, res.err)
+ conn2 = res.conn
+ case <-time.After(10 * time.Second):
+ t.Fatal("dial didn't complete")
+ }
+
+ // can't use require.Equal here, as this does a deep comparison
+ if conn != conn2 {
+ t.Fatalf("expecting the same connection from both dials. %s <-> %s vs. %s <-> %s", conn.LocalMultiaddr(), conn.RemoteMultiaddr(), conn2.LocalMultiaddr(), conn2.RemoteMultiaddr())
+ }
+
+ close(reqch)
+ worker.wg.Wait()
+}
+
+func TestDialWorkerLoopConcurrent(t *testing.T) {
+ s1 := makeSwarm(t)
+ s2 := makeSwarm(t)
+ defer s1.Close()
+ defer s2.Close()
+
+ s1.Peerstore().AddAddrs(s2.LocalPeer(), s2.ListenAddresses(), peerstore.PermanentAddrTTL)
+
+ reqch := make(chan dialRequest)
+ worker := newDialWorker(s1, s2.LocalPeer(), reqch, nil)
+ go worker.loop()
+
+ const dials = 100
+ var wg sync.WaitGroup
+ resch := make(chan dialResponse, dials)
+ for i := 0; i < dials; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ reschgo := make(chan dialResponse, 1)
+ reqch <- dialRequest{ctx: context.Background(), resch: reschgo}
+ select {
+ case res := <-reschgo:
+ resch <- res
+ case <-time.After(time.Minute):
+ resch <- dialResponse{err: errors.New("timed out!")}
+ }
+ }()
+ }
+ wg.Wait()
+
+ for i := 0; i < dials; i++ {
+ res := <-resch
+ require.NoError(t, res.err)
+ }
+
+ t.Log("all concurrent dials done")
+
+ close(reqch)
+ worker.wg.Wait()
+}
+
+func TestDialWorkerLoopFailure(t *testing.T) {
+ s1 := makeSwarm(t)
+ defer s1.Close()
+
+ _, p2 := newPeer(t)
+
+ s1.Peerstore().AddAddrs(p2, []ma.Multiaddr{ma.StringCast("/ip4/11.0.0.1/tcp/1234"), ma.StringCast("/ip4/11.0.0.1/udp/1234/quic-v1")}, peerstore.PermanentAddrTTL)
+
+ reqch := make(chan dialRequest)
+ resch := make(chan dialResponse)
+ worker := newDialWorker(s1, p2, reqch, nil)
+ go worker.loop()
+
+ reqch <- dialRequest{ctx: context.Background(), resch: resch}
+ select {
+ case res := <-resch:
+ require.Error(t, res.err)
+ case <-time.After(time.Minute):
+ t.Fatal("dial didn't complete")
+ }
+
+ close(reqch)
+ worker.wg.Wait()
+}
+
+func TestDialWorkerLoopConcurrentFailure(t *testing.T) {
+ s1 := makeSwarm(t)
+ defer s1.Close()
+
+ _, p2 := newPeer(t)
+
+ s1.Peerstore().AddAddrs(p2, []ma.Multiaddr{ma.StringCast("/ip4/11.0.0.1/tcp/1234"), ma.StringCast("/ip4/11.0.0.1/udp/1234/quic-v1")}, peerstore.PermanentAddrTTL)
+
+ reqch := make(chan dialRequest)
+ worker := newDialWorker(s1, p2, reqch, nil)
+ go worker.loop()
+
+ const dials = 100
+ var errTimeout = errors.New("timed out!")
+ var wg sync.WaitGroup
+ resch := make(chan dialResponse, dials)
+ for i := 0; i < dials; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ reschgo := make(chan dialResponse, 1)
+ reqch <- dialRequest{ctx: context.Background(), resch: reschgo}
+
+ select {
+ case res := <-reschgo:
+ resch <- res
+ case <-time.After(time.Minute):
+ resch <- dialResponse{err: errTimeout}
+ }
+ }()
+ }
+ wg.Wait()
+
+ for i := 0; i < dials; i++ {
+ res := <-resch
+ require.Error(t, res.err)
+ if res.err == errTimeout {
+ t.Fatal("dial response timed out")
+ }
+ }
+
+ t.Log("all concurrent dials done")
+
+ close(reqch)
+ worker.wg.Wait()
+}
+
+func TestDialWorkerLoopConcurrentMix(t *testing.T) {
+ s1 := makeSwarm(t)
+ s2 := makeSwarm(t)
+ defer s1.Close()
+ defer s2.Close()
+
+ s1.Peerstore().AddAddrs(s2.LocalPeer(), s2.ListenAddresses(), peerstore.PermanentAddrTTL)
+ s1.Peerstore().AddAddrs(s2.LocalPeer(), []ma.Multiaddr{ma.StringCast("/ip4/11.0.0.1/tcp/1234"), ma.StringCast("/ip4/11.0.0.1/udp/1234/quic-v1")}, peerstore.PermanentAddrTTL)
+
+ reqch := make(chan dialRequest)
+ worker := newDialWorker(s1, s2.LocalPeer(), reqch, nil)
+ go worker.loop()
+
+ const dials = 100
+ var wg sync.WaitGroup
+ resch := make(chan dialResponse, dials)
+ for i := 0; i < dials; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ reschgo := make(chan dialResponse, 1)
+ reqch <- dialRequest{ctx: context.Background(), resch: reschgo}
+ select {
+ case res := <-reschgo:
+ resch <- res
+ case <-time.After(time.Minute):
+ resch <- dialResponse{err: errors.New("timed out!")}
+ }
+ }()
+ }
+ wg.Wait()
+
+ for i := 0; i < dials; i++ {
+ res := <-resch
+ require.NoError(t, res.err)
+ }
+
+ t.Log("all concurrent dials done")
+
+ close(reqch)
+ worker.wg.Wait()
+}
+
+func TestDialWorkerLoopConcurrentFailureStress(t *testing.T) {
+ s1 := makeSwarm(t)
+ defer s1.Close()
+
+ _, p2 := newPeer(t)
+
+ var addrs []ma.Multiaddr
+ for i := 0; i < 16; i++ {
+ addrs = append(addrs, ma.StringCast(fmt.Sprintf("/ip4/11.0.0.%d/tcp/%d", i%256, 1234+i)))
+ }
+ s1.Peerstore().AddAddrs(p2, addrs, peerstore.PermanentAddrTTL)
+
+ reqch := make(chan dialRequest)
+ worker := newDialWorker(s1, p2, reqch, nil)
+ go worker.loop()
+
+ const dials = 100
+ var errTimeout = errors.New("timed out!")
+ var wg sync.WaitGroup
+ resch := make(chan dialResponse, dials)
+ for i := 0; i < dials; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ reschgo := make(chan dialResponse, 1)
+ reqch <- dialRequest{ctx: context.Background(), resch: reschgo}
+ select {
+ case res := <-reschgo:
+ t.Log("received result")
+ resch <- res
+ case <-time.After(15 * time.Second):
+ resch <- dialResponse{err: errTimeout}
+ }
+ }()
+ }
+ wg.Wait()
+
+ for i := 0; i < dials; i++ {
+ res := <-resch
+ require.Error(t, res.err)
+ if res.err == errTimeout {
+ t.Fatal("dial response timed out")
+ }
+ }
+
+ t.Log("all concurrent dials done")
+
+ close(reqch)
+ worker.wg.Wait()
+}
+
+func TestDialQueueNextBatch(t *testing.T) {
+ addrs := make([]ma.Multiaddr, 0)
+ for i := 0; i < 10; i++ {
+ addrs = append(addrs, ma.StringCast(fmt.Sprintf("/ip4/1.2.3.4/tcp/%d", i)))
+ }
+ testcase := []struct {
+ name string
+ input []network.AddrDelay
+ output [][]ma.Multiaddr
+ hasUpdates bool
+ }{
+ {
+ name: "next batch",
+ input: []network.AddrDelay{
+ {Addr: addrs[0], Delay: 3},
+ {Addr: addrs[1], Delay: 2},
+ {Addr: addrs[2], Delay: 1},
+ {Addr: addrs[3], Delay: 1},
+ },
+ output: [][]ma.Multiaddr{
+ {addrs[2], addrs[3]},
+ {addrs[1]},
+ {addrs[0]},
+ },
+ },
+ {
+ name: "priority queue property 2",
+ input: []network.AddrDelay{
+ {Addr: addrs[0], Delay: 5},
+ {Addr: addrs[1], Delay: 3},
+ {Addr: addrs[2], Delay: 2},
+ {Addr: addrs[3], Delay: 1},
+ {Addr: addrs[4], Delay: 1},
+ },
+
+ output: [][]ma.Multiaddr{
+ {addrs[3], addrs[4]},
+ {addrs[2]},
+ {addrs[1]},
+ {addrs[0]},
+ },
+ },
+ {
+ name: "updates",
+ input: []network.AddrDelay{
+ {Addr: addrs[0], Delay: 3}, // decreasing order
+ {Addr: addrs[1], Delay: 3},
+ {Addr: addrs[2], Delay: 2},
+ {Addr: addrs[3], Delay: 2},
+ {Addr: addrs[4], Delay: 1},
+ {Addr: addrs[0], Delay: 1}, // increasing order
+ {Addr: addrs[1], Delay: 1},
+ {Addr: addrs[2], Delay: 2},
+ {Addr: addrs[3], Delay: 2},
+ {Addr: addrs[4], Delay: 3},
+ },
+ output: [][]ma.Multiaddr{
+ {addrs[0], addrs[1]},
+ {addrs[2], addrs[3]},
+ {addrs[4]},
+ {},
+ },
+ hasUpdates: true,
+ },
+ {
+ name: "null input",
+ input: []network.AddrDelay{},
+ output: [][]ma.Multiaddr{
+ {},
+ {},
+ },
+ },
+ }
+ for _, tc := range testcase {
+ t.Run(tc.name, func(t *testing.T) {
+ q := newDialQueue()
+ for i := 0; i < len(tc.input); i++ {
+ if tc.hasUpdates {
+ q.UpdateOrAdd(tc.input[i])
+ } else {
+ q.Add(tc.input[i])
+ }
+ }
+ for _, batch := range tc.output {
+ b := q.NextBatch()
+ if len(batch) != len(b) {
+ t.Errorf("expected %d elements got %d", len(batch), len(b))
+ }
+ sort.Slice(b, func(i, j int) bool { return b[i].Addr.String() < b[j].Addr.String() })
+ sort.Slice(batch, func(i, j int) bool { return batch[i].String() < batch[j].String() })
+ for i := 0; i < len(b); i++ {
+ if !b[i].Addr.Equal(batch[i]) {
+ log.Error("expected address mismatch", "expected", batch[i], "got", b[i].Addr)
+ }
+ }
+ }
+ if q.Len() != 0 {
+ t.Errorf("expected queue to be empty at end. got: %d", q.Len())
+ }
+ })
+ }
+}
+
+// timedDial is a dial to a single address of the peer
+type timedDial struct {
+ // addr is the address to dial
+ addr ma.Multiaddr
+ // delay is the delay after which this address should be dialed
+ delay time.Duration
+ // success indicates whether the dial should succeed
+ success bool
+ // failAfter is how long this dial should take to fail after it is dialed
+ failAfter time.Duration
+}
+
+// schedulingTestCase is used to test the dialWorker loop scheduling logic.
+// A ranker is made from `input`, which provides the addresses to the
+// dial worker loop with the specified delays.
+// checkDialWorkerLoopScheduling then verifies that the individual dial calls are
+// made at the right moments.
+type schedulingTestCase struct {
+ name string
+ input []timedDial
+ maxDuration time.Duration
+}
+
+// Generate generates a random test case for use with testing/quick
+func (s schedulingTestCase) Generate(rand *mrand.Rand, size int) reflect.Value {
+ if size > 20 {
+ size = 20
+ }
+ input := make([]timedDial, size)
+ delays := make(map[time.Duration]struct{})
+ for i := 0; i < size; i++ {
+ input[i] = timedDial{
+ addr: ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", i+10550)),
+ delay: time.Duration(mrand.Intn(100)) * 10 * time.Millisecond, // max 1 second
+ success: false,
+ failAfter: time.Duration(mrand.Intn(100)) * 10 * time.Millisecond, // max 1 second
+ }
+ delays[input[i].delay] = struct{}{}
+ }
+ successIdx := rand.Intn(size)
+ for {
+ // set a unique delay for success. This is required to test the property that
+ // no extra dials are made after success
+ d := time.Duration(rand.Intn(100)) * 10 * time.Millisecond
+ if _, ok := delays[d]; !ok {
+ input[successIdx].delay = d
+ input[successIdx].success = true
+ break
+ }
+ }
+ return reflect.ValueOf(schedulingTestCase{
+ name: "",
+ input: input,
+ maxDuration: 10 * time.Second, // not tested here
+ })
+}
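+
+// Generate satisfies testing/quick's Generator interface, so quick.Check can
+// synthesize random schedulingTestCases. A minimal sketch of the call, as done
+// by TestDialWorkerLoopSchedulingProperty below:
+//
+//	quick.Check(func(tc schedulingTestCase) bool {
+//		return checkDialWorkerLoopScheduling(t, s1, s2, tc) == nil
+//	}, &quick.Config{MaxCount: 50})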
+
+// dialState is used to track the dials for testing dialWorker ranking logic
+type dialState struct {
+ // ch is the chan used to trigger dial failure.
+ ch chan struct{}
+ // addr is the address of the dial
+ addr ma.Multiaddr
+ // delay is the delay after which this address should be dialed
+ delay time.Duration
+ // success indicates whether the dial should succeed
+ success bool
+ // failAfter is how long this dial should take to fail after it is dialed
+ failAfter time.Duration
+ // failAt is the instant at which this dial should fail if success is false
+ failAt time.Time
+}
+
+// checkDialWorkerLoopScheduling verifies whether s1 dials s2 according to the
+// schedule specified by the test case tc
+func checkDialWorkerLoopScheduling(t *testing.T, s1, s2 *Swarm, tc schedulingTestCase) error {
+ t.Helper()
+ // failDials is used to track dials which should fail in the future.
+ // At the appropriate moment a message is sent to dialState.ch to trigger
+ // the failure.
+ failDials := make(map[*ma.Multiaddr]dialState)
+ // recvCh is used to receive dial notifications for dials that will fail
+ recvCh := make(chan struct{}, 100)
+ // allDials tracks all pending dials
+ allDials := make(map[*ma.Multiaddr]dialState)
+ // addrs are the peer addresses the swarm will use for dialing
+ addrs := make([]ma.Multiaddr, 0)
+ // create pending dials
+ // we add success cases as listen addresses on the swarm;
+ // failure cases are created using makeTCPListener
+ for _, inp := range tc.input {
+ var failCh chan struct{}
+ if inp.success {
+ // add the address as a listen address if this dial should succeed
+ err := s2.AddListenAddr(inp.addr)
+ if err != nil {
+ return fmt.Errorf("failed to listen on addr: %s: err: %w", inp.addr, err)
+ }
+ } else {
+ // make a listener which will fail on sending a message to ch
+ l, ch := makeTCPListener(t, inp.addr, recvCh)
+ failCh = ch
+ f := func() {
+ err := l.Close()
+ if err != nil {
+ t.Error(err)
+ }
+ }
+ defer f()
+ }
+ addrs = append(addrs, inp.addr)
+ // add to pending dials
+ allDials[&inp.addr] = dialState{
+ ch: failCh,
+ addr: inp.addr,
+ delay: inp.delay,
+ success: inp.success,
+ failAfter: inp.failAfter,
+ }
+ }
+ // setup the peers addresses
+ s1.Peerstore().AddAddrs(s2.LocalPeer(), addrs, peerstore.PermanentAddrTTL)
+
+ // create worker
+ reqch := make(chan dialRequest)
+ resch := make(chan dialResponse)
+ cl := newMockClock()
+ st := cl.Now()
+ worker1 := newDialWorker(s1, s2.LocalPeer(), reqch, cl)
+ go worker1.loop()
+ defer worker1.wg.Wait()
+ defer close(reqch)
+
+ // trigger the request
+ reqch <- dialRequest{ctx: context.Background(), resch: resch}
+
+ connected := false
+
+ // Advance the clock by 10 ms every iteration.
+ // At every iteration:
+ // Check if any dial should fail. If it should, trigger the failure by sending a message on the
+ // listener failCh.
+ // If there are no dials in flight, check that the most urgent dials have been triggered.
+ // If there are dials in flight, check that the relevant dials have been triggered.
+ // Before the next iteration, ensure that no unexpected dials are received.
+loop:
+ for {
+ // fail any dials that should fail at this instant
+ for a, p := range failDials {
+ if p.failAt.Before(cl.Now()) || p.failAt == cl.Now() {
+ p.ch <- struct{}{}
+ delete(failDials, a)
+ }
+ }
+ // if there are no pending dials, next dial should have been triggered
+ trigger := len(failDials) == 0
+
+ // mi is the minimum delay of pending dials
+ // if trigger is true, all dials with delay mi should have been triggered
+ mi := time.Duration(math.MaxInt64)
+ for _, ds := range allDials {
+ if ds.delay < mi {
+ mi = ds.delay
+ }
+ }
+ for a, ds := range allDials {
+ if (trigger && mi == ds.delay) ||
+ cl.Now().After(st.Add(ds.delay)) ||
+ cl.Now() == st.Add(ds.delay) {
+ if ds.success {
+ // check for success and exit
+ select {
+ case r := <-resch:
+ if r.conn == nil {
+ return errors.New("expected connection to succeed")
+ }
+ // High timeout here is okay. We will exit whenever the other branch
+ // is triggered
+ case <-time.After(10 * time.Second):
+ return errors.New("expected to receive a response")
+ }
+ connected = true
+ break loop
+ } else {
+ // ensure that a failing dial attempt happened but didn't succeed
+ select {
+ case <-recvCh:
+ case <-resch:
+ return errors.New("didn't expect a response")
+ // High timeout here is okay. We will exit whenever the other branch
+ // is triggered
+ case <-time.After(10 * time.Second):
+ return errors.New("didn't receive a dial attempt notification")
+ }
+ failDials[a] = dialState{
+ ch: ds.ch,
+ failAt: cl.Now().Add(ds.failAfter),
+ addr: *a,
+ delay: ds.delay,
+ }
+ }
+ delete(allDials, a)
+ }
+ }
+ // check for unexpected dials
+ select {
+ case <-recvCh:
+ return errors.New("no dial should have succeeded at this instant")
+ default:
+ }
+
+ // advance the clock
+ cl.AdvanceBy(10 * time.Millisecond)
+ // nothing more to do. exit
+ if len(failDials) == 0 && len(allDials) == 0 {
+ break
+ }
+ }
+
+ if connected {
+ // ensure we don't receive any extra connections
+ select {
+ case <-recvCh:
+ return errors.New("didn't expect a dial attempt")
+ case <-time.After(100 * time.Millisecond):
+ }
+ } else {
+ // ensure that we do receive the final error response
+ select {
+ case r := <-resch:
+ require.Error(t, r.err)
+ case <-time.After(100 * time.Millisecond):
+ return errors.New("expected to receive response")
+ }
+ }
+ // check if this test didn't take too much time
+ if cl.Now().Sub(st) > tc.maxDuration {
+ return fmt.Errorf("expected test to finish early: expected %d, took: %d", tc.maxDuration, cl.Now().Sub(st))
+ }
+ return nil
+}
+
+// makeRanker takes a slice of timedDial objects and returns a DialRanker
+// which will trigger dials to addresses at the specified delays in the timedDials
+func makeRanker(tc []timedDial) network.DialRanker {
+ return func(_ []ma.Multiaddr) []network.AddrDelay {
+ res := make([]network.AddrDelay, len(tc))
+ for i := 0; i < len(tc); i++ {
+ res[i] = network.AddrDelay{Addr: tc[i].addr, Delay: tc[i].delay}
+ }
+ return res
+ }
+}
+
+// TestCheckDialWorkerLoopScheduling checks the checker itself
+func TestCheckDialWorkerLoopScheduling(t *testing.T) {
+ addrs := make([]ma.Multiaddr, 0)
+ for i := 0; i < 10; i++ {
+ addrs = append(addrs, ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 20000+i)))
+ }
+
+ tc := schedulingTestCase{
+ input: []timedDial{
+ {
+ addr: addrs[1],
+ delay: 0,
+ success: true,
+ },
+ {
+ addr: addrs[0],
+ delay: 100 * time.Millisecond,
+ success: false,
+ failAfter: 50 * time.Millisecond,
+ },
+ },
+ maxDuration: 20 * time.Millisecond,
+ }
+ s1 := makeSwarmWithNoListenAddrs(t)
+ s2 := makeSwarmWithNoListenAddrs(t)
+ // valid ranking logic, so it shouldn't error
+ s1.dialRanker = makeRanker(tc.input)
+ err := checkDialWorkerLoopScheduling(t, s1, s2, tc)
+ require.NoError(t, err)
+ // close swarms to remove address binding
+ s1.Close()
+ s2.Close()
+
+ s3 := makeSwarmWithNoListenAddrs(t)
+ defer s3.Close()
+ s4 := makeSwarmWithNoListenAddrs(t)
+ defer s4.Close()
+ // invalid ranking logic to trigger an error
+ s3.dialRanker = NoDelayDialRanker
+ err = checkDialWorkerLoopScheduling(t, s3, s4, tc)
+ require.Error(t, err)
+}
+
+func TestDialWorkerLoopRanking(t *testing.T) {
+ addrs := make([]ma.Multiaddr, 0)
+ for i := 0; i < 10; i++ {
+ addrs = append(addrs, ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 20000+i)))
+ }
+
+ testcases := []schedulingTestCase{
+ {
+ name: "first success",
+ input: []timedDial{
+ {
+ addr: addrs[1],
+ delay: 0,
+ success: true,
+ },
+ {
+ addr: addrs[0],
+ delay: 100 * time.Millisecond,
+ success: false,
+ failAfter: 50 * time.Millisecond,
+ },
+ },
+ maxDuration: 20 * time.Millisecond,
+ },
+ {
+ name: "delayed dials",
+ input: []timedDial{
+ {
+ addr: addrs[0],
+ delay: 0,
+ success: false,
+ failAfter: 200 * time.Millisecond,
+ },
+ {
+ addr: addrs[1],
+ delay: 100 * time.Millisecond,
+ success: false,
+ failAfter: 100 * time.Millisecond,
+ },
+ {
+ addr: addrs[2],
+ delay: 300 * time.Millisecond,
+ success: false,
+ failAfter: 100 * time.Millisecond,
+ },
+ {
+ addr: addrs[3],
+ delay: 2 * time.Second,
+ success: true,
+ },
+ {
+ addr: addrs[4],
+ delay: 2*time.Second + 1*time.Millisecond,
+ success: false, // this dial will never happen
+ failAfter: 100 * time.Millisecond,
+ },
+ },
+ maxDuration: 310 * time.Millisecond,
+ },
+ {
+ name: "failed dials",
+ input: []timedDial{
+ {
+ addr: addrs[0],
+ delay: 0,
+ success: false,
+ failAfter: 105 * time.Millisecond,
+ },
+ {
+ addr: addrs[1],
+ delay: 100 * time.Millisecond,
+ success: false,
+ failAfter: 20 * time.Millisecond,
+ },
+ },
+ maxDuration: 200 * time.Millisecond,
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ s1 := makeSwarmWithNoListenAddrs(t)
+ defer s1.Close()
+ s2 := makeSwarmWithNoListenAddrs(t)
+ defer s2.Close()
+ // setup the ranker to trigger dials according to the test case
+ s1.dialRanker = makeRanker(tc.input)
+ err := checkDialWorkerLoopScheduling(t, s1, s2, tc)
+ if err != nil {
+ t.Error(err)
+ }
+ })
+ }
+}
+
+func TestDialWorkerLoopSchedulingProperty(t *testing.T) {
+ f := func(tc schedulingTestCase) bool {
+ s1 := makeSwarmWithNoListenAddrs(t)
+ defer s1.Close()
+ // ignore limiter delays just check scheduling
+ s1.limiter.perPeerLimit = 10000
+ s2 := makeSwarmWithNoListenAddrs(t)
+ defer s2.Close()
+ // setup the ranker to trigger dials according to the test case
+ s1.dialRanker = makeRanker(tc.input)
+ err := checkDialWorkerLoopScheduling(t, s1, s2, tc)
+ if err != nil {
+ t.Log(err)
+ }
+ return err == nil
+ }
+
+ if err := quick.Check(f, &quick.Config{MaxCount: 50}); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestDialWorkerLoopQuicOverTCP(t *testing.T) {
+ tc := schedulingTestCase{
+ input: []timedDial{
+ {
+ addr: ma.StringCast("/ip4/127.0.0.1/udp/20000/quic-v1"),
+ delay: 0,
+ success: true,
+ },
+ {
+ addr: ma.StringCast("/ip4/127.0.0.1/tcp/20000"),
+ delay: 30 * time.Millisecond,
+ success: true,
+ },
+ },
+ maxDuration: 20 * time.Millisecond,
+ }
+ s1 := makeSwarmWithNoListenAddrs(t)
+ defer s1.Close()
+
+ s2 := makeSwarmWithNoListenAddrs(t)
+ defer s2.Close()
+
+ // we use the default ranker here
+
+ err := checkDialWorkerLoopScheduling(t, s1, s2, tc)
+ require.NoError(t, err)
+}
+
+func TestDialWorkerLoopHolePunching(t *testing.T) {
+ s1 := makeSwarmWithNoListenAddrs(t)
+ defer s1.Close()
+
+ s2 := makeSwarmWithNoListenAddrs(t)
+ defer s2.Close()
+
+ // t1 will accept and keep the other end waiting
+ t1 := ma.StringCast("/ip4/127.0.0.1/tcp/10000")
+ recvCh := make(chan struct{})
+ list, ch := makeTCPListener(t, t1, recvCh) // accepted conns hang until we send on ch
+ defer list.Close()
+ defer func() { ch <- struct{}{} }() // close listener
+
+ // t2 will succeed
+ t2 := ma.StringCast("/ip4/127.0.0.1/tcp/10001")
+
+ err := s2.AddListenAddr(t2)
+ if err != nil {
+ t.Error(err)
+ }
+
+ s1.dialRanker = func(addrs []ma.Multiaddr) (res []network.AddrDelay) {
+ res = make([]network.AddrDelay, len(addrs))
+ for i := 0; i < len(addrs); i++ {
+ delay := 10 * time.Second
+ if addrs[i].Equal(t1) {
+ // fire t1 immediately
+ delay = 0
+ } else if addrs[i].Equal(t2) {
+ // delay t2 by 100ms
+ // without holepunch this call will not happen
+ delay = 100 * time.Millisecond
+ }
+ res[i] = network.AddrDelay{Addr: addrs[i], Delay: delay}
+ }
+ return
+ }
+ s1.Peerstore().AddAddrs(s2.LocalPeer(), []ma.Multiaddr{t1, t2}, peerstore.PermanentAddrTTL)
+
+ reqch := make(chan dialRequest)
+ resch := make(chan dialResponse, 2)
+
+ cl := newMockClock()
+ worker := newDialWorker(s1, s2.LocalPeer(), reqch, cl)
+ go worker.loop()
+ defer worker.wg.Wait()
+ defer close(reqch)
+
+ reqch <- dialRequest{ctx: context.Background(), resch: resch}
+ <-recvCh // received connection on t1
+
+ select {
+ case <-resch:
+ t.Errorf("didn't expect connection to succeed")
+ case <-time.After(100 * time.Millisecond):
+ }
+
+ hpCtx := network.WithSimultaneousConnect(context.Background(), true, "testing")
+ // with holepunch request, t2 will be dialed immediately
+ reqch <- dialRequest{ctx: hpCtx, resch: resch}
+ select {
+ case r := <-resch:
+ require.NoError(t, r.err)
+ case <-time.After(5 * time.Second):
+ t.Errorf("expected conn to succeed")
+ }
+
+ select {
+ case r := <-resch:
+ require.NoError(t, r.err)
+ case <-time.After(5 * time.Second):
+ t.Errorf("expected conn to succeed")
+ }
+}
+
+func TestDialWorkerLoopAddrDedup(t *testing.T) {
+ s1 := makeSwarm(t)
+ s2 := makeSwarm(t)
+ defer s1.Close()
+ defer s2.Close()
+ t1 := ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 10000))
+ t2 := ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 10000))
+
+ // acceptAndClose accepts a connection and closes it
+ acceptAndClose := func(a ma.Multiaddr, ch chan struct{}, closech chan struct{}) {
+ list, err := manet.Listen(a)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ go func() {
+ ch <- struct{}{}
+ for {
+ conn, err := list.Accept()
+ if err != nil {
+ return
+ }
+ ch <- struct{}{}
+ conn.Close()
+ }
+ }()
+ <-closech
+ list.Close()
+ }
+ ch := make(chan struct{}, 1)
+ closeCh := make(chan struct{})
+ go acceptAndClose(t1, ch, closeCh)
+ defer close(closeCh)
+ <-ch // the routine has started listening on addr
+
+ s1.Peerstore().AddAddrs(s2.LocalPeer(), []ma.Multiaddr{t1}, peerstore.PermanentAddrTTL)
+
+ reqch := make(chan dialRequest)
+ resch := make(chan dialResponse, 2)
+
+ worker := newDialWorker(s1, s2.LocalPeer(), reqch, nil)
+ go worker.loop()
+ defer worker.wg.Wait()
+ defer close(reqch)
+
+ reqch <- dialRequest{ctx: context.Background(), resch: resch}
+ <-ch
+ <-resch
+ // Need to clear backoff otherwise the dial attempt would not be made
+ s1.Backoff().Clear(s2.LocalPeer())
+
+ s1.Peerstore().ClearAddrs(s2.LocalPeer())
+ s1.Peerstore().AddAddrs(s2.LocalPeer(), []ma.Multiaddr{t2}, peerstore.PermanentAddrTTL)
+
+ reqch <- dialRequest{ctx: context.Background(), resch: resch}
+ select {
+ case r := <-resch:
+ require.Error(t, r.err)
+ case <-ch:
+ t.Errorf("didn't expect a connection attempt")
+ case <-time.After(5 * time.Second):
+ t.Errorf("expected a fail response")
+ }
+}
+
+func TestDialWorkerLoopTCPConnUpgradeWait(t *testing.T) {
+ s1 := makeSwarmWithNoListenAddrs(t, WithDialTimeout(10*time.Second))
+ s2 := makeSwarmWithNoListenAddrs(t, WithDialTimeout(10*time.Second))
+ defer s1.Close()
+ defer s2.Close()
+ // Connection to a1 will fail, but a1 is a public address, so we can test waiting for the
+ // TCP connection-established dial update. 192.0.0.170 is an ipv4only.arpa reserved address.
+ a1 := ma.StringCast(fmt.Sprintf("/ip4/192.0.0.170/tcp/%d", 10001))
+ // Connection to a2 will succeed.
+ a2 := ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 10002))
+ s2.Listen(a2)
+
+ s1.Peerstore().AddAddrs(s2.LocalPeer(), []ma.Multiaddr{a1, a2}, peerstore.PermanentAddrTTL)
+
+ rankerCalled := make(chan struct{})
+ s1.dialRanker = func(_ []ma.Multiaddr) []network.AddrDelay {
+ defer close(rankerCalled)
+ return []network.AddrDelay{{Addr: a1, Delay: 0}, {Addr: a2, Delay: 100 * time.Millisecond}}
+ }
+
+ reqch := make(chan dialRequest)
+ resch := make(chan dialResponse, 2)
+ cl := newMockClock()
+ worker := newDialWorker(s1, s2.LocalPeer(), reqch, cl)
+ go worker.loop()
+ defer worker.wg.Wait()
+ defer close(reqch)
+
+ reqch <- dialRequest{ctx: context.Background(), resch: resch}
+
+ <-rankerCalled
+ // Wait a bit to let the loop make the dial attempt to a1
+ time.Sleep(1 * time.Second)
+ // Send conn established for a1
+ worker.resch <- transport.DialUpdate{Kind: transport.UpdateKindHandshakeProgressed, Addr: a1}
+ // Dial to a2 shouldn't happen even if a2 is scheduled to dial by now
+ cl.AdvanceBy(200 * time.Millisecond)
+ select {
+ case r := <-resch:
+ t.Fatalf("didn't expect any event on resch %s %s", r.err, r.conn)
+ case <-time.After(500 * time.Millisecond):
+ }
+
+ // Dial to a2 should happen now
+ // This number is high because there's a race between this goroutine advancing the clock
+ // and the worker loop goroutine processing the TCPConnectionEstablished event.
+ // In case it processes the event after the previous clock advancement we need to wait
+ // 2 * PublicTCPDelay.
+ cl.AdvanceBy(2 * PublicTCPDelay)
+ select {
+ case r := <-resch:
+ require.NoError(t, r.err)
+ require.NotNil(t, r.conn)
+ case <-time.After(3 * time.Second):
+ t.Errorf("expected a fail response")
+ }
+}
+
+func BenchmarkDialRanker(b *testing.B) {
+ const N = 10000
+
+ benchDialQueue := func(adelays []network.AddrDelay) {
+ dq := newDialQueue()
+ for _, a := range adelays {
+ dq.Add(a)
+ }
+ for {
+ batch := dq.NextBatch()
+ if len(batch) == 0 {
+ return
+ }
+ }
+ }
+ addrs := make([]ma.Multiaddr, N)
+ for i := 0; i < N; i++ {
+ addrs[i] = ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", i))
+ }
+
+ b.Run("equal delay", func(b *testing.B) {
+ b.ReportAllocs()
+ addrDelays := make([]network.AddrDelay, N)
+ for i := 0; i < N; i++ {
+ addrDelays[i] = network.AddrDelay{
+ Addr: addrs[i],
+ Delay: 0,
+ }
+ }
+ for i := 0; i < b.N; i++ {
+ benchDialQueue(addrDelays)
+ }
+ })
+ b.Run("sorted delay", func(b *testing.B) {
+ b.ReportAllocs()
+ addrDelays := make([]network.AddrDelay, N)
+ for i := 0; i < N; i++ {
+ addrDelays[i] = network.AddrDelay{
+ Addr: addrs[i],
+ Delay: time.Millisecond * time.Duration(i),
+ }
+ }
+ for i := 0; i < b.N; i++ {
+ benchDialQueue(addrDelays)
+ }
+ })
+}
diff --git a/p2p/net/swarm/limiter.go b/p2p/net/swarm/limiter.go
new file mode 100644
index 0000000000..aef5580f08
--- /dev/null
+++ b/p2p/net/swarm/limiter.go
@@ -0,0 +1,240 @@
+package swarm
+
+import (
+ "context"
+ "os"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/transport"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+type dialJob struct {
+ addr ma.Multiaddr
+ peer peer.ID
+ ctx context.Context
+ resp chan transport.DialUpdate
+ timeout time.Duration
+}
+
+func (dj *dialJob) cancelled() bool {
+ return dj.ctx.Err() != nil
+}
+
+type dialLimiter struct {
+ lk sync.Mutex
+
+ fdConsuming int
+ fdLimit int
+ waitingOnFd []*dialJob
+
+ dialFunc dialfunc
+
+ activePerPeer map[peer.ID]int
+ perPeerLimit int
+ waitingOnPeerLimit map[peer.ID][]*dialJob
+}
+
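+// dialfunc dials a peer at a single address, sending intermediate dial updates
+// on the provided channel and returning the resulting connection or error.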
+type dialfunc func(context.Context, peer.ID, ma.Multiaddr, chan<- transport.DialUpdate) (transport.CapableConn, error)
+
+func newDialLimiter(df dialfunc) *dialLimiter {
+ fd := ConcurrentFdDials
+ if env := os.Getenv("LIBP2P_SWARM_FD_LIMIT"); env != "" {
+ if n, err := strconv.ParseInt(env, 10, 32); err == nil {
+ fd = int(n)
+ }
+ }
+ return newDialLimiterWithParams(df, fd, DefaultPerPeerRateLimit)
+}
+
+func newDialLimiterWithParams(df dialfunc, fdLimit, perPeerLimit int) *dialLimiter {
+ return &dialLimiter{
+ fdLimit: fdLimit,
+ perPeerLimit: perPeerLimit,
+ waitingOnPeerLimit: make(map[peer.ID][]*dialJob),
+ activePerPeer: make(map[peer.ID]int),
+ dialFunc: df,
+ }
+}
+
+// freeFDToken frees an FD token and, if any dial jobs are waiting on one,
+// schedules the next waiting dialJob in its place.
+func (dl *dialLimiter) freeFDToken() {
+ log.Debug("[limiter] freeing FD token", "waiting", len(dl.waitingOnFd), "fd_consuming", dl.fdConsuming)
+ dl.fdConsuming--
+
+ for len(dl.waitingOnFd) > 0 {
+ next := dl.waitingOnFd[0]
+ dl.waitingOnFd[0] = nil // clear out memory
+ dl.waitingOnFd = dl.waitingOnFd[1:]
+
+ if len(dl.waitingOnFd) == 0 {
+ // clear out memory.
+ dl.waitingOnFd = nil
+ }
+
+ // Skip over canceled dials instead of queuing up a goroutine.
+ if next.cancelled() {
+ dl.freePeerToken(next)
+ continue
+ }
+ dl.fdConsuming++
+
+ // we already have activePerPeer token at this point so we can just dial
+ go dl.executeDial(next)
+ return
+ }
+}
+
+func (dl *dialLimiter) freePeerToken(dj *dialJob) {
+ log.Debug("[limiter] freeing peer token",
+ "peer", dj.peer,
+ "addr", dj.addr,
+ "active_for_peer", dl.activePerPeer[dj.peer],
+ "waiting_on_peer_limit", len(dl.waitingOnPeerLimit[dj.peer]))
+	// release tokens in the reverse order from which we take them
+ dl.activePerPeer[dj.peer]--
+ if dl.activePerPeer[dj.peer] == 0 {
+ delete(dl.activePerPeer, dj.peer)
+ }
+
+ waitlist := dl.waitingOnPeerLimit[dj.peer]
+ for len(waitlist) > 0 {
+ next := waitlist[0]
+ waitlist[0] = nil // clear out memory
+ waitlist = waitlist[1:]
+
+ if len(waitlist) == 0 {
+ delete(dl.waitingOnPeerLimit, next.peer)
+ } else {
+ dl.waitingOnPeerLimit[next.peer] = waitlist
+ }
+
+ if next.cancelled() {
+ continue
+ }
+
+		dl.activePerPeer[next.peer]++ // re-take the token we just released, now for the next job
+
+ dl.addCheckFdLimit(next)
+ return
+ }
+}
+
+func (dl *dialLimiter) finishedDial(dj *dialJob) {
+ dl.lk.Lock()
+ defer dl.lk.Unlock()
+ if dl.shouldConsumeFd(dj.addr) {
+ dl.freeFDToken()
+ }
+
+ dl.freePeerToken(dj)
+}
+
+func (dl *dialLimiter) shouldConsumeFd(addr ma.Multiaddr) bool {
+ // we don't consume FD's for relay addresses for now as they will be consumed when the Relay Transport
+ // actually dials the Relay server. That dial call will also pass through this limiter with
+ // the address of the relay server i.e. non-relay address.
+ _, err := addr.ValueForProtocol(ma.P_CIRCUIT)
+
+ isRelay := err == nil
+
+ return !isRelay && isFdConsumingAddr(addr)
+}
+
+func (dl *dialLimiter) addCheckFdLimit(dj *dialJob) {
+ if dl.shouldConsumeFd(dj.addr) {
+ if dl.fdConsuming >= dl.fdLimit {
+ log.Debug("[limiter] blocked dial waiting on FD token",
+ "peer", dj.peer,
+ "addr", dj.addr,
+ "fd_consuming", dl.fdConsuming,
+ "fd_limit", dl.fdLimit,
+ "waiting", len(dl.waitingOnFd))
+ dl.waitingOnFd = append(dl.waitingOnFd, dj)
+ return
+ }
+
+ log.Debug("[limiter] taking FD token",
+ "peer", dj.peer,
+ "addr", dj.addr,
+ "prev_consuming", dl.fdConsuming)
+ // take token
+ dl.fdConsuming++
+ }
+
+ log.Debug("[limiter] executing dial",
+ "peer", dj.peer,
+ "addr", dj.addr,
+ "fd_consuming", dl.fdConsuming,
+ "waiting", len(dl.waitingOnFd))
+ go dl.executeDial(dj)
+}
+
+func (dl *dialLimiter) addCheckPeerLimit(dj *dialJob) {
+ if dl.activePerPeer[dj.peer] >= dl.perPeerLimit {
+ log.Debug("[limiter] blocked dial waiting on peer limit",
+ "peer", dj.peer,
+ "addr", dj.addr,
+ "active", dl.activePerPeer[dj.peer],
+ "peer_limit", dl.perPeerLimit,
+ "waiting", len(dl.waitingOnPeerLimit[dj.peer]))
+ wlist := dl.waitingOnPeerLimit[dj.peer]
+ dl.waitingOnPeerLimit[dj.peer] = append(wlist, dj)
+ return
+ }
+ dl.activePerPeer[dj.peer]++
+
+ dl.addCheckFdLimit(dj)
+}
+
+// AddDialJob tries to take the needed tokens for starting the given dial job.
+// If it acquires all needed tokens, it immediately starts the dial, otherwise
+// it will put it on the waitlist for the requested token.
+func (dl *dialLimiter) AddDialJob(dj *dialJob) {
+ dl.lk.Lock()
+ defer dl.lk.Unlock()
+
+ log.Debug("[limiter] adding a dial job through limiter", "addr", dj.addr)
+ dl.addCheckPeerLimit(dj)
+}
+
+func (dl *dialLimiter) clearAllPeerDials(p peer.ID) {
+ dl.lk.Lock()
+ defer dl.lk.Unlock()
+ delete(dl.waitingOnPeerLimit, p)
+ log.Debug("[limiter] clearing all peer dials", "peer", p)
+ // NB: the waitingOnFd list doesn't need to be cleaned out here, we will
+ // remove them as we encounter them because they are 'cancelled' at this
+ // point
+}
+
+// executeDial calls the dialFunc, and reports the result through the response
+// channel when finished. Once the response is sent it also releases all tokens
+// it held during the dial.
+func (dl *dialLimiter) executeDial(j *dialJob) {
+ defer dl.finishedDial(j)
+ if j.cancelled() {
+ return
+ }
+
+ dctx, cancel := context.WithTimeout(j.ctx, j.timeout)
+ defer cancel()
+
+ con, err := dl.dialFunc(dctx, j.peer, j.addr, j.resp)
+ kind := transport.UpdateKindDialSuccessful
+ if err != nil {
+ kind = transport.UpdateKindDialFailed
+ }
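+	// Deliver the result. If the requester has gone away, close any established
+	// connection instead of leaking it.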
+ select {
+ case j.resp <- transport.DialUpdate{Kind: kind, Conn: con, Addr: j.addr, Err: err}:
+ case <-j.ctx.Done():
+ if con != nil {
+ con.Close()
+ }
+ }
+}
diff --git a/p2p/net/swarm/limiter_test.go b/p2p/net/swarm/limiter_test.go
new file mode 100644
index 0000000000..7a1772ba54
--- /dev/null
+++ b/p2p/net/swarm/limiter_test.go
@@ -0,0 +1,397 @@
+package swarm
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "math/rand"
+ "strconv"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/test"
+ "github.com/libp2p/go-libp2p/core/transport"
+
+ ma "github.com/multiformats/go-multiaddr"
+ mafmt "github.com/multiformats/go-multiaddr-fmt"
+)
+
+func addrWithPort(p int) ma.Multiaddr {
+ return ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", p))
+}
+
+// In these tests, addresses with TCP ports over a certain number signify
+// 'good' addresses that will succeed, and addresses at or below that number
+// will fail. This lets us more easily test these different scenarios.
+func tcpPortOver(a ma.Multiaddr, n int) bool {
+ port, err := a.ValueForProtocol(ma.P_TCP)
+ if err != nil {
+ panic(err)
+ }
+
+ pnum, err := strconv.Atoi(port)
+ if err != nil {
+ panic(err)
+ }
+
+ return pnum > n
+}
+
+func tryDialAddrs(ctx context.Context, l *dialLimiter, p peer.ID, addrs []ma.Multiaddr, res chan transport.DialUpdate) {
+ for _, a := range addrs {
+ l.AddDialJob(&dialJob{
+ ctx: ctx,
+ peer: p,
+ addr: a,
+ resp: res,
+ })
+ }
+}
+
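+// hangDialFunc returns a dialfunc that succeeds immediately for UTP and relay
+// addresses and for TCP ports over 10, and blocks on the hang channel for
+// everything else.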
+func hangDialFunc(hang chan struct{}) dialfunc {
+ return func(_ context.Context, _ peer.ID, a ma.Multiaddr, _ chan<- transport.DialUpdate) (transport.CapableConn, error) {
+ if mafmt.UTP.Matches(a) {
+ return transport.CapableConn(nil), nil
+ }
+
+ _, err := a.ValueForProtocol(ma.P_CIRCUIT)
+ if err == nil {
+ return transport.CapableConn(nil), nil
+ }
+
+ if tcpPortOver(a, 10) {
+ return transport.CapableConn(nil), nil
+ }
+
+ <-hang
+ return nil, fmt.Errorf("test bad dial")
+ }
+}
+
+func TestLimiterBasicDials(t *testing.T) {
+ hang := make(chan struct{})
+ defer close(hang)
+
+ l := newDialLimiterWithParams(hangDialFunc(hang), ConcurrentFdDials, 4)
+
+ bads := []ma.Multiaddr{addrWithPort(1), addrWithPort(2), addrWithPort(3), addrWithPort(4)}
+ good := addrWithPort(20)
+
+ resch := make(chan transport.DialUpdate)
+ pid := peer.ID("testpeer")
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ tryDialAddrs(ctx, l, pid, bads, resch)
+
+ l.AddDialJob(&dialJob{
+ ctx: ctx,
+ peer: pid,
+ addr: good,
+ resp: resch,
+ })
+
+ select {
+ case <-resch:
+ t.Fatal("no dials should have completed!")
+ case <-time.After(time.Millisecond * 100):
+ }
+
+ // complete a single hung dial
+ hang <- struct{}{}
+
+ select {
+ case r := <-resch:
+ if r.Err == nil {
+ t.Fatal("should have gotten failed dial result")
+ }
+ case <-time.After(time.Second):
+ t.Fatal("timed out waiting for dial completion")
+ }
+
+ select {
+ case r := <-resch:
+ if r.Err != nil {
+ t.Fatal("expected second result to be success!")
+ }
+ case <-time.After(time.Second):
+ }
+}
+
+func TestFDLimiting(t *testing.T) {
+ hang := make(chan struct{})
+ defer close(hang)
+ l := newDialLimiterWithParams(hangDialFunc(hang), 16, 5)
+
+ bads := []ma.Multiaddr{addrWithPort(1), addrWithPort(2), addrWithPort(3), addrWithPort(4)}
+ pids := []peer.ID{"testpeer1", "testpeer2", "testpeer3", "testpeer4"}
+ goodTCP := addrWithPort(20)
+
+ ctx := context.Background()
+ resch := make(chan transport.DialUpdate)
+
+ // take all fd limit tokens with hang dials
+ for _, pid := range pids {
+ tryDialAddrs(ctx, l, pid, bads, resch)
+ }
+
+ // these dials should work normally, but will hang because we have taken
+ // up all the fd limiting
+ for _, pid := range pids {
+ l.AddDialJob(&dialJob{
+ ctx: ctx,
+ peer: pid,
+ addr: goodTCP,
+ resp: resch,
+ })
+ }
+
+ select {
+ case <-resch:
+ t.Fatal("no dials should have completed!")
+ case <-time.After(time.Millisecond * 100):
+ }
+
+ pid5 := peer.ID("testpeer5")
+ utpaddr := ma.StringCast("/ip4/127.0.0.1/udp/7777/utp")
+
+	// This should complete immediately since UTP addresses aren't blocked by fd rate limiting
+ l.AddDialJob(&dialJob{ctx: ctx, peer: pid5, addr: utpaddr, resp: resch})
+
+ select {
+ case res := <-resch:
+ if res.Err != nil {
+ t.Fatal("should have gotten successful response")
+ }
+ case <-time.After(time.Second * 5):
+ t.Fatal("timeout waiting for utp addr success")
+ }
+
+ // A relay address with tcp transport will complete because we do not consume fds for dials
+ // with relay addresses as the fd will be consumed when we actually dial the relay server.
+ pid6 := test.RandPeerIDFatal(t)
+ relayAddr := ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/tcp/20/p2p-circuit/p2p/%s", pid6))
+ l.AddDialJob(&dialJob{ctx: ctx, peer: pid6, addr: relayAddr, resp: resch})
+
+ select {
+ case res := <-resch:
+ if res.Err != nil {
+ t.Fatal("should have gotten successful response")
+ }
+ case <-time.After(time.Second * 5):
+ t.Fatal("timeout waiting for relay addr success")
+ }
+}
+
+func TestTokenRedistribution(t *testing.T) {
+ var lk sync.Mutex
+ hangchs := make(map[peer.ID]chan struct{})
+ df := func(_ context.Context, p peer.ID, a ma.Multiaddr, _ chan<- transport.DialUpdate) (transport.CapableConn, error) {
+ if tcpPortOver(a, 10) {
+ return (transport.CapableConn)(nil), nil
+ }
+
+ lk.Lock()
+ ch := hangchs[p]
+ lk.Unlock()
+ <-ch
+ return nil, fmt.Errorf("test bad dial")
+ }
+ l := newDialLimiterWithParams(df, 8, 4)
+
+ bads := []ma.Multiaddr{addrWithPort(1), addrWithPort(2), addrWithPort(3), addrWithPort(4)}
+ pids := []peer.ID{"testpeer1", "testpeer2"}
+
+ ctx := context.Background()
+ resch := make(chan transport.DialUpdate)
+
+ // take all fd limit tokens with hang dials
+ for _, pid := range pids {
+ hangchs[pid] = make(chan struct{})
+ }
+
+ for _, pid := range pids {
+ tryDialAddrs(ctx, l, pid, bads, resch)
+ }
+
+ // add a good dial job for peer 1
+ l.AddDialJob(&dialJob{
+ ctx: ctx,
+ peer: pids[1],
+ addr: ma.StringCast("/ip4/127.0.0.1/tcp/1001"),
+ resp: resch,
+ })
+
+ select {
+ case <-resch:
+ t.Fatal("no dials should have completed!")
+ case <-time.After(time.Millisecond * 100):
+ }
+
+ // unblock one dial for peer 0
+ hangchs[pids[0]] <- struct{}{}
+
+ select {
+ case res := <-resch:
+ if res.Err == nil {
+ t.Fatal("should have only been a failure here")
+ }
+ case <-time.After(time.Millisecond * 100):
+ t.Fatal("expected a dial failure here")
+ }
+
+ select {
+ case <-resch:
+ t.Fatal("no more dials should have completed!")
+ case <-time.After(time.Millisecond * 100):
+ }
+
+	// add a bad dial job to peer 0 to fill their rate limiter
+	// and test that more dials for this peer won't interfere with peer 1's incoming successful dial
+ l.AddDialJob(&dialJob{
+ ctx: ctx,
+ peer: pids[0],
+ addr: addrWithPort(7),
+ resp: resch,
+ })
+
+ hangchs[pids[1]] <- struct{}{}
+
+ // now one failed dial from peer 1 should get through and fail
+ // which will in turn unblock the successful dial on peer 1
+ select {
+ case res := <-resch:
+ if res.Err == nil {
+ t.Fatal("should have only been a failure here")
+ }
+ case <-time.After(time.Millisecond * 100):
+ t.Fatal("expected a dial failure here")
+ }
+
+ select {
+ case res := <-resch:
+ if res.Err != nil {
+ t.Fatal("should have succeeded!")
+ }
+ case <-time.After(time.Millisecond * 100):
+ t.Fatal("should have gotten successful dial")
+ }
+}
+
+func TestStressLimiter(t *testing.T) {
+ df := func(_ context.Context, _ peer.ID, a ma.Multiaddr, _ chan<- transport.DialUpdate) (transport.CapableConn, error) {
+ if tcpPortOver(a, 1000) {
+ return transport.CapableConn(nil), nil
+ }
+
+ time.Sleep(time.Millisecond * time.Duration(5+rand.Intn(100)))
+ return nil, fmt.Errorf("test bad dial")
+ }
+
+ l := newDialLimiterWithParams(df, 20, 5)
+
+ var bads []ma.Multiaddr
+ for i := 0; i < 100; i++ {
+ bads = append(bads, addrWithPort(i))
+ }
+
+ addresses := append(bads, addrWithPort(2000))
+ success := make(chan struct{})
+
+ for i := 0; i < 20; i++ {
+ go func(id peer.ID) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ resp := make(chan transport.DialUpdate)
+ time.Sleep(time.Duration(rand.Intn(10)) * time.Millisecond)
+ for _, i := range rand.Perm(len(addresses)) {
+ l.AddDialJob(&dialJob{
+ addr: addresses[i],
+ ctx: ctx,
+ peer: id,
+ resp: resp,
+ })
+ }
+
+ for res := range resp {
+ if res.Err == nil {
+ success <- struct{}{}
+ return
+ }
+ }
+ }(peer.ID(fmt.Sprintf("testpeer%d", i)))
+ }
+
+ for i := 0; i < 20; i++ {
+ select {
+ case <-success:
+ case <-time.After(time.Minute):
+ t.Fatal("expected a success within five seconds")
+ }
+ }
+}
+
+func TestFDLimitUnderflow(t *testing.T) {
+ df := func(ctx context.Context, _ peer.ID, _ ma.Multiaddr, _ chan<- transport.DialUpdate) (transport.CapableConn, error) {
+ select {
+ case <-ctx.Done():
+ case <-time.After(5 * time.Second):
+ }
+ return nil, fmt.Errorf("df timed out")
+ }
+
+ const fdLimit = 20
+ l := newDialLimiterWithParams(df, fdLimit, 3)
+
+ var addrs []ma.Multiaddr
+ for i := 0; i <= 1000; i++ {
+ addrs = append(addrs, addrWithPort(i))
+ }
+
+ wg := sync.WaitGroup{}
+ const num = 3 * fdLimit
+ wg.Add(num)
+ errs := make(chan error, num)
+ for i := 0; i < num; i++ {
+ go func(id peer.ID, i int) {
+ defer wg.Done()
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ resp := make(chan transport.DialUpdate)
+ l.AddDialJob(&dialJob{
+ addr: addrs[i],
+ ctx: ctx,
+ peer: id,
+ resp: resp,
+ })
+
+ for res := range resp {
+ if res.Err != nil {
+ return
+ }
+ errs <- errors.New("got dial res, but shouldn't")
+ }
+ }(peer.ID(fmt.Sprintf("testpeer%d", i%20)), i)
+ }
+
+ go func() {
+ wg.Wait()
+ close(errs)
+ }()
+
+ for err := range errs {
+ t.Fatal(err)
+ }
+
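+	// Regression check: freeing more FD tokens than were acquired would drive
+	// fdConsuming below zero.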
+ l.lk.Lock()
+ fdConsuming := l.fdConsuming
+ l.lk.Unlock()
+
+ if fdConsuming < 0 {
+ t.Fatalf("l.fdConsuming < 0")
+ }
+}
diff --git a/p2p/net/swarm/peers_test.go b/p2p/net/swarm/peers_test.go
new file mode 100644
index 0000000000..20b522fb9f
--- /dev/null
+++ b/p2p/net/swarm/peers_test.go
@@ -0,0 +1,63 @@
+package swarm_test
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ . "github.com/libp2p/go-libp2p/p2p/net/swarm"
+
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/stretchr/testify/require"
+)
+
+func TestPeers(t *testing.T) {
+ ctx := context.Background()
+ swarms := makeSwarms(t, 2)
+ s1 := swarms[0]
+ s2 := swarms[1]
+
+ connect := func(s *Swarm, dst peer.ID, addr ma.Multiaddr) {
+ // TODO: make a DialAddr func.
+ s.Peerstore().AddAddr(dst, addr, peerstore.PermanentAddrTTL)
+ // t.Logf("connections from %s", s.LocalPeer())
+ // for _, c := range s.ConnsToPeer(dst) {
+ // t.Logf("connection from %s to %s: %v", s.LocalPeer(), dst, c)
+ // }
+ // t.Logf("")
+ if _, err := s.DialPeer(ctx, dst); err != nil {
+ t.Fatal("error swarm dialing to peer", err)
+ }
+ // t.Log(s.swarm.Dump())
+ }
+
+ connect(s1, s2.LocalPeer(), s2.ListenAddresses()[0])
+ require.Eventually(t, func() bool { return len(s2.Peers()) > 0 }, 3*time.Second, 50*time.Millisecond)
+ connect(s2, s1.LocalPeer(), s1.ListenAddresses()[0])
+
+ for i := 0; i < 100; i++ {
+ connect(s1, s2.LocalPeer(), s2.ListenAddresses()[0])
+ connect(s2, s1.LocalPeer(), s1.ListenAddresses()[0])
+ }
+
+ for _, s := range swarms {
+ log.Info("swarm routing table", "peer", s.LocalPeer(), "peers", s.Peers())
+ }
+
+ test := func(s *Swarm) {
+ expect := 1
+ actual := len(s.Peers())
+ if actual != expect {
+ t.Errorf("%s has %d peers, not %d: %v", s.LocalPeer(), actual, expect, s.Peers())
+ }
+ actual = len(s.Conns())
+ if actual != expect {
+ t.Errorf("%s has %d conns, not %d: %v", s.LocalPeer(), actual, expect, s.Conns())
+ }
+ }
+
+ test(s1)
+ test(s2)
+}
diff --git a/p2p/net/swarm/resolve_test.go b/p2p/net/swarm/resolve_test.go
new file mode 100644
index 0000000000..1921e9433d
--- /dev/null
+++ b/p2p/net/swarm/resolve_test.go
@@ -0,0 +1,120 @@
+package swarm
+
+import (
+ "context"
+ "net"
+ "strconv"
+ "testing"
+
+ "github.com/multiformats/go-multiaddr"
+ madns "github.com/multiformats/go-multiaddr-dns"
+ "github.com/stretchr/testify/require"
+)
+
+func TestSwarmResolver(t *testing.T) {
+ mockResolver := madns.MockResolver{IP: make(map[string][]net.IPAddr)}
+ ipaddr, err := net.ResolveIPAddr("ip4", "127.0.0.1")
+ require.NoError(t, err)
+ mockResolver.IP["example.com"] = []net.IPAddr{*ipaddr}
+ mockResolver.TXT = map[string][]string{
+ "_dnsaddr.example.com": {"dnsaddr=/ip4/127.0.0.1"},
+ }
+ madnsResolver, err := madns.NewResolver(madns.WithDomainResolver("example.com", &mockResolver))
+ require.NoError(t, err)
+ swarmResolver := ResolverFromMaDNS{madnsResolver}
+
+ ctx := context.Background()
+ res, err := swarmResolver.ResolveDNSComponent(ctx, multiaddr.StringCast("/dns/example.com"), 10)
+ require.NoError(t, err)
+ require.Equal(t, 1, len(res))
+ require.Equal(t, "/ip4/127.0.0.1", res[0].String())
+
+ res, err = swarmResolver.ResolveDNSAddr(ctx, "", multiaddr.StringCast("/dnsaddr/example.com"), 1, 10)
+ require.NoError(t, err)
+ require.Equal(t, 1, len(res))
+ require.Equal(t, "/ip4/127.0.0.1", res[0].String())
+
+ t.Run("Test Limits", func(t *testing.T) {
+ var ipaddrs []net.IPAddr
+ var manyDNSAddrs []string
+ for i := 0; i < 255; i++ {
+ ip := "1.2.3." + strconv.Itoa(i)
+ ipaddrs = append(ipaddrs, net.IPAddr{IP: net.ParseIP(ip)})
+ manyDNSAddrs = append(manyDNSAddrs, "dnsaddr=/ip4/"+ip)
+ }
+
+ mockResolver.IP = map[string][]net.IPAddr{
+ "example.com": ipaddrs,
+ }
+ mockResolver.TXT = map[string][]string{
+ "_dnsaddr.example.com": manyDNSAddrs,
+ }
+
+ res, err := swarmResolver.ResolveDNSComponent(ctx, multiaddr.StringCast("/dns/example.com"), 10)
+ require.NoError(t, err)
+ require.Equal(t, 10, len(res))
+ for i := 0; i < 10; i++ {
+ require.Equal(t, "/ip4/1.2.3."+strconv.Itoa(i), res[i].String())
+ }
+
+ res, err = swarmResolver.ResolveDNSAddr(ctx, "", multiaddr.StringCast("/dnsaddr/example.com"), 1, 10)
+ require.NoError(t, err)
+ require.Equal(t, 10, len(res))
+ for i := 0; i < 10; i++ {
+ require.Equal(t, "/ip4/1.2.3."+strconv.Itoa(i), res[i].String())
+ }
+ })
+
+ t.Run("Test Recursive Limits", func(t *testing.T) {
+ recursiveDNSAddr := make(map[string][]string)
+ for i := 0; i < 255; i++ {
+ recursiveDNSAddr["_dnsaddr."+strconv.Itoa(i)+".example.com"] = []string{"dnsaddr=/dnsaddr/" + strconv.Itoa(i+1) + ".example.com"}
+ }
+ recursiveDNSAddr["_dnsaddr.255.example.com"] = []string{"dnsaddr=/ip4/127.0.0.1"}
+ mockResolver.TXT = recursiveDNSAddr
+
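+		// With a recursion limit of 256, the 256-hop dnsaddr chain resolves fully
+		// down to the IP address; with a limit of 255, the final hop is returned unresolved.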
+ res, err = swarmResolver.ResolveDNSAddr(ctx, "", multiaddr.StringCast("/dnsaddr/0.example.com"), 256, 10)
+ require.NoError(t, err)
+ require.Equal(t, 1, len(res))
+ require.Equal(t, "/ip4/127.0.0.1", res[0].String())
+
+ res, err = swarmResolver.ResolveDNSAddr(ctx, "", multiaddr.StringCast("/dnsaddr/0.example.com"), 255, 10)
+ require.NoError(t, err)
+ require.Equal(t, 1, len(res))
+ require.Equal(t, "/dnsaddr/255.example.com", res[0].String())
+ })
+
+ t.Run("Test Resolve at output limit", func(t *testing.T) {
+ recursiveDNSAddr := make(map[string][]string)
+ recursiveDNSAddr["_dnsaddr.example.com"] = []string{
+ "dnsaddr=/dnsaddr/0.example.com",
+ "dnsaddr=/dnsaddr/1.example.com",
+ "dnsaddr=/dnsaddr/2.example.com",
+ "dnsaddr=/dnsaddr/3.example.com",
+ "dnsaddr=/dnsaddr/4.example.com",
+ "dnsaddr=/dnsaddr/5.example.com",
+ "dnsaddr=/dnsaddr/6.example.com",
+ "dnsaddr=/dnsaddr/7.example.com",
+ "dnsaddr=/dnsaddr/8.example.com",
+ "dnsaddr=/dnsaddr/9.example.com",
+ }
+ recursiveDNSAddr["_dnsaddr.0.example.com"] = []string{"dnsaddr=/ip4/127.0.0.1"}
+ recursiveDNSAddr["_dnsaddr.1.example.com"] = []string{"dnsaddr=/ip4/127.0.0.1"}
+ recursiveDNSAddr["_dnsaddr.2.example.com"] = []string{"dnsaddr=/ip4/127.0.0.1"}
+ recursiveDNSAddr["_dnsaddr.3.example.com"] = []string{"dnsaddr=/ip4/127.0.0.1"}
+ recursiveDNSAddr["_dnsaddr.4.example.com"] = []string{"dnsaddr=/ip4/127.0.0.1"}
+ recursiveDNSAddr["_dnsaddr.5.example.com"] = []string{"dnsaddr=/ip4/127.0.0.1"}
+ recursiveDNSAddr["_dnsaddr.6.example.com"] = []string{"dnsaddr=/ip4/127.0.0.1"}
+ recursiveDNSAddr["_dnsaddr.7.example.com"] = []string{"dnsaddr=/ip4/127.0.0.1"}
+ recursiveDNSAddr["_dnsaddr.8.example.com"] = []string{"dnsaddr=/ip4/127.0.0.1"}
+ recursiveDNSAddr["_dnsaddr.9.example.com"] = []string{"dnsaddr=/ip4/127.0.0.1"}
+ mockResolver.TXT = recursiveDNSAddr
+
+ res, err = swarmResolver.ResolveDNSAddr(ctx, "", multiaddr.StringCast("/dnsaddr/example.com"), 256, 10)
+ require.NoError(t, err)
+ require.Equal(t, 10, len(res))
+ for _, r := range res {
+ require.Equal(t, "/ip4/127.0.0.1", r.String())
+ }
+ })
+}
diff --git a/p2p/net/swarm/simul_test.go b/p2p/net/swarm/simul_test.go
new file mode 100644
index 0000000000..3fbe8f085b
--- /dev/null
+++ b/p2p/net/swarm/simul_test.go
@@ -0,0 +1,77 @@
+package swarm_test
+
+import (
+ "context"
+ "runtime"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ . "github.com/libp2p/go-libp2p/p2p/net/swarm"
+ swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
+
+ "github.com/libp2p/go-libp2p-testing/ci"
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+func TestSimultOpen(t *testing.T) {
+ t.Parallel()
+ swarms := makeSwarms(t, 2, swarmt.OptDisableReuseport)
+
+ // connect everyone
+ {
+ var wg sync.WaitGroup
+ connect := func(s *Swarm, dst peer.ID, addr ma.Multiaddr) {
+ defer wg.Done()
+ // copy for other peer
+ log.Debug("TestSimultOpen: connecting", "local", s.LocalPeer(), "remote", dst, "addr", addr)
+ s.Peerstore().AddAddr(dst, addr, peerstore.PermanentAddrTTL)
+ if _, err := s.DialPeer(context.Background(), dst); err != nil {
+ t.Error("error swarm dialing to peer", err)
+ }
+ }
+
+ log.Info("Connecting swarms simultaneously.")
+ wg.Add(2)
+ go connect(swarms[0], swarms[1].LocalPeer(), swarms[1].ListenAddresses()[0])
+ go connect(swarms[1], swarms[0].LocalPeer(), swarms[0].ListenAddresses()[0])
+ wg.Wait()
+ }
+
+ for _, s := range swarms {
+ s.Close()
+ }
+}
+
+func TestSimultOpenMany(t *testing.T) {
+ // t.Skip("very very slow")
+
+ addrs := 20
+ rounds := 10
+ if ci.IsRunning() || runtime.GOOS == "darwin" {
+ // osx has a limit of 256 file descriptors
+ addrs = 10
+ rounds = 5
+ }
+ subtestSwarm(t, addrs, rounds)
+}
+
+func TestSimultOpenFewStress(t *testing.T) {
+ if testing.Short() {
+ t.SkipNow()
+ }
+ // t.Skip("skipping for another test")
+ t.Parallel()
+
+ msgs := 40
+ swarms := 2
+ rounds := 10
+ // rounds := 100
+
+ for i := 0; i < rounds; i++ {
+ subtestSwarm(t, swarms, msgs)
+ <-time.After(10 * time.Millisecond)
+ }
+}
diff --git a/p2p/net/swarm/swarm.go b/p2p/net/swarm/swarm.go
new file mode 100644
index 0000000000..503f62f7b3
--- /dev/null
+++ b/p2p/net/swarm/swarm.go
@@ -0,0 +1,991 @@
+package swarm
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "slices"
+
+ "github.com/libp2p/go-libp2p/core/connmgr"
+ "github.com/libp2p/go-libp2p/core/event"
+ "github.com/libp2p/go-libp2p/core/metrics"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/libp2p/go-libp2p/core/transport"
+
+ logging "github.com/libp2p/go-libp2p/gologshim"
+ ma "github.com/multiformats/go-multiaddr"
+ madns "github.com/multiformats/go-multiaddr-dns"
+)
+
+const (
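+	// defaultDialTimeout is the maximum duration a Dial is allowed to take,
+	// covering the raw network connection, protocol selection and the handshake.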
+ defaultDialTimeout = 15 * time.Second
+
+	// defaultDialTimeoutLocal is the maximum duration a Dial to a local network address
+	// is allowed to take.
+	// This includes the time for dialing the raw network connection,
+	// protocol selection as well as the handshake, if applicable.
+ defaultDialTimeoutLocal = 5 * time.Second
+
+ defaultNewStreamTimeout = 15 * time.Second
+)
+
+var log = logging.Logger("swarm2")
+
+// ErrSwarmClosed is returned when one attempts to operate on a closed swarm.
+var ErrSwarmClosed = errors.New("swarm closed")
+
+// ErrAddrFiltered is returned when trying to register a connection to a
+// filtered address. You shouldn't see this error unless some underlying
+// transport is misbehaving.
+var ErrAddrFiltered = errors.New("address filtered")
+
+// ErrDialTimeout is returned when a dial times out due to the global timeout
+var ErrDialTimeout = errors.New("dial timed out")
+
+type Option func(*Swarm) error
+
+// WithConnectionGater sets a connection gater
+func WithConnectionGater(gater connmgr.ConnectionGater) Option {
+ return func(s *Swarm) error {
+ s.gater = gater
+ return nil
+ }
+}
+
+// WithMultiaddrResolver sets a custom multiaddress resolver
+func WithMultiaddrResolver(resolver network.MultiaddrDNSResolver) Option {
+ return func(s *Swarm) error {
+ s.multiaddrResolver = resolver
+ return nil
+ }
+}
+
+// WithMetrics sets a metrics reporter
+func WithMetrics(reporter metrics.Reporter) Option {
+ return func(s *Swarm) error {
+ s.bwc = reporter
+ return nil
+ }
+}
+
+func WithMetricsTracer(t MetricsTracer) Option {
+ return func(s *Swarm) error {
+ s.metricsTracer = t
+ return nil
+ }
+}
+
+func WithDialTimeout(t time.Duration) Option {
+ return func(s *Swarm) error {
+ s.dialTimeout = t
+ return nil
+ }
+}
+
+func WithDialTimeoutLocal(t time.Duration) Option {
+ return func(s *Swarm) error {
+ s.dialTimeoutLocal = t
+ return nil
+ }
+}
+
+func WithResourceManager(m network.ResourceManager) Option {
+ return func(s *Swarm) error {
+ s.rcmgr = m
+ return nil
+ }
+}
+
+// WithDialRanker configures swarm to use d as the DialRanker
+func WithDialRanker(d network.DialRanker) Option {
+ return func(s *Swarm) error {
+ if d == nil {
+ return errors.New("swarm: dial ranker cannot be nil")
+ }
+ s.dialRanker = d
+ return nil
+ }
+}
+
+// WithUDPBlackHoleSuccessCounter configures the swarm to use the provided counter for UDP black hole detection.
+// The counter's N is the size of the sliding window used to evaluate the black hole state, and
+// MinSuccesses is the minimum number of successes out of N required to not block requests.
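+//
+// For example, the default counter used by NewSwarm is equivalent to:
+// &BlackHoleSuccessCounter{N: 100, MinSuccesses: 5, Name: "UDP"}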
+func WithUDPBlackHoleSuccessCounter(f *BlackHoleSuccessCounter) Option {
+ return func(s *Swarm) error {
+ s.udpBHF = f
+ return nil
+ }
+}
+
+// WithIPv6BlackHoleSuccessCounter configures the swarm to use the provided counter for IPv6 black hole detection.
+// The counter's N is the size of the sliding window used to evaluate the black hole state, and
+// MinSuccesses is the minimum number of successes out of N required to not block requests.
+func WithIPv6BlackHoleSuccessCounter(f *BlackHoleSuccessCounter) Option {
+ return func(s *Swarm) error {
+ s.ipv6BHF = f
+ return nil
+ }
+}
+
+// WithReadOnlyBlackHoleDetector configures the swarm to use the black hole detector in
+// read-only mode. In read-only mode, dial requests are refused while the detector state
+// is unknown and no updates to the detector state are made. This is useful for services
+// like AutoNAT that care about accurately providing reachability info.
+func WithReadOnlyBlackHoleDetector() Option {
+ return func(s *Swarm) error {
+ s.readOnlyBHD = true
+ return nil
+ }
+}
+
+// Swarm is a connection muxer, allowing connections to other peers to
+// be opened and closed, while still using the same Chan for all
+// communication. The Chan sends/receives Messages, which note the
+// destination or source Peer.
+type Swarm struct {
+ nextConnID atomic.Uint64
+ nextStreamID atomic.Uint64
+
+ // Close refcount. This allows us to fully wait for the swarm to be torn
+ // down before continuing.
+ refs sync.WaitGroup
+
+ emitter event.Emitter
+
+ rcmgr network.ResourceManager
+
+ local peer.ID
+ peers peerstore.Peerstore
+
+ dialTimeout time.Duration
+ dialTimeoutLocal time.Duration
+
+ conns struct {
+ sync.RWMutex
+ m map[peer.ID][]*Conn
+ }
+
+ listeners struct {
+ sync.RWMutex
+
+ ifaceListenAddres []ma.Multiaddr
+ cacheEOL time.Time
+
+ m map[transport.Listener]struct{}
+ }
+
+ notifs struct {
+ sync.RWMutex
+ m map[network.Notifiee]struct{}
+ }
+
+ directConnNotifs struct {
+ sync.Mutex
+ m map[peer.ID][]chan struct{}
+ }
+
+ transports struct {
+ sync.RWMutex
+ m map[int]transport.Transport
+ }
+
+ multiaddrResolver network.MultiaddrDNSResolver
+
+ // stream handlers
+ streamh atomic.Pointer[network.StreamHandler]
+
+ // dialing helpers
+ dsync *dialSync
+ backf DialBackoff
+ limiter *dialLimiter
+ gater connmgr.ConnectionGater
+
+ closeOnce sync.Once
+ ctx context.Context // is canceled when Close is called
+ ctxCancel context.CancelFunc
+
+ bwc metrics.Reporter
+ metricsTracer MetricsTracer
+
+ dialRanker network.DialRanker
+
+ connectednessEventEmitter *connectednessEventEmitter
+ udpBHF *BlackHoleSuccessCounter
+ ipv6BHF *BlackHoleSuccessCounter
+ bhd *blackHoleDetector
+ readOnlyBHD bool
+}
+
+// NewSwarm constructs a Swarm.
+func NewSwarm(local peer.ID, peers peerstore.Peerstore, eventBus event.Bus, opts ...Option) (*Swarm, error) {
+ emitter, err := eventBus.Emitter(new(event.EvtPeerConnectednessChanged))
+ if err != nil {
+ return nil, err
+ }
+ ctx, cancel := context.WithCancel(context.Background())
+ s := &Swarm{
+ local: local,
+ peers: peers,
+ emitter: emitter,
+ ctx: ctx,
+ ctxCancel: cancel,
+ dialTimeout: defaultDialTimeout,
+ dialTimeoutLocal: defaultDialTimeoutLocal,
+ multiaddrResolver: ResolverFromMaDNS{madns.DefaultResolver},
+ dialRanker: DefaultDialRanker,
+
+		// A black hole is a binary property. On a network where UDP dials are blocked or there is
+		// no IPv6 connectivity, all such dials will fail. So a low success rate of 5 out of 100
+		// dials is good enough.
+ udpBHF: &BlackHoleSuccessCounter{N: 100, MinSuccesses: 5, Name: "UDP"},
+ ipv6BHF: &BlackHoleSuccessCounter{N: 100, MinSuccesses: 5, Name: "IPv6"},
+ }
+
+ s.conns.m = make(map[peer.ID][]*Conn)
+ s.listeners.m = make(map[transport.Listener]struct{})
+ s.transports.m = make(map[int]transport.Transport)
+ s.notifs.m = make(map[network.Notifiee]struct{})
+ s.directConnNotifs.m = make(map[peer.ID][]chan struct{})
+ s.connectednessEventEmitter = newConnectednessEventEmitter(s.Connectedness, emitter)
+
+ for _, opt := range opts {
+ if err := opt(s); err != nil {
+ return nil, err
+ }
+ }
+ if s.rcmgr == nil {
+ s.rcmgr = &network.NullResourceManager{}
+ }
+
+ s.dsync = newDialSync(s.dialWorkerLoop)
+
+ s.limiter = newDialLimiter(s.dialAddr)
+ s.backf.init(s.ctx)
+
+ s.bhd = &blackHoleDetector{
+ udp: s.udpBHF,
+ ipv6: s.ipv6BHF,
+ mt: s.metricsTracer,
+ readOnly: s.readOnlyBHD,
+ }
+ return s, nil
+}
+
+func (s *Swarm) Close() error {
+ s.closeOnce.Do(s.close)
+ return nil
+}
+
+// Done returns a channel that is closed when the swarm is closed.
+func (s *Swarm) Done() <-chan struct{} {
+ return s.ctx.Done()
+}
+
+func (s *Swarm) close() {
+ s.ctxCancel()
+
+ // Prevents new connections and/or listeners from being added to the swarm.
+ s.listeners.Lock()
+ listeners := s.listeners.m
+ s.listeners.m = nil
+ s.listeners.Unlock()
+
+ s.conns.Lock()
+ conns := s.conns.m
+ s.conns.m = nil
+ s.conns.Unlock()
+
+ // Lots of goroutines but we might as well do this in parallel. We want to shut down as fast as
+ // possible.
+ s.refs.Add(len(listeners))
+ for l := range listeners {
+ go func(l transport.Listener) {
+ defer s.refs.Done()
+ if err := l.Close(); err != nil && err != transport.ErrListenerClosed {
+ log.Error("error when shutting down listener", "err", err)
+ }
+ }(l)
+ }
+
+ for _, cs := range conns {
+ for _, c := range cs {
+ go func(c *Conn) {
+ if err := c.Close(); err != nil {
+ log.Error("error when shutting down connection", "err", err)
+ }
+ }(c)
+ }
+ }
+
+ // Wait for everything to finish.
+ s.refs.Wait()
+ s.connectednessEventEmitter.Close()
+ s.emitter.Close()
+
+ // Now close out any transports (if necessary). Do this after closing
+ // all connections/listeners.
+ s.transports.Lock()
+ transports := s.transports.m
+ s.transports.m = nil
+ s.transports.Unlock()
+
+ // Dedup transports that may be listening on multiple protocols
+ transportsToClose := make(map[transport.Transport]struct{}, len(transports))
+ for _, t := range transports {
+ transportsToClose[t] = struct{}{}
+ }
+
+ var wg sync.WaitGroup
+ for t := range transportsToClose {
+ if closer, ok := t.(io.Closer); ok {
+ wg.Add(1)
+ go func(c io.Closer) {
+ defer wg.Done()
+ if err := c.Close(); err != nil {
+ log.Error("error when closing down transport", "transport_type", fmt.Sprintf("%T", c), "err", err)
+ }
+ }(closer)
+ }
+ }
+ wg.Wait()
+}
+
+func (s *Swarm) addConn(tc transport.CapableConn, dir network.Direction) (*Conn, error) {
+ var (
+ p = tc.RemotePeer()
+ addr = tc.RemoteMultiaddr()
+ )
+
+ // create the Stat object, initializing with the underlying connection Stat if available
+ var stat network.ConnStats
+ if cs, ok := tc.(network.ConnStat); ok {
+ stat = cs.Stat()
+ }
+ stat.Direction = dir
+ stat.Opened = time.Now()
+ isLimited := stat.Limited
+
+ // Wrap and register the connection.
+ c := &Conn{
+ conn: tc,
+ swarm: s,
+ stat: stat,
+ id: s.nextConnID.Add(1),
+ }
+
+	// we ONLY check upgraded connections here so we can send them a Disconnect message.
+	// If we checked in the Upgrader instead, we would not be able to send that message.
+ if s.gater != nil {
+ if allow, _ := s.gater.InterceptUpgraded(c); !allow {
+ err := tc.CloseWithError(network.ConnGated)
+ if err != nil {
+ log.Warn("failed to close connection with peer and addr", "peer", p, "addr", addr, "err", err)
+ }
+ return nil, ErrGaterDisallowedConnection
+ }
+ }
+
+ // Add the public key.
+ if pk := tc.RemotePublicKey(); pk != nil {
+ s.peers.AddPubKey(p, pk)
+ }
+
+ // Clear any backoffs
+ s.backf.Clear(p)
+
+ // Finally, add the peer.
+ s.conns.Lock()
+ // Check if we're still online
+ if s.conns.m == nil {
+ s.conns.Unlock()
+ tc.Close()
+ return nil, ErrSwarmClosed
+ }
+
+ c.streams.m = make(map[*Stream]struct{})
+ s.conns.m[p] = append(s.conns.m[p], c)
+ // Add two swarm refs:
+ // * One will be decremented after the close notifications fire in Conn.doClose
+ // * The other will be decremented when Conn.start exits.
+ s.refs.Add(2)
+ // Take the notification lock before releasing the conns lock to block
+ // Disconnect notifications until after the Connect notifications done.
+ // This lock also ensures that swarm.refs.Wait() exits after we have
+ // enqueued the peer connectedness changed notification.
+ // TODO: Fix this fragility by taking a swarm ref for dial worker loop
+ c.notifyLk.Lock()
+ s.conns.Unlock()
+
+ s.connectednessEventEmitter.AddConn(p)
+
+ if !isLimited {
+ // Notify goroutines waiting for a direct connection
+ //
+		// Goroutines interested in waiting for a direct connection first acquire this lock
+ // and then acquire s.conns.RLock. Do not acquire this lock before conns.Unlock to
+ // prevent deadlock.
+ s.directConnNotifs.Lock()
+ for _, ch := range s.directConnNotifs.m[p] {
+ close(ch)
+ }
+ delete(s.directConnNotifs.m, p)
+ s.directConnNotifs.Unlock()
+ }
+ s.notifyAll(func(f network.Notifiee) {
+ f.Connected(s, c)
+ })
+ c.notifyLk.Unlock()
+
+ c.start()
+ return c, nil
+}
+
+// Peerstore returns this swarm's internal Peerstore.
+func (s *Swarm) Peerstore() peerstore.Peerstore {
+ return s.peers
+}
+
+// SetStreamHandler assigns the handler for new streams.
+func (s *Swarm) SetStreamHandler(handler network.StreamHandler) {
+ s.streamh.Store(&handler)
+}
+
+// StreamHandler gets the handler for new streams.
+func (s *Swarm) StreamHandler() network.StreamHandler {
+ handler := s.streamh.Load()
+ if handler == nil {
+ return nil
+ }
+ return *handler
+}
+
+// NewStream creates a new stream on any available connection to peer, dialing
+// if necessary.
+// Use network.WithAllowLimitedConn to open a stream over a limited (relayed)
+// connection.
+func (s *Swarm) NewStream(ctx context.Context, p peer.ID) (network.Stream, error) {
+ log.Debug("opening stream to peer", "source_peer", s.local, "destination_peer", p)
+
+ // Algorithm:
+ // 1. Find the best connection, otherwise, dial.
+ // 2. If the best connection is limited, wait for a direct conn via conn
+ // reversal or hole punching.
+ // 3. Try opening a stream.
+ // 4. If the underlying connection is, in fact, closed, close the outer
+ // connection and try again. We do this in case we have a closed
+ // connection but don't notice it until we actually try to open a
+ // stream.
+ //
+ // TODO: Try all connections even if we get an error opening a stream on
+ // a non-closed connection.
+ numDials := 0
+ for {
+ c := s.bestConnToPeer(p)
+ if c == nil {
+ if nodial, _ := network.GetNoDial(ctx); !nodial {
+ numDials++
+ if numDials > DialAttempts {
+ return nil, errors.New("max dial attempts exceeded")
+ }
+ var err error
+ c, err = s.dialPeer(ctx, p)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ return nil, network.ErrNoConn
+ }
+ }
+
+ limitedAllowed, _ := network.GetAllowLimitedConn(ctx)
+ if !limitedAllowed && c.Stat().Limited {
+ var err error
+ c, err = s.waitForDirectConn(ctx, p)
+ if err != nil {
+ log.Debug("failed to get direct connection to a limited peer", "destination_peer", p, "err", err)
+ return nil, err
+ }
+ }
+
+ str, err := c.NewStream(ctx)
+ if err != nil {
+ if c.conn.IsClosed() {
+ continue
+ }
+ return nil, err
+ }
+ return str, nil
+ }
+}
+
+// waitForDirectConn waits for a direct connection established through hole punching or connection reversal.
+func (s *Swarm) waitForDirectConn(ctx context.Context, p peer.ID) (*Conn, error) {
+ s.directConnNotifs.Lock()
+ c := s.bestConnToPeer(p)
+ if c == nil {
+ s.directConnNotifs.Unlock()
+ return nil, network.ErrNoConn
+ } else if !c.Stat().Limited {
+ s.directConnNotifs.Unlock()
+ return c, nil
+ }
+
+ // Wait for limited connection to upgrade to a direct connection either by
+ // connection reversal or hole punching.
+ ch := make(chan struct{})
+ s.directConnNotifs.m[p] = append(s.directConnNotifs.m[p], ch)
+ s.directConnNotifs.Unlock()
+
+ // apply the DialPeer timeout
+ ctx, cancel := context.WithTimeout(ctx, network.GetDialPeerTimeout(ctx))
+ defer cancel()
+
+ // Wait for notification.
+ select {
+ case <-ctx.Done():
+ // Remove ourselves from the notification list
+ s.directConnNotifs.Lock()
+ defer s.directConnNotifs.Unlock()
+
+ s.directConnNotifs.m[p] = slices.DeleteFunc(
+ s.directConnNotifs.m[p],
+ func(c chan struct{}) bool { return c == ch },
+ )
+ if len(s.directConnNotifs.m[p]) == 0 {
+ delete(s.directConnNotifs.m, p)
+ }
+ return nil, ctx.Err()
+ case <-ch:
+ // We do not need to remove ourselves from the list here as the notifier
+ // clears the map entry
+ c := s.bestConnToPeer(p)
+ if c == nil {
+ return nil, network.ErrNoConn
+ }
+ if c.Stat().Limited {
+ return nil, network.ErrLimitedConn
+ }
+ return c, nil
+ }
+}
+
+// ConnsToPeer returns all the live connections to peer.
+func (s *Swarm) ConnsToPeer(p peer.ID) []network.Conn {
+ // TODO: Consider sorting the connection list best to worst. Currently,
+ // it's sorted oldest to newest.
+ s.conns.RLock()
+ defer s.conns.RUnlock()
+ conns := s.conns.m[p]
+ output := make([]network.Conn, len(conns))
+ for i, c := range conns {
+ output[i] = c
+ }
+ return output
+}
+
+func isBetterConn(a, b *Conn) bool {
+ // If one is limited and not the other, prefer the unlimited connection.
+ aLimited := a.Stat().Limited
+ bLimited := b.Stat().Limited
+ if aLimited != bLimited {
+ return !aLimited
+ }
+
+ // If one is direct and not the other, prefer the direct connection.
+ aDirect := isDirectConn(a)
+ bDirect := isDirectConn(b)
+ if aDirect != bDirect {
+ return aDirect
+ }
+
+ // Otherwise, prefer the connection with more open streams.
+ a.streams.Lock()
+ aLen := len(a.streams.m)
+ a.streams.Unlock()
+
+ b.streams.Lock()
+ bLen := len(b.streams.m)
+ b.streams.Unlock()
+
+ if aLen != bLen {
+ return aLen > bLen
+ }
+
+	// Otherwise, prefer the candidate; since bestConnToPeer iterates connections
+	// oldest to newest, this picks the last (newest) connection.
+ return true
+}
+
+// bestConnToPeer returns the best connection to peer.
+func (s *Swarm) bestConnToPeer(p peer.ID) *Conn {
+ // TODO: Prefer some transports over others.
+ // For now, prefers direct connections over Relayed connections.
+ // For tie-breaking, select the newest non-closed connection with the most streams.
+ s.conns.RLock()
+ defer s.conns.RUnlock()
+
+ var best *Conn
+ for _, c := range s.conns.m[p] {
+ if c.conn.IsClosed() {
+ // We *will* garbage collect this soon anyways.
+ continue
+ }
+ if best == nil || isBetterConn(c, best) {
+ best = c
+ }
+ }
+ return best
+}
+
+// bestAcceptableConnToPeer returns the best acceptable connection, considering the passed in ctx.
+// If network.WithForceDirectDial is used, it only returns direct connections, ignoring
+// any limited (relayed) connections to the peer.
+func (s *Swarm) bestAcceptableConnToPeer(ctx context.Context, p peer.ID) *Conn {
+ conn := s.bestConnToPeer(p)
+
+ forceDirect, _ := network.GetForceDirectDial(ctx)
+ if forceDirect && !isDirectConn(conn) {
+ return nil
+ }
+ return conn
+}
+
+func isDirectConn(c *Conn) bool {
+ return c != nil && !c.conn.Transport().Proxy()
+}
+
+// Connectedness returns our "connectedness" state with the given peer.
+//
+// To check if we have an open connection, use `s.Connectedness(p) ==
+// network.Connected`.
+func (s *Swarm) Connectedness(p peer.ID) network.Connectedness {
+ s.conns.RLock()
+ defer s.conns.RUnlock()
+
+ return s.connectednessUnlocked(p)
+}
+
+// connectednessUnlocked returns the connectedness of a peer.
+func (s *Swarm) connectednessUnlocked(p peer.ID) network.Connectedness {
+ var haveLimited bool
+ for _, c := range s.conns.m[p] {
+ if c.IsClosed() {
+ // These will be garbage collected soon
+ continue
+ }
+ if c.Stat().Limited {
+ haveLimited = true
+ } else {
+ return network.Connected
+ }
+ }
+ if haveLimited {
+ return network.Limited
+ }
+ return network.NotConnected
+}
+
+// Conns returns a slice of all connections.
+func (s *Swarm) Conns() []network.Conn {
+ s.conns.RLock()
+ defer s.conns.RUnlock()
+
+ conns := make([]network.Conn, 0, len(s.conns.m))
+ for _, cs := range s.conns.m {
+ for _, c := range cs {
+ conns = append(conns, c)
+ }
+ }
+ return conns
+}
+
+// ClosePeer closes all connections to the given peer.
+func (s *Swarm) ClosePeer(p peer.ID) error {
+ conns := s.ConnsToPeer(p)
+ switch len(conns) {
+ case 0:
+ return nil
+ case 1:
+ return conns[0].Close()
+ default:
+ errCh := make(chan error)
+ for _, c := range conns {
+ go func(c network.Conn) {
+ errCh <- c.Close()
+ }(c)
+ }
+
+ var errs []string
+ for range conns {
+ err := <-errCh
+ if err != nil {
+ errs = append(errs, err.Error())
+ }
+ }
+ if len(errs) > 0 {
+ return fmt.Errorf("when disconnecting from peer %s: %s", p, strings.Join(errs, ", "))
+ }
+ return nil
+ }
+}
+
+// Peers returns a copy of the set of peers swarm is connected to.
+func (s *Swarm) Peers() []peer.ID {
+ s.conns.RLock()
+ defer s.conns.RUnlock()
+ peers := make([]peer.ID, 0, len(s.conns.m))
+ for p := range s.conns.m {
+ peers = append(peers, p)
+ }
+
+ return peers
+}
+
+// LocalPeer returns the local peer swarm is associated to.
+func (s *Swarm) LocalPeer() peer.ID {
+ return s.local
+}
+
+// Backoff returns the DialBackoff object for this swarm.
+func (s *Swarm) Backoff() *DialBackoff {
+ return &s.backf
+}
+
+// notifyAll sends a signal to all Notifiees
+func (s *Swarm) notifyAll(notify func(network.Notifiee)) {
+ s.notifs.RLock()
+ for f := range s.notifs.m {
+ notify(f)
+ }
+ s.notifs.RUnlock()
+}
+
+// Notify signs up Notifiee to receive signals when events happen
+func (s *Swarm) Notify(f network.Notifiee) {
+ s.notifs.Lock()
+ s.notifs.m[f] = struct{}{}
+ s.notifs.Unlock()
+}
+
+// StopNotify unregisters Notifiee from receiving signals
+func (s *Swarm) StopNotify(f network.Notifiee) {
+ s.notifs.Lock()
+ delete(s.notifs.m, f)
+ s.notifs.Unlock()
+}
+
+func (s *Swarm) removeConn(c *Conn) {
+ p := c.RemotePeer()
+
+ s.conns.Lock()
+ cs := s.conns.m[p]
+ for i, ci := range cs {
+ if ci == c {
+ // NOTE: We're intentionally preserving order.
+ // This way, connections to a peer are always
+ // sorted oldest to newest.
+ copy(cs[i:], cs[i+1:])
+ cs[len(cs)-1] = nil
+ s.conns.m[p] = cs[:len(cs)-1]
+ break
+ }
+ }
+ if len(s.conns.m[p]) == 0 {
+ delete(s.conns.m, p)
+ }
+ s.conns.Unlock()
+}
+
+// String returns a string representation of Network.
+func (s *Swarm) String() string {
+	return fmt.Sprintf("<Swarm %s>", s.LocalPeer())
+}
+
+func (s *Swarm) ResourceManager() network.ResourceManager {
+ return s.rcmgr
+}
+
+// Swarm is a Network.
+var (
+ _ network.Network = (*Swarm)(nil)
+ _ transport.TransportNetwork = (*Swarm)(nil)
+)
+
+type connWithMetrics struct {
+ transport.CapableConn
+ opened time.Time
+ dir network.Direction
+ metricsTracer MetricsTracer
+ once sync.Once
+ closeErr error
+}
+
+func wrapWithMetrics(capableConn transport.CapableConn, metricsTracer MetricsTracer, opened time.Time, dir network.Direction) *connWithMetrics {
+ c := &connWithMetrics{CapableConn: capableConn, opened: opened, dir: dir, metricsTracer: metricsTracer}
+ c.metricsTracer.OpenedConnection(c.dir, capableConn.RemotePublicKey(), capableConn.ConnState(), capableConn.LocalMultiaddr())
+ return c
+}
+
+func (c *connWithMetrics) completedHandshake() {
+ c.metricsTracer.CompletedHandshake(time.Since(c.opened), c.ConnState(), c.LocalMultiaddr())
+}
+
+func (c *connWithMetrics) Close() error {
+ c.once.Do(func() {
+ c.metricsTracer.ClosedConnection(c.dir, time.Since(c.opened), c.ConnState(), c.LocalMultiaddr())
+ c.closeErr = c.CapableConn.Close()
+ })
+ return c.closeErr
+}
+
+func (c *connWithMetrics) CloseWithError(errCode network.ConnErrorCode) error {
+ c.once.Do(func() {
+ c.metricsTracer.ClosedConnection(c.dir, time.Since(c.opened), c.ConnState(), c.LocalMultiaddr())
+ c.closeErr = c.CapableConn.CloseWithError(errCode)
+ })
+ return c.closeErr
+}
+
+func (c *connWithMetrics) Stat() network.ConnStats {
+ if cs, ok := c.CapableConn.(network.ConnStat); ok {
+ return cs.Stat()
+ }
+ return network.ConnStats{}
+}
+
+var _ network.ConnStat = &connWithMetrics{}
+
+type ResolverFromMaDNS struct {
+ *madns.Resolver
+}
+
+var _ network.MultiaddrDNSResolver = ResolverFromMaDNS{}
+
+func startsWithDNSADDR(m ma.Multiaddr) bool {
+ if m == nil {
+ return false
+ }
+
+ startsWithDNSADDR := false
+ // Using ForEach to avoid allocating
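+	// The callback returns false, so only the first component is inspected.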
+ ma.ForEach(m, func(c ma.Component) bool {
+ startsWithDNSADDR = c.Protocol().Code == ma.P_DNSADDR
+ return false
+ })
+ return startsWithDNSADDR
+}
+
+// ResolveDNSAddr implements MultiaddrDNSResolver
+func (r ResolverFromMaDNS) ResolveDNSAddr(ctx context.Context, expectedPeerID peer.ID, maddr ma.Multiaddr, recursionLimit int, outputLimit int) ([]ma.Multiaddr, error) {
+ if outputLimit <= 0 {
+ return nil, nil
+ }
+ if recursionLimit <= 0 {
+ return []ma.Multiaddr{maddr}, nil
+ }
+ var resolved, toResolve []ma.Multiaddr
+ addrs, err := r.Resolve(ctx, maddr)
+ if err != nil {
+ return nil, err
+ }
+ if len(addrs) > outputLimit {
+ addrs = addrs[:outputLimit]
+ }
+
+ for _, addr := range addrs {
+ if startsWithDNSADDR(addr) {
+ toResolve = append(toResolve, addr)
+ } else {
+ resolved = append(resolved, addr)
+ }
+ }
+
+ for i, addr := range toResolve {
+ // Set the nextOutputLimit to:
+ // outputLimit
+ // - len(resolved) // What we already have resolved
+ // - (len(toResolve) - i) // How many addresses we have left to resolve
+ // + 1 // The current address we are resolving
+ // This assumes that each DNSADDR address will resolve to at least one multiaddr.
+ // This assumption lets us bound the space we reserve for resolving.
+ nextOutputLimit := outputLimit - len(resolved) - (len(toResolve) - i) + 1
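+		// For example, with outputLimit = 10, len(resolved) = 3, len(toResolve) = 4
+		// and i = 0: nextOutputLimit = 10 - 3 - (4 - 0) + 1 = 4.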
+ resolvedAddrs, err := r.ResolveDNSAddr(ctx, expectedPeerID, addr, recursionLimit-1, nextOutputLimit)
+ if err != nil {
+ log.Warn("failed to resolve dnsaddr", "addr", addr, "err", err)
+ // Dropping this address
+ continue
+ }
+ resolved = append(resolved, resolvedAddrs...)
+ }
+
+ if len(resolved) > outputLimit {
+ resolved = resolved[:outputLimit]
+ }
+
+ // If the address contains a peer id, make sure it matches our expectedPeerID
+ if expectedPeerID != "" {
+ removeMismatchPeerID := func(a ma.Multiaddr) bool {
+ id, err := peer.IDFromP2PAddr(a)
+ if err == peer.ErrInvalidAddr {
+ // This multiaddr didn't contain a peer id, assume it's for this peer.
+ // Handshake will fail later if it's not.
+ return false
+ } else if err != nil {
+ // This multiaddr is invalid, drop it.
+ return true
+ }
+
+ return id != expectedPeerID
+ }
+ resolved = slices.DeleteFunc(resolved, removeMismatchPeerID)
+ }
+
+ return resolved, nil
+}
+
+// ResolveDNSComponent implements MultiaddrDNSResolver
+func (r ResolverFromMaDNS) ResolveDNSComponent(ctx context.Context, maddr ma.Multiaddr, outputLimit int) ([]ma.Multiaddr, error) {
+ addrs, err := r.Resolve(ctx, maddr)
+ if err != nil {
+ return nil, err
+ }
+ if len(addrs) > outputLimit {
+ addrs = addrs[:outputLimit]
+ }
+ return addrs, nil
+}
+
+// AddCertHashes adds certificate hashes to relevant transport addresses, if there
+// are no certhashes already present on the address. It mutates `listenAddrs`.
+// This method is useful for adding certhashes to public addresses discovered
+// via identify, nat mapping, or provided by the user.
+func (s *Swarm) AddCertHashes(listenAddrs []ma.Multiaddr) []ma.Multiaddr {
+ type addCertHasher interface {
+ AddCertHashes(m ma.Multiaddr) (ma.Multiaddr, bool)
+ }
+
+ for i, addr := range listenAddrs {
+ t := s.TransportForListening(addr)
+ if t == nil {
+ continue
+ }
+ tpt, ok := t.(addCertHasher)
+ if !ok {
+ continue
+ }
+ addrWithCerthash, added := tpt.AddCertHashes(addr)
+ if !added {
+ log.Warn("Couldn't add certhashes to multiaddr", "addr", addr)
+ continue
+ }
+ listenAddrs[i] = addrWithCerthash
+ }
+ return listenAddrs
+}
diff --git a/p2p/net/swarm/swarm_addr.go b/p2p/net/swarm/swarm_addr.go
new file mode 100644
index 0000000000..b2e3e4e8aa
--- /dev/null
+++ b/p2p/net/swarm/swarm_addr.go
@@ -0,0 +1,72 @@
+package swarm
+
+import (
+ "time"
+
+ manet "github.com/multiformats/go-multiaddr/net"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+// ListenAddresses returns a list of addresses at which this swarm listens.
+func (s *Swarm) ListenAddresses() []ma.Multiaddr {
+ s.listeners.RLock()
+ defer s.listeners.RUnlock()
+ return s.listenAddressesNoLock()
+}
+
+func (s *Swarm) listenAddressesNoLock() []ma.Multiaddr {
+ addrs := make([]ma.Multiaddr, 0, len(s.listeners.m)+10) // A bit extra so we may avoid an extra allocation in the for loop below.
+ for l := range s.listeners.m {
+ addrs = append(addrs, l.Multiaddr())
+ }
+ return addrs
+}
+
+const ifaceAddrsCacheDuration = 1 * time.Minute
+
+// InterfaceListenAddresses returns a list of addresses at which this swarm
+// listens. It expands "any interface" addresses (/ip4/0.0.0.0, /ip6/::) to
+// use the known local interfaces.
+func (s *Swarm) InterfaceListenAddresses() ([]ma.Multiaddr, error) {
+ s.listeners.RLock() // RLock start
+
+ ifaceListenAddres := s.listeners.ifaceListenAddres
+ isEOL := time.Now().After(s.listeners.cacheEOL)
+ s.listeners.RUnlock() // RLock end
+
+ if !isEOL {
+ // Cache is valid, clone the slice
+ return append(ifaceListenAddres[:0:0], ifaceListenAddres...), nil
+ }
+
+	// Cache is not valid.
+	// Perform double-checked locking.
+
+ s.listeners.Lock() // Lock start
+
+ ifaceListenAddres = s.listeners.ifaceListenAddres
+ isEOL = time.Now().After(s.listeners.cacheEOL)
+ if isEOL {
+ // Cache is still invalid
+ listenAddres := s.listenAddressesNoLock()
+ if len(listenAddres) > 0 {
+ // We're actually listening on addresses.
+ var err error
+ ifaceListenAddres, err = manet.ResolveUnspecifiedAddresses(listenAddres, nil)
+ if err != nil {
+ s.listeners.Unlock() // Lock early exit
+ return nil, err
+ }
+ } else {
+ ifaceListenAddres = nil
+ }
+
+ s.listeners.ifaceListenAddres = ifaceListenAddres
+ s.listeners.cacheEOL = time.Now().Add(ifaceAddrsCacheDuration)
+ }
+
+ s.listeners.Unlock() // Lock end
+
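+	// Return a copy so that callers cannot mutate the cached slice.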
+ return append(ifaceListenAddres[:0:0], ifaceListenAddres...), nil
+}
diff --git a/p2p/net/swarm/swarm_addr_test.go b/p2p/net/swarm/swarm_addr_test.go
new file mode 100644
index 0000000000..43e76716e5
--- /dev/null
+++ b/p2p/net/swarm/swarm_addr_test.go
@@ -0,0 +1,109 @@
+package swarm_test
+
+import (
+ "context"
+ "crypto/sha256"
+ "fmt"
+ "testing"
+
+ ic "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/libp2p/go-libp2p/core/test"
+ "github.com/libp2p/go-libp2p/p2p/host/eventbus"
+ "github.com/libp2p/go-libp2p/p2p/net/swarm"
+ swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
+ circuitv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client"
+ libp2pquic "github.com/libp2p/go-libp2p/p2p/transport/quic"
+ "github.com/libp2p/go-libp2p/p2p/transport/quicreuse"
+ "github.com/libp2p/go-libp2p/p2p/transport/tcp"
+ webtransport "github.com/libp2p/go-libp2p/p2p/transport/webtransport"
+
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/multiformats/go-multibase"
+ "github.com/multiformats/go-multihash"
+ "github.com/quic-go/quic-go"
+ "github.com/stretchr/testify/require"
+)
+
+func TestDialBadAddrs(t *testing.T) {
+ m := func(s string) ma.Multiaddr {
+ maddr, err := ma.NewMultiaddr(s)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return maddr
+ }
+
+ s := makeSwarms(t, 1)[0]
+
+ test := func(a ma.Multiaddr) {
+ p := test.RandPeerIDFatal(t)
+ s.Peerstore().AddAddr(p, a, peerstore.PermanentAddrTTL)
+ if _, err := s.DialPeer(context.Background(), p); err == nil {
+ t.Errorf("swarm should not dial: %s", p)
+ }
+ }
+
+ test(m("/ip6/fe80::1")) // link local
+ test(m("/ip6/fe80::100")) // link local
+ test(m("/ip4/127.0.0.1/udp/1234/utp")) // utp
+}
+
+func TestAddrRace(t *testing.T) {
+ s := makeSwarms(t, 1)[0]
+ defer s.Close()
+
+ a1, err := s.InterfaceListenAddresses()
+ require.NoError(t, err)
+ a2, err := s.InterfaceListenAddresses()
+ require.NoError(t, err)
+
+ if len(a1) > 0 && len(a2) > 0 && &a1[0] == &a2[0] {
+ t.Fatal("got the exact same address set twice; this could lead to data races")
+ }
+}
+
+func TestAddressesWithoutListening(t *testing.T) {
+ s := swarmt.GenSwarm(t, swarmt.OptDialOnly)
+ a1, err := s.InterfaceListenAddresses()
+ require.NoError(t, err)
+ require.Empty(t, a1, "expected to be listening on no addresses")
+}
+
+func TestDialAddressSelection(t *testing.T) {
+ priv, _, err := test.RandTestKeyPair(ic.Ed25519, 256)
+ require.NoError(t, err)
+ id, err := peer.IDFromPrivateKey(priv)
+ require.NoError(t, err)
+ s, err := swarm.NewSwarm("local", nil, eventbus.NewBus())
+ require.NoError(t, err)
+
+ tcpTr, err := tcp.NewTCPTransport(nil, nil, nil)
+ require.NoError(t, err)
+ require.NoError(t, s.AddTransport(tcpTr))
+ reuse, err := quicreuse.NewConnManager(quic.StatelessResetKey{}, quic.TokenGeneratorKey{})
+ require.NoError(t, err)
+ defer reuse.Close()
+ quicTr, err := libp2pquic.NewTransport(priv, reuse, nil, nil, nil)
+ require.NoError(t, err)
+ require.NoError(t, s.AddTransport(quicTr))
+ webtransportTr, err := webtransport.New(priv, nil, reuse, nil, nil)
+ require.NoError(t, err)
+ require.NoError(t, s.AddTransport(webtransportTr))
+ h := sha256.Sum256([]byte("foo"))
+ hash, err := multihash.Encode(h[:], multihash.SHA2_256)
+ require.NoError(t, err)
+ certHash, err := multibase.Encode(multibase.Base58BTC, hash)
+ require.NoError(t, err)
+ circuitTr, err := circuitv2.New(nil, nil)
+ require.NoError(t, err)
+ require.NoError(t, s.AddTransport(circuitTr))
+
+ require.Equal(t, tcpTr, s.TransportForDialing(ma.StringCast("/ip4/127.0.0.1/tcp/1234")))
+ require.Equal(t, quicTr, s.TransportForDialing(ma.StringCast("/ip4/127.0.0.1/udp/1234/quic-v1")))
+ require.Equal(t, circuitTr, s.TransportForDialing(ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/udp/1234/quic/p2p-circuit/p2p/%s", id))))
+ require.Equal(t, webtransportTr, s.TransportForDialing(ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/udp/1234/quic-v1/webtransport/certhash/%s", certHash))))
+ require.Nil(t, s.TransportForDialing(ma.StringCast("/ip4/1.2.3.4")))
+ require.Nil(t, s.TransportForDialing(ma.StringCast("/ip4/1.2.3.4/tcp/443/ws")))
+}
diff --git a/p2p/net/swarm/swarm_conn.go b/p2p/net/swarm/swarm_conn.go
new file mode 100644
index 0000000000..1d6cf96b4e
--- /dev/null
+++ b/p2p/net/swarm/swarm_conn.go
@@ -0,0 +1,295 @@
+package swarm
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+ "time"
+
+ ic "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/transport"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+// TODO: Put this elsewhere.
+
+// ErrConnClosed is returned when operating on a closed connection.
+var ErrConnClosed = errors.New("connection closed")
+
+// Conn is the connection type used by swarm. In general, you won't use this
+// type directly.
+type Conn struct {
+ id uint64
+ conn transport.CapableConn
+ swarm *Swarm
+
+ closeOnce sync.Once
+ err error
+
+ notifyLk sync.Mutex
+
+ streams struct {
+ sync.Mutex
+ m map[*Stream]struct{}
+ }
+
+ stat network.ConnStats
+}
+
+var _ network.Conn = &Conn{}
+
+func (c *Conn) IsClosed() bool {
+ return c.conn.IsClosed()
+}
+
+func (c *Conn) ID() string {
+ // format: <first 10 chars of peer id>-<global conn ordinal>
+ return fmt.Sprintf("%s-%d", c.RemotePeer().String()[:10], c.id)
+}
+
+// Close closes this connection.
+//
+// Note: This method won't wait for the close notifications to finish as that
+// would create a deadlock when called from an open notification (because all
+// open notifications must finish before we can fire off the close
+// notifications).
+func (c *Conn) Close() error {
+ c.closeOnce.Do(func() {
+ c.doClose(0)
+ })
+ return c.err
+}
+
+func (c *Conn) CloseWithError(errCode network.ConnErrorCode) error {
+ c.closeOnce.Do(func() {
+ c.doClose(errCode)
+ })
+ return c.err
+}
+
+func (c *Conn) doClose(errCode network.ConnErrorCode) {
+ c.swarm.removeConn(c)
+
+ // Prevent new streams from opening.
+ c.streams.Lock()
+ streams := c.streams.m
+ c.streams.m = nil
+ c.streams.Unlock()
+
+ if errCode != 0 {
+ c.err = c.conn.CloseWithError(errCode)
+ } else {
+ c.err = c.conn.Close()
+ }
+
+ // Send the connectedness event after closing the connection.
+ // This ensures that both remote connection close and local connection
+ // close events are sent after the underlying transport connection is closed.
+ c.swarm.connectednessEventEmitter.RemoveConn(c.RemotePeer())
+
+ // This is just for cleaning up state. The connection has already been closed.
+ // We *could* optimize this but it really isn't worth it.
+ for s := range streams {
+ s.Reset()
+ }
+
+ // do this in a goroutine to avoid deadlocking if we call close in an open notification.
+ go func() {
+ // prevents us from issuing close notifications before finishing the open notifications
+ c.notifyLk.Lock()
+ defer c.notifyLk.Unlock()
+
+ // Only notify for disconnection if we notified for connection
+ c.swarm.notifyAll(func(f network.Notifiee) {
+ f.Disconnected(c.swarm, c)
+ })
+ c.swarm.refs.Done()
+ }()
+}
+
+func (c *Conn) removeStream(s *Stream) {
+ c.streams.Lock()
+ c.stat.NumStreams--
+ delete(c.streams.m, s)
+ c.streams.Unlock()
+ s.scope.Done()
+}
+
+// listens for new streams.
+//
+// The caller must take a swarm ref before calling. This function decrements the
+// swarm ref count.
+func (c *Conn) start() {
+ go func() {
+ defer c.swarm.refs.Done()
+ defer c.Close()
+ for {
+ ts, err := c.conn.AcceptStream()
+ if err != nil {
+ return
+ }
+ scope, err := c.swarm.ResourceManager().OpenStream(c.RemotePeer(), network.DirInbound)
+ if err != nil {
+ ts.ResetWithError(network.StreamResourceLimitExceeded)
+ continue
+ }
+ c.swarm.refs.Add(1)
+ go func() {
+ s, err := c.addStream(ts, network.DirInbound, scope)
+
+ // Don't defer this. We don't want to block
+ // swarm shutdown on the connection handler.
+ c.swarm.refs.Done()
+
+ // We only get an error here when the swarm is closed or closing.
+ if err != nil {
+ scope.Done()
+ return
+ }
+
+ if h := c.swarm.StreamHandler(); h != nil {
+ h(s)
+ }
+ s.completeAcceptStreamGoroutine()
+ }()
+ }
+ }()
+}
+
+func (c *Conn) String() string {
+ return fmt.Sprintf(
+ " %s (%s)>",
+ c.conn.Transport(),
+ c.conn.LocalMultiaddr(),
+ c.conn.LocalPeer(),
+ c.conn.RemoteMultiaddr(),
+ c.conn.RemotePeer(),
+ )
+}
+
+// LocalMultiaddr is the Multiaddr on this side
+func (c *Conn) LocalMultiaddr() ma.Multiaddr {
+ return c.conn.LocalMultiaddr()
+}
+
+// LocalPeer is the Peer on our side of the connection
+func (c *Conn) LocalPeer() peer.ID {
+ return c.conn.LocalPeer()
+}
+
+// RemoteMultiaddr is the Multiaddr on the remote side
+func (c *Conn) RemoteMultiaddr() ma.Multiaddr {
+ return c.conn.RemoteMultiaddr()
+}
+
+// RemotePeer is the Peer on the remote side
+func (c *Conn) RemotePeer() peer.ID {
+ return c.conn.RemotePeer()
+}
+
+// RemotePublicKey is the public key of the peer on the remote side
+func (c *Conn) RemotePublicKey() ic.PubKey {
+ return c.conn.RemotePublicKey()
+}
+
+// ConnState is the security connection state, including early data result.
+// Empty if not supported.
+func (c *Conn) ConnState() network.ConnectionState {
+ return c.conn.ConnState()
+}
+
+// Stat returns metadata pertaining to this connection
+func (c *Conn) Stat() network.ConnStats {
+ c.streams.Lock()
+ defer c.streams.Unlock()
+ return c.stat
+}
+
+// NewStream returns a new Stream from this connection
+func (c *Conn) NewStream(ctx context.Context) (network.Stream, error) {
+ if c.Stat().Limited {
+ if useLimited, _ := network.GetAllowLimitedConn(ctx); !useLimited {
+ return nil, network.ErrLimitedConn
+ }
+ }
+
+ scope, err := c.swarm.ResourceManager().OpenStream(c.RemotePeer(), network.DirOutbound)
+ if err != nil {
+ return nil, err
+ }
+
+ if _, ok := ctx.Deadline(); !ok {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeout(ctx, defaultNewStreamTimeout)
+ defer cancel()
+ }
+
+ s, err := c.openAndAddStream(ctx, scope)
+ if err != nil {
+ scope.Done()
+ if errors.Is(err, context.DeadlineExceeded) {
+ err = fmt.Errorf("timed out: %w", err)
+ }
+ return nil, err
+ }
+ return s, nil
+}
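+
+// Streams on limited (e.g. relayed) connections are refused unless the
+// caller opts in. A minimal sketch of opting in (the reason string is
+// arbitrary and illustrative):
+//
+//	ctx = network.WithAllowLimitedConn(ctx, "holepunch-coordination")
+//	s, err := conn.NewStream(ctx)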
+
+func (c *Conn) openAndAddStream(ctx context.Context, scope network.StreamManagementScope) (network.Stream, error) {
+ ts, err := c.conn.OpenStream(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return c.addStream(ts, network.DirOutbound, scope)
+}
+
+func (c *Conn) addStream(ts network.MuxedStream, dir network.Direction, scope network.StreamManagementScope) (*Stream, error) {
+ c.streams.Lock()
+ // Are we still online?
+ if c.streams.m == nil {
+ c.streams.Unlock()
+ ts.Reset()
+ return nil, ErrConnClosed
+ }
+
+ // Wrap and register the stream.
+ s := &Stream{
+ stream: ts,
+ conn: c,
+ scope: scope,
+ stat: network.Stats{
+ Direction: dir,
+ Opened: time.Now(),
+ },
+ id: c.swarm.nextStreamID.Add(1),
+ acceptStreamGoroutineCompleted: dir != network.DirInbound,
+ }
+ c.stat.NumStreams++
+ c.streams.m[s] = struct{}{}
+
+ // Released once the stream disconnect notifications have finished
+ // firing (in Swarm.remove).
+ c.swarm.refs.Add(1)
+
+ c.streams.Unlock()
+ return s, nil
+}
+
+// GetStreams returns the streams associated with this connection.
+func (c *Conn) GetStreams() []network.Stream {
+ c.streams.Lock()
+ defer c.streams.Unlock()
+ streams := make([]network.Stream, 0, len(c.streams.m))
+ for s := range c.streams.m {
+ streams = append(streams, s)
+ }
+ return streams
+}
+
+func (c *Conn) Scope() network.ConnScope {
+ return c.conn.Scope()
+}
diff --git a/p2p/net/swarm/swarm_dial.go b/p2p/net/swarm/swarm_dial.go
new file mode 100644
index 0000000000..031bdfa2d7
--- /dev/null
+++ b/p2p/net/swarm/swarm_dial.go
@@ -0,0 +1,740 @@
+package swarm
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net/netip"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/canonicallog"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/libp2p/go-libp2p/core/transport"
+
+ ma "github.com/multiformats/go-multiaddr"
+ mafmt "github.com/multiformats/go-multiaddr-fmt"
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
+// The maximum number of addresses we'll return when resolving all of a peer's
+// addresses.
+const maximumResolvedAddresses = 100
+
+const maximumDNSADDRRecursion = 4
+
+// Diagram of dial sync:
+//
+// many callers of Dial() synched w. dials many addrs results to callers
+// ----------------------\ dialsync use earliest /--------------
+// -----------------------\ |----------\ /----------------
+// ------------------------>------------<------- >---------<-----------------
+// -----------------------| \----x \----------------
+// ----------------------| \-----x \---------------
+// any may fail if no addr at end
+// retry dialAttempt x
+
+var (
+ // ErrDialBackoff is returned by the backoff code when a given peer has
+ // been dialed too frequently
+ ErrDialBackoff = errors.New("dial backoff")
+
+ // ErrDialRefusedBlackHole is returned when we are in a black holed environment
+ ErrDialRefusedBlackHole = errors.New("dial refused because of black hole")
+
+ // ErrDialToSelf is returned if we attempt to dial our own peer
+ ErrDialToSelf = errors.New("dial to self attempted")
+
+ // ErrNoTransport is returned when we don't know a transport for the
+ // given multiaddr.
+ ErrNoTransport = errors.New("no transport for protocol")
+
+ // ErrAllDialsFailed is returned when connecting to a peer has ultimately failed
+ ErrAllDialsFailed = errors.New("all dials failed")
+
+ // ErrNoAddresses is returned when we fail to find any addresses for a
+ // peer we're trying to dial.
+ ErrNoAddresses = errors.New("no addresses")
+
+ // ErrNoGoodAddresses is returned when we find addresses for a peer but
+ // can't use any of them.
+ ErrNoGoodAddresses = errors.New("no good addresses")
+
+ // ErrGaterDisallowedConnection is returned when the gater prevents us from
+ // forming a connection with a peer.
+ ErrGaterDisallowedConnection = errors.New("gater disallows connection to peer")
+)
+
+// ErrQUICDraft29 wraps ErrNoTransport and provides a more meaningful error message.
+var ErrQUICDraft29 errQUICDraft29
+
+type errQUICDraft29 struct{}
+
+func (errQUICDraft29) Error() string {
+ return "QUIC draft-29 has been removed, QUIC (RFC 9000) is accessible with /quic-v1"
+}
+
+func (errQUICDraft29) Unwrap() error {
+ return ErrNoTransport
+}
+
+// DialAttempts governs how many times a goroutine will try to dial a given peer.
+// Note: this is down to one, as we have _too many dials_ at the moment. To
+// add retries back in, restore the loop in Dial.
+const DialAttempts = 1
+
+// ConcurrentFdDials is the number of concurrent outbound dials over transports
+// that consume file descriptors
+const ConcurrentFdDials = 160
+
+// DefaultPerPeerRateLimit is the number of concurrent outbound dials to make
+// per peer
+var DefaultPerPeerRateLimit = 8
+
+// DialBackoff is a type for tracking peer dial backoffs. DialBackoff is used to
+// avoid over-dialing the same, dead peers. Whenever we totally time out on all
+// addresses of a peer, we add the addresses to DialBackoff. Then, whenever we
+// attempt to dial the peer again, we check each address for backoff. If it's on
+// backoff, we don't dial the address and exit promptly. If a dial is
+// successful, the peer and all its addresses are removed from backoff.
+//
+// * It's safe to use its zero value.
+// * It's thread-safe.
+// * It's *not* safe to move this type after using.
+type DialBackoff struct {
+ entries map[peer.ID]map[string]*backoffAddr
+ lock sync.RWMutex
+}
+
+type backoffAddr struct {
+ tries int
+ until time.Time
+}
+
+func (db *DialBackoff) init(ctx context.Context) {
+ if db.entries == nil {
+ db.entries = make(map[peer.ID]map[string]*backoffAddr)
+ }
+ go db.background(ctx)
+}
+
+func (db *DialBackoff) background(ctx context.Context) {
+ ticker := time.NewTicker(BackoffMax)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-ticker.C:
+ db.cleanup()
+ }
+ }
+}
+
+// Backoff returns whether the client should backoff from dialing
+// peer p at address addr
+func (db *DialBackoff) Backoff(p peer.ID, addr ma.Multiaddr) (backoff bool) {
+ db.lock.RLock()
+ defer db.lock.RUnlock()
+
+ ap, found := db.entries[p][string(addr.Bytes())]
+ return found && time.Now().Before(ap.until)
+}
+
+// BackoffBase is the base amount of time to backoff (default: 5s).
+var BackoffBase = time.Second * 5
+
+// BackoffCoef is the backoff coefficient (default: 1s).
+var BackoffCoef = time.Second
+
+// BackoffMax is the maximum backoff time (default: 5m).
+var BackoffMax = time.Minute * 5
+
+// AddBackoff adds peer's address to backoff.
+//
+// Backoff is not exponential, it's quadratic and computed according to the
+// following formula:
+//
+// BackoffBase + BackoffCoef * PriorBackoffs^2
+//
+// Where PriorBackoffs is the number of previous backoffs.
+func (db *DialBackoff) AddBackoff(p peer.ID, addr ma.Multiaddr) {
+ saddr := string(addr.Bytes())
+ db.lock.Lock()
+ defer db.lock.Unlock()
+ bp, ok := db.entries[p]
+ if !ok {
+ bp = make(map[string]*backoffAddr, 1)
+ db.entries[p] = bp
+ }
+ ba, ok := bp[saddr]
+ if !ok {
+ bp[saddr] = &backoffAddr{
+ tries: 1,
+ until: time.Now().Add(BackoffBase),
+ }
+ return
+ }
+
+ backoffTime := BackoffBase + BackoffCoef*time.Duration(ba.tries*ba.tries)
+ if backoffTime > BackoffMax {
+ backoffTime = BackoffMax
+ }
+ ba.until = time.Now().Add(backoffTime)
+ ba.tries++
+}
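+
+// With the default values, successive backoffs for one address work out to
+// (illustrative):
+//
+//	backoff #1: BackoffBase                   = 5s
+//	backoff #2: BackoffBase + BackoffCoef*1^2 = 6s
+//	backoff #3: BackoffBase + BackoffCoef*2^2 = 9s
+//	backoff #4: BackoffBase + BackoffCoef*3^2 = 14s
+//
+// and so on, capped at BackoffMax (5m).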
+
+// Clear removes a backoff record. Clients should call this after a
+// successful Dial.
+func (db *DialBackoff) Clear(p peer.ID) {
+ db.lock.Lock()
+ defer db.lock.Unlock()
+ delete(db.entries, p)
+}
+
+func (db *DialBackoff) cleanup() {
+ db.lock.Lock()
+ defer db.lock.Unlock()
+ now := time.Now()
+ for p, e := range db.entries {
+ good := false
+ for _, backoff := range e {
+ backoffTime := BackoffBase + BackoffCoef*time.Duration(backoff.tries*backoff.tries)
+ if backoffTime > BackoffMax {
+ backoffTime = BackoffMax
+ }
+ if now.Before(backoff.until.Add(backoffTime)) {
+ good = true
+ break
+ }
+ }
+ if !good {
+ delete(db.entries, p)
+ }
+ }
+}
+
+// DialPeer connects to a peer. Use network.WithForceDirectDial to force a
+// direct connection.
+//
+// The idea is that the client of Swarm does not need to know what network
+// the connection will happen over. Swarm can use whichever it chooses.
+// This allows us to use various transport protocols, do NAT traversal/relay,
+// etc. to achieve connection.
+func (s *Swarm) DialPeer(ctx context.Context, p peer.ID) (network.Conn, error) {
+ // Avoid typed nil issues.
+ c, err := s.dialPeer(ctx, p)
+ if err != nil {
+ return nil, err
+ }
+ return c, nil
+}
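+
+// A sketch of forcing a direct dial even when a (possibly relayed)
+// connection already exists (the reason string is arbitrary and
+// illustrative):
+//
+//	ctx = network.WithForceDirectDial(ctx, "hole-punching")
+//	conn, err := s.DialPeer(ctx, p)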
+
+// internal dial method that returns an unwrapped conn
+//
+// It is gated by the swarm's dial synchronization systems: dialsync and
+// dialbackoff.
+func (s *Swarm) dialPeer(ctx context.Context, p peer.ID) (*Conn, error) {
+ log.Debug("dialing peer", "source_peer", s.local, "destination_peer", p)
+ err := p.Validate()
+ if err != nil {
+ return nil, err
+ }
+
+ if p == s.local {
+ return nil, ErrDialToSelf
+ }
+
+ // check if we already have an open (usable) connection.
+ conn := s.bestAcceptableConnToPeer(ctx, p)
+ if conn != nil {
+ return conn, nil
+ }
+
+ if s.gater != nil && !s.gater.InterceptPeerDial(p) {
+ log.Debug("gater disallowed outbound connection to peer", "peer", p)
+ return nil, &DialError{Peer: p, Cause: ErrGaterDisallowedConnection}
+ }
+
+ // apply the DialPeer timeout
+ ctx, cancel := context.WithTimeout(ctx, network.GetDialPeerTimeout(ctx))
+ defer cancel()
+
+ conn, err = s.dsync.Dial(ctx, p)
+ if err == nil {
+ // Ensure we connected to the correct peer.
+ // This was most likely already checked by the security protocol, but it doesn't hurt to do it again here.
+ if conn.RemotePeer() != p {
+ conn.Close()
+ log.Error("Handshake failed to properly authenticate peer", "authenticated", conn.RemotePeer(), "expected", p)
+ return nil, fmt.Errorf("unexpected peer")
+ }
+ return conn, nil
+ }
+
+ log.Debug("network finished dialing peer", "local", s.local, "remote", p)
+
+ if ctx.Err() != nil {
+ // Context error trumps any dial errors as it was likely the ultimate cause.
+ return nil, ctx.Err()
+ }
+
+ if s.ctx.Err() != nil {
+ // Ok, so the swarm is shutting down.
+ return nil, ErrSwarmClosed
+ }
+
+ return nil, err
+}
+
+// dialWorkerLoop synchronizes and executes concurrent dials to a single peer
+func (s *Swarm) dialWorkerLoop(p peer.ID, reqch <-chan dialRequest) {
+ w := newDialWorker(s, p, reqch, nil)
+ w.loop()
+}
+
+func (s *Swarm) addrsForDial(ctx context.Context, p peer.ID) (goodAddrs []ma.Multiaddr, addrErrs []TransportError, err error) {
+ peerAddrs := s.peers.Addrs(p)
+ if len(peerAddrs) == 0 {
+ return nil, nil, ErrNoAddresses
+ }
+
+ // Resolve dns or dnsaddrs
+ resolved := s.resolveAddrs(ctx, peer.AddrInfo{ID: p, Addrs: peerAddrs})
+
+ goodAddrs = ma.Unique(resolved)
+ goodAddrs, addrErrs = s.filterKnownUndialables(p, goodAddrs)
+ if forceDirect, _ := network.GetForceDirectDial(ctx); forceDirect {
+ goodAddrs = ma.FilterAddrs(goodAddrs, s.nonProxyAddr)
+ }
+
+ if len(goodAddrs) == 0 {
+ return nil, addrErrs, ErrNoGoodAddresses
+ }
+
+ s.peers.AddAddrs(p, goodAddrs, peerstore.TempAddrTTL)
+
+ return goodAddrs, addrErrs, nil
+}
+
+func startsWithDNSComponent(m ma.Multiaddr) bool {
+ if m == nil {
+ return false
+ }
+ startsWithDNS := false
+ // Using ForEach to avoid allocating
+ ma.ForEach(m, func(c ma.Component) bool {
+ switch c.Protocol().Code {
+ case ma.P_DNS, ma.P_DNS4, ma.P_DNS6:
+ startsWithDNS = true
+ }
+
+ return false
+ })
+ return startsWithDNS
+}
+
+func stripP2PComponent(addrs []ma.Multiaddr) []ma.Multiaddr {
+ for i, addr := range addrs {
+ if id, _ := peer.IDFromP2PAddr(addr); id != "" {
+ addrs[i], _ = ma.SplitLast(addr)
+ }
+ }
+ return addrs
+}
+
+type resolver struct {
+ canResolve func(ma.Multiaddr) bool
+ resolve func(ctx context.Context, maddr ma.Multiaddr, outputLimit int) ([]ma.Multiaddr, error)
+}
+
+type resolveErr struct {
+ addr ma.Multiaddr
+ err error
+}
+
+func chainResolvers(ctx context.Context, addrs []ma.Multiaddr, outputLimit int, resolvers []resolver) ([]ma.Multiaddr, []resolveErr) {
+ nextAddrs := make([]ma.Multiaddr, 0, len(addrs))
+ errs := make([]resolveErr, 0)
+ for _, r := range resolvers {
+ for _, a := range addrs {
+ if !r.canResolve(a) {
+ nextAddrs = append(nextAddrs, a)
+ continue
+ }
+ if len(nextAddrs) >= outputLimit {
+ nextAddrs = nextAddrs[:outputLimit]
+ break
+ }
+ next, err := r.resolve(ctx, a, outputLimit-len(nextAddrs))
+ if err != nil {
+ errs = append(errs, resolveErr{addr: a, err: err})
+ continue
+ }
+ nextAddrs = append(nextAddrs, next...)
+ }
+ addrs, nextAddrs = nextAddrs, addrs
+ nextAddrs = nextAddrs[:0]
+ }
+ return addrs, errs
+}
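+
+// chainResolvers feeds the output of each resolver into the next; the swap
+// at the end of each pass reuses addrs and nextAddrs as ping-pong buffers,
+// so a chain of N resolvers allocates only two slices. For example
+// (illustrative), with the chain [dnsaddr, dns] an input of
+//
+//	[/dnsaddr/example.com]
+//
+// might first expand to [/dns4/a.example.com/tcp/1] and then resolve to
+// [/ip4/1.2.3.4/tcp/1], truncated to at most outputLimit addresses.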
+
+// resolveAddrs resolves DNS/DNSADDR components in the given peer's addresses.
+// We want to resolve the DNS components to IP addresses because we want the
+// swarm to manage ranking and dialing multiple connections, and a single DNS
+// address can resolve to multiple IP addresses.
+func (s *Swarm) resolveAddrs(ctx context.Context, pi peer.AddrInfo) []ma.Multiaddr {
+ dnsAddrResolver := resolver{
+ canResolve: startsWithDNSADDR,
+ resolve: func(ctx context.Context, maddr ma.Multiaddr, outputLimit int) ([]ma.Multiaddr, error) {
+ return s.multiaddrResolver.ResolveDNSAddr(ctx, pi.ID, maddr, maximumDNSADDRRecursion, outputLimit)
+ },
+ }
+
+ var skipped []ma.Multiaddr
+ skipResolver := resolver{
+ canResolve: func(addr ma.Multiaddr) bool {
+ tpt := s.TransportForDialing(addr)
+ if tpt == nil {
+ return false
+ }
+ _, ok := tpt.(transport.SkipResolver)
+ return ok
+ },
+ resolve: func(ctx context.Context, addr ma.Multiaddr, _ int) ([]ma.Multiaddr, error) {
+ tpt := s.TransportForDialing(addr)
+ resolver, ok := tpt.(transport.SkipResolver)
+ if !ok {
+ return []ma.Multiaddr{addr}, nil
+ }
+ if resolver.SkipResolve(ctx, addr) {
+ skipped = append(skipped, addr)
+ return nil, nil
+ }
+ return []ma.Multiaddr{addr}, nil
+ },
+ }
+
+ tptResolver := resolver{
+ canResolve: func(addr ma.Multiaddr) bool {
+ tpt := s.TransportForDialing(addr)
+ if tpt == nil {
+ return false
+ }
+ _, ok := tpt.(transport.Resolver)
+ return ok
+ },
+ resolve: func(ctx context.Context, addr ma.Multiaddr, outputLimit int) ([]ma.Multiaddr, error) {
+ tpt := s.TransportForDialing(addr)
+ resolver, ok := tpt.(transport.Resolver)
+ if !ok {
+ return []ma.Multiaddr{addr}, nil
+ }
+ addrs, err := resolver.Resolve(ctx, addr)
+ if err != nil {
+ return nil, err
+ }
+ if len(addrs) > outputLimit {
+ addrs = addrs[:outputLimit]
+ }
+ return addrs, nil
+ },
+ }
+
+ dnsResolver := resolver{
+ canResolve: startsWithDNSComponent,
+ resolve: s.multiaddrResolver.ResolveDNSComponent,
+ }
+ addrs, errs := chainResolvers(ctx, pi.Addrs, maximumResolvedAddresses, []resolver{dnsAddrResolver, skipResolver, tptResolver, dnsResolver})
+ for _, err := range errs {
+ log.Warn("Failed to resolve addr", "addr", err.addr, "err", err.err)
+ }
+ // Add skipped addresses back to the resolved addresses
+ addrs = append(addrs, skipped...)
+ return stripP2PComponent(addrs)
+}
+
+func (s *Swarm) dialNextAddr(ctx context.Context, p peer.ID, addr ma.Multiaddr, resch chan transport.DialUpdate) error {
+ // check the dial backoff
+ if forceDirect, _ := network.GetForceDirectDial(ctx); !forceDirect {
+ if s.backf.Backoff(p, addr) {
+ return ErrDialBackoff
+ }
+ }
+
+ // start the dial
+ s.limitedDial(ctx, p, addr, resch)
+
+ return nil
+}
+
+func (s *Swarm) CanDial(p peer.ID, addr ma.Multiaddr) bool {
+ dialable, _ := s.filterKnownUndialables(p, []ma.Multiaddr{addr})
+ return len(dialable) > 0
+}
+
+func (s *Swarm) nonProxyAddr(addr ma.Multiaddr) bool {
+ t := s.TransportForDialing(addr)
+ return !t.Proxy()
+}
+
+var quicDraft29DialMatcher = mafmt.And(mafmt.IP, mafmt.Base(ma.P_UDP), mafmt.Base(ma.P_QUIC))
+
+// filterKnownUndialables takes a list of multiaddrs, and removes those
+// that we definitely don't want to dial: addresses configured to be blocked,
+// IPv6 link-local addresses, addresses without a dial-capable transport,
+// addresses that we know to be our own, and addresses with a better transport
+// available. This is an optimization to avoid wasting time on dials that we
+// know are going to fail or for which we have a better alternative.
+func (s *Swarm) filterKnownUndialables(p peer.ID, addrs []ma.Multiaddr) (goodAddrs []ma.Multiaddr, addrErrs []TransportError) {
+ lisAddrs, _ := s.InterfaceListenAddresses()
+ var ourAddrs []ma.Multiaddr
+ for _, addr := range lisAddrs {
+ // we're only sure about filtering out /ip4 and /ip6 addresses, so far
+ ma.ForEach(addr, func(c ma.Component) bool {
+ if c.Protocol().Code == ma.P_IP4 || c.Protocol().Code == ma.P_IP6 {
+ ourAddrs = append(ourAddrs, addr)
+ }
+ return false
+ })
+ }
+
+ addrErrs = make([]TransportError, 0, len(addrs))
+
+ // The order of checking for transport and filtering low priority addrs is important. If we
+ // can only dial /webtransport, we don't want to filter /webtransport addresses out because
+ // the peer had a /quic-v1 address
+
+ // filter addresses with no transport
+ addrs = ma.FilterAddrs(addrs, func(a ma.Multiaddr) bool {
+ if s.TransportForDialing(a) == nil {
+ e := ErrNoTransport
+ // We used to support QUIC draft-29 for a long time.
+ // Provide a more useful error when attempting to dial a QUIC draft-29 address.
+ if quicDraft29DialMatcher.Matches(a) {
+ e = ErrQUICDraft29
+ }
+ addrErrs = append(addrErrs, TransportError{Address: a, Cause: e})
+ return false
+ }
+ return true
+ })
+
+ // filter low priority addresses among the addresses we can dial
+ // We don't return an error for these addresses
+ addrs = filterLowPriorityAddresses(addrs)
+
+ // remove black holed addrs
+ addrs, blackHoledAddrs := s.bhd.FilterAddrs(addrs)
+ for _, a := range blackHoledAddrs {
+ addrErrs = append(addrErrs, TransportError{Address: a, Cause: ErrDialRefusedBlackHole})
+ }
+
+ return ma.FilterAddrs(addrs,
+ // Linux and BSD treat an unspecified address when dialing as a localhost address.
+ // Windows doesn't support this. We filter all such addresses out because peers
+ // listening on unspecified addresses will advertise more specific addresses.
+ // https://unix.stackexchange.com/a/419881
+ // https://superuser.com/a/1755455
+ func(addr ma.Multiaddr) bool {
+ return !manet.IsIPUnspecified(addr)
+ },
+ func(addr ma.Multiaddr) bool {
+ if ma.Contains(ourAddrs, addr) {
+ addrErrs = append(addrErrs, TransportError{Address: addr, Cause: ErrDialToSelf})
+ return false
+ }
+ return true
+ },
+ // TODO: Consider allowing link-local addresses
+ func(addr ma.Multiaddr) bool { return !manet.IsIP6LinkLocal(addr) },
+ func(addr ma.Multiaddr) bool {
+ if s.gater != nil && !s.gater.InterceptAddrDial(p, addr) {
+ addrErrs = append(addrErrs, TransportError{Address: addr, Cause: ErrGaterDisallowedConnection})
+ return false
+ }
+ return true
+ },
+ ), addrErrs
+}
+
+// limitedDial will start a dial to the given peer when
+// it is able, respecting the various different types of rate
+// limiting that occur without using extra goroutines per addr
+func (s *Swarm) limitedDial(ctx context.Context, p peer.ID, a ma.Multiaddr, resp chan transport.DialUpdate) {
+ timeout := s.dialTimeout
+ if manet.IsPrivateAddr(a) && s.dialTimeoutLocal < s.dialTimeout {
+ timeout = s.dialTimeoutLocal
+ }
+ s.limiter.AddDialJob(&dialJob{
+ addr: a,
+ peer: p,
+ resp: resp,
+ ctx: ctx,
+ timeout: timeout,
+ })
+}
+
+// dialAddr is the actual dial for an addr, indirectly invoked through the limiter
+func (s *Swarm) dialAddr(ctx context.Context, p peer.ID, addr ma.Multiaddr, updCh chan<- transport.DialUpdate) (transport.CapableConn, error) {
+ // Just to double check. Costs nothing.
+ if s.local == p {
+ return nil, ErrDialToSelf
+ }
+ // Check before we start work
+ if err := ctx.Err(); err != nil {
+ log.Debug("swarm not dialing. Context cancelled", "source_peer", s.local, "err", err, "destination_peer", p, "addr", addr)
+ return nil, err
+ }
+ log.Debug("swarm dialing peer", "source_peer", s.local, "destination_peer", p, "addr", addr)
+
+ tpt := s.TransportForDialing(addr)
+ if tpt == nil {
+ return nil, ErrNoTransport
+ }
+
+ start := time.Now()
+ var connC transport.CapableConn
+ var err error
+ if du, ok := tpt.(transport.DialUpdater); ok {
+ connC, err = du.DialWithUpdates(ctx, addr, p, updCh)
+ } else {
+ connC, err = tpt.Dial(ctx, addr, p)
+ }
+
+ // We're recording any error as a failure here.
+ // Notably, this also applies to cancellations (i.e. if another dial attempt was faster).
+ // This is ok since the black hole detector uses a very low threshold (5%).
+ s.bhd.RecordResult(addr, err == nil)
+
+ if err != nil {
+ if s.metricsTracer != nil {
+ s.metricsTracer.FailedDialing(addr, err, context.Cause(ctx))
+ }
+ return nil, err
+ }
+ canonicallog.LogPeerStatus(100, connC.RemotePeer(), connC.RemoteMultiaddr(), "connection_status", "established", "dir", "outbound")
+ if s.metricsTracer != nil {
+ connWithMetrics := wrapWithMetrics(connC, s.metricsTracer, start, network.DirOutbound)
+ connWithMetrics.completedHandshake()
+ connC = connWithMetrics
+ }
+
+ // Trust the transport? Yeah... right.
+ if connC.RemotePeer() != p {
+ connC.Close()
+ err = fmt.Errorf("BUG in transport %T: tried to dial %s, dialed %s", tpt, p, connC.RemotePeer())
+ log.Error("BUG in transport: peer mismatch", "transport_type", fmt.Sprintf("%T", tpt), "expected_peer", p, "observed_peer", connC.RemotePeer())
+ return nil, err
+ }
+
+ // success! we got one!
+ return connC, nil
+}
+
+// TODO We should have a `IsFdConsuming() bool` method on the `Transport` interface in go-libp2p/core/transport.
+// This function checks if any of the transport protocols in the address requires a file descriptor.
+// For now:
+// A non-circuit address that uses the TCP or UNIX protocol is deemed FD-consuming.
+// For a circuit-relay address, we look at the address of the relay server/proxy
+// and use the same logic as above to decide.
+func isFdConsumingAddr(addr ma.Multiaddr) bool {
+ first, _ := ma.SplitFunc(addr, func(c ma.Component) bool {
+ return c.Protocol().Code == ma.P_CIRCUIT
+ })
+
+ // for safety
+ if first == nil {
+ return true
+ }
+
+ _, err1 := first.ValueForProtocol(ma.P_TCP)
+ _, err2 := first.ValueForProtocol(ma.P_UNIX)
+ return err1 == nil || err2 == nil
+}
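+
+// For example (illustrative):
+//
+//	/ip4/1.2.3.4/tcp/80                      -> true  (TCP consumes an fd)
+//	/ip4/1.2.3.4/udp/443/quic-v1             -> false (UDP sockets are shared)
+//	/ip4/1.2.3.4/tcp/80/p2p-circuit          -> true  (relay reached over TCP)
+//	/ip4/1.2.3.4/udp/443/quic-v1/p2p-circuit -> false (relay reached over QUIC)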
+
+func isRelayAddr(addr ma.Multiaddr) bool {
+ _, err := addr.ValueForProtocol(ma.P_CIRCUIT)
+ return err == nil
+}
+
+// filterLowPriorityAddresses removes, in place, addresses for which we have a better alternative.
+// 1. If a /quic-v1 address is present, filter out /quic and /webtransport addresses on the same 2-tuple:
+// QUIC v1 is preferred over the deprecated QUIC draft-29, and given the choice, we prefer using
+// raw QUIC over using WebTransport.
+// 2. If a /tcp address is present, filter out /ws or /wss addresses on the same 2-tuple:
+// We prefer using raw TCP over using WebSocket.
+func filterLowPriorityAddresses(addrs []ma.Multiaddr) []ma.Multiaddr {
+ // make a map of QUIC v1 and TCP AddrPorts.
+ quicV1Addr := make(map[netip.AddrPort]struct{})
+ tcpAddr := make(map[netip.AddrPort]struct{})
+ for _, a := range addrs {
+ switch {
+ case isProtocolAddr(a, ma.P_WEBTRANSPORT):
+ case isProtocolAddr(a, ma.P_QUIC_V1):
+ ap, err := addrPort(a, ma.P_UDP)
+ if err != nil {
+ continue
+ }
+ quicV1Addr[ap] = struct{}{}
+ case isProtocolAddr(a, ma.P_WS) || isProtocolAddr(a, ma.P_WSS):
+ case isProtocolAddr(a, ma.P_TCP):
+ ap, err := addrPort(a, ma.P_TCP)
+ if err != nil {
+ continue
+ }
+ tcpAddr[ap] = struct{}{}
+ }
+ }
+
+ i := 0
+ for _, a := range addrs {
+ switch {
+ case isProtocolAddr(a, ma.P_WEBTRANSPORT) || isProtocolAddr(a, ma.P_QUIC):
+ ap, err := addrPort(a, ma.P_UDP)
+ if err != nil {
+ break
+ }
+ if _, ok := quicV1Addr[ap]; ok {
+ continue
+ }
+ case isProtocolAddr(a, ma.P_WS) || isProtocolAddr(a, ma.P_WSS):
+ ap, err := addrPort(a, ma.P_TCP)
+ if err != nil {
+ break
+ }
+ if _, ok := tcpAddr[ap]; ok {
+ continue
+ }
+ }
+ addrs[i] = a
+ i++
+ }
+ return addrs[:i]
+}
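+
+// For example (illustrative), given addresses on the same IP and ports:
+//
+//	[/ip4/1.2.3.4/udp/1/quic-v1,
+//	 /ip4/1.2.3.4/udp/1/quic-v1/webtransport,
+//	 /ip4/1.2.3.4/tcp/2,
+//	 /ip4/1.2.3.4/tcp/2/ws]
+//
+// only the /quic-v1 and /tcp addresses are kept.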
+
+// addrPort returns the ip and port for a. p should be either ma.P_TCP or ma.P_UDP.
+// a must be an (IP, TCP) or (IP, UDP) address.
+func addrPort(a ma.Multiaddr, p int) (netip.AddrPort, error) {
+ ip, err := manet.ToIP(a)
+ if err != nil {
+ return netip.AddrPort{}, err
+ }
+ port, err := a.ValueForProtocol(p)
+ if err != nil {
+ return netip.AddrPort{}, err
+ }
+ pi, err := strconv.Atoi(port)
+ if err != nil {
+ return netip.AddrPort{}, err
+ }
+ addr, ok := netip.AddrFromSlice(ip)
+ if !ok {
+ return netip.AddrPort{}, fmt.Errorf("failed to parse IP %s", ip)
+ }
+ return netip.AddrPortFrom(addr, uint16(pi)), nil
+}
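+
+// For example (illustrative):
+//
+//	ap, _ := addrPort(ma.StringCast("/ip4/1.2.3.4/tcp/80"), ma.P_TCP)
+//	// ap == netip.MustParseAddrPort("1.2.3.4:80")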
diff --git a/p2p/net/swarm/swarm_dial_test.go b/p2p/net/swarm/swarm_dial_test.go
new file mode 100644
index 0000000000..836ad7334e
--- /dev/null
+++ b/p2p/net/swarm/swarm_dial_test.go
@@ -0,0 +1,431 @@
+package swarm
+
+import (
+ "bytes"
+ "context"
+ "crypto/rand"
+ "errors"
+ "net"
+ "sort"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/libp2p/go-libp2p/core/test"
+ "github.com/libp2p/go-libp2p/p2p/host/eventbus"
+ "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem"
+ libp2pquic "github.com/libp2p/go-libp2p/p2p/transport/quic"
+ "github.com/libp2p/go-libp2p/p2p/transport/quicreuse"
+ "github.com/libp2p/go-libp2p/p2p/transport/tcp"
+ "github.com/libp2p/go-libp2p/p2p/transport/websocket"
+ libp2pwebtransport "github.com/libp2p/go-libp2p/p2p/transport/webtransport"
+ "github.com/quic-go/quic-go"
+
+ ma "github.com/multiformats/go-multiaddr"
+ madns "github.com/multiformats/go-multiaddr-dns"
+ matest "github.com/multiformats/go-multiaddr/matest"
+ "github.com/stretchr/testify/require"
+)
+
+func TestAddrsForDial(t *testing.T) {
+ mockResolver := madns.MockResolver{IP: make(map[string][]net.IPAddr)}
+ ipaddr, err := net.ResolveIPAddr("ip4", "1.2.3.4")
+ if err != nil {
+ t.Fatal(err)
+ }
+ mockResolver.IP["example.com"] = []net.IPAddr{*ipaddr}
+
+ resolver, err := madns.NewResolver(madns.WithDomainResolver("example.com", &mockResolver))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ priv, _, err := crypto.GenerateEd25519Key(rand.Reader)
+ require.NoError(t, err)
+ id, err := peer.IDFromPrivateKey(priv)
+ require.NoError(t, err)
+
+ ps, err := pstoremem.NewPeerstore()
+ require.NoError(t, err)
+ ps.AddPubKey(id, priv.GetPublic())
+ ps.AddPrivKey(id, priv)
+ t.Cleanup(func() { ps.Close() })
+
+ tpt, err := websocket.New(nil, &network.NullResourceManager{}, nil)
+ require.NoError(t, err)
+ s, err := NewSwarm(id, ps, eventbus.NewBus(), WithMultiaddrResolver(ResolverFromMaDNS{resolver}))
+ require.NoError(t, err)
+ defer s.Close()
+ err = s.AddTransport(tpt)
+ require.NoError(t, err)
+
+ otherPeer := test.RandPeerIDFatal(t)
+
+ ps.AddAddr(otherPeer, ma.StringCast("/dns4/example.com/tcp/1234/wss"), time.Hour)
+
+ ctx := context.Background()
+ mas, _, err := s.addrsForDial(ctx, otherPeer)
+ require.NoError(t, err)
+
+ require.NotZero(t, len(mas))
+}
+
+func TestDedupAddrsForDial(t *testing.T) {
+ mockResolver := madns.MockResolver{IP: make(map[string][]net.IPAddr)}
+ ipaddr, err := net.ResolveIPAddr("ip4", "1.2.3.4")
+ if err != nil {
+ t.Fatal(err)
+ }
+ mockResolver.IP["example.com"] = []net.IPAddr{*ipaddr}
+
+ resolver, err := madns.NewResolver(madns.WithDomainResolver("example.com", &mockResolver))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ priv, _, err := crypto.GenerateEd25519Key(rand.Reader)
+ require.NoError(t, err)
+ id, err := peer.IDFromPrivateKey(priv)
+ require.NoError(t, err)
+
+ ps, err := pstoremem.NewPeerstore()
+ require.NoError(t, err)
+ ps.AddPubKey(id, priv.GetPublic())
+ ps.AddPrivKey(id, priv)
+ t.Cleanup(func() { ps.Close() })
+
+ s, err := NewSwarm(id, ps, eventbus.NewBus(), WithMultiaddrResolver(ResolverFromMaDNS{resolver}))
+ require.NoError(t, err)
+ defer s.Close()
+
+ tpt, err := tcp.NewTCPTransport(nil, &network.NullResourceManager{}, nil)
+ require.NoError(t, err)
+ err = s.AddTransport(tpt)
+ require.NoError(t, err)
+
+ otherPeer := test.RandPeerIDFatal(t)
+
+ ps.AddAddr(otherPeer, ma.StringCast("/dns4/example.com/tcp/1234"), time.Hour)
+ ps.AddAddr(otherPeer, ma.StringCast("/ip4/1.2.3.4/tcp/1234"), time.Hour)
+
+ ctx := context.Background()
+ mas, _, err := s.addrsForDial(ctx, otherPeer)
+ require.NoError(t, err)
+
+ require.Len(t, mas, 1)
+}
+
+func newTestSwarmWithResolver(t *testing.T, resolver *madns.Resolver) *Swarm {
+ priv, _, err := crypto.GenerateEd25519Key(rand.Reader)
+ require.NoError(t, err)
+ id, err := peer.IDFromPrivateKey(priv)
+ require.NoError(t, err)
+ ps, err := pstoremem.NewPeerstore()
+ require.NoError(t, err)
+ ps.AddPubKey(id, priv.GetPublic())
+ ps.AddPrivKey(id, priv)
+ t.Cleanup(func() { ps.Close() })
+ s, err := NewSwarm(id, ps, eventbus.NewBus(), WithMultiaddrResolver(ResolverFromMaDNS{resolver}))
+ require.NoError(t, err)
+ t.Cleanup(func() {
+ s.Close()
+ })
+
+ // Add a tcp transport so that we know we can dial a tcp multiaddr and we don't filter it out.
+ tpt, err := tcp.NewTCPTransport(nil, &network.NullResourceManager{}, nil)
+ require.NoError(t, err)
+ err = s.AddTransport(tpt)
+ require.NoError(t, err)
+
+ connmgr, err := quicreuse.NewConnManager(quic.StatelessResetKey{}, quic.TokenGeneratorKey{})
+ require.NoError(t, err)
+ quicTpt, err := libp2pquic.NewTransport(priv, connmgr, nil, nil, &network.NullResourceManager{})
+ require.NoError(t, err)
+ err = s.AddTransport(quicTpt)
+ require.NoError(t, err)
+
+ wtTpt, err := libp2pwebtransport.New(priv, nil, connmgr, nil, &network.NullResourceManager{})
+ require.NoError(t, err)
+ err = s.AddTransport(wtTpt)
+ require.NoError(t, err)
+
+ wsTpt, err := websocket.New(nil, &network.NullResourceManager{}, nil)
+ require.NoError(t, err)
+ err = s.AddTransport(wsTpt)
+ require.NoError(t, err)
+
+ return s
+}
+
+func TestAddrResolution(t *testing.T) {
+ ctx := context.Background()
+
+ p1 := test.RandPeerIDFatal(t)
+ p2 := test.RandPeerIDFatal(t)
+ addr1 := ma.StringCast("/dnsaddr/example.com")
+ addr2 := ma.StringCast("/ip4/192.0.2.1/tcp/123")
+
+ p2paddr2 := ma.StringCast("/ip4/192.0.2.1/tcp/123/p2p/" + p1.String())
+ p2paddr3 := ma.StringCast("/ip4/192.0.2.1/tcp/123/p2p/" + p2.String())
+
+ backend := &madns.MockResolver{
+ TXT: map[string][]string{"_dnsaddr.example.com": {
+ "dnsaddr=" + p2paddr2.String(), "dnsaddr=" + p2paddr3.String(),
+ }},
+ }
+ resolver, err := madns.NewResolver(madns.WithDefaultResolver(backend))
+ require.NoError(t, err)
+
+ s := newTestSwarmWithResolver(t, resolver)
+
+ s.peers.AddAddr(p1, addr1, time.Hour)
+
+ tctx, cancel := context.WithTimeout(ctx, time.Millisecond*100)
+ defer cancel()
+ mas, _, err := s.addrsForDial(tctx, p1)
+ require.NoError(t, err)
+
+ require.Len(t, mas, 1)
+ matest.AssertMultiaddrsContain(t, mas, addr2)
+
+ addrs := s.peers.Addrs(p1)
+ require.Len(t, addrs, 2)
+ matest.AssertMultiaddrsContain(t, addrs, addr1)
+ matest.AssertMultiaddrsContain(t, addrs, addr2)
+}
+
+func TestAddrResolutionRecursive(t *testing.T) {
+ p1 := test.RandPeerIDFatal(t)
+ p2 := test.RandPeerIDFatal(t)
+
+ addr1 := ma.StringCast("/dnsaddr/example.com")
+ addr2 := ma.StringCast("/ip4/192.0.2.1/tcp/123")
+ p2paddr1 := ma.StringCast("/dnsaddr/example.com/p2p/" + p1.String())
+ p2paddr2 := ma.StringCast("/dnsaddr/example.com/p2p/" + p2.String())
+ p2paddr1i := ma.StringCast("/dnsaddr/foo.example.com/p2p/" + p1.String())
+ p2paddr2i := ma.StringCast("/dnsaddr/bar.example.com/p2p/" + p2.String())
+ p2paddr1f := ma.StringCast("/ip4/192.0.2.1/tcp/123/p2p/" + p1.String())
+
+ backend := &madns.MockResolver{
+ TXT: map[string][]string{
+ "_dnsaddr.example.com": {
+ "dnsaddr=" + p2paddr1i.String(),
+ "dnsaddr=" + p2paddr2i.String(),
+ },
+ "_dnsaddr.foo.example.com": {"dnsaddr=" + p2paddr1f.String()},
+ "_dnsaddr.bar.example.com": {"dnsaddr=" + p2paddr2i.String()},
+ },
+ }
+ resolver, err := madns.NewResolver(madns.WithDefaultResolver(backend))
+ require.NoError(t, err)
+
+ s := newTestSwarmWithResolver(t, resolver)
+
+ pi1, err := peer.AddrInfoFromP2pAddr(p2paddr1)
+ require.NoError(t, err)
+
+ tctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*100)
+ defer cancel()
+ s.Peerstore().AddAddrs(pi1.ID, pi1.Addrs, peerstore.TempAddrTTL)
+ _, _, err = s.addrsForDial(tctx, p1)
+ require.NoError(t, err)
+
+ addrs1 := s.Peerstore().Addrs(pi1.ID)
+ require.Len(t, addrs1, 2)
+ matest.AssertMultiaddrsContain(t, addrs1, addr1)
+ matest.AssertMultiaddrsContain(t, addrs1, addr2)
+
+ pi2, err := peer.AddrInfoFromP2pAddr(p2paddr2)
+ require.NoError(t, err)
+
+ s.Peerstore().AddAddrs(pi2.ID, pi2.Addrs, peerstore.TempAddrTTL)
+ _, _, err = s.addrsForDial(tctx, p2)
+ // This never resolves to a good address
+ require.Equal(t, ErrNoGoodAddresses, err)
+
+ addrs2 := s.Peerstore().Addrs(pi2.ID)
+ require.Len(t, addrs2, 1)
+ matest.AssertMultiaddrsContain(t, addrs2, addr1)
+}
+
+// see https://github.com/libp2p/go-libp2p/issues/2562
+func TestAddrResolutionRecursiveTransportSpecific(t *testing.T) {
+ p := test.RandPeerIDFatal(t)
+
+ backend := &madns.MockResolver{
+ IP: map[string][]net.IPAddr{
+ "sub.example.com": {net.IPAddr{IP: net.IPv4(1, 2, 3, 4)}},
+ },
+ TXT: map[string][]string{
+ "_dnsaddr.example.com": {"dnsaddr=/dns4/sub.example.com/tcp/443/wss/p2p/" + p.String()},
+ },
+ }
+ resolver, err := madns.NewResolver(madns.WithDefaultResolver(backend))
+ require.NoError(t, err)
+
+ s := newTestSwarmWithResolver(t, resolver)
+ pi1, err := peer.AddrInfoFromP2pAddr(ma.StringCast("/dnsaddr/example.com/p2p/" + p.String()))
+ require.NoError(t, err)
+
+ tctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*100)
+ defer cancel()
+ s.Peerstore().AddAddrs(pi1.ID, pi1.Addrs, peerstore.TempAddrTTL)
+ addrs, _, err := s.addrsForDial(tctx, p)
+ require.NoError(t, err)
+ require.Len(t, addrs, 1)
+ require.Equal(t, "/ip4/1.2.3.4/tcp/443/tls/sni/sub.example.com/ws", addrs[0].String())
+}
+
+func TestAddrsForDialFiltering(t *testing.T) {
+ q1 := ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1")
+ q1v1 := ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1")
+ wt1 := ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1/webtransport/")
+
+ q2 := ma.StringCast("/ip4/1.2.3.4/udp/2/quic-v1")
+ q2v1 := ma.StringCast("/ip4/1.2.3.4/udp/2/quic-v1")
+ wt2 := ma.StringCast("/ip4/1.2.3.4/udp/2/quic-v1/webtransport/")
+
+ q3 := ma.StringCast("/ip4/1.2.3.4/udp/3/quic-v1")
+
+ t1 := ma.StringCast("/ip4/1.2.3.4/tcp/1")
+ ws1 := ma.StringCast("/ip4/1.2.3.4/tcp/1/ws")
+
+ unSpecQ := ma.StringCast("/ip4/0.0.0.0/udp/2/quic-v1")
+ unSpecT := ma.StringCast("/ip6/::/tcp/2/")
+
+ resolver, err := madns.NewResolver(madns.WithDefaultResolver(&madns.MockResolver{}))
+ require.NoError(t, err)
+ s := newTestSwarmWithResolver(t, resolver)
+ ourAddrs := s.ListenAddresses()
+
+ testCases := []struct {
+ name string
+ input []ma.Multiaddr
+ output []ma.Multiaddr
+ }{
+ {
+ name: "quic-filtered",
+ input: []ma.Multiaddr{q1, q1v1, q2, q2v1, q3},
+ output: []ma.Multiaddr{q1v1, q2v1, q3},
+ },
+ {
+ name: "webtransport-filtered",
+ input: []ma.Multiaddr{q1, q1v1, wt1, wt2},
+ output: []ma.Multiaddr{q1v1, wt2},
+ },
+ {
+ name: "all",
+ input: []ma.Multiaddr{q1, q1v1, wt1, q2, q2v1, wt2, t1, ws1},
+ output: []ma.Multiaddr{q1v1, q2v1, t1},
+ },
+ {
+ name: "our-addrs-filtered",
+ input: append([]ma.Multiaddr{q1}, ourAddrs...),
+ output: []ma.Multiaddr{q1},
+ },
+ {
+ name: "unspecified-filtered",
+ input: []ma.Multiaddr{q1v1, t1, unSpecQ, unSpecT},
+ output: []ma.Multiaddr{q1v1, t1},
+ },
+ }
+
+ ctx := context.Background()
+ p1 := test.RandPeerIDFatal(t)
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ s.Peerstore().ClearAddrs(p1)
+ s.Peerstore().AddAddrs(p1, tc.input, peerstore.PermanentAddrTTL)
+ result, _, err := s.addrsForDial(ctx, p1)
+ require.NoError(t, err)
+ sort.Slice(result, func(i, j int) bool { return bytes.Compare(result[i].Bytes(), result[j].Bytes()) < 0 })
+ sort.Slice(tc.output, func(i, j int) bool { return bytes.Compare(tc.output[i].Bytes(), tc.output[j].Bytes()) < 0 })
+ if len(result) != len(tc.output) {
+ t.Fatalf("output mismatch got: %s want: %s", result, tc.output)
+ }
+ for i := 0; i < len(result); i++ {
+ if !result[i].Equal(tc.output[i]) {
+ t.Fatalf("output mismatch got: %s want: %s", result, tc.output)
+ }
+ }
+ })
+ }
+}
+
+func TestBlackHoledAddrBlocked(t *testing.T) {
+ resolver, err := madns.NewResolver()
+ if err != nil {
+ t.Fatal(err)
+ }
+ s := newTestSwarmWithResolver(t, resolver)
+ defer s.Close()
+
+ n := 3
+ s.bhd.ipv6 = &BlackHoleSuccessCounter{N: n, MinSuccesses: 1, Name: "IPv6"}
+
+ // All dials to this addr will fail.
+ // manet.IsPublic is aggressive for IPv6 addresses. Use a NAT64 address.
+ addr := ma.StringCast("/ip6/64:ff9b::1.2.3.4/tcp/54321/")
+
+ p, err := test.RandPeerID()
+ if err != nil {
+ t.Error(err)
+ }
+ s.Peerstore().AddAddr(p, addr, peerstore.PermanentAddrTTL)
+
+ // do 1 extra dial to ensure that the blackHoleDetector state is updated since it
+ // happens in a different goroutine
+ for i := 0; i < n+1; i++ {
+ s.backf.Clear(p)
+ ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
+ conn, err := s.DialPeer(ctx, p)
+ if err == nil || conn != nil {
+ t.Fatalf("expected dial to fail")
+ }
+ cancel()
+ }
+ s.backf.Clear(p)
+ ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
+ defer cancel()
+ conn, err := s.DialPeer(ctx, p)
+ require.Nil(t, conn)
+ var de *DialError
+ if !errors.As(err, &de) {
+ t.Fatalf("expected to receive an error of type *DialError, got %s of type %T", err, err)
+ }
+ require.ErrorIs(t, err, ErrDialRefusedBlackHole)
+}
+
+type mockDNSResolver struct {
+ ipsToReturn []net.IPAddr
+ txtsToReturn []string
+}
+
+var _ madns.BasicResolver = (*mockDNSResolver)(nil)
+
+func (m *mockDNSResolver) LookupIPAddr(_ context.Context, _ string) ([]net.IPAddr, error) {
+ return m.ipsToReturn, nil
+}
+
+func (m *mockDNSResolver) LookupTXT(_ context.Context, _ string) ([]string, error) {
+ return m.txtsToReturn, nil
+}
+
+func TestSkipDialingManyDNS(t *testing.T) {
+ resolver, err := madns.NewResolver(madns.WithDefaultResolver(&mockDNSResolver{ipsToReturn: []net.IPAddr{{IP: net.ParseIP("1.2.3.4")}, {IP: net.ParseIP("1.2.3.5")}}}))
+ if err != nil {
+ t.Fatal(err)
+ }
+ s := newTestSwarmWithResolver(t, resolver)
+ defer s.Close()
+ id := test.RandPeerIDFatal(t)
+ addr := ma.StringCast("/dns/example.com/udp/1234/p2p-circuit/dns/example.com/p2p-circuit/dns/example.com")
+
+ resolved := s.resolveAddrs(context.Background(), peer.AddrInfo{ID: id, Addrs: []ma.Multiaddr{addr}})
+ require.NoError(t, err)
+ require.Less(t, len(resolved), 3, "got: %v", resolved)
+}
diff --git a/p2p/net/swarm/swarm_event_test.go b/p2p/net/swarm/swarm_event_test.go
new file mode 100644
index 0000000000..5010215fc2
--- /dev/null
+++ b/p2p/net/swarm/swarm_event_test.go
@@ -0,0 +1,310 @@
+package swarm_test
+
+import (
+ "context"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/event"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/p2p/host/eventbus"
+ . "github.com/libp2p/go-libp2p/p2p/net/swarm"
+ swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
+
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func newSwarmWithSubscription(t *testing.T) (*Swarm, event.Subscription) {
+ t.Helper()
+ bus := eventbus.NewBus()
+ sw := swarmt.GenSwarm(t, swarmt.EventBus(bus))
+ t.Cleanup(func() { sw.Close() })
+ sub, err := bus.Subscribe(new(event.EvtPeerConnectednessChanged))
+ require.NoError(t, err)
+ t.Cleanup(func() { sub.Close() })
+ return sw, sub
+}
+
+func checkEvent(t *testing.T, sub event.Subscription, expected event.EvtPeerConnectednessChanged) {
+ t.Helper()
+ select {
+ case ev, ok := <-sub.Out():
+ require.True(t, ok)
+ evt := ev.(event.EvtPeerConnectednessChanged)
+ require.Equal(t, expected.Connectedness, evt.Connectedness, "wrong connectedness state")
+ require.Equal(t, expected.Peer, evt.Peer)
+ case <-time.After(time.Second):
+ t.Fatal("didn't get PeerConnectedness event")
+ }
+
+ // check that there are no more events
+ select {
+ case <-sub.Out():
+ t.Fatal("didn't expect any more events")
+ case <-time.After(100 * time.Millisecond):
+ return
+ }
+}
+
+func TestConnectednessEventsSingleConn(t *testing.T) {
+ s1, sub1 := newSwarmWithSubscription(t)
+ s2, sub2 := newSwarmWithSubscription(t)
+
+ s1.Peerstore().AddAddrs(s2.LocalPeer(), []ma.Multiaddr{s2.ListenAddresses()[0]}, time.Hour)
+ _, err := s1.DialPeer(context.Background(), s2.LocalPeer())
+ require.NoError(t, err)
+
+ checkEvent(t, sub1, event.EvtPeerConnectednessChanged{Peer: s2.LocalPeer(), Connectedness: network.Connected})
+ checkEvent(t, sub2, event.EvtPeerConnectednessChanged{Peer: s1.LocalPeer(), Connectedness: network.Connected})
+
+ for _, c := range s2.ConnsToPeer(s1.LocalPeer()) {
+ require.NoError(t, c.Close())
+ }
+ checkEvent(t, sub1, event.EvtPeerConnectednessChanged{Peer: s2.LocalPeer(), Connectedness: network.NotConnected})
+ checkEvent(t, sub2, event.EvtPeerConnectednessChanged{Peer: s1.LocalPeer(), Connectedness: network.NotConnected})
+}
+
+func TestNoDeadlockWhenConsumingConnectednessEvents(t *testing.T) {
+ ctx := context.Background()
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ dialerEventBus := eventbus.NewBus()
+ dialer := swarmt.GenSwarm(t, swarmt.OptDialOnly, swarmt.EventBus(dialerEventBus))
+ defer dialer.Close()
+
+ listener := swarmt.GenSwarm(t, swarmt.OptDialOnly)
+ addrsToListen := []ma.Multiaddr{
+ ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1"),
+ }
+
+ if err := listener.Listen(addrsToListen...); err != nil {
+ t.Fatal(err)
+ }
+ listenedAddrs := listener.ListenAddresses()
+
+ dialer.Peerstore().AddAddrs(listener.LocalPeer(), listenedAddrs, time.Hour)
+
+ sub, err := dialerEventBus.Subscribe(new(event.EvtPeerConnectednessChanged))
+ require.NoError(t, err)
+
+ // A slow consumer
+ go func() {
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-sub.Out():
+ time.Sleep(100 * time.Millisecond)
+ // Do something with the swarm that needs the conns lock
+ _ = dialer.ConnsToPeer(listener.LocalPeer())
+ time.Sleep(100 * time.Millisecond)
+ }
+ }
+ }()
+
+ for i := 0; i < 10; i++ {
+ // Connect and disconnect to trigger a bunch of events
+ _, err := dialer.DialPeer(context.Background(), listener.LocalPeer())
+ require.NoError(t, err)
+ dialer.ClosePeer(listener.LocalPeer())
+ }
+
+ // The test should finish without deadlocking
+}
+
+func TestConnectednessEvents(t *testing.T) {
+ s1, sub1 := newSwarmWithSubscription(t)
+ const N = 100
+ peers := make([]*Swarm, N)
+ for i := 0; i < N; i++ {
+ peers[i] = swarmt.GenSwarm(t)
+ }
+
+ // First check all connected events
+ done := make(chan struct{})
+ go func() {
+ defer close(done)
+ for i := 0; i < N; i++ {
+ e := <-sub1.Out()
+ evt, ok := e.(event.EvtPeerConnectednessChanged)
+ if !ok {
+ t.Error("invalid event received", e)
+ return
+ }
+ if evt.Connectedness != network.Connected {
+ t.Errorf("invalid event received: expected: Connected, got: %s", evt)
+ return
+ }
+ }
+ }()
+ for i := 0; i < N; i++ {
+ s1.Peerstore().AddAddrs(peers[i].LocalPeer(), []ma.Multiaddr{peers[i].ListenAddresses()[0]}, time.Hour)
+ _, err := s1.DialPeer(context.Background(), peers[i].LocalPeer())
+ require.NoError(t, err)
+ }
+ select {
+ case <-done:
+ case <-time.After(10 * time.Second):
+ t.Fatal("expected all connectedness events to be completed")
+ }
+
+ // Disconnect some peers
+ done = make(chan struct{})
+ go func() {
+ defer close(done)
+ for i := 0; i < N/2; i++ {
+ e := <-sub1.Out()
+ evt, ok := e.(event.EvtPeerConnectednessChanged)
+ if !ok {
+ t.Error("invalid event received", e)
+ return
+ }
+ if evt.Connectedness != network.NotConnected {
+ t.Errorf("invalid event received: expected: NotConnected, got: %s", evt)
+ return
+ }
+ }
+ }()
+ for i := 0; i < N/2; i++ {
+ err := s1.ClosePeer(peers[i].LocalPeer())
+ require.NoError(t, err)
+ }
+ select {
+ case <-done:
+ case <-time.After(10 * time.Second):
+ t.Fatal("expected all disconnected events to be completed")
+ }
+
+ // Check for disconnected events on swarm close
+ done = make(chan struct{})
+ go func() {
+ defer close(done)
+ for i := N / 2; i < N; i++ {
+ e := <-sub1.Out()
+ evt, ok := e.(event.EvtPeerConnectednessChanged)
+ if !ok {
+ t.Error("invalid event received", e)
+ return
+ }
+ if evt.Connectedness != network.NotConnected {
+ t.Errorf("invalid event received: expected: NotConnected, got: %s", evt)
+ return
+ }
+ }
+ }()
+ s1.Close()
+ select {
+ case <-done:
+ case <-time.After(10 * time.Second):
+ t.Fatal("expected all disconnected events after swarm close to be completed")
+ }
+}
+
+func TestConnectednessEventDeadlock(t *testing.T) {
+ s1, sub1 := newSwarmWithSubscription(t)
+ const N = 100
+ peers := make([]*Swarm, N)
+ for i := 0; i < N; i++ {
+ peers[i] = swarmt.GenSwarm(t)
+ }
+
+ // First check all connected events
+ done := make(chan struct{})
+ go func() {
+ defer close(done)
+ count := 0
+ for count < N {
+ e := <-sub1.Out()
+ // sleep to simulate a slow consumer
+ evt, ok := e.(event.EvtPeerConnectednessChanged)
+ if !ok {
+ t.Error("invalid event received", e)
+ return
+ }
+ if evt.Connectedness != network.Connected {
+ continue
+ }
+ count++
+ s1.ClosePeer(evt.Peer)
+ }
+ }()
+ for i := 0; i < N; i++ {
+ s1.Peerstore().AddAddrs(peers[i].LocalPeer(), []ma.Multiaddr{peers[i].ListenAddresses()[0]}, time.Hour)
+ go func(i int) {
+ _, err := s1.DialPeer(context.Background(), peers[i].LocalPeer())
+ assert.NoError(t, err)
+ }(i)
+ }
+ select {
+ case <-done:
+ case <-time.After(100 * time.Second):
+ t.Fatal("expected all connectedness events to be completed")
+ }
+}
+
+func TestConnectednessEventDeadlockWithDial(t *testing.T) {
+ s1, sub1 := newSwarmWithSubscription(t)
+ const N = 200
+ peers := make([]*Swarm, N)
+ for i := 0; i < N; i++ {
+ peers[i] = swarmt.GenSwarm(t)
+ }
+ peers2 := make([]*Swarm, N)
+ for i := 0; i < N; i++ {
+ peers2[i] = swarmt.GenSwarm(t)
+ }
+
+ // First check all connected events
+ done := make(chan struct{})
+ var subWG sync.WaitGroup
+ subWG.Add(1)
+ go func() {
+ defer subWG.Done()
+ count := 0
+ for {
+ var e interface{}
+ select {
+ case e = <-sub1.Out():
+ case <-done:
+ return
+ }
+ // sleep to simulate a slow consumer
+ evt, ok := e.(event.EvtPeerConnectednessChanged)
+ if !ok {
+ t.Error("invalid event received", e)
+ return
+ }
+ if evt.Connectedness != network.Connected {
+ continue
+ }
+ if count < N {
+ time.Sleep(10 * time.Millisecond)
+ ctx, cancel := context.WithTimeout(context.Background(), 20*time.Millisecond)
+ s1.Peerstore().AddAddrs(peers2[count].LocalPeer(), []ma.Multiaddr{peers2[count].ListenAddresses()[0]}, time.Hour)
+ s1.DialPeer(ctx, peers2[count].LocalPeer())
+ count++
+ cancel()
+ }
+ }
+ }()
+ var wg sync.WaitGroup
+ wg.Add(N)
+ for i := 0; i < N; i++ {
+ s1.Peerstore().AddAddrs(peers[i].LocalPeer(), []ma.Multiaddr{peers[i].ListenAddresses()[0]}, time.Hour)
+ go func(i int) {
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
+ s1.DialPeer(ctx, peers[i].LocalPeer())
+ cancel()
+ wg.Done()
+ }(i)
+ }
+ wg.Wait()
+ s1.Close()
+
+ close(done)
+ subWG.Wait()
+}
diff --git a/p2p/net/swarm/swarm_listen.go b/p2p/net/swarm/swarm_listen.go
new file mode 100644
index 0000000000..1da22d6d67
--- /dev/null
+++ b/p2p/net/swarm/swarm_listen.go
@@ -0,0 +1,200 @@
+package swarm
+
+import (
+ "errors"
+ "fmt"
+ "slices"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/canonicallog"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/transport"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+type OrderedListener interface {
+ // Transports optionally implement this interface to indicate the relative
+ // order in which their listeners should be set up. Some transports may
+ // optionally make use of other listeners if those are already set up, e.g.
+ // WebRTC may reuse the same UDP port as QUIC, but only when QUIC is set up
+ // first. Lower values are set up first.
+}
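+
+// A hypothetical transport that wants its listener set up after QUIC's
+// could implement this as (illustrative sketch; the type name is made up):
+//
+//	func (t *webRTCTransport) ListenOrder() int { return 1 } // after QUIC (0)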
+
+// Listen sets up listeners for all of the given addresses.
+// It succeeds as long as we successfully listen on at least *one* address.
+func (s *Swarm) Listen(addrs ...ma.Multiaddr) error {
+ errs := make([]error, len(addrs))
+ var succeeded int
+
+ type addrAndListener struct {
+ addr ma.Multiaddr
+ lTpt transport.Transport
+ }
+ sortedAddrsAndTpts := make([]addrAndListener, 0, len(addrs))
+ for _, a := range addrs {
+ t := s.TransportForListening(a)
+ sortedAddrsAndTpts = append(sortedAddrsAndTpts, addrAndListener{addr: a, lTpt: t})
+ }
+ slices.SortFunc(sortedAddrsAndTpts, func(a, b addrAndListener) int {
+ aOrder := 0
+ bOrder := 0
+ if l, ok := a.lTpt.(OrderedListener); ok {
+ aOrder = l.ListenOrder()
+ }
+ if l, ok := b.lTpt.(OrderedListener); ok {
+ bOrder = l.ListenOrder()
+ }
+ return aOrder - bOrder
+ })
+
+ for i, a := range sortedAddrsAndTpts {
+ if err := s.AddListenAddr(a.addr); err != nil {
+ errs[i] = err
+ } else {
+ succeeded++
+ }
+ }
+
+ for i, e := range errs {
+ if e != nil {
+ log.Warn("listening failed", "on", sortedAddrsAndTpts[i].addr, "err", errs[i])
+ }
+ }
+
+ if succeeded == 0 && len(sortedAddrsAndTpts) > 0 {
+ return fmt.Errorf("failed to listen on any addresses: %s", errs)
+ }
+
+ return nil
+}
+
+// ListenClose stops and deletes listeners for all of the given addresses. If
+// any address belongs to one of the addresses a Listener provides, then the
+// Listener will close for *all* addresses it provides. For example, if you
+// close an address with `/quic`, then the QUIC listener will close and also
+// close any `/quic-v1` address.
+func (s *Swarm) ListenClose(addrs ...ma.Multiaddr) {
+ listenersToClose := make(map[transport.Listener]struct{}, len(addrs))
+
+ s.listeners.Lock()
+ for l := range s.listeners.m {
+ if !containsMultiaddr(addrs, l.Multiaddr()) {
+ continue
+ }
+
+ delete(s.listeners.m, l)
+ listenersToClose[l] = struct{}{}
+ }
+ s.listeners.cacheEOL = time.Time{}
+ s.listeners.Unlock()
+
+ for l := range listenersToClose {
+ l.Close()
+ }
+}
+
+// AddListenAddr tells the swarm to listen on a single address. Unlike Listen,
+// this method does not attempt to filter out bad addresses.
+func (s *Swarm) AddListenAddr(a ma.Multiaddr) error {
+ tpt := s.TransportForListening(a)
+ if tpt == nil {
+ // TransportForListening will return nil if either:
+ // 1. No transport has been registered.
+ // 2. We're closed (so we've nulled out the transport map).
+ //
+ // Distinguish between these two cases to avoid confusing users.
+ select {
+ case <-s.ctx.Done():
+ return ErrSwarmClosed
+ default:
+ return ErrNoTransport
+ }
+ }
+
+ list, err := tpt.Listen(a)
+ if err != nil {
+ return err
+ }
+
+ s.listeners.Lock()
+ if s.listeners.m == nil {
+ s.listeners.Unlock()
+ list.Close()
+ return ErrSwarmClosed
+ }
+ s.refs.Add(1)
+ s.listeners.m[list] = struct{}{}
+ s.listeners.cacheEOL = time.Time{}
+ s.listeners.Unlock()
+
+ maddr := list.Multiaddr()
+
+ // signal to our notifiees on listen.
+ s.notifyAll(func(n network.Notifiee) {
+ n.Listen(s, maddr)
+ })
+
+ go func() {
+ defer func() {
+ s.listeners.Lock()
+ _, ok := s.listeners.m[list]
+ if ok {
+ delete(s.listeners.m, list)
+ s.listeners.cacheEOL = time.Time{}
+ }
+ s.listeners.Unlock()
+
+ if ok {
+ list.Close()
+ log.Error("swarm listener unintentionally closed")
+ }
+
+ // signal to our notifiees on listen close.
+ s.notifyAll(func(n network.Notifiee) {
+ n.ListenClose(s, maddr)
+ })
+ s.refs.Done()
+ }()
+ for {
+ c, err := list.Accept()
+ if err != nil {
+ if !errors.Is(err, transport.ErrListenerClosed) {
+ log.Error("swarm listener accept error", "addr", a, "err", err)
+ }
+ return
+ }
+ canonicallog.LogPeerStatus(100, c.RemotePeer(), c.RemoteMultiaddr(), "connection_status", "established", "dir", "inbound")
+ if s.metricsTracer != nil {
+ c = wrapWithMetrics(c, s.metricsTracer, time.Now(), network.DirInbound)
+ }
+
+ log.Debug("swarm listener accepted connection", "local_multiaddr", c.LocalMultiaddr(), "remote_multiaddr", c.RemoteMultiaddr())
+ s.refs.Add(1)
+ go func() {
+ defer s.refs.Done()
+ _, err := s.addConn(c, network.DirInbound)
+ switch err {
+ case nil:
+ case ErrSwarmClosed:
+ // ignore.
+ return
+ default:
+ log.Warn("adding connection failed", "to", a, "err", err)
+ return
+ }
+ }()
+ }
+ }()
+ return nil
+}
+
+func containsMultiaddr(addrs []ma.Multiaddr, addr ma.Multiaddr) bool {
+ for _, a := range addrs {
+ if addr.Equal(a) {
+ return true
+ }
+ }
+ return false
+}
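
As a usage note for the OrderedListener hook above, here is a minimal sketch of a transport opting into listen ordering. The `wrtcLikeTransport` type and its return value are hypothetical, chosen to mirror the WebRTC-after-QUIC example in the interface's doc comment.

```go
package example

import (
	"github.com/libp2p/go-libp2p/core/transport"
	"github.com/libp2p/go-libp2p/p2p/net/swarm"
)

// wrtcLikeTransport is a hypothetical transport that wants its listener
// set up after QUIC's, e.g. so it can reuse the same UDP port.
type wrtcLikeTransport struct {
	transport.Transport // a real underlying implementation is assumed
}

// ListenOrder returns a value above 0, the implicit order of transports
// that do not implement OrderedListener, so those are set up first.
func (t *wrtcLikeTransport) ListenOrder() int { return 1 }

// Compile-time check that the interface is satisfied.
var _ swarm.OrderedListener = &wrtcLikeTransport{}
```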
diff --git a/p2p/net/swarm/swarm_metrics.go b/p2p/net/swarm/swarm_metrics.go
new file mode 100644
index 0000000000..2413e9faed
--- /dev/null
+++ b/p2p/net/swarm/swarm_metrics.go
@@ -0,0 +1,298 @@
+package swarm
+
+import (
+ "context"
+ "errors"
+ "net"
+ "strings"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/p2p/metricshelper"
+
+ ma "github.com/multiformats/go-multiaddr"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+const metricNamespace = "libp2p_swarm"
+
+var (
+ connsOpened = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "connections_opened_total",
+ Help: "Connections Opened",
+ },
+ []string{"dir", "transport", "security", "muxer", "early_muxer", "ip_version"},
+ )
+ keyTypes = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "key_types_total",
+ Help: "key type",
+ },
+ []string{"dir", "key_type"},
+ )
+ connsClosed = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "connections_closed_total",
+ Help: "Connections Closed",
+ },
+ []string{"dir", "transport", "security", "muxer", "early_muxer", "ip_version"},
+ )
+ dialError = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "dial_errors_total",
+ Help: "Dial Error",
+ },
+ []string{"transport", "error", "ip_version"},
+ )
+ connDuration = prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Namespace: metricNamespace,
+ Name: "connection_duration_seconds",
+ Help: "Duration of a Connection",
+ Buckets: prometheus.ExponentialBuckets(1.0/16, 2, 25), // largest bucket ~12 days
+ },
+ []string{"dir", "transport", "security", "muxer", "early_muxer", "ip_version"},
+ )
+ connHandshakeLatency = prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Namespace: metricNamespace,
+ Name: "handshake_latency_seconds",
+ Help: "Duration of the libp2p Handshake",
+ Buckets: prometheus.ExponentialBuckets(0.001, 1.3, 35),
+ },
+ []string{"transport", "security", "muxer", "early_muxer", "ip_version"},
+ )
+ dialsPerPeer = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "dials_per_peer_total",
+ Help: "Number of addresses dialed per peer",
+ },
+ []string{"outcome", "num_dials"},
+ )
+ dialLatency = prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Namespace: metricNamespace,
+ Name: "dial_latency_seconds",
+ Help: "time taken to establish connection with the peer",
+ Buckets: []float64{0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.75, 1, 2},
+ },
+ []string{"outcome", "num_dials"},
+ )
+ dialRankingDelay = prometheus.NewHistogram(
+ prometheus.HistogramOpts{
+ Namespace: metricNamespace,
+ Name: "dial_ranking_delay_seconds",
+ Help: "delay introduced by the dial ranking logic",
+ Buckets: []float64{0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.75, 1, 2},
+ },
+ )
+ blackHoleSuccessCounterState = prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "black_hole_filter_state",
+ Help: "State of the black hole filter",
+ },
+ []string{"name"},
+ )
+ blackHoleSuccessCounterSuccessFraction = prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "black_hole_filter_success_fraction",
+ Help: "Fraction of successful dials among the last n requests",
+ },
+ []string{"name"},
+ )
+ blackHoleSuccessCounterNextRequestAllowedAfter = prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "black_hole_filter_next_request_allowed_after",
+ Help: "Number of requests after which the next request will be allowed",
+ },
+ []string{"name"},
+ )
+ collectors = []prometheus.Collector{
+ connsOpened,
+ keyTypes,
+ connsClosed,
+ dialError,
+ connDuration,
+ connHandshakeLatency,
+ dialsPerPeer,
+ dialRankingDelay,
+ dialLatency,
+ blackHoleSuccessCounterSuccessFraction,
+ blackHoleSuccessCounterState,
+ blackHoleSuccessCounterNextRequestAllowedAfter,
+ }
+)
+
+type MetricsTracer interface {
+ OpenedConnection(network.Direction, crypto.PubKey, network.ConnectionState, ma.Multiaddr)
+ ClosedConnection(network.Direction, time.Duration, network.ConnectionState, ma.Multiaddr)
+ CompletedHandshake(time.Duration, network.ConnectionState, ma.Multiaddr)
+ FailedDialing(ma.Multiaddr, error, error)
+ DialCompleted(success bool, totalDials int, latency time.Duration)
+ DialRankingDelay(d time.Duration)
+ UpdatedBlackHoleSuccessCounter(name string, state BlackHoleState, nextProbeAfter int, successFraction float64)
+}
+
+type metricsTracer struct{}
+
+var _ MetricsTracer = &metricsTracer{}
+
+type metricsTracerSetting struct {
+ reg prometheus.Registerer
+}
+
+type MetricsTracerOption func(*metricsTracerSetting)
+
+func WithRegisterer(reg prometheus.Registerer) MetricsTracerOption {
+ return func(s *metricsTracerSetting) {
+ if reg != nil {
+ s.reg = reg
+ }
+ }
+}
+
+func NewMetricsTracer(opts ...MetricsTracerOption) MetricsTracer {
+ setting := &metricsTracerSetting{reg: prometheus.DefaultRegisterer}
+ for _, opt := range opts {
+ opt(setting)
+ }
+ metricshelper.RegisterCollectors(setting.reg, collectors...)
+ return &metricsTracer{}
+}
+
+func appendConnectionState(tags []string, cs network.ConnectionState) []string {
+ if cs.Transport == "" {
+ // This shouldn't happen, unless the transport doesn't properly set the Transport field in the ConnectionState.
+ tags = append(tags, "unknown")
+ } else {
+ tags = append(tags, cs.Transport)
+ }
+ // These might be empty, depending on the transport.
+ // For example, QUIC sets neither security nor muxer.
+ tags = append(tags, string(cs.Security))
+ tags = append(tags, string(cs.StreamMultiplexer))
+
+ earlyMuxer := "false"
+ if cs.UsedEarlyMuxerNegotiation {
+ earlyMuxer = "true"
+ }
+ tags = append(tags, earlyMuxer)
+ return tags
+}
+
+func (m *metricsTracer) OpenedConnection(dir network.Direction, p crypto.PubKey, cs network.ConnectionState, laddr ma.Multiaddr) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ *tags = append(*tags, metricshelper.GetDirection(dir))
+ *tags = appendConnectionState(*tags, cs)
+ *tags = append(*tags, metricshelper.GetIPVersion(laddr))
+ connsOpened.WithLabelValues(*tags...).Inc()
+
+ *tags = (*tags)[:0]
+ *tags = append(*tags, metricshelper.GetDirection(dir))
+ *tags = append(*tags, p.Type().String())
+ keyTypes.WithLabelValues(*tags...).Inc()
+}
+
+func (m *metricsTracer) ClosedConnection(dir network.Direction, duration time.Duration, cs network.ConnectionState, laddr ma.Multiaddr) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ *tags = append(*tags, metricshelper.GetDirection(dir))
+ *tags = appendConnectionState(*tags, cs)
+ *tags = append(*tags, metricshelper.GetIPVersion(laddr))
+ connsClosed.WithLabelValues(*tags...).Inc()
+ connDuration.WithLabelValues(*tags...).Observe(duration.Seconds())
+}
+
+func (m *metricsTracer) CompletedHandshake(t time.Duration, cs network.ConnectionState, laddr ma.Multiaddr) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ *tags = appendConnectionState(*tags, cs)
+ *tags = append(*tags, metricshelper.GetIPVersion(laddr))
+ connHandshakeLatency.WithLabelValues(*tags...).Observe(t.Seconds())
+}
+
+func (m *metricsTracer) FailedDialing(addr ma.Multiaddr, dialErr error, cause error) {
+ transport := metricshelper.GetTransport(addr)
+ e := "other"
+ // dial deadline exceeded or the parent context's deadline exceeded
+ if errors.Is(dialErr, context.DeadlineExceeded) || errors.Is(cause, context.DeadlineExceeded) {
+ e = "deadline"
+ } else if errors.Is(dialErr, context.Canceled) {
+ // dial was cancelled.
+ if errors.Is(cause, context.Canceled) {
+ // parent context was canceled
+ e = "application canceled"
+ } else if errors.Is(cause, errConcurrentDialSuccessful) {
+ e = "canceled: concurrent dial successful"
+ } else {
+ // something else
+ e = "canceled: other"
+ }
+ } else {
+ nerr, ok := dialErr.(net.Error)
+ if ok && nerr.Timeout() {
+ e = "timeout"
+ } else if strings.Contains(dialErr.Error(), "connect: connection refused") {
+ e = "connection refused"
+ }
+ }
+
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ *tags = append(*tags, transport, e)
+ *tags = append(*tags, metricshelper.GetIPVersion(addr))
+ dialError.WithLabelValues(*tags...).Inc()
+}
+
+func (m *metricsTracer) DialCompleted(success bool, totalDials int, latency time.Duration) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+ if success {
+ *tags = append(*tags, "success")
+ } else {
+ *tags = append(*tags, "failed")
+ }
+
+ numDialLabels := [...]string{"0", "1", "2", "3", "4", "5", ">=6"}
+ var numDials string
+ if totalDials < len(numDialLabels) {
+ numDials = numDialLabels[totalDials]
+ } else {
+ numDials = numDialLabels[len(numDialLabels)-1]
+ }
+ *tags = append(*tags, numDials)
+ dialsPerPeer.WithLabelValues(*tags...).Inc()
+ dialLatency.WithLabelValues(*tags...).Observe(latency.Seconds())
+}
+
+func (m *metricsTracer) DialRankingDelay(d time.Duration) {
+ dialRankingDelay.Observe(d.Seconds())
+}
+
+func (m *metricsTracer) UpdatedBlackHoleSuccessCounter(name string, state BlackHoleState,
+ nextProbeAfter int, successFraction float64) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ *tags = append(*tags, name)
+
+ blackHoleSuccessCounterState.WithLabelValues(*tags...).Set(float64(state))
+ blackHoleSuccessCounterSuccessFraction.WithLabelValues(*tags...).Set(successFraction)
+ blackHoleSuccessCounterNextRequestAllowedAfter.WithLabelValues(*tags...).Set(float64(nextProbeAfter))
+}
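
A short usage sketch for these collectors: the WithRegisterer option defined above lets callers attach the metrics to their own Prometheus registry instead of the global default. Handing the resulting tracer to the swarm happens through the swarm's metrics option elsewhere in this changeset; only the registration step is shown here.

```go
package example

import (
	"github.com/libp2p/go-libp2p/p2p/net/swarm"
	"github.com/prometheus/client_golang/prometheus"
)

// newTracerWithRegistry registers the swarm collectors on a fresh
// registry instead of prometheus.DefaultRegisterer, which keeps tests
// and embedders from polluting the global registry.
func newTracerWithRegistry() (swarm.MetricsTracer, *prometheus.Registry) {
	reg := prometheus.NewRegistry()
	return swarm.NewMetricsTracer(swarm.WithRegisterer(reg)), reg
}
```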
diff --git a/p2p/net/swarm/swarm_metrics_test.go b/p2p/net/swarm/swarm_metrics_test.go
new file mode 100644
index 0000000000..151765931b
--- /dev/null
+++ b/p2p/net/swarm/swarm_metrics_test.go
@@ -0,0 +1,113 @@
+//go:build nocover
+
+package swarm
+
+import (
+ "context"
+ "crypto/rand"
+ "net"
+ "syscall"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/network"
+ ma "github.com/multiformats/go-multiaddr"
+
+ mrand "math/rand"
+
+ "github.com/stretchr/testify/require"
+)
+
+func BenchmarkMetricsConnOpen(b *testing.B) {
+ b.ReportAllocs()
+ quicConnState := network.ConnectionState{Transport: "quic"}
+ tcpConnState := network.ConnectionState{
+ StreamMultiplexer: "yamux",
+ Security: "tls",
+ Transport: "tcp",
+ }
+ _, pub, err := crypto.GenerateEd25519Key(rand.Reader)
+ require.NoError(b, err)
+ quicAddr := ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1")
+ tcpAddr := ma.StringCast("/ip4/1.2.3.4/tcp/1/")
+ tr := NewMetricsTracer()
+ for i := 0; i < b.N; i++ {
+ switch i % 2 {
+ case 0:
+ tr.OpenedConnection(network.DirInbound, pub, quicConnState, quicAddr)
+ case 1:
+ tr.OpenedConnection(network.DirInbound, pub, tcpConnState, tcpAddr)
+ }
+ }
+}
+
+func randItem[T any](items []T) T {
+ return items[mrand.Intn(len(items))]
+}
+
+func TestMetricsNoAllocNoCover(t *testing.T) {
+ mt := NewMetricsTracer()
+
+ connections := []network.ConnectionState{
+ {StreamMultiplexer: "yamux", Security: "tls", Transport: "tcp", UsedEarlyMuxerNegotiation: true},
+ {StreamMultiplexer: "yamux", Security: "noise", Transport: "tcp", UsedEarlyMuxerNegotiation: false},
+ {StreamMultiplexer: "", Security: "", Transport: "quic"},
+ {StreamMultiplexer: "another-yamux", Security: "noise", Transport: "tcp"},
+ }
+
+ directions := []network.Direction{network.DirInbound, network.DirOutbound}
+
+ _, pub1, err := crypto.GenerateEd25519Key(rand.Reader)
+ require.NoError(t, err)
+ _, pub2, err := crypto.GenerateEd25519Key(rand.Reader)
+ require.NoError(t, err)
+ _, pub3, err := crypto.GenerateEd25519Key(rand.Reader)
+ require.NoError(t, err)
+ keys := []crypto.PubKey{pub1, pub2, pub3}
+
+ errors := []error{
+ context.Canceled,
+ context.DeadlineExceeded,
+ &net.OpError{Err: syscall.ETIMEDOUT},
+ }
+
+ addrs := []ma.Multiaddr{
+ ma.StringCast("/ip4/1.2.3.4/tcp/1"),
+ ma.StringCast("/ip4/1.2.3.4/tcp/2"),
+ ma.StringCast("/ip4/1.2.3.4/udp/2345"),
+ }
+
+ bhfNames := []string{"udp", "ipv6", "tcp", "icmp"}
+ bhfState := []BlackHoleState{blackHoleStateAllowed, blackHoleStateBlocked}
+
+ tests := map[string]func(){
+ "OpenedConnection": func() {
+ mt.OpenedConnection(randItem(directions), randItem(keys), randItem(connections), randItem(addrs))
+ },
+ "ClosedConnection": func() {
+ mt.ClosedConnection(randItem(directions), time.Duration(mrand.Intn(100))*time.Second, randItem(connections), randItem(addrs))
+ },
+ "CompletedHandshake": func() {
+ mt.CompletedHandshake(time.Duration(mrand.Intn(100))*time.Second, randItem(connections), randItem(addrs))
+ },
+ "FailedDialing": func() { mt.FailedDialing(randItem(addrs), randItem(errors), randItem(errors)) },
+ "DialCompleted": func() { mt.DialCompleted(mrand.Intn(2) == 1, mrand.Intn(10), time.Duration(mrand.Intn(1000_000_000))) },
+ "DialRankingDelay": func() { mt.DialRankingDelay(time.Duration(mrand.Intn(1e10))) },
+ "UpdatedBlackHoleSuccessCounter": func() {
+ mt.UpdatedBlackHoleSuccessCounter(
+ randItem(bhfNames),
+ randItem(bhfState),
+ mrand.Intn(100),
+ mrand.Float64(),
+ )
+ },
+ }
+
+ for method, f := range tests {
+ allocs := testing.AllocsPerRun(1000, f)
+ if allocs > 0 {
+ t.Fatalf("Alloc Test: %s, got: %0.2f, expected: 0 allocs", method, allocs)
+ }
+ }
+}
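
The zero-allocation property asserted above comes from reusing label slices instead of allocating one per observation. Below is a minimal sketch of that pattern, assuming metricshelper's GetStringSlice/PutStringSlice are the usual sync.Pool wrappers; the pool and recordConnOpened are illustrative, not the library's actual code.

```go
package example

import "sync"

// labelPool mirrors how metricshelper avoids allocating a fresh []string
// for Prometheus label values on every metric observation.
var labelPool = sync.Pool{
	New: func() any { s := make([]string, 0, 8); return &s },
}

// recordConnOpened is an illustrative hot-path function: it borrows a
// slice, fills in label values, and returns the slice to the pool.
func recordConnOpened(dir, transport string, inc func(labels ...string)) {
	tags := labelPool.Get().(*[]string)
	defer func() {
		*tags = (*tags)[:0] // reset length, keep capacity
		labelPool.Put(tags)
	}()
	*tags = append(*tags, dir, transport)
	inc(*tags...) // e.g. a CounterVec's WithLabelValues(...).Inc()
}
```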
diff --git a/p2p/net/swarm/swarm_net_test.go b/p2p/net/swarm/swarm_net_test.go
new file mode 100644
index 0000000000..1dbdc57bcc
--- /dev/null
+++ b/p2p/net/swarm/swarm_net_test.go
@@ -0,0 +1,163 @@
+package swarm_test
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ . "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+// TestConnectednessCorrect starts a few networks, connects some of them,
+// and tests that the reported Connectedness values are correct.
+func TestConnectednessCorrect(t *testing.T) {
+ nets := make([]network.Network, 4)
+ for i := 0; i < 4; i++ {
+ nets[i] = GenSwarm(t)
+ }
+
+ // connect 0-1, 0-3, 1-2, 3-2
+
+ dial := func(a, b network.Network) {
+ DivulgeAddresses(b, a)
+ if _, err := a.DialPeer(context.Background(), b.LocalPeer()); err != nil {
+ t.Fatalf("Failed to dial: %s", err)
+ }
+ }
+
+ dial(nets[0], nets[1])
+ dial(nets[0], nets[3])
+ dial(nets[1], nets[2])
+ dial(nets[3], nets[2])
+
+ // The notifications for new connections get sent out asynchronously.
+ // There is the potential for a race condition here, so we sleep to ensure
+ // that they have been received.
+ time.Sleep(time.Millisecond * 100)
+
+ // test those connected show up correctly
+
+ // test connected
+ expectConnectedness(t, nets[0], nets[1], network.Connected)
+ expectConnectedness(t, nets[0], nets[3], network.Connected)
+ expectConnectedness(t, nets[1], nets[2], network.Connected)
+ expectConnectedness(t, nets[3], nets[2], network.Connected)
+
+ // test not connected
+ expectConnectedness(t, nets[0], nets[2], network.NotConnected)
+ expectConnectedness(t, nets[1], nets[3], network.NotConnected)
+
+ require.Len(t, nets[0].Peers(), 2, "expected net 0 to have two peers")
+ require.Len(t, nets[2].Peers(), 2, "expected net 2 to have two peers")
+ require.Empty(t, nets[1].ConnsToPeer(nets[3].LocalPeer()), "net 1 should have no connections to net 3")
+ require.NoError(t, nets[2].ClosePeer(nets[1].LocalPeer()))
+
+ time.Sleep(time.Millisecond * 50)
+ expectConnectedness(t, nets[2], nets[1], network.NotConnected)
+
+ for _, n := range nets {
+ n.Close()
+ }
+}
+
+func expectConnectedness(t *testing.T, a, b network.Network, expected network.Connectedness) {
+ es := "%s is connected to %s, but Connectedness incorrect. %s %s %s"
+ atob := a.Connectedness(b.LocalPeer())
+ btoa := b.Connectedness(a.LocalPeer())
+ if atob != expected {
+ t.Errorf(es, a, b, printConns(a), printConns(b), atob)
+ }
+
+ // test symmetric case
+ if btoa != expected {
+ t.Errorf(es, b, a, printConns(b), printConns(a), btoa)
+ }
+}
+
+func printConns(n network.Network) string {
+ s := fmt.Sprintf("Connections in %s:\n", n)
+ for _, c := range n.Conns() {
+ s = s + fmt.Sprintf("- %s\n", c)
+ }
+ return s
+}
+
+func TestNetworkOpenStream(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ testString := "hello ipfs"
+
+ nets := make([]network.Network, 4)
+ for i := 0; i < 4; i++ {
+ nets[i] = GenSwarm(t)
+ }
+
+ dial := func(a, b network.Network) {
+ DivulgeAddresses(b, a)
+ if _, err := a.DialPeer(ctx, b.LocalPeer()); err != nil {
+ t.Fatalf("Failed to dial: %s", err)
+ }
+ }
+
+ dial(nets[0], nets[1])
+ dial(nets[0], nets[3])
+ dial(nets[1], nets[2])
+
+ done := make(chan bool)
+ nets[1].SetStreamHandler(func(s network.Stream) {
+ defer close(done)
+ defer s.Close()
+
+ buf, err := io.ReadAll(s)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ if string(buf) != testString {
+ t.Error("got wrong message")
+ }
+ })
+
+ s, err := nets[0].NewStream(ctx, nets[1].LocalPeer())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var numStreams int
+ for _, conn := range nets[0].ConnsToPeer(nets[1].LocalPeer()) {
+ numStreams += conn.Stat().NumStreams
+ }
+
+ if numStreams != 1 {
+ t.Fatal("should only have one stream there")
+ }
+
+ n, err := s.Write([]byte(testString))
+ if err != nil {
+ t.Fatal(err)
+ } else if n != len(testString) {
+ t.Errorf("expected to write %d bytes, wrote %d", len(testString), n)
+ }
+
+ err = s.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ select {
+ case <-done:
+ case <-time.After(time.Millisecond * 100):
+ t.Fatal("timed out waiting on stream")
+ }
+
+ _, err = nets[1].NewStream(ctx, nets[3].LocalPeer())
+ if err == nil {
+ t.Fatal("expected stream open 1->3 to fail")
+ }
+}
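
The fixed sleeps in this file exist because connection notifications are delivered asynchronously. Where that ever turns flaky, the same checks can be phrased as polling assertions; a sketch using require.Eventually, with an arbitrary 2s/10ms budget:

```go
package swarm_test

import (
	"testing"
	"time"

	"github.com/libp2p/go-libp2p/core/network"
	"github.com/stretchr/testify/require"
)

// waitForConnectedness polls until the observed Connectedness matches,
// avoiding a fixed sleep.
func waitForConnectedness(t *testing.T, a, b network.Network, want network.Connectedness) {
	t.Helper()
	require.Eventually(t, func() bool {
		return a.Connectedness(b.LocalPeer()) == want
	}, 2*time.Second, 10*time.Millisecond)
}
```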
diff --git a/p2p/net/swarm/swarm_notif_test.go b/p2p/net/swarm/swarm_notif_test.go
new file mode 100644
index 0000000000..e6fb1698d3
--- /dev/null
+++ b/p2p/net/swarm/swarm_notif_test.go
@@ -0,0 +1,185 @@
+package swarm_test
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ . "github.com/libp2p/go-libp2p/p2p/net/swarm"
+
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestNotifications(t *testing.T) {
+ const swarmSize = 5
+
+ notifiees := make([]*netNotifiee, swarmSize)
+
+ swarms := makeSwarms(t, swarmSize)
+ defer func() {
+ for i, s := range swarms {
+ select {
+ case <-notifiees[i].listenClose:
+ t.Error("should not have been closed")
+ default:
+ }
+ require.NoError(t, s.Close())
+ select {
+ case <-notifiees[i].listenClose:
+ default:
+ t.Error("expected a listen close notification")
+ }
+ }
+ }()
+
+ const timeout = 5 * time.Second
+
+ // signup notifs
+ for i, swarm := range swarms {
+ n := newNetNotifiee(swarmSize)
+ swarm.Notify(n)
+ notifiees[i] = n
+ }
+
+ connectSwarms(t, context.Background(), swarms)
+
+ time.Sleep(50 * time.Millisecond)
+ // should've gotten 5 by now.
+
+ // test everyone got the correct connection opened calls
+ for i, s := range swarms {
+ n := notifiees[i]
+ notifs := make(map[peer.ID][]network.Conn)
+ for j, s2 := range swarms {
+ if i == j {
+ continue
+ }
+
+ // this feels a little sketchy, but it's probably okay
+ for len(s.ConnsToPeer(s2.LocalPeer())) != len(notifs[s2.LocalPeer()]) {
+ select {
+ case c := <-n.connected:
+ nfp := notifs[c.RemotePeer()]
+ notifs[c.RemotePeer()] = append(nfp, c)
+ case <-time.After(timeout):
+ t.Fatal("timeout")
+ }
+ }
+ }
+
+ for p, cons := range notifs {
+ expect := s.ConnsToPeer(p)
+ if len(expect) != len(cons) {
+ t.Fatal("got different number of connections")
+ }
+
+ for _, c := range cons {
+ var found bool
+ for _, c2 := range expect {
+ if c == c2 {
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ t.Fatal("connection not found!")
+ }
+ }
+ }
+ }
+
+ normalizeAddrs := func(a ma.Multiaddr, isLocal bool) ma.Multiaddr {
+ // remove certhashes
+ x, _ := ma.SplitFunc(a, func(c ma.Component) bool {
+ return c.Protocol().Code == ma.P_CERTHASH
+ })
+ // on local addrs, replace 0.0.0.0 with 127.0.0.1
+ if isLocal {
+ if manet.IsIPUnspecified(x) {
+ ip, rest := ma.SplitFirst(x)
+ if ip.Protocol().Code == ma.P_IP4 {
+ return ma.StringCast("/ip4/127.0.0.1").Encapsulate(rest)
+ } else {
+ return ma.StringCast("/ip6/::1").Encapsulate(rest)
+ }
+ }
+ }
+ return x
+ }
+ complement := func(c network.Conn) (*Swarm, *netNotifiee, *Conn) {
+ for i, s := range swarms {
+ for _, c2 := range s.Conns() {
+ if normalizeAddrs(c.LocalMultiaddr(), true).Equal(normalizeAddrs(c2.RemoteMultiaddr(), false)) &&
+ normalizeAddrs(c2.LocalMultiaddr(), true).Equal(normalizeAddrs(c.RemoteMultiaddr(), false)) {
+ return s, notifiees[i], c2.(*Conn)
+ }
+ }
+ }
+ t.Fatal("complementary conn not found", c)
+ return nil, nil, nil
+ }
+
+ // close conns
+ for i, s := range swarms {
+ n := notifiees[i]
+ for _, c := range s.Conns() {
+ _, n2, c2 := complement(c)
+ c.Close()
+ c2.Close()
+
+ var c3, c4 network.Conn
+ select {
+ case c3 = <-n.disconnected:
+ case <-time.After(timeout):
+ t.Fatal("timeout")
+ }
+ if c != c3 {
+ t.Fatal("got incorrect conn", c, c3)
+ }
+
+ select {
+ case c4 = <-n2.disconnected:
+ case <-time.After(timeout):
+ t.Fatal("timeout")
+ }
+ if c2 != c4 {
+ t.Fatal("got incorrect conn", c, c2)
+ }
+ }
+ }
+}
+
+type netNotifiee struct {
+ listen chan ma.Multiaddr
+ listenClose chan ma.Multiaddr
+ connected chan network.Conn
+ disconnected chan network.Conn
+}
+
+func newNetNotifiee(buffer int) *netNotifiee {
+ return &netNotifiee{
+ listen: make(chan ma.Multiaddr, buffer),
+ listenClose: make(chan ma.Multiaddr, buffer),
+ connected: make(chan network.Conn, buffer),
+ disconnected: make(chan network.Conn, buffer),
+ }
+}
+
+func (nn *netNotifiee) Listen(_ network.Network, a ma.Multiaddr) {
+ nn.listen <- a
+}
+func (nn *netNotifiee) ListenClose(_ network.Network, a ma.Multiaddr) {
+ nn.listenClose <- a
+}
+func (nn *netNotifiee) Connected(_ network.Network, v network.Conn) {
+ nn.connected <- v
+}
+func (nn *netNotifiee) Disconnected(_ network.Network, v network.Conn) {
+ nn.disconnected <- v
+}
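
As a side note for future tests: core/network also ships NotifyBundle, which avoids declaring a dedicated notifiee type when only one or two callbacks matter. A hedged equivalent of netNotifiee's connected channel:

```go
package swarm_test

import "github.com/libp2p/go-libp2p/core/network"

// subscribeConnected registers a one-off notifiee on n and returns a
// channel that receives each newly established connection.
func subscribeConnected(n network.Network) <-chan network.Conn {
	connected := make(chan network.Conn, 16)
	n.Notify(&network.NotifyBundle{
		ConnectedF: func(_ network.Network, c network.Conn) {
			select {
			case connected <- c:
			default: // drop the event if the buffer is full
			}
		},
	})
	return connected
}
```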
diff --git a/p2p/net/swarm/swarm_stream.go b/p2p/net/swarm/swarm_stream.go
new file mode 100644
index 0000000000..4fee368250
--- /dev/null
+++ b/p2p/net/swarm/swarm_stream.go
@@ -0,0 +1,185 @@
+package swarm
+
+import (
+ "fmt"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/protocol"
+)
+
+// Validate Stream conforms to the network.Stream interface
+var _ network.Stream = &Stream{}
+
+// Stream is the stream type used by swarm. In general, you won't use this type
+// directly.
+type Stream struct {
+ id uint64
+
+ stream network.MuxedStream
+ conn *Conn
+ scope network.StreamManagementScope
+
+ closeMx sync.Mutex
+ isClosed bool
+ // acceptStreamGoroutineCompleted indicates whether the goroutine handling the incoming stream has exited
+ acceptStreamGoroutineCompleted bool
+
+ protocol atomic.Pointer[protocol.ID]
+
+ stat network.Stats
+}
+
+func (s *Stream) ID() string {
+ // format: <swarm id>-<conn id>-<stream id>
+ return fmt.Sprintf("%s-%d", s.conn.ID(), s.id)
+}
+
+func (s *Stream) String() string {
+ return fmt.Sprintf(
+ " %s (%s)>",
+ s.conn.conn.Transport(),
+ s.conn.LocalMultiaddr(),
+ s.conn.LocalPeer(),
+ s.conn.RemoteMultiaddr(),
+ s.conn.RemotePeer(),
+ )
+}
+
+// Conn returns the Conn associated with this stream, as an network.Conn
+func (s *Stream) Conn() network.Conn {
+ return s.conn
+}
+
+// Read reads bytes from a stream.
+func (s *Stream) Read(p []byte) (int, error) {
+ n, err := s.stream.Read(p)
+ // TODO: push this down to a lower level for better accuracy.
+ if s.conn.swarm.bwc != nil {
+ s.conn.swarm.bwc.LogRecvMessage(int64(n))
+ s.conn.swarm.bwc.LogRecvMessageStream(int64(n), s.Protocol(), s.Conn().RemotePeer())
+ }
+ return n, err
+}
+
+// Write writes bytes to a stream, flushing for each call.
+func (s *Stream) Write(p []byte) (int, error) {
+ n, err := s.stream.Write(p)
+ // TODO: push this down to a lower level for better accuracy.
+ if s.conn.swarm.bwc != nil {
+ s.conn.swarm.bwc.LogSentMessage(int64(n))
+ s.conn.swarm.bwc.LogSentMessageStream(int64(n), s.Protocol(), s.Conn().RemotePeer())
+ }
+ return n, err
+}
+
+// Close closes the stream, closing both ends and freeing all associated
+// resources.
+func (s *Stream) Close() error {
+ err := s.stream.Close()
+ s.closeAndRemoveStream()
+ return err
+}
+
+// Reset resets the stream, signaling an error on both ends and freeing all
+// associated resources.
+func (s *Stream) Reset() error {
+ err := s.stream.Reset()
+ s.closeAndRemoveStream()
+ return err
+}
+
+func (s *Stream) ResetWithError(errCode network.StreamErrorCode) error {
+ err := s.stream.ResetWithError(errCode)
+ s.closeAndRemoveStream()
+ return err
+}
+
+func (s *Stream) closeAndRemoveStream() {
+ s.closeMx.Lock()
+ defer s.closeMx.Unlock()
+ if s.isClosed {
+ return
+ }
+ s.isClosed = true
+ // Release the swarm ref now: the stream must not keep the swarm from closing while the stream handler is still running.
+ s.conn.swarm.refs.Done()
+ // Clean up the stream from the connection only after the stream handler has completed.
+ if s.acceptStreamGoroutineCompleted {
+ s.conn.removeStream(s)
+ }
+}
+
+// CloseWrite closes the stream for writing, flushing all data and sending an EOF.
+// This function does not free resources, call Close or Reset when done with the
+// stream.
+func (s *Stream) CloseWrite() error {
+ return s.stream.CloseWrite()
+}
+
+// CloseRead closes the stream for reading. This function does not free resources,
+// call Close or Reset when done with the stream.
+func (s *Stream) CloseRead() error {
+ return s.stream.CloseRead()
+}
+
+func (s *Stream) completeAcceptStreamGoroutine() {
+ s.closeMx.Lock()
+ defer s.closeMx.Unlock()
+ if s.acceptStreamGoroutineCompleted {
+ return
+ }
+ s.acceptStreamGoroutineCompleted = true
+ if s.isClosed {
+ s.conn.removeStream(s)
+ }
+}
+
+// Protocol returns the protocol negotiated on this stream (if set).
+func (s *Stream) Protocol() protocol.ID {
+ p := s.protocol.Load()
+ if p == nil {
+ return ""
+ }
+ return *p
+}
+
+// SetProtocol sets the protocol for this stream.
+//
+// This doesn't actually *do* anything other than record the fact that we're
+// speaking the given protocol over this stream. It's still up to the user to
+// negotiate the protocol. This is usually done by the Host.
+func (s *Stream) SetProtocol(p protocol.ID) error {
+ if err := s.scope.SetProtocol(p); err != nil {
+ return err
+ }
+
+ s.protocol.Store(&p)
+ return nil
+}
+
+// SetDeadline sets the read and write deadlines for this stream.
+func (s *Stream) SetDeadline(t time.Time) error {
+ return s.stream.SetDeadline(t)
+}
+
+// SetReadDeadline sets the read deadline for this stream.
+func (s *Stream) SetReadDeadline(t time.Time) error {
+ return s.stream.SetReadDeadline(t)
+}
+
+// SetWriteDeadline sets the write deadline for this stream.
+func (s *Stream) SetWriteDeadline(t time.Time) error {
+ return s.stream.SetWriteDeadline(t)
+}
+
+// Stat returns metadata information for this stream.
+func (s *Stream) Stat() network.Stats {
+ return s.stat
+}
+
+func (s *Stream) Scope() network.StreamScope {
+ return s.scope
+}
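
Putting the methods above together, a typical client-side stream lifecycle looks roughly like the sketch below. The protocol ID and the assumption that swarm `s` is already connected to peer `p` are illustrative, not part of this change.

```go
package example

import (
	"context"
	"io"

	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/p2p/net/swarm"
)

// pingPeer opens a stream, tags the protocol, does one write/read round
// trip, and cleans up. Error paths reset the stream so resources are
// freed on both ends, per the doc comments above.
func pingPeer(s *swarm.Swarm, p peer.ID) error {
	str, err := s.NewStream(context.Background(), p)
	if err != nil {
		return err
	}
	// Record the protocol we intend to speak (the ID here is illustrative).
	// As SetProtocol's doc comment notes, negotiation itself is up to the
	// caller, usually the Host.
	if err := str.SetProtocol("/echo/1.0.0"); err != nil {
		str.Reset()
		return err
	}
	if _, err := str.Write([]byte("ping")); err != nil {
		str.Reset()
		return err
	}
	// Signal EOF to the remote side, then drain the reply.
	if err := str.CloseWrite(); err != nil {
		str.Reset()
		return err
	}
	if _, err := io.ReadAll(str); err != nil {
		str.Reset()
		return err
	}
	return str.Close()
}
```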
diff --git a/p2p/net/swarm/swarm_test.go b/p2p/net/swarm/swarm_test.go
new file mode 100644
index 0000000000..eca721c2b4
--- /dev/null
+++ b/p2p/net/swarm/swarm_test.go
@@ -0,0 +1,628 @@
+package swarm_test
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "slices"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/control"
+ "github.com/libp2p/go-libp2p/core/network"
+ mocknetwork "github.com/libp2p/go-libp2p/core/network/mocks"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/libp2p/go-libp2p/core/protocol"
+ "github.com/libp2p/go-libp2p/core/test"
+ "github.com/libp2p/go-libp2p/p2p/net/swarm"
+ . "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
+
+ logging "github.com/libp2p/go-libp2p/gologshim"
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/mock/gomock"
+)
+
+var log = logging.Logger("swarm_test")
+
+func EchoStreamHandler(stream network.Stream) {
+ go func() {
+ defer stream.Close()
+
+ // pull out the underlying conn
+ c := stream.Conn()
+ log.Info("ponging to peer", "local", c.LocalPeer(), "remote", c.RemotePeer())
+
+ buf := make([]byte, 4)
+
+ for {
+ if _, err := stream.Read(buf); err != nil {
+ if err != io.EOF {
+ log.Error("ping receive error", "err", err)
+ }
+ return
+ }
+
+ if !bytes.Equal(buf, []byte("ping")) {
+ log.Error("ping receive error", "err", fmt.Errorf("ping mismatch: %s", string(buf)))
+ return
+ }
+
+ if _, err := stream.Write([]byte("pong")); err != nil {
+ log.Error("pong send error", "err", err)
+ return
+ }
+ }
+ }()
+}
+
+func makeDialOnlySwarm(t *testing.T) *swarm.Swarm {
+ swarm := GenSwarm(t, OptDialOnly)
+ swarm.SetStreamHandler(EchoStreamHandler)
+ return swarm
+}
+
+func makeSwarms(t *testing.T, num int, opts ...Option) []*swarm.Swarm {
+ swarms := make([]*swarm.Swarm, 0, num)
+ for i := 0; i < num; i++ {
+ swarm := GenSwarm(t, opts...)
+ swarm.SetStreamHandler(EchoStreamHandler)
+ swarms = append(swarms, swarm)
+ }
+ return swarms
+}
+
+func connectSwarms(t *testing.T, ctx context.Context, swarms []*swarm.Swarm) {
+ var wg sync.WaitGroup
+ connect := func(s *swarm.Swarm, dst peer.ID, addrs []ma.Multiaddr) {
+ s.Peerstore().AddAddrs(dst, addrs, peerstore.TempAddrTTL)
+ if _, err := s.DialPeer(ctx, dst); err != nil {
+ t.Fatal("error swarm dialing to peer", err)
+ }
+ wg.Done()
+ }
+
+ log.Info("Connecting swarms simultaneously.")
+ for i, s1 := range swarms {
+ for _, s2 := range swarms[i+1:] {
+ wg.Add(1)
+ connect(s1, s2.LocalPeer(), s2.ListenAddresses())
+ }
+ }
+ wg.Wait()
+
+ for _, s := range swarms {
+ log.Info("swarm routing table", "peer", s.LocalPeer(), "peers", s.Peers())
+ }
+}
+
+func subtestSwarm(t *testing.T, SwarmNum int, MsgNum int) {
+ swarms := makeSwarms(t, SwarmNum, OptDisableReuseport)
+
+ // connect everyone
+ connectSwarms(t, context.Background(), swarms)
+
+ // ping/pong
+ for _, s1 := range swarms {
+ log.Debug("-------------------------------------------------------")
+ log.Debug("ping pong round", "peer", s1.LocalPeer())
+ log.Debug("-------------------------------------------------------")
+
+ _, cancel := context.WithCancel(context.Background())
+ got := map[peer.ID]int{}
+ errChan := make(chan error, MsgNum*len(swarms))
+ streamChan := make(chan network.Stream, MsgNum)
+
+ // send out "ping" x MsgNum to every peer
+ go func() {
+ defer close(streamChan)
+
+ var wg sync.WaitGroup
+ send := func(p peer.ID) {
+ defer wg.Done()
+
+ // first, one stream per peer (nice)
+ stream, err := s1.NewStream(context.Background(), p)
+ if err != nil {
+ errChan <- err
+ return
+ }
+
+ // send out ping!
+ for k := 0; k < MsgNum; k++ { // with k messages
+ msg := "ping"
+ log.Debug("sending message", "local", s1.LocalPeer(), "msg", msg, "peer", p, "count", k)
+ if _, err := stream.Write([]byte(msg)); err != nil {
+ errChan <- err
+ continue
+ }
+ }
+
+ // read it later
+ streamChan <- stream
+ }
+
+ for _, s2 := range swarms {
+ if s2.LocalPeer() == s1.LocalPeer() {
+ continue // don't send to self...
+ }
+
+ wg.Add(1)
+ go send(s2.LocalPeer())
+ }
+ wg.Wait()
+ }()
+
+ // receive "pong" x MsgNum from every peer
+ go func() {
+ defer close(errChan)
+ count := 0
+ countShouldBe := MsgNum * (len(swarms) - 1)
+ for stream := range streamChan { // one per peer
+ // get peer on the other side
+ p := stream.Conn().RemotePeer()
+
+ // receive pings
+ msgCount := 0
+ msg := make([]byte, 4)
+ for k := 0; k < MsgNum; k++ { // with k messages
+
+ // read from the stream
+ if _, err := stream.Read(msg); err != nil {
+ errChan <- err
+ continue
+ }
+
+ if string(msg) != "pong" {
+ errChan <- fmt.Errorf("unexpected message: %s", msg)
+ continue
+ }
+
+ log.Debug("sending message", "local", s1.LocalPeer(), "msg", msg, "peer", p, "count", k)
+ msgCount++
+ }
+
+ got[p] = msgCount
+ count += msgCount
+ stream.Close()
+ }
+
+ if count != countShouldBe {
+ errChan <- fmt.Errorf("count mismatch: %d != %d", count, countShouldBe)
+ }
+ }()
+
+ // check any errors (blocks till consumer is done)
+ for err := range errChan {
+ if err != nil {
+ t.Error(err.Error())
+ }
+ }
+
+ log.Debug("got pongs", "peer", s1.LocalPeer())
+ if (len(swarms) - 1) != len(got) {
+ t.Errorf("got messages from %d peers, expected %d", len(got), len(swarms)-1)
+ }
+
+ for p, n := range got {
+ if n != MsgNum {
+ t.Error("peer did not get all msgs", p, n, "/", MsgNum)
+ }
+ }
+
+ cancel()
+ <-time.After(10 * time.Millisecond)
+ }
+}
+
+func TestSwarm(t *testing.T) {
+ t.Parallel()
+ subtestSwarm(t, 5, 100)
+}
+
+func TestBasicSwarm(t *testing.T) {
+ t.Parallel()
+ subtestSwarm(t, 2, 1)
+}
+
+func TestConnectionGating(t *testing.T) {
+ ctx := context.Background()
+ tcs := map[string]struct {
+ p1Gater func(gater *MockConnectionGater) *MockConnectionGater
+ p2Gater func(gater *MockConnectionGater) *MockConnectionGater
+
+ p1ConnectednessToP2 network.Connectedness
+ p2ConnectednessToP1 network.Connectedness
+ isP1OutboundErr bool
+ disableOnQUIC bool
+ }{
+ "no gating": {
+ p1ConnectednessToP2: network.Connected,
+ p2ConnectednessToP1: network.Connected,
+ isP1OutboundErr: false,
+ },
+ "p1 gates outbound peer dial": {
+ p1Gater: func(c *MockConnectionGater) *MockConnectionGater {
+ c.PeerDial = func(_ peer.ID) bool { return false }
+ return c
+ },
+ p1ConnectednessToP2: network.NotConnected,
+ p2ConnectednessToP1: network.NotConnected,
+ isP1OutboundErr: true,
+ },
+ "p1 gates outbound addr dialing": {
+ p1Gater: func(c *MockConnectionGater) *MockConnectionGater {
+ c.Dial = func(_ peer.ID, _ ma.Multiaddr) bool { return false }
+ return c
+ },
+ p1ConnectednessToP2: network.NotConnected,
+ p2ConnectednessToP1: network.NotConnected,
+ isP1OutboundErr: true,
+ },
+ "p2 accepts inbound peer dial if outgoing dial is gated": {
+ p2Gater: func(c *MockConnectionGater) *MockConnectionGater {
+ c.Dial = func(peer.ID, ma.Multiaddr) bool { return false }
+ return c
+ },
+ p1ConnectednessToP2: network.Connected,
+ p2ConnectednessToP1: network.Connected,
+ isP1OutboundErr: false,
+ },
+ "p2 gates inbound peer dial before securing": {
+ p2Gater: func(c *MockConnectionGater) *MockConnectionGater {
+ c.Accept = func(_ network.ConnMultiaddrs) bool { return false }
+ return c
+ },
+ p1ConnectednessToP2: network.NotConnected,
+ p2ConnectednessToP1: network.NotConnected,
+ isP1OutboundErr: true,
+ // QUIC gates the connection after completion of the handshake
+ disableOnQUIC: true,
+ },
+ "p2 gates inbound peer dial before multiplexing": {
+ p1Gater: func(c *MockConnectionGater) *MockConnectionGater {
+ c.Secured = func(network.Direction, peer.ID, network.ConnMultiaddrs) bool { return false }
+ return c
+ },
+ p1ConnectednessToP2: network.NotConnected,
+ p2ConnectednessToP1: network.NotConnected,
+ isP1OutboundErr: true,
+ },
+ "p2 gates inbound peer dial after upgrading": {
+ p1Gater: func(c *MockConnectionGater) *MockConnectionGater {
+ c.Upgraded = func(_ network.Conn) (bool, control.DisconnectReason) { return false, 0 }
+ return c
+ },
+ p1ConnectednessToP2: network.NotConnected,
+ p2ConnectednessToP1: network.NotConnected,
+ isP1OutboundErr: true,
+ },
+ "p2 gates outbound dials": {
+ p2Gater: func(c *MockConnectionGater) *MockConnectionGater {
+ c.PeerDial = func(_ peer.ID) bool { return false }
+ return c
+ },
+ p1ConnectednessToP2: network.Connected,
+ p2ConnectednessToP1: network.Connected,
+ isP1OutboundErr: false,
+ },
+ }
+
+ for n, tc := range tcs {
+ for _, useQuic := range []bool{false, true} {
+ trString := "TCP"
+ optTransport := OptDisableQUIC
+ if useQuic {
+ if tc.disableOnQUIC {
+ continue
+ }
+ trString = "QUIC"
+ optTransport = OptDisableTCP
+ }
+ t.Run(fmt.Sprintf("%s %s", n, trString), func(t *testing.T) {
+ p1Gater := DefaultMockConnectionGater()
+ p2Gater := DefaultMockConnectionGater()
+ if tc.p1Gater != nil {
+ p1Gater = tc.p1Gater(p1Gater)
+ }
+ if tc.p2Gater != nil {
+ p2Gater = tc.p2Gater(p2Gater)
+ }
+
+ sw1 := GenSwarm(t, OptConnGater(p1Gater), optTransport)
+ sw2 := GenSwarm(t, OptConnGater(p2Gater), optTransport)
+
+ p1 := sw1.LocalPeer()
+ p2 := sw2.LocalPeer()
+ sw1.Peerstore().AddAddr(p2, sw2.ListenAddresses()[0], peerstore.PermanentAddrTTL)
+ // 1 -> 2
+ _, err := sw1.DialPeer(ctx, p2)
+
+ require.Equal(t, tc.isP1OutboundErr, err != nil, n)
+ require.Equal(t, tc.p1ConnectednessToP2, sw1.Connectedness(p2), n)
+
+ require.Eventually(t, func() bool {
+ return tc.p2ConnectednessToP1 == sw2.Connectedness(p1)
+ }, 2*time.Second, 100*time.Millisecond, n)
+ })
+ }
+ }
+}
+
+func TestNoDial(t *testing.T) {
+ swarms := makeSwarms(t, 2)
+
+ _, err := swarms[0].NewStream(network.WithNoDial(context.Background(), "swarm test"), swarms[1].LocalPeer())
+ if err != network.ErrNoConn {
+ t.Fatal("should have failed with ErrNoConn")
+ }
+}
+
+func TestCloseWithOpenStreams(t *testing.T) {
+ ctx := context.Background()
+ swarms := makeSwarms(t, 2)
+ connectSwarms(t, ctx, swarms)
+
+ s, err := swarms[0].NewStream(ctx, swarms[1].LocalPeer())
+ require.NoError(t, err)
+ defer s.Close()
+ // close swarm before stream.
+ require.NoError(t, swarms[0].Close())
+}
+
+func TestTypedNilConn(t *testing.T) {
+ s := GenSwarm(t)
+ defer s.Close()
+
+ // We can't dial ourselves.
+ c, err := s.DialPeer(context.Background(), s.LocalPeer())
+ require.Error(t, err)
+ // If we fail to dial, the connection should be nil.
+ require.Nil(t, c)
+}
+
+func TestPreventDialListenAddr(t *testing.T) {
+ s := GenSwarm(t, OptDialOnly)
+ if err := s.Listen(ma.StringCast("/ip4/0.0.0.0/udp/0/quic-v1")); err != nil {
+ t.Fatal(err)
+ }
+ addrs, err := s.InterfaceListenAddresses()
+ if err != nil {
+ t.Fatal(err)
+ }
+ var addr ma.Multiaddr
+ for _, a := range addrs {
+ _, s, err := manet.DialArgs(a)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if strings.Split(s, ":")[0] == "127.0.0.1" {
+ addr = a
+ break
+ }
+ }
+ remote := test.RandPeerIDFatal(t)
+ s.Peerstore().AddAddr(remote, addr, time.Hour)
+ _, err = s.DialPeer(context.Background(), remote)
+ if !errors.Is(err, swarm.ErrNoGoodAddresses) {
+ t.Fatal("expected dial to fail: %w", err)
+ }
+}
+
+func TestStreamCount(t *testing.T) {
+ s1 := GenSwarm(t)
+ s2 := GenSwarm(t)
+ connectSwarms(t, context.Background(), []*swarm.Swarm{s2, s1})
+
+ countStreams := func() (n int) {
+ var num int
+ for _, c := range s1.ConnsToPeer(s2.LocalPeer()) {
+ n += c.Stat().NumStreams
+ num += len(c.GetStreams())
+ }
+ require.Equal(t, n, num, "inconsistent stream count")
+ return
+ }
+
+ streams := make(chan network.Stream, 20)
+ streamAccepted := make(chan struct{}, 1)
+ s1.SetStreamHandler(func(str network.Stream) {
+ streams <- str
+ streamAccepted <- struct{}{}
+ })
+
+ for i := 0; i < 10; i++ {
+ str, err := s2.NewStream(context.Background(), s1.LocalPeer())
+ require.NoError(t, err)
+ str.Write([]byte("foobar"))
+ <-streamAccepted
+ }
+ require.Eventually(t, func() bool { return len(streams) == 10 }, 5*time.Second, 10*time.Millisecond)
+ require.Equal(t, 10, countStreams())
+ (<-streams).Reset()
+ (<-streams).Close()
+ require.Equal(t, 8, countStreams())
+
+ str, err := s1.NewStream(context.Background(), s2.LocalPeer())
+ require.NoError(t, err)
+ require.Equal(t, 9, countStreams())
+ str.Close()
+ require.Equal(t, 8, countStreams())
+}
+
+func TestResourceManager(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ rcmgr1 := mocknetwork.NewMockResourceManager(ctrl)
+ s1 := GenSwarm(t, WithSwarmOpts(swarm.WithResourceManager(rcmgr1)))
+ defer s1.Close()
+
+ rcmgr2 := mocknetwork.NewMockResourceManager(ctrl)
+ s2 := GenSwarm(t, WithSwarmOpts(swarm.WithResourceManager(rcmgr2)))
+ defer s2.Close()
+ connectSwarms(t, context.Background(), []*swarm.Swarm{s1, s2})
+
+ strChan := make(chan network.Stream)
+ s2.SetStreamHandler(func(str network.Stream) { strChan <- str })
+
+ streamScope1 := mocknetwork.NewMockStreamManagementScope(ctrl)
+ rcmgr1.EXPECT().OpenStream(s2.LocalPeer(), network.DirOutbound).Return(streamScope1, nil)
+ streamScope2 := mocknetwork.NewMockStreamManagementScope(ctrl)
+ rcmgr2.EXPECT().OpenStream(s1.LocalPeer(), network.DirInbound).Return(streamScope2, nil)
+ str, err := s1.NewStream(context.Background(), s2.LocalPeer())
+ require.NoError(t, err)
+ defer str.Close()
+ str.Write([]byte("foobar"))
+
+ p := protocol.ID("proto")
+ streamScope1.EXPECT().SetProtocol(p)
+ require.NoError(t, str.SetProtocol(p))
+
+ sstr := <-strChan
+ streamScope2.EXPECT().Done()
+ require.NoError(t, sstr.Close())
+ streamScope1.EXPECT().Done()
+}
+
+func TestResourceManagerNewStream(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ rcmgr1 := mocknetwork.NewMockResourceManager(ctrl)
+ s1 := GenSwarm(t, WithSwarmOpts(swarm.WithResourceManager(rcmgr1)))
+ defer s1.Close()
+
+ s2 := GenSwarm(t)
+ defer s2.Close()
+
+ connectSwarms(t, context.Background(), []*swarm.Swarm{s1, s2})
+
+ rerr := errors.New("denied")
+ rcmgr1.EXPECT().OpenStream(s2.LocalPeer(), network.DirOutbound).Return(nil, rerr)
+ _, err := s1.NewStream(context.Background(), s2.LocalPeer())
+ require.ErrorIs(t, err, rerr)
+}
+
+func TestResourceManagerAcceptStream(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ rcmgr1 := mocknetwork.NewMockResourceManager(ctrl)
+ s1 := GenSwarm(t, WithSwarmOpts(swarm.WithResourceManager(rcmgr1)))
+ defer s1.Close()
+
+ rcmgr2 := mocknetwork.NewMockResourceManager(ctrl)
+ s2 := GenSwarm(t, WithSwarmOpts(swarm.WithResourceManager(rcmgr2)))
+ defer s2.Close()
+ s2.SetStreamHandler(func(_ network.Stream) { t.Fatal("didn't expect to accept a stream") })
+
+ connectSwarms(t, context.Background(), []*swarm.Swarm{s1, s2})
+
+ streamScope := mocknetwork.NewMockStreamManagementScope(ctrl)
+ rcmgr1.EXPECT().OpenStream(s2.LocalPeer(), network.DirOutbound).Return(streamScope, nil)
+ streamScope.EXPECT().Done()
+ rcmgr2.EXPECT().OpenStream(s1.LocalPeer(), network.DirInbound).Return(nil, errors.New("nope"))
+ str, err := s1.NewStream(context.Background(), s2.LocalPeer())
+ require.NoError(t, err)
+ // The peer's resource manager is blocking any new stream.
+ // Depending on how quickly we receive the stream reset, it surfaces either during the write or the read call.
+ _, err = str.Write([]byte("foobar"))
+ if err == nil {
+ _, err = str.Read([]byte{0})
+ }
+ require.ErrorContains(t, err, "stream reset")
+}
+
+func TestListenCloseCount(t *testing.T) {
+ s := GenSwarm(t, OptDialOnly)
+ addrsToListen := []ma.Multiaddr{
+ ma.StringCast("/ip4/0.0.0.0/tcp/0"),
+ ma.StringCast("/ip4/0.0.0.0/udp/0/quic-v1"),
+ }
+
+ if err := s.Listen(addrsToListen...); err != nil {
+ t.Fatal(err)
+ }
+ listenedAddrs := s.ListenAddresses()
+ require.Len(t, listenedAddrs, 2)
+ var addrToClose ma.Multiaddr
+ for _, addr := range listenedAddrs {
+ if _, err := addr.ValueForProtocol(ma.P_QUIC_V1); err == nil {
+ // make a copy of the address to make sure the multiaddr comparison actually works
+ addrToClose = ma.StringCast(addr.String())
+ }
+ }
+
+ s.ListenClose(addrToClose)
+
+ remainingAddrs := s.ListenAddresses()
+ require.Len(t, remainingAddrs, 1)
+ _, err := remainingAddrs[0].ValueForProtocol(ma.P_TCP)
+ require.NoError(t, err, "expected the TCP address to still be present")
+}
+
+func TestAddCertHashes(t *testing.T) {
+ s := GenSwarm(t)
+
+ listenAddrs := s.ListenAddresses()
+ splitCertHashes := func(a ma.Multiaddr) (prefix, certhashes ma.Multiaddr, ok bool) {
+ for i, c := range a {
+ if c.Protocol().Code == ma.P_CERTHASH {
+ return prefix, a[i:], true
+ }
+ prefix = append(prefix, c)
+ }
+ return prefix, certhashes, false
+ }
+ addrWithNewIPPort := func(addr ma.Multiaddr, newIPPort ma.Multiaddr) ma.Multiaddr {
+ a := slices.Clone(addr)
+ a[0] = newIPPort[0]
+ a[1] = newIPPort[1]
+ return a
+ }
+ publicIPPort := []ma.Multiaddr{
+ ma.StringCast("/ip4/1.1.1.1/udp/1"),
+ ma.StringCast("/ip4/1.2.3.4/udp/1"),
+ ma.StringCast("/ip6/2005::/udp/1"),
+ }
+
+ certHashComponent := ma.StringCast("/certhash/uEgNmb28")
+ for _, a := range listenAddrs {
+ prefix, certhashes, ok := splitCertHashes(a)
+ if !ok {
+ continue
+ }
+ var publicAddrs []ma.Multiaddr
+ for _, tc := range publicIPPort {
+ publicAddrs = append(publicAddrs, addrWithNewIPPort(prefix, tc))
+ }
+ finalAddrs := s.AddCertHashes(publicAddrs)
+ for _, a := range finalAddrs {
+ _, certhash2, ok := splitCertHashes(a)
+ require.True(t, ok)
+ require.Equal(t, certhashes, certhash2)
+ }
+
+ // if the addr has a certhash already, check it isn't modified
+ publicAddrs = nil
+ for _, tc := range publicIPPort {
+ a := addrWithNewIPPort(prefix, tc)
+ a = append(a, certHashComponent...)
+ publicAddrs = append(publicAddrs, a)
+ }
+ finalAddrs = s.AddCertHashes(publicAddrs)
+ for _, a := range finalAddrs {
+ _, certhash2, ok := splitCertHashes(a)
+ require.True(t, ok)
+ require.Equal(t, certHashComponent, certhash2)
+ }
+ }
+}
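
The connection-gating table test above exercises every interception point of the gater. For orientation, here is a minimal sketch of a concrete gater that blocks a single peer at each dial-side hook; the denyPeerGater type is hypothetical and only illustrates the connmgr.ConnectionGater contract.

```go
package example

import (
	"github.com/libp2p/go-libp2p/core/connmgr"
	"github.com/libp2p/go-libp2p/core/control"
	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"

	ma "github.com/multiformats/go-multiaddr"
)

// denyPeerGater blocks all interaction with one peer and allows
// everything else.
type denyPeerGater struct {
	denied peer.ID
}

func (g *denyPeerGater) InterceptPeerDial(p peer.ID) bool { return p != g.denied }
func (g *denyPeerGater) InterceptAddrDial(p peer.ID, _ ma.Multiaddr) bool {
	return p != g.denied
}
func (g *denyPeerGater) InterceptAccept(network.ConnMultiaddrs) bool { return true }
func (g *denyPeerGater) InterceptSecured(_ network.Direction, p peer.ID, _ network.ConnMultiaddrs) bool {
	return p != g.denied
}
func (g *denyPeerGater) InterceptUpgraded(network.Conn) (bool, control.DisconnectReason) {
	return true, 0
}

var _ connmgr.ConnectionGater = (*denyPeerGater)(nil)
```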
diff --git a/p2p/net/swarm/swarm_transport.go b/p2p/net/swarm/swarm_transport.go
new file mode 100644
index 0000000000..ff944813bd
--- /dev/null
+++ b/p2p/net/swarm/swarm_transport.go
@@ -0,0 +1,116 @@
+package swarm
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/transport"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+// TransportForDialing retrieves the appropriate transport for dialing the given
+// multiaddr.
+func (s *Swarm) TransportForDialing(a ma.Multiaddr) transport.Transport {
+ if a == nil {
+ return nil
+ }
+ protocols := a.Protocols()
+ if len(protocols) == 0 {
+ return nil
+ }
+
+ s.transports.RLock()
+ defer s.transports.RUnlock()
+
+ if len(s.transports.m) == 0 {
+ // make sure we're not just shutting down.
+ if s.transports.m != nil {
+ log.Error("you have no transports configured")
+ }
+ return nil
+ }
+ if isRelayAddr(a) {
+ return s.transports.m[ma.P_CIRCUIT]
+ }
+ if id, _ := peer.IDFromP2PAddr(a); id != "" {
+ // This addr has a p2p component. Drop it so we can check transport.
+ a, _ = ma.SplitLast(a)
+ if a == nil {
+ return nil
+ }
+ }
+ for _, t := range s.transports.m {
+ if t.CanDial(a) {
+ return t
+ }
+ }
+ return nil
+}
+
+// TransportForListening retrieves the appropriate transport for listening on
+// the given multiaddr.
+func (s *Swarm) TransportForListening(a ma.Multiaddr) transport.Transport {
+ protocols := a.Protocols()
+ if len(protocols) == 0 {
+ return nil
+ }
+
+ s.transports.RLock()
+ defer s.transports.RUnlock()
+ if len(s.transports.m) == 0 {
+ return nil
+ }
+
+ selected := s.transports.m[protocols[len(protocols)-1].Code]
+ for _, p := range protocols {
+ transport, ok := s.transports.m[p.Code]
+ if !ok {
+ continue
+ }
+ if transport.Proxy() {
+ selected = transport
+ }
+ }
+ return selected
+}
+
+// AddTransport adds a transport to this swarm.
+//
+// Satisfies the Network interface from go-libp2p-transport.
+func (s *Swarm) AddTransport(t transport.Transport) error {
+ protocols := t.Protocols()
+
+ if len(protocols) == 0 {
+ return fmt.Errorf("useless transport handles no protocols: %T", t)
+ }
+
+ s.transports.Lock()
+ defer s.transports.Unlock()
+ if s.transports.m == nil {
+ return ErrSwarmClosed
+ }
+ var registered []string
+ for _, p := range protocols {
+ if _, ok := s.transports.m[p]; ok {
+ proto := ma.ProtocolWithCode(p)
+ name := proto.Name
+ if name == "" {
+ name = fmt.Sprintf("unknown (%d)", p)
+ }
+ registered = append(registered, name)
+ }
+ }
+ if len(registered) > 0 {
+ return fmt.Errorf(
+ "transports already registered for protocol(s): %s",
+ strings.Join(registered, ", "),
+ )
+ }
+
+ for _, p := range protocols {
+ s.transports.m[p] = t
+ }
+ return nil
+}
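
To make the registration rule above concrete: each multiaddr protocol code can be claimed by at most one transport, so registering a second TCP transport fails. A sketch, where tcpA and tcpB stand for two TCP transport instances built as in the test helpers below:

```go
package example

import (
	"github.com/libp2p/go-libp2p/core/transport"
	"github.com/libp2p/go-libp2p/p2p/net/swarm"
)

// registerOnce demonstrates AddTransport's duplicate-protocol check.
func registerOnce(s *swarm.Swarm, tcpA, tcpB transport.Transport) error {
	if err := s.AddTransport(tcpA); err != nil {
		return err
	}
	// Fails with "transports already registered for protocol(s): tcp".
	return s.AddTransport(tcpB)
}
```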
diff --git a/p2p/net/swarm/testing/testing.go b/p2p/net/swarm/testing/testing.go
new file mode 100644
index 0000000000..bafe49b769
--- /dev/null
+++ b/p2p/net/swarm/testing/testing.go
@@ -0,0 +1,314 @@
+package testing
+
+import (
+ "crypto/rand"
+ "net"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/connmgr"
+ "github.com/libp2p/go-libp2p/core/control"
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/event"
+ "github.com/libp2p/go-libp2p/core/metrics"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/libp2p/go-libp2p/core/sec"
+ "github.com/libp2p/go-libp2p/core/sec/insecure"
+ "github.com/libp2p/go-libp2p/core/transport"
+ "github.com/libp2p/go-libp2p/p2p/host/eventbus"
+ "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem"
+ "github.com/libp2p/go-libp2p/p2p/muxer/yamux"
+ "github.com/libp2p/go-libp2p/p2p/net/swarm"
+ tptu "github.com/libp2p/go-libp2p/p2p/net/upgrader"
+ libp2pquic "github.com/libp2p/go-libp2p/p2p/transport/quic"
+ "github.com/libp2p/go-libp2p/p2p/transport/quicreuse"
+ "github.com/libp2p/go-libp2p/p2p/transport/tcp"
+ libp2pwebrtc "github.com/libp2p/go-libp2p/p2p/transport/webrtc"
+ libp2pwebtransport "github.com/libp2p/go-libp2p/p2p/transport/webtransport"
+
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/quic-go/quic-go"
+ "github.com/stretchr/testify/require"
+)
+
+type config struct {
+ disableReuseport bool
+ dialOnly bool
+ disableTCP bool
+ disableQUIC bool
+ disableWebTransport bool
+ disableWebRTC bool
+ connectionGater connmgr.ConnectionGater
+ sk crypto.PrivKey
+ swarmOpts []swarm.Option
+ eventBus event.Bus
+ clock
+}
+
+type clock interface {
+ Now() time.Time
+}
+
+type realclock struct{}
+
+func (rc realclock) Now() time.Time {
+ return time.Now()
+}
+
+// Option is an option that can be passed when constructing a test swarm.
+type Option func(testing.TB, *config)
+
+// WithClock sets the clock to use for this swarm
+func WithClock(clock clock) Option {
+ return func(_ testing.TB, c *config) {
+ c.clock = clock
+ }
+}
+
+func WithSwarmOpts(swarmOpts ...swarm.Option) Option {
+ return func(_ testing.TB, c *config) {
+ c.swarmOpts = swarmOpts
+ }
+}
+
+// OptDisableReuseport disables reuseport in this test swarm.
+var OptDisableReuseport Option = func(_ testing.TB, c *config) {
+ c.disableReuseport = true
+}
+
+// OptDialOnly prevents the test swarm from listening.
+var OptDialOnly Option = func(_ testing.TB, c *config) {
+ c.dialOnly = true
+}
+
+// OptDisableTCP disables TCP.
+var OptDisableTCP Option = func(_ testing.TB, c *config) {
+ c.disableTCP = true
+}
+
+// OptDisableQUIC disables QUIC.
+var OptDisableQUIC Option = func(_ testing.TB, c *config) {
+ c.disableQUIC = true
+}
+
+// OptDisableWebTransport disables WebTransport.
+var OptDisableWebTransport Option = func(_ testing.TB, c *config) {
+ c.disableWebTransport = true
+}
+
+// OptDisableWebRTC disables WebRTC.
+var OptDisableWebRTC Option = func(_ testing.TB, c *config) {
+ c.disableWebRTC = true
+}
+
+// OptConnGater configures the given connection gater on the test swarm.
+func OptConnGater(cg connmgr.ConnectionGater) Option {
+ return func(_ testing.TB, c *config) {
+ c.connectionGater = cg
+ }
+}
+
+// OptPeerPrivateKey configures the peer private key which is then used to derive the public key and peer ID.
+func OptPeerPrivateKey(sk crypto.PrivKey) Option {
+ return func(_ testing.TB, c *config) {
+ c.sk = sk
+ }
+}
+
+// EventBus sets the event bus to be used by the test swarm.
+func EventBus(b event.Bus) Option {
+ return func(_ testing.TB, c *config) {
+ c.eventBus = b
+ }
+}
+
+// GenUpgrader creates a new connection upgrader for use with this swarm.
+func GenUpgrader(t testing.TB, n *swarm.Swarm, connGater connmgr.ConnectionGater, opts ...tptu.Option) transport.Upgrader {
+ id := n.LocalPeer()
+ pk := n.Peerstore().PrivKey(id)
+ st := insecure.NewWithIdentity(insecure.ID, id, pk)
+
+ u, err := tptu.New([]sec.SecureTransport{st}, []tptu.StreamMuxer{{ID: yamux.ID, Muxer: yamux.DefaultTransport}}, nil, nil, connGater, opts...)
+ require.NoError(t, err)
+ return u
+}
+
+// GenSwarm generates a new test swarm.
+func GenSwarm(t testing.TB, opts ...Option) *swarm.Swarm {
+ var cfg config
+ cfg.clock = realclock{}
+ for _, o := range opts {
+ o(t, &cfg)
+ }
+
+ var priv crypto.PrivKey
+ if cfg.sk == nil {
+ var err error
+ priv, _, err = crypto.GenerateEd25519Key(rand.Reader)
+ require.NoError(t, err)
+ } else {
+ priv = cfg.sk
+ }
+ id, err := peer.IDFromPrivateKey(priv)
+ require.NoError(t, err)
+
+ ps, err := pstoremem.NewPeerstore(pstoremem.WithClock(cfg.clock))
+ require.NoError(t, err)
+ ps.AddPubKey(id, priv.GetPublic())
+ ps.AddPrivKey(id, priv)
+ t.Cleanup(func() { ps.Close() })
+
+ swarmOpts := cfg.swarmOpts
+ swarmOpts = append(swarmOpts, swarm.WithMetrics(metrics.NewBandwidthCounter()))
+ if cfg.connectionGater != nil {
+ swarmOpts = append(swarmOpts, swarm.WithConnectionGater(cfg.connectionGater))
+ }
+
+ eventBus := cfg.eventBus
+ if eventBus == nil {
+ eventBus = eventbus.NewBus()
+ }
+ s, err := swarm.NewSwarm(id, ps, eventBus, swarmOpts...)
+ require.NoError(t, err)
+
+ upgrader := GenUpgrader(t, s, cfg.connectionGater)
+
+ if !cfg.disableTCP {
+ var tcpOpts []tcp.Option
+ if cfg.disableReuseport {
+ tcpOpts = append(tcpOpts, tcp.DisableReuseport())
+ }
+ tcpTransport, err := tcp.NewTCPTransport(upgrader, nil, nil, tcpOpts...)
+ require.NoError(t, err)
+ if err := s.AddTransport(tcpTransport); err != nil {
+ t.Fatal(err)
+ }
+ if !cfg.dialOnly {
+ if err := s.Listen(ma.StringCast("/ip4/127.0.0.1/tcp/0")); err != nil {
+ t.Fatal(err)
+ }
+ }
+ }
+ var reuse *quicreuse.ConnManager
+ if !cfg.disableQUIC {
+ reuse, err = quicreuse.NewConnManager(quic.StatelessResetKey{}, quic.TokenGeneratorKey{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ quicTransport, err := libp2pquic.NewTransport(priv, reuse, nil, cfg.connectionGater, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := s.AddTransport(quicTransport); err != nil {
+ t.Fatal(err)
+ }
+ if !cfg.dialOnly {
+ if err := s.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1")); err != nil {
+ t.Fatal(err)
+ }
+ }
+ }
+ if !cfg.disableWebTransport {
+ if reuse == nil {
+ reuse, err = quicreuse.NewConnManager(quic.StatelessResetKey{}, quic.TokenGeneratorKey{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+ wtTransport, err := libp2pwebtransport.New(priv, nil, reuse, cfg.connectionGater, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := s.AddTransport(wtTransport); err != nil {
+ t.Fatal(err)
+ }
+ if !cfg.dialOnly {
+ if err := s.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")); err != nil {
+ t.Fatal(err)
+ }
+ }
+ }
+ if !cfg.disableWebRTC {
+ listenUDPFn := func(network string, laddr *net.UDPAddr) (net.PacketConn, error) {
+ return net.ListenUDP(network, laddr)
+ }
+ wrtcTransport, err := libp2pwebrtc.New(priv, nil, cfg.connectionGater, nil, listenUDPFn)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := s.AddTransport(wrtcTransport); err != nil {
+ t.Fatal(err)
+ }
+ if !cfg.dialOnly {
+ if err := s.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/webrtc-direct")); err != nil {
+ t.Fatal(err)
+ }
+ }
+ }
+ if !cfg.dialOnly {
+ s.Peerstore().AddAddrs(id, s.ListenAddresses(), peerstore.PermanentAddrTTL)
+ }
+ return s
+}
+
+// DivulgeAddresses adds swarm a's addresses to swarm b's peerstore.
+func DivulgeAddresses(a, b network.Network) {
+ id := a.LocalPeer()
+ addrs := a.Peerstore().Addrs(id)
+ b.Peerstore().AddAddrs(id, addrs, peerstore.PermanentAddrTTL)
+}
+
+// MockConnectionGater is a mock connection gater to be used by the tests.
+type MockConnectionGater struct {
+ Dial func(p peer.ID, addr ma.Multiaddr) bool
+ PeerDial func(p peer.ID) bool
+ Accept func(c network.ConnMultiaddrs) bool
+ Secured func(network.Direction, peer.ID, network.ConnMultiaddrs) bool
+ Upgraded func(c network.Conn) (bool, control.DisconnectReason)
+}
+
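+// DefaultMockConnectionGater returns a MockConnectionGater that allows
+// everything. A test can start from these permissive defaults and override a
+// single hook; a minimal sketch (blockedPeer is a hypothetical peer.ID):
+//
+//	cg := DefaultMockConnectionGater()
+//	cg.PeerDial = func(p peer.ID) bool { return p != blockedPeer }
+//	s := GenSwarm(t, OptConnGater(cg))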
+func DefaultMockConnectionGater() *MockConnectionGater {
+ m := &MockConnectionGater{}
+ m.Dial = func(_ peer.ID, _ ma.Multiaddr) bool {
+ return true
+ }
+
+ m.PeerDial = func(_ peer.ID) bool {
+ return true
+ }
+
+ m.Accept = func(_ network.ConnMultiaddrs) bool {
+ return true
+ }
+
+ m.Secured = func(network.Direction, peer.ID, network.ConnMultiaddrs) bool {
+ return true
+ }
+
+ m.Upgraded = func(_ network.Conn) (bool, control.DisconnectReason) {
+ return true, 0
+ }
+
+ return m
+}
+
+func (m *MockConnectionGater) InterceptAddrDial(p peer.ID, addr ma.Multiaddr) (allow bool) {
+ return m.Dial(p, addr)
+}
+
+func (m *MockConnectionGater) InterceptPeerDial(p peer.ID) (allow bool) {
+ return m.PeerDial(p)
+}
+
+func (m *MockConnectionGater) InterceptAccept(c network.ConnMultiaddrs) (allow bool) {
+ return m.Accept(c)
+}
+
+func (m *MockConnectionGater) InterceptSecured(d network.Direction, p peer.ID, c network.ConnMultiaddrs) (allow bool) {
+ return m.Secured(d, p, c)
+}
+
+func (m *MockConnectionGater) InterceptUpgraded(tc network.Conn) (allow bool, reason control.DisconnectReason) {
+ return m.Upgraded(tc)
+}
diff --git a/p2p/net/swarm/testing/testing_test.go b/p2p/net/swarm/testing/testing_test.go
new file mode 100644
index 0000000000..d4a43dfb59
--- /dev/null
+++ b/p2p/net/swarm/testing/testing_test.go
@@ -0,0 +1,13 @@
+package testing
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestGenSwarm(t *testing.T) {
+ swarm := GenSwarm(t)
+ require.NoError(t, swarm.Close())
+ GenUpgrader(t, swarm, nil)
+}
diff --git a/p2p/net/swarm/transport_test.go b/p2p/net/swarm/transport_test.go
new file mode 100644
index 0000000000..b19cb21e6b
--- /dev/null
+++ b/p2p/net/swarm/transport_test.go
@@ -0,0 +1,69 @@
+package swarm_test
+
+import (
+ "context"
+ "testing"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/transport"
+ "github.com/libp2p/go-libp2p/p2p/net/swarm"
+ swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
+
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/stretchr/testify/require"
+)
+
+type dummyTransport struct {
+ protocols []int
+ proxy bool
+ closed bool
+}
+
+func (dt *dummyTransport) Dial(_ context.Context, _ ma.Multiaddr, _ peer.ID) (transport.CapableConn, error) {
+ panic("unimplemented")
+}
+
+func (dt *dummyTransport) CanDial(_ ma.Multiaddr) bool {
+ panic("unimplemented")
+}
+
+func (dt *dummyTransport) Listen(_ ma.Multiaddr) (transport.Listener, error) {
+ panic("unimplemented")
+}
+
+func (dt *dummyTransport) Proxy() bool {
+ return dt.proxy
+}
+
+func (dt *dummyTransport) Protocols() []int {
+ return dt.protocols
+}
+func (dt *dummyTransport) Close() error {
+ dt.closed = true
+ return nil
+}
+
+func TestUselessTransport(t *testing.T) {
+ s := swarmt.GenSwarm(t)
+ require.Error(t, s.AddTransport(new(dummyTransport)), "adding a transport that supports no protocols should have failed")
+}
+
+func TestTransportClose(t *testing.T) {
+ s := swarmt.GenSwarm(t)
+ tpt := &dummyTransport{protocols: []int{1}}
+ require.NoError(t, s.AddTransport(tpt))
+ _ = s.Close()
+ if !tpt.closed {
+ t.Fatal("expected transport to be closed")
+ }
+}
+
+func TestTransportAfterClose(t *testing.T) {
+ s := swarmt.GenSwarm(t)
+ s.Close()
+
+ tpt := &dummyTransport{protocols: []int{1}}
+ if err := s.AddTransport(tpt); err != swarm.ErrSwarmClosed {
+ t.Fatal("expected swarm closed error, got: ", err)
+ }
+}
diff --git a/p2p/net/swarm/util_test.go b/p2p/net/swarm/util_test.go
new file mode 100644
index 0000000000..ab9baa7ed2
--- /dev/null
+++ b/p2p/net/swarm/util_test.go
@@ -0,0 +1,53 @@
+package swarm
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/libp2p/go-libp2p/core/test"
+
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/stretchr/testify/require"
+)
+
+func TestIsFdConsuming(t *testing.T) {
+ tcs := map[string]struct {
+ addr string
+ isFdConsuming bool
+ }{
+ "tcp": {
+ addr: "/ip4/127.0.0.1/tcp/20",
+ isFdConsuming: true,
+ },
+ "quic": {
+ addr: "/ip4/127.0.0.1/udp/0/quic-v1",
+ isFdConsuming: false,
+ },
+ "addr-without-registered-transport": {
+ addr: "/ip4/127.0.0.1/tcp/20/ws",
+ isFdConsuming: true,
+ },
+ "relay-tcp": {
+ addr: fmt.Sprintf("/ip4/127.0.0.1/tcp/20/p2p-circuit/p2p/%s", test.RandPeerIDFatal(t)),
+ isFdConsuming: true,
+ },
+ "relay-quic": {
+ addr: fmt.Sprintf("/ip4/127.0.0.1/udp/20/quic/p2p-circuit/p2p/%s", test.RandPeerIDFatal(t)),
+ isFdConsuming: false,
+ },
+ "relay-without-serveraddr": {
+ addr: fmt.Sprintf("/p2p-circuit/p2p/%s", test.RandPeerIDFatal(t)),
+ isFdConsuming: true,
+ },
+ "relay-without-registered-transport-server": {
+ addr: fmt.Sprintf("/ip4/127.0.0.1/tcp/20/ws/p2p-circuit/p2p/%s", test.RandPeerIDFatal(t)),
+ isFdConsuming: true,
+ },
+ }
+
+ for name := range tcs {
+ maddr, err := ma.NewMultiaddr(tcs[name].addr)
+ require.NoError(t, err, name)
+ require.Equal(t, tcs[name].isFdConsuming, isFdConsumingAddr(maddr), name)
+ }
+}
diff --git a/p2p/net/upgrader/conn.go b/p2p/net/upgrader/conn.go
new file mode 100644
index 0000000000..2cc4dcfbb6
--- /dev/null
+++ b/p2p/net/upgrader/conn.go
@@ -0,0 +1,70 @@
+package upgrader
+
+import (
+ "fmt"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/protocol"
+ "github.com/libp2p/go-libp2p/core/transport"
+)
+
+type transportConn struct {
+ network.MuxedConn
+ network.ConnMultiaddrs
+ network.ConnSecurity
+ transport transport.Transport
+ scope network.ConnManagementScope
+ stat network.ConnStats
+
+ muxer protocol.ID
+ security protocol.ID
+ usedEarlyMuxerNegotiation bool
+}
+
+var _ transport.CapableConn = &transportConn{}
+
+func (t *transportConn) Transport() transport.Transport {
+ return t.transport
+}
+
+func (t *transportConn) String() string {
+ ts := ""
+ if s, ok := t.transport.(fmt.Stringer); ok {
+ ts = "[" + s.String() + "]"
+ }
+ return fmt.Sprintf(
+ " %s (%s)>",
+ ts,
+ t.LocalMultiaddr(),
+ t.LocalPeer(),
+ t.RemoteMultiaddr(),
+ t.RemotePeer(),
+ )
+}
+
+func (t *transportConn) Stat() network.ConnStats {
+ return t.stat
+}
+
+func (t *transportConn) Scope() network.ConnScope {
+ return t.scope
+}
+
+func (t *transportConn) Close() error {
+ defer t.scope.Done()
+ return t.MuxedConn.Close()
+}
+
+func (t *transportConn) ConnState() network.ConnectionState {
+ return network.ConnectionState{
+ StreamMultiplexer: t.muxer,
+ Security: t.security,
+ Transport: "tcp",
+ UsedEarlyMuxerNegotiation: t.usedEarlyMuxerNegotiation,
+ }
+}
+
+func (t *transportConn) CloseWithError(errCode network.ConnErrorCode) error {
+ defer t.scope.Done()
+ return t.MuxedConn.CloseWithError(errCode)
+}
diff --git a/p2p/net/upgrader/gater_test.go b/p2p/net/upgrader/gater_test.go
new file mode 100644
index 0000000000..1f47d3f823
--- /dev/null
+++ b/p2p/net/upgrader/gater_test.go
@@ -0,0 +1,60 @@
+package upgrader_test
+
+import (
+ "sync"
+
+ "github.com/libp2p/go-libp2p/core/connmgr"
+ "github.com/libp2p/go-libp2p/core/control"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+type testGater struct {
+ sync.Mutex
+
+ blockAccept, blockSecured bool
+}
+
+var _ connmgr.ConnectionGater = (*testGater)(nil)
+
+func (t *testGater) BlockAccept(block bool) {
+ t.Lock()
+ defer t.Unlock()
+
+ t.blockAccept = block
+}
+
+func (t *testGater) BlockSecured(block bool) {
+ t.Lock()
+ defer t.Unlock()
+
+ t.blockSecured = block
+}
+
+func (t *testGater) InterceptPeerDial(_ peer.ID) (allow bool) {
+ panic("not implemented")
+}
+
+func (t *testGater) InterceptAddrDial(_ peer.ID, _ ma.Multiaddr) (allow bool) {
+ panic("not implemented")
+}
+
+func (t *testGater) InterceptAccept(_ network.ConnMultiaddrs) (allow bool) {
+ t.Lock()
+ defer t.Unlock()
+
+ return !t.blockAccept
+}
+
+func (t *testGater) InterceptSecured(_ network.Direction, _ peer.ID, _ network.ConnMultiaddrs) (allow bool) {
+ t.Lock()
+ defer t.Unlock()
+
+ return !t.blockSecured
+}
+
+func (t *testGater) InterceptUpgraded(_ network.Conn) (allow bool, reason control.DisconnectReason) {
+ panic("not implemented")
+}
diff --git a/p2p/net/upgrader/listener.go b/p2p/net/upgrader/listener.go
new file mode 100644
index 0000000000..8bb844292f
--- /dev/null
+++ b/p2p/net/upgrader/listener.go
@@ -0,0 +1,205 @@
+package upgrader
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "sync"
+
+ "github.com/libp2p/go-libp2p/core/connmgr"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/transport"
+
+ tec "github.com/jbenet/go-temp-err-catcher"
+ logging "github.com/libp2p/go-libp2p/gologshim"
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
+var log = logging.Logger("upgrader")
+
+type listener struct {
+ transport.GatedMaListener
+
+ transport transport.Transport
+ upgrader *upgrader
+ rcmgr network.ResourceManager
+
+ incoming chan transport.CapableConn
+ err error
+
+ // Used for backpressure
+ threshold *threshold
+
+ // Canceling this context isn't sufficient to tear down the listener.
+	// Call Close instead.
+ ctx context.Context
+ cancel func()
+}
+
+var _ transport.Listener = (*listener)(nil)
+
+// Close closes the listener.
+func (l *listener) Close() error {
+ // Do this first to try to get any relevant errors.
+ err := l.GatedMaListener.Close()
+
+ l.cancel()
+ // Drain and wait.
+ for c := range l.incoming {
+ c.Close()
+ }
+ return err
+}
+
+// handleIncoming handles inbound connections.
+//
+// This function does a few interesting things that should be noted:
+//
+// 1. It logs and discards temporary/transient errors (errors with a Temporary()
+// function that returns true).
+// 2. It stops accepting new connections once AcceptQueueLength connections have
+// been fully negotiated but not accepted. This gives us a basic backpressure
+// mechanism while still allowing us to negotiate connections in parallel.
+func (l *listener) handleIncoming() {
+ var wg sync.WaitGroup
+ defer func() {
+ // make sure we're closed
+ l.GatedMaListener.Close()
+ if l.err == nil {
+ l.err = fmt.Errorf("listener closed")
+ }
+
+ wg.Wait()
+ close(l.incoming)
+ }()
+
+ var catcher tec.TempErrCatcher
+ for l.ctx.Err() == nil {
+ maconn, connScope, err := l.GatedMaListener.Accept()
+ if err != nil {
+ // Note: function may pause the accept loop.
+ if catcher.IsTemporary(err) {
+ log.Info("temporary accept error", "err", err)
+ continue
+ }
+ l.err = err
+ return
+ }
+ catcher.Reset()
+
+ if connScope == nil {
+ log.Error("BUG: got nil connScope for incoming connection", "remote_multiaddr", maconn.RemoteMultiaddr())
+ maconn.Close()
+ continue
+ }
+
+		// The goroutine below calls Release when the context is
+ // canceled so there's no need to wait on it here.
+ l.threshold.Wait()
+
+ log.Debug("listener got connection",
+ "listener", l,
+ "local_multiaddr", maconn.LocalMultiaddr(),
+ "remote_multiaddr", maconn.RemoteMultiaddr())
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+
+ ctx, cancel := context.WithTimeout(l.ctx, l.upgrader.acceptTimeout)
+ defer cancel()
+
+ conn, err := l.upgrader.Upgrade(ctx, l.transport, maconn, network.DirInbound, "", connScope)
+ if err != nil {
+ // Don't bother bubbling this up. We just failed
+ // to completely negotiate the connection.
+ log.Debug("accept upgrade error",
+ "err", err,
+ "local_multiaddr", maconn.LocalMultiaddr(),
+ "remote_multiaddr", maconn.RemoteMultiaddr())
+ connScope.Done()
+ return
+ }
+
+ log.Debug("listener accepted connection",
+ "listener", l,
+ "connection", conn)
+
+ // This records the fact that the connection has been
+ // setup and is waiting to be accepted. This call
+ // *never* blocks, even if we go over the threshold. It
+ // simply ensures that calls to Wait block while we're
+ // over the threshold.
+ l.threshold.Acquire()
+ defer l.threshold.Release()
+
+ select {
+ case l.incoming <- conn:
+ case <-ctx.Done():
+ // Listener not closed but the accept timeout expired.
+ if l.ctx.Err() == nil {
+ log.Warn("listener dropped connection due to slow accept", "remote_multiaddr", maconn.RemoteMultiaddr(), "peer", conn.RemotePeer())
+ }
+ conn.CloseWithError(network.ConnRateLimited)
+ }
+ }()
+ }
+}
+
+// Accept accepts a connection.
+func (l *listener) Accept() (transport.CapableConn, error) {
+ for c := range l.incoming {
+ // Could have been sitting there for a while.
+ if !c.IsClosed() {
+ return c, nil
+ }
+ }
+ if strings.Contains(l.err.Error(), "use of closed network connection") {
+ return nil, transport.ErrListenerClosed
+ }
+ return nil, l.err
+}
+
+func (l *listener) String() string {
+ if s, ok := l.transport.(fmt.Stringer); ok {
+ return fmt.Sprintf("", s, l.Multiaddr())
+ }
+ return fmt.Sprintf("", l.Multiaddr())
+}
+
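+// gatedMaListener wraps a manet.Listener, applying the connection gater's
+// InterceptAccept check and opening a resource manager connection scope
+// before handing connections to the upgrader.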
+type gatedMaListener struct {
+ manet.Listener
+ rcmgr network.ResourceManager
+ connGater connmgr.ConnectionGater
+}
+
+var _ transport.GatedMaListener = &gatedMaListener{}
+
+func (l *gatedMaListener) Accept() (manet.Conn, network.ConnManagementScope, error) {
+ for {
+ conn, err := l.Listener.Accept()
+ if err != nil {
+ return nil, nil, err
+ }
+ // gate the connection if applicable
+ if l.connGater != nil && !l.connGater.InterceptAccept(conn) {
+ log.Debug("gater blocked incoming connection",
+ "local_multiaddr", conn.LocalMultiaddr(),
+ "remote_multiaddr", conn.RemoteMultiaddr())
+ if err := conn.Close(); err != nil {
+ log.Warn("failed to close incoming connection rejected by gater", "err", err)
+ }
+ continue
+ }
+
+ connScope, err := l.rcmgr.OpenConnection(network.DirInbound, true, conn.RemoteMultiaddr())
+ if err != nil {
+ log.Debug("resource manager blocked accept of new connection", "err", err)
+ if err := conn.Close(); err != nil {
+ log.Warn("failed to open incoming connection. Rejected by resource manager", "err", err)
+ }
+ continue
+ }
+ return conn, connScope, nil
+ }
+}
diff --git a/p2p/net/upgrader/listener_test.go b/p2p/net/upgrader/listener_test.go
new file mode 100644
index 0000000000..e2def7cc0c
--- /dev/null
+++ b/p2p/net/upgrader/listener_test.go
@@ -0,0 +1,417 @@
+package upgrader_test
+
+import (
+ "context"
+ "errors"
+ "io"
+ "os"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ mocknetwork "github.com/libp2p/go-libp2p/core/network/mocks"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/sec"
+ "github.com/libp2p/go-libp2p/core/sec/insecure"
+ "github.com/libp2p/go-libp2p/core/transport"
+ "github.com/libp2p/go-libp2p/p2p/net/upgrader"
+
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/mock/gomock"
+)
+
+func createListener(t *testing.T, u transport.Upgrader) transport.Listener {
+ t.Helper()
+ addr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/0")
+ require.NoError(t, err)
+ ln, err := manet.Listen(addr)
+ require.NoError(t, err)
+ return u.UpgradeGatedMaListener(nil, u.GateMaListener(ln))
+}
+
+func TestAcceptSingleConn(t *testing.T) {
+ require := require.New(t)
+
+ id, u := createUpgrader(t)
+ ln := createListener(t, u)
+ defer ln.Close()
+
+ cconn, err := dial(t, u, ln.Multiaddr(), id, &network.NullScope{})
+ require.NoError(err)
+
+ sconn, err := ln.Accept()
+ require.NoError(err)
+
+ testConn(t, cconn, sconn)
+}
+
+func TestAcceptMultipleConns(t *testing.T) {
+ require := require.New(t)
+
+ id, u := createUpgrader(t)
+ ln := createListener(t, u)
+ defer ln.Close()
+
+ var toClose []io.Closer
+ defer func() {
+ for _, c := range toClose {
+ _ = c.Close()
+ }
+ }()
+
+ for i := 0; i < 10; i++ {
+ cconn, err := dial(t, u, ln.Multiaddr(), id, &network.NullScope{})
+ require.NoError(err)
+ toClose = append(toClose, cconn)
+
+ sconn, err := ln.Accept()
+ require.NoError(err)
+ toClose = append(toClose, sconn)
+
+ testConn(t, cconn, sconn)
+ }
+}
+
+func TestConnectionsClosedIfNotAccepted(t *testing.T) {
+ require := require.New(t)
+
+ var timeout = 100 * time.Millisecond
+ if os.Getenv("CI") != "" {
+ timeout = 500 * time.Millisecond
+ }
+
+ id, u := createUpgraderWithOpts(t, upgrader.WithAcceptTimeout(timeout))
+ ln := createListener(t, u)
+ defer ln.Close()
+
+ conn, err := dial(t, u, ln.Multiaddr(), id, &network.NullScope{})
+ require.NoError(err)
+
+ errCh := make(chan error)
+ go func() {
+ defer conn.Close()
+ str, err := conn.OpenStream(context.Background())
+ if err != nil {
+ errCh <- err
+ return
+ }
+ // start a Read. It will block until the connection is closed
+ _, _ = str.Read([]byte{0})
+ errCh <- nil
+ }()
+
+ time.Sleep(timeout / 2)
+ select {
+ case err := <-errCh:
+ t.Fatalf("connection closed earlier than expected. expected nothing on channel, got: %v", err)
+ default:
+ }
+
+ time.Sleep(timeout)
+ require.NoError(<-errCh)
+}
+
+func TestFailedUpgradeOnListen(t *testing.T) {
+ require := require.New(t)
+
+ id, u := createUpgraderWithMuxers(t, []upgrader.StreamMuxer{{ID: "errorMuxer", Muxer: &errorMuxer{}}}, nil, nil)
+ ln := createListener(t, u)
+
+ errCh := make(chan error)
+ go func() {
+ _, err := ln.Accept()
+ errCh <- err
+ }()
+
+ _, err := dial(t, u, ln.Multiaddr(), id, &network.NullScope{})
+ require.Error(err)
+
+ // close the listener.
+ ln.Close()
+ require.Error(<-errCh)
+}
+
+func TestListenerClose(t *testing.T) {
+ require := require.New(t)
+
+ _, u := createUpgrader(t)
+ ln := createListener(t, u)
+
+ errCh := make(chan error)
+ go func() {
+ _, err := ln.Accept()
+ errCh <- err
+ }()
+
+ select {
+ case err := <-errCh:
+ t.Fatalf("connection closed earlier than expected. expected nothing on channel, got: %v", err)
+ case <-time.After(200 * time.Millisecond):
+ // nothing in 200ms.
+ }
+
+ // unblocks Accept when it is closed.
+ require.NoError(ln.Close())
+ err := <-errCh
+ require.Error(err)
+ require.Equal(err, transport.ErrListenerClosed)
+
+ // doesn't accept new connections when it is closed
+ _, err = dial(t, u, ln.Multiaddr(), peer.ID("1"), &network.NullScope{})
+ require.Error(err)
+}
+
+func TestListenerCloseClosesQueued(t *testing.T) {
+ require := require.New(t)
+
+ id, upgrader := createUpgrader(t)
+ ln := createListener(t, upgrader)
+
+ var conns []transport.CapableConn
+ for i := 0; i < 10; i++ {
+ conn, err := dial(t, upgrader, ln.Multiaddr(), id, &network.NullScope{})
+ require.NoError(err)
+ conns = append(conns, conn)
+ }
+
+ // wait for all the dials to happen.
+ time.Sleep(500 * time.Millisecond)
+
+ // all the connections are opened.
+ for _, c := range conns {
+ require.False(c.IsClosed())
+ }
+
+ // expect that all the connections will be closed.
+ err := ln.Close()
+ require.NoError(err)
+
+ // all the connections are closed.
+ require.Eventually(func() bool {
+ for _, c := range conns {
+ if !c.IsClosed() {
+ return false
+ }
+ }
+ return true
+ }, 3*time.Second, 100*time.Millisecond)
+
+ for _, c := range conns {
+ _ = c.Close()
+ }
+}
+
+func TestConcurrentAccept(t *testing.T) {
+ var num = 3 * upgrader.AcceptQueueLength
+
+ blockingMuxer := newBlockingMuxer()
+ id, u := createUpgraderWithMuxers(t, []upgrader.StreamMuxer{{ID: "blockingMuxer", Muxer: blockingMuxer}}, nil, nil)
+ ln := createListener(t, u)
+ defer ln.Close()
+
+ accepted := make(chan transport.CapableConn, num)
+ go func() {
+ for {
+ conn, err := ln.Accept()
+ if err != nil {
+ return
+ }
+ _ = conn.Close()
+ accepted <- conn
+ }
+ }()
+
+ // start num dials, which all block while setting up the muxer
+ errCh := make(chan error, num)
+ var wg sync.WaitGroup
+ for i := 0; i < num; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+
+ conn, err := dial(t, u, ln.Multiaddr(), id, &network.NullScope{})
+ if err != nil {
+ errCh <- err
+ return
+ }
+ defer conn.Close()
+
+ _, err = conn.AcceptStream() // wait for conn to be accepted.
+ errCh <- err
+ }()
+ }
+
+ time.Sleep(200 * time.Millisecond)
+ // the dials are still blocked, so we shouldn't have any connection available yet
+ require.Empty(t, accepted)
+ blockingMuxer.Unblock() // make all dials succeed
+ require.Eventually(t, func() bool { return len(accepted) == num }, 3*time.Second, 100*time.Millisecond)
+ wg.Wait()
+}
+
+func TestAcceptQueueBacklogged(t *testing.T) {
+ require := require.New(t)
+
+ id, u := createUpgrader(t)
+ ln := createListener(t, u)
+ defer ln.Close()
+
+ // setup AcceptQueueLength connections, but don't accept any of them
+ var counter atomic.Int32
+ doDial := func() {
+ conn, err := dial(t, u, ln.Multiaddr(), id, &network.NullScope{})
+ require.NoError(err)
+ counter.Add(1)
+ t.Cleanup(func() { conn.Close() })
+ }
+
+ for i := 0; i < upgrader.AcceptQueueLength; i++ {
+ go doDial()
+ }
+
+ require.Eventually(func() bool { return int(counter.Load()) == upgrader.AcceptQueueLength }, 2*time.Second, 50*time.Millisecond)
+
+ // dial a new connection. This connection should not complete setup, since the queue is full
+ go doDial()
+
+ time.Sleep(100 * time.Millisecond)
+ require.Equal(int(counter.Load()), upgrader.AcceptQueueLength)
+
+ // accept a single connection. Now the new connection should be set up, and fill the queue again
+ conn, err := ln.Accept()
+ require.NoError(err)
+ require.NoError(conn.Close())
+
+ require.Eventually(func() bool { return int(counter.Load()) == upgrader.AcceptQueueLength+1 }, 2*time.Second, 50*time.Millisecond)
+}
+
+func TestListenerConnectionGater(t *testing.T) {
+ require := require.New(t)
+
+ testGater := &testGater{}
+ id, u := createUpgraderWithConnGater(t, testGater)
+
+ ln := createListener(t, u)
+ defer ln.Close()
+
+ // no gating.
+ conn, err := dial(t, u, ln.Multiaddr(), id, &network.NullScope{})
+ require.NoError(err)
+ require.False(conn.IsClosed())
+ _ = conn.Close()
+
+ // rejecting after handshake.
+ testGater.BlockSecured(true)
+ testGater.BlockAccept(false)
+ conn, err = dial(t, u, ln.Multiaddr(), "invalid", &network.NullScope{})
+ require.Error(err)
+ require.Nil(conn)
+
+	// rejecting on accept will trigger first.
+ testGater.BlockSecured(true)
+ testGater.BlockAccept(true)
+ conn, err = dial(t, u, ln.Multiaddr(), "invalid", &network.NullScope{})
+ require.Error(err)
+ require.Nil(conn)
+
+ // rejecting only on acceptance.
+ testGater.BlockSecured(false)
+ testGater.BlockAccept(true)
+ conn, err = dial(t, u, ln.Multiaddr(), "invalid", &network.NullScope{})
+ require.Error(err)
+ require.Nil(conn)
+
+ // back to normal
+ testGater.BlockSecured(false)
+ testGater.BlockAccept(false)
+ conn, err = dial(t, u, ln.Multiaddr(), id, &network.NullScope{})
+ require.NoError(err)
+ require.False(conn.IsClosed())
+ _ = conn.Close()
+}
+
+func TestListenerResourceManagement(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+ rcmgr := mocknetwork.NewMockResourceManager(ctrl)
+ id, upgrader := createUpgraderWithResourceManager(t, rcmgr)
+ ln := createListener(t, upgrader)
+ defer ln.Close()
+
+ connScope := mocknetwork.NewMockConnManagementScope(ctrl)
+ gomock.InOrder(
+ rcmgr.EXPECT().OpenConnection(network.DirInbound, true, gomock.Not(ln.Multiaddr())).Return(connScope, nil),
+ connScope.EXPECT().PeerScope(),
+ connScope.EXPECT().SetPeer(id),
+ connScope.EXPECT().PeerScope(),
+ )
+
+ cconn, err := dial(t, upgrader, ln.Multiaddr(), id, &network.NullScope{})
+ require.NoError(t, err)
+ defer cconn.Close()
+
+ sconn, err := ln.Accept()
+ require.NoError(t, err)
+ connScope.EXPECT().Done()
+ defer sconn.Close()
+}
+
+func TestListenerResourceManagementDenied(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+ rcmgr := mocknetwork.NewMockResourceManager(ctrl)
+ id, upgrader := createUpgraderWithResourceManager(t, rcmgr)
+ ln := createListener(t, upgrader)
+
+ rcmgr.EXPECT().OpenConnection(network.DirInbound, true, gomock.Not(ln.Multiaddr())).Return(nil, errors.New("nope"))
+ _, err := dial(t, upgrader, ln.Multiaddr(), id, &network.NullScope{})
+ require.Error(t, err)
+
+ done := make(chan struct{})
+ go func() {
+ defer close(done)
+ ln.Accept()
+ }()
+
+ select {
+ case <-done:
+ t.Fatal("accept shouldn't have accepted anything")
+ case <-time.After(50 * time.Millisecond):
+ }
+ require.NoError(t, ln.Close())
+ <-done
+}
+
+func TestNoCommonSecurityProto(t *testing.T) {
+ idA, privA := newPeer(t)
+ idB, privB := newPeer(t)
+ atInsecure := insecure.NewWithIdentity("/plaintext1", idA, privA)
+ btInsecure := insecure.NewWithIdentity("/plaintext2", idB, privB)
+
+ ua, err := upgrader.New([]sec.SecureTransport{atInsecure}, []upgrader.StreamMuxer{{ID: "negotiate", Muxer: &negotiatingMuxer{}}}, nil, nil, nil)
+ require.NoError(t, err)
+ ub, err := upgrader.New([]sec.SecureTransport{btInsecure}, []upgrader.StreamMuxer{{ID: "negotiate", Muxer: &negotiatingMuxer{}}}, nil, nil, nil)
+ require.NoError(t, err)
+
+ ln := createListener(t, ua)
+ done := make(chan struct{})
+ go func() {
+ defer close(done)
+ ln.Accept()
+ }()
+
+ _, err = dial(t, ub, ln.Multiaddr(), idA, &network.NullScope{})
+ require.ErrorContains(t, err, "failed to negotiate security protocol: protocols not supported")
+ select {
+ case <-done:
+ t.Fatal("didn't expect to accept a connection")
+ case <-time.After(50 * time.Millisecond):
+ }
+
+ ln.Close()
+ <-done
+}
diff --git a/p2p/net/upgrader/threshold.go b/p2p/net/upgrader/threshold.go
new file mode 100644
index 0000000000..1e8b112cb8
--- /dev/null
+++ b/p2p/net/upgrader/threshold.go
@@ -0,0 +1,50 @@
+package upgrader
+
+import (
+ "sync"
+)
+
+func newThreshold(cutoff int) *threshold {
+ t := &threshold{
+ threshold: cutoff,
+ }
+ t.cond.L = &t.mu
+ return t
+}
+
+type threshold struct {
+ mu sync.Mutex
+ cond sync.Cond
+
+ count int
+ threshold int
+}
+
+// Acquire increments the counter. It will not block.
+func (t *threshold) Acquire() {
+ t.mu.Lock()
+ t.count++
+ t.mu.Unlock()
+}
+
+// Release decrements the counter.
+func (t *threshold) Release() {
+ t.mu.Lock()
+ if t.count == 0 {
+ panic("negative count")
+ }
+ if t.threshold == t.count {
+ t.cond.Broadcast()
+ }
+ t.count--
+ t.mu.Unlock()
+}
+
+// Wait waits for the counter to drop below the threshold.
+func (t *threshold) Wait() {
+ t.mu.Lock()
+ for t.count >= t.threshold {
+ t.cond.Wait()
+ }
+ t.mu.Unlock()
+}
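+
+// A sketch of how the listener wires this up: the accept loop calls Wait
+// before negotiating a new connection, and every fully negotiated connection
+// holds a unit until it is handed off via Accept:
+//
+//	t := newThreshold(AcceptQueueLength)
+//	t.Wait()    // blocks while count >= threshold
+//	t.Acquire() // record a negotiated-but-unaccepted connection; never blocks
+//	defer t.Release()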
diff --git a/p2p/net/upgrader/upgrader.go b/p2p/net/upgrader/upgrader.go
new file mode 100644
index 0000000000..32139b1226
--- /dev/null
+++ b/p2p/net/upgrader/upgrader.go
@@ -0,0 +1,356 @@
+package upgrader
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/connmgr"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ ipnet "github.com/libp2p/go-libp2p/core/pnet"
+ "github.com/libp2p/go-libp2p/core/protocol"
+ "github.com/libp2p/go-libp2p/core/sec"
+ "github.com/libp2p/go-libp2p/core/transport"
+ "github.com/libp2p/go-libp2p/p2p/net/pnet"
+
+ manet "github.com/multiformats/go-multiaddr/net"
+ mss "github.com/multiformats/go-multistream"
+)
+
+// ErrNilPeer is returned when attempting to upgrade an outbound connection
+// without specifying a peer ID.
+var ErrNilPeer = errors.New("nil peer")
+
+// AcceptQueueLength is the number of connections to fully set up before the listener stops accepting new ones.
+var AcceptQueueLength = 16
+
+const (
+ defaultAcceptTimeout = 15 * time.Second
+ defaultNegotiateTimeout = 60 * time.Second
+)
+
+type Option func(*upgrader) error
+
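+// WithAcceptTimeout sets the maximum duration an Accept (including protocol
+// negotiation and the security handshake) is allowed to take.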
+func WithAcceptTimeout(t time.Duration) Option {
+ return func(u *upgrader) error {
+ u.acceptTimeout = t
+ return nil
+ }
+}
+
+type StreamMuxer struct {
+ ID protocol.ID
+ Muxer network.Multiplexer
+}
+
+// Upgrader is a multistream upgrader that can upgrade an underlying connection
+// to a full transport connection (secure and multiplexed).
+type upgrader struct {
+ psk ipnet.PSK
+ connGater connmgr.ConnectionGater
+ rcmgr network.ResourceManager
+
+ muxerMuxer *mss.MultistreamMuxer[protocol.ID]
+ muxers []StreamMuxer
+ muxerIDs []protocol.ID
+
+ security []sec.SecureTransport
+ securityMuxer *mss.MultistreamMuxer[protocol.ID]
+ securityIDs []protocol.ID
+
+ // AcceptTimeout is the maximum duration an Accept is allowed to take.
+	// This covers the time from accepting the raw network connection through
+	// protocol selection and the handshake, if applicable.
+ //
+ // If unset, the default value (15s) is used.
+ acceptTimeout time.Duration
+}
+
+var _ transport.Upgrader = &upgrader{}
+
+func New(security []sec.SecureTransport, muxers []StreamMuxer, psk ipnet.PSK, rcmgr network.ResourceManager, connGater connmgr.ConnectionGater, opts ...Option) (transport.Upgrader, error) {
+ u := &upgrader{
+ acceptTimeout: defaultAcceptTimeout,
+ rcmgr: rcmgr,
+ connGater: connGater,
+ psk: psk,
+ muxerMuxer: mss.NewMultistreamMuxer[protocol.ID](),
+ muxers: muxers,
+ security: security,
+ securityMuxer: mss.NewMultistreamMuxer[protocol.ID](),
+ }
+ for _, opt := range opts {
+ if err := opt(u); err != nil {
+ return nil, err
+ }
+ }
+ if u.rcmgr == nil {
+ u.rcmgr = &network.NullResourceManager{}
+ }
+ u.muxerIDs = make([]protocol.ID, 0, len(muxers))
+ for _, m := range muxers {
+ u.muxerMuxer.AddHandler(m.ID, nil)
+ u.muxerIDs = append(u.muxerIDs, m.ID)
+ }
+ u.securityIDs = make([]protocol.ID, 0, len(security))
+ for _, s := range security {
+ u.securityMuxer.AddHandler(s.ID(), nil)
+ u.securityIDs = append(u.securityIDs, s.ID())
+ }
+ return u, nil
+}
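+
+// As a usage sketch (the security transport and muxer below are assumptions
+// for illustration; any sec.SecureTransport and network.Multiplexer work):
+//
+//	u, err := New(
+//		[]sec.SecureTransport{mySecureTransport},            // hypothetical
+//		[]StreamMuxer{{ID: "/yamux/1.0.0", Muxer: myMuxer}}, // hypothetical muxer
+//		nil, // no private network PSK
+//		nil, // defaults to the null resource manager
+//		nil, // no connection gater
+//	)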
+
+// UpgradeListener upgrades the passed multiaddr-net listener into a full libp2p-transport listener.
+func (u *upgrader) UpgradeListener(t transport.Transport, list manet.Listener) transport.Listener {
+ return u.UpgradeGatedMaListener(t, u.GateMaListener(list))
+}
+
+func (u *upgrader) GateMaListener(l manet.Listener) transport.GatedMaListener {
+ return &gatedMaListener{
+ Listener: l,
+ rcmgr: u.rcmgr,
+ connGater: u.connGater,
+ }
+}
+
+// UpgradeGatedMaListener upgrades the passed multiaddr-net listener into a full libp2p-transport listener.
+func (u *upgrader) UpgradeGatedMaListener(t transport.Transport, l transport.GatedMaListener) transport.Listener {
+ ctx, cancel := context.WithCancel(context.Background())
+ list := &listener{
+ GatedMaListener: l,
+ upgrader: u,
+ transport: t,
+ rcmgr: u.rcmgr,
+ threshold: newThreshold(AcceptQueueLength),
+ incoming: make(chan transport.CapableConn),
+ cancel: cancel,
+ ctx: ctx,
+ }
+ go list.handleIncoming()
+ return list
+}
+
+// Upgrade upgrades the multiaddr/net connection into a full libp2p-transport connection.
+func (u *upgrader) Upgrade(ctx context.Context, t transport.Transport, maconn manet.Conn, dir network.Direction, p peer.ID, connScope network.ConnManagementScope) (transport.CapableConn, error) {
+ c, err := u.upgrade(ctx, t, maconn, dir, p, connScope)
+ if err != nil {
+ connScope.Done()
+ return nil, err
+ }
+ return c, nil
+}
+
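+// upgrade performs the upgrade sequence: optional private-network protection,
+// security handshake, connection gating, resource-manager peer attribution,
+// and finally stream-muxer negotiation.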
+func (u *upgrader) upgrade(ctx context.Context, t transport.Transport, maconn manet.Conn, dir network.Direction, p peer.ID, connScope network.ConnManagementScope) (transport.CapableConn, error) {
+ if dir == network.DirOutbound && p == "" {
+ return nil, ErrNilPeer
+ }
+ var stat network.ConnStats
+ if cs, ok := maconn.(network.ConnStat); ok {
+ stat = cs.Stat()
+ }
+
+ var conn net.Conn = maconn
+ if u.psk != nil {
+ pconn, err := pnet.NewProtectedConn(u.psk, conn)
+ if err != nil {
+ conn.Close()
+ return nil, fmt.Errorf("failed to setup private network protector: %w", err)
+ }
+ conn = pconn
+ } else if ipnet.ForcePrivateNetwork {
+ log.Error("tried to dial with no Private Network Protector but usage of Private Networks is forced by the environment")
+ return nil, ipnet.ErrNotInPrivateNetwork
+ }
+
+ isServer := dir == network.DirInbound
+ sconn, security, err := u.setupSecurity(ctx, conn, p, isServer)
+ if err != nil {
+ conn.Close()
+ return nil, fmt.Errorf("failed to negotiate security protocol: %w", err)
+ }
+
+ // call the connection gater, if one is registered.
+ if u.connGater != nil && !u.connGater.InterceptSecured(dir, sconn.RemotePeer(), maconn) {
+ if err := maconn.Close(); err != nil {
+ log.Error("failed to close connection", "peer", p, "remote_multiaddr", maconn.RemoteMultiaddr(), "err", err)
+ }
+ return nil, fmt.Errorf("gater rejected connection with peer %s and addr %s with direction %d",
+ sconn.RemotePeer(), maconn.RemoteMultiaddr(), dir)
+ }
+ // Only call SetPeer if it hasn't already been set -- this can happen when we don't know
+ // the peer in advance and in some bug scenarios.
+ if connScope.PeerScope() == nil {
+ if err := connScope.SetPeer(sconn.RemotePeer()); err != nil {
+ log.Debug("resource manager blocked connection for peer", "peer", sconn.RemotePeer(), "remote_addr", conn.RemoteAddr(), "err", err)
+ if err := maconn.Close(); err != nil {
+ log.Error("failed to close connection", "peer", p, "remote_multiaddr", maconn.RemoteMultiaddr(), "err", err)
+ }
+ return nil, fmt.Errorf("resource manager connection with peer %s and addr %s with direction %d",
+ sconn.RemotePeer(), maconn.RemoteMultiaddr(), dir)
+ }
+ }
+
+ muxer, smconn, err := u.setupMuxer(ctx, sconn, isServer, connScope.PeerScope())
+ if err != nil {
+ sconn.Close()
+ return nil, fmt.Errorf("failed to negotiate stream multiplexer: %w", err)
+ }
+
+ tc := &transportConn{
+ MuxedConn: smconn,
+ ConnMultiaddrs: maconn,
+ ConnSecurity: sconn,
+ transport: t,
+ stat: stat,
+ scope: connScope,
+ muxer: muxer,
+ security: security,
+ usedEarlyMuxerNegotiation: sconn.ConnState().UsedEarlyMuxerNegotiation,
+ }
+ return tc, nil
+}
+
+func (u *upgrader) setupSecurity(ctx context.Context, conn net.Conn, p peer.ID, isServer bool) (sec.SecureConn, protocol.ID, error) {
+ st, err := u.negotiateSecurity(ctx, conn, isServer)
+ if err != nil {
+ return nil, "", err
+ }
+ if isServer {
+ sconn, err := st.SecureInbound(ctx, conn, p)
+ return sconn, st.ID(), err
+ }
+ sconn, err := st.SecureOutbound(ctx, conn, p)
+ return sconn, st.ID(), err
+}
+
+func (u *upgrader) negotiateMuxer(nc net.Conn, isServer bool) (*StreamMuxer, error) {
+ if err := nc.SetDeadline(time.Now().Add(defaultNegotiateTimeout)); err != nil {
+ return nil, err
+ }
+
+ var proto protocol.ID
+ if isServer {
+ selected, _, err := u.muxerMuxer.Negotiate(nc)
+ if err != nil {
+ return nil, err
+ }
+ proto = selected
+ } else {
+ selected, err := mss.SelectOneOf(u.muxerIDs, nc)
+ if err != nil {
+ return nil, err
+ }
+ proto = selected
+ }
+
+ if err := nc.SetDeadline(time.Time{}); err != nil {
+ return nil, err
+ }
+
+ if m := u.getMuxerByID(proto); m != nil {
+ return m, nil
+ }
+ return nil, fmt.Errorf("selected protocol we don't have a transport for")
+}
+
+func (u *upgrader) getMuxerByID(id protocol.ID) *StreamMuxer {
+ for _, m := range u.muxers {
+ if m.ID == id {
+ return &m
+ }
+ }
+ return nil
+}
+
+func (u *upgrader) setupMuxer(ctx context.Context, conn sec.SecureConn, server bool, scope network.PeerScope) (protocol.ID, network.MuxedConn, error) {
+ muxerSelected := conn.ConnState().StreamMultiplexer
+ // Use muxer selected from security handshake if available. Otherwise fall back to multistream-selection.
+ if len(muxerSelected) > 0 {
+ m := u.getMuxerByID(muxerSelected)
+ if m == nil {
+ return "", nil, fmt.Errorf("selected a muxer we don't know: %s", muxerSelected)
+ }
+ c, err := m.Muxer.NewConn(conn, server, scope)
+ if err != nil {
+ return "", nil, err
+ }
+ return muxerSelected, c, nil
+ }
+
+ type result struct {
+ smconn network.MuxedConn
+ muxerID protocol.ID
+ err error
+ }
+
+ done := make(chan result, 1)
+ // TODO: The muxer should take a context.
+ go func() {
+ m, err := u.negotiateMuxer(conn, server)
+ if err != nil {
+ done <- result{err: err}
+ return
+ }
+ smconn, err := m.Muxer.NewConn(conn, server, scope)
+ done <- result{smconn: smconn, muxerID: m.ID, err: err}
+ }()
+
+ select {
+ case r := <-done:
+ return r.muxerID, r.smconn, r.err
+ case <-ctx.Done():
+ // interrupt this process
+ conn.Close()
+ // wait to finish
+ <-done
+ return "", nil, ctx.Err()
+ }
+}
+
+func (u *upgrader) getSecurityByID(id protocol.ID) sec.SecureTransport {
+ for _, s := range u.security {
+ if s.ID() == id {
+ return s
+ }
+ }
+ return nil
+}
+
+func (u *upgrader) negotiateSecurity(ctx context.Context, insecure net.Conn, server bool) (sec.SecureTransport, error) {
+ type result struct {
+ proto protocol.ID
+ err error
+ }
+
+ done := make(chan result, 1)
+ go func() {
+ if server {
+ var r result
+ r.proto, _, r.err = u.securityMuxer.Negotiate(insecure)
+ done <- r
+ return
+ }
+ var r result
+ r.proto, r.err = mss.SelectOneOf(u.securityIDs, insecure)
+ done <- r
+ }()
+
+ select {
+ case r := <-done:
+ if r.err != nil {
+ return nil, r.err
+ }
+ if s := u.getSecurityByID(r.proto); s != nil {
+ return s, nil
+ }
+ return nil, fmt.Errorf("selected unknown security transport: %s", r.proto)
+ case <-ctx.Done():
+ // We *must* do this. We have outstanding work on the connection, and it's no longer safe to use.
+ insecure.Close()
+ <-done // wait to stop using the connection.
+ return nil, ctx.Err()
+ }
+}
diff --git a/p2p/net/upgrader/upgrader_test.go b/p2p/net/upgrader/upgrader_test.go
new file mode 100644
index 0000000000..95fa49cbf7
--- /dev/null
+++ b/p2p/net/upgrader/upgrader_test.go
@@ -0,0 +1,204 @@
+package upgrader_test
+
+import (
+ "context"
+ "crypto/rand"
+ "errors"
+ "net"
+ "testing"
+
+ "github.com/libp2p/go-libp2p/core/connmgr"
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/network"
+ mocknetwork "github.com/libp2p/go-libp2p/core/network/mocks"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/sec"
+ "github.com/libp2p/go-libp2p/core/sec/insecure"
+ "github.com/libp2p/go-libp2p/core/transport"
+ "github.com/libp2p/go-libp2p/p2p/muxer/yamux"
+ "github.com/libp2p/go-libp2p/p2p/net/upgrader"
+
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/mock/gomock"
+)
+
+func createUpgrader(t *testing.T) (peer.ID, transport.Upgrader) {
+ return createUpgraderWithMuxers(t, []upgrader.StreamMuxer{{ID: "negotiate", Muxer: &negotiatingMuxer{}}}, nil, nil)
+}
+
+func createUpgraderWithConnGater(t *testing.T, connGater connmgr.ConnectionGater) (peer.ID, transport.Upgrader) {
+ return createUpgraderWithMuxers(t, []upgrader.StreamMuxer{{ID: "negotiate", Muxer: &negotiatingMuxer{}}}, nil, connGater)
+}
+
+func createUpgraderWithResourceManager(t *testing.T, rcmgr network.ResourceManager) (peer.ID, transport.Upgrader) {
+ return createUpgraderWithMuxers(t, []upgrader.StreamMuxer{{ID: "negotiate", Muxer: &negotiatingMuxer{}}}, rcmgr, nil)
+}
+
+func createUpgraderWithOpts(t *testing.T, opts ...upgrader.Option) (peer.ID, transport.Upgrader) {
+ return createUpgraderWithMuxers(t, []upgrader.StreamMuxer{{ID: "negotiate", Muxer: &negotiatingMuxer{}}}, nil, nil, opts...)
+}
+
+func newPeer(t *testing.T) (peer.ID, crypto.PrivKey) {
+ t.Helper()
+ priv, _, err := crypto.GenerateEd25519Key(rand.Reader)
+ require.NoError(t, err)
+ id, err := peer.IDFromPrivateKey(priv)
+ require.NoError(t, err)
+ return id, priv
+}
+
+func createUpgraderWithMuxers(t *testing.T, muxers []upgrader.StreamMuxer, rcmgr network.ResourceManager, connGater connmgr.ConnectionGater, opts ...upgrader.Option) (peer.ID, transport.Upgrader) {
+ id, priv := newPeer(t)
+ u, err := upgrader.New([]sec.SecureTransport{insecure.NewWithIdentity(insecure.ID, id, priv)}, muxers, nil, rcmgr, connGater, opts...)
+ require.NoError(t, err)
+ return id, u
+}
+
+// negotiatingMuxer sets up a new yamux connection
+// It makes sure that this happens at the same time for client and server.
+type negotiatingMuxer struct{}
+
+func (m *negotiatingMuxer) NewConn(c net.Conn, isServer bool, scope network.PeerScope) (network.MuxedConn, error) {
+ var err error
+ // run a fake muxer negotiation
+ if isServer {
+ _, err = c.Write([]byte("setup"))
+ } else {
+ _, err = c.Read(make([]byte, 5))
+ }
+ if err != nil {
+ return nil, err
+ }
+ return yamux.DefaultTransport.NewConn(c, isServer, scope)
+}
+
+// blockingMuxer blocks the muxer negotiation until the contained chan is closed
+type blockingMuxer struct {
+ unblock chan struct{}
+}
+
+var _ network.Multiplexer = &blockingMuxer{}
+
+func newBlockingMuxer() *blockingMuxer {
+ return &blockingMuxer{unblock: make(chan struct{})}
+}
+
+func (m *blockingMuxer) NewConn(c net.Conn, isServer bool, scope network.PeerScope) (network.MuxedConn, error) {
+ <-m.unblock
+ return (&negotiatingMuxer{}).NewConn(c, isServer, scope)
+}
+
+func (m *blockingMuxer) Unblock() {
+ close(m.unblock)
+}
+
+// errorMuxer is a muxer that errors while setting up
+type errorMuxer struct{}
+
+var _ network.Multiplexer = &errorMuxer{}
+
+func (m *errorMuxer) NewConn(_ net.Conn, _ bool, _ network.PeerScope) (network.MuxedConn, error) {
+ return nil, errors.New("mux error")
+}
+
+func testConn(t *testing.T, clientConn, serverConn transport.CapableConn) {
+ t.Helper()
+ require := require.New(t)
+
+ cstr, err := clientConn.OpenStream(context.Background())
+ require.NoError(err)
+
+ _, err = cstr.Write([]byte("foobar"))
+ require.NoError(err)
+
+ sstr, err := serverConn.AcceptStream()
+ require.NoError(err)
+
+ b := make([]byte, 6)
+ _, err = sstr.Read(b)
+ require.NoError(err)
+ require.Equal([]byte("foobar"), b)
+}
+
+func dial(t *testing.T, upgrader transport.Upgrader, raddr ma.Multiaddr, p peer.ID, scope network.ConnManagementScope) (transport.CapableConn, error) {
+ t.Helper()
+
+ macon, err := manet.Dial(raddr)
+ if err != nil {
+ return nil, err
+ }
+ return upgrader.Upgrade(context.Background(), nil, macon, network.DirOutbound, p, scope)
+}
+
+func TestOutboundConnectionGating(t *testing.T) {
+ require := require.New(t)
+
+ id, u := createUpgrader(t)
+ ln := createListener(t, u)
+ defer ln.Close()
+
+ testGater := &testGater{}
+ _, dialUpgrader := createUpgraderWithConnGater(t, testGater)
+ conn, err := dial(t, dialUpgrader, ln.Multiaddr(), id, &network.NullScope{})
+ require.NoError(err)
+ require.NotNil(conn)
+ _ = conn.Close()
+
+ // blocking accepts doesn't affect the dialling side, only the listener.
+ testGater.BlockAccept(true)
+ conn, err = dial(t, dialUpgrader, ln.Multiaddr(), id, &network.NullScope{})
+ require.NoError(err)
+ require.NotNil(conn)
+ _ = conn.Close()
+
+ // now let's block all connections after being secured.
+ testGater.BlockSecured(true)
+ conn, err = dial(t, dialUpgrader, ln.Multiaddr(), id, &network.NullScope{})
+ require.Error(err)
+ require.Contains(err.Error(), "gater rejected connection")
+ require.Nil(conn)
+}
+
+func TestOutboundResourceManagement(t *testing.T) {
+ t.Run("successful handshake", func(t *testing.T) {
+ id, upgrader := createUpgrader(t)
+ ln := createListener(t, upgrader)
+ defer ln.Close()
+
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+ connScope := mocknetwork.NewMockConnManagementScope(ctrl)
+ gomock.InOrder(
+ connScope.EXPECT().PeerScope(),
+ connScope.EXPECT().SetPeer(id),
+ connScope.EXPECT().PeerScope().Return(&network.NullScope{}),
+ )
+ _, dialUpgrader := createUpgrader(t)
+ conn, err := dial(t, dialUpgrader, ln.Multiaddr(), id, connScope)
+ require.NoError(t, err)
+ require.NotNil(t, conn)
+ connScope.EXPECT().Done()
+ require.NoError(t, conn.Close())
+ })
+
+ t.Run("failed negotiation", func(t *testing.T) {
+ id, upgrader := createUpgraderWithMuxers(t, []upgrader.StreamMuxer{{ID: "errorMuxer", Muxer: &errorMuxer{}}}, nil, nil)
+ ln := createListener(t, upgrader)
+ defer ln.Close()
+
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+ connScope := mocknetwork.NewMockConnManagementScope(ctrl)
+ gomock.InOrder(
+ connScope.EXPECT().PeerScope(),
+ connScope.EXPECT().SetPeer(id),
+ connScope.EXPECT().PeerScope().Return(&network.NullScope{}),
+ connScope.EXPECT().Done(),
+ )
+ _, dialUpgrader := createUpgrader(t)
+ _, err := dial(t, dialUpgrader, ln.Multiaddr(), id, connScope)
+ require.Error(t, err)
+ })
+}
diff --git a/p2p/protocol/autonatv2/autonat.go b/p2p/protocol/autonatv2/autonat.go
new file mode 100644
index 0000000000..248830524e
--- /dev/null
+++ b/p2p/protocol/autonatv2/autonat.go
@@ -0,0 +1,294 @@
+package autonatv2
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "iter"
+ "math/rand/v2"
+ "slices"
+ "sync"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/event"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ logging "github.com/libp2p/go-libp2p/gologshim"
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
+const (
+ ServiceName = "libp2p.autonatv2"
+ DialBackProtocol = "/libp2p/autonat/2/dial-back"
+ DialProtocol = "/libp2p/autonat/2/dial-request"
+
+ maxMsgSize = 8192
+ streamTimeout = 15 * time.Second
+ dialBackStreamTimeout = 5 * time.Second
+ dialBackDialTimeout = 10 * time.Second
+ dialBackMaxMsgSize = 1024
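+	// The handshake-size bounds below support amplification-attack prevention:
+	// the server can require the client to upload roughly as many bytes as a
+	// dial-back would send before dialing, so requesting dials toward a victim
+	// address cannot amplify traffic.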
+ minHandshakeSizeBytes = 30_000 // for amplification attack prevention
+ maxHandshakeSizeBytes = 100_000
+ // maxPeerAddresses is the number of addresses in a dial request the server
+	// will inspect; the rest are ignored.
+ maxPeerAddresses = 50
+
+ defaultThrottlePeerDuration = 2 * time.Minute
+)
+
+var (
+ // ErrNoPeers is returned when the client knows no autonatv2 servers.
+ ErrNoPeers = errors.New("no peers for autonat v2")
+ // ErrPrivateAddrs is returned when the request has private IP addresses.
+ ErrPrivateAddrs = errors.New("private addresses cannot be verified with autonatv2")
+
+ log = logging.Logger("autonatv2")
+)
+
+// Request is the request to verify reachability of a single address
+type Request struct {
+ // Addr is the multiaddr to verify
+ Addr ma.Multiaddr
+ // SendDialData indicates whether to send dial data if the server requests it for Addr
+ SendDialData bool
+}
+
+// Result is the result of the CheckReachability call
+type Result struct {
+ // Addr is the dialed address
+ Addr ma.Multiaddr
+ // Idx is the index of the address that was dialed
+ Idx int
+ // Reachability is the reachability for `Addr`
+ Reachability network.Reachability
+ // AllAddrsRefused is true when the server refused to dial all the addresses in the request.
+ AllAddrsRefused bool
+}
+
+// AutoNAT implements the AutoNAT v2 client and server.
+// Users can check reachability for their addresses using the CheckReachability method.
+// The server provides amplification attack prevention and rate limiting.
+type AutoNAT struct {
+ host host.Host
+
+ // for cleanly closing
+ ctx context.Context
+ cancel context.CancelFunc
+ wg sync.WaitGroup
+
+ srv *server
+ cli *client
+
+ mx sync.Mutex
+ peers *peersMap
+ throttlePeer map[peer.ID]time.Time
+ // throttlePeerDuration is the duration to wait before making another dial request to the
+ // same server.
+ throttlePeerDuration time.Duration
+ // allowPrivateAddrs enables using private and localhost addresses for reachability checks.
+ // This is only useful for testing.
+ allowPrivateAddrs bool
+}
+
+// New returns a new AutoNAT instance.
+// The host passed to Start and dialerHost should have the same dialing capabilities. If the host
+// doesn't support a transport, dial-back requests for addresses of that transport will be ignored.
+func New(dialerHost host.Host, opts ...AutoNATOption) (*AutoNAT, error) {
+ s := defaultSettings()
+ for _, o := range opts {
+ if err := o(s); err != nil {
+ return nil, fmt.Errorf("failed to apply option: %w", err)
+ }
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+ an := &AutoNAT{
+ ctx: ctx,
+ cancel: cancel,
+ srv: newServer(dialerHost, s),
+ cli: newClient(s),
+ allowPrivateAddrs: s.allowPrivateAddrs,
+ peers: newPeersMap(),
+ throttlePeer: make(map[peer.ID]time.Time),
+ throttlePeerDuration: s.throttlePeerDuration,
+ }
+ return an, nil
+}
+
+func (an *AutoNAT) background(sub event.Subscription) {
+	ticker := time.NewTicker(10 * time.Minute)
+	defer ticker.Stop()
+ for {
+ select {
+ case <-an.ctx.Done():
+ sub.Close()
+ an.wg.Done()
+ return
+ case e := <-sub.Out():
+ switch evt := e.(type) {
+ case event.EvtPeerProtocolsUpdated:
+ an.updatePeer(evt.Peer)
+ case event.EvtPeerConnectednessChanged:
+ an.updatePeer(evt.Peer)
+ case event.EvtPeerIdentificationCompleted:
+ an.updatePeer(evt.Peer)
+ default:
+ log.Error("unexpected event", "event_type", fmt.Sprintf("%T", e))
+ }
+ case <-ticker.C:
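+			// prune expired per-peer dial-request throttle entries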
+ now := time.Now()
+ an.mx.Lock()
+ for p, t := range an.throttlePeer {
+ if t.Before(now) {
+ delete(an.throttlePeer, p)
+ }
+ }
+ an.mx.Unlock()
+ }
+ }
+}
+
+func (an *AutoNAT) Start(h host.Host) error {
+ an.host = h
+	// Listen on event.EvtPeerProtocolsUpdated, event.EvtPeerConnectednessChanged, and
+	// event.EvtPeerIdentificationCompleted to maintain our set of autonat-supporting peers.
+ sub, err := an.host.EventBus().Subscribe([]interface{}{
+ new(event.EvtPeerProtocolsUpdated),
+ new(event.EvtPeerConnectednessChanged),
+ new(event.EvtPeerIdentificationCompleted),
+ })
+ if err != nil {
+ return fmt.Errorf("event subscription failed: %w", err)
+ }
+ an.cli.Start(h)
+ an.srv.Start(h)
+
+ an.wg.Add(1)
+ go an.background(sub)
+ return nil
+}
+
+func (an *AutoNAT) Close() {
+ an.cancel()
+ an.wg.Wait()
+ an.srv.Close()
+ an.cli.Close()
+ an.peers = nil
+}
+
+// GetReachability makes a single dial request to check reachability for the requested addresses.
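+//
+// A sketch of a typical call (addrs stands in for the host's advertised
+// addresses; an assumption for illustration):
+//
+//	res, err := an.GetReachability(ctx, []Request{
+//		{Addr: addrs[0], SendDialData: true},
+//		{Addr: addrs[1]},
+//	})
+//	// res.Reachability is the verdict for res.Addr; res.AllAddrsRefused
+//	// reports that the server declined to dial any address.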
+func (an *AutoNAT) GetReachability(ctx context.Context, reqs []Request) (Result, error) {
+ var filteredReqs []Request
+ if !an.allowPrivateAddrs {
+ filteredReqs = make([]Request, 0, len(reqs))
+ for _, r := range reqs {
+ if manet.IsPublicAddr(r.Addr) {
+ filteredReqs = append(filteredReqs, r)
+ } else {
+ log.Error("private address in reachability check", "address", r.Addr)
+ }
+ }
+ if len(filteredReqs) == 0 {
+ return Result{}, ErrPrivateAddrs
+ }
+ } else {
+ filteredReqs = reqs
+ }
+ an.mx.Lock()
+ now := time.Now()
+ var p peer.ID
+ for pr := range an.peers.Shuffled() {
+ if t := an.throttlePeer[pr]; t.After(now) {
+ continue
+ }
+ p = pr
+ an.throttlePeer[p] = time.Now().Add(an.throttlePeerDuration)
+ break
+ }
+ an.mx.Unlock()
+ if p == "" {
+ return Result{}, ErrNoPeers
+ }
+ res, err := an.cli.GetReachability(ctx, p, filteredReqs)
+ if err != nil {
+ log.Debug("reachability check failed", "peer", p, "err", err)
+ return res, fmt.Errorf("reachability check with %s failed: %w", p, err)
+ }
+ // restore the correct index in case we'd filtered private addresses
+ for i, r := range reqs {
+ if r.Addr.Equal(res.Addr) {
+ res.Idx = i
+ break
+ }
+ }
+ log.Debug("reachability check successful", "peer", p)
+ return res, nil
+}
+
+func (an *AutoNAT) updatePeer(p peer.ID) {
+ an.mx.Lock()
+ defer an.mx.Unlock()
+
+	// There are no ordering guarantees between identify and swarm events. Check the peerstore
+	// and swarm for the current state.
+ protos, err := an.host.Peerstore().SupportsProtocols(p, DialProtocol)
+ connectedness := an.host.Network().Connectedness(p)
+ if err == nil && connectedness == network.Connected && slices.Contains(protos, DialProtocol) {
+ an.peers.Put(p)
+ } else {
+ an.peers.Delete(p)
+ }
+}
+
+// peersMap provides random access to a set of peers. This is useful when the map iteration order is
+// not sufficiently random.
+type peersMap struct {
+ peerIdx map[peer.ID]int
+ peers []peer.ID
+}
+
+func newPeersMap() *peersMap {
+ return &peersMap{
+ peerIdx: make(map[peer.ID]int),
+ peers: make([]peer.ID, 0),
+ }
+}
+
+// Shuffled iterates over the peers in circular order from a random starting offset.
+func (p *peersMap) Shuffled() iter.Seq[peer.ID] {
+ n := len(p.peers)
+ start := 0
+ if n > 0 {
+ start = rand.IntN(n)
+ }
+ return func(yield func(peer.ID) bool) {
+ for i := range n {
+ if !yield(p.peers[(i+start)%n]) {
+ return
+ }
+ }
+ }
+}
+
+func (p *peersMap) Put(id peer.ID) {
+ if _, ok := p.peerIdx[id]; ok {
+ return
+ }
+ p.peers = append(p.peers, id)
+ p.peerIdx[id] = len(p.peers) - 1
+}
+
+func (p *peersMap) Delete(id peer.ID) {
+ idx, ok := p.peerIdx[id]
+ if !ok {
+ return
+ }
+ n := len(p.peers)
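+	// swap-delete: move the last peer into the vacated slot so the
+	// backing slice stays compact and deletion is O(1)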
+ lastPeer := p.peers[n-1]
+ p.peers[idx] = lastPeer
+ p.peerIdx[lastPeer] = idx
+ p.peers[n-1] = ""
+ p.peers = p.peers[:n-1]
+ delete(p.peerIdx, id)
+}
diff --git a/p2p/protocol/autonatv2/autonat_test.go b/p2p/protocol/autonatv2/autonat_test.go
new file mode 100644
index 0000000000..7ef2fa223a
--- /dev/null
+++ b/p2p/protocol/autonatv2/autonat_test.go
@@ -0,0 +1,828 @@
+package autonatv2
+
+import (
+ "context"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "math"
+ "net"
+ "net/netip"
+ "strings"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ bhost "github.com/libp2p/go-libp2p/p2p/host/blank"
+ "github.com/libp2p/go-libp2p/p2p/host/eventbus"
+ "github.com/libp2p/go-libp2p/p2p/net/swarm"
+ swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
+ "github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/pb"
+
+ "github.com/libp2p/go-msgio/pbio"
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func newAutoNAT(t testing.TB, dialer host.Host, opts ...AutoNATOption) *AutoNAT {
+ t.Helper()
+ b := eventbus.NewBus()
+ h := bhost.NewBlankHost(
+ swarmt.GenSwarm(t, swarmt.EventBus(b), swarmt.OptDisableWebTransport, swarmt.OptDisableWebRTC), bhost.WithEventBus(b))
+ if dialer == nil {
+ dialer = bhost.NewBlankHost(
+ swarmt.GenSwarm(t,
+ swarmt.WithSwarmOpts(
+ swarm.WithUDPBlackHoleSuccessCounter(nil),
+ swarm.WithIPv6BlackHoleSuccessCounter(nil))))
+ }
+ opts = append([]AutoNATOption{withThrottlePeerDuration(0)}, opts...)
+ an, err := New(dialer, opts...)
+	require.NoError(t, err)
+ require.NoError(t, an.Start(h))
+ t.Cleanup(an.Close)
+ return an
+}
+
+func parseAddrs(t *testing.T, msg *pb.Message) []ma.Multiaddr {
+ t.Helper()
+ req := msg.GetDialRequest()
+ addrs := make([]ma.Multiaddr, 0)
+ for _, ab := range req.Addrs {
+ a, err := ma.NewMultiaddrBytes(ab)
+ if err != nil {
+ t.Error("invalid addr bytes", ab)
+ }
+ addrs = append(addrs, a)
+ }
+ return addrs
+}
+
+// idAndConnect identifies b to a and connects them
+func idAndConnect(t testing.TB, a, b host.Host) {
+ a.Peerstore().AddAddrs(b.ID(), b.Addrs(), peerstore.PermanentAddrTTL)
+ a.Peerstore().AddProtocols(b.ID(), DialProtocol)
+
+ err := a.Connect(context.Background(), peer.AddrInfo{ID: b.ID()})
+ require.NoError(t, err)
+}
+
+// waitForPeer waits for a to have 1 peer in the peerMap
+func waitForPeer(t testing.TB, a *AutoNAT) {
+ t.Helper()
+ require.Eventually(t, func() bool {
+ a.mx.Lock()
+ defer a.mx.Unlock()
+ return len(a.peers.peers) != 0
+ }, 5*time.Second, 100*time.Millisecond)
+}
+
+// idAndWait provides the server's address and protocol to the client and waits for the client to register it.
+func idAndWait(t testing.TB, cli *AutoNAT, srv *AutoNAT) {
+ idAndConnect(t, cli.host, srv.host)
+ waitForPeer(t, cli)
+}
+
+func TestAutoNATPrivateAddr(t *testing.T) {
+ an := newAutoNAT(t, nil)
+ res, err := an.GetReachability(context.Background(), []Request{{Addr: ma.StringCast("/ip4/192.168.0.1/udp/10/quic-v1")}})
+ require.Equal(t, res, Result{})
+ require.ErrorIs(t, err, ErrPrivateAddrs)
+}
+
+func TestClientRequest(t *testing.T) {
+ an := newAutoNAT(t, nil, allowPrivateAddrs)
+ defer an.Close()
+ defer an.host.Close()
+
+ b := bhost.NewBlankHost(swarmt.GenSwarm(t))
+ defer b.Close()
+ idAndConnect(t, an.host, b)
+ waitForPeer(t, an)
+
+ addrs := an.host.Addrs()
+ addrbs := make([][]byte, len(addrs))
+ for i := 0; i < len(addrs); i++ {
+ addrbs[i] = addrs[i].Bytes()
+ }
+
+ var receivedRequest atomic.Bool
+ b.SetStreamHandler(DialProtocol, func(s network.Stream) {
+ receivedRequest.Store(true)
+ r := pbio.NewDelimitedReader(s, maxMsgSize)
+ var msg pb.Message
+ assert.NoError(t, r.ReadMsg(&msg))
+ assert.NotNil(t, msg.GetDialRequest())
+ assert.Equal(t, addrbs, msg.GetDialRequest().Addrs)
+ s.Reset()
+ })
+
+ res, err := an.GetReachability(context.Background(), []Request{
+ {Addr: addrs[0], SendDialData: true}, {Addr: addrs[1]},
+ })
+ require.Equal(t, res, Result{})
+ require.NotNil(t, err)
+ require.True(t, receivedRequest.Load())
+}
+
+func TestClientServerError(t *testing.T) {
+ an := newAutoNAT(t, nil, allowPrivateAddrs)
+ defer an.Close()
+ defer an.host.Close()
+
+ b := bhost.NewBlankHost(swarmt.GenSwarm(t))
+ defer b.Close()
+ idAndConnect(t, an.host, b)
+ waitForPeer(t, an)
+
+ tests := []struct {
+ handler func(network.Stream)
+ errorStr string
+ }{
+ {
+ handler: func(s network.Stream) {
+ s.Reset()
+ },
+ errorStr: "stream reset",
+ },
+ {
+ handler: func(s network.Stream) {
+ w := pbio.NewDelimitedWriter(s)
+ assert.NoError(t, w.WriteMsg(
+ &pb.Message{Msg: &pb.Message_DialRequest{DialRequest: &pb.DialRequest{}}}))
+ },
+ errorStr: "invalid msg type",
+ },
+ }
+
+ for i, tc := range tests {
+ t.Run(fmt.Sprintf("test-%d", i), func(t *testing.T) {
+ b.SetStreamHandler(DialProtocol, tc.handler)
+ addrs := an.host.Addrs()
+ res, err := an.GetReachability(
+ context.Background(),
+ newTestRequests(addrs, false))
+ require.Equal(t, res, Result{})
+ require.NotNil(t, err)
+ require.Contains(t, err.Error(), tc.errorStr)
+ })
+ }
+}
+
+func TestClientDataRequest(t *testing.T) {
+ an := newAutoNAT(t, nil, allowPrivateAddrs)
+ defer an.Close()
+ defer an.host.Close()
+
+ b := bhost.NewBlankHost(swarmt.GenSwarm(t))
+ defer b.Close()
+ idAndConnect(t, an.host, b)
+ waitForPeer(t, an)
+
+ tests := []struct {
+ handler func(network.Stream)
+ name string
+ }{
+ {
+ name: "provides dial data",
+ handler: func(s network.Stream) {
+ r := pbio.NewDelimitedReader(s, maxMsgSize)
+ var msg pb.Message
+ assert.NoError(t, r.ReadMsg(&msg))
+ w := pbio.NewDelimitedWriter(s)
+ if err := w.WriteMsg(&pb.Message{
+ Msg: &pb.Message_DialDataRequest{
+ DialDataRequest: &pb.DialDataRequest{
+ AddrIdx: 0,
+ NumBytes: 10000,
+ },
+ }},
+ ); err != nil {
+ t.Error(err)
+ s.Reset()
+ return
+ }
+ var dialData []byte
+ for len(dialData) < 10000 {
+ if err := r.ReadMsg(&msg); err != nil {
+ t.Error(err)
+ s.Reset()
+ return
+ }
+ if msg.GetDialDataResponse() == nil {
+ t.Errorf("expected to receive msg of type DialDataResponse")
+ s.Reset()
+ return
+ }
+ dialData = append(dialData, msg.GetDialDataResponse().Data...)
+ }
+ s.Reset()
+ },
+ },
+ {
+ name: "low priority addr",
+ handler: func(s network.Stream) {
+ r := pbio.NewDelimitedReader(s, maxMsgSize)
+ var msg pb.Message
+ assert.NoError(t, r.ReadMsg(&msg))
+ w := pbio.NewDelimitedWriter(s)
+ if err := w.WriteMsg(&pb.Message{
+ Msg: &pb.Message_DialDataRequest{
+ DialDataRequest: &pb.DialDataRequest{
+ AddrIdx: 1,
+ NumBytes: 10000,
+ },
+ }},
+ ); err != nil {
+ t.Error(err)
+ s.Reset()
+ return
+ }
+ assert.Error(t, r.ReadMsg(&msg))
+ s.Reset()
+ },
+ },
+ {
+ name: "too high dial data request",
+ handler: func(s network.Stream) {
+ r := pbio.NewDelimitedReader(s, maxMsgSize)
+ var msg pb.Message
+ assert.NoError(t, r.ReadMsg(&msg))
+ w := pbio.NewDelimitedWriter(s)
+ if err := w.WriteMsg(&pb.Message{
+ Msg: &pb.Message_DialDataRequest{
+ DialDataRequest: &pb.DialDataRequest{
+ AddrIdx: 0,
+ NumBytes: 1 << 32,
+ },
+ }},
+ ); err != nil {
+ t.Error(err)
+ s.Reset()
+ return
+ }
+ assert.Error(t, r.ReadMsg(&msg))
+ s.Reset()
+ },
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ b.SetStreamHandler(DialProtocol, tc.handler)
+ addrs := an.host.Addrs()
+
+ res, err := an.GetReachability(
+ context.Background(),
+ []Request{
+ {Addr: addrs[0], SendDialData: true},
+ {Addr: addrs[1]},
+ })
+ require.Equal(t, res, Result{})
+ require.NotNil(t, err)
+ })
+ }
+}
+
+func TestAutoNATPrivateAndPublicAddrs(t *testing.T) {
+ an := newAutoNAT(t, nil)
+ defer an.Close()
+ defer an.host.Close()
+
+ b := bhost.NewBlankHost(swarmt.GenSwarm(t))
+ defer b.Close()
+ idAndConnect(t, an.host, b)
+ waitForPeer(t, an)
+
+ dialerHost := bhost.NewBlankHost(swarmt.GenSwarm(t))
+ defer dialerHost.Close()
+ handler := func(s network.Stream) {
+ w := pbio.NewDelimitedWriter(s)
+ r := pbio.NewDelimitedReader(s, maxMsgSize)
+ var msg pb.Message
+ assert.NoError(t, r.ReadMsg(&msg))
+ w.WriteMsg(&pb.Message{
+ Msg: &pb.Message_DialResponse{
+ DialResponse: &pb.DialResponse{
+ Status: pb.DialResponse_OK,
+ DialStatus: pb.DialStatus_E_DIAL_ERROR,
+ AddrIdx: 0,
+ },
+ },
+ })
+ s.Close()
+ }
+
+ b.SetStreamHandler(DialProtocol, handler)
+ privateAddr := ma.StringCast("/ip4/192.168.0.1/udp/10/quic-v1")
+ publicAddr := ma.StringCast("/ip4/1.2.3.4/udp/10/quic-v1")
+ res, err := an.GetReachability(context.Background(),
+ []Request{
+ {Addr: privateAddr},
+ {Addr: publicAddr},
+ })
+ require.NoError(t, err)
+ require.Equal(t, res.Addr, publicAddr, "%s\n%s", res.Addr, publicAddr)
+ require.Equal(t, res.Idx, 1)
+ require.Equal(t, res.Reachability, network.ReachabilityPrivate)
+}
+
+func TestClientDialBacks(t *testing.T) {
+ an := newAutoNAT(t, nil, allowPrivateAddrs)
+ defer an.Close()
+ defer an.host.Close()
+
+ b := bhost.NewBlankHost(swarmt.GenSwarm(t))
+ defer b.Close()
+ idAndConnect(t, an.host, b)
+ waitForPeer(t, an)
+
+ dialerHost := bhost.NewBlankHost(swarmt.GenSwarm(t))
+ defer dialerHost.Close()
+
+ readReq := func(r pbio.Reader) ([]ma.Multiaddr, uint64, error) {
+ var msg pb.Message
+ if err := r.ReadMsg(&msg); err != nil {
+ return nil, 0, err
+ }
+ if msg.GetDialRequest() == nil {
+ return nil, 0, errors.New("no dial request in msg")
+ }
+ addrs := parseAddrs(t, &msg)
+ return addrs, msg.GetDialRequest().GetNonce(), nil
+ }
+
+ writeNonce := func(addr ma.Multiaddr, nonce uint64) error {
+ pid := an.host.ID()
+ dialerHost.Peerstore().AddAddr(pid, addr, peerstore.PermanentAddrTTL)
+ defer func() {
+ dialerHost.Network().ClosePeer(pid)
+ dialerHost.Peerstore().RemovePeer(pid)
+ dialerHost.Peerstore().ClearAddrs(pid)
+ }()
+ as, err := dialerHost.NewStream(context.Background(), pid, DialBackProtocol)
+ if err != nil {
+ return err
+ }
+ w := pbio.NewDelimitedWriter(as)
+ if err := w.WriteMsg(&pb.DialBack{Nonce: nonce}); err != nil {
+ return err
+ }
+ as.CloseWrite()
+ data := make([]byte, 1)
+ as.Read(data)
+ as.Close()
+ return nil
+ }
+
+ tests := []struct {
+ name string
+ handler func(network.Stream)
+ success bool
+ }{
+ {
+ name: "correct dial attempt",
+ handler: func(s network.Stream) {
+ r := pbio.NewDelimitedReader(s, maxMsgSize)
+ w := pbio.NewDelimitedWriter(s)
+
+ addrs, nonce, err := readReq(r)
+ if err != nil {
+ s.Reset()
+ t.Error(err)
+ return
+ }
+ if err := writeNonce(addrs[1], nonce); err != nil {
+ s.Reset()
+ t.Error(err)
+ return
+ }
+ w.WriteMsg(&pb.Message{
+ Msg: &pb.Message_DialResponse{
+ DialResponse: &pb.DialResponse{
+ Status: pb.DialResponse_OK,
+ DialStatus: pb.DialStatus_OK,
+ AddrIdx: 1,
+ },
+ },
+ })
+ s.Close()
+ },
+ success: true,
+ },
+ {
+ name: "no dial attempt",
+ handler: func(s network.Stream) {
+ r := pbio.NewDelimitedReader(s, maxMsgSize)
+ if _, _, err := readReq(r); err != nil {
+ s.Reset()
+ t.Error(err)
+ return
+ }
+ resp := &pb.DialResponse{
+ Status: pb.DialResponse_OK,
+ DialStatus: pb.DialStatus_OK,
+ AddrIdx: 0,
+ }
+ w := pbio.NewDelimitedWriter(s)
+ w.WriteMsg(&pb.Message{
+ Msg: &pb.Message_DialResponse{
+ DialResponse: resp,
+ },
+ })
+ s.Close()
+ },
+ success: false,
+ },
+ {
+ name: "invalid reported address",
+ handler: func(s network.Stream) {
+ r := pbio.NewDelimitedReader(s, maxMsgSize)
+ addrs, nonce, err := readReq(r)
+ if err != nil {
+ s.Reset()
+ t.Error(err)
+ return
+ }
+
+ if err := writeNonce(addrs[1], nonce); err != nil {
+ s.Reset()
+ t.Error(err)
+ return
+ }
+
+ w := pbio.NewDelimitedWriter(s)
+ w.WriteMsg(&pb.Message{
+ Msg: &pb.Message_DialResponse{
+ DialResponse: &pb.DialResponse{
+ Status: pb.DialResponse_OK,
+ DialStatus: pb.DialStatus_OK,
+ AddrIdx: 0,
+ },
+ },
+ })
+ s.Close()
+ },
+ success: false,
+ },
+ {
+ name: "invalid nonce",
+ handler: func(s network.Stream) {
+ r := pbio.NewDelimitedReader(s, maxMsgSize)
+ addrs, nonce, err := readReq(r)
+ if err != nil {
+ s.Reset()
+ t.Error(err)
+ return
+ }
+ if err := writeNonce(addrs[0], nonce-1); err != nil {
+ s.Reset()
+ t.Error(err)
+ return
+ }
+ w := pbio.NewDelimitedWriter(s)
+ w.WriteMsg(&pb.Message{
+ Msg: &pb.Message_DialResponse{
+ DialResponse: &pb.DialResponse{
+ Status: pb.DialResponse_OK,
+ DialStatus: pb.DialStatus_OK,
+ AddrIdx: 0,
+ },
+ },
+ })
+ s.Close()
+ },
+ success: false,
+ },
+ {
+ name: "invalid addr index",
+ handler: func(s network.Stream) {
+ r := pbio.NewDelimitedReader(s, maxMsgSize)
+ _, _, err := readReq(r)
+ if err != nil {
+ s.Reset()
+ t.Error(err)
+ return
+ }
+ w := pbio.NewDelimitedWriter(s)
+ w.WriteMsg(&pb.Message{
+ Msg: &pb.Message_DialResponse{
+ DialResponse: &pb.DialResponse{
+ Status: pb.DialResponse_OK,
+ DialStatus: pb.DialStatus_OK,
+ AddrIdx: 10,
+ },
+ },
+ })
+ s.Close()
+ },
+ success: false,
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ addrs := an.host.Addrs()
+ b.SetStreamHandler(DialProtocol, tc.handler)
+ res, err := an.GetReachability(
+ context.Background(),
+ []Request{
+ {Addr: addrs[0], SendDialData: true},
+ {Addr: addrs[1]},
+ })
+ if !tc.success {
+ require.Error(t, err)
+ require.Equal(t, Result{}, res)
+ } else {
+ require.NoError(t, err)
+ require.Equal(t, res.Reachability, network.ReachabilityPublic)
+ }
+ })
+ }
+}
+
+func TestEventSubscription(t *testing.T) {
+ an := newAutoNAT(t, nil)
+ defer an.host.Close()
+
+ b := bhost.NewBlankHost(swarmt.GenSwarm(t))
+ defer b.Close()
+ c := bhost.NewBlankHost(swarmt.GenSwarm(t))
+ defer c.Close()
+
+ idAndConnect(t, an.host, b)
+ require.Eventually(t, func() bool {
+ an.mx.Lock()
+ defer an.mx.Unlock()
+ return len(an.peers.peers) == 1
+ }, 5*time.Second, 100*time.Millisecond)
+
+ idAndConnect(t, an.host, c)
+ require.Eventually(t, func() bool {
+ an.mx.Lock()
+ defer an.mx.Unlock()
+ return len(an.peers.peers) == 2
+ }, 5*time.Second, 100*time.Millisecond)
+
+ an.host.Network().ClosePeer(b.ID())
+ require.Eventually(t, func() bool {
+ an.mx.Lock()
+ defer an.mx.Unlock()
+ return len(an.peers.peers) == 1
+ }, 5*time.Second, 100*time.Millisecond)
+
+ an.host.Network().ClosePeer(c.ID())
+ require.Eventually(t, func() bool {
+ an.mx.Lock()
+ defer an.mx.Unlock()
+ return len(an.peers.peers) == 0
+ }, 5*time.Second, 100*time.Millisecond)
+}
+
+func TestAreAddrsConsistency(t *testing.T) {
+ c := &client{}
+ tests := []struct {
+ name string
+ localAddr ma.Multiaddr
+ dialAddr ma.Multiaddr
+ success bool
+ }{
+ {
+ name: "simple match",
+ localAddr: ma.StringCast("/ip4/192.168.0.1/tcp/12345"),
+ dialAddr: ma.StringCast("/ip4/1.2.3.4/tcp/23232"),
+ success: true,
+ },
+ {
+ name: "nat64",
+ localAddr: ma.StringCast("/ip6/1::1/tcp/12345"),
+ dialAddr: ma.StringCast("/ip4/1.2.3.4/tcp/23232"),
+ success: false,
+ },
+ {
+ name: "simple mismatch",
+ localAddr: ma.StringCast("/ip4/192.168.0.1/tcp/12345"),
+ dialAddr: ma.StringCast("/ip4/1.2.3.4/udp/23232/quic-v1"),
+ success: false,
+ },
+ {
+ name: "quic-vs-webtransport",
+ localAddr: ma.StringCast("/ip4/192.168.0.1/udp/12345/quic-v1"),
+ dialAddr: ma.StringCast("/ip4/1.2.3.4/udp/123/quic-v1/webtransport"),
+ success: false,
+ },
+ {
+ name: "webtransport-certhash",
+ localAddr: ma.StringCast("/ip4/192.168.0.1/udp/12345/quic-v1/webtransport"),
+ dialAddr: ma.StringCast("/ip4/1.2.3.4/udp/123/quic-v1/webtransport/certhash/uEgNmb28"),
+ success: true,
+ },
+ {
+ name: "dns",
+ localAddr: ma.StringCast("/dns/lib.p2p/udp/12345/quic-v1"),
+ dialAddr: ma.StringCast("/ip6/1::1/udp/123/quic-v1/"),
+ success: false,
+ },
+ {
+ name: "dns6",
+ localAddr: ma.StringCast("/dns6/lib.p2p/udp/12345/quic-v1"),
+ dialAddr: ma.StringCast("/ip4/1.2.3.4/udp/123/quic-v1/"),
+ success: false,
+ },
+ }
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ if c.areAddrsConsistent(tc.localAddr, tc.dialAddr) != tc.success {
+ wantStr := "match"
+ if !tc.success {
+ wantStr = "mismatch"
+ }
+ t.Errorf("expected %s between\nlocal addr: %s\ndial addr: %s", wantStr, tc.localAddr, tc.dialAddr)
+ }
+ })
+ }
+}
+
+func TestPeerMap(t *testing.T) {
+ pm := newPeersMap()
+ // Add 1, 2, 3
+ pm.Put(peer.ID("1"))
+ pm.Put(peer.ID("2"))
+ pm.Put(peer.ID("3"))
+
+ // Remove 3, 2
+ pm.Delete(peer.ID("3"))
+ pm.Delete(peer.ID("2"))
+
+ // Add 4
+ pm.Put(peer.ID("4"))
+
+ // Remove 3, 2 again. Should be a no-op.
+ pm.Delete(peer.ID("3"))
+ pm.Delete(peer.ID("2"))
+
+ contains := []peer.ID{"1", "4"}
+ elems := make([]peer.ID, 0)
+ for p := range pm.Shuffled() {
+ elems = append(elems, p)
+ }
+ require.ElementsMatch(t, contains, elems)
+}
+
+func FuzzClient(f *testing.F) {
+ a := newAutoNAT(f, nil, allowPrivateAddrs, WithServerRateLimit(math.MaxInt32, math.MaxInt32, math.MaxInt32, 2))
+ c := newAutoNAT(f, nil)
+ idAndWait(f, c, a)
+
+ // TODO: Move this to go-multiaddrs
+ getProto := func(protos []byte) ma.Multiaddr {
+ protoType := 0
+ if len(protos) > 0 {
+ protoType = int(protos[0])
+ }
+
+ port1, port2 := 0, 0
+ if len(protos) > 1 {
+ port1 = int(protos[1])
+ }
+ if len(protos) > 2 {
+ port2 = int(protos[2])
+ }
+ protoTemplates := []string{
+ "/tcp/%d/",
+ "/udp/%d/",
+ "/udp/%d/quic-v1/",
+ "/udp/%d/quic-v1/tcp/%d",
+ "/udp/%d/quic-v1/webtransport/",
+ "/udp/%d/webrtc/",
+ "/udp/%d/webrtc-direct/",
+ "/unix/hello/",
+ }
+ s := protoTemplates[protoType%len(protoTemplates)]
+ port1 %= (1 << 16)
+ if strings.Count(s, "%d") == 1 {
+ return ma.StringCast(fmt.Sprintf(s, port1))
+ }
+ port2 %= (1 << 16)
+ return ma.StringCast(fmt.Sprintf(s, port1, port2))
+ }
+
+ getIP := func(ips []byte) ma.Multiaddr {
+ ipType := 0
+ if len(ips) > 0 {
+ ipType = int(ips[0])
+ }
+ ips = ips[1:]
+ var x, y int64
+ split := 128 / 8
+ if len(ips) < split {
+ split = len(ips)
+ }
+ var b [8]byte
+ copy(b[:], ips[:split])
+ x = int64(binary.LittleEndian.Uint64(b[:]))
+ clear(b[:])
+ copy(b[:], ips[split:])
+ y = int64(binary.LittleEndian.Uint64(b[:]))
+
+ var ip netip.Addr
+ switch ipType % 3 {
+ case 0:
+ ip = netip.AddrFrom4([4]byte{byte(x), byte(x >> 8), byte(x >> 16), byte(x >> 24)})
+ return ma.StringCast(fmt.Sprintf("/ip4/%s/", ip))
+ case 1:
+ pubIP := net.ParseIP("2005::") // Public IP address
+ x := int64(binary.LittleEndian.Uint64(pubIP[0:8]))
+ ip = netip.AddrFrom16([16]byte{
+ byte(x), byte(x >> 8), byte(x >> 16), byte(x >> 24),
+ byte(x >> 32), byte(x >> 40), byte(x >> 48), byte(x >> 56),
+ byte(y), byte(y >> 8), byte(y >> 16), byte(y >> 24),
+ byte(y >> 32), byte(y >> 40), byte(y >> 48), byte(y >> 56),
+ })
+ return ma.StringCast(fmt.Sprintf("/ip6/%s/", ip))
+ default:
+ ip := netip.AddrFrom16([16]byte{
+ byte(x), byte(x >> 8), byte(x >> 16), byte(x >> 24),
+ byte(x >> 32), byte(x >> 40), byte(x >> 48), byte(x >> 56),
+ byte(y), byte(y >> 8), byte(y >> 16), byte(y >> 24),
+ byte(y >> 32), byte(y >> 40), byte(y >> 48), byte(y >> 56),
+ })
+ return ma.StringCast(fmt.Sprintf("/ip6/%s/", ip))
+ }
+ }
+
+ getAddr := func(addrType int, ips, protos []byte) ma.Multiaddr {
+ switch addrType % 4 {
+ case 0:
+ return getIP(ips).Encapsulate(getProto(protos))
+ case 1:
+ return getProto(protos)
+ case 2:
+ return nil
+ default:
+ return getIP(ips).Encapsulate(getProto(protos))
+ }
+ }
+
+ getDNSAddr := func(hostNameBytes, protos []byte) ma.Multiaddr {
+ hostName := strings.ReplaceAll(string(hostNameBytes), "\\", "")
+ hostName = strings.ReplaceAll(hostName, "/", "")
+ if hostName == "" {
+ hostName = "localhost"
+ }
+ dnsType := 0
+ if len(hostNameBytes) > 0 {
+ dnsType = int(hostNameBytes[0])
+ }
+ dnsProtos := []string{"dns", "dns4", "dns6", "dnsaddr"}
+ da := ma.StringCast(fmt.Sprintf("/%s/%s/", dnsProtos[dnsType%len(dnsProtos)], hostName))
+ return da.Encapsulate(getProto(protos))
+ }
+
+ const maxAddrs = 100
+ getAddrs := func(numAddrs int, ips, protos, hostNames []byte) []ma.Multiaddr {
+ if len(ips) == 0 || len(protos) == 0 || len(hostNames) == 0 {
+ return nil
+ }
+ numAddrs = ((numAddrs % maxAddrs) + maxAddrs) % maxAddrs
+ addrs := make([]ma.Multiaddr, numAddrs)
+ ipIdx := 0
+ protoIdx := 0
+ for i := range numAddrs {
+ addrs[i] = getAddr(i, ips[ipIdx:], protos[protoIdx:])
+ ipIdx = (ipIdx + 1) % len(ips)
+ protoIdx = (protoIdx + 1) % len(protos)
+ }
+ maxDNSAddrs := 10
+ protoIdx = 0
+ for i := 0; i < len(hostNames) && i < maxDNSAddrs; i += 2 {
+ ed := min(i+2, len(hostNames))
+ addrs = append(addrs, getDNSAddr(hostNames[i:ed], protos[protoIdx:]))
+ protoIdx = (protoIdx + 1) % len(protos)
+ }
+ return addrs
+ }
+ // Note: reduce streamTimeout before running this fuzz test. TODO: fix this
+ f.Fuzz(func(_ *testing.T, numAddrs int, ips, protos, hostNames []byte) {
+ addrs := getAddrs(numAddrs, ips, protos, hostNames)
+ reqs := make([]Request, len(addrs))
+ for i, addr := range addrs {
+ reqs[i] = Request{Addr: addr, SendDialData: true}
+ }
+ c.GetReachability(context.Background(), reqs)
+ })
+}
+
+func TestNormalizeMultiaddr(t *testing.T) {
+ require.Equal(t,
+ "/ip4/1.2.3.4/udp/9999/quic-v1/webtransport",
+ normalizeMultiaddr(ma.StringCast("/ip4/1.2.3.4/udp/9999/quic-v1/webtransport/certhash/uEgNmb28")).String(),
+ )
+}
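`TestClientServerError` above calls `newTestRequests`, which does not appear in this file; it is defined elsewhere in the package. A plausible sketch consistent with its call site, shown here only for orientation:

```go
// Hypothetical reconstruction; the real helper lives elsewhere in the package.
func newTestRequests(addrs []ma.Multiaddr, sendDialData bool) []Request {
	reqs := make([]Request, len(addrs))
	for i, addr := range addrs {
		reqs[i] = Request{Addr: addr, SendDialData: sendDialData}
	}
	return reqs
}
```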
diff --git a/p2p/protocol/autonatv2/client.go b/p2p/protocol/autonatv2/client.go
new file mode 100644
index 0000000000..5cbe698dd5
--- /dev/null
+++ b/p2p/protocol/autonatv2/client.go
@@ -0,0 +1,387 @@
+package autonatv2
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "runtime/debug"
+ "sync"
+ "time"
+
+ "math/rand/v2"
+
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/pb"
+ libp2pwebrtc "github.com/libp2p/go-libp2p/p2p/transport/webrtc"
+ libp2pwebtransport "github.com/libp2p/go-libp2p/p2p/transport/webtransport"
+ "github.com/libp2p/go-msgio/pbio"
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+// client implements the client for making dial requests for AutoNAT v2. It verifies successful
+// dials and provides an option to send data for dial requests.
+type client struct {
+ host host.Host
+ dialData []byte
+ metricsTracer MetricsTracer
+
+ mu sync.Mutex
+ // dialBackQueues maps nonce to the channel for providing the local multiaddr of the connection
+ // the nonce was received on
+ dialBackQueues map[uint64]chan ma.Multiaddr
+}
+
+func newClient(s *autoNATSettings) *client {
+ return &client{
+ dialData: make([]byte, 4000),
+ dialBackQueues: make(map[uint64]chan ma.Multiaddr),
+ metricsTracer: s.metricsTracer,
+ }
+}
+
+func (ac *client) Start(h host.Host) {
+ ac.host = h
+ ac.host.SetStreamHandler(DialBackProtocol, ac.handleDialBack)
+}
+
+func (ac *client) Close() {
+ ac.host.RemoveStreamHandler(DialBackProtocol)
+}
+
+// GetReachability verifies address reachability with an AutoNAT v2 server p.
+func (ac *client) GetReachability(ctx context.Context, p peer.ID, reqs []Request) (Result, error) {
+ result, err := ac.getReachability(ctx, p, reqs)
+
+ // Track metrics
+ if ac.metricsTracer != nil {
+ ac.metricsTracer.ClientCompletedRequest(reqs, result, err)
+ }
+
+ return result, err
+}
+
+func (ac *client) getReachability(ctx context.Context, p peer.ID, reqs []Request) (Result, error) {
+ ctx, cancel := context.WithTimeout(ctx, streamTimeout)
+ defer cancel()
+
+ s, err := ac.host.NewStream(ctx, p, DialProtocol)
+ if err != nil {
+ return Result{}, fmt.Errorf("open %s stream failed: %w", DialProtocol, err)
+ }
+
+ if err := s.Scope().SetService(ServiceName); err != nil {
+ s.Reset()
+ return Result{}, fmt.Errorf("attach stream %s to service %s failed: %w", DialProtocol, ServiceName, err)
+ }
+
+ if err := s.Scope().ReserveMemory(maxMsgSize, network.ReservationPriorityAlways); err != nil {
+ s.Reset()
+ return Result{}, fmt.Errorf("failed to reserve memory for stream %s: %w", DialProtocol, err)
+ }
+ defer s.Scope().ReleaseMemory(maxMsgSize)
+
+ s.SetDeadline(time.Now().Add(streamTimeout))
+ defer s.Close()
+
+ nonce := rand.Uint64()
+ ch := make(chan ma.Multiaddr, 1)
+ ac.mu.Lock()
+ ac.dialBackQueues[nonce] = ch
+ ac.mu.Unlock()
+ defer func() {
+ ac.mu.Lock()
+ delete(ac.dialBackQueues, nonce)
+ ac.mu.Unlock()
+ }()
+
+ msg := newDialRequest(reqs, nonce)
+ w := pbio.NewDelimitedWriter(s)
+ if err := w.WriteMsg(&msg); err != nil {
+ s.Reset()
+ return Result{}, fmt.Errorf("dial request write failed: %w", err)
+ }
+
+ r := pbio.NewDelimitedReader(s, maxMsgSize)
+ if err := r.ReadMsg(&msg); err != nil {
+ s.Reset()
+ return Result{}, fmt.Errorf("dial msg read failed: %w", err)
+ }
+
+ switch {
+ case msg.GetDialResponse() != nil:
+ // response received directly; handled below
+ // provide dial data if appropriate
+ case msg.GetDialDataRequest() != nil:
+ if err := validateDialDataRequest(reqs, &msg); err != nil {
+ s.Reset()
+ return Result{}, fmt.Errorf("invalid dial data request: %s %w", s.Conn().RemoteMultiaddr(), err)
+ }
+ // dial data request is valid and we want to send data
+ if err := sendDialData(ac.dialData, int(msg.GetDialDataRequest().GetNumBytes()), w, &msg); err != nil {
+ s.Reset()
+ return Result{}, fmt.Errorf("dial data send failed: %w", err)
+ }
+ if err := r.ReadMsg(&msg); err != nil {
+ s.Reset()
+ return Result{}, fmt.Errorf("dial response read failed: %w", err)
+ }
+ if msg.GetDialResponse() == nil {
+ s.Reset()
+ return Result{}, fmt.Errorf("invalid response type: %T", msg.Msg)
+ }
+ default:
+ s.Reset()
+ return Result{}, fmt.Errorf("invalid msg type: %T", msg.Msg)
+ }
+
+ resp := msg.GetDialResponse()
+ if resp.GetStatus() != pb.DialResponse_OK {
+ // E_DIAL_REFUSED has implications for deciding future address verification
+ // priorities; report it via the AllAddrsRefused flag so callers can handle it distinctly
+ if resp.GetStatus() == pb.DialResponse_E_DIAL_REFUSED {
+ return Result{AllAddrsRefused: true}, nil
+ }
+ return Result{}, fmt.Errorf("dial request failed: response status %d %s", resp.GetStatus(),
+ pb.DialResponse_ResponseStatus_name[int32(resp.GetStatus())])
+ }
+ if resp.GetDialStatus() == pb.DialStatus_UNUSED {
+ return Result{}, fmt.Errorf("invalid response: invalid dial status UNUSED")
+ }
+ if int(resp.AddrIdx) >= len(reqs) {
+ return Result{}, fmt.Errorf("invalid response: addr index out of range: %d [0-%d)", resp.AddrIdx, len(reqs))
+ }
+ // wait for nonce from the server
+ var dialBackAddr ma.Multiaddr
+ if resp.GetDialStatus() == pb.DialStatus_OK {
+ timer := time.NewTimer(dialBackStreamTimeout)
+ select {
+ case at := <-ch:
+ dialBackAddr = at
+ case <-ctx.Done():
+ case <-timer.C:
+ }
+ timer.Stop()
+ }
+ return ac.newResult(resp, reqs, dialBackAddr)
+}
+
+func validateDialDataRequest(reqs []Request, msg *pb.Message) error {
+ idx := int(msg.GetDialDataRequest().AddrIdx)
+ if idx >= len(reqs) { // invalid address index
+ return fmt.Errorf("addr index out of range: %d [0-%d)", idx, len(reqs))
+ }
+ if msg.GetDialDataRequest().NumBytes > maxHandshakeSizeBytes { // data request is too high
+ return fmt.Errorf("requested data too high: %d", msg.GetDialDataRequest().NumBytes)
+ }
+ if !reqs[idx].SendDialData { // low priority addr
+ return fmt.Errorf("low priority addr: %s index %d", reqs[idx].Addr, idx)
+ }
+ return nil
+}
+
+func (ac *client) newResult(resp *pb.DialResponse, reqs []Request, dialBackAddr ma.Multiaddr) (Result, error) {
+ idx := int(resp.AddrIdx)
+ if idx >= len(reqs) {
+ // This should have been validated by this point, but checking this is cheap.
+ return Result{}, fmt.Errorf("addrs index(%d) greater than len(reqs)(%d)", idx, len(reqs))
+ }
+ addr := reqs[idx].Addr
+
+ rch := network.ReachabilityUnknown //nolint:ineffassign
+ switch resp.DialStatus {
+ case pb.DialStatus_OK:
+ if !ac.areAddrsConsistent(dialBackAddr, addr) {
+ // the server is misinforming us about the address it successfully dialed
+ // either we received no dialback or the address on the dialback is inconsistent with
+ // what the server is telling us
+ return Result{}, fmt.Errorf("invalid response: dialBackAddr: %s, respAddr: %s", dialBackAddr, addr)
+ }
+ rch = network.ReachabilityPublic
+ case pb.DialStatus_E_DIAL_BACK_ERROR:
+ if !ac.areAddrsConsistent(dialBackAddr, addr) {
+ return Result{}, fmt.Errorf("dial-back stream error: dialBackAddr: %s, respAddr: %s", dialBackAddr, addr)
+ }
+ // We received the dial back but the server claims the dial back errored.
+ // As long as we received the correct nonce in dial back it is safe to assume
+ // that we are public.
+ rch = network.ReachabilityPublic
+ case pb.DialStatus_E_DIAL_ERROR:
+ rch = network.ReachabilityPrivate
+ default:
+ // Unexpected response code. Discard the response and fail.
+ log.Warn("invalid status code received in response",
+ "address", addr,
+ "dial_status", resp.DialStatus)
+ return Result{}, fmt.Errorf("invalid response: invalid status code for addr %s: %d", addr, resp.DialStatus)
+ }
+
+ return Result{
+ Addr: addr,
+ Idx: idx,
+ Reachability: rch,
+ }, nil
+}
+
+func sendDialData(dialData []byte, numBytes int, w pbio.Writer, msg *pb.Message) (err error) {
+ ddResp := &pb.DialDataResponse{Data: dialData}
+ *msg = pb.Message{
+ Msg: &pb.Message_DialDataResponse{
+ DialDataResponse: ddResp,
+ },
+ }
+ for remain := numBytes; remain > 0; {
+ if remain < len(ddResp.Data) {
+ ddResp.Data = ddResp.Data[:remain]
+ }
+ if err := w.WriteMsg(msg); err != nil {
+ return fmt.Errorf("write failed: %w", err)
+ }
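+ // Note: we subtract the full buffer length even after truncating
+ // ddResp.Data, so remain can go negative on the last chunk; the loop
+ // condition handles that and terminates.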
+ remain -= len(dialData)
+ }
+ return nil
+}
+
+func newDialRequest(reqs []Request, nonce uint64) pb.Message {
+ addrbs := make([][]byte, len(reqs))
+ for i, r := range reqs {
+ addrbs[i] = r.Addr.Bytes()
+ }
+ return pb.Message{
+ Msg: &pb.Message_DialRequest{
+ DialRequest: &pb.DialRequest{
+ Addrs: addrbs,
+ Nonce: nonce,
+ },
+ },
+ }
+}
+
+// handleDialBack receives the nonce on the dial-back stream
+func (ac *client) handleDialBack(s network.Stream) {
+ defer func() {
+ if rerr := recover(); rerr != nil {
+ fmt.Fprintf(os.Stderr, "caught panic: %s\n%s\n", rerr, debug.Stack())
+ }
+ s.Reset()
+ }()
+
+ if err := s.Scope().SetService(ServiceName); err != nil {
+ log.Debug("failed to attach stream to service",
+ "service_name", ServiceName,
+ "error", err)
+ s.Reset()
+ return
+ }
+
+ if err := s.Scope().ReserveMemory(dialBackMaxMsgSize, network.ReservationPriorityAlways); err != nil {
+ log.Debug("failed to reserve memory for stream",
+ "protocol", DialBackProtocol,
+ "error", err)
+ s.Reset()
+ return
+ }
+ defer s.Scope().ReleaseMemory(dialBackMaxMsgSize)
+
+ s.SetDeadline(time.Now().Add(dialBackStreamTimeout))
+ defer s.Close()
+
+ r := pbio.NewDelimitedReader(s, dialBackMaxMsgSize)
+ var msg pb.DialBack
+ if err := r.ReadMsg(&msg); err != nil {
+ log.Debug("failed to read dialback message",
+ "remote_peer", s.Conn().RemotePeer(),
+ "error", err)
+ s.Reset()
+ return
+ }
+ nonce := msg.GetNonce()
+
+ ac.mu.Lock()
+ ch := ac.dialBackQueues[nonce]
+ ac.mu.Unlock()
+ if ch == nil {
+ log.Debug("dialback received with invalid nonce",
+ "local_multiaddr", s.Conn().LocalMultiaddr(),
+ "remote_peer", s.Conn().RemotePeer(),
+ "nonce", nonce)
+ s.Reset()
+ return
+ }
+ select {
+ case ch <- s.Conn().LocalMultiaddr():
+ default:
+ log.Debug("multiple dialbacks received",
+ "local_multiaddr", s.Conn().LocalMultiaddr(),
+ "remote_peer", s.Conn().RemotePeer())
+ s.Reset()
+ return
+ }
+ w := pbio.NewDelimitedWriter(s)
+ res := pb.DialBackResponse{}
+ if err := w.WriteMsg(&res); err != nil {
+ log.Debug("failed to write dialback response",
+ "error", err)
+ s.Reset()
+ }
+}
+
+// normalizeMultiaddr returns a multiaddr suitable for equality checks.
+// If the multiaddr is a webtransport or webrtc-direct multiaddr, it strips the
+// trailing certhash components.
+func normalizeMultiaddr(addr ma.Multiaddr) ma.Multiaddr {
+ ok, n := libp2pwebtransport.IsWebtransportMultiaddr(addr)
+ if !ok {
+ ok, n = libp2pwebrtc.IsWebRTCDirectMultiaddr(addr)
+ }
+ if ok && n > 0 {
+ out := addr
+ for i := 0; i < n; i++ {
+ out, _ = ma.SplitLast(out)
+ }
+ return out
+ }
+ return addr
+}
+
+func (ac *client) areAddrsConsistent(connLocalAddr, dialedAddr ma.Multiaddr) bool {
+ if len(connLocalAddr) == 0 || len(dialedAddr) == 0 {
+ return false
+ }
+ connLocalAddr = normalizeMultiaddr(connLocalAddr)
+ dialedAddr = normalizeMultiaddr(dialedAddr)
+
+ localProtos := connLocalAddr.Protocols()
+ externalProtos := dialedAddr.Protocols()
+ if len(localProtos) != len(externalProtos) {
+ return false
+ }
+ for i, lp := range localProtos {
+ ep := externalProtos[i]
+ if i == 0 {
+ switch ep.Code {
+ case ma.P_DNS, ma.P_DNSADDR:
+ if lp.Code == ma.P_IP4 || lp.Code == ma.P_IP6 {
+ continue
+ }
+ return false
+ case ma.P_DNS4:
+ if lp.Code == ma.P_IP4 {
+ continue
+ }
+ return false
+ case ma.P_DNS6:
+ if lp.Code == ma.P_IP6 {
+ continue
+ }
+ return false
+ }
+ if lp.Code != ep.Code {
+ return false
+ }
+ } else if lp.Code != ep.Code {
+ return false
+ }
+ }
+ return true
+}
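The consistency check above is what makes the dial-back nonce meaningful: the server's claimed address must match, modulo certhashes and DNS resolution, the local address of the connection the nonce arrived on. An illustrative sketch (same package), mirroring the test table in autonat_test.go:

```go
c := &client{}
local := ma.StringCast("/ip4/192.168.0.1/udp/12345/quic-v1/webtransport")
dialed := ma.StringCast("/ip4/1.2.3.4/udp/123/quic-v1/webtransport/certhash/uEgNmb28")
fmt.Println(c.areAddrsConsistent(local, dialed)) // true: certhash is stripped
fmt.Println(normalizeMultiaddr(dialed))          // /ip4/1.2.3.4/udp/123/quic-v1/webtransport
```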
diff --git a/p2p/protocol/autonatv2/metrics.go b/p2p/protocol/autonatv2/metrics.go
new file mode 100644
index 0000000000..55ba003529
--- /dev/null
+++ b/p2p/protocol/autonatv2/metrics.go
@@ -0,0 +1,154 @@
+package autonatv2
+
+import (
+ "strconv"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/p2p/metricshelper"
+ "github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/pb"
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+type MetricsTracer interface {
+ CompletedRequest(EventDialRequestCompleted)
+ ClientCompletedRequest([]Request, Result, error)
+}
+
+const metricNamespace = "libp2p_autonatv2"
+
+var (
+ requestsCompleted = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "requests_completed_total",
+ Help: "Requests Completed",
+ },
+ []string{"server_error", "response_status", "dial_status", "dial_data_required", "ip_or_dns_version", "transport"},
+ )
+ clientRequestsCompleted = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "client_requests_completed_total",
+ Help: "Client Requests Completed",
+ },
+ []string{"ip_or_dns_version", "transport", "addr_count", "dial_refused", "reachability"},
+ )
+ clientRequestsTotal = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "client_requests_total",
+ Help: "Client Requests Total",
+ },
+ []string{"outcome"},
+ )
+)
+
+type metricsTracer struct{}
+
+func NewMetricsTracer(reg prometheus.Registerer) MetricsTracer {
+ metricshelper.RegisterCollectors(reg, requestsCompleted, clientRequestsCompleted, clientRequestsTotal)
+ return &metricsTracer{}
+}
+
+func (m *metricsTracer) CompletedRequest(e EventDialRequestCompleted) {
+ labels := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(labels)
+
+ errStr := getErrString(e.Error)
+
+ dialData := "false"
+ if e.DialDataRequired {
+ dialData = "true"
+ }
+
+ var ip, transport string
+ if e.DialedAddr != nil {
+ ip = getIPOrDNSVersion(e.DialedAddr)
+ transport = metricshelper.GetTransport(e.DialedAddr)
+ }
+
+ *labels = append(*labels,
+ errStr,
+ pb.DialResponse_ResponseStatus_name[int32(e.ResponseStatus)],
+ pb.DialStatus_name[int32(e.DialStatus)],
+ dialData,
+ ip,
+ transport,
+ )
+ requestsCompleted.WithLabelValues(*labels...).Inc()
+}
+
+func (m *metricsTracer) ClientCompletedRequest(reqs []Request, result Result, err error) {
+ labels := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(labels)
+
+ if err != nil {
+ clientRequestsTotal.WithLabelValues("failure").Inc()
+ return
+ }
+ clientRequestsTotal.WithLabelValues("success").Inc()
+
+ addrCount := len(reqs)
+ dialRefused := "false"
+ if result.AllAddrsRefused {
+ dialRefused = "true"
+ }
+ reachability := "unknown"
+ switch result.Reachability {
+ case network.ReachabilityPublic:
+ reachability = "public"
+ case network.ReachabilityPrivate:
+ reachability = "private"
+ }
+
+ ipOrDNSVersion := "unknown"
+ transport := "unknown"
+ if result.Addr != nil {
+ ipOrDNSVersion = getIPOrDNSVersion(result.Addr)
+ transport = metricshelper.GetTransport(result.Addr)
+ }
+
+ *labels = append(*labels,
+ ipOrDNSVersion,
+ transport,
+ strconv.Itoa(addrCount),
+ dialRefused,
+ reachability,
+ )
+ clientRequestsCompleted.WithLabelValues(*labels...).Inc()
+}
+
+func getIPOrDNSVersion(a ma.Multiaddr) string {
+ if len(a) == 0 {
+ return ""
+ }
+ res := "unknown"
+ switch a[0].Protocol().Code {
+ case ma.P_DNS, ma.P_DNSADDR:
+ res = "dns"
+ case ma.P_DNS4:
+ res = "dns4"
+ case ma.P_DNS6:
+ res = "dns6"
+ case ma.P_IP4:
+ res = "ip4"
+ case ma.P_IP6:
+ res = "ip6"
+ }
+ return res
+}
+
+func getErrString(e error) string {
+ var errStr string
+ switch e {
+ case nil:
+ errStr = "nil"
+ case errBadRequest, errDialDataRefused, errResourceLimitExceeded:
+ errStr = e.Error()
+ default:
+ errStr = "other"
+ }
+ return errStr
+}
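To export these metrics, pass a tracer to the constructor via the option defined in options.go. A minimal sketch, assuming `dialer` and `h` are already-constructed libp2p hosts:

```go
reg := prometheus.NewRegistry()
an, err := autonatv2.New(dialer,
	autonatv2.WithMetricsTracer(autonatv2.NewMetricsTracer(reg)))
if err != nil {
	panic(err)
}
if err := an.Start(h); err != nil {
	panic(err)
}
defer an.Close()
```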
diff --git a/p2p/protocol/autonatv2/metrics_test.go b/p2p/protocol/autonatv2/metrics_test.go
new file mode 100644
index 0000000000..92d311e141
--- /dev/null
+++ b/p2p/protocol/autonatv2/metrics_test.go
@@ -0,0 +1,60 @@
+package autonatv2
+
+import (
+ "errors"
+ "math/rand"
+ "testing"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/pb"
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+func TestMetricsNoAllocNoCover(t *testing.T) {
+ mt := NewMetricsTracer(prometheus.DefaultRegisterer)
+ respStatuses := []pb.DialResponse_ResponseStatus{
+ pb.DialResponse_E_DIAL_REFUSED,
+ pb.DialResponse_OK,
+ }
+ dialStatuses := []pb.DialStatus{
+ pb.DialStatus_OK,
+ pb.DialStatus_E_DIAL_BACK_ERROR,
+ }
+ errs := []error{
+ nil,
+ errBadRequest,
+ errDialDataRefused,
+ errors.New("write failed"),
+ }
+ addrs := []ma.Multiaddr{
+ nil,
+ ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1"),
+ ma.StringCast("/ip4/1.1.1.1/tcp/1/"),
+ }
+ reqs := [][]Request{
+ {{Addr: addrs[0]}, {Addr: addrs[1], SendDialData: true}},
+ {{Addr: addrs[1]}, {Addr: addrs[2]}},
+ }
+
+ tests := map[string]func(){
+ "CompletedRequest": func() {
+ mt.CompletedRequest(EventDialRequestCompleted{
+ Error: errs[rand.Intn(len(errs))],
+ ResponseStatus: respStatuses[rand.Intn(len(respStatuses))],
+ DialStatus: dialStatuses[rand.Intn(len(dialStatuses))],
+ DialDataRequired: rand.Intn(2) == 1,
+ DialedAddr: addrs[rand.Intn(len(addrs))],
+ })
+ },
+ "CompletedClientRequest": func() {
+ mt.ClientCompletedRequest(reqs[rand.Intn(len(reqs))], Result{AllAddrsRefused: rand.Intn(2) == 1, Reachability: network.Reachability(rand.Intn(2)), Addr: addrs[rand.Intn(len(addrs))]}, errs[rand.Intn(len(errs))])
+ },
+ }
+ for method, f := range tests {
+ allocs := testing.AllocsPerRun(10000, f)
+ if allocs > 0 {
+ t.Fatalf("%s alloc test failed expected 0 received %0.2f", method, allocs)
+ }
+ }
+}
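The zero-allocation guarantee above holds because label slices are pooled rather than allocated per call. A minimal sketch of that pattern (the real helpers are `metricshelper.GetStringSlice`/`PutStringSlice`; this is not their exact implementation):

```go
var labelPool = sync.Pool{New: func() any {
	s := make([]string, 0, 8)
	return &s
}}

func getStringSlice() *[]string { return labelPool.Get().(*[]string) }

func putStringSlice(s *[]string) {
	*s = (*s)[:0] // reset length, keep capacity for reuse
	labelPool.Put(s)
}
```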
diff --git a/p2p/protocol/autonatv2/msg_reader.go b/p2p/protocol/autonatv2/msg_reader.go
new file mode 100644
index 0000000000..87849a55ac
--- /dev/null
+++ b/p2p/protocol/autonatv2/msg_reader.go
@@ -0,0 +1,38 @@
+package autonatv2
+
+import (
+ "io"
+
+ "github.com/multiformats/go-varint"
+)
+
+// msgReader reads a varint-prefixed message from R without any buffering.
+type msgReader struct {
+ R io.Reader
+ Buf []byte
+}
+
+func (m *msgReader) ReadByte() (byte, error) {
+ buf := m.Buf[:1]
+ // A single Read may legally return (0, nil), which would hand back a stale
+ // byte with a nil error; io.ReadFull loops until the byte arrives or an
+ // error occurs.
+ if _, err := io.ReadFull(m.R, buf); err != nil {
+ return 0, err
+ }
+ return buf[0], nil
+}
+
+func (m *msgReader) ReadMsg() ([]byte, error) {
+ sz, err := varint.ReadUvarint(m)
+ if err != nil {
+ return nil, err
+ }
+ if sz > uint64(len(m.Buf)) {
+ return nil, io.ErrShortBuffer
+ }
+ // io.ReadFull reads exactly sz bytes and returns io.ErrUnexpectedEOF on a
+ // short read, which is simpler than a manual read loop.
+ if _, err := io.ReadFull(m.R, m.Buf[:sz]); err != nil {
+ return nil, err
+ }
+ return m.Buf[:sz], nil
+}
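A quick sketch of msgReader in use (same package): frame a payload with a uvarint length prefix, then read it back. `varint.ToUvarint` is from the already-imported go-varint module; `bytes` and `fmt` imports are assumed.

```go
payload := []byte("hello")
framed := append(varint.ToUvarint(uint64(len(payload))), payload...)
r := &msgReader{R: bytes.NewReader(framed), Buf: make([]byte, 64)}
msg, err := r.ReadMsg()
if err != nil {
	panic(err)
}
fmt.Printf("%s\n", msg) // hello
```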
diff --git a/p2p/protocol/autonatv2/options.go b/p2p/protocol/autonatv2/options.go
new file mode 100644
index 0000000000..f7cf4b7178
--- /dev/null
+++ b/p2p/protocol/autonatv2/options.go
@@ -0,0 +1,76 @@
+package autonatv2
+
+import "time"
+
+// autoNATSettings is used to configure AutoNAT
+type autoNATSettings struct {
+ allowPrivateAddrs bool
+ serverRPM int
+ serverPerPeerRPM int
+ serverDialDataRPM int
+ maxConcurrentRequestsPerPeer int
+ dataRequestPolicy dataRequestPolicyFunc
+ now func() time.Time
+ amplificatonAttackPreventionDialWait time.Duration
+ metricsTracer MetricsTracer
+ throttlePeerDuration time.Duration
+}
+
+func defaultSettings() *autoNATSettings {
+ return &autoNATSettings{
+ allowPrivateAddrs: false,
+ serverRPM: 60, // 1 every second
+ serverPerPeerRPM: 12, // 1 every 5 seconds
+ serverDialDataRPM: 12, // 1 every 5 seconds
+ maxConcurrentRequestsPerPeer: 2,
+ dataRequestPolicy: amplificationAttackPrevention,
+ amplificatonAttackPreventionDialWait: 3 * time.Second,
+ now: time.Now,
+ throttlePeerDuration: defaultThrottlePeerDuration,
+ }
+}
+
+type AutoNATOption func(s *autoNATSettings) error
+
+func WithServerRateLimit(rpm, perPeerRPM, dialDataRPM int, maxConcurrentRequestsPerPeer int) AutoNATOption {
+ return func(s *autoNATSettings) error {
+ s.serverRPM = rpm
+ s.serverPerPeerRPM = perPeerRPM
+ s.serverDialDataRPM = dialDataRPM
+ s.maxConcurrentRequestsPerPeer = maxConcurrentRequestsPerPeer
+ return nil
+ }
+}
+
+func WithMetricsTracer(m MetricsTracer) AutoNATOption {
+ return func(s *autoNATSettings) error {
+ s.metricsTracer = m
+ return nil
+ }
+}
+
+func withDataRequestPolicy(drp dataRequestPolicyFunc) AutoNATOption {
+ return func(s *autoNATSettings) error {
+ s.dataRequestPolicy = drp
+ return nil
+ }
+}
+
+func allowPrivateAddrs(s *autoNATSettings) error {
+ s.allowPrivateAddrs = true
+ return nil
+}
+
+func withAmplificationAttackPreventionDialWait(d time.Duration) AutoNATOption {
+ return func(s *autoNATSettings) error {
+ s.amplificatonAttackPreventionDialWait = d
+ return nil
+ }
+}
+
+func withThrottlePeerDuration(d time.Duration) AutoNATOption {
+ return func(s *autoNATSettings) error {
+ s.throttlePeerDuration = d
+ return nil
+ }
+}
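Putting the options together: a hedged sketch of configuring the server rate limits (host construction elided; parameter meanings are inferred from the setting names above, and the values mirror the defaults):

```go
an, err := autonatv2.New(dialer,
	autonatv2.WithServerRateLimit(
		60, // rpm: total dial requests served per minute
		12, // perPeerRPM: requests served per peer per minute
		12, // dialDataRPM: dial-data requests per minute
		2,  // maxConcurrentRequestsPerPeer
	),
)
if err != nil {
	panic(err)
}
defer an.Close()
```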
diff --git a/p2p/protocol/autonatv2/pb/autonatv2.pb.go b/p2p/protocol/autonatv2/pb/autonatv2.pb.go
new file mode 100644
index 0000000000..a4cc318522
--- /dev/null
+++ b/p2p/protocol/autonatv2/pb/autonatv2.pb.go
@@ -0,0 +1,695 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.6
+// protoc v5.29.2
+// source: p2p/protocol/autonatv2/pb/autonatv2.proto
+
+package pb
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+ unsafe "unsafe"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type DialStatus int32
+
+const (
+ DialStatus_UNUSED DialStatus = 0
+ DialStatus_E_DIAL_ERROR DialStatus = 100
+ DialStatus_E_DIAL_BACK_ERROR DialStatus = 101
+ DialStatus_OK DialStatus = 200
+)
+
+// Enum value maps for DialStatus.
+var (
+ DialStatus_name = map[int32]string{
+ 0: "UNUSED",
+ 100: "E_DIAL_ERROR",
+ 101: "E_DIAL_BACK_ERROR",
+ 200: "OK",
+ }
+ DialStatus_value = map[string]int32{
+ "UNUSED": 0,
+ "E_DIAL_ERROR": 100,
+ "E_DIAL_BACK_ERROR": 101,
+ "OK": 200,
+ }
+)
+
+func (x DialStatus) Enum() *DialStatus {
+ p := new(DialStatus)
+ *p = x
+ return p
+}
+
+func (x DialStatus) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (DialStatus) Descriptor() protoreflect.EnumDescriptor {
+ return file_p2p_protocol_autonatv2_pb_autonatv2_proto_enumTypes[0].Descriptor()
+}
+
+func (DialStatus) Type() protoreflect.EnumType {
+ return &file_p2p_protocol_autonatv2_pb_autonatv2_proto_enumTypes[0]
+}
+
+func (x DialStatus) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use DialStatus.Descriptor instead.
+func (DialStatus) EnumDescriptor() ([]byte, []int) {
+ return file_p2p_protocol_autonatv2_pb_autonatv2_proto_rawDescGZIP(), []int{0}
+}
+
+type DialResponse_ResponseStatus int32
+
+const (
+ DialResponse_E_INTERNAL_ERROR DialResponse_ResponseStatus = 0
+ DialResponse_E_REQUEST_REJECTED DialResponse_ResponseStatus = 100
+ DialResponse_E_DIAL_REFUSED DialResponse_ResponseStatus = 101
+ DialResponse_OK DialResponse_ResponseStatus = 200
+)
+
+// Enum value maps for DialResponse_ResponseStatus.
+var (
+ DialResponse_ResponseStatus_name = map[int32]string{
+ 0: "E_INTERNAL_ERROR",
+ 100: "E_REQUEST_REJECTED",
+ 101: "E_DIAL_REFUSED",
+ 200: "OK",
+ }
+ DialResponse_ResponseStatus_value = map[string]int32{
+ "E_INTERNAL_ERROR": 0,
+ "E_REQUEST_REJECTED": 100,
+ "E_DIAL_REFUSED": 101,
+ "OK": 200,
+ }
+)
+
+func (x DialResponse_ResponseStatus) Enum() *DialResponse_ResponseStatus {
+ p := new(DialResponse_ResponseStatus)
+ *p = x
+ return p
+}
+
+func (x DialResponse_ResponseStatus) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (DialResponse_ResponseStatus) Descriptor() protoreflect.EnumDescriptor {
+ return file_p2p_protocol_autonatv2_pb_autonatv2_proto_enumTypes[1].Descriptor()
+}
+
+func (DialResponse_ResponseStatus) Type() protoreflect.EnumType {
+ return &file_p2p_protocol_autonatv2_pb_autonatv2_proto_enumTypes[1]
+}
+
+func (x DialResponse_ResponseStatus) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use DialResponse_ResponseStatus.Descriptor instead.
+func (DialResponse_ResponseStatus) EnumDescriptor() ([]byte, []int) {
+ return file_p2p_protocol_autonatv2_pb_autonatv2_proto_rawDescGZIP(), []int{3, 0}
+}
+
+type DialBackResponse_DialBackStatus int32
+
+const (
+ DialBackResponse_OK DialBackResponse_DialBackStatus = 0
+)
+
+// Enum value maps for DialBackResponse_DialBackStatus.
+var (
+ DialBackResponse_DialBackStatus_name = map[int32]string{
+ 0: "OK",
+ }
+ DialBackResponse_DialBackStatus_value = map[string]int32{
+ "OK": 0,
+ }
+)
+
+func (x DialBackResponse_DialBackStatus) Enum() *DialBackResponse_DialBackStatus {
+ p := new(DialBackResponse_DialBackStatus)
+ *p = x
+ return p
+}
+
+func (x DialBackResponse_DialBackStatus) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (DialBackResponse_DialBackStatus) Descriptor() protoreflect.EnumDescriptor {
+ return file_p2p_protocol_autonatv2_pb_autonatv2_proto_enumTypes[2].Descriptor()
+}
+
+func (DialBackResponse_DialBackStatus) Type() protoreflect.EnumType {
+ return &file_p2p_protocol_autonatv2_pb_autonatv2_proto_enumTypes[2]
+}
+
+func (x DialBackResponse_DialBackStatus) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use DialBackResponse_DialBackStatus.Descriptor instead.
+func (DialBackResponse_DialBackStatus) EnumDescriptor() ([]byte, []int) {
+ return file_p2p_protocol_autonatv2_pb_autonatv2_proto_rawDescGZIP(), []int{6, 0}
+}
+
+type Message struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Types that are valid to be assigned to Msg:
+ //
+ // *Message_DialRequest
+ // *Message_DialResponse
+ // *Message_DialDataRequest
+ // *Message_DialDataResponse
+ Msg isMessage_Msg `protobuf_oneof:"msg"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Message) Reset() {
+ *x = Message{}
+ mi := &file_p2p_protocol_autonatv2_pb_autonatv2_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Message) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Message) ProtoMessage() {}
+
+func (x *Message) ProtoReflect() protoreflect.Message {
+ mi := &file_p2p_protocol_autonatv2_pb_autonatv2_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Message.ProtoReflect.Descriptor instead.
+func (*Message) Descriptor() ([]byte, []int) {
+ return file_p2p_protocol_autonatv2_pb_autonatv2_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Message) GetMsg() isMessage_Msg {
+ if x != nil {
+ return x.Msg
+ }
+ return nil
+}
+
+func (x *Message) GetDialRequest() *DialRequest {
+ if x != nil {
+ if x, ok := x.Msg.(*Message_DialRequest); ok {
+ return x.DialRequest
+ }
+ }
+ return nil
+}
+
+func (x *Message) GetDialResponse() *DialResponse {
+ if x != nil {
+ if x, ok := x.Msg.(*Message_DialResponse); ok {
+ return x.DialResponse
+ }
+ }
+ return nil
+}
+
+func (x *Message) GetDialDataRequest() *DialDataRequest {
+ if x != nil {
+ if x, ok := x.Msg.(*Message_DialDataRequest); ok {
+ return x.DialDataRequest
+ }
+ }
+ return nil
+}
+
+func (x *Message) GetDialDataResponse() *DialDataResponse {
+ if x != nil {
+ if x, ok := x.Msg.(*Message_DialDataResponse); ok {
+ return x.DialDataResponse
+ }
+ }
+ return nil
+}
+
+type isMessage_Msg interface {
+ isMessage_Msg()
+}
+
+type Message_DialRequest struct {
+ DialRequest *DialRequest `protobuf:"bytes,1,opt,name=dialRequest,proto3,oneof"`
+}
+
+type Message_DialResponse struct {
+ DialResponse *DialResponse `protobuf:"bytes,2,opt,name=dialResponse,proto3,oneof"`
+}
+
+type Message_DialDataRequest struct {
+ DialDataRequest *DialDataRequest `protobuf:"bytes,3,opt,name=dialDataRequest,proto3,oneof"`
+}
+
+type Message_DialDataResponse struct {
+ DialDataResponse *DialDataResponse `protobuf:"bytes,4,opt,name=dialDataResponse,proto3,oneof"`
+}
+
+func (*Message_DialRequest) isMessage_Msg() {}
+
+func (*Message_DialResponse) isMessage_Msg() {}
+
+func (*Message_DialDataRequest) isMessage_Msg() {}
+
+func (*Message_DialDataResponse) isMessage_Msg() {}
+
+type DialRequest struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Addrs [][]byte `protobuf:"bytes,1,rep,name=addrs,proto3" json:"addrs,omitempty"`
+ Nonce uint64 `protobuf:"fixed64,2,opt,name=nonce,proto3" json:"nonce,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *DialRequest) Reset() {
+ *x = DialRequest{}
+ mi := &file_p2p_protocol_autonatv2_pb_autonatv2_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *DialRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DialRequest) ProtoMessage() {}
+
+func (x *DialRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_p2p_protocol_autonatv2_pb_autonatv2_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DialRequest.ProtoReflect.Descriptor instead.
+func (*DialRequest) Descriptor() ([]byte, []int) {
+ return file_p2p_protocol_autonatv2_pb_autonatv2_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *DialRequest) GetAddrs() [][]byte {
+ if x != nil {
+ return x.Addrs
+ }
+ return nil
+}
+
+func (x *DialRequest) GetNonce() uint64 {
+ if x != nil {
+ return x.Nonce
+ }
+ return 0
+}
+
+type DialDataRequest struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ AddrIdx uint32 `protobuf:"varint,1,opt,name=addrIdx,proto3" json:"addrIdx,omitempty"`
+ NumBytes uint64 `protobuf:"varint,2,opt,name=numBytes,proto3" json:"numBytes,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *DialDataRequest) Reset() {
+ *x = DialDataRequest{}
+ mi := &file_p2p_protocol_autonatv2_pb_autonatv2_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *DialDataRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DialDataRequest) ProtoMessage() {}
+
+func (x *DialDataRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_p2p_protocol_autonatv2_pb_autonatv2_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DialDataRequest.ProtoReflect.Descriptor instead.
+func (*DialDataRequest) Descriptor() ([]byte, []int) {
+ return file_p2p_protocol_autonatv2_pb_autonatv2_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *DialDataRequest) GetAddrIdx() uint32 {
+ if x != nil {
+ return x.AddrIdx
+ }
+ return 0
+}
+
+func (x *DialDataRequest) GetNumBytes() uint64 {
+ if x != nil {
+ return x.NumBytes
+ }
+ return 0
+}
+
+type DialResponse struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Status DialResponse_ResponseStatus `protobuf:"varint,1,opt,name=status,proto3,enum=autonatv2.pb.DialResponse_ResponseStatus" json:"status,omitempty"`
+ AddrIdx uint32 `protobuf:"varint,2,opt,name=addrIdx,proto3" json:"addrIdx,omitempty"`
+ DialStatus DialStatus `protobuf:"varint,3,opt,name=dialStatus,proto3,enum=autonatv2.pb.DialStatus" json:"dialStatus,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *DialResponse) Reset() {
+ *x = DialResponse{}
+ mi := &file_p2p_protocol_autonatv2_pb_autonatv2_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *DialResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DialResponse) ProtoMessage() {}
+
+func (x *DialResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_p2p_protocol_autonatv2_pb_autonatv2_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DialResponse.ProtoReflect.Descriptor instead.
+func (*DialResponse) Descriptor() ([]byte, []int) {
+ return file_p2p_protocol_autonatv2_pb_autonatv2_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *DialResponse) GetStatus() DialResponse_ResponseStatus {
+ if x != nil {
+ return x.Status
+ }
+ return DialResponse_E_INTERNAL_ERROR
+}
+
+func (x *DialResponse) GetAddrIdx() uint32 {
+ if x != nil {
+ return x.AddrIdx
+ }
+ return 0
+}
+
+func (x *DialResponse) GetDialStatus() DialStatus {
+ if x != nil {
+ return x.DialStatus
+ }
+ return DialStatus_UNUSED
+}
+
+type DialDataResponse struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *DialDataResponse) Reset() {
+ *x = DialDataResponse{}
+ mi := &file_p2p_protocol_autonatv2_pb_autonatv2_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *DialDataResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DialDataResponse) ProtoMessage() {}
+
+func (x *DialDataResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_p2p_protocol_autonatv2_pb_autonatv2_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DialDataResponse.ProtoReflect.Descriptor instead.
+func (*DialDataResponse) Descriptor() ([]byte, []int) {
+ return file_p2p_protocol_autonatv2_pb_autonatv2_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *DialDataResponse) GetData() []byte {
+ if x != nil {
+ return x.Data
+ }
+ return nil
+}
+
+type DialBack struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Nonce uint64 `protobuf:"fixed64,1,opt,name=nonce,proto3" json:"nonce,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *DialBack) Reset() {
+ *x = DialBack{}
+ mi := &file_p2p_protocol_autonatv2_pb_autonatv2_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *DialBack) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DialBack) ProtoMessage() {}
+
+func (x *DialBack) ProtoReflect() protoreflect.Message {
+ mi := &file_p2p_protocol_autonatv2_pb_autonatv2_proto_msgTypes[5]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DialBack.ProtoReflect.Descriptor instead.
+func (*DialBack) Descriptor() ([]byte, []int) {
+ return file_p2p_protocol_autonatv2_pb_autonatv2_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *DialBack) GetNonce() uint64 {
+ if x != nil {
+ return x.Nonce
+ }
+ return 0
+}
+
+type DialBackResponse struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Status DialBackResponse_DialBackStatus `protobuf:"varint,1,opt,name=status,proto3,enum=autonatv2.pb.DialBackResponse_DialBackStatus" json:"status,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *DialBackResponse) Reset() {
+ *x = DialBackResponse{}
+ mi := &file_p2p_protocol_autonatv2_pb_autonatv2_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *DialBackResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DialBackResponse) ProtoMessage() {}
+
+func (x *DialBackResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_p2p_protocol_autonatv2_pb_autonatv2_proto_msgTypes[6]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DialBackResponse.ProtoReflect.Descriptor instead.
+func (*DialBackResponse) Descriptor() ([]byte, []int) {
+ return file_p2p_protocol_autonatv2_pb_autonatv2_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *DialBackResponse) GetStatus() DialBackResponse_DialBackStatus {
+ if x != nil {
+ return x.Status
+ }
+ return DialBackResponse_OK
+}
+
+var File_p2p_protocol_autonatv2_pb_autonatv2_proto protoreflect.FileDescriptor
+
+const file_p2p_protocol_autonatv2_pb_autonatv2_proto_rawDesc = "" +
+ "\n" +
+ ")p2p/protocol/autonatv2/pb/autonatv2.proto\x12\fautonatv2.pb\"\xaa\x02\n" +
+ "\aMessage\x12=\n" +
+ "\vdialRequest\x18\x01 \x01(\v2\x19.autonatv2.pb.DialRequestH\x00R\vdialRequest\x12@\n" +
+ "\fdialResponse\x18\x02 \x01(\v2\x1a.autonatv2.pb.DialResponseH\x00R\fdialResponse\x12I\n" +
+ "\x0fdialDataRequest\x18\x03 \x01(\v2\x1d.autonatv2.pb.DialDataRequestH\x00R\x0fdialDataRequest\x12L\n" +
+ "\x10dialDataResponse\x18\x04 \x01(\v2\x1e.autonatv2.pb.DialDataResponseH\x00R\x10dialDataResponseB\x05\n" +
+ "\x03msg\"9\n" +
+ "\vDialRequest\x12\x14\n" +
+ "\x05addrs\x18\x01 \x03(\fR\x05addrs\x12\x14\n" +
+ "\x05nonce\x18\x02 \x01(\x06R\x05nonce\"G\n" +
+ "\x0fDialDataRequest\x12\x18\n" +
+ "\aaddrIdx\x18\x01 \x01(\rR\aaddrIdx\x12\x1a\n" +
+ "\bnumBytes\x18\x02 \x01(\x04R\bnumBytes\"\x82\x02\n" +
+ "\fDialResponse\x12A\n" +
+ "\x06status\x18\x01 \x01(\x0e2).autonatv2.pb.DialResponse.ResponseStatusR\x06status\x12\x18\n" +
+ "\aaddrIdx\x18\x02 \x01(\rR\aaddrIdx\x128\n" +
+ "\n" +
+ "dialStatus\x18\x03 \x01(\x0e2\x18.autonatv2.pb.DialStatusR\n" +
+ "dialStatus\"[\n" +
+ "\x0eResponseStatus\x12\x14\n" +
+ "\x10E_INTERNAL_ERROR\x10\x00\x12\x16\n" +
+ "\x12E_REQUEST_REJECTED\x10d\x12\x12\n" +
+ "\x0eE_DIAL_REFUSED\x10e\x12\a\n" +
+ "\x02OK\x10\xc8\x01\"&\n" +
+ "\x10DialDataResponse\x12\x12\n" +
+ "\x04data\x18\x01 \x01(\fR\x04data\" \n" +
+ "\bDialBack\x12\x14\n" +
+ "\x05nonce\x18\x01 \x01(\x06R\x05nonce\"s\n" +
+ "\x10DialBackResponse\x12E\n" +
+ "\x06status\x18\x01 \x01(\x0e2-.autonatv2.pb.DialBackResponse.DialBackStatusR\x06status\"\x18\n" +
+ "\x0eDialBackStatus\x12\x06\n" +
+ "\x02OK\x10\x00*J\n" +
+ "\n" +
+ "DialStatus\x12\n" +
+ "\n" +
+ "\x06UNUSED\x10\x00\x12\x10\n" +
+ "\fE_DIAL_ERROR\x10d\x12\x15\n" +
+ "\x11E_DIAL_BACK_ERROR\x10e\x12\a\n" +
+ "\x02OK\x10\xc8\x01B7Z5github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/pbb\x06proto3"
+
+var (
+ file_p2p_protocol_autonatv2_pb_autonatv2_proto_rawDescOnce sync.Once
+ file_p2p_protocol_autonatv2_pb_autonatv2_proto_rawDescData []byte
+)
+
+func file_p2p_protocol_autonatv2_pb_autonatv2_proto_rawDescGZIP() []byte {
+ file_p2p_protocol_autonatv2_pb_autonatv2_proto_rawDescOnce.Do(func() {
+ file_p2p_protocol_autonatv2_pb_autonatv2_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_p2p_protocol_autonatv2_pb_autonatv2_proto_rawDesc), len(file_p2p_protocol_autonatv2_pb_autonatv2_proto_rawDesc)))
+ })
+ return file_p2p_protocol_autonatv2_pb_autonatv2_proto_rawDescData
+}
+
+var file_p2p_protocol_autonatv2_pb_autonatv2_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
+var file_p2p_protocol_autonatv2_pb_autonatv2_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
+var file_p2p_protocol_autonatv2_pb_autonatv2_proto_goTypes = []any{
+ (DialStatus)(0), // 0: autonatv2.pb.DialStatus
+ (DialResponse_ResponseStatus)(0), // 1: autonatv2.pb.DialResponse.ResponseStatus
+ (DialBackResponse_DialBackStatus)(0), // 2: autonatv2.pb.DialBackResponse.DialBackStatus
+ (*Message)(nil), // 3: autonatv2.pb.Message
+ (*DialRequest)(nil), // 4: autonatv2.pb.DialRequest
+ (*DialDataRequest)(nil), // 5: autonatv2.pb.DialDataRequest
+ (*DialResponse)(nil), // 6: autonatv2.pb.DialResponse
+ (*DialDataResponse)(nil), // 7: autonatv2.pb.DialDataResponse
+ (*DialBack)(nil), // 8: autonatv2.pb.DialBack
+ (*DialBackResponse)(nil), // 9: autonatv2.pb.DialBackResponse
+}
+var file_p2p_protocol_autonatv2_pb_autonatv2_proto_depIdxs = []int32{
+ 4, // 0: autonatv2.pb.Message.dialRequest:type_name -> autonatv2.pb.DialRequest
+ 6, // 1: autonatv2.pb.Message.dialResponse:type_name -> autonatv2.pb.DialResponse
+ 5, // 2: autonatv2.pb.Message.dialDataRequest:type_name -> autonatv2.pb.DialDataRequest
+ 7, // 3: autonatv2.pb.Message.dialDataResponse:type_name -> autonatv2.pb.DialDataResponse
+ 1, // 4: autonatv2.pb.DialResponse.status:type_name -> autonatv2.pb.DialResponse.ResponseStatus
+ 0, // 5: autonatv2.pb.DialResponse.dialStatus:type_name -> autonatv2.pb.DialStatus
+ 2, // 6: autonatv2.pb.DialBackResponse.status:type_name -> autonatv2.pb.DialBackResponse.DialBackStatus
+ 7, // [7:7] is the sub-list for method output_type
+ 7, // [7:7] is the sub-list for method input_type
+ 7, // [7:7] is the sub-list for extension type_name
+ 7, // [7:7] is the sub-list for extension extendee
+ 0, // [0:7] is the sub-list for field type_name
+}
+
+func init() { file_p2p_protocol_autonatv2_pb_autonatv2_proto_init() }
+func file_p2p_protocol_autonatv2_pb_autonatv2_proto_init() {
+ if File_p2p_protocol_autonatv2_pb_autonatv2_proto != nil {
+ return
+ }
+ file_p2p_protocol_autonatv2_pb_autonatv2_proto_msgTypes[0].OneofWrappers = []any{
+ (*Message_DialRequest)(nil),
+ (*Message_DialResponse)(nil),
+ (*Message_DialDataRequest)(nil),
+ (*Message_DialDataResponse)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_p2p_protocol_autonatv2_pb_autonatv2_proto_rawDesc), len(file_p2p_protocol_autonatv2_pb_autonatv2_proto_rawDesc)),
+ NumEnums: 3,
+ NumMessages: 7,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_p2p_protocol_autonatv2_pb_autonatv2_proto_goTypes,
+ DependencyIndexes: file_p2p_protocol_autonatv2_pb_autonatv2_proto_depIdxs,
+ EnumInfos: file_p2p_protocol_autonatv2_pb_autonatv2_proto_enumTypes,
+ MessageInfos: file_p2p_protocol_autonatv2_pb_autonatv2_proto_msgTypes,
+ }.Build()
+ File_p2p_protocol_autonatv2_pb_autonatv2_proto = out.File
+ file_p2p_protocol_autonatv2_pb_autonatv2_proto_goTypes = nil
+ file_p2p_protocol_autonatv2_pb_autonatv2_proto_depIdxs = nil
+}
diff --git a/p2p/protocol/autonatv2/pb/autonatv2.proto b/p2p/protocol/autonatv2/pb/autonatv2.proto
new file mode 100644
index 0000000000..0d7ad06336
--- /dev/null
+++ b/p2p/protocol/autonatv2/pb/autonatv2.proto
@@ -0,0 +1,66 @@
+syntax = "proto3";
+
+package autonatv2.pb;
+
+option go_package = "github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/pb";
+
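+// Exchange overview (as implemented by the server in this package): the client
+// sends a Message carrying a DialRequest; the server may first ask for dial
+// data by sending a DialDataRequest and reading DialDataResponse messages. It
+// then dials back on a separate stream with a DialBack carrying the nonce, and
+// finally replies with a Message carrying a DialResponse.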
+message Message {
+ oneof msg {
+ DialRequest dialRequest = 1;
+ DialResponse dialResponse = 2;
+ DialDataRequest dialDataRequest = 3;
+ DialDataResponse dialDataResponse = 4;
+ }
+}
+
+message DialRequest {
+ repeated bytes addrs = 1;
+ fixed64 nonce = 2;
+}
+
+
+message DialDataRequest {
+ uint32 addrIdx = 1;
+ uint64 numBytes = 2;
+}
+
+
+enum DialStatus {
+ UNUSED = 0;
+ E_DIAL_ERROR = 100;
+ E_DIAL_BACK_ERROR = 101;
+ OK = 200;
+}
+
+
+message DialResponse {
+ enum ResponseStatus {
+ E_INTERNAL_ERROR = 0;
+ E_REQUEST_REJECTED = 100;
+ E_DIAL_REFUSED = 101;
+ OK = 200;
+ }
+
+ ResponseStatus status = 1;
+ uint32 addrIdx = 2;
+ DialStatus dialStatus = 3;
+}
+
+
+message DialDataResponse {
+ bytes data = 1;
+}
+
+
+message DialBack {
+ fixed64 nonce = 1;
+}
+
+
+message DialBackResponse {
+ enum DialBackStatus {
+ OK = 0;
+ }
+
+ DialBackStatus status = 1;
+}
diff --git a/p2p/protocol/autonatv2/server.go b/p2p/protocol/autonatv2/server.go
new file mode 100644
index 0000000000..167d3d8ec4
--- /dev/null
+++ b/p2p/protocol/autonatv2/server.go
@@ -0,0 +1,561 @@
+package autonatv2
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "runtime/debug"
+ "sync"
+ "time"
+
+ pool "github.com/libp2p/go-buffer-pool"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/pb"
+ "github.com/libp2p/go-msgio/pbio"
+
+ "math/rand"
+
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
+var (
+ errResourceLimitExceeded = errors.New("resource limit exceeded")
+ errBadRequest = errors.New("bad request")
+ errDialDataRefused = errors.New("dial data refused")
+)
+
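+// dataRequestPolicyFunc reports whether a dial-back to dialAddr, requested by a
+// client observed at observedAddr, must be preceded by dial data from the client.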
+type dataRequestPolicyFunc = func(observedAddr, dialAddr ma.Multiaddr) bool
+
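+// EventDialRequestCompleted is passed to the MetricsTracer when the server
+// finishes serving a dial-request stream.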
+type EventDialRequestCompleted struct {
+ Error error
+ ResponseStatus pb.DialResponse_ResponseStatus
+ DialStatus pb.DialStatus
+ DialDataRequired bool
+ DialedAddr ma.Multiaddr
+}
+
+// server implements the AutoNATv2 server.
+// It can ask the client to provide dial data before attempting the requested dial.
+// It rate limits requests globally, per peer, and separately for requests that require dial data.
+type server struct {
+ host host.Host
+ dialerHost host.Host
+ limiter *rateLimiter
+
+ // dialDataRequestPolicy is used to determine whether dialing the address requires receiving
+ // dial data. It defaults to amplificationAttackPrevention.
+ dialDataRequestPolicy dataRequestPolicyFunc
+ amplificatonAttackPreventionDialWait time.Duration
+ metricsTracer MetricsTracer
+
+ // for tests
+ now func() time.Time
+ allowPrivateAddrs bool
+}
+
+func newServer(dialer host.Host, s *autoNATSettings) *server {
+ return &server{
+ dialerHost: dialer,
+ dialDataRequestPolicy: s.dataRequestPolicy,
+ amplificatonAttackPreventionDialWait: s.amplificatonAttackPreventionDialWait,
+ allowPrivateAddrs: s.allowPrivateAddrs,
+ limiter: &rateLimiter{
+ RPM: s.serverRPM,
+ PerPeerRPM: s.serverPerPeerRPM,
+ DialDataRPM: s.serverDialDataRPM,
+ MaxConcurrentRequestsPerPeer: s.maxConcurrentRequestsPerPeer,
+ now: s.now,
+ },
+ now: s.now,
+ metricsTracer: s.metricsTracer,
+ }
+}
+
+// Start attaches the stream handler to the host.
+func (as *server) Start(h host.Host) {
+ as.host = h
+ as.host.SetStreamHandler(DialProtocol, as.handleDialRequest)
+}
+
+func (as *server) Close() {
+ as.host.RemoveStreamHandler(DialProtocol)
+ as.dialerHost.Close()
+ as.limiter.Close()
+}
+
+// handleDialRequest is the dial-request protocol stream handler
+func (as *server) handleDialRequest(s network.Stream) {
+ defer func() {
+ if rerr := recover(); rerr != nil {
+ fmt.Fprintf(os.Stderr, "caught panic: %s\n%s\n", rerr, debug.Stack())
+ s.Reset()
+ }
+ }()
+
+ log.Debug("received dial-request",
+ "remote_peer", s.Conn().RemotePeer(),
+ "remote_multiaddr", s.Conn().RemoteMultiaddr())
+ evt := as.serveDialRequest(s)
+ log.Debug("completed dial-request",
+ "remote_peer", s.Conn().RemotePeer(),
+ "response_status", evt.ResponseStatus,
+ "dial_status", evt.DialStatus,
+ "error", evt.Error)
+ if as.metricsTracer != nil {
+ as.metricsTracer.CompletedRequest(evt)
+ }
+}
+
+func (as *server) serveDialRequest(s network.Stream) EventDialRequestCompleted {
+ if err := s.Scope().SetService(ServiceName); err != nil {
+ s.Reset()
+ log.Debug("failed to attach stream to service",
+ "service_name", ServiceName,
+ "error", err)
+ return EventDialRequestCompleted{
+ Error: errors.New("failed to attach stream to autonat-v2"),
+ }
+ }
+
+ if err := s.Scope().ReserveMemory(maxMsgSize, network.ReservationPriorityAlways); err != nil {
+ s.Reset()
+ log.Debug("failed to reserve memory for stream",
+ "protocol", DialProtocol,
+ "error", err)
+ return EventDialRequestCompleted{Error: errResourceLimitExceeded}
+ }
+ defer s.Scope().ReleaseMemory(maxMsgSize)
+
+ deadline := as.now().Add(streamTimeout)
+ ctx, cancel := context.WithDeadline(context.Background(), deadline)
+ defer cancel()
+ s.SetDeadline(as.now().Add(streamTimeout))
+ defer s.Close()
+
+ p := s.Conn().RemotePeer()
+
+ var msg pb.Message
+ w := pbio.NewDelimitedWriter(s)
+ // Check for rate limit before parsing the request
+ if !as.limiter.Accept(p) {
+ msg = pb.Message{
+ Msg: &pb.Message_DialResponse{
+ DialResponse: &pb.DialResponse{
+ Status: pb.DialResponse_E_REQUEST_REJECTED,
+ },
+ },
+ }
+ if err := w.WriteMsg(&msg); err != nil {
+ s.Reset()
+ log.Debug("failed to write request rejected response",
+ "remote_peer", p,
+ "error", err)
+ return EventDialRequestCompleted{
+ ResponseStatus: pb.DialResponse_E_REQUEST_REJECTED,
+ Error: fmt.Errorf("write failed: %w", err),
+ }
+ }
+ log.Debug("rejected request",
+ "remote_peer", p,
+ "reason", "rate limit exceeded")
+ return EventDialRequestCompleted{ResponseStatus: pb.DialResponse_E_REQUEST_REJECTED}
+ }
+ defer as.limiter.CompleteRequest(p)
+
+ r := pbio.NewDelimitedReader(s, maxMsgSize)
+ if err := r.ReadMsg(&msg); err != nil {
+ s.Reset()
+ log.Debug("failed to read request",
+ "remote_peer", p,
+ "error", err)
+ return EventDialRequestCompleted{Error: fmt.Errorf("read failed: %w", err)}
+ }
+ if msg.GetDialRequest() == nil {
+ s.Reset()
+ log.Debug("invalid message type",
+ "remote_peer", p,
+ "actual_type", fmt.Sprintf("%T", msg.Msg),
+ "expected_type", "DialRequest")
+ return EventDialRequestCompleted{Error: errBadRequest}
+ }
+
+ // parse peer's addresses
+ var dialAddr ma.Multiaddr
+ var addrIdx int
+ for i, ab := range msg.GetDialRequest().GetAddrs() {
+ if i >= maxPeerAddresses {
+ break
+ }
+ a, err := ma.NewMultiaddrBytes(ab)
+ if err != nil {
+ continue
+ }
+ if !as.allowPrivateAddrs && !manet.IsPublicAddr(a) {
+ continue
+ }
+ if !as.dialerHost.Network().CanDial(p, a) {
+ continue
+ }
+ dialAddr = a
+ addrIdx = i
+ break
+ }
+ // No dialable address
+ if dialAddr == nil {
+ msg = pb.Message{
+ Msg: &pb.Message_DialResponse{
+ DialResponse: &pb.DialResponse{
+ Status: pb.DialResponse_E_DIAL_REFUSED,
+ },
+ },
+ }
+ if err := w.WriteMsg(&msg); err != nil {
+ s.Reset()
+ log.Debug("failed to write dial refused response",
+ "remote_peer", p,
+ "error", err)
+ return EventDialRequestCompleted{
+ ResponseStatus: pb.DialResponse_E_DIAL_REFUSED,
+ Error: fmt.Errorf("write failed: %w", err),
+ }
+ }
+ return EventDialRequestCompleted{
+ ResponseStatus: pb.DialResponse_E_DIAL_REFUSED,
+ }
+ }
+
+ nonce := msg.GetDialRequest().Nonce
+
+ isDialDataRequired := as.dialDataRequestPolicy(s.Conn().RemoteMultiaddr(), dialAddr)
+ if isDialDataRequired && !as.limiter.AcceptDialDataRequest() {
+ msg = pb.Message{
+ Msg: &pb.Message_DialResponse{
+ DialResponse: &pb.DialResponse{
+ Status: pb.DialResponse_E_REQUEST_REJECTED,
+ },
+ },
+ }
+ if err := w.WriteMsg(&msg); err != nil {
+ s.Reset()
+ log.Debug("failed to write request rejected response",
+ "remote_peer", p,
+ "error", err)
+ return EventDialRequestCompleted{
+ ResponseStatus: pb.DialResponse_E_REQUEST_REJECTED,
+ Error: fmt.Errorf("write failed: %w", err),
+ DialDataRequired: true,
+ }
+ }
+ log.Debug("rejected request",
+ "remote_peer", p,
+ "reason", "rate limit exceeded")
+ return EventDialRequestCompleted{
+ ResponseStatus: pb.DialResponse_E_REQUEST_REJECTED,
+ DialDataRequired: true,
+ }
+ }
+
+ if isDialDataRequired {
+ if err := getDialData(w, s, &msg, addrIdx); err != nil {
+ s.Reset()
+ log.Debug("dial data request refused",
+ "remote_peer", p,
+ "error", err)
+ return EventDialRequestCompleted{
+ Error: errDialDataRefused,
+ DialDataRequired: true,
+ DialedAddr: dialAddr,
+ }
+ }
+ // wait for a short random duration to prevent thundering-herd style attacks on a victim
+ waitTime := time.Duration(rand.Intn(int(as.amplificatonAttackPreventionDialWait) + 1)) // rand.Intn(n) returns [0, n), so the +1 makes the wait range [0, n]
+ t := time.NewTimer(waitTime)
+ defer t.Stop()
+ select {
+ case <-ctx.Done():
+ s.Reset()
+ log.Debug("rejecting request without dialing",
+ "remote_peer", p,
+ "error", ctx.Err())
+ return EventDialRequestCompleted{Error: ctx.Err(), DialDataRequired: true, DialedAddr: dialAddr}
+ case <-t.C:
+ }
+ }
+
+ dialStatus := as.dialBack(ctx, s.Conn().RemotePeer(), dialAddr, nonce)
+ msg = pb.Message{
+ Msg: &pb.Message_DialResponse{
+ DialResponse: &pb.DialResponse{
+ Status: pb.DialResponse_OK,
+ DialStatus: dialStatus,
+ AddrIdx: uint32(addrIdx),
+ },
+ },
+ }
+ if err := w.WriteMsg(&msg); err != nil {
+ s.Reset()
+ log.Debug("failed to write response",
+ "remote_peer", p,
+ "error", err)
+ return EventDialRequestCompleted{
+ ResponseStatus: pb.DialResponse_OK,
+ DialStatus: dialStatus,
+ Error: fmt.Errorf("write failed: %w", err),
+ DialDataRequired: isDialDataRequired,
+ DialedAddr: dialAddr,
+ }
+ }
+ return EventDialRequestCompleted{
+ ResponseStatus: pb.DialResponse_OK,
+ DialStatus: dialStatus,
+ Error: nil,
+ DialDataRequired: isDialDataRequired,
+ DialedAddr: dialAddr,
+ }
+}
+
+// getDialData asks the client for dial data before the server dials the address at addrIdx
+func getDialData(w pbio.Writer, s network.Stream, msg *pb.Message, addrIdx int) error {
+ numBytes := minHandshakeSizeBytes + rand.Intn(maxHandshakeSizeBytes-minHandshakeSizeBytes)
+ *msg = pb.Message{
+ Msg: &pb.Message_DialDataRequest{
+ DialDataRequest: &pb.DialDataRequest{
+ AddrIdx: uint32(addrIdx),
+ NumBytes: uint64(numBytes),
+ },
+ },
+ }
+ if err := w.WriteMsg(msg); err != nil {
+ return fmt.Errorf("dial data write: %w", err)
+ }
+ // pbio.Reader that we used so far on this stream is buffered. But at this point
+ // there is nothing unread on the stream. So it is safe to use the raw stream to
+ // read, reducing allocations.
+ return readDialData(numBytes, s)
+}
+
+func readDialData(numBytes int, r io.Reader) error {
+ mr := &msgReader{R: r, Buf: pool.Get(maxMsgSize)}
+ defer pool.Put(mr.Buf)
+ for remain := numBytes; remain > 0; {
+ msg, err := mr.ReadMsg()
+ if err != nil {
+ return fmt.Errorf("dial data read: %w", err)
+ }
+ // The protobuf framing is:
+ // (oneof dialDataResponse: field tag + length varint)(dial data: field tag + length varint + bytes)
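+ // For example, a DialDataResponse carrying 1000 bytes of data arrives as a
+ // 1006-byte Message: 1 tag byte + 2 length-varint bytes for the oneof
+ // wrapper, and 1 tag byte + 2 length-varint bytes for the data field. The
+ // accounting below subtracts those 6 framing bytes to recover the 1000.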
+ bytesLen := len(msg)
+ bytesLen -= 2 // fieldTag + varint first byte
+ if bytesLen > 127 {
+ bytesLen -= 1 // varint second byte
+ }
+ bytesLen -= 2 // second fieldTag + varint first byte
+ if bytesLen > 127 {
+ bytesLen -= 1 // varint second byte
+ }
+ if bytesLen > 0 {
+ remain -= bytesLen
+ }
+ // Reject peers that send too little data per message, forcing us to burn compute reading many tiny messages
+ if bytesLen < 100 && remain > 0 {
+ return fmt.Errorf("dial data msg too small: %d", bytesLen)
+ }
+ }
+ return nil
+}
+
+func (as *server) dialBack(ctx context.Context, p peer.ID, addr ma.Multiaddr, nonce uint64) pb.DialStatus {
+ ctx, cancel := context.WithTimeout(ctx, dialBackDialTimeout)
+ ctx = network.WithForceDirectDial(ctx, "autonatv2")
+ as.dialerHost.Peerstore().AddAddr(p, addr, peerstore.TempAddrTTL)
+ defer func() {
+ cancel()
+ as.dialerHost.Network().ClosePeer(p)
+ as.dialerHost.Peerstore().ClearAddrs(p)
+ as.dialerHost.Peerstore().RemovePeer(p)
+ }()
+
+ err := as.dialerHost.Connect(ctx, peer.AddrInfo{ID: p})
+ if err != nil {
+ return pb.DialStatus_E_DIAL_ERROR
+ }
+
+ s, err := as.dialerHost.NewStream(ctx, p, DialBackProtocol)
+ if err != nil {
+ return pb.DialStatus_E_DIAL_BACK_ERROR
+ }
+
+ defer s.Close()
+ s.SetDeadline(as.now().Add(dialBackStreamTimeout))
+
+ w := pbio.NewDelimitedWriter(s)
+ if err := w.WriteMsg(&pb.DialBack{Nonce: nonce}); err != nil {
+ s.Reset()
+ return pb.DialStatus_E_DIAL_BACK_ERROR
+ }
+
+ // Since the underlying connection is on a separate dialer, it'll be closed after this
+ // function returns. Connection close will drop all the queued writes. To ensure message
+ // delivery, do a CloseWrite and read a byte from the stream. The peer actually sends a
+ // response of type DialBackResponse but we only care about the fact that the DialBack
+ // message has reached the peer. So we ignore that message on the read side.
+ s.CloseWrite()
+ s.SetDeadline(as.now().Add(5 * time.Second)) // 5s is an arbitrary grace period for the response byte to arrive
+ b := make([]byte, 1) // Read 1 byte here because 0 len reads are free to return (0, nil) immediately
+ s.Read(b)
+
+ return pb.DialStatus_OK
+}
+
+// rateLimiter implements a sliding window rate limit of requests per minute. It allows at most
+// MaxConcurrentRequestsPerPeer concurrent requests per peer. It rate limits requests globally,
+// at a per-peer level, and separately for requests that require dial data.
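+//
+// A rough usage sketch (limit values are illustrative):
+//
+//	r := &rateLimiter{RPM: 60, PerPeerRPM: 10, DialDataRPM: 5,
+//		MaxConcurrentRequestsPerPeer: 1, now: time.Now}
+//	if !r.Accept(p) {
+//		return // over a rate limit, or too many in-flight requests from p
+//	}
+//	defer r.CompleteRequest(p)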
+type rateLimiter struct {
+ // PerPeerRPM is the rate limit per peer
+ PerPeerRPM int
+ // RPM is the global rate limit
+ RPM int
+ // DialDataRPM is the rate limit for requests that require dial data
+ DialDataRPM int
+ // MaxConcurrentRequestsPerPeer is the maximum number of concurrent requests per peer
+ MaxConcurrentRequestsPerPeer int
+
+ mu sync.Mutex
+ closed bool
+ reqs []entry
+ peerReqs map[peer.ID][]time.Time
+ dialDataReqs []time.Time
+ // inProgressReqs tracks in progress requests. This is used to limit multiple
+ // concurrent requests by the same peer.
+ inProgressReqs map[peer.ID]int
+
+ now func() time.Time // for tests
+}
+
+type entry struct {
+ PeerID peer.ID
+ Time time.Time
+}
+
+func (r *rateLimiter) init() {
+ if r.peerReqs == nil {
+ r.peerReqs = make(map[peer.ID][]time.Time)
+ r.inProgressReqs = make(map[peer.ID]int)
+ }
+}
+
+func (r *rateLimiter) Accept(p peer.ID) bool {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ if r.closed {
+ return false
+ }
+ r.init()
+ nw := r.now()
+ r.cleanup(nw)
+
+ if r.inProgressReqs[p] >= r.MaxConcurrentRequestsPerPeer {
+ return false
+ }
+ if len(r.reqs) >= r.RPM || len(r.peerReqs[p]) >= r.PerPeerRPM {
+ return false
+ }
+
+ r.inProgressReqs[p]++
+ r.reqs = append(r.reqs, entry{PeerID: p, Time: nw})
+ r.peerReqs[p] = append(r.peerReqs[p], nw)
+ return true
+}
+
+func (r *rateLimiter) AcceptDialDataRequest() bool {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ if r.closed {
+ return false
+ }
+ r.init()
+ nw := r.now()
+ r.cleanup(nw)
+ if len(r.dialDataReqs) >= r.DialDataRPM {
+ return false
+ }
+ r.dialDataReqs = append(r.dialDataReqs, nw)
+ return true
+}
+
+// cleanup removes stale requests.
+//
+// This is fast enough in rate limited cases and the state is small enough to
+// clean up quickly when blocking requests.
+func (r *rateLimiter) cleanup(now time.Time) {
+ idx := len(r.reqs)
+ for i, e := range r.reqs {
+ if now.Sub(e.Time) >= time.Minute {
+ pi := len(r.peerReqs[e.PeerID])
+ for j, t := range r.peerReqs[e.PeerID] {
+ if now.Sub(t) < time.Minute {
+ pi = j
+ break
+ }
+ }
+ r.peerReqs[e.PeerID] = r.peerReqs[e.PeerID][pi:]
+ if len(r.peerReqs[e.PeerID]) == 0 {
+ delete(r.peerReqs, e.PeerID)
+ }
+ } else {
+ idx = i
+ break
+ }
+ }
+ r.reqs = r.reqs[idx:]
+
+ idx = len(r.dialDataReqs)
+ for i, t := range r.dialDataReqs {
+ if now.Sub(t) < time.Minute {
+ idx = i
+ break
+ }
+ }
+ r.dialDataReqs = r.dialDataReqs[idx:]
+}
+
+func (r *rateLimiter) CompleteRequest(p peer.ID) {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ // After Close the maps are nil; avoid writing to a nil map.
+ if r.closed {
+ return
+ }
+ r.inProgressReqs[p]--
+ if r.inProgressReqs[p] <= 0 {
+ // Check for underflow before deleting: reading a deleted key always returns 0.
+ if r.inProgressReqs[p] < 0 {
+ log.Error("BUG: negative in progress requests",
+ "remote_peer", p)
+ }
+ delete(r.inProgressReqs, p)
+ }
+}
+
+func (r *rateLimiter) Close() {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ r.closed = true
+ r.peerReqs = nil
+ r.inProgressReqs = nil
+ r.dialDataReqs = nil
+}
+
+// amplificationAttackPrevention is a dialDataRequestPolicy which requests dial data when the
+// peer's observed IP address differs from the IP address of the requested dial-back address.
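+// For example, a request observed from 1.2.3.4 asking for a dial-back to an
+// address on 1.2.3.4 needs no dial data, while one asking for 5.6.7.8 (or for a
+// DNS address, which cannot be compared by IP here) does.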
+func amplificationAttackPrevention(observedAddr, dialAddr ma.Multiaddr) bool {
+ observedIP, err := manet.ToIP(observedAddr)
+ if err != nil {
+ return true
+ }
+ dialIP, err := manet.ToIP(dialAddr) // can be dns addr
+ if err != nil {
+ return true
+ }
+ return !observedIP.Equal(dialIP)
+}
diff --git a/p2p/protocol/autonatv2/server_test.go b/p2p/protocol/autonatv2/server_test.go
new file mode 100644
index 0000000000..fb50b3a556
--- /dev/null
+++ b/p2p/protocol/autonatv2/server_test.go
@@ -0,0 +1,647 @@
+package autonatv2
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "math"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/test"
+ bhost "github.com/libp2p/go-libp2p/p2p/host/blank"
+ "github.com/libp2p/go-libp2p/p2p/net/swarm"
+ swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
+ "github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/pb"
+ "github.com/libp2p/go-msgio/pbio"
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/multiformats/go-varint"
+ "github.com/stretchr/testify/require"
+)
+
+func newTestRequests(addrs []ma.Multiaddr, sendDialData bool) (reqs []Request) {
+ reqs = make([]Request, len(addrs))
+ for i := 0; i < len(addrs); i++ {
+ reqs[i] = Request{Addr: addrs[i], SendDialData: sendDialData}
+ }
+ return
+}
+
+func TestServerInvalidAddrsRejected(t *testing.T) {
+ c := newAutoNAT(t, nil, allowPrivateAddrs, withAmplificationAttackPreventionDialWait(0))
+ defer c.Close()
+ defer c.host.Close()
+
+ t.Run("no transport", func(t *testing.T) {
+ dialer := bhost.NewBlankHost(swarmt.GenSwarm(t, swarmt.OptDisableQUIC, swarmt.OptDisableTCP))
+ an := newAutoNAT(t, dialer, allowPrivateAddrs)
+ defer an.Close()
+ defer an.host.Close()
+
+ idAndWait(t, c, an)
+
+ res, err := c.GetReachability(context.Background(), newTestRequests(c.host.Addrs(), true))
+ require.NoError(t, err)
+ require.Equal(t, Result{AllAddrsRefused: true}, res)
+ })
+
+ t.Run("black holed addr", func(t *testing.T) {
+ dialer := bhost.NewBlankHost(swarmt.GenSwarm(
+ t, swarmt.WithSwarmOpts(swarm.WithReadOnlyBlackHoleDetector())))
+ an := newAutoNAT(t, dialer)
+ defer an.Close()
+ defer an.host.Close()
+
+ idAndWait(t, c, an)
+
+ res, err := c.GetReachability(context.Background(),
+ []Request{{
+ Addr: ma.StringCast("/ip4/1.2.3.4/udp/1234/quic-v1"),
+ SendDialData: true,
+ }})
+ require.NoError(t, err)
+ require.Equal(t, Result{AllAddrsRefused: true}, res)
+ })
+
+ t.Run("private addrs", func(t *testing.T) {
+ an := newAutoNAT(t, nil)
+ defer an.Close()
+ defer an.host.Close()
+
+ idAndWait(t, c, an)
+
+ res, err := c.GetReachability(context.Background(), newTestRequests(c.host.Addrs(), true))
+ require.NoError(t, err)
+ require.Equal(t, Result{AllAddrsRefused: true}, res)
+ })
+
+ t.Run("relay addrs", func(t *testing.T) {
+ an := newAutoNAT(t, nil)
+ defer an.Close()
+ defer an.host.Close()
+
+ idAndWait(t, c, an)
+
+ res, err := c.GetReachability(context.Background(), newTestRequests(
+ []ma.Multiaddr{ma.StringCast(fmt.Sprintf("/ip4/1.2.3.4/tcp/1/p2p/%s/p2p-circuit/p2p/%s", c.host.ID(), c.srv.dialerHost.ID()))}, true))
+ require.NoError(t, err)
+ require.Equal(t, Result{AllAddrsRefused: true}, res)
+ })
+
+ t.Run("no addr", func(t *testing.T) {
+ _, err := c.GetReachability(context.Background(), nil)
+ require.Error(t, err)
+ })
+
+ t.Run("too many address", func(t *testing.T) {
+ dialer := bhost.NewBlankHost(swarmt.GenSwarm(t, swarmt.OptDisableTCP))
+ an := newAutoNAT(t, dialer, allowPrivateAddrs)
+ defer an.Close()
+ defer an.host.Close()
+
+ var addrs []ma.Multiaddr
+ for i := 0; i < 100; i++ {
+ addrs = append(addrs, ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 2000+i)))
+ }
+ addrs = append(addrs, c.host.Addrs()...)
+ // The dial should still fail because we have too many addresses that the server cannot dial
+ idAndWait(t, c, an)
+
+ res, err := c.GetReachability(context.Background(), newTestRequests(addrs, true))
+ require.NoError(t, err)
+ require.Equal(t, Result{AllAddrsRefused: true}, res)
+ })
+
+ t.Run("msg too large", func(t *testing.T) {
+ dialer := bhost.NewBlankHost(swarmt.GenSwarm(t, swarmt.OptDisableTCP))
+ an := newAutoNAT(t, dialer, allowPrivateAddrs)
+ defer an.Close()
+ defer an.host.Close()
+
+ var addrs []ma.Multiaddr
+ for i := 0; i < 10000; i++ {
+ addrs = append(addrs, ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", 2000+i)))
+ }
+ addrs = append(addrs, c.host.Addrs()...)
+ // The request should fail outright because the message is too large for the server to read
+ idAndWait(t, c, an)
+
+ res, err := c.GetReachability(context.Background(), newTestRequests(addrs, true))
+ require.ErrorIs(t, err, network.ErrReset)
+ require.Equal(t, Result{}, res)
+ })
+}
+
+func TestServerDataRequest(t *testing.T) {
+ // server will skip all tcp addresses
+ dialer := bhost.NewBlankHost(swarmt.GenSwarm(t, swarmt.OptDisableTCP))
+ // ask for dial data for quic address
+ an := newAutoNAT(t, dialer, allowPrivateAddrs, withDataRequestPolicy(
+ func(_, dialAddr ma.Multiaddr) bool {
+ if _, err := dialAddr.ValueForProtocol(ma.P_QUIC_V1); err == nil {
+ return true
+ }
+ return false
+ }),
+ WithServerRateLimit(10, 10, 10, 2),
+ withAmplificationAttackPreventionDialWait(0),
+ )
+ defer an.Close()
+ defer an.host.Close()
+
+ c := newAutoNAT(t, nil, allowPrivateAddrs)
+ defer c.Close()
+ defer c.host.Close()
+
+ idAndWait(t, c, an)
+
+ var quicAddr, tcpAddr ma.Multiaddr
+ for _, a := range c.host.Addrs() {
+ if _, err := a.ValueForProtocol(ma.P_QUIC_V1); err == nil {
+ quicAddr = a
+ } else if _, err := a.ValueForProtocol(ma.P_TCP); err == nil {
+ tcpAddr = a
+ }
+ }
+
+ _, err := c.GetReachability(context.Background(), []Request{{Addr: tcpAddr, SendDialData: true}, {Addr: quicAddr}})
+ require.Error(t, err)
+
+ res, err := c.GetReachability(context.Background(), []Request{{Addr: quicAddr, SendDialData: true}, {Addr: tcpAddr}})
+ require.NoError(t, err)
+
+ require.Equal(t, Result{
+ Addr: quicAddr,
+ Idx: 0,
+ Reachability: network.ReachabilityPublic,
+ }, res)
+
+ // Small messages should be rejected for dial data
+ c.cli.dialData = c.cli.dialData[:10]
+ _, err = c.GetReachability(context.Background(), []Request{{Addr: quicAddr, SendDialData: true}, {Addr: tcpAddr}})
+ require.Error(t, err)
+}
+
+func TestServerMaxConcurrentRequestsPerPeer(t *testing.T) {
+ const concurrentRequests = 5
+
+ stallChan := make(chan struct{})
+ an := newAutoNAT(t, nil, allowPrivateAddrs, withDataRequestPolicy(
+ // stall all allowed requests
+ func(_, _ ma.Multiaddr) bool {
+ <-stallChan
+ return true
+ }),
+ WithServerRateLimit(10, 10, 10, concurrentRequests),
+ withAmplificationAttackPreventionDialWait(0),
+ )
+ defer an.Close()
+ defer an.host.Close()
+
+ // server will skip all tcp addresses
+ dialer := bhost.NewBlankHost(swarmt.GenSwarm(t, swarmt.OptDisableTCP))
+ c := newAutoNAT(t, dialer, allowPrivateAddrs)
+ defer c.Close()
+ defer c.host.Close()
+
+ idAndWait(t, c, an)
+
+ errChan := make(chan error)
+ const n = 10
+ // num concurrentRequests will stall and n will fail
+ for i := 0; i < concurrentRequests+n; i++ {
+ go func() {
+ _, err := c.GetReachability(context.Background(), []Request{{Addr: c.host.Addrs()[0], SendDialData: false}})
+ errChan <- err
+ }()
+ }
+
+ // check N failures
+ for i := 0; i < n; i++ {
+ select {
+ case err := <-errChan:
+ require.Error(t, err)
+ if !strings.Contains(err.Error(), "stream reset") && !strings.Contains(err.Error(), "E_REQUEST_REJECTED") {
+ t.Fatalf("invalid error: %s expected: stream reset or E_REQUEST_REJECTED", err)
+ }
+ case <-time.After(10 * time.Second):
+ t.Fatalf("expected %d errors: got: %d", n, i)
+ }
+ }
+
+ close(stallChan) // complete stalled requests
+ // check concurrentRequests failures, as we won't send dial data
+ for i := 0; i < concurrentRequests; i++ {
+ select {
+ case err := <-errChan:
+ require.Error(t, err)
+ case <-time.After(5 * time.Second):
+ t.Fatalf("expected %d errors: got: %d", concurrentRequests, i)
+ }
+ }
+ select {
+ case err := <-errChan:
+ t.Fatalf("expected no more errors: got: %v", err)
+ default:
+ }
+}
+
+func TestServerDataRequestJitter(t *testing.T) {
+ // server will skip all tcp addresses
+ dialer := bhost.NewBlankHost(swarmt.GenSwarm(t, swarmt.OptDisableTCP))
+ // ask for dial data for quic address
+ an := newAutoNAT(t, dialer, allowPrivateAddrs, withDataRequestPolicy(
+ func(_, dialAddr ma.Multiaddr) bool {
+ if _, err := dialAddr.ValueForProtocol(ma.P_QUIC_V1); err == nil {
+ return true
+ }
+ return false
+ }),
+ WithServerRateLimit(10, 10, 10, 2),
+ withAmplificationAttackPreventionDialWait(5*time.Second),
+ )
+ defer an.Close()
+ defer an.host.Close()
+
+ c := newAutoNAT(t, nil, allowPrivateAddrs)
+ defer c.Close()
+ defer c.host.Close()
+
+ idAndWait(t, c, an)
+
+ var quicAddr, tcpAddr ma.Multiaddr
+ for _, a := range c.host.Addrs() {
+ if _, err := a.ValueForProtocol(ma.P_QUIC_V1); err == nil {
+ quicAddr = a
+ } else if _, err := a.ValueForProtocol(ma.P_TCP); err == nil {
+ tcpAddr = a
+ }
+ }
+
+ for i := 0; i < 10; i++ {
+ st := time.Now()
+ res, err := c.GetReachability(context.Background(), []Request{{Addr: quicAddr, SendDialData: true}, {Addr: tcpAddr}})
+ took := time.Since(st)
+ require.NoError(t, err)
+
+ require.Equal(t, Result{
+ Addr: quicAddr,
+ Idx: 0,
+ Reachability: network.ReachabilityPublic,
+ }, res)
+ if took > 500*time.Millisecond {
+ return
+ }
+ }
+ t.Fatalf("expected server to delay at least 1 dial")
+}
+
+func TestServerDial(t *testing.T) {
+ an := newAutoNAT(t, nil, WithServerRateLimit(10, 10, 10, 2), allowPrivateAddrs)
+ defer an.Close()
+ defer an.host.Close()
+
+ c := newAutoNAT(t, nil, allowPrivateAddrs)
+ defer c.Close()
+ defer c.host.Close()
+
+ idAndWait(t, c, an)
+
+ unreachableAddr := ma.StringCast("/ip4/1.2.3.4/tcp/2")
+ hostAddrs := c.host.Addrs()
+
+ t.Run("unreachable addr", func(t *testing.T) {
+ res, err := c.GetReachability(context.Background(),
+ append([]Request{{Addr: unreachableAddr, SendDialData: true}}, newTestRequests(hostAddrs, false)...))
+ require.NoError(t, err)
+ require.Equal(t, Result{
+ Addr: unreachableAddr,
+ Idx: 0,
+ Reachability: network.ReachabilityPrivate,
+ }, res)
+ })
+
+ t.Run("reachable addr", func(t *testing.T) {
+ res, err := c.GetReachability(context.Background(), newTestRequests(c.host.Addrs(), false))
+ require.NoError(t, err)
+ require.Equal(t, Result{
+ Addr: hostAddrs[0],
+ Idx: 0,
+ Reachability: network.ReachabilityPublic,
+ }, res)
+ for _, addr := range c.host.Addrs() {
+ res, err := c.GetReachability(context.Background(), newTestRequests([]ma.Multiaddr{addr}, false))
+ require.NoError(t, err)
+ require.Equal(t, Result{
+ Addr: addr,
+ Idx: 0,
+ Reachability: network.ReachabilityPublic,
+ }, res)
+ }
+ })
+
+ t.Run("dialback error", func(t *testing.T) {
+ c.host.RemoveStreamHandler(DialBackProtocol)
+ res, err := c.GetReachability(context.Background(), newTestRequests(c.host.Addrs(), false))
+ require.ErrorContains(t, err, "dial-back stream error")
+ require.Equal(t, Result{}, res)
+ })
+}
+
+func TestRateLimiter(t *testing.T) {
+ cl := test.NewMockClock()
+ r := rateLimiter{RPM: 3, PerPeerRPM: 2, DialDataRPM: 1, now: cl.Now, MaxConcurrentRequestsPerPeer: 1}
+
+ require.True(t, r.Accept("peer1"))
+
+ cl.AdvanceBy(10 * time.Second)
+ require.False(t, r.Accept("peer1")) // first request is still active
+ r.CompleteRequest("peer1")
+
+ require.True(t, r.Accept("peer1"))
+ r.CompleteRequest("peer1")
+
+ cl.AdvanceBy(10 * time.Second)
+ require.False(t, r.Accept("peer1"))
+
+ cl.AdvanceBy(10 * time.Second)
+ require.True(t, r.Accept("peer2"))
+ r.CompleteRequest("peer2")
+
+ cl.AdvanceBy(10 * time.Second)
+ require.False(t, r.Accept("peer3"))
+
+ cl.AdvanceBy(21 * time.Second) // first request expired
+ require.True(t, r.Accept("peer1"))
+ r.CompleteRequest("peer1")
+
+ cl.AdvanceBy(10 * time.Second)
+ require.True(t, r.Accept("peer3"))
+ r.CompleteRequest("peer3")
+
+ cl.AdvanceBy(50 * time.Second)
+ require.True(t, r.Accept("peer3"))
+ r.CompleteRequest("peer3")
+
+ cl.AdvanceBy(1 * time.Second)
+ require.False(t, r.Accept("peer3"))
+
+ cl.AdvanceBy(10 * time.Second)
+ require.True(t, r.Accept("peer3"))
+}
+
+func TestRateLimiterConcurrentRequests(t *testing.T) {
+ const N = 5
+ const Peers = 5
+ for concurrentRequests := 1; concurrentRequests <= N; concurrentRequests++ {
+ cl := test.NewMockClock()
+ r := rateLimiter{RPM: 10 * Peers * N, PerPeerRPM: 10 * Peers * N, DialDataRPM: 10 * Peers * N, now: cl.Now, MaxConcurrentRequestsPerPeer: concurrentRequests}
+ for p := 0; p < Peers; p++ {
+ for i := 0; i < concurrentRequests; i++ {
+ require.True(t, r.Accept(peer.ID(fmt.Sprintf("peer-%d", p))))
+ }
+ require.False(t, r.Accept(peer.ID(fmt.Sprintf("peer-%d", p))))
+ // Now complete the requests
+ for i := 0; i < concurrentRequests; i++ {
+ r.CompleteRequest(peer.ID(fmt.Sprintf("peer-%d", p)))
+ }
+ // Now we should be able to accept new requests
+ for i := 0; i < concurrentRequests; i++ {
+ require.True(t, r.Accept(peer.ID(fmt.Sprintf("peer-%d", p))))
+ }
+ require.False(t, r.Accept(peer.ID(fmt.Sprintf("peer-%d", p))))
+ }
+ }
+}
+
+func TestRateLimiterStress(t *testing.T) {
+ cl := test.NewMockClock()
+ for i := 0; i < 10; i++ {
+ r := rateLimiter{RPM: 20 + i, PerPeerRPM: 10 + i, DialDataRPM: i, MaxConcurrentRequestsPerPeer: 1, now: cl.Now}
+
+ peers := make([]peer.ID, 10+i)
+ for i := 0; i < len(peers); i++ {
+ peers[i] = peer.ID(fmt.Sprintf("peer-%d", i))
+ }
+ peerSuccesses := make([]atomic.Int64, len(peers))
+ var success, dialDataSuccesses atomic.Int64
+ var wg sync.WaitGroup
+ for k := 0; k < 5; k++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for i := 0; i < 2*60; i++ {
+ for j, p := range peers {
+ if r.Accept(p) {
+ success.Add(1)
+ peerSuccesses[j].Add(1)
+ }
+ if r.AcceptDialDataRequest() {
+ dialDataSuccesses.Add(1)
+ }
+ r.CompleteRequest(p)
+ }
+ cl.AdvanceBy(time.Second)
+ }
+ }()
+ }
+ wg.Wait()
+ if int(success.Load()) > 10*r.RPM || int(success.Load()) < 9*r.RPM {
+ t.Fatalf("invalid successes, %d, expected %d-%d", success.Load(), 9*r.RPM, 10*r.RPM)
+ }
+ if int(dialDataSuccesses.Load()) > 10*r.DialDataRPM || int(dialDataSuccesses.Load()) < 9*r.DialDataRPM {
+ t.Fatalf("invalid dial data successes, %d expected %d-%d", dialDataSuccesses.Load(), 9*r.DialDataRPM, 10*r.DialDataRPM)
+ }
+ for i := range peerSuccesses {
+ // We cannot check the lower bound because some peers would be hitting the global rpm limit
+ if int(peerSuccesses[i].Load()) > 10*r.PerPeerRPM {
+ t.Fatalf("too many per peer successes, PerPeerRPM=%d", r.PerPeerRPM)
+ }
+ }
+ cl.AdvanceBy(1 * time.Minute)
+ require.True(t, r.Accept(peers[0]))
+ // Assert lengths to check that we are cleaning up correctly
+ require.Equal(t, len(r.reqs), 1)
+ require.Equal(t, len(r.peerReqs), 1)
+ require.Equal(t, len(r.peerReqs[peers[0]]), 1)
+ require.Equal(t, len(r.dialDataReqs), 0)
+ require.Equal(t, len(r.inProgressReqs), 1)
+ }
+}
+
+func TestReadDialData(t *testing.T) {
+ for N := 30_000; N < 30_010; N++ {
+ for msgSize := 100; msgSize < 256; msgSize++ {
+ r, w := io.Pipe()
+ msg := &pb.Message{}
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ mw := pbio.NewDelimitedWriter(w)
+ err := sendDialData(make([]byte, msgSize), N, mw, msg)
+ if err != nil {
+ t.Error(err)
+ }
+ mw.Close()
+ }()
+ err := readDialData(N, r)
+ require.NoError(t, err)
+ wg.Wait()
+ }
+
+ for msgSize := 1000; msgSize < 1256; msgSize++ {
+ r, w := io.Pipe()
+ msg := &pb.Message{}
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ mw := pbio.NewDelimitedWriter(w)
+ err := sendDialData(make([]byte, msgSize), N, mw, msg)
+ if err != nil {
+ t.Error(err)
+ }
+ mw.Close()
+ }()
+ err := readDialData(N, r)
+ require.NoError(t, err)
+ wg.Wait()
+ }
+ }
+}
+
+func TestServerDataRequestWithAmplificationAttackPrevention(t *testing.T) {
+ // server will skip all tcp addresses
+ dialer := bhost.NewBlankHost(swarmt.GenSwarm(t, swarmt.OptDisableTCP))
+ // ask for dial data for quic address
+ an := newAutoNAT(t, dialer, allowPrivateAddrs,
+ WithServerRateLimit(10, 10, 10, 2),
+ withAmplificationAttackPreventionDialWait(0),
+ )
+ defer an.Close()
+ defer an.host.Close()
+
+ c := newAutoNAT(t, nil, allowPrivateAddrs)
+ defer c.Close()
+ defer c.host.Close()
+
+ idAndWait(t, c, an)
+
+ err := c.host.Network().Listen(ma.StringCast("/ip6/::1/udp/0/quic-v1"))
+ if err != nil {
+ // machine doesn't have ipv6
+ t.Skip("skipping test because machine doesn't have ipv6")
+ }
+
+ var quicv4Addr ma.Multiaddr
+ var quicv6Addr ma.Multiaddr
+ for _, a := range c.host.Addrs() {
+ if _, err := a.ValueForProtocol(ma.P_QUIC_V1); err == nil {
+ if _, err := a.ValueForProtocol(ma.P_IP4); err == nil {
+ quicv4Addr = a
+ } else {
+ quicv6Addr = a
+ }
+ }
+ }
+ res, err := c.GetReachability(context.Background(), []Request{{Addr: quicv4Addr, SendDialData: false}})
+ require.NoError(t, err)
+ require.Equal(t, Result{
+ Addr: quicv4Addr,
+ Idx: 0,
+ Reachability: network.ReachabilityPublic,
+ }, res)
+
+ // ipv6 address should require dial data
+ _, err = c.GetReachability(context.Background(), []Request{{Addr: quicv6Addr, SendDialData: false}})
+ require.Error(t, err)
+ require.ErrorContains(t, err, "invalid dial data request")
+ require.ErrorContains(t, err, "low priority addr")
+
+ // ipv6 address should work fine with dial data
+ res, err = c.GetReachability(context.Background(), []Request{{Addr: quicv6Addr, SendDialData: true}})
+ require.NoError(t, err)
+ require.Equal(t, Result{
+ Addr: quicv6Addr,
+ Idx: 0,
+ Reachability: network.ReachabilityPublic,
+ }, res)
+}
+
+func TestDefaultAmplificationAttackPrevention(t *testing.T) {
+ q1 := ma.StringCast("/ip4/1.2.3.4/udp/1234/quic-v1")
+ q2 := ma.StringCast("/ip4/1.2.3.4/udp/1235/quic-v1")
+ t1 := ma.StringCast("/ip4/1.2.3.4/tcp/1234")
+
+ require.False(t, amplificationAttackPrevention(q1, q1))
+ require.False(t, amplificationAttackPrevention(q1, q2))
+ require.False(t, amplificationAttackPrevention(q1, t1))
+
+ t2 := ma.StringCast("/ip4/1.1.1.1/tcp/1235") // different IP
+ require.True(t, amplificationAttackPrevention(q2, t2))
+
+ // always ask dial data for dns addrs
+ d1 := ma.StringCast("/dns/localhost/udp/1/quic-v1")
+ d2 := ma.StringCast("/dnsaddr/libp2p.io/tcp/1")
+ require.True(t, amplificationAttackPrevention(d1, t1))
+ require.True(t, amplificationAttackPrevention(d2, t1))
+}
+
+func FuzzServerDialRequest(f *testing.F) {
+ a := newAutoNAT(f, nil, allowPrivateAddrs, WithServerRateLimit(math.MaxInt32, math.MaxInt32, math.MaxInt32, 2))
+ c := newAutoNAT(f, nil)
+ idAndWait(f, c, a)
+ // reduce the streamTimeout before running this. TODO: fix this
+ f.Fuzz(func(t *testing.T, data []byte) {
+ s, err := c.host.NewStream(context.Background(), a.host.ID(), DialProtocol)
+ if err != nil {
+ t.Fatal(err)
+ }
+ s.SetDeadline(time.Now().Add(10 * time.Second))
+ s.Write(data)
+ buf := make([]byte, 64)
+ s.Read(buf) // We only care that server didn't panic
+ s, err = c.host.NewStream(context.Background(), a.host.ID(), DialProtocol)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ n := varint.PutUvarint(buf, uint64(len(data)))
+ s.SetDeadline(time.Now().Add(10 * time.Second))
+ s.Write(buf[:n])
+ s.Write(data)
+ s.Read(buf) // We only care that server didn't panic
+ s.Reset()
+ })
+}
+
+func FuzzReadDialData(f *testing.F) {
+ f.Fuzz(func(_ *testing.T, numBytes int, data []byte) {
+ readDialData(numBytes, bytes.NewReader(data))
+ })
+}
+
+func BenchmarkDialData(b *testing.B) {
+ b.ReportAllocs()
+ const N = 100_000
+ streamBuffer := make([]byte, 2*N)
+ buf := bytes.NewBuffer(streamBuffer[:0])
+ dialData := make([]byte, 4000)
+ msg := &pb.Message{}
+ w := pbio.NewDelimitedWriter(buf)
+ err := sendDialData(dialData, N, w, msg)
+ require.NoError(b, err)
+ dialDataBuf := buf.Bytes()
+ for i := 0; i < b.N; i++ {
+ err = readDialData(N, bytes.NewReader(dialDataBuf))
+ require.NoError(b, err)
+ }
+}
diff --git a/p2p/protocol/circuitv2/client/client.go b/p2p/protocol/circuitv2/client/client.go
new file mode 100644
index 0000000000..2ac68576d2
--- /dev/null
+++ b/p2p/protocol/circuitv2/client/client.go
@@ -0,0 +1,76 @@
+package client
+
+import (
+ "context"
+ "io"
+ "sync"
+
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/transport"
+ "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/proto"
+
+ logging "github.com/libp2p/go-libp2p/gologshim"
+)
+
+var log = logging.Logger("p2p-circuit")
+
+// Client implements the client-side of the p2p-circuit/v2 protocol:
+// - it implements dialing through v2 relays
+// - it listens for incoming connections through v2 relays.
+//
+// For backwards compatibility with v1 relays and older nodes, the client will
+// also accept relay connections through v1 relays and fall back to dialing peers using p2p-circuit/v1.
+// This allows us to use the v2 code as a drop-in replacement for v1 in a host without breaking
+// existing code or interoperability with older nodes.
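+//
+// A minimal usage sketch (h and upgrader are assumed to be already constructed):
+//
+//	cl, err := client.New(h, upgrader)
+//	if err != nil {
+//		return err
+//	}
+//	cl.Start()
+//	defer cl.Close()
+//	conn, err := cl.Listener().Accept() // accept incoming relayed connections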
+type Client struct {
+ ctx context.Context
+ ctxCancel context.CancelFunc
+ host host.Host
+ upgrader transport.Upgrader
+
+ incoming chan accept
+
+ mx sync.Mutex
+ activeDials map[peer.ID]*completion
+ hopCount map[peer.ID]int
+}
+
+var _ io.Closer = &Client{}
+var _ transport.Transport = &Client{}
+
+type accept struct {
+ conn *Conn
+ writeResponse func() error
+}
+
+type completion struct {
+ ch chan struct{}
+ relay peer.ID
+ err error
+}
+
+// New constructs a new p2p-circuit/v2 client, attached to the given host and using the given
+// upgrader to perform connection upgrades.
+func New(h host.Host, upgrader transport.Upgrader) (*Client, error) {
+ cl := &Client{
+ host: h,
+ upgrader: upgrader,
+ incoming: make(chan accept),
+ activeDials: make(map[peer.ID]*completion),
+ hopCount: make(map[peer.ID]int),
+ }
+ cl.ctx, cl.ctxCancel = context.WithCancel(context.Background())
+ return cl, nil
+}
+
+// Start registers the circuit (client) protocol stream handlers
+func (c *Client) Start() {
+ c.host.SetStreamHandler(proto.ProtoIDv2Stop, c.handleStreamV2)
+}
+
+func (c *Client) Close() error {
+ c.ctxCancel()
+ c.host.RemoveStreamHandler(proto.ProtoIDv2Stop)
+ return nil
+}
diff --git a/p2p/protocol/circuitv2/client/conn.go b/p2p/protocol/circuitv2/client/conn.go
new file mode 100644
index 0000000000..6a90fa431f
--- /dev/null
+++ b/p2p/protocol/circuitv2/client/conn.go
@@ -0,0 +1,164 @@
+package client
+
+import (
+ "fmt"
+ "net"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ tpt "github.com/libp2p/go-libp2p/core/transport"
+
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
+// HopTagWeight is the connection manager weight for connections carrying relay hop streams
+var HopTagWeight = 5
+
+type statLimitDuration struct{}
+type statLimitData struct{}
+
+var (
+ StatLimitDuration = statLimitDuration{}
+ StatLimitData = statLimitData{}
+)
+
+type Conn struct {
+ stream network.Stream
+ remote peer.AddrInfo
+ stat network.ConnStats
+
+ client *Client
+}
+
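+// NetAddr is the net.Addr returned for relayed connections. It identifies the
+// connection by relay and remote peer IDs rather than by a transport address.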
+type NetAddr struct {
+ Relay string
+ Remote string
+}
+
+var _ net.Addr = (*NetAddr)(nil)
+
+func (n *NetAddr) Network() string {
+ return "libp2p-circuit-relay"
+}
+
+func (n *NetAddr) String() string {
+ return fmt.Sprintf("relay[%s-%s]", n.Remote, n.Relay)
+}
+
+// Conn interface
+var _ manet.Conn = (*Conn)(nil)
+
+func (c *Conn) Close() error {
+ c.untagHop()
+ return c.stream.Reset()
+}
+
+func (c *Conn) Read(buf []byte) (int, error) {
+ return c.stream.Read(buf)
+}
+
+func (c *Conn) Write(buf []byte) (int, error) {
+ return c.stream.Write(buf)
+}
+
+func (c *Conn) SetDeadline(t time.Time) error {
+ return c.stream.SetDeadline(t)
+}
+
+func (c *Conn) SetReadDeadline(t time.Time) error {
+ return c.stream.SetReadDeadline(t)
+}
+
+func (c *Conn) SetWriteDeadline(t time.Time) error {
+ return c.stream.SetWriteDeadline(t)
+}
+
+// TODO: is it okay to cast c.Conn().RemotePeer() into a multiaddr? might be "user input"
+func (c *Conn) RemoteMultiaddr() ma.Multiaddr {
+ // TODO: We should be able to do this directly without converting to/from a string.
+ relayAddr, err := ma.NewComponent(
+ ma.ProtocolWithCode(ma.P_P2P).Name,
+ c.stream.Conn().RemotePeer().String(),
+ )
+ if err != nil {
+ log.Error("failed to create relay address:", "err", err)
+ return ma.Join(c.stream.Conn().RemoteMultiaddr(), circuitAddr)
+ }
+ return ma.Join(c.stream.Conn().RemoteMultiaddr(), relayAddr.Multiaddr(), circuitAddr)
+}
+
+func (c *Conn) LocalMultiaddr() ma.Multiaddr {
+ return c.stream.Conn().LocalMultiaddr()
+}
+
+func (c *Conn) LocalAddr() net.Addr {
+ na, err := manet.ToNetAddr(c.stream.Conn().LocalMultiaddr())
+ if err != nil {
+ log.Error("failed to convert local multiaddr to net addr:", "err", err)
+ return nil
+ }
+ return na
+}
+
+func (c *Conn) RemoteAddr() net.Addr {
+ return &NetAddr{
+ Relay: c.stream.Conn().RemotePeer().String(),
+ Remote: c.remote.ID.String(),
+ }
+}
+
+// ConnStat interface
+var _ network.ConnStat = (*Conn)(nil)
+
+func (c *Conn) Stat() network.ConnStats {
+ return c.stat
+}
+
+// tagHop tags the underlying relay connection so that it can be (somewhat) protected from the
+// connection manager, as it is an important connection that proxies other connections.
+// This is handled here so that user code doesn't need to bother with it, avoiding
+// clown-shoes situations where a high-value peer connection is implicitly dropped
+// because the connection manager closed the underlying relay connection it runs over.
+func (c *Conn) tagHop() {
+ c.client.mx.Lock()
+ defer c.client.mx.Unlock()
+
+ p := c.stream.Conn().RemotePeer()
+ c.client.hopCount[p]++
+ if c.client.hopCount[p] == 1 {
+ c.client.host.ConnManager().TagPeer(p, "relay-hop-stream", HopTagWeight)
+ }
+}
+
+// untagHop removes the relay-hop-stream tag if necessary; it is invoked when a relayed connection
+// is closed.
+func (c *Conn) untagHop() {
+ c.client.mx.Lock()
+ defer c.client.mx.Unlock()
+
+ p := c.stream.Conn().RemotePeer()
+ c.client.hopCount[p]--
+ if c.client.hopCount[p] == 0 {
+ c.client.host.ConnManager().UntagPeer(p, "relay-hop-stream")
+ delete(c.client.hopCount, p)
+ }
+}
+
+type capableConnWithStat interface {
+ tpt.CapableConn
+ network.ConnStat
+}
+
+type capableConn struct {
+ capableConnWithStat
+}
+
+var transportName = ma.ProtocolWithCode(ma.P_CIRCUIT).Name
+
+func (c capableConn) ConnState() network.ConnectionState {
+ return network.ConnectionState{
+ Transport: transportName,
+ }
+}
diff --git a/p2p/protocol/circuitv2/client/dial.go b/p2p/protocol/circuitv2/client/dial.go
new file mode 100644
index 0000000000..5175803e79
--- /dev/null
+++ b/p2p/protocol/circuitv2/client/dial.go
@@ -0,0 +1,191 @@
+package client
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ pbv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb"
+ "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/proto"
+ "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/util"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+const maxMessageSize = 4096
+
+var DialTimeout = time.Minute
+var DialRelayTimeout = 5 * time.Second
+
+// relayError marks relay protocol errors; it is used when deduplicating dials to decide whether a retry is worthwhile
+type relayError struct {
+ err string
+}
+
+func (e relayError) Error() string {
+ return e.err
+}
+
+func newRelayError(t string, args ...interface{}) error {
+ return relayError{err: fmt.Sprintf(t, args...)}
+}
+
+func isRelayError(err error) bool {
+ _, ok := err.(relayError)
+ return ok
+}
+
+// dial dials peer p through the relay specified in the circuit address a
+func (c *Client) dial(ctx context.Context, a ma.Multiaddr, p peer.ID) (*Conn, error) {
+ // split /a/p2p-circuit/b into (/a, /p2p-circuit/b)
+ relayaddr, destaddr := ma.SplitFunc(a, func(c ma.Component) bool {
+ return c.Protocol().Code == ma.P_CIRCUIT
+ })
+
+ // If the address contained no /p2p-circuit part, the second part is nil.
+ if destaddr == nil {
+ return nil, fmt.Errorf("%s is not a relay address", a)
+ }
+
+ if relayaddr == nil {
+ return nil, fmt.Errorf("can't dial a p2p-circuit without specifying a relay: %s", a)
+ }
+
+ dinfo := peer.AddrInfo{ID: p}
+
+ // Strip the /p2p-circuit prefix from the destaddr so that we can pass the destination address
+ // (if present) for active relays
+ _, destaddr = ma.SplitFirst(destaddr)
+ if destaddr != nil {
+ dinfo.Addrs = append(dinfo.Addrs, destaddr)
+ }
+
+ rinfo, err := peer.AddrInfoFromP2pAddr(relayaddr)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing relay multiaddr '%s': %w", relayaddr, err)
+ }
+
+ // deduplicate active relay dials to the same peer
+retry:
+ c.mx.Lock()
+ dedup, active := c.activeDials[p]
+ if !active {
+ dedup = &completion{ch: make(chan struct{}), relay: rinfo.ID}
+ c.activeDials[p] = dedup
+ }
+ c.mx.Unlock()
+
+ if active {
+ select {
+ case <-dedup.ch:
+ if dedup.err != nil {
+ if dedup.relay != rinfo.ID {
+ // different relay, retry
+ goto retry
+ }
+
+ if !isRelayError(dedup.err) {
+ // not a relay protocol error, retry
+ goto retry
+ }
+
+ // don't try the same relay if it failed to connect with a protocol error
+ return nil, fmt.Errorf("concurrent active dial through the same relay failed with a protocol error")
+ }
+
+ return nil, fmt.Errorf("concurrent active dial succeeded")
+
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ }
+ }
+
+ conn, err := c.dialPeer(ctx, *rinfo, dinfo)
+
+ c.mx.Lock()
+ dedup.err = err
+ close(dedup.ch)
+ delete(c.activeDials, p)
+ c.mx.Unlock()
+
+ return conn, err
+}
+
+func (c *Client) dialPeer(ctx context.Context, relay, dest peer.AddrInfo) (*Conn, error) {
+ log.Debug("dialing peer through relay",
+ "destination_peer", dest.ID,
+ "relay_peer", relay.ID)
+
+ if len(relay.Addrs) > 0 {
+ c.host.Peerstore().AddAddrs(relay.ID, relay.Addrs, peerstore.TempAddrTTL)
+ }
+
+ dialCtx, cancel := context.WithTimeout(ctx, DialRelayTimeout)
+ defer cancel()
+ s, err := c.host.NewStream(dialCtx, relay.ID, proto.ProtoIDv2Hop)
+ if err != nil {
+ return nil, fmt.Errorf("error opening hop stream to relay: %w", err)
+ }
+ return c.connect(s, dest)
+}
+
+func (c *Client) connect(s network.Stream, dest peer.AddrInfo) (*Conn, error) {
+ if err := s.Scope().ReserveMemory(maxMessageSize, network.ReservationPriorityAlways); err != nil {
+ s.Reset()
+ return nil, err
+ }
+ defer s.Scope().ReleaseMemory(maxMessageSize)
+
+ rd := util.NewDelimitedReader(s, maxMessageSize)
+ wr := util.NewDelimitedWriter(s)
+ defer rd.Close()
+
+ var msg pbv2.HopMessage
+
+ msg.Type = pbv2.HopMessage_CONNECT.Enum()
+ msg.Peer = util.PeerInfoToPeerV2(dest)
+
+ s.SetDeadline(time.Now().Add(DialTimeout))
+
+ err := wr.WriteMsg(&msg)
+ if err != nil {
+ s.Reset()
+ return nil, err
+ }
+
+ msg.Reset()
+
+ err = rd.ReadMsg(&msg)
+ if err != nil {
+ s.Reset()
+ return nil, err
+ }
+
+ s.SetDeadline(time.Time{})
+
+ if msg.GetType() != pbv2.HopMessage_STATUS {
+ s.Reset()
+ return nil, newRelayError("unexpected relay response; not a status message (%d)", msg.GetType())
+ }
+
+ status := msg.GetStatus()
+ if status != pbv2.Status_OK {
+ s.Reset()
+ return nil, newRelayError("error opening relay circuit: %s (%d)", pbv2.Status_name[int32(status)], status)
+ }
+
+ // check for a limit provided by the relay; if the limit is not nil, then this is a limited
+ // relay connection and we mark the connection as limited.
+ var stat network.ConnStats
+ if limit := msg.GetLimit(); limit != nil {
+ stat.Limited = true
+ stat.Extra = make(map[interface{}]interface{})
+ stat.Extra[StatLimitDuration] = time.Duration(limit.GetDuration()) * time.Second
+ stat.Extra[StatLimitData] = limit.GetData()
+ }
+
+ return &Conn{stream: s, remote: dest, stat: stat, client: c}, nil
+}
diff --git a/p2p/protocol/circuitv2/client/handlers.go b/p2p/protocol/circuitv2/client/handlers.go
new file mode 100644
index 0000000000..9859ec9081
--- /dev/null
+++ b/p2p/protocol/circuitv2/client/handlers.go
@@ -0,0 +1,90 @@
+package client
+
+import (
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ pbv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb"
+ "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/util"
+)
+
+var (
+ StreamTimeout = 1 * time.Minute
+ AcceptTimeout = 10 * time.Second
+)
+
+func (c *Client) handleStreamV2(s network.Stream) {
+ log.Debug("new relay/v2 stream", "remote_peer", s.Conn().RemotePeer())
+
+ s.SetReadDeadline(time.Now().Add(StreamTimeout))
+
+ rd := util.NewDelimitedReader(s, maxMessageSize)
+ defer rd.Close()
+
+ writeResponse := func(status pbv2.Status) error {
+ s.SetWriteDeadline(time.Now().Add(StreamTimeout))
+ defer s.SetWriteDeadline(time.Time{})
+ wr := util.NewDelimitedWriter(s)
+
+ var msg pbv2.StopMessage
+ msg.Type = pbv2.StopMessage_STATUS.Enum()
+ msg.Status = status.Enum()
+
+ return wr.WriteMsg(&msg)
+ }
+
+ handleError := func(status pbv2.Status) {
+ log.Debug("protocol error", "status_name", pbv2.Status_name[int32(status)], "status_code", status)
+ err := writeResponse(status)
+ if err != nil {
+ s.Reset()
+ log.Debug("error writing circuit response", "err", err)
+ } else {
+ s.Close()
+ }
+ }
+
+ var msg pbv2.StopMessage
+
+ err := rd.ReadMsg(&msg)
+ if err != nil {
+ handleError(pbv2.Status_MALFORMED_MESSAGE)
+ return
+ }
+ // reset stream deadline as message has been read
+ s.SetReadDeadline(time.Time{})
+
+ if msg.GetType() != pbv2.StopMessage_CONNECT {
+ handleError(pbv2.Status_UNEXPECTED_MESSAGE)
+ return
+ }
+
+ src, err := util.PeerToPeerInfoV2(msg.GetPeer())
+ if err != nil {
+ handleError(pbv2.Status_MALFORMED_MESSAGE)
+ return
+ }
+
+ // check for a limit provided by the relay; if the limit is not nil, then this is a limited
+ // relay connection and we mark the connection as limited.
+ var stat network.ConnStats
+ if limit := msg.GetLimit(); limit != nil {
+ stat.Limited = true
+ stat.Extra = make(map[interface{}]interface{})
+ stat.Extra[StatLimitDuration] = time.Duration(limit.GetDuration()) * time.Second
+ stat.Extra[StatLimitData] = limit.GetData()
+ }
+
+ log.Debug("incoming relay connection", "source_peer", src.ID)
+
+ select {
+ case c.incoming <- accept{
+ conn: &Conn{stream: s, remote: src, stat: stat, client: c},
+ writeResponse: func() error {
+ return writeResponse(pbv2.Status_OK)
+ },
+ }:
+ case <-time.After(AcceptTimeout):
+ handleError(pbv2.Status_CONNECTION_FAILED)
+ }
+}
diff --git a/p2p/protocol/circuitv2/client/listen.go b/p2p/protocol/circuitv2/client/listen.go
new file mode 100644
index 0000000000..d00ebac495
--- /dev/null
+++ b/p2p/protocol/circuitv2/client/listen.go
@@ -0,0 +1,56 @@
+package client
+
+import (
+ "net"
+
+ "github.com/libp2p/go-libp2p/core/transport"
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
+var _ manet.Listener = (*Listener)(nil)
+
+type Listener Client
+
+func (c *Client) Listener() *Listener {
+ return (*Listener)(c)
+}
+
+func (l *Listener) Accept() (manet.Conn, error) {
+ for {
+ select {
+ case evt := <-l.incoming:
+ err := evt.writeResponse()
+ if err != nil {
+ log.Debug("error writing relay response", "err", err)
+ evt.conn.stream.Reset()
+ continue
+ }
+
+ log.Debug("accepted relay connection",
+ "remote_peer", evt.conn.remote.ID,
+ "remote_multiaddr", evt.conn.RemoteMultiaddr())
+
+ evt.conn.tagHop()
+ return evt.conn, nil
+
+ case <-l.ctx.Done():
+ return nil, transport.ErrListenerClosed
+ }
+ }
+}
+
+func (l *Listener) Addr() net.Addr {
+ return &NetAddr{
+ Relay: "any",
+ Remote: "any",
+ }
+}
+
+func (l *Listener) Multiaddr() ma.Multiaddr {
+ return circuitAddr
+}
+
+func (l *Listener) Close() error {
+ return (*Client)(l).Close()
+}
diff --git a/p2p/protocol/circuitv2/client/reservation.go b/p2p/protocol/circuitv2/client/reservation.go
new file mode 100644
index 0000000000..e51773d146
--- /dev/null
+++ b/p2p/protocol/circuitv2/client/reservation.go
@@ -0,0 +1,177 @@
+package client
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/libp2p/go-libp2p/core/record"
+ pbv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb"
+ "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/proto"
+ "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/util"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+var ReserveTimeout = time.Minute
+
+// Reservation is a struct carrying information about a relay/v2 slot reservation.
+type Reservation struct {
+ // Expiration is the expiration time of the reservation
+ Expiration time.Time
+ // Addrs contains the vouched public addresses of the reserving peer, which can be
+ // announced to the network
+ Addrs []ma.Multiaddr
+
+ // LimitDuration is the time limit for which the relay will keep a relayed connection
+ // open. If 0, there is no limit.
+ LimitDuration time.Duration
+ // LimitData is the number of bytes that the relay will relay in each direction before
+ // resetting a relayed connection.
+ LimitData uint64
+
+ // Voucher is a signed reservation voucher provided by the relay
+ Voucher *proto.ReservationVoucher
+}
+
+// ReservationError is the error returned on failure to reserve a slot in the relay
+type ReservationError struct {
+ // Status is the status returned by the relay for rejecting the reservation
+ // request. It is set to pbv2.Status_CONNECTION_FAILED on other failures
+ Status pbv2.Status
+
+ // Reason is the reason for reservation failure
+ Reason string
+
+ err error
+}
+
+func (re ReservationError) Error() string {
+ return fmt.Sprintf("reservation error: status: %s reason: %s err: %s", pbv2.Status_name[int32(re.Status)], re.Reason, re.err)
+}
+
+func (re ReservationError) Unwrap() error {
+ return re.err
+}
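+
+// Editor's sketch: a failed Reserve call can be inspected with the standard
+// errors package to recover the relay's status code (assuming `err` was
+// returned by Reserve):
+//
+//	var re ReservationError
+//	if errors.As(err, &re) && re.Status == pbv2.Status_RESERVATION_REFUSED {
+//		// back off, or try another relay
+//	}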
+
+// Reserve reserves a slot in a relay and returns the reservation information.
+// Clients must reserve slots in order for the relay to relay connections to them.
+func Reserve(ctx context.Context, h host.Host, ai peer.AddrInfo) (*Reservation, error) {
+ if len(ai.Addrs) > 0 {
+ h.Peerstore().AddAddrs(ai.ID, ai.Addrs, peerstore.TempAddrTTL)
+ }
+
+ s, err := h.NewStream(ctx, ai.ID, proto.ProtoIDv2Hop)
+ if err != nil {
+ return nil, ReservationError{Status: pbv2.Status_CONNECTION_FAILED, Reason: "failed to open stream", err: err}
+ }
+ defer s.Close()
+
+ rd := util.NewDelimitedReader(s, maxMessageSize)
+ wr := util.NewDelimitedWriter(s)
+ defer rd.Close()
+
+ var msg pbv2.HopMessage
+ msg.Type = pbv2.HopMessage_RESERVE.Enum()
+
+ s.SetDeadline(time.Now().Add(ReserveTimeout))
+
+ if err := wr.WriteMsg(&msg); err != nil {
+ s.Reset()
+ return nil, ReservationError{Status: pbv2.Status_CONNECTION_FAILED, Reason: "error writing reservation message", err: err}
+ }
+
+ msg.Reset()
+
+ if err := rd.ReadMsg(&msg); err != nil {
+ s.Reset()
+ return nil, ReservationError{Status: pbv2.Status_CONNECTION_FAILED, Reason: "error reading reservation response message: %w", err: err}
+ }
+
+ if msg.GetType() != pbv2.HopMessage_STATUS {
+ return nil, ReservationError{Status: pbv2.Status_MALFORMED_MESSAGE, Reason: fmt.Sprintf("unexpected relay response: not a status message (%d)", msg.GetType())}
+ }
+
+ if status := msg.GetStatus(); status != pbv2.Status_OK {
+ return nil, ReservationError{Status: msg.GetStatus(), Reason: "reservation failed"}
+ }
+
+ rsvp := msg.GetReservation()
+ if rsvp == nil {
+ return nil, ReservationError{Status: pbv2.Status_MALFORMED_MESSAGE, Reason: "missing reservation info"}
+ }
+
+ result := &Reservation{}
+ result.Expiration = time.Unix(int64(rsvp.GetExpire()), 0)
+ if result.Expiration.Before(time.Now()) {
+ return nil, ReservationError{
+ Status: pbv2.Status_MALFORMED_MESSAGE,
+ Reason: fmt.Sprintf("received reservation with expiration date in the past: %s", result.Expiration),
+ }
+ }
+
+ addrs := rsvp.GetAddrs()
+ result.Addrs = make([]ma.Multiaddr, 0, len(addrs))
+ for _, ab := range addrs {
+ a, err := ma.NewMultiaddrBytes(ab)
+ if err != nil {
+ log.Warn("ignoring unparsable relay address", "err", err)
+ continue
+ }
+ result.Addrs = append(result.Addrs, a)
+ }
+
+ voucherBytes := rsvp.GetVoucher()
+ if voucherBytes != nil {
+ env, rec, err := record.ConsumeEnvelope(voucherBytes, proto.RecordDomain)
+ if err != nil {
+ return nil, ReservationError{
+ Status: pbv2.Status_MALFORMED_MESSAGE,
+ Reason: fmt.Sprintf("error consuming voucher envelope: %s", err),
+ err: err,
+ }
+ }
+
+ voucher, ok := rec.(*proto.ReservationVoucher)
+ if !ok {
+ return nil, ReservationError{
+ Status: pbv2.Status_MALFORMED_MESSAGE,
+ Reason: fmt.Sprintf("unexpected voucher record type: %+T", rec),
+ }
+ }
+ signerPeerID, err := peer.IDFromPublicKey(env.PublicKey)
+ if err != nil {
+ return nil, ReservationError{
+ Status: pbv2.Status_MALFORMED_MESSAGE,
+ Reason: fmt.Sprintf("invalid voucher signing public key: %s", err),
+ err: err,
+ }
+ }
+ if signerPeerID != voucher.Relay {
+ return nil, ReservationError{
+ Status: pbv2.Status_MALFORMED_MESSAGE,
+ Reason: fmt.Sprintf("invalid voucher relay id: expected %s, got %s", signerPeerID, voucher.Relay),
+ }
+ }
+ if h.ID() != voucher.Peer {
+ return nil, ReservationError{
+ Status: pbv2.Status_MALFORMED_MESSAGE,
+ Reason: fmt.Sprintf("invalid voucher peer id: expected %s, got %s", h.ID(), voucher.Peer),
+ }
+ }
+ result.Voucher = voucher
+ }
+
+ limit := msg.GetLimit()
+ if limit != nil {
+ result.LimitDuration = time.Duration(limit.GetDuration()) * time.Second
+ result.LimitData = limit.GetData()
+ }
+
+ return result, nil
+}
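+
+// Usage sketch (editor's illustration; relayInfo is an assumed peer.AddrInfo
+// for a relay that supports the hop protocol):
+//
+//	rsvp, err := Reserve(ctx, h, relayInfo)
+//	if err != nil {
+//		return err
+//	}
+//	// Announce rsvp.Addrs to the network and refresh the slot before
+//	// rsvp.Expiration. If limits are set, relayed connections are closed
+//	// after rsvp.LimitDuration or after rsvp.LimitData bytes per direction.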
diff --git a/p2p/protocol/circuitv2/client/reservation_test.go b/p2p/protocol/circuitv2/client/reservation_test.go
new file mode 100644
index 0000000000..d1ab6dc683
--- /dev/null
+++ b/p2p/protocol/circuitv2/client/reservation_test.go
@@ -0,0 +1,161 @@
+package client_test
+
+import (
+ "context"
+ "errors"
+ "math"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p"
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/record"
+ "github.com/libp2p/go-libp2p/core/test"
+ "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client"
+ pbv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb"
+ "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/proto"
+ "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/util"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestReservationFailures(t *testing.T) {
+ type testcase struct {
+ name string
+ streamHandler network.StreamHandler
+ err string
+ status pbv2.Status
+ }
+ testcases := []testcase{
+ {
+ name: "unsupported protocol",
+ streamHandler: nil,
+ err: "protocols not supported",
+ },
+ {
+ name: "wrong message type",
+ streamHandler: func(s network.Stream) {
+ util.NewDelimitedWriter(s).WriteMsg(&pbv2.HopMessage{
+ Type: pbv2.HopMessage_RESERVE.Enum(),
+ })
+ },
+ err: "unexpected relay response: not a status message",
+ status: pbv2.Status_MALFORMED_MESSAGE,
+ },
+ {
+ name: "unknown status",
+ streamHandler: func(s network.Stream) {
+ status := pbv2.Status(1337)
+ util.NewDelimitedWriter(s).WriteMsg(&pbv2.HopMessage{
+ Type: pbv2.HopMessage_STATUS.Enum(),
+ Status: &status,
+ })
+ },
+ err: "reservation failed",
+ status: pbv2.Status(1337),
+ },
+ {
+ name: "invalid time",
+ streamHandler: func(s network.Stream) {
+ status := pbv2.Status_OK
+ expire := uint64(math.MaxUint64)
+ util.NewDelimitedWriter(s).WriteMsg(&pbv2.HopMessage{
+ Type: pbv2.HopMessage_STATUS.Enum(),
+ Status: &status,
+ Reservation: &pbv2.Reservation{Expire: &expire},
+ })
+ },
+ err: "received reservation with expiration date in the past",
+ status: pbv2.Status_MALFORMED_MESSAGE,
+ },
+ {
+ name: "invalid voucher",
+ streamHandler: func(s network.Stream) {
+ status := pbv2.Status_OK
+ expire := uint64(time.Now().Add(time.Hour).Unix())
+ util.NewDelimitedWriter(s).WriteMsg(&pbv2.HopMessage{
+ Type: pbv2.HopMessage_STATUS.Enum(),
+ Status: &status,
+ Reservation: &pbv2.Reservation{
+ Expire: &expire,
+ Voucher: []byte("foobar"),
+ },
+ })
+ },
+ err: "error consuming voucher envelope: failed when unmarshalling the envelope",
+ status: pbv2.Status_MALFORMED_MESSAGE,
+ },
+ {
+ name: "invalid voucher 2",
+ streamHandler: func(s network.Stream) {
+ status := pbv2.Status_OK
+ expire := uint64(time.Now().Add(time.Hour).Unix())
+ priv, _, err := test.RandTestKeyPair(crypto.Ed25519, 256)
+ if err != nil {
+ s.Reset()
+ return
+ }
+ relay, _ := test.RandPeerID()
+ peer, _ := test.RandPeerID()
+ voucher := &proto.ReservationVoucher{
+ Relay: relay,
+ Peer: peer,
+ Expiration: time.Now().Add(time.Hour),
+ }
+ signedVoucher, err := record.Seal(voucher, priv)
+ if err != nil {
+ s.Reset()
+ return
+ }
+ env, err := signedVoucher.Marshal()
+ if err != nil {
+ s.Reset()
+ return
+ }
+ util.NewDelimitedWriter(s).WriteMsg(&pbv2.HopMessage{
+ Type: pbv2.HopMessage_STATUS.Enum(),
+ Status: &status,
+ Reservation: &pbv2.Reservation{
+ Expire: &expire,
+ Voucher: env,
+ },
+ })
+ },
+ err: "invalid voucher relay id",
+ status: pbv2.Status_MALFORMED_MESSAGE,
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ host, err := libp2p.New(libp2p.ResourceManager(&network.NullResourceManager{}))
+ require.NoError(t, err)
+ defer host.Close()
+ if tc.streamHandler != nil {
+ host.SetStreamHandler(proto.ProtoIDv2Hop, tc.streamHandler)
+ }
+
+ cl, err := libp2p.New(libp2p.ResourceManager(&network.NullResourceManager{}))
+ require.NoError(t, err)
+ defer cl.Close()
+ _, err = client.Reserve(context.Background(), cl, peer.AddrInfo{ID: host.ID(), Addrs: host.Addrs()})
+ if tc.err == "" {
+ require.NoError(t, err)
+ } else {
+ require.Error(t, err)
+ require.Contains(t, err.Error(), tc.err)
+ if tc.status != 0 {
+ var re client.ReservationError
+ if !errors.As(err, &re) {
+ t.Errorf("expected error to be of type %T", re)
+ }
+ if re.Status != tc.status {
+ t.Errorf("expected status %d got %d", tc.status, re.Status)
+ }
+ }
+ }
+ })
+ }
+}
diff --git a/p2p/protocol/circuitv2/client/transport.go b/p2p/protocol/circuitv2/client/transport.go
new file mode 100644
index 0000000000..7f0e98562c
--- /dev/null
+++ b/p2p/protocol/circuitv2/client/transport.go
@@ -0,0 +1,113 @@
+package client
+
+import (
+ "context"
+ "fmt"
+ "io"
+
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/transport"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+var circuitProtocol = ma.ProtocolWithCode(ma.P_CIRCUIT)
+var circuitAddr = ma.Cast(circuitProtocol.VCode)
+
+// AddTransport constructs a new p2p-circuit/v2 client and adds it as a transport to the
+// host network
+func AddTransport(h host.Host, upgrader transport.Upgrader) error {
+ n, ok := h.Network().(transport.TransportNetwork)
+ if !ok {
+ return fmt.Errorf("%v is not a transport network", h.Network())
+ }
+
+ c, err := New(h, upgrader)
+ if err != nil {
+ return fmt.Errorf("error constructing circuit client: %w", err)
+ }
+
+ err = n.AddTransport(c)
+ if err != nil {
+ return fmt.Errorf("error adding circuit transport: %w", err)
+ }
+
+ err = n.Listen(circuitAddr)
+ if err != nil {
+ return fmt.Errorf("error listening to circuit addr: %w", err)
+ }
+
+ c.Start()
+
+ return nil
+}
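+
+// Wiring sketch (editor's note): hosts built with the default go-libp2p
+// options normally get this transport through the relay-enabling option; the
+// manual equivalent, assuming an upgrader u is at hand, is simply:
+//
+//	if err := AddTransport(h, u); err != nil {
+//		return err
+//	}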
+
+// Transport interface
+var _ transport.Transport = (*Client)(nil)
+
+// p2p-circuit implements the SkipResolver interface so that the underlying
+// transport can do the address resolution later. If you wrap this transport,
+// make sure you also implement SkipResolver.
+var _ transport.SkipResolver = (*Client)(nil)
+var _ io.Closer = (*Client)(nil)
+
+// SkipResolve returns true since we always defer to the inner transport for
+// the actual connection. By skipping resolution here, we let the inner
+// transport decide how to resolve the multiaddr.
+func (c *Client) SkipResolve(_ context.Context, _ ma.Multiaddr) bool {
+ return true
+}
+
+func (c *Client) Dial(ctx context.Context, a ma.Multiaddr, p peer.ID) (transport.CapableConn, error) {
+ connScope, err := c.host.Network().ResourceManager().OpenConnection(network.DirOutbound, false, a)
+ if err != nil {
+ return nil, err
+ }
+ conn, err := c.dialAndUpgrade(ctx, a, p, connScope)
+ if err != nil {
+ connScope.Done()
+ return nil, err
+ }
+ return conn, nil
+}
+
+func (c *Client) dialAndUpgrade(ctx context.Context, a ma.Multiaddr, p peer.ID, connScope network.ConnManagementScope) (transport.CapableConn, error) {
+ if err := connScope.SetPeer(p); err != nil {
+ return nil, err
+ }
+ conn, err := c.dial(ctx, a, p)
+ if err != nil {
+ return nil, err
+ }
+ conn.tagHop()
+ cc, err := c.upgrader.Upgrade(ctx, c, conn, network.DirOutbound, p, connScope)
+ if err != nil {
+ return nil, err
+ }
+ return capableConn{cc.(capableConnWithStat)}, nil
+}
+
+func (c *Client) CanDial(addr ma.Multiaddr) bool {
+ _, err := addr.ValueForProtocol(ma.P_CIRCUIT)
+ return err == nil
+}
+
+func (c *Client) Listen(addr ma.Multiaddr) (transport.Listener, error) {
+ // TODO connect to the relay and reserve slot if specified
+ if _, err := addr.ValueForProtocol(ma.P_CIRCUIT); err != nil {
+ return nil, err
+ }
+
+ return c.upgrader.UpgradeGatedMaListener(c, c.upgrader.GateMaListener(c.Listener())), nil
+}
+
+func (c *Client) Protocols() []int {
+ return []int{ma.P_CIRCUIT}
+}
+
+func (c *Client) Proxy() bool {
+ return true
+}
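+
+// Dialing sketch (editor's illustration): CanDial accepts any multiaddr that
+// contains a /p2p-circuit component, so a peer behind a relay is reached via
+// an address of the form below (<relay> and <dst> are placeholder peer IDs):
+//
+//	raddr, _ := ma.NewMultiaddr("/ip4/203.0.113.1/tcp/4001/p2p/<relay>/p2p-circuit/p2p/<dst>")
+//	err := h.Connect(ctx, peer.AddrInfo{ID: dstID, Addrs: []ma.Multiaddr{raddr}})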
diff --git a/p2p/protocol/circuitv2/pb/circuit.pb.go b/p2p/protocol/circuitv2/pb/circuit.pb.go
new file mode 100644
index 0000000000..8feee1994d
--- /dev/null
+++ b/p2p/protocol/circuitv2/pb/circuit.pb.go
@@ -0,0 +1,636 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.6
+// protoc v5.29.2
+// source: p2p/protocol/circuitv2/pb/circuit.proto
+
+package pb
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+ unsafe "unsafe"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type Status int32
+
+const (
+ // zero value field required for proto3 compatibility
+ Status_UNUSED Status = 0
+ Status_OK Status = 100
+ Status_RESERVATION_REFUSED Status = 200
+ Status_RESOURCE_LIMIT_EXCEEDED Status = 201
+ Status_PERMISSION_DENIED Status = 202
+ Status_CONNECTION_FAILED Status = 203
+ Status_NO_RESERVATION Status = 204
+ Status_MALFORMED_MESSAGE Status = 400
+ Status_UNEXPECTED_MESSAGE Status = 401
+)
+
+// Enum value maps for Status.
+var (
+ Status_name = map[int32]string{
+ 0: "UNUSED",
+ 100: "OK",
+ 200: "RESERVATION_REFUSED",
+ 201: "RESOURCE_LIMIT_EXCEEDED",
+ 202: "PERMISSION_DENIED",
+ 203: "CONNECTION_FAILED",
+ 204: "NO_RESERVATION",
+ 400: "MALFORMED_MESSAGE",
+ 401: "UNEXPECTED_MESSAGE",
+ }
+ Status_value = map[string]int32{
+ "UNUSED": 0,
+ "OK": 100,
+ "RESERVATION_REFUSED": 200,
+ "RESOURCE_LIMIT_EXCEEDED": 201,
+ "PERMISSION_DENIED": 202,
+ "CONNECTION_FAILED": 203,
+ "NO_RESERVATION": 204,
+ "MALFORMED_MESSAGE": 400,
+ "UNEXPECTED_MESSAGE": 401,
+ }
+)
+
+func (x Status) Enum() *Status {
+ p := new(Status)
+ *p = x
+ return p
+}
+
+func (x Status) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Status) Descriptor() protoreflect.EnumDescriptor {
+ return file_p2p_protocol_circuitv2_pb_circuit_proto_enumTypes[0].Descriptor()
+}
+
+func (Status) Type() protoreflect.EnumType {
+ return &file_p2p_protocol_circuitv2_pb_circuit_proto_enumTypes[0]
+}
+
+func (x Status) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Status.Descriptor instead.
+func (Status) EnumDescriptor() ([]byte, []int) {
+ return file_p2p_protocol_circuitv2_pb_circuit_proto_rawDescGZIP(), []int{0}
+}
+
+type HopMessage_Type int32
+
+const (
+ HopMessage_RESERVE HopMessage_Type = 0
+ HopMessage_CONNECT HopMessage_Type = 1
+ HopMessage_STATUS HopMessage_Type = 2
+)
+
+// Enum value maps for HopMessage_Type.
+var (
+ HopMessage_Type_name = map[int32]string{
+ 0: "RESERVE",
+ 1: "CONNECT",
+ 2: "STATUS",
+ }
+ HopMessage_Type_value = map[string]int32{
+ "RESERVE": 0,
+ "CONNECT": 1,
+ "STATUS": 2,
+ }
+)
+
+func (x HopMessage_Type) Enum() *HopMessage_Type {
+ p := new(HopMessage_Type)
+ *p = x
+ return p
+}
+
+func (x HopMessage_Type) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (HopMessage_Type) Descriptor() protoreflect.EnumDescriptor {
+ return file_p2p_protocol_circuitv2_pb_circuit_proto_enumTypes[1].Descriptor()
+}
+
+func (HopMessage_Type) Type() protoreflect.EnumType {
+ return &file_p2p_protocol_circuitv2_pb_circuit_proto_enumTypes[1]
+}
+
+func (x HopMessage_Type) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use HopMessage_Type.Descriptor instead.
+func (HopMessage_Type) EnumDescriptor() ([]byte, []int) {
+ return file_p2p_protocol_circuitv2_pb_circuit_proto_rawDescGZIP(), []int{0, 0}
+}
+
+type StopMessage_Type int32
+
+const (
+ StopMessage_CONNECT StopMessage_Type = 0
+ StopMessage_STATUS StopMessage_Type = 1
+)
+
+// Enum value maps for StopMessage_Type.
+var (
+ StopMessage_Type_name = map[int32]string{
+ 0: "CONNECT",
+ 1: "STATUS",
+ }
+ StopMessage_Type_value = map[string]int32{
+ "CONNECT": 0,
+ "STATUS": 1,
+ }
+)
+
+func (x StopMessage_Type) Enum() *StopMessage_Type {
+ p := new(StopMessage_Type)
+ *p = x
+ return p
+}
+
+func (x StopMessage_Type) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (StopMessage_Type) Descriptor() protoreflect.EnumDescriptor {
+ return file_p2p_protocol_circuitv2_pb_circuit_proto_enumTypes[2].Descriptor()
+}
+
+func (StopMessage_Type) Type() protoreflect.EnumType {
+ return &file_p2p_protocol_circuitv2_pb_circuit_proto_enumTypes[2]
+}
+
+func (x StopMessage_Type) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use StopMessage_Type.Descriptor instead.
+func (StopMessage_Type) EnumDescriptor() ([]byte, []int) {
+ return file_p2p_protocol_circuitv2_pb_circuit_proto_rawDescGZIP(), []int{1, 0}
+}
+
+type HopMessage struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // This field is marked optional for backwards compatibility with proto2.
+ // Users should make sure to always set this.
+ Type *HopMessage_Type `protobuf:"varint,1,opt,name=type,proto3,enum=circuit.pb.HopMessage_Type,oneof" json:"type,omitempty"`
+ Peer *Peer `protobuf:"bytes,2,opt,name=peer,proto3,oneof" json:"peer,omitempty"`
+ Reservation *Reservation `protobuf:"bytes,3,opt,name=reservation,proto3,oneof" json:"reservation,omitempty"`
+ Limit *Limit `protobuf:"bytes,4,opt,name=limit,proto3,oneof" json:"limit,omitempty"`
+ Status *Status `protobuf:"varint,5,opt,name=status,proto3,enum=circuit.pb.Status,oneof" json:"status,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *HopMessage) Reset() {
+ *x = HopMessage{}
+ mi := &file_p2p_protocol_circuitv2_pb_circuit_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *HopMessage) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HopMessage) ProtoMessage() {}
+
+func (x *HopMessage) ProtoReflect() protoreflect.Message {
+ mi := &file_p2p_protocol_circuitv2_pb_circuit_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HopMessage.ProtoReflect.Descriptor instead.
+func (*HopMessage) Descriptor() ([]byte, []int) {
+ return file_p2p_protocol_circuitv2_pb_circuit_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *HopMessage) GetType() HopMessage_Type {
+ if x != nil && x.Type != nil {
+ return *x.Type
+ }
+ return HopMessage_RESERVE
+}
+
+func (x *HopMessage) GetPeer() *Peer {
+ if x != nil {
+ return x.Peer
+ }
+ return nil
+}
+
+func (x *HopMessage) GetReservation() *Reservation {
+ if x != nil {
+ return x.Reservation
+ }
+ return nil
+}
+
+func (x *HopMessage) GetLimit() *Limit {
+ if x != nil {
+ return x.Limit
+ }
+ return nil
+}
+
+func (x *HopMessage) GetStatus() Status {
+ if x != nil && x.Status != nil {
+ return *x.Status
+ }
+ return Status_UNUSED
+}
+
+type StopMessage struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // This field is marked optional for backwards compatibility with proto2.
+ // Users should make sure to always set this.
+ Type *StopMessage_Type `protobuf:"varint,1,opt,name=type,proto3,enum=circuit.pb.StopMessage_Type,oneof" json:"type,omitempty"`
+ Peer *Peer `protobuf:"bytes,2,opt,name=peer,proto3,oneof" json:"peer,omitempty"`
+ Limit *Limit `protobuf:"bytes,3,opt,name=limit,proto3,oneof" json:"limit,omitempty"`
+ Status *Status `protobuf:"varint,4,opt,name=status,proto3,enum=circuit.pb.Status,oneof" json:"status,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *StopMessage) Reset() {
+ *x = StopMessage{}
+ mi := &file_p2p_protocol_circuitv2_pb_circuit_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *StopMessage) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StopMessage) ProtoMessage() {}
+
+func (x *StopMessage) ProtoReflect() protoreflect.Message {
+ mi := &file_p2p_protocol_circuitv2_pb_circuit_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StopMessage.ProtoReflect.Descriptor instead.
+func (*StopMessage) Descriptor() ([]byte, []int) {
+ return file_p2p_protocol_circuitv2_pb_circuit_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *StopMessage) GetType() StopMessage_Type {
+ if x != nil && x.Type != nil {
+ return *x.Type
+ }
+ return StopMessage_CONNECT
+}
+
+func (x *StopMessage) GetPeer() *Peer {
+ if x != nil {
+ return x.Peer
+ }
+ return nil
+}
+
+func (x *StopMessage) GetLimit() *Limit {
+ if x != nil {
+ return x.Limit
+ }
+ return nil
+}
+
+func (x *StopMessage) GetStatus() Status {
+ if x != nil && x.Status != nil {
+ return *x.Status
+ }
+ return Status_UNUSED
+}
+
+type Peer struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // This field is marked optional for backwards compatibility with proto2.
+ // Users should make sure to always set this.
+ Id []byte `protobuf:"bytes,1,opt,name=id,proto3,oneof" json:"id,omitempty"`
+ Addrs [][]byte `protobuf:"bytes,2,rep,name=addrs,proto3" json:"addrs,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Peer) Reset() {
+ *x = Peer{}
+ mi := &file_p2p_protocol_circuitv2_pb_circuit_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Peer) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Peer) ProtoMessage() {}
+
+func (x *Peer) ProtoReflect() protoreflect.Message {
+ mi := &file_p2p_protocol_circuitv2_pb_circuit_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Peer.ProtoReflect.Descriptor instead.
+func (*Peer) Descriptor() ([]byte, []int) {
+ return file_p2p_protocol_circuitv2_pb_circuit_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *Peer) GetId() []byte {
+ if x != nil {
+ return x.Id
+ }
+ return nil
+}
+
+func (x *Peer) GetAddrs() [][]byte {
+ if x != nil {
+ return x.Addrs
+ }
+ return nil
+}
+
+type Reservation struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // This field is marked optional for backwards compatibility with proto2.
+ // Users should make sure to always set this.
+ Expire *uint64 `protobuf:"varint,1,opt,name=expire,proto3,oneof" json:"expire,omitempty"` // Unix expiration time (UTC)
+ Addrs [][]byte `protobuf:"bytes,2,rep,name=addrs,proto3" json:"addrs,omitempty"` // relay addrs for reserving peer
+ Voucher []byte `protobuf:"bytes,3,opt,name=voucher,proto3,oneof" json:"voucher,omitempty"` // reservation voucher
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Reservation) Reset() {
+ *x = Reservation{}
+ mi := &file_p2p_protocol_circuitv2_pb_circuit_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Reservation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Reservation) ProtoMessage() {}
+
+func (x *Reservation) ProtoReflect() protoreflect.Message {
+ mi := &file_p2p_protocol_circuitv2_pb_circuit_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Reservation.ProtoReflect.Descriptor instead.
+func (*Reservation) Descriptor() ([]byte, []int) {
+ return file_p2p_protocol_circuitv2_pb_circuit_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *Reservation) GetExpire() uint64 {
+ if x != nil && x.Expire != nil {
+ return *x.Expire
+ }
+ return 0
+}
+
+func (x *Reservation) GetAddrs() [][]byte {
+ if x != nil {
+ return x.Addrs
+ }
+ return nil
+}
+
+func (x *Reservation) GetVoucher() []byte {
+ if x != nil {
+ return x.Voucher
+ }
+ return nil
+}
+
+type Limit struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Duration *uint32 `protobuf:"varint,1,opt,name=duration,proto3,oneof" json:"duration,omitempty"` // seconds
+ Data *uint64 `protobuf:"varint,2,opt,name=data,proto3,oneof" json:"data,omitempty"` // bytes
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Limit) Reset() {
+ *x = Limit{}
+ mi := &file_p2p_protocol_circuitv2_pb_circuit_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Limit) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Limit) ProtoMessage() {}
+
+func (x *Limit) ProtoReflect() protoreflect.Message {
+ mi := &file_p2p_protocol_circuitv2_pb_circuit_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Limit.ProtoReflect.Descriptor instead.
+func (*Limit) Descriptor() ([]byte, []int) {
+ return file_p2p_protocol_circuitv2_pb_circuit_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *Limit) GetDuration() uint32 {
+ if x != nil && x.Duration != nil {
+ return *x.Duration
+ }
+ return 0
+}
+
+func (x *Limit) GetData() uint64 {
+ if x != nil && x.Data != nil {
+ return *x.Data
+ }
+ return 0
+}
+
+var File_p2p_protocol_circuitv2_pb_circuit_proto protoreflect.FileDescriptor
+
+const file_p2p_protocol_circuitv2_pb_circuit_proto_rawDesc = "" +
+ "\n" +
+ "'p2p/protocol/circuitv2/pb/circuit.proto\x12\n" +
+ "circuit.pb\"\xf1\x02\n" +
+ "\n" +
+ "HopMessage\x124\n" +
+ "\x04type\x18\x01 \x01(\x0e2\x1b.circuit.pb.HopMessage.TypeH\x00R\x04type\x88\x01\x01\x12)\n" +
+ "\x04peer\x18\x02 \x01(\v2\x10.circuit.pb.PeerH\x01R\x04peer\x88\x01\x01\x12>\n" +
+ "\vreservation\x18\x03 \x01(\v2\x17.circuit.pb.ReservationH\x02R\vreservation\x88\x01\x01\x12,\n" +
+ "\x05limit\x18\x04 \x01(\v2\x11.circuit.pb.LimitH\x03R\x05limit\x88\x01\x01\x12/\n" +
+ "\x06status\x18\x05 \x01(\x0e2\x12.circuit.pb.StatusH\x04R\x06status\x88\x01\x01\",\n" +
+ "\x04Type\x12\v\n" +
+ "\aRESERVE\x10\x00\x12\v\n" +
+ "\aCONNECT\x10\x01\x12\n" +
+ "\n" +
+ "\x06STATUS\x10\x02B\a\n" +
+ "\x05_typeB\a\n" +
+ "\x05_peerB\x0e\n" +
+ "\f_reservationB\b\n" +
+ "\x06_limitB\t\n" +
+ "\a_status\"\x96\x02\n" +
+ "\vStopMessage\x125\n" +
+ "\x04type\x18\x01 \x01(\x0e2\x1c.circuit.pb.StopMessage.TypeH\x00R\x04type\x88\x01\x01\x12)\n" +
+ "\x04peer\x18\x02 \x01(\v2\x10.circuit.pb.PeerH\x01R\x04peer\x88\x01\x01\x12,\n" +
+ "\x05limit\x18\x03 \x01(\v2\x11.circuit.pb.LimitH\x02R\x05limit\x88\x01\x01\x12/\n" +
+ "\x06status\x18\x04 \x01(\x0e2\x12.circuit.pb.StatusH\x03R\x06status\x88\x01\x01\"\x1f\n" +
+ "\x04Type\x12\v\n" +
+ "\aCONNECT\x10\x00\x12\n" +
+ "\n" +
+ "\x06STATUS\x10\x01B\a\n" +
+ "\x05_typeB\a\n" +
+ "\x05_peerB\b\n" +
+ "\x06_limitB\t\n" +
+ "\a_status\"8\n" +
+ "\x04Peer\x12\x13\n" +
+ "\x02id\x18\x01 \x01(\fH\x00R\x02id\x88\x01\x01\x12\x14\n" +
+ "\x05addrs\x18\x02 \x03(\fR\x05addrsB\x05\n" +
+ "\x03_id\"v\n" +
+ "\vReservation\x12\x1b\n" +
+ "\x06expire\x18\x01 \x01(\x04H\x00R\x06expire\x88\x01\x01\x12\x14\n" +
+ "\x05addrs\x18\x02 \x03(\fR\x05addrs\x12\x1d\n" +
+ "\avoucher\x18\x03 \x01(\fH\x01R\avoucher\x88\x01\x01B\t\n" +
+ "\a_expireB\n" +
+ "\n" +
+ "\b_voucher\"W\n" +
+ "\x05Limit\x12\x1f\n" +
+ "\bduration\x18\x01 \x01(\rH\x00R\bduration\x88\x01\x01\x12\x17\n" +
+ "\x04data\x18\x02 \x01(\x04H\x01R\x04data\x88\x01\x01B\v\n" +
+ "\t_durationB\a\n" +
+ "\x05_data*\xca\x01\n" +
+ "\x06Status\x12\n" +
+ "\n" +
+ "\x06UNUSED\x10\x00\x12\x06\n" +
+ "\x02OK\x10d\x12\x18\n" +
+ "\x13RESERVATION_REFUSED\x10\xc8\x01\x12\x1c\n" +
+ "\x17RESOURCE_LIMIT_EXCEEDED\x10\xc9\x01\x12\x16\n" +
+ "\x11PERMISSION_DENIED\x10\xca\x01\x12\x16\n" +
+ "\x11CONNECTION_FAILED\x10\xcb\x01\x12\x13\n" +
+ "\x0eNO_RESERVATION\x10\xcc\x01\x12\x16\n" +
+ "\x11MALFORMED_MESSAGE\x10\x90\x03\x12\x17\n" +
+ "\x12UNEXPECTED_MESSAGE\x10\x91\x03B7Z5github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pbb\x06proto3"
+
+var (
+ file_p2p_protocol_circuitv2_pb_circuit_proto_rawDescOnce sync.Once
+ file_p2p_protocol_circuitv2_pb_circuit_proto_rawDescData []byte
+)
+
+func file_p2p_protocol_circuitv2_pb_circuit_proto_rawDescGZIP() []byte {
+ file_p2p_protocol_circuitv2_pb_circuit_proto_rawDescOnce.Do(func() {
+ file_p2p_protocol_circuitv2_pb_circuit_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_p2p_protocol_circuitv2_pb_circuit_proto_rawDesc), len(file_p2p_protocol_circuitv2_pb_circuit_proto_rawDesc)))
+ })
+ return file_p2p_protocol_circuitv2_pb_circuit_proto_rawDescData
+}
+
+var file_p2p_protocol_circuitv2_pb_circuit_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
+var file_p2p_protocol_circuitv2_pb_circuit_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
+var file_p2p_protocol_circuitv2_pb_circuit_proto_goTypes = []any{
+ (Status)(0), // 0: circuit.pb.Status
+ (HopMessage_Type)(0), // 1: circuit.pb.HopMessage.Type
+ (StopMessage_Type)(0), // 2: circuit.pb.StopMessage.Type
+ (*HopMessage)(nil), // 3: circuit.pb.HopMessage
+ (*StopMessage)(nil), // 4: circuit.pb.StopMessage
+ (*Peer)(nil), // 5: circuit.pb.Peer
+ (*Reservation)(nil), // 6: circuit.pb.Reservation
+ (*Limit)(nil), // 7: circuit.pb.Limit
+}
+var file_p2p_protocol_circuitv2_pb_circuit_proto_depIdxs = []int32{
+ 1, // 0: circuit.pb.HopMessage.type:type_name -> circuit.pb.HopMessage.Type
+ 5, // 1: circuit.pb.HopMessage.peer:type_name -> circuit.pb.Peer
+ 6, // 2: circuit.pb.HopMessage.reservation:type_name -> circuit.pb.Reservation
+ 7, // 3: circuit.pb.HopMessage.limit:type_name -> circuit.pb.Limit
+ 0, // 4: circuit.pb.HopMessage.status:type_name -> circuit.pb.Status
+ 2, // 5: circuit.pb.StopMessage.type:type_name -> circuit.pb.StopMessage.Type
+ 5, // 6: circuit.pb.StopMessage.peer:type_name -> circuit.pb.Peer
+ 7, // 7: circuit.pb.StopMessage.limit:type_name -> circuit.pb.Limit
+ 0, // 8: circuit.pb.StopMessage.status:type_name -> circuit.pb.Status
+ 9, // [9:9] is the sub-list for method output_type
+ 9, // [9:9] is the sub-list for method input_type
+ 9, // [9:9] is the sub-list for extension type_name
+ 9, // [9:9] is the sub-list for extension extendee
+ 0, // [0:9] is the sub-list for field type_name
+}
+
+func init() { file_p2p_protocol_circuitv2_pb_circuit_proto_init() }
+func file_p2p_protocol_circuitv2_pb_circuit_proto_init() {
+ if File_p2p_protocol_circuitv2_pb_circuit_proto != nil {
+ return
+ }
+ file_p2p_protocol_circuitv2_pb_circuit_proto_msgTypes[0].OneofWrappers = []any{}
+ file_p2p_protocol_circuitv2_pb_circuit_proto_msgTypes[1].OneofWrappers = []any{}
+ file_p2p_protocol_circuitv2_pb_circuit_proto_msgTypes[2].OneofWrappers = []any{}
+ file_p2p_protocol_circuitv2_pb_circuit_proto_msgTypes[3].OneofWrappers = []any{}
+ file_p2p_protocol_circuitv2_pb_circuit_proto_msgTypes[4].OneofWrappers = []any{}
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_p2p_protocol_circuitv2_pb_circuit_proto_rawDesc), len(file_p2p_protocol_circuitv2_pb_circuit_proto_rawDesc)),
+ NumEnums: 3,
+ NumMessages: 5,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_p2p_protocol_circuitv2_pb_circuit_proto_goTypes,
+ DependencyIndexes: file_p2p_protocol_circuitv2_pb_circuit_proto_depIdxs,
+ EnumInfos: file_p2p_protocol_circuitv2_pb_circuit_proto_enumTypes,
+ MessageInfos: file_p2p_protocol_circuitv2_pb_circuit_proto_msgTypes,
+ }.Build()
+ File_p2p_protocol_circuitv2_pb_circuit_proto = out.File
+ file_p2p_protocol_circuitv2_pb_circuit_proto_goTypes = nil
+ file_p2p_protocol_circuitv2_pb_circuit_proto_depIdxs = nil
+}
diff --git a/p2p/protocol/circuitv2/pb/circuit.proto b/p2p/protocol/circuitv2/pb/circuit.proto
new file mode 100644
index 0000000000..43af7e8957
--- /dev/null
+++ b/p2p/protocol/circuitv2/pb/circuit.proto
@@ -0,0 +1,72 @@
+syntax = "proto3";
+
+package circuit.pb;
+
+option go_package = "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb";
+
+message HopMessage {
+ enum Type {
+ RESERVE = 0;
+ CONNECT = 1;
+ STATUS = 2;
+ }
+
+ // This field is marked optional for backwards compatibility with proto2.
+ // Users should make sure to always set this.
+ optional Type type = 1;
+
+ optional Peer peer = 2;
+ optional Reservation reservation = 3;
+ optional Limit limit = 4;
+
+ optional Status status = 5;
+}
+
+message StopMessage {
+ enum Type {
+ CONNECT = 0;
+ STATUS = 1;
+ }
+
+ // This field is marked optional for backwards compatibility with proto2.
+ // Users should make sure to always set this.
+ optional Type type = 1;
+
+ optional Peer peer = 2;
+ optional Limit limit = 3;
+
+ optional Status status = 4;
+}
+
+message Peer {
+ // This field is marked optional for backwards compatibility with proto2.
+ // Users should make sure to always set this.
+ optional bytes id = 1;
+ repeated bytes addrs = 2;
+}
+
+message Reservation {
+ // This field is marked optional for backwards compatibility with proto2.
+ // Users should make sure to always set this.
+ optional uint64 expire = 1; // Unix expiration time (UTC)
+ repeated bytes addrs = 2; // relay addrs for reserving peer
+ optional bytes voucher = 3; // reservation voucher
+}
+
+message Limit {
+ optional uint32 duration = 1; // seconds
+ optional uint64 data = 2; // bytes
+}
+
+enum Status {
+ // zero value field required for proto3 compatibility
+ UNUSED = 0;
+ OK = 100;
+ RESERVATION_REFUSED = 200;
+ RESOURCE_LIMIT_EXCEEDED = 201;
+ PERMISSION_DENIED = 202;
+ CONNECTION_FAILED = 203;
+ NO_RESERVATION = 204;
+ MALFORMED_MESSAGE = 400;
+ UNEXPECTED_MESSAGE = 401;
+}
diff --git a/p2p/protocol/circuitv2/pb/voucher.pb.go b/p2p/protocol/circuitv2/pb/voucher.pb.go
new file mode 100644
index 0000000000..d56b0ee5a8
--- /dev/null
+++ b/p2p/protocol/circuitv2/pb/voucher.pb.go
@@ -0,0 +1,149 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.6
+// protoc v5.29.2
+// source: p2p/protocol/circuitv2/pb/voucher.proto
+
+package pb
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+ unsafe "unsafe"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type ReservationVoucher struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // These fields are marked optional for backwards compatibility with proto2.
+ // Users should make sure to always set these.
+ Relay []byte `protobuf:"bytes,1,opt,name=relay,proto3,oneof" json:"relay,omitempty"`
+ Peer []byte `protobuf:"bytes,2,opt,name=peer,proto3,oneof" json:"peer,omitempty"`
+ Expiration *uint64 `protobuf:"varint,3,opt,name=expiration,proto3,oneof" json:"expiration,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ReservationVoucher) Reset() {
+ *x = ReservationVoucher{}
+ mi := &file_p2p_protocol_circuitv2_pb_voucher_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ReservationVoucher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ReservationVoucher) ProtoMessage() {}
+
+func (x *ReservationVoucher) ProtoReflect() protoreflect.Message {
+ mi := &file_p2p_protocol_circuitv2_pb_voucher_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ReservationVoucher.ProtoReflect.Descriptor instead.
+func (*ReservationVoucher) Descriptor() ([]byte, []int) {
+ return file_p2p_protocol_circuitv2_pb_voucher_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ReservationVoucher) GetRelay() []byte {
+ if x != nil {
+ return x.Relay
+ }
+ return nil
+}
+
+func (x *ReservationVoucher) GetPeer() []byte {
+ if x != nil {
+ return x.Peer
+ }
+ return nil
+}
+
+func (x *ReservationVoucher) GetExpiration() uint64 {
+ if x != nil && x.Expiration != nil {
+ return *x.Expiration
+ }
+ return 0
+}
+
+var File_p2p_protocol_circuitv2_pb_voucher_proto protoreflect.FileDescriptor
+
+const file_p2p_protocol_circuitv2_pb_voucher_proto_rawDesc = "" +
+ "\n" +
+ "'p2p/protocol/circuitv2/pb/voucher.proto\x12\n" +
+ "circuit.pb\"\x8f\x01\n" +
+ "\x12ReservationVoucher\x12\x19\n" +
+ "\x05relay\x18\x01 \x01(\fH\x00R\x05relay\x88\x01\x01\x12\x17\n" +
+ "\x04peer\x18\x02 \x01(\fH\x01R\x04peer\x88\x01\x01\x12#\n" +
+ "\n" +
+ "expiration\x18\x03 \x01(\x04H\x02R\n" +
+ "expiration\x88\x01\x01B\b\n" +
+ "\x06_relayB\a\n" +
+ "\x05_peerB\r\n" +
+ "\v_expirationB7Z5github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pbb\x06proto3"
+
+var (
+ file_p2p_protocol_circuitv2_pb_voucher_proto_rawDescOnce sync.Once
+ file_p2p_protocol_circuitv2_pb_voucher_proto_rawDescData []byte
+)
+
+func file_p2p_protocol_circuitv2_pb_voucher_proto_rawDescGZIP() []byte {
+ file_p2p_protocol_circuitv2_pb_voucher_proto_rawDescOnce.Do(func() {
+ file_p2p_protocol_circuitv2_pb_voucher_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_p2p_protocol_circuitv2_pb_voucher_proto_rawDesc), len(file_p2p_protocol_circuitv2_pb_voucher_proto_rawDesc)))
+ })
+ return file_p2p_protocol_circuitv2_pb_voucher_proto_rawDescData
+}
+
+var file_p2p_protocol_circuitv2_pb_voucher_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_p2p_protocol_circuitv2_pb_voucher_proto_goTypes = []any{
+ (*ReservationVoucher)(nil), // 0: circuit.pb.ReservationVoucher
+}
+var file_p2p_protocol_circuitv2_pb_voucher_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_p2p_protocol_circuitv2_pb_voucher_proto_init() }
+func file_p2p_protocol_circuitv2_pb_voucher_proto_init() {
+ if File_p2p_protocol_circuitv2_pb_voucher_proto != nil {
+ return
+ }
+ file_p2p_protocol_circuitv2_pb_voucher_proto_msgTypes[0].OneofWrappers = []any{}
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_p2p_protocol_circuitv2_pb_voucher_proto_rawDesc), len(file_p2p_protocol_circuitv2_pb_voucher_proto_rawDesc)),
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_p2p_protocol_circuitv2_pb_voucher_proto_goTypes,
+ DependencyIndexes: file_p2p_protocol_circuitv2_pb_voucher_proto_depIdxs,
+ MessageInfos: file_p2p_protocol_circuitv2_pb_voucher_proto_msgTypes,
+ }.Build()
+ File_p2p_protocol_circuitv2_pb_voucher_proto = out.File
+ file_p2p_protocol_circuitv2_pb_voucher_proto_goTypes = nil
+ file_p2p_protocol_circuitv2_pb_voucher_proto_depIdxs = nil
+}
diff --git a/p2p/protocol/circuitv2/pb/voucher.proto b/p2p/protocol/circuitv2/pb/voucher.proto
new file mode 100644
index 0000000000..b723c78aae
--- /dev/null
+++ b/p2p/protocol/circuitv2/pb/voucher.proto
@@ -0,0 +1,13 @@
+syntax = "proto3";
+
+package circuit.pb;
+
+option go_package = "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb";
+
+message ReservationVoucher {
+ // These fields are marked optional for backwards compatibility with proto2.
+ // Users should make sure to always set these.
+ optional bytes relay = 1;
+ optional bytes peer = 2;
+ optional uint64 expiration = 3;
+}
diff --git a/p2p/protocol/circuitv2/proto/protocol.go b/p2p/protocol/circuitv2/proto/protocol.go
new file mode 100644
index 0000000000..4b6d96b887
--- /dev/null
+++ b/p2p/protocol/circuitv2/proto/protocol.go
@@ -0,0 +1,6 @@
+package proto
+
+const (
+ ProtoIDv2Hop = "/libp2p/circuit/relay/0.2.0/hop"
+ ProtoIDv2Stop = "/libp2p/circuit/relay/0.2.0/stop"
+)
diff --git a/p2p/protocol/circuitv2/proto/voucher.go b/p2p/protocol/circuitv2/proto/voucher.go
new file mode 100644
index 0000000000..7114d81c65
--- /dev/null
+++ b/p2p/protocol/circuitv2/proto/voucher.go
@@ -0,0 +1,69 @@
+package proto
+
+import (
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/record"
+ pbv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb"
+
+ "google.golang.org/protobuf/proto"
+)
+
+const RecordDomain = "libp2p-relay-rsvp"
+
+// TODO: register in multicodec table in https://github.com/multiformats/multicodec
+var RecordCodec = []byte{0x03, 0x02}
+
+func init() {
+ record.RegisterType(&ReservationVoucher{})
+}
+
+type ReservationVoucher struct {
+ // Relay is the ID of the peer providing relay service
+ Relay peer.ID
+ // Peer is the ID of the peer receiving relay service through Relay
+ Peer peer.ID
+ // Expiration is the expiration time of the reservation
+ Expiration time.Time
+}
+
+var _ record.Record = (*ReservationVoucher)(nil)
+
+func (rv *ReservationVoucher) Domain() string {
+ return RecordDomain
+}
+
+func (rv *ReservationVoucher) Codec() []byte {
+ return RecordCodec
+}
+
+func (rv *ReservationVoucher) MarshalRecord() ([]byte, error) {
+ expiration := uint64(rv.Expiration.Unix())
+ return proto.Marshal(&pbv2.ReservationVoucher{
+ Relay: []byte(rv.Relay),
+ Peer: []byte(rv.Peer),
+ Expiration: &expiration,
+ })
+}
+
+func (rv *ReservationVoucher) UnmarshalRecord(blob []byte) error {
+ pbrv := pbv2.ReservationVoucher{}
+ err := proto.Unmarshal(blob, &pbrv)
+ if err != nil {
+ return err
+ }
+
+ rv.Relay, err = peer.IDFromBytes(pbrv.GetRelay())
+ if err != nil {
+ return err
+ }
+
+ rv.Peer, err = peer.IDFromBytes(pbrv.GetPeer())
+ if err != nil {
+ return err
+ }
+
+ rv.Expiration = time.Unix(int64(pbrv.GetExpiration()), 0)
+ return nil
+}
diff --git a/p2p/protocol/circuitv2/proto/voucher_test.go b/p2p/protocol/circuitv2/proto/voucher_test.go
new file mode 100644
index 0000000000..643dd0a5df
--- /dev/null
+++ b/p2p/protocol/circuitv2/proto/voucher_test.go
@@ -0,0 +1,68 @@
+package proto
+
+import (
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/record"
+)
+
+func TestReservationVoucher(t *testing.T) {
+ relayPrivk, relayPubk, err := crypto.GenerateKeyPair(crypto.Ed25519, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, peerPubk, err := crypto.GenerateKeyPair(crypto.Ed25519, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ relayID, err := peer.IDFromPublicKey(relayPubk)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ peerID, err := peer.IDFromPublicKey(peerPubk)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ rsvp := &ReservationVoucher{
+ Relay: relayID,
+ Peer: peerID,
+ Expiration: time.Now().Add(time.Hour),
+ }
+
+ envelope, err := record.Seal(rsvp, relayPrivk)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ blob, err := envelope.Marshal()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, rec, err := record.ConsumeEnvelope(blob, RecordDomain)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ rsvp2, ok := rec.(*ReservationVoucher)
+ if !ok {
+ t.Fatalf("invalid record type %+T", rec)
+ }
+
+ if rsvp.Relay != rsvp2.Relay {
+ t.Fatal("relay IDs don't match")
+ }
+ if rsvp.Peer != rsvp2.Peer {
+ t.Fatal("peer IDs don't match")
+ }
+ if rsvp.Expiration.Unix() != rsvp2.Expiration.Unix() {
+ t.Fatal("expirations don't match")
+ }
+}
diff --git a/p2p/protocol/circuitv2/relay/acl.go b/p2p/protocol/circuitv2/relay/acl.go
new file mode 100644
index 0000000000..4191556a35
--- /dev/null
+++ b/p2p/protocol/circuitv2/relay/acl.go
@@ -0,0 +1,17 @@
+package relay
+
+import (
+ "github.com/libp2p/go-libp2p/core/peer"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+// ACLFilter is an Access Control mechanism for relayed connections.
+type ACLFilter interface {
+ // AllowReserve returns true if a reservation from a peer with the given peer ID and multiaddr
+ // is allowed.
+ AllowReserve(p peer.ID, a ma.Multiaddr) bool
+ // AllowConnect returns true if a source peer with a given multiaddr is allowed to connect
+ // to a destination peer.
+ AllowConnect(src peer.ID, srcAddr ma.Multiaddr, dest peer.ID) bool
+}
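+
+// Minimal implementation sketch (editor's illustration; the type below is
+// hypothetical, not part of this package): an allowlist filter that only
+// serves known peers.
+//
+//	type allowlistACL struct{ allowed map[peer.ID]struct{} }
+//
+//	func (a *allowlistACL) AllowReserve(p peer.ID, _ ma.Multiaddr) bool {
+//		_, ok := a.allowed[p]
+//		return ok
+//	}
+//
+//	func (a *allowlistACL) AllowConnect(_ peer.ID, _ ma.Multiaddr, dest peer.ID) bool {
+//		_, ok := a.allowed[dest]
+//		return ok
+//	}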
diff --git a/p2p/protocol/circuitv2/relay/constraints.go b/p2p/protocol/circuitv2/relay/constraints.go
new file mode 100644
index 0000000000..4cce1f0176
--- /dev/null
+++ b/p2p/protocol/circuitv2/relay/constraints.go
@@ -0,0 +1,133 @@
+package relay
+
+import (
+ "errors"
+ "slices"
+ "sync"
+ "time"
+
+ asnutil "github.com/libp2p/go-libp2p-asn-util"
+ "github.com/libp2p/go-libp2p/core/peer"
+
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
+var (
+ errTooManyReservations = errors.New("too many reservations")
+ errTooManyReservationsForIP = errors.New("too many peers for IP address")
+ errTooManyReservationsForASN = errors.New("too many peers for ASN")
+)
+
+type peerWithExpiry struct {
+ Expiry time.Time
+ Peer peer.ID
+}
+
+// constraints implements various reservation constraints
+type constraints struct {
+ rc *Resources
+
+ mutex sync.Mutex
+ total []peerWithExpiry
+ ips map[string][]peerWithExpiry
+ asns map[uint32][]peerWithExpiry
+}
+
+// newConstraints creates a new constraints object.
+// Reserve is safe for concurrent use; it synchronizes internally with a mutex.
+func newConstraints(rc *Resources) *constraints {
+ return &constraints{
+ rc: rc,
+ ips: make(map[string][]peerWithExpiry),
+ asns: make(map[uint32][]peerWithExpiry),
+ }
+}
+
+// Reserve adds a reservation for a given peer with a given multiaddr.
+// If adding this reservation violates IP, ASN, or total reservation constraints, an error is returned.
+func (c *constraints) Reserve(p peer.ID, a ma.Multiaddr, expiry time.Time) error {
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+
+ now := time.Now()
+ c.cleanup(now)
+ // To handle refreshes correctly, remove the existing reservation for the peer.
+ c.cleanupPeer(p)
+
+ if len(c.total) >= c.rc.MaxReservations {
+ return errTooManyReservations
+ }
+
+ ip, err := manet.ToIP(a)
+ if err != nil {
+ return errors.New("no IP address associated with peer")
+ }
+
+ ipReservations := c.ips[ip.String()]
+ if len(ipReservations) >= c.rc.MaxReservationsPerIP {
+ return errTooManyReservationsForIP
+ }
+
+ var asnReservations []peerWithExpiry
+ var asn uint32
+ if ip.To4() == nil {
+ asn = asnutil.AsnForIPv6(ip)
+ if asn != 0 {
+ asnReservations = c.asns[asn]
+ if len(asnReservations) >= c.rc.MaxReservationsPerASN {
+ return errTooManyReservationsForASN
+ }
+ }
+ }
+
+ c.total = append(c.total, peerWithExpiry{Expiry: expiry, Peer: p})
+
+ ipReservations = append(ipReservations, peerWithExpiry{Expiry: expiry, Peer: p})
+ c.ips[ip.String()] = ipReservations
+
+ if asn != 0 {
+ asnReservations = append(asnReservations, peerWithExpiry{Expiry: expiry, Peer: p})
+ c.asns[asn] = asnReservations
+ }
+ return nil
+}
+
+func (c *constraints) cleanup(now time.Time) {
+ expireFunc := func(pe peerWithExpiry) bool {
+ return pe.Expiry.Before(now)
+ }
+ c.total = slices.DeleteFunc(c.total, expireFunc)
+ for k, ipReservations := range c.ips {
+ c.ips[k] = slices.DeleteFunc(ipReservations, expireFunc)
+ if len(c.ips[k]) == 0 {
+ delete(c.ips, k)
+ }
+ }
+ for k, asnReservations := range c.asns {
+ c.asns[k] = slices.DeleteFunc(asnReservations, expireFunc)
+ if len(c.asns[k]) == 0 {
+ delete(c.asns, k)
+ }
+ }
+}
+
+func (c *constraints) cleanupPeer(p peer.ID) {
+ removeFunc := func(pe peerWithExpiry) bool {
+ return pe.Peer == p
+ }
+ c.total = slices.DeleteFunc(c.total, removeFunc)
+ for k, ipReservations := range c.ips {
+ c.ips[k] = slices.DeleteFunc(ipReservations, removeFunc)
+ if len(c.ips[k]) == 0 {
+ delete(c.ips, k)
+ }
+ }
+ for k, asnReservations := range c.asns {
+ c.asns[k] = slices.DeleteFunc(asnReservations, removeFunc)
+ if len(c.asns[k]) == 0 {
+ delete(c.asns, k)
+ }
+ }
+}
diff --git a/p2p/protocol/circuitv2/relay/constraints_test.go b/p2p/protocol/circuitv2/relay/constraints_test.go
new file mode 100644
index 0000000000..bced8e4097
--- /dev/null
+++ b/p2p/protocol/circuitv2/relay/constraints_test.go
@@ -0,0 +1,146 @@
+package relay
+
+import (
+ "crypto/rand"
+ "fmt"
+ "math"
+ "net"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/test"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+func randomIPv4Addr(t *testing.T) ma.Multiaddr {
+ t.Helper()
+ b := make([]byte, 4)
+ rand.Read(b)
+ addr, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/1234", net.IP(b)))
+ if err != nil {
+ t.Fatal(err)
+ }
+ return addr
+}
+
+func TestConstraints(t *testing.T) {
+ infResources := func() *Resources {
+ return &Resources{
+ MaxReservations: math.MaxInt32,
+ MaxReservationsPerPeer: math.MaxInt32,
+ MaxReservationsPerIP: math.MaxInt32,
+ MaxReservationsPerASN: math.MaxInt32,
+ }
+ }
+ const limit = 7
+ expiry := time.Now().Add(30 * time.Minute)
+
+ t.Run("total reservations", func(t *testing.T) {
+ res := infResources()
+ res.MaxReservations = limit
+ c := newConstraints(res)
+ for i := 0; i < limit; i++ {
+ if err := c.Reserve(test.RandPeerIDFatal(t), randomIPv4Addr(t), expiry); err != nil {
+ t.Fatal(err)
+ }
+ }
+ if err := c.Reserve(test.RandPeerIDFatal(t), randomIPv4Addr(t), expiry); err != errTooManyReservations {
+ t.Fatalf("expected to run into total reservation limit, got %v", err)
+ }
+ })
+
+ t.Run("updates reservations on the same peer", func(t *testing.T) {
+ p := test.RandPeerIDFatal(t)
+ p2 := test.RandPeerIDFatal(t)
+ res := infResources()
+ res.MaxReservationsPerIP = 1
+ c := newConstraints(res)
+
+ ipAddr := randomIPv4Addr(t)
+ if err := c.Reserve(p, ipAddr, expiry); err != nil {
+ t.Fatal(err)
+ }
+ if err := c.Reserve(p2, ipAddr, expiry); err != errTooManyReservationsForIP {
+ t.Fatalf("expected to run into IP reservation limit as this IP has already been reserved by a different peer, got %v", err)
+ }
+ if err := c.Reserve(p, randomIPv4Addr(t), expiry); err != nil {
+ t.Fatalf("expected to update existing reservation for peer, got %v", err)
+ }
+ if err := c.Reserve(p2, ipAddr, expiry); err != nil {
+ t.Fatalf("expected reservation for different peer to be possible, got %v", err)
+ }
+ })
+
+ t.Run("reservations per IP", func(t *testing.T) {
+ ip := randomIPv4Addr(t)
+ res := infResources()
+ res.MaxReservationsPerIP = limit
+ c := newConstraints(res)
+ for i := 0; i < limit; i++ {
+ if err := c.Reserve(test.RandPeerIDFatal(t), ip, expiry); err != nil {
+ t.Fatal(err)
+ }
+ }
+ if err := c.Reserve(test.RandPeerIDFatal(t), ip, expiry); err != errTooManyReservationsForIP {
+ t.Fatalf("expected to run into total reservation limit, got %v", err)
+ }
+ if err := c.Reserve(test.RandPeerIDFatal(t), randomIPv4Addr(t), expiry); err != nil {
+ t.Fatalf("expected reservation for different IP to be possible, got %v", err)
+ }
+ })
+
+ t.Run("reservations per ASN", func(t *testing.T) {
+ getAddr := func(t *testing.T, ip net.IP) ma.Multiaddr {
+ t.Helper()
+ addr, err := ma.NewMultiaddr(fmt.Sprintf("/ip6/%s/tcp/1234", ip))
+ if err != nil {
+ t.Fatal(err)
+ }
+ return addr
+ }
+
+ res := infResources()
+ res.MaxReservationsPerASN = limit
+ c := newConstraints(res)
+ const ipv6Prefix = "2a03:2880:f003:c07:face:b00c::"
+ for i := 0; i < limit; i++ {
+ addr := getAddr(t, net.ParseIP(fmt.Sprintf("%s%d", ipv6Prefix, i+1)))
+ if err := c.Reserve(test.RandPeerIDFatal(t), addr, expiry); err != nil {
+ t.Fatal(err)
+ }
+ }
+ if err := c.Reserve(test.RandPeerIDFatal(t), getAddr(t, net.ParseIP(fmt.Sprintf("%s%d", ipv6Prefix, 42))), expiry); err != errTooManyReservationsForASN {
+ t.Fatalf("expected to run into total reservation limit, got %v", err)
+ }
+ if err := c.Reserve(test.RandPeerIDFatal(t), randomIPv4Addr(t), expiry); err != nil {
+ t.Fatalf("expected reservation for different IP to be possible, got %v", err)
+ }
+ })
+}
+
+func TestConstraintsCleanup(t *testing.T) {
+ const limit = 7
+ validity := 500 * time.Millisecond
+ expiry := time.Now().Add(validity)
+ res := &Resources{
+ MaxReservations: limit,
+ MaxReservationsPerPeer: math.MaxInt32,
+ MaxReservationsPerIP: math.MaxInt32,
+ MaxReservationsPerASN: math.MaxInt32,
+ }
+ c := newConstraints(res)
+ for i := 0; i < limit; i++ {
+ if err := c.Reserve(test.RandPeerIDFatal(t), randomIPv4Addr(t), expiry); err != nil {
+ t.Fatal(err)
+ }
+ }
+ if err := c.Reserve(test.RandPeerIDFatal(t), randomIPv4Addr(t), expiry); err != errTooManyReservations {
+ t.Fatalf("expected to run into total reservation limit, got %v", err)
+ }
+
+ time.Sleep(validity + time.Millisecond)
+ if err := c.Reserve(test.RandPeerIDFatal(t), randomIPv4Addr(t), expiry); err != nil {
+ t.Fatalf("expected old reservations to have been garbage collected, %v", err)
+ }
+}
diff --git a/p2p/protocol/circuitv2/relay/metrics.go b/p2p/protocol/circuitv2/relay/metrics.go
new file mode 100644
index 0000000000..7786459133
--- /dev/null
+++ b/p2p/protocol/circuitv2/relay/metrics.go
@@ -0,0 +1,268 @@
+package relay
+
+import (
+ "time"
+
+ "github.com/libp2p/go-libp2p/p2p/metricshelper"
+ pbv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+const metricNamespace = "libp2p_relaysvc"
+
+var (
+ status = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "status",
+ Help: "Relay Status",
+ },
+ )
+
+ reservationsTotal = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "reservations_total",
+ Help: "Relay Reservation Request",
+ },
+ []string{"type"},
+ )
+ reservationRequestResponseStatusTotal = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "reservation_request_response_status_total",
+ Help: "Relay Reservation Request Response Status",
+ },
+ []string{"status"},
+ )
+ reservationRejectionsTotal = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "reservation_rejections_total",
+ Help: "Relay Reservation Rejected Reason",
+ },
+ []string{"reason"},
+ )
+
+ connectionsTotal = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "connections_total",
+ Help: "Relay Connection Total",
+ },
+ []string{"type"},
+ )
+ connectionRequestResponseStatusTotal = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "connection_request_response_status_total",
+ Help: "Relay Connection Request Status",
+ },
+ []string{"status"},
+ )
+ connectionRejectionsTotal = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "connection_rejections_total",
+ Help: "Relay Connection Rejected Reason",
+ },
+ []string{"reason"},
+ )
+ connectionDurationSeconds = prometheus.NewHistogram(
+ prometheus.HistogramOpts{
+ Namespace: metricNamespace,
+ Name: "connection_duration_seconds",
+ Help: "Relay Connection Duration",
+ },
+ )
+
+ dataTransferredBytesTotal = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "data_transferred_bytes_total",
+ Help: "Bytes Transferred Total",
+ },
+ )
+
+ collectors = []prometheus.Collector{
+ status,
+ reservationsTotal,
+ reservationRequestResponseStatusTotal,
+ reservationRejectionsTotal,
+ connectionsTotal,
+ connectionRequestResponseStatusTotal,
+ connectionRejectionsTotal,
+ connectionDurationSeconds,
+ dataTransferredBytesTotal,
+ }
+)
+
+const (
+ requestStatusOK = "ok"
+ requestStatusRejected = "rejected"
+ requestStatusError = "error"
+)
+
+// MetricsTracer is the interface for tracking metrics for the relay service
+type MetricsTracer interface {
+ // RelayStatus tracks whether the service is currently active
+ RelayStatus(enabled bool)
+
+ // ConnectionOpened tracks metrics on opening a relay connection
+ ConnectionOpened()
+ // ConnectionClosed tracks metrics on closing a relay connection
+ ConnectionClosed(d time.Duration)
+ // ConnectionRequestHandled tracks metrics on handling a relay connection request
+ ConnectionRequestHandled(status pbv2.Status)
+
+ // ReservationAllowed tracks metrics on opening or renewing a relay reservation
+ ReservationAllowed(isRenewal bool)
+	// ReservationClosed tracks metrics on closing a relay reservation
+ ReservationClosed(cnt int)
+ // ReservationRequestHandled tracks metrics on handling a relay reservation request
+ ReservationRequestHandled(status pbv2.Status)
+
+ // BytesTransferred tracks the total bytes transferred by the relay service
+ BytesTransferred(cnt int)
+}
+
+type metricsTracer struct{}
+
+var _ MetricsTracer = &metricsTracer{}
+
+type metricsTracerSetting struct {
+ reg prometheus.Registerer
+}
+
+type MetricsTracerOption func(*metricsTracerSetting)
+
+func WithRegisterer(reg prometheus.Registerer) MetricsTracerOption {
+ return func(s *metricsTracerSetting) {
+ if reg != nil {
+ s.reg = reg
+ }
+ }
+}
+
+func NewMetricsTracer(opts ...MetricsTracerOption) MetricsTracer {
+ setting := &metricsTracerSetting{reg: prometheus.DefaultRegisterer}
+ for _, opt := range opts {
+ opt(setting)
+ }
+ metricshelper.RegisterCollectors(setting.reg, collectors...)
+ return &metricsTracer{}
+}
+
+func (mt *metricsTracer) RelayStatus(enabled bool) {
+ if enabled {
+ status.Set(1)
+ } else {
+ status.Set(0)
+ }
+}
+
+func (mt *metricsTracer) ConnectionOpened() {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+ *tags = append(*tags, "opened")
+
+ connectionsTotal.WithLabelValues(*tags...).Add(1)
+}
+
+func (mt *metricsTracer) ConnectionClosed(d time.Duration) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+ *tags = append(*tags, "closed")
+
+ connectionsTotal.WithLabelValues(*tags...).Add(1)
+ connectionDurationSeconds.Observe(d.Seconds())
+}
+
+func (mt *metricsTracer) ConnectionRequestHandled(status pbv2.Status) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ respStatus := getResponseStatus(status)
+
+ *tags = append(*tags, respStatus)
+ connectionRequestResponseStatusTotal.WithLabelValues(*tags...).Add(1)
+ if respStatus == requestStatusRejected {
+ *tags = (*tags)[:0]
+ *tags = append(*tags, getRejectionReason(status))
+ connectionRejectionsTotal.WithLabelValues(*tags...).Add(1)
+ }
+}
+
+func (mt *metricsTracer) ReservationAllowed(isRenewal bool) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+ if isRenewal {
+ *tags = append(*tags, "renewed")
+ } else {
+ *tags = append(*tags, "opened")
+ }
+
+ reservationsTotal.WithLabelValues(*tags...).Add(1)
+}
+
+func (mt *metricsTracer) ReservationClosed(cnt int) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+ *tags = append(*tags, "closed")
+
+ reservationsTotal.WithLabelValues(*tags...).Add(float64(cnt))
+}
+
+func (mt *metricsTracer) ReservationRequestHandled(status pbv2.Status) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ respStatus := getResponseStatus(status)
+
+ *tags = append(*tags, respStatus)
+ reservationRequestResponseStatusTotal.WithLabelValues(*tags...).Add(1)
+ if respStatus == requestStatusRejected {
+ *tags = (*tags)[:0]
+ *tags = append(*tags, getRejectionReason(status))
+ reservationRejectionsTotal.WithLabelValues(*tags...).Add(1)
+ }
+}
+
+func (mt *metricsTracer) BytesTransferred(cnt int) {
+ dataTransferredBytesTotal.Add(float64(cnt))
+}
+
+func getResponseStatus(status pbv2.Status) string {
+ responseStatus := "unknown"
+ switch status {
+ case pbv2.Status_RESERVATION_REFUSED,
+ pbv2.Status_RESOURCE_LIMIT_EXCEEDED,
+ pbv2.Status_PERMISSION_DENIED,
+ pbv2.Status_NO_RESERVATION,
+ pbv2.Status_MALFORMED_MESSAGE:
+
+ responseStatus = requestStatusRejected
+ case pbv2.Status_UNEXPECTED_MESSAGE, pbv2.Status_CONNECTION_FAILED:
+ responseStatus = requestStatusError
+ case pbv2.Status_OK:
+ responseStatus = requestStatusOK
+ }
+ return responseStatus
+}
+
+func getRejectionReason(status pbv2.Status) string {
+ reason := "unknown"
+ switch status {
+ case pbv2.Status_RESERVATION_REFUSED:
+ reason = "ip constraint violation"
+ case pbv2.Status_RESOURCE_LIMIT_EXCEEDED:
+ reason = "resource limit exceeded"
+ case pbv2.Status_PERMISSION_DENIED:
+ reason = "permission denied"
+ case pbv2.Status_NO_RESERVATION:
+ reason = "no reservation"
+ case pbv2.Status_MALFORMED_MESSAGE:
+ reason = "malformed message"
+ }
+ return reason
+}
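The tracer above registers its collectors with a Prometheus registerer and is handed to the relay via the `WithMetricsTracer` option (see options.go below). A minimal wiring sketch, assuming a plain host from `libp2p.New`; the dedicated registry is illustrative rather than required:

```go
package main

import (
	"github.com/libp2p/go-libp2p"
	"github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay"
	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Use a dedicated registry instead of prometheus.DefaultRegisterer.
	reg := prometheus.NewRegistry()
	mt := relay.NewMetricsTracer(relay.WithRegisterer(reg))

	h, err := libp2p.New()
	if err != nil {
		panic(err)
	}
	defer h.Close()

	// The relay reports status, reservation, connection, and byte counters through mt.
	r, err := relay.New(h, relay.WithMetricsTracer(mt))
	if err != nil {
		panic(err)
	}
	defer r.Close()
}
```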
diff --git a/p2p/protocol/circuitv2/relay/metrics_test.go b/p2p/protocol/circuitv2/relay/metrics_test.go
new file mode 100644
index 0000000000..9af23fec75
--- /dev/null
+++ b/p2p/protocol/circuitv2/relay/metrics_test.go
@@ -0,0 +1,37 @@
+//go:build nocover
+
+package relay
+
+import (
+ "math/rand"
+ "testing"
+ "time"
+
+ pbv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb"
+)
+
+func TestNoCoverNoAlloc(t *testing.T) {
+ statuses := []pbv2.Status{
+ pbv2.Status_OK,
+ pbv2.Status_NO_RESERVATION,
+ pbv2.Status_RESOURCE_LIMIT_EXCEEDED,
+ pbv2.Status_PERMISSION_DENIED,
+ }
+ mt := NewMetricsTracer()
+ tests := map[string]func(){
+ "RelayStatus": func() { mt.RelayStatus(rand.Intn(2) == 1) },
+ "ConnectionOpened": func() { mt.ConnectionOpened() },
+ "ConnectionClosed": func() { mt.ConnectionClosed(time.Duration(rand.Intn(10)) * time.Second) },
+ "ConnectionRequestHandled": func() { mt.ConnectionRequestHandled(statuses[rand.Intn(len(statuses))]) },
+ "ReservationAllowed": func() { mt.ReservationAllowed(rand.Intn(2) == 1) },
+ "ReservationClosed": func() { mt.ReservationClosed(rand.Intn(10)) },
+ "ReservationRequestHandled": func() { mt.ReservationRequestHandled(statuses[rand.Intn(len(statuses))]) },
+ "BytesTransferred": func() { mt.BytesTransferred(rand.Intn(1000)) },
+ }
+ for method, f := range tests {
+ allocs := testing.AllocsPerRun(1000, f)
+ if allocs > 0 {
+ t.Fatalf("Alloc Test: %s, got: %0.2f, expected: 0 allocs", method, allocs)
+ }
+ }
+}
diff --git a/p2p/protocol/circuitv2/relay/options.go b/p2p/protocol/circuitv2/relay/options.go
new file mode 100644
index 0000000000..3b50ec385f
--- /dev/null
+++ b/p2p/protocol/circuitv2/relay/options.go
@@ -0,0 +1,43 @@
+package relay
+
+type Option func(*Relay) error
+
+// WithResources is a Relay option that sets the resource configuration for the relay.
+func WithResources(rc Resources) Option {
+ return func(r *Relay) error {
+ r.rc = rc
+ return nil
+ }
+}
+
+// WithLimit is a Relay option that sets only the relayed connection limits for the relay.
+func WithLimit(limit *RelayLimit) Option {
+ return func(r *Relay) error {
+ r.rc.Limit = limit
+ return nil
+ }
+}
+
+// WithInfiniteLimits is a Relay option that disables limits.
+func WithInfiniteLimits() Option {
+ return func(r *Relay) error {
+ r.rc.Limit = nil
+ return nil
+ }
+}
+
+// WithACL is a Relay option that supplies an ACLFilter for access control.
+func WithACL(acl ACLFilter) Option {
+ return func(r *Relay) error {
+ r.acl = acl
+ return nil
+ }
+}
+
+// WithMetricsTracer is a Relay option that supplies a MetricsTracer for the relay.
+func WithMetricsTracer(mt MetricsTracer) Option {
+ return func(r *Relay) error {
+ r.metricsTracer = mt
+ return nil
+ }
+}
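Each option mutates the same Relay in order, so a later `WithLimit` overrides the `Limit` carried by an earlier `WithResources`. A hedged sketch combining the two (the values are illustrative, not recommendations, and `h` is assumed to be an existing `host.Host`):

```go
// Sketch: more reservation slots and a looser per-connection limit.
rc := relay.DefaultResources()
rc.MaxReservations = 256

r, err := relay.New(h,
	relay.WithResources(rc),
	relay.WithLimit(&relay.RelayLimit{
		Duration: 5 * time.Minute,
		Data:     1 << 20, // 1 MiB in each direction
	}),
)
if err != nil {
	return err
}
defer r.Close()
```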
diff --git a/p2p/protocol/circuitv2/relay/relay.go b/p2p/protocol/circuitv2/relay/relay.go
new file mode 100644
index 0000000000..32fd736765
--- /dev/null
+++ b/p2p/protocol/circuitv2/relay/relay.go
@@ -0,0 +1,752 @@
+package relay
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/record"
+ pbv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb"
+ "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/proto"
+ "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/util"
+
+ pool "github.com/libp2p/go-buffer-pool"
+ logging "github.com/libp2p/go-libp2p/gologshim"
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
+const (
+ ServiceName = "libp2p.relay/v2"
+
+ ReservationTagWeight = 10
+
+ StreamTimeout = time.Minute
+ ConnectTimeout = 30 * time.Second
+ HandshakeTimeout = time.Minute
+
+ relayHopTag = "relay-v2-hop"
+ relayHopTagValue = 2
+
+ maxMessageSize = 4096
+)
+
+var log = logging.Logger("relay")
+
+// Relay is the (limited) relay service object.
+type Relay struct {
+ ctx context.Context
+ cancel func()
+
+ host host.Host
+ rc Resources
+ acl ACLFilter
+ constraints *constraints
+ scope network.ResourceScopeSpan
+ notifiee network.Notifiee
+
+ mx sync.Mutex
+ rsvp map[peer.ID]time.Time
+ conns map[peer.ID]int
+ closed bool
+
+ selfAddr ma.Multiaddr
+
+ metricsTracer MetricsTracer
+}
+
+// New constructs a new limited relay that can provide relay services on the given host.
+func New(h host.Host, opts ...Option) (*Relay, error) {
+ ctx, cancel := context.WithCancel(context.Background())
+
+ r := &Relay{
+ ctx: ctx,
+ cancel: cancel,
+ host: h,
+ rc: DefaultResources(),
+ acl: nil,
+ rsvp: make(map[peer.ID]time.Time),
+ conns: make(map[peer.ID]int),
+ }
+
+ for _, opt := range opts {
+ err := opt(r)
+ if err != nil {
+ return nil, fmt.Errorf("error applying relay option: %w", err)
+ }
+ }
+
+ // get a scope for memory reservations at service level
+ err := h.Network().ResourceManager().ViewService(ServiceName,
+ func(s network.ServiceScope) error {
+ var err error
+ r.scope, err = s.BeginSpan()
+ return err
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ r.constraints = newConstraints(&r.rc)
+ r.selfAddr = ma.StringCast(fmt.Sprintf("/p2p/%s", h.ID()))
+
+ h.SetStreamHandler(proto.ProtoIDv2Hop, r.handleStream)
+ r.notifiee = &network.NotifyBundle{DisconnectedF: r.disconnected}
+ h.Network().Notify(r.notifiee)
+
+ if r.metricsTracer != nil {
+ r.metricsTracer.RelayStatus(true)
+ }
+ go r.background()
+
+ return r, nil
+}
+
+func (r *Relay) Close() error {
+ r.mx.Lock()
+ if !r.closed {
+ r.closed = true
+ r.mx.Unlock()
+
+ r.host.RemoveStreamHandler(proto.ProtoIDv2Hop)
+ r.host.Network().StopNotify(r.notifiee)
+ defer r.scope.Done()
+ r.cancel()
+ r.gc()
+ if r.metricsTracer != nil {
+ r.metricsTracer.RelayStatus(false)
+ }
+ return nil
+ }
+ r.mx.Unlock()
+ return nil
+}
+
+func (r *Relay) handleStream(s network.Stream) {
+ log.Info("new relay stream", "remote_peer", s.Conn().RemotePeer())
+
+ if err := s.Scope().SetService(ServiceName); err != nil {
+ log.Debug("error attaching stream to relay service", "err", err)
+ s.Reset()
+ return
+ }
+
+ if err := s.Scope().ReserveMemory(maxMessageSize, network.ReservationPriorityAlways); err != nil {
+ log.Debug("error reserving memory for stream", "err", err)
+ s.Reset()
+ return
+ }
+ defer s.Scope().ReleaseMemory(maxMessageSize)
+
+ rd := util.NewDelimitedReader(s, maxMessageSize)
+ defer rd.Close()
+
+ s.SetReadDeadline(time.Now().Add(StreamTimeout))
+
+ var msg pbv2.HopMessage
+
+ err := rd.ReadMsg(&msg)
+ if err != nil {
+ r.handleError(s, pbv2.Status_MALFORMED_MESSAGE)
+ return
+ }
+ // reset stream deadline as message has been read
+ s.SetReadDeadline(time.Time{})
+ switch msg.GetType() {
+ case pbv2.HopMessage_RESERVE:
+ status := r.handleReserve(s)
+ if r.metricsTracer != nil {
+ r.metricsTracer.ReservationRequestHandled(status)
+ }
+ case pbv2.HopMessage_CONNECT:
+ status := r.handleConnect(s, &msg)
+ if r.metricsTracer != nil {
+ r.metricsTracer.ConnectionRequestHandled(status)
+ }
+ default:
+ r.handleError(s, pbv2.Status_MALFORMED_MESSAGE)
+ }
+}
+
+func (r *Relay) handleReserve(s network.Stream) pbv2.Status {
+ defer s.Close()
+ p := s.Conn().RemotePeer()
+ a := s.Conn().RemoteMultiaddr()
+
+ if isRelayAddr(a) {
+ log.Debug("refusing relay reservation",
+ "remote_peer", p,
+ "reason", "reservation attempt over relay connection")
+ r.handleError(s, pbv2.Status_PERMISSION_DENIED)
+ return pbv2.Status_PERMISSION_DENIED
+ }
+
+ if r.acl != nil && !r.acl.AllowReserve(p, a) {
+ log.Debug("refusing relay reservation",
+ "remote_peer", p,
+ "reason", "permission denied")
+ r.handleError(s, pbv2.Status_PERMISSION_DENIED)
+ return pbv2.Status_PERMISSION_DENIED
+ }
+
+ r.mx.Lock()
+	// Check if the relay is still active. If this block runs after Close(),
+	// ConnManager.UntagPeer would never be called for this reservation.
+ if r.closed {
+ r.mx.Unlock()
+ log.Debug("refusing relay reservation",
+ "remote_peer", p,
+ "reason", "relay closed")
+ r.handleError(s, pbv2.Status_PERMISSION_DENIED)
+ return pbv2.Status_PERMISSION_DENIED
+ }
+ now := time.Now()
+ expire := now.Add(r.rc.ReservationTTL)
+
+ _, exists := r.rsvp[p]
+ if err := r.constraints.Reserve(p, a, expire); err != nil {
+ r.mx.Unlock()
+ log.Debug("refusing relay reservation",
+ "remote_peer", p,
+ "reason", "IP constraint violation",
+ "error", err)
+ r.handleError(s, pbv2.Status_RESERVATION_REFUSED)
+ return pbv2.Status_RESERVATION_REFUSED
+ }
+
+ r.rsvp[p] = expire
+ r.host.ConnManager().TagPeer(p, "relay-reservation", ReservationTagWeight)
+ r.mx.Unlock()
+ if r.metricsTracer != nil {
+ r.metricsTracer.ReservationAllowed(exists)
+ }
+
+ log.Debug("reserving relay slot", "remote_peer", p)
+
+ // Delivery of the reservation might fail for a number of reasons.
+ // For example, the stream might be reset or the connection might be closed before the reservation is received.
+ // In that case, the reservation will just be garbage collected later.
+ rsvp := makeReservationMsg(
+ r.host.Peerstore().PrivKey(r.host.ID()),
+ r.host.ID(),
+ r.host.Addrs(),
+ p,
+ expire)
+ if err := r.writeResponse(s, pbv2.Status_OK, rsvp, r.makeLimitMsg(p)); err != nil {
+		log.Debug("error writing reservation response; retracting reservation",
+			"remote_peer", p,
+			"err", err)
+ s.Reset()
+ return pbv2.Status_CONNECTION_FAILED
+ }
+ return pbv2.Status_OK
+}
+
+func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) pbv2.Status {
+ src := s.Conn().RemotePeer()
+ a := s.Conn().RemoteMultiaddr()
+
+ span, err := r.scope.BeginSpan()
+ if err != nil {
+ log.Debug("failed to begin relay transaction",
+ "error", err)
+ r.handleError(s, pbv2.Status_RESOURCE_LIMIT_EXCEEDED)
+ return pbv2.Status_RESOURCE_LIMIT_EXCEEDED
+ }
+
+ fail := func(status pbv2.Status) {
+ span.Done()
+ r.handleError(s, status)
+ }
+
+ // reserve buffers for the relay
+ if err := span.ReserveMemory(2*r.rc.BufferSize, network.ReservationPriorityHigh); err != nil {
+ log.Debug("error reserving memory for relay",
+ "error", err)
+ fail(pbv2.Status_RESOURCE_LIMIT_EXCEEDED)
+ return pbv2.Status_RESOURCE_LIMIT_EXCEEDED
+ }
+
+ if isRelayAddr(a) {
+ log.Debug("refusing connection",
+ "reason", "connection attempt over relay connection")
+ fail(pbv2.Status_PERMISSION_DENIED)
+ return pbv2.Status_PERMISSION_DENIED
+ }
+
+ dest, err := util.PeerToPeerInfoV2(msg.GetPeer())
+ if err != nil {
+ fail(pbv2.Status_MALFORMED_MESSAGE)
+ return pbv2.Status_MALFORMED_MESSAGE
+ }
+
+ if r.acl != nil && !r.acl.AllowConnect(src, s.Conn().RemoteMultiaddr(), dest.ID) {
+ log.Debug("refusing connection",
+ "source_peer", src,
+ "destination_peer", dest.ID,
+ "reason", "permission denied")
+ fail(pbv2.Status_PERMISSION_DENIED)
+ return pbv2.Status_PERMISSION_DENIED
+ }
+
+ r.mx.Lock()
+ _, rsvp := r.rsvp[dest.ID]
+ if !rsvp {
+ r.mx.Unlock()
+ log.Debug("refusing connection",
+ "source_peer", src,
+ "destination_peer", dest.ID,
+ "reason", "no reservation")
+ fail(pbv2.Status_NO_RESERVATION)
+ return pbv2.Status_NO_RESERVATION
+ }
+
+ srcConns := r.conns[src]
+ if srcConns >= r.rc.MaxCircuits {
+ r.mx.Unlock()
+ log.Debug("refusing connection",
+ "source_peer", src,
+ "destination_peer", dest.ID,
+ "reason", "too many connections from source")
+ fail(pbv2.Status_RESOURCE_LIMIT_EXCEEDED)
+ return pbv2.Status_RESOURCE_LIMIT_EXCEEDED
+ }
+
+ destConns := r.conns[dest.ID]
+ if destConns >= r.rc.MaxCircuits {
+ r.mx.Unlock()
+ log.Debug("refusing connection",
+ "source_peer", src,
+ "destination_peer", dest.ID,
+ "reason", "too many connections to destination")
+ fail(pbv2.Status_RESOURCE_LIMIT_EXCEEDED)
+ return pbv2.Status_RESOURCE_LIMIT_EXCEEDED
+ }
+
+ r.addConn(src)
+ r.addConn(dest.ID)
+ r.mx.Unlock()
+
+ if r.metricsTracer != nil {
+ r.metricsTracer.ConnectionOpened()
+ }
+ connStTime := time.Now()
+
+ cleanup := func() {
+ defer span.Done()
+ r.mx.Lock()
+ r.rmConn(src)
+ r.rmConn(dest.ID)
+ r.mx.Unlock()
+ if r.metricsTracer != nil {
+ r.metricsTracer.ConnectionClosed(time.Since(connStTime))
+ }
+ }
+
+ ctx, cancel := context.WithTimeout(r.ctx, ConnectTimeout)
+ defer cancel()
+
+ ctx = network.WithNoDial(ctx, "relay connect")
+
+ bs, err := r.host.NewStream(ctx, dest.ID, proto.ProtoIDv2Stop)
+ if err != nil {
+ log.Debug("error opening relay stream",
+ "destination_peer", dest.ID,
+ "err", err)
+ cleanup()
+ r.handleError(s, pbv2.Status_CONNECTION_FAILED)
+ return pbv2.Status_CONNECTION_FAILED
+ }
+
+ fail = func(status pbv2.Status) {
+ bs.Reset()
+ cleanup()
+ r.handleError(s, status)
+ }
+
+ if err := bs.Scope().SetService(ServiceName); err != nil {
+ log.Debug("error attaching stream to relay service",
+ "error", err)
+ fail(pbv2.Status_RESOURCE_LIMIT_EXCEEDED)
+ return pbv2.Status_RESOURCE_LIMIT_EXCEEDED
+ }
+
+ // handshake
+ if err := bs.Scope().ReserveMemory(maxMessageSize, network.ReservationPriorityAlways); err != nil {
+ log.Debug("error reserving memory for stream",
+ "error", err)
+ fail(pbv2.Status_RESOURCE_LIMIT_EXCEEDED)
+ return pbv2.Status_RESOURCE_LIMIT_EXCEEDED
+ }
+ defer bs.Scope().ReleaseMemory(maxMessageSize)
+
+ rd := util.NewDelimitedReader(bs, maxMessageSize)
+ wr := util.NewDelimitedWriter(bs)
+ defer rd.Close()
+
+ var stopmsg pbv2.StopMessage
+ stopmsg.Type = pbv2.StopMessage_CONNECT.Enum()
+ stopmsg.Peer = util.PeerInfoToPeerV2(peer.AddrInfo{ID: src})
+ stopmsg.Limit = r.makeLimitMsg(dest.ID)
+
+ bs.SetDeadline(time.Now().Add(HandshakeTimeout))
+
+ err = wr.WriteMsg(&stopmsg)
+ if err != nil {
+		log.Debug("error writing stop handshake", "err", err)
+ fail(pbv2.Status_CONNECTION_FAILED)
+ return pbv2.Status_CONNECTION_FAILED
+ }
+
+ stopmsg.Reset()
+
+ err = rd.ReadMsg(&stopmsg)
+ if err != nil {
+ log.Debug("error reading stop response",
+ "err", err)
+ fail(pbv2.Status_CONNECTION_FAILED)
+ return pbv2.Status_CONNECTION_FAILED
+ }
+
+ if t := stopmsg.GetType(); t != pbv2.StopMessage_STATUS {
+ log.Debug("unexpected stop response",
+ "message_type", t,
+ "expected", "status message")
+ fail(pbv2.Status_CONNECTION_FAILED)
+ return pbv2.Status_CONNECTION_FAILED
+ }
+
+ if status := stopmsg.GetStatus(); status != pbv2.Status_OK {
+ log.Debug("relay stop failure",
+ "status", status)
+ fail(pbv2.Status_CONNECTION_FAILED)
+ return pbv2.Status_CONNECTION_FAILED
+ }
+
+ var response pbv2.HopMessage
+ response.Type = pbv2.HopMessage_STATUS.Enum()
+ response.Status = pbv2.Status_OK.Enum()
+ response.Limit = r.makeLimitMsg(dest.ID)
+
+ wr = util.NewDelimitedWriter(s)
+ err = wr.WriteMsg(&response)
+ if err != nil {
+ log.Debug("error writing relay response",
+ "err", err)
+ bs.Reset()
+ s.Reset()
+ cleanup()
+ return pbv2.Status_CONNECTION_FAILED
+ }
+
+ // reset deadline
+ bs.SetDeadline(time.Time{})
+
+ log.Info("relaying connection",
+ "source_peer", src,
+ "destination_peer", dest.ID)
+
+ var goroutines atomic.Int32
+ goroutines.Store(2)
+
+ done := func() {
+ if goroutines.Add(-1) == 0 {
+ s.Close()
+ bs.Close()
+ cleanup()
+ }
+ }
+
+ if r.rc.Limit != nil {
+ deadline := time.Now().Add(r.rc.Limit.Duration)
+ s.SetDeadline(deadline)
+ bs.SetDeadline(deadline)
+ go r.relayLimited(s, bs, src, dest.ID, r.rc.Limit.Data, done)
+ go r.relayLimited(bs, s, dest.ID, src, r.rc.Limit.Data, done)
+ } else {
+ go r.relayUnlimited(s, bs, src, dest.ID, done)
+ go r.relayUnlimited(bs, s, dest.ID, src, done)
+ }
+
+ return pbv2.Status_OK
+}
+
+func (r *Relay) addConn(p peer.ID) {
+ conns := r.conns[p]
+ conns++
+ r.conns[p] = conns
+ if conns == 1 {
+ r.host.ConnManager().TagPeer(p, relayHopTag, relayHopTagValue)
+ }
+}
+
+func (r *Relay) rmConn(p peer.ID) {
+ conns := r.conns[p]
+ conns--
+ if conns > 0 {
+ r.conns[p] = conns
+ } else {
+ delete(r.conns, p)
+ r.host.ConnManager().UntagPeer(p, relayHopTag)
+ }
+}
+
+func (r *Relay) relayLimited(src, dest network.Stream, srcID, destID peer.ID, limit int64, done func()) {
+ defer done()
+
+ buf := pool.Get(r.rc.BufferSize)
+ defer pool.Put(buf)
+
+ limitedSrc := io.LimitReader(src, limit)
+
+ count, err := r.copyWithBuffer(dest, limitedSrc, buf)
+ if err != nil {
+ log.Debug("relay copy error", "err", err)
+ // Reset both.
+ src.Reset()
+ dest.Reset()
+ } else {
+ // propagate the close
+ dest.CloseWrite()
+ if count == limit {
+ // we've reached the limit, discard further input
+ src.CloseRead()
+ }
+ }
+
+ log.Debug("relayed bytes", "count", count, "srcID", srcID, "destID", destID)
+}
+
+func (r *Relay) relayUnlimited(src, dest network.Stream, srcID, destID peer.ID, done func()) {
+ defer done()
+
+ buf := pool.Get(r.rc.BufferSize)
+ defer pool.Put(buf)
+
+ count, err := r.copyWithBuffer(dest, src, buf)
+ if err != nil {
+ log.Debug("relay copy error", "err", err)
+ // Reset both.
+ src.Reset()
+ dest.Reset()
+ } else {
+ // propagate the close
+ dest.CloseWrite()
+ }
+
+ log.Debug("relayed bytes", "count", count, "srcID", srcID, "destID", destID)
+}
+
+// errInvalidWrite means that a write returned an impossible count.
+// copied from io.errInvalidWrite
+var errInvalidWrite = errors.New("invalid write result")
+
+// copyWithBuffer copies from src to dst using the provided buf until either EOF is reached
+// on src or an error occurs. It reports the number of bytes transferred to metricsTracer.
+// The implementation is a modified form of io.CopyBuffer to support metrics tracking.
+func (r *Relay) copyWithBuffer(dst io.Writer, src io.Reader, buf []byte) (written int64, err error) {
+ for {
+ nr, er := src.Read(buf)
+ if nr > 0 {
+ nw, ew := dst.Write(buf[0:nr])
+ if nw < 0 || nr < nw {
+ nw = 0
+ if ew == nil {
+ ew = errInvalidWrite
+ }
+ }
+ written += int64(nw)
+ if ew != nil {
+ err = ew
+ break
+ }
+ if nr != nw {
+ err = io.ErrShortWrite
+ break
+ }
+ if r.metricsTracer != nil {
+ r.metricsTracer.BytesTransferred(nw)
+ }
+ }
+ if er != nil {
+ if er != io.EOF {
+ err = er
+ }
+ break
+ }
+ }
+ return written, err
+}
+
+func (r *Relay) handleError(s network.Stream, status pbv2.Status) {
+ log.Debug("relay error", "status_name", pbv2.Status_name[int32(status)], "status", status)
+ err := r.writeResponse(s, status, nil, nil)
+ if err != nil {
+ s.Reset()
+ log.Debug("error writing relay response", "err", err)
+ } else {
+ s.Close()
+ }
+}
+
+func (r *Relay) writeResponse(s network.Stream, status pbv2.Status, rsvp *pbv2.Reservation, limit *pbv2.Limit) error {
+ s.SetWriteDeadline(time.Now().Add(StreamTimeout))
+ defer s.SetWriteDeadline(time.Time{})
+ wr := util.NewDelimitedWriter(s)
+
+ var msg pbv2.HopMessage
+ msg.Type = pbv2.HopMessage_STATUS.Enum()
+ msg.Status = status.Enum()
+ msg.Reservation = rsvp
+ msg.Limit = limit
+
+ return wr.WriteMsg(&msg)
+}
+
+func makeReservationMsg(
+ signingKey crypto.PrivKey,
+ selfID peer.ID,
+ selfAddrs []ma.Multiaddr,
+ p peer.ID,
+ expire time.Time,
+) *pbv2.Reservation {
+ expireUnix := uint64(expire.Unix())
+
+ rsvp := &pbv2.Reservation{Expire: &expireUnix}
+
+ selfP2PAddr, err := ma.NewComponent("p2p", selfID.String())
+ if err != nil {
+ log.Error("error creating p2p component", "err", err)
+ return rsvp
+ }
+
+ addrBytes := make([][]byte, 0, len(selfAddrs))
+ for _, addr := range selfAddrs {
+ if !manet.IsPublicAddr(addr) {
+ continue
+ }
+
+ id, _ := peer.IDFromP2PAddr(addr)
+ switch {
+ case id == "":
+ // No ID, we'll add one to the address
+ addr = addr.Encapsulate(selfP2PAddr)
+ case id == selfID:
+ // This address already has our ID in it.
+ // Do nothing
+ case id != selfID:
+ // This address has a different ID in it. Skip it.
+ log.Warn("skipping address", "addr", addr, "reason", "contains an unexpected ID")
+ continue
+ }
+ addrBytes = append(addrBytes, addr.Bytes())
+ }
+
+ rsvp.Addrs = addrBytes
+
+ voucher := &proto.ReservationVoucher{
+ Relay: selfID,
+ Peer: p,
+ Expiration: expire,
+ }
+
+ envelope, err := record.Seal(voucher, signingKey)
+ if err != nil {
+ log.Error("error sealing voucher", "peer", p, "err", err)
+ return rsvp
+ }
+
+ blob, err := envelope.Marshal()
+ if err != nil {
+ log.Error("error marshalling voucher", "peer", p, "err", err)
+ return rsvp
+ }
+
+ rsvp.Voucher = blob
+
+ return rsvp
+}
+
+func (r *Relay) makeLimitMsg(_ peer.ID) *pbv2.Limit {
+ if r.rc.Limit == nil {
+ return nil
+ }
+
+ duration := uint32(r.rc.Limit.Duration / time.Second)
+ data := uint64(r.rc.Limit.Data)
+
+ return &pbv2.Limit{
+ Duration: &duration,
+ Data: &data,
+ }
+}
+
+func (r *Relay) background() {
+ ticker := time.NewTicker(time.Minute)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ r.gc()
+ case <-r.ctx.Done():
+ return
+ }
+ }
+}
+
+func (r *Relay) gc() {
+ r.mx.Lock()
+ defer r.mx.Unlock()
+
+ now := time.Now()
+ cnt := 0
+ for p, expire := range r.rsvp {
+ if r.closed || expire.Before(now) {
+ delete(r.rsvp, p)
+ r.host.ConnManager().UntagPeer(p, "relay-reservation")
+ cnt++
+ }
+ }
+ if r.metricsTracer != nil {
+ r.metricsTracer.ReservationClosed(cnt)
+ }
+
+ for p, count := range r.conns {
+ if count == 0 {
+ delete(r.conns, p)
+ }
+ }
+}
+
+func (r *Relay) disconnected(n network.Network, c network.Conn) {
+ p := c.RemotePeer()
+ if n.Connectedness(p) == network.Connected {
+ return
+ }
+
+ r.mx.Lock()
+ _, ok := r.rsvp[p]
+ if ok {
+ delete(r.rsvp, p)
+ }
+ r.constraints.cleanupPeer(p)
+ r.mx.Unlock()
+
+ if ok && r.metricsTracer != nil {
+ r.metricsTracer.ReservationClosed(1)
+ }
+}
+
+func isRelayAddr(a ma.Multiaddr) bool {
+ _, err := a.ValueForProtocol(ma.P_CIRCUIT)
+ return err == nil
+}
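The `AllowReserve`/`AllowConnect` hooks used above belong to the package's `ACLFilter` interface, which is defined outside this file; the method shapes below are inferred from the call sites in `handleReserve` and `handleConnect`. A hypothetical allowlist filter:

```go
// allowlistACL is a hypothetical ACLFilter that only serves allowlisted peers.
type allowlistACL struct {
	allowed map[peer.ID]struct{}
}

// AllowReserve permits reservations only from allowlisted peers.
func (a *allowlistACL) AllowReserve(p peer.ID, _ ma.Multiaddr) bool {
	_, ok := a.allowed[p]
	return ok
}

// AllowConnect permits circuits only between two allowlisted peers.
func (a *allowlistACL) AllowConnect(src peer.ID, _ ma.Multiaddr, dest peer.ID) bool {
	_, srcOK := a.allowed[src]
	_, destOK := a.allowed[dest]
	return srcOK && destOK
}
```

It would be installed with `relay.New(h, relay.WithACL(acl))`.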
diff --git a/p2p/protocol/circuitv2/relay/relay_priv_test.go b/p2p/protocol/circuitv2/relay/relay_priv_test.go
new file mode 100644
index 0000000000..637c53e9c6
--- /dev/null
+++ b/p2p/protocol/circuitv2/relay/relay_priv_test.go
@@ -0,0 +1,53 @@
+package relay
+
+import (
+ "crypto/rand"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/stretchr/testify/require"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+func genKeyAndID(t *testing.T) (crypto.PrivKey, peer.ID) {
+ t.Helper()
+ key, _, err := crypto.GenerateEd25519Key(rand.Reader)
+ require.NoError(t, err)
+ id, err := peer.IDFromPrivateKey(key)
+ require.NoError(t, err)
+ return key, id
+}
+
+// TestMakeReservationWithP2PAddrs ensures that our reservation message builder
+// sanitizes the input addresses
+func TestMakeReservationWithP2PAddrs(t *testing.T) {
+ selfKey, selfID := genKeyAndID(t)
+ _, otherID := genKeyAndID(t)
+ _, reserverID := genKeyAndID(t)
+
+ addrs := []ma.Multiaddr{
+ ma.StringCast("/ip4/1.2.3.4/tcp/1234"), // No p2p part
+ ma.StringCast("/ip4/1.2.3.4/tcp/1235/p2p/" + selfID.String()), // Already has p2p part
+		ma.StringCast("/ip4/1.2.3.4/tcp/1236/p2p/" + otherID.String()), // Some other peer (not expected, but this function may receive arbitrary addresses)
+ }
+
+ rsvp := makeReservationMsg(selfKey, selfID, addrs, reserverID, time.Now().Add(time.Minute))
+ require.NotNil(t, rsvp)
+
+ expectedAddrs := []string{
+ "/ip4/1.2.3.4/tcp/1234/p2p/" + selfID.String(),
+ "/ip4/1.2.3.4/tcp/1235/p2p/" + selfID.String(),
+ }
+
+ addrsFromRsvp := make([]string, 0, len(rsvp.GetAddrs()))
+ for _, addr := range rsvp.GetAddrs() {
+ a, err := ma.NewMultiaddrBytes(addr)
+ require.NoError(t, err)
+ addrsFromRsvp = append(addrsFromRsvp, a.String())
+ }
+
+ require.Equal(t, expectedAddrs, addrsFromRsvp)
+}
diff --git a/p2p/protocol/circuitv2/relay/relay_test.go b/p2p/protocol/circuitv2/relay/relay_test.go
new file mode 100644
index 0000000000..a257205383
--- /dev/null
+++ b/p2p/protocol/circuitv2/relay/relay_test.go
@@ -0,0 +1,384 @@
+package relay_test
+
+import (
+ "bytes"
+ "context"
+ "crypto/rand"
+ "errors"
+ "fmt"
+ "io"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/event"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/metrics"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/transport"
+ bhost "github.com/libp2p/go-libp2p/p2p/host/blank"
+ "github.com/libp2p/go-libp2p/p2p/host/eventbus"
+ "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem"
+ "github.com/libp2p/go-libp2p/p2p/net/swarm"
+ swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
+ "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client"
+ "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay"
+ "github.com/libp2p/go-libp2p/p2p/transport/tcp"
+ "github.com/stretchr/testify/require"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+func getNetHosts(t *testing.T, _ context.Context, n int) (hosts []host.Host, upgraders []transport.Upgrader) {
+ for i := 0; i < n; i++ {
+ privk, pubk, err := crypto.GenerateKeyPair(crypto.Ed25519, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ p, err := peer.IDFromPublicKey(pubk)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ps, err := pstoremem.NewPeerstore()
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = ps.AddPrivKey(p, privk)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ bwr := metrics.NewBandwidthCounter()
+ bus := eventbus.NewBus()
+ netw, err := swarm.NewSwarm(p, ps, bus, swarm.WithMetrics(bwr))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ upgrader := swarmt.GenUpgrader(t, netw, nil)
+ upgraders = append(upgraders, upgrader)
+
+ tpt, err := tcp.NewTCPTransport(upgrader, nil, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := netw.AddTransport(tpt); err != nil {
+ t.Fatal(err)
+ }
+
+ err = netw.Listen(ma.StringCast("/ip4/127.0.0.1/tcp/0"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ h := bhost.NewBlankHost(netw, bhost.WithEventBus(bus))
+
+ hosts = append(hosts, h)
+ }
+
+ return hosts, upgraders
+}
+
+func connect(t *testing.T, a, b host.Host) {
+ pi := peer.AddrInfo{ID: a.ID(), Addrs: a.Addrs()}
+ err := b.Connect(context.Background(), pi)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func addTransport(t *testing.T, h host.Host, upgrader transport.Upgrader) {
+ if err := client.AddTransport(h, upgrader); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestBasicRelay(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ hosts, upgraders := getNetHosts(t, ctx, 3)
+ addTransport(t, hosts[0], upgraders[0])
+ addTransport(t, hosts[2], upgraders[2])
+
+ rch := make(chan []byte, 1)
+ hosts[0].SetStreamHandler("test", func(s network.Stream) {
+ defer s.Close()
+ defer close(rch)
+
+ buf := make([]byte, 1024)
+ nread := 0
+ for nread < len(buf) {
+ n, err := s.Read(buf[nread:])
+ nread += n
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ t.Fatal(err)
+ }
+ }
+
+ rch <- buf[:nread]
+ })
+
+ r, err := relay.New(hosts[1])
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer r.Close()
+
+ connect(t, hosts[0], hosts[1])
+ connect(t, hosts[1], hosts[2])
+
+ rinfo := hosts[1].Peerstore().PeerInfo(hosts[1].ID())
+ rsvp, err := client.Reserve(ctx, hosts[0], rinfo)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if rsvp.Voucher == nil {
+ t.Fatal("no reservation voucher")
+ }
+
+ raddr, err := ma.NewMultiaddr(fmt.Sprintf("/p2p/%s/p2p-circuit/p2p/%s", hosts[1].ID(), hosts[0].ID()))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ sub, err := hosts[2].EventBus().Subscribe(new(event.EvtPeerConnectednessChanged))
+ require.NoError(t, err)
+
+ err = hosts[2].Connect(ctx, peer.AddrInfo{ID: hosts[0].ID(), Addrs: []ma.Multiaddr{raddr}})
+ if err != nil {
+ t.Fatal(err)
+ }
+ for {
+ var e interface{}
+ select {
+ case e = <-sub.Out():
+ case <-time.After(2 * time.Second):
+ t.Fatal("expected limited connectivity event")
+ }
+ evt, ok := e.(event.EvtPeerConnectednessChanged)
+ if !ok {
+ t.Fatalf("invalid event: %s", e)
+ }
+ if evt.Peer == hosts[0].ID() {
+ if evt.Connectedness != network.Limited {
+ t.Fatalf("expected limited connectivity %s", evt.Connectedness)
+ }
+ break
+ }
+ }
+
+ conns := hosts[2].Network().ConnsToPeer(hosts[0].ID())
+ if len(conns) != 1 {
+ t.Fatalf("expected 1 connection, but got %d", len(conns))
+ }
+ if !conns[0].Stat().Limited {
+		t.Fatal("expected limited connection")
+ }
+
+ s, err := hosts[2].NewStream(network.WithAllowLimitedConn(ctx, "test"), hosts[0].ID(), "test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ msg := []byte("relay works!")
+ nwritten, err := s.Write(msg)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if nwritten != len(msg) {
+ t.Fatalf("expected to write %d bytes, but wrote %d instead", len(msg), nwritten)
+ }
+ s.CloseWrite()
+
+ got := <-rch
+ if !bytes.Equal(msg, got) {
+ t.Fatalf("Wrong echo; expected %s but got %s", string(msg), string(got))
+ }
+}
+
+func TestRelayLimitTime(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ hosts, upgraders := getNetHosts(t, ctx, 3)
+ addTransport(t, hosts[0], upgraders[0])
+ addTransport(t, hosts[2], upgraders[2])
+
+ rch := make(chan error, 1)
+ hosts[0].SetStreamHandler("test", func(s network.Stream) {
+ defer s.Close()
+ defer close(rch)
+
+ buf := make([]byte, 1024)
+ _, err := s.Read(buf)
+ rch <- err
+ })
+
+ rc := relay.DefaultResources()
+ rc.Limit.Duration = time.Second
+
+ r, err := relay.New(hosts[1], relay.WithResources(rc))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer r.Close()
+
+ connect(t, hosts[0], hosts[1])
+ connect(t, hosts[1], hosts[2])
+
+ rinfo := hosts[1].Peerstore().PeerInfo(hosts[1].ID())
+ _, err = client.Reserve(ctx, hosts[0], rinfo)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ raddr, err := ma.NewMultiaddr(fmt.Sprintf("/p2p/%s/p2p-circuit/p2p/%s", hosts[1].ID(), hosts[0].ID()))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = hosts[2].Connect(ctx, peer.AddrInfo{ID: hosts[0].ID(), Addrs: []ma.Multiaddr{raddr}})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ conns := hosts[2].Network().ConnsToPeer(hosts[0].ID())
+ if len(conns) != 1 {
+ t.Fatalf("expected 1 connection, but got %d", len(conns))
+ }
+ if !conns[0].Stat().Limited {
+		t.Fatal("expected limited connection")
+ }
+
+ s, err := hosts[2].NewStream(network.WithAllowLimitedConn(ctx, "test"), hosts[0].ID(), "test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ time.Sleep(2 * time.Second)
+ n, err := s.Write([]byte("should be closed"))
+ if n > 0 {
+ t.Fatalf("expected to write 0 bytes, wrote %d", n)
+ }
+ if !errors.Is(err, network.ErrReset) {
+ t.Fatalf("expected reset, but got %s", err)
+ }
+
+ err = <-rch
+ if !errors.Is(err, network.ErrReset) {
+ t.Fatalf("expected reset, but got %s", err)
+ }
+}
+
+func TestRelayLimitData(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ hosts, upgraders := getNetHosts(t, ctx, 3)
+ addTransport(t, hosts[0], upgraders[0])
+ addTransport(t, hosts[2], upgraders[2])
+
+ rch := make(chan int, 1)
+ hosts[0].SetStreamHandler("test", func(s network.Stream) {
+ defer s.Close()
+ defer close(rch)
+
+ buf := make([]byte, 1024)
+ for i := 0; i < 3; i++ {
+ n, err := s.Read(buf)
+ if err != nil {
+ t.Fatal(err)
+ }
+ rch <- n
+ }
+
+ n, err := s.Read(buf)
+ if !errors.Is(err, network.ErrReset) {
+ t.Fatalf("expected reset but got %s", err)
+ }
+ rch <- n
+ })
+
+ rc := relay.DefaultResources()
+ rc.Limit.Duration = time.Second
+ rc.Limit.Data = 4096
+
+ r, err := relay.New(hosts[1], relay.WithResources(rc))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer r.Close()
+
+ connect(t, hosts[0], hosts[1])
+ connect(t, hosts[1], hosts[2])
+
+ rinfo := hosts[1].Peerstore().PeerInfo(hosts[1].ID())
+ _, err = client.Reserve(ctx, hosts[0], rinfo)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ raddr, err := ma.NewMultiaddr(fmt.Sprintf("/p2p/%s/p2p-circuit/p2p/%s", hosts[1].ID(), hosts[0].ID()))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = hosts[2].Connect(ctx, peer.AddrInfo{ID: hosts[0].ID(), Addrs: []ma.Multiaddr{raddr}})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ conns := hosts[2].Network().ConnsToPeer(hosts[0].ID())
+ if len(conns) != 1 {
+ t.Fatalf("expected 1 connection, but got %d", len(conns))
+ }
+ if !conns[0].Stat().Limited {
+		t.Fatal("expected limited connection")
+ }
+
+ s, err := hosts[2].NewStream(network.WithAllowLimitedConn(ctx, "test"), hosts[0].ID(), "test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ buf := make([]byte, 1024)
+ for i := 0; i < 3; i++ {
+ if _, err := rand.Read(buf); err != nil {
+ t.Fatal(err)
+ }
+
+ n, err := s.Write(buf)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if n != len(buf) {
+ t.Fatalf("expected to write %d bytes but wrote %d", len(buf), n)
+ }
+
+ n = <-rch
+ if n != len(buf) {
+ t.Fatalf("expected to read %d bytes but read %d", len(buf), n)
+ }
+ }
+
+ buf = make([]byte, 4096)
+ if _, err := rand.Read(buf); err != nil {
+ t.Fatal(err)
+ }
+
+ s.Write(buf)
+
+ n := <-rch
+ if n != 0 {
+ t.Fatalf("expected to read 0 bytes but read %d", n)
+ }
+
+}
diff --git a/p2p/protocol/circuitv2/relay/resources.go b/p2p/protocol/circuitv2/relay/resources.go
new file mode 100644
index 0000000000..dc0207bca6
--- /dev/null
+++ b/p2p/protocol/circuitv2/relay/resources.go
@@ -0,0 +1,68 @@
+package relay
+
+import (
+ "time"
+)
+
+// Resources are the resource limits associated with the relay service.
+type Resources struct {
+ // Limit is the (optional) relayed connection limits.
+ Limit *RelayLimit
+
+	// ReservationTTL is the duration of a new (or refreshed) reservation.
+ // Defaults to 1hr.
+ ReservationTTL time.Duration
+
+ // MaxReservations is the maximum number of active relay slots; defaults to 128.
+ MaxReservations int
+ // MaxCircuits is the maximum number of open relay connections for each peer; defaults to 16.
+ MaxCircuits int
+ // BufferSize is the size of the relayed connection buffers; defaults to 2048.
+ BufferSize int
+
+ // MaxReservationsPerPeer is the maximum number of reservations originating from the same
+	// peer; defaults to 1.
+ //
+ // Deprecated: We only need 1 reservation per peer.
+ MaxReservationsPerPeer int
+ // MaxReservationsPerIP is the maximum number of reservations originating from the same
+ // IP address; default is 8.
+ MaxReservationsPerIP int
+	// MaxReservationsPerASN is the maximum number of reservations originating from the same
+	// ASN; defaults to 32.
+ MaxReservationsPerASN int
+}
+
+// RelayLimit are the per relayed connection resource limits.
+type RelayLimit struct {
+ // Duration is the time limit before resetting a relayed connection; defaults to 2min.
+ Duration time.Duration
+	// Data is the limit of data relayed (in each direction) before resetting the connection.
+	// Defaults to 128KB.
+ Data int64
+}
+
+// DefaultResources returns a Resources object with the defaults filled in.
+func DefaultResources() Resources {
+ return Resources{
+ Limit: DefaultLimit(),
+
+ ReservationTTL: time.Hour,
+
+ MaxReservations: 128,
+ MaxCircuits: 16,
+ BufferSize: 2048,
+
+ MaxReservationsPerPeer: 1,
+ MaxReservationsPerIP: 8,
+ MaxReservationsPerASN: 32,
+ }
+}
+
+// DefaultLimit returns a RelayLimit object with the defaults filled in.
+func DefaultLimit() *RelayLimit {
+ return &RelayLimit{
+ Duration: 2 * time.Minute,
+ Data: 1 << 17, // 128K
+ }
+}
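For a relay that only serves trusted peers, the per-connection limits can be switched off entirely; note that an unlimited relay will carry arbitrarily large flows. A sketch:

```go
// Sketch: relayed connections are neither reset after the default
// 2 minute duration nor after 128K of data in either direction.
r, err := relay.New(h, relay.WithInfiniteLimits())
if err != nil {
	return err
}
defer r.Close()
```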
diff --git a/p2p/protocol/circuitv2/util/io.go b/p2p/protocol/circuitv2/util/io.go
new file mode 100644
index 0000000000..21e888d9f7
--- /dev/null
+++ b/p2p/protocol/circuitv2/util/io.go
@@ -0,0 +1,66 @@
+package util
+
+import (
+ "errors"
+ "io"
+
+ pool "github.com/libp2p/go-buffer-pool"
+ "github.com/libp2p/go-msgio/pbio"
+ "github.com/multiformats/go-varint"
+ "google.golang.org/protobuf/proto"
+)
+
+type DelimitedReader struct {
+ r io.Reader
+ buf []byte
+}
+
+// The gogo protobuf NewDelimitedReader is buffered, which may eat up stream data.
+// So we need to implement a compatible delimited reader that reads unbuffered.
+// There is a slowdown from unbuffered reading: reading the message length
+// can take multiple single-byte Reads, and reading the payload takes one
+// more Read.
+// However, this is not a critical performance degradation, as
+// - the reader is only used for one (dialer, stop) or two messages (hop) during
+// the handshake, which is negligible over the lifetime of the connection.
+// - messages are small (max 4k) and the length fits in a couple of bytes,
+// so overall we have at most three reads per message.
+func NewDelimitedReader(r io.Reader, maxSize int) *DelimitedReader {
+ return &DelimitedReader{r: r, buf: pool.Get(maxSize)}
+}
+
+func (d *DelimitedReader) Close() {
+ if d.buf != nil {
+ pool.Put(d.buf)
+ d.buf = nil
+ }
+}
+
+func (d *DelimitedReader) ReadByte() (byte, error) {
+ buf := d.buf[:1]
+ _, err := d.r.Read(buf)
+ return buf[0], err
+}
+
+func (d *DelimitedReader) ReadMsg(msg proto.Message) error {
+ mlen, err := varint.ReadUvarint(d)
+ if err != nil {
+ return err
+ }
+
+ if uint64(len(d.buf)) < mlen {
+ return errors.New("message too large")
+ }
+
+ buf := d.buf[:mlen]
+ _, err = io.ReadFull(d.r, buf)
+ if err != nil {
+ return err
+ }
+
+ return proto.Unmarshal(buf, msg)
+}
+
+func NewDelimitedWriter(w io.Writer) pbio.WriteCloser {
+ return pbio.NewDelimitedWriter(w)
+}
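A round-trip sketch of the delimited codec, with `bytes.Buffer` standing in for the network stream and the relay's 4k message cap as the read bound:

```go
var buf bytes.Buffer

// Write one length-delimited HopMessage.
wr := util.NewDelimitedWriter(&buf)
msg := &pbv2.HopMessage{Type: pbv2.HopMessage_RESERVE.Enum()}
if err := wr.WriteMsg(msg); err != nil {
	return err
}

// Read it back; ReadMsg rejects anything larger than the given max size.
rd := util.NewDelimitedReader(&buf, 4096)
defer rd.Close()
var out pbv2.HopMessage
if err := rd.ReadMsg(&out); err != nil {
	return err
}
```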
diff --git a/p2p/protocol/circuitv2/util/pbconv.go b/p2p/protocol/circuitv2/util/pbconv.go
new file mode 100644
index 0000000000..f5b72bf05b
--- /dev/null
+++ b/p2p/protocol/circuitv2/util/pbconv.go
@@ -0,0 +1,44 @@
+package util
+
+import (
+ "errors"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+ pbv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+func PeerToPeerInfoV2(p *pbv2.Peer) (peer.AddrInfo, error) {
+ if p == nil {
+ return peer.AddrInfo{}, errors.New("nil peer")
+ }
+
+ id, err := peer.IDFromBytes(p.Id)
+ if err != nil {
+ return peer.AddrInfo{}, err
+ }
+
+ addrs := make([]ma.Multiaddr, 0, len(p.Addrs))
+
+ for _, addrBytes := range p.Addrs {
+ a, err := ma.NewMultiaddrBytes(addrBytes)
+ if err == nil {
+ addrs = append(addrs, a)
+ }
+ }
+
+ return peer.AddrInfo{ID: id, Addrs: addrs}, nil
+}
+
+func PeerInfoToPeerV2(pi peer.AddrInfo) *pbv2.Peer {
+ addrs := make([][]byte, 0, len(pi.Addrs))
+ for _, addr := range pi.Addrs {
+ addrs = append(addrs, addr.Bytes())
+ }
+
+ return &pbv2.Peer{
+ Id: []byte(pi.ID),
+ Addrs: addrs,
+ }
+}
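The two converters round-trip a `peer.AddrInfo` through its protobuf form; invalid address bytes are silently dropped on the way back, so only a nil peer or a bad peer ID yields an error. A sketch, where `id` is assumed to be an existing `peer.ID`:

```go
info := peer.AddrInfo{
	ID:    id,
	Addrs: []ma.Multiaddr{ma.StringCast("/ip4/1.2.3.4/tcp/4001")},
}
pbPeer := util.PeerInfoToPeerV2(info)

back, err := util.PeerToPeerInfoV2(pbPeer)
if err != nil {
	return err
}
// back.ID == info.ID and back.Addrs contains the same addresses.
```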
diff --git a/p2p/protocol/holepunch/filter.go b/p2p/protocol/holepunch/filter.go
new file mode 100644
index 0000000000..5c1a4f5342
--- /dev/null
+++ b/p2p/protocol/holepunch/filter.go
@@ -0,0 +1,27 @@
+package holepunch
+
+import (
+ "github.com/libp2p/go-libp2p/core/peer"
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+// WithAddrFilter is a Service option that enables multiaddress filtering.
+// It makes it possible to send only a subset of observed addresses to the
+// remote peer, e.g. to announce only TCP or only QUIC multiaddresses instead of both.
+// It also makes it possible to consider only a subset of the multiaddresses
+// that remote peers announced to us.
+// In principle, this API also allows adding multiaddresses in both cases.
+func WithAddrFilter(f AddrFilter) Option {
+ return func(hps *Service) error {
+ hps.filter = f
+ return nil
+ }
+}
+
+// AddrFilter defines the interface for multiaddress filtering.
+type AddrFilter interface {
+	// FilterLocal filters the multiaddresses that are sent to the remote peer.
+	FilterLocal(remoteID peer.ID, maddrs []ma.Multiaddr) []ma.Multiaddr
+	// FilterRemote filters the multiaddresses received from the remote peer.
+	FilterRemote(remoteID peer.ID, maddrs []ma.Multiaddr) []ma.Multiaddr
+}
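A hypothetical filter for the QUIC-only case mentioned in the `WithAddrFilter` docs; `quicOnlyFilter` and `keepQUIC` are illustrative names:

```go
type quicOnlyFilter struct{}

func keepQUIC(maddrs []ma.Multiaddr) []ma.Multiaddr {
	out := make([]ma.Multiaddr, 0, len(maddrs))
	for _, a := range maddrs {
		// ValueForProtocol returns a nil error iff the component is present.
		if _, err := a.ValueForProtocol(ma.P_QUIC_V1); err == nil {
			out = append(out, a)
		}
	}
	return out
}

// FilterLocal announces only our QUIC addresses to the remote peer.
func (quicOnlyFilter) FilterLocal(_ peer.ID, maddrs []ma.Multiaddr) []ma.Multiaddr {
	return keepQUIC(maddrs)
}

// FilterRemote considers only the remote peer's QUIC addresses.
func (quicOnlyFilter) FilterRemote(_ peer.ID, maddrs []ma.Multiaddr) []ma.Multiaddr {
	return keepQUIC(maddrs)
}
```

It would be enabled with `libp2p.EnableHolePunching(holepunch.WithAddrFilter(quicOnlyFilter{}))`.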
diff --git a/p2p/protocol/holepunch/holepunch_test.go b/p2p/protocol/holepunch/holepunch_test.go
new file mode 100644
index 0000000000..b96c94685b
--- /dev/null
+++ b/p2p/protocol/holepunch/holepunch_test.go
@@ -0,0 +1,768 @@
+package holepunch_test
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "slices"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ relayv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay"
+ "github.com/libp2p/go-libp2p/p2p/protocol/holepunch"
+ holepunch_pb "github.com/libp2p/go-libp2p/p2p/protocol/holepunch/pb"
+ "github.com/libp2p/go-libp2p/p2p/protocol/identify"
+ "github.com/libp2p/go-libp2p/p2p/protocol/ping"
+ "github.com/libp2p/go-libp2p/p2p/transport/quicreuse"
+ "github.com/marcopolo/simnet"
+ "go.uber.org/fx"
+
+ "github.com/libp2p/go-msgio/pbio"
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+type mockEventTracer struct {
+ mutex sync.Mutex
+ events []*holepunch.Event
+}
+
+func (m *mockEventTracer) Trace(evt *holepunch.Event) {
+ m.mutex.Lock()
+ m.events = append(m.events, evt)
+ m.mutex.Unlock()
+}
+
+func (m *mockEventTracer) getEvents() []*holepunch.Event {
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
+ // copy the slice
+ return append([]*holepunch.Event{}, m.events...)
+}
+
+var _ holepunch.EventTracer = &mockEventTracer{}
+
+type mockMaddrFilter struct {
+ filterLocal func(remoteID peer.ID, maddrs []ma.Multiaddr) []ma.Multiaddr
+ filterRemote func(remoteID peer.ID, maddrs []ma.Multiaddr) []ma.Multiaddr
+}
+
+func (m mockMaddrFilter) FilterLocal(remoteID peer.ID, maddrs []ma.Multiaddr) []ma.Multiaddr {
+ return m.filterLocal(remoteID, maddrs)
+}
+
+func (m mockMaddrFilter) FilterRemote(remoteID peer.ID, maddrs []ma.Multiaddr) []ma.Multiaddr {
+ return m.filterRemote(remoteID, maddrs)
+}
+
+var _ holepunch.AddrFilter = &mockMaddrFilter{}
+
+func newIDService(t *testing.T, h host.Host) identify.IDService {
+ ids, err := identify.NewIDService(h)
+ require.NoError(t, err)
+ ids.Start()
+ t.Cleanup(func() { ids.Close() })
+ return ids
+}
+
+func TestNoHolePunchIfDirectConnExists(t *testing.T) {
+ router := &simnet.SimpleFirewallRouter{}
+ relay := MustNewHost(t,
+ quicSimnet(true, router),
+ libp2p.ListenAddrs(ma.StringCast("/ip4/1.2.0.1/udp/8000/quic-v1")),
+ libp2p.DisableRelay(),
+ libp2p.ResourceManager(&network.NullResourceManager{}),
+ libp2p.WithFxOption(fx.Invoke(func(h host.Host) {
+ // Setup relay service
+ _, err := relayv2.New(h)
+ require.NoError(t, err)
+ })),
+ )
+
+ tr := &mockEventTracer{}
+ h1 := MustNewHost(t,
+ quicSimnet(false, router),
+ libp2p.EnableHolePunching(holepunch.DirectDialTimeout(100*time.Millisecond)),
+ libp2p.ListenAddrs(ma.StringCast("/ip4/2.2.0.1/udp/8000/quic-v1")),
+ libp2p.ResourceManager(&network.NullResourceManager{}),
+ )
+
+ h2 := MustNewHost(t,
+ quicSimnet(true, router),
+ libp2p.ListenAddrs(ma.StringCast("/ip4/2.2.0.2/udp/8001/quic-v1")),
+ libp2p.ResourceManager(&network.NullResourceManager{}),
+ libp2p.ForceReachabilityPublic(),
+ connectToRelay(&relay),
+ libp2p.EnableHolePunching(holepunch.WithTracer(tr), holepunch.DirectDialTimeout(100*time.Millisecond)),
+ )
+
+ defer h1.Close()
+ defer h2.Close()
+ defer relay.Close()
+
+ waitForHolePunchingSvcActive(t, h1)
+ waitForHolePunchingSvcActive(t, h2)
+
+ h1.Peerstore().AddAddrs(h2.ID(), h2.Addrs(), peerstore.ConnectedAddrTTL)
+ // try to hole punch without any connection and streams, if it works -> it's a direct connection
+ require.Empty(t, h1.Network().ConnsToPeer(h2.ID()))
+ pingAtoB(t, h1, h2)
+
+ nc1 := len(h1.Network().ConnsToPeer(h2.ID()))
+ require.Equal(t, nc1, 1)
+ nc2 := len(h2.Network().ConnsToPeer(h1.ID()))
+ require.Equal(t, nc2, 1)
+ assert.Never(t, func() bool {
+ return (len(h1.Network().ConnsToPeer(h2.ID())) != nc1 ||
+ len(h2.Network().ConnsToPeer(h1.ID())) != nc2 ||
+ len(tr.getEvents()) != 0)
+ }, time.Second, 100*time.Millisecond)
+}
+
+func TestDirectDialWorks(t *testing.T) {
+ router := &simnet.SimpleFirewallRouter{}
+ relay := MustNewHost(t,
+ quicSimnet(true, router),
+ libp2p.ListenAddrs(ma.StringCast("/ip4/1.2.0.1/udp/8000/quic-v1")),
+ libp2p.DisableRelay(),
+ libp2p.ResourceManager(&network.NullResourceManager{}),
+ libp2p.WithFxOption(fx.Invoke(func(h host.Host) {
+ // Setup relay service
+ _, err := relayv2.New(h)
+ require.NoError(t, err)
+ })),
+ )
+
+ tr := &mockEventTracer{}
+ // h1 is public
+ h1 := MustNewHost(t,
+ quicSimnet(true, router),
+ libp2p.ForceReachabilityPublic(),
+ libp2p.EnableHolePunching(holepunch.DirectDialTimeout(100*time.Millisecond)),
+ libp2p.ListenAddrs(ma.StringCast("/ip4/2.2.0.1/udp/8000/quic-v1")),
+ libp2p.ResourceManager(&network.NullResourceManager{}),
+ )
+
+ h2 := MustNewHost(t,
+ quicSimnet(false, router),
+ libp2p.ListenAddrs(ma.StringCast("/ip4/2.2.0.2/udp/8001/quic-v1")),
+ libp2p.ResourceManager(&network.NullResourceManager{}),
+ connectToRelay(&relay),
+ libp2p.EnableHolePunching(holepunch.WithTracer(tr), holepunch.DirectDialTimeout(100*time.Millisecond)),
+ libp2p.ForceReachabilityPrivate(),
+ )
+
+ defer h1.Close()
+ defer h2.Close()
+ defer relay.Close()
+
+ // wait for dcutr to be available
+ waitForHolePunchingSvcActive(t, h2)
+
+ h1.Peerstore().AddAddrs(h2.ID(), h2.Addrs(), peerstore.ConnectedAddrTTL)
+ // try to hole punch without any connection and streams, if it works -> it's a direct connection
+ require.Empty(t, h1.Network().ConnsToPeer(h2.ID()))
+ pingAtoB(t, h1, h2)
+
+ // require.NoError(t, h1ps.DirectConnect(h2.ID()))
+ require.GreaterOrEqual(t, len(h1.Network().ConnsToPeer(h2.ID())), 1)
+ require.GreaterOrEqual(t, len(h2.Network().ConnsToPeer(h1.ID())), 1)
+ require.EventuallyWithT(t, func(collect *assert.CollectT) {
+ events := tr.getEvents()
+ fmt.Println("events:", events)
+ if !assert.Len(collect, events, 1) {
+ return
+ }
+ assert.Equal(t, holepunch.DirectDialEvtT, events[0].Type)
+ }, 2*time.Second, 100*time.Millisecond)
+}
+
+func connectToRelay(relayPtr *host.Host) libp2p.Option {
+ return func(cfg *libp2p.Config) error {
+ if relayPtr == nil {
+ return nil
+ }
+ relay := *relayPtr
+ pi := peer.AddrInfo{
+ ID: relay.ID(),
+ Addrs: relay.Addrs(),
+ }
+
+ return cfg.Apply(
+ libp2p.EnableRelay(),
+ libp2p.EnableAutoRelayWithStaticRelays([]peer.AddrInfo{pi}),
+ )
+ }
+}
+
+func learnAddrs(h1, h2 host.Host) {
+ h1.Peerstore().AddAddrs(h2.ID(), h2.Addrs(), peerstore.ConnectedAddrTTL)
+ h2.Peerstore().AddAddrs(h1.ID(), h1.Addrs(), peerstore.ConnectedAddrTTL)
+}
+
+func pingAtoB(t *testing.T, a, b host.Host) {
+ t.Helper()
+ p1 := ping.NewPingService(a)
+ require.NoError(t, a.Connect(context.Background(), peer.AddrInfo{
+ ID: b.ID(),
+ Addrs: b.Addrs(),
+ }))
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ res := p1.Ping(ctx, b.ID())
+ result := <-res
+ require.NoError(t, result.Error)
+}
+
+func MustNewHost(t *testing.T, opts ...libp2p.Option) host.Host {
+ t.Helper()
+ h, err := libp2p.New(opts...)
+ require.NoError(t, err)
+ return h
+}
+
+func TestEndToEndSimConnect(t *testing.T) {
+ for _, useLegacyHolePunchingBehavior := range []bool{true, false} {
+ t.Run(fmt.Sprintf("legacy=%t", useLegacyHolePunchingBehavior), func(t *testing.T) {
+ h1tr := &mockEventTracer{}
+ h2tr := &mockEventTracer{}
+
+ router := &simnet.SimpleFirewallRouter{}
+ relay := MustNewHost(t,
+ quicSimnet(true, router),
+ libp2p.ListenAddrs(ma.StringCast("/ip4/1.2.0.1/udp/8000/quic-v1")),
+ libp2p.DisableRelay(),
+ libp2p.ResourceManager(&network.NullResourceManager{}),
+ libp2p.WithFxOption(fx.Invoke(func(h host.Host) {
+ // Setup relay service
+ _, err := relayv2.New(h)
+ require.NoError(t, err)
+ })),
+ )
+
+ h1 := MustNewHost(t,
+ quicSimnet(false, router),
+ libp2p.EnableHolePunching(holepunch.WithTracer(h1tr), holepunch.DirectDialTimeout(100*time.Millisecond), SetLegacyBehavior(useLegacyHolePunchingBehavior)),
+ libp2p.ListenAddrs(ma.StringCast("/ip4/2.2.0.1/udp/8000/quic-v1")),
+ libp2p.ResourceManager(&network.NullResourceManager{}),
+ libp2p.ForceReachabilityPrivate(),
+ )
+
+ h2 := MustNewHost(t,
+ quicSimnet(false, router),
+ libp2p.ListenAddrs(ma.StringCast("/ip4/2.2.0.2/udp/8001/quic-v1")),
+ libp2p.ResourceManager(&network.NullResourceManager{}),
+ connectToRelay(&relay),
+ libp2p.EnableHolePunching(holepunch.WithTracer(h2tr), holepunch.DirectDialTimeout(100*time.Millisecond), SetLegacyBehavior(useLegacyHolePunchingBehavior)),
+ libp2p.ForceReachabilityPrivate(),
+ )
+
+ defer h1.Close()
+ defer h2.Close()
+ defer relay.Close()
+
+ // Wait for holepunch service to start
+ waitForHolePunchingSvcActive(t, h1)
+ waitForHolePunchingSvcActive(t, h2)
+
+ learnAddrs(h1, h2)
+ pingAtoB(t, h1, h2)
+
+ // wait till a direct connection is complete
+ ensureDirectConn(t, h1, h2)
+ // ensure no hole-punching streams are open on either side
+ ensureNoHolePunchingStream(t, h1, h2)
+ var h2Events []*holepunch.Event
+ require.Eventually(t,
+ func() bool {
+ h2Events = h2tr.getEvents()
+ return len(h2Events) == 4
+ },
+ time.Second,
+ 100*time.Millisecond,
+ )
+ require.Equal(t, holepunch.DirectDialEvtT, h2Events[0].Type)
+ require.Equal(t, holepunch.StartHolePunchEvtT, h2Events[1].Type)
+ require.Equal(t, holepunch.HolePunchAttemptEvtT, h2Events[2].Type)
+ require.Equal(t, holepunch.EndHolePunchEvtT, h2Events[3].Type)
+
+ h1Events := h1tr.getEvents()
+ // We don't really expect a hole-punched connection to be established in this test,
+ // as we probably don't get the timing right for the TCP simultaneous open.
+ // From time to time, it still happens occasionally, and then we get a EndHolePunchEvtT here.
+ if len(h1Events) != 2 && len(h1Events) != 3 {
+ t.Fatal("expected either 2 or 3 events")
+ }
+ require.Equal(t, holepunch.StartHolePunchEvtT, h1Events[0].Type)
+ require.Equal(t, holepunch.HolePunchAttemptEvtT, h1Events[1].Type)
+ if len(h1Events) == 3 {
+ require.Equal(t, holepunch.EndHolePunchEvtT, h1Events[2].Type)
+ }
+ })
+ }
+}
+
+func TestFailuresOnInitiator(t *testing.T) {
+ tcs := map[string]struct {
+ rhandler func(s network.Stream)
+ errMsg string
+ holePunchTimeout time.Duration
+ filter func(remoteID peer.ID, maddrs []ma.Multiaddr) []ma.Multiaddr
+ }{
+ "responder does NOT send a CONNECT message": {
+ rhandler: func(s network.Stream) {
+ wr := pbio.NewDelimitedWriter(s)
+ wr.WriteMsg(&holepunch_pb.HolePunch{Type: holepunch_pb.HolePunch_SYNC.Enum()})
+ },
+ errMsg: "expect CONNECT message, got SYNC",
+ },
+ "responder does NOT support protocol": {
+ rhandler: nil,
+ },
+ "unable to READ CONNECT message from responder": {
+ rhandler: func(s network.Stream) {
+ s.Reset()
+ },
+ errMsg: "failed to read CONNECT message",
+ },
+ "responder does NOT reply within hole punch deadline": {
+ holePunchTimeout: 200 * time.Millisecond,
+ rhandler: func(_ network.Stream) { time.Sleep(5 * time.Second) },
+ errMsg: "i/o deadline reached",
+ },
+ "no addrs after filtering": {
+ errMsg: "aborting hole punch initiation as we have no public address",
+ rhandler: func(_ network.Stream) { time.Sleep(5 * time.Second) },
+ filter: func(_ peer.ID, _ []ma.Multiaddr) []ma.Multiaddr {
+ return []ma.Multiaddr{}
+ },
+ },
+ }
+
+ for name, tc := range tcs {
+ t.Run(name, func(t *testing.T) {
+ if tc.holePunchTimeout != 0 {
+ cpy := holepunch.StreamTimeout
+ holepunch.StreamTimeout = tc.holePunchTimeout
+ defer func() { holepunch.StreamTimeout = cpy }()
+ }
+
+ router := &simnet.SimpleFirewallRouter{}
+ relay := MustNewHost(t,
+ quicSimnet(true, router),
+ libp2p.ListenAddrs(ma.StringCast("/ip4/1.2.0.1/udp/8000/quic-v1")),
+ libp2p.DisableRelay(),
+ libp2p.ResourceManager(&network.NullResourceManager{}),
+ libp2p.WithFxOption(fx.Invoke(func(h host.Host) {
+ // Setup relay service
+ _, err := relayv2.New(h)
+ require.NoError(t, err)
+ })),
+ )
+
+ // h1 does not have a holepunching service because we'll mock the holepunching stream handler below.
+ h1 := MustNewHost(t,
+ quicSimnet(false, router),
+ libp2p.ForceReachabilityPrivate(),
+ libp2p.ListenAddrs(ma.StringCast("/ip4/2.2.0.1/udp/8000/quic-v1")),
+ libp2p.ResourceManager(&network.NullResourceManager{}),
+ connectToRelay(&relay),
+ )
+
+ h2 := MustNewHost(t,
+ quicSimnet(false, router),
+ libp2p.ListenAddrs(ma.StringCast("/ip4/2.2.0.2/udp/8001/quic-v1")),
+ libp2p.ResourceManager(&network.NullResourceManager{}),
+ connectToRelay(&relay),
+ )
+
+ defer h1.Close()
+ defer h2.Close()
+ defer relay.Close()
+
+ time.Sleep(100 * time.Millisecond)
+
+ tr := &mockEventTracer{}
+ opts := []holepunch.Option{holepunch.WithTracer(tr), holepunch.DirectDialTimeout(100 * time.Millisecond)}
+ if tc.filter != nil {
+ f := mockMaddrFilter{
+ filterLocal: tc.filter,
+ filterRemote: tc.filter,
+ }
+ opts = append(opts, holepunch.WithAddrFilter(f))
+ }
+
+ hps := addHolePunchService(t, h2, []ma.Multiaddr{ma.StringCast("/ip4/2.2.0.2/udp/8001/quic-v1")}, opts...)
+ // We are only holepunching from h2 to h1. Remove h2's holepunching stream handler to avoid confusion.
+ h2.RemoveStreamHandler(holepunch.Protocol)
+ if tc.rhandler != nil {
+ h1.SetStreamHandler(holepunch.Protocol, tc.rhandler)
+ }
+
+ require.NoError(t, h2.Connect(context.Background(), peer.AddrInfo{
+ ID: h1.ID(),
+ Addrs: h1.Addrs(),
+ }))
+
+ err := hps.DirectConnect(h1.ID())
+ require.Error(t, err)
+ if tc.errMsg != "" {
+ require.Contains(t, err.Error(), tc.errMsg)
+ }
+ })
+ }
+}
+
+func addrsToBytes(as []ma.Multiaddr) [][]byte {
+ bzs := make([][]byte, 0, len(as))
+ for _, a := range as {
+ bzs = append(bzs, a.Bytes())
+ }
+ return bzs
+}
+
+func TestFailuresOnResponder(t *testing.T) {
+ tcs := map[string]struct {
+ initiator func(s network.Stream)
+ errMsg string
+ holePunchTimeout time.Duration
+ filter func(remoteID peer.ID, maddrs []ma.Multiaddr) []ma.Multiaddr
+ }{
+ "initiator does NOT send a CONNECT message": {
+ initiator: func(s network.Stream) {
+ pbio.NewDelimitedWriter(s).WriteMsg(&holepunch_pb.HolePunch{Type: holepunch_pb.HolePunch_SYNC.Enum()})
+ },
+ errMsg: "expected CONNECT message",
+ },
+ "initiator does NOT send a SYNC message after a CONNECT message": {
+ initiator: func(s network.Stream) {
+ w := pbio.NewDelimitedWriter(s)
+ w.WriteMsg(&holepunch_pb.HolePunch{
+ Type: holepunch_pb.HolePunch_CONNECT.Enum(),
+ ObsAddrs: addrsToBytes([]ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/1234")}),
+ })
+ w.WriteMsg(&holepunch_pb.HolePunch{Type: holepunch_pb.HolePunch_CONNECT.Enum()})
+ },
+ errMsg: "expected SYNC message",
+ },
+ "initiator does NOT reply within hole punch deadline": {
+ holePunchTimeout: 10 * time.Millisecond,
+ initiator: func(s network.Stream) {
+ pbio.NewDelimitedWriter(s).WriteMsg(&holepunch_pb.HolePunch{
+ Type: holepunch_pb.HolePunch_CONNECT.Enum(),
+ ObsAddrs: addrsToBytes([]ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/1234")}),
+ })
+ time.Sleep(10 * time.Second)
+ },
+ errMsg: "i/o deadline reached",
+ },
+ "initiator does NOT send any addresses in CONNECT": {
+ holePunchTimeout: 10 * time.Millisecond,
+ initiator: func(s network.Stream) {
+ pbio.NewDelimitedWriter(s).WriteMsg(&holepunch_pb.HolePunch{Type: holepunch_pb.HolePunch_CONNECT.Enum()})
+ time.Sleep(10 * time.Second)
+ },
+ errMsg: "expected CONNECT message to contain at least one address",
+ },
+ "no addrs after filtering": {
+ errMsg: "rejecting hole punch request, as we don't have any public addresses",
+ initiator: func(s network.Stream) {
+ pbio.NewDelimitedWriter(s).WriteMsg(&holepunch_pb.HolePunch{
+ Type: holepunch_pb.HolePunch_CONNECT.Enum(),
+ ObsAddrs: addrsToBytes([]ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/1234")}),
+ })
+ time.Sleep(10 * time.Second)
+ },
+ filter: func(_ peer.ID, _ []ma.Multiaddr) []ma.Multiaddr {
+ return []ma.Multiaddr{}
+ },
+ },
+ }
+
+ for name, tc := range tcs {
+ t.Run(name, func(t *testing.T) {
+ if tc.holePunchTimeout != 0 {
+ cpy := holepunch.StreamTimeout
+ holepunch.StreamTimeout = tc.holePunchTimeout
+ defer func() { holepunch.StreamTimeout = cpy }()
+ }
+ tr := &mockEventTracer{}
+
+ opts := []holepunch.Option{holepunch.WithTracer(tr), holepunch.DirectDialTimeout(100 * time.Millisecond)}
+ if tc.filter != nil {
+ f := mockMaddrFilter{
+ filterLocal: tc.filter,
+ filterRemote: tc.filter,
+ }
+ opts = append(opts, holepunch.WithAddrFilter(f))
+ }
+
+ router := &simnet.SimpleFirewallRouter{}
+ relay := MustNewHost(t,
+ quicSimnet(true, router),
+ libp2p.ListenAddrs(ma.StringCast("/ip4/1.2.0.1/udp/8000/quic-v1")),
+ libp2p.DisableRelay(),
+ libp2p.ResourceManager(&network.NullResourceManager{}),
+ libp2p.WithFxOption(fx.Invoke(func(h host.Host) {
+					// Set up the relay service
+ _, err := relayv2.New(h)
+ require.NoError(t, err)
+ })),
+ )
+ h1 := MustNewHost(t,
+ quicSimnet(false, router),
+ libp2p.EnableHolePunching(opts...),
+ libp2p.ListenAddrs(ma.StringCast("/ip4/2.2.0.1/udp/8000/quic-v1")),
+ libp2p.ResourceManager(&network.NullResourceManager{}),
+ connectToRelay(&relay),
+ libp2p.ForceReachabilityPrivate(),
+ )
+
+ h2 := MustNewHost(t,
+ quicSimnet(false, router),
+ libp2p.ListenAddrs(ma.StringCast("/ip4/2.2.0.2/udp/8001/quic-v1")),
+ libp2p.ResourceManager(&network.NullResourceManager{}),
+ connectToRelay(&relay),
+ libp2p.ForceReachabilityPrivate(),
+ )
+
+ defer h1.Close()
+ defer h2.Close()
+ defer relay.Close()
+
+ time.Sleep(100 * time.Millisecond)
+
+ require.NoError(t, h1.Connect(context.Background(), peer.AddrInfo{
+ ID: h2.ID(),
+ Addrs: h2.Addrs(),
+ }))
+ require.EventuallyWithT(t, func(c *assert.CollectT) {
+ assert.Contains(c, h1.Mux().Protocols(), holepunch.Protocol)
+ }, time.Second, 100*time.Millisecond)
+
+ s, err := h2.NewStream(network.WithAllowLimitedConn(context.Background(), "holepunch"), h1.ID(), holepunch.Protocol)
+ require.NoError(t, err)
+
+ go tc.initiator(s)
+
+ getTracerError := func(tr *mockEventTracer) []string {
+ var errs []string
+ events := tr.getEvents()
+ for _, ev := range events {
+ if errEv, ok := ev.Evt.(*holepunch.ProtocolErrorEvt); ok {
+ errs = append(errs, errEv.Error)
+ }
+ }
+ return errs
+ }
+
+ require.Eventually(t, func() bool { return len(getTracerError(tr)) > 0 }, 5*time.Second, 100*time.Millisecond)
+ errs := getTracerError(tr)
+ require.Len(t, errs, 1)
+ require.Contains(t, errs[0], tc.errMsg)
+ })
+ }
+}
+
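+// ensureNoHolePunchingStream asserts that no hole punch protocol stream is
+// left open on any connection between h1 and h2, in either direction.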
+func ensureNoHolePunchingStream(t *testing.T, h1, h2 host.Host) {
+ require.Eventually(t, func() bool {
+ for _, c := range h1.Network().ConnsToPeer(h2.ID()) {
+ for _, s := range c.GetStreams() {
+				if s.Protocol() == holepunch.Protocol {
+ return false
+ }
+ }
+ }
+ return true
+ }, 5*time.Second, 50*time.Millisecond)
+
+ require.Eventually(t, func() bool {
+ for _, c := range h2.Network().ConnsToPeer(h1.ID()) {
+ for _, s := range c.GetStreams() {
+				if s.Protocol() == holepunch.Protocol {
+ return false
+ }
+ }
+ }
+ return true
+ }, 5*time.Second, 50*time.Millisecond)
+}
+
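+// ensureDirectConn asserts that h1 and h2 eventually share a direct
+// connection, i.e. one whose remote address contains no circuit component.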
+func ensureDirectConn(t *testing.T, h1, h2 host.Host) {
+ require.Eventually(t, func() bool {
+ for _, c := range h1.Network().ConnsToPeer(h2.ID()) {
+ if _, err := c.RemoteMultiaddr().ValueForProtocol(ma.P_CIRCUIT); err != nil {
+ return true
+ }
+ }
+ return false
+ }, 5*time.Second, 50*time.Millisecond)
+
+ require.Eventually(t, func() bool {
+ for _, c := range h2.Network().ConnsToPeer(h1.ID()) {
+ if _, err := c.RemoteMultiaddr().ValueForProtocol(ma.P_CIRCUIT); err != nil {
+ return true
+ }
+ }
+ return false
+ }, 5*time.Second, 50*time.Millisecond)
+}
+
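+// MockSourceIPSelector serves a fixed source IP for all destinations, so that
+// the simnet transport dials from the host's simulated address.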
+type MockSourceIPSelector struct {
+ ip atomic.Pointer[net.IP]
+}
+
+func (m *MockSourceIPSelector) PreferredSourceIPForDestination(_ *net.UDPAddr) (net.IP, error) {
+ return *m.ip.Load(), nil
+}
+
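+// quicSimnet returns a libp2p option that runs the QUIC transport over an
+// in-memory simulated network instead of real UDP sockets, optionally marking
+// the listen address as publicly reachable in the firewall router.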
+func quicSimnet(isPubliclyReachable bool, router *simnet.SimpleFirewallRouter) libp2p.Option {
+ m := &MockSourceIPSelector{}
+ return libp2p.QUICReuse(
+ quicreuse.NewConnManager,
+ quicreuse.OverrideSourceIPSelector(func() (quicreuse.SourceIPSelector, error) {
+ return m, nil
+ }),
+ quicreuse.OverrideListenUDP(func(_ string, address *net.UDPAddr) (net.PacketConn, error) {
+ m.ip.Store(&address.IP)
+			if isPubliclyReachable {
+ router.SetAddrPubliclyReachable(address)
+ }
+ c := simnet.NewSimConn(address, router)
+ return c, nil
+ }))
+}
+
+func addHolePunchService(t *testing.T, h host.Host, extraAddrs []ma.Multiaddr, opts ...holepunch.Option) *holepunch.Service {
+ t.Helper()
+ hps, err := holepunch.NewService(h, newIDService(t, h), func() []ma.Multiaddr {
+ addrs := h.Addrs()
+ addrs = append(addrs, extraAddrs...)
+ return addrs
+ }, opts...)
+ require.NoError(t, err)
+ return hps
+}
+
+func waitForHolePunchingSvcActive(t *testing.T, h host.Host) {
+ require.EventuallyWithT(t, func(c *assert.CollectT) {
+ assert.Contains(c, h.Mux().Protocols(), holepunch.Protocol)
+ }, time.Second, 100*time.Millisecond)
+}
+
+// SetLegacyBehavior returns an option that controls the isClient behavior of the
+// hole punching service. Prior to https://github.com/libp2p/go-libp2p/pull/3044,
+// go-libp2p would pick the opposite client/server roles for a hole punch.
+// Setting this to true preserves that behavior.
+//
+// Currently, only exposed for testing purposes.
+// Do not set this unless you know what you are doing.
+func SetLegacyBehavior(legacyBehavior bool) holepunch.Option {
+ return func(s *holepunch.Service) error {
+ s.SetLegacyBehavior(legacyBehavior)
+ return nil
+ }
+}
+
+// TestEndToEndSimConnectQUICReuse tests that hole punching works if we are
+// reusing the same port for QUIC and WebTransport, and when we have multiple
+// QUIC listeners on different ports.
+//
+// If this test fails or is flaky, it may be because:
+// - The quicreuse logic (and association logic) is not returning the appropriate transport for holepunching.
+// - The ordering of listeners is unexpected (remember the swarm will sort the listeners with `.ListenOrder()`).
+func TestEndToEndSimConnectQUICReuse(t *testing.T) {
+ h1tr := &mockEventTracer{}
+ h2tr := &mockEventTracer{}
+
+ router := &simnet.SimpleFirewallRouter{}
+ relay := MustNewHost(t,
+ quicSimnet(true, router),
+ libp2p.ListenAddrs(ma.StringCast("/ip4/1.2.0.1/udp/8000/quic-v1")),
+ libp2p.DisableRelay(),
+ libp2p.ResourceManager(&network.NullResourceManager{}),
+ libp2p.WithFxOption(fx.Invoke(func(h host.Host) {
+			// Set up the relay service
+ _, err := relayv2.New(h)
+ require.NoError(t, err)
+ })),
+ )
+
+	// We only return the QUIC address on port 8001 plus the circuit addresses.
+	// This lets us listen on additional QUIC ports to try to confuse the quicreuse logic during hole punching.
+ onlyQuicOnPort8001AndCircuit := func(addrs []ma.Multiaddr) []ma.Multiaddr {
+ return slices.DeleteFunc(addrs, func(a ma.Multiaddr) bool {
+ _, err := a.ValueForProtocol(ma.P_CIRCUIT)
+ isCircuit := err == nil
+ if isCircuit {
+ return false
+ }
+ _, err = a.ValueForProtocol(ma.P_QUIC_V1)
+ isQuic := err == nil
+ if !isQuic {
+ return true
+ }
+ port, err := a.ValueForProtocol(ma.P_UDP)
+ if err != nil {
+ return true
+ }
+ isPort8001 := port == "8001"
+ return !isPort8001
+ })
+ }
+
+ h1 := MustNewHost(t,
+ quicSimnet(false, router),
+ libp2p.EnableHolePunching(holepunch.WithTracer(h1tr), holepunch.DirectDialTimeout(100*time.Millisecond)),
+ libp2p.ListenAddrs(ma.StringCast("/ip4/2.2.0.1/udp/8001/quic-v1/webtransport")),
+ libp2p.ResourceManager(&network.NullResourceManager{}),
+ libp2p.AddrsFactory(onlyQuicOnPort8001AndCircuit),
+ libp2p.ForceReachabilityPrivate(),
+ )
+ // Listen on quic *after* listening on webtransport.
+ // This is to test that the quicreuse logic is not returning the wrong transport.
+ // See: https://github.com/libp2p/go-libp2p/issues/3165#issuecomment-2700126706 for details.
+ h1.Network().Listen(
+ ma.StringCast("/ip4/2.2.0.1/udp/8001/quic-v1"),
+ ma.StringCast("/ip4/2.2.0.1/udp/9001/quic-v1"),
+ )
+
+ h2 := MustNewHost(t,
+ quicSimnet(false, router),
+ libp2p.ListenAddrs(
+ ma.StringCast("/ip4/2.2.0.2/udp/8001/quic-v1/webtransport"),
+ ),
+ libp2p.ResourceManager(&network.NullResourceManager{}),
+ connectToRelay(&relay),
+ libp2p.EnableHolePunching(holepunch.WithTracer(h2tr), holepunch.DirectDialTimeout(100*time.Millisecond)),
+ libp2p.AddrsFactory(onlyQuicOnPort8001AndCircuit),
+ libp2p.ForceReachabilityPrivate(),
+ )
+ // Listen on quic after listening on webtransport.
+ h2.Network().Listen(
+ ma.StringCast("/ip4/2.2.0.2/udp/8001/quic-v1"),
+ ma.StringCast("/ip4/2.2.0.2/udp/9001/quic-v1"),
+ )
+
+ defer h1.Close()
+ defer h2.Close()
+ defer relay.Close()
+
+ // Wait for holepunch service to start
+ waitForHolePunchingSvcActive(t, h1)
+ waitForHolePunchingSvcActive(t, h2)
+
+ learnAddrs(h1, h2)
+ pingAtoB(t, h1, h2)
+
+ // wait till a direct connection is complete
+ ensureDirectConn(t, h1, h2)
+}
diff --git a/p2p/protocol/holepunch/holepuncher.go b/p2p/protocol/holepunch/holepuncher.go
new file mode 100644
index 0000000000..bf27d8c095
--- /dev/null
+++ b/p2p/protocol/holepunch/holepuncher.go
@@ -0,0 +1,307 @@
+package holepunch
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/p2p/protocol/holepunch/pb"
+ "github.com/libp2p/go-libp2p/p2p/protocol/identify"
+ "github.com/libp2p/go-msgio/pbio"
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
+// ErrHolePunchActive is returned from DirectConnect when another hole punching attempt is currently running
+var ErrHolePunchActive = errors.New("another hole punching attempt to this peer is active")
+
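+// maxRetries is the maximum number of hole punch attempts the initiator makes
+// for a peer before giving up.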
+const maxRetries = 3
+
+// The holePuncher runs on the peer that's behind a NAT / firewall.
+// It observes new incoming connections via a relay that it has a reservation with,
+// and initiates the DCUtR protocol with them.
+// It first tries to establish a direct connection, and if that fails, it
+// initiates a hole punch.
+type holePuncher struct {
+ ctx context.Context
+ ctxCancel context.CancelFunc
+
+ host host.Host
+ refCount sync.WaitGroup
+
+ ids identify.IDService
+ listenAddrs func() []ma.Multiaddr
+
+ directDialTimeout time.Duration
+
+ // active hole punches for deduplicating
+ activeMx sync.Mutex
+ active map[peer.ID]struct{}
+
+ closeMx sync.RWMutex
+ closed bool
+
+ tracer *tracer
+ filter AddrFilter
+
+	// Prior to https://github.com/libp2p/go-libp2p/pull/3044, go-libp2p would
+	// pick the opposite client/server roles for a hole punch. Setting this to
+	// true preserves that behavior.
+ legacyBehavior bool
+}
+
+func newHolePuncher(h host.Host, ids identify.IDService, listenAddrs func() []ma.Multiaddr, tracer *tracer, filter AddrFilter) *holePuncher {
+ hp := &holePuncher{
+ host: h,
+ ids: ids,
+ active: make(map[peer.ID]struct{}),
+ tracer: tracer,
+ filter: filter,
+ listenAddrs: listenAddrs,
+
+ legacyBehavior: true,
+ }
+ hp.ctx, hp.ctxCancel = context.WithCancel(context.Background())
+ h.Network().Notify((*netNotifiee)(hp))
+ return hp
+}
+
+func (hp *holePuncher) beginDirectConnect(p peer.ID) error {
+ hp.closeMx.RLock()
+ defer hp.closeMx.RUnlock()
+ if hp.closed {
+ return ErrClosed
+ }
+
+ hp.activeMx.Lock()
+ defer hp.activeMx.Unlock()
+ if _, ok := hp.active[p]; ok {
+ return ErrHolePunchActive
+ }
+
+ hp.active[p] = struct{}{}
+ return nil
+}
+
+// DirectConnect attempts to make a direct connection with a remote peer.
+// It first attempts a direct dial (if we have a public address for that peer), and then
+// coordinates a hole punch over the existing relay connection.
+func (hp *holePuncher) DirectConnect(p peer.ID) error {
+ log.Debug("beginDirectConnect", "source_peer", hp.host.ID(), "destination_peer", p)
+ if err := hp.beginDirectConnect(p); err != nil {
+ return err
+ }
+
+ defer func() {
+ hp.activeMx.Lock()
+ delete(hp.active, p)
+ hp.activeMx.Unlock()
+ }()
+
+ return hp.directConnect(p)
+}
+
+func (hp *holePuncher) directConnect(rp peer.ID) error {
+ // short-circuit check to see if we already have a direct connection
+ if getDirectConnection(hp.host, rp) != nil {
+ log.Debug("already connected", "source_peer", hp.host.ID(), "destination_peer", rp)
+ return nil
+ }
+
+ log.Debug("attempting direct dial", "source_peer", hp.host.ID(), "destination_peer", rp, "addrs", hp.host.Peerstore().Addrs(rp))
+ // short-circuit hole punching if a direct dial works.
+ // attempt a direct connection ONLY if we have a public address for the remote peer
+ for _, a := range hp.host.Peerstore().Addrs(rp) {
+ if !isRelayAddress(a) && manet.IsPublicAddr(a) {
+ forceDirectConnCtx := network.WithForceDirectDial(hp.ctx, "hole-punching")
+ dialCtx, cancel := context.WithTimeout(forceDirectConnCtx, hp.directDialTimeout)
+
+ tstart := time.Now()
+ // This dials *all* addresses, public and private, from the peerstore.
+ err := hp.host.Connect(dialCtx, peer.AddrInfo{ID: rp})
+ dt := time.Since(tstart)
+ cancel()
+
+ if err != nil {
+ hp.tracer.DirectDialFailed(rp, dt, err)
+ break
+ }
+ hp.tracer.DirectDialSuccessful(rp, dt)
+ log.Debug("direct connection to peer successful, no need for a hole punch", "destination_peer", rp)
+ return nil
+ }
+ }
+
+	log.Debug("no direct connection, attempting hole punch over relayed connection", "destination_peer", rp)
+
+ // hole punch
+ for i := 1; i <= maxRetries; i++ {
+ addrs, obsAddrs, rtt, err := hp.initiateHolePunch(rp)
+ if err != nil {
+ hp.tracer.ProtocolError(rp, err)
+ return err
+ }
+ synTime := rtt / 2
+ log.Debug("peer RTT and starting hole punch", "rtt", rtt, "syn_time", synTime)
+
+ // wait for sync to reach the other peer and then punch a hole for it in our NAT
+ // by attempting a connect to it.
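+		// The responder starts dialing as soon as it receives our SYNC, which
+		// arrives roughly RTT/2 after we send it. Delaying our own dial by
+		// RTT/2 therefore makes both dials start at approximately the same
+		// time, which is what the simultaneous open relies on.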
+ timer := time.NewTimer(synTime)
+ select {
+ case start := <-timer.C:
+ pi := peer.AddrInfo{
+ ID: rp,
+ Addrs: addrs,
+ }
+ hp.tracer.StartHolePunch(rp, addrs, rtt)
+ hp.tracer.HolePunchAttempt(pi.ID)
+ ctx, cancel := context.WithTimeout(hp.ctx, hp.directDialTimeout)
+ isClient := true
+ if hp.legacyBehavior {
+ isClient = false
+ }
+ err := holePunchConnect(ctx, hp.host, pi, isClient)
+ cancel()
+ dt := time.Since(start)
+ hp.tracer.EndHolePunch(rp, dt, err)
+ if err == nil {
+				log.Debug("hole punch successful", "destination_peer", rp, "duration", dt)
+ hp.tracer.HolePunchFinished("initiator", i, addrs, obsAddrs, getDirectConnection(hp.host, rp))
+ return nil
+ }
+ case <-hp.ctx.Done():
+ timer.Stop()
+ return hp.ctx.Err()
+ }
+ if i == maxRetries {
+ hp.tracer.HolePunchFinished("initiator", maxRetries, addrs, obsAddrs, nil)
+ }
+ }
+ return fmt.Errorf("all retries for hole punch with peer %s failed", rp)
+}
+
+// initiateHolePunch opens a new hole punching coordination stream,
+// exchanges the addresses and measures the RTT.
+func (hp *holePuncher) initiateHolePunch(rp peer.ID) ([]ma.Multiaddr, []ma.Multiaddr, time.Duration, error) {
+ hpCtx := network.WithAllowLimitedConn(hp.ctx, "hole-punch")
+ sCtx := network.WithNoDial(hpCtx, "hole-punch")
+
+ str, err := hp.host.NewStream(sCtx, rp, Protocol)
+ if err != nil {
+ return nil, nil, 0, fmt.Errorf("failed to open hole-punching stream: %w", err)
+ }
+ defer str.Close()
+ log.Debug("initiateHolePunch", "remote_peer", str.Conn().RemotePeer(), "remote_multiaddr", str.Conn().RemoteMultiaddr())
+
+ addr, obsAddr, rtt, err := hp.initiateHolePunchImpl(str)
+ if err != nil {
+ str.Reset()
+ return addr, obsAddr, rtt, fmt.Errorf("failed to initiateHolePunch: %w", err)
+ }
+ return addr, obsAddr, rtt, err
+}
+
+func (hp *holePuncher) initiateHolePunchImpl(str network.Stream) ([]ma.Multiaddr, []ma.Multiaddr, time.Duration, error) {
+ if err := str.Scope().SetService(ServiceName); err != nil {
+ return nil, nil, 0, fmt.Errorf("error attaching stream to holepunch service: %s", err)
+ }
+
+ if err := str.Scope().ReserveMemory(maxMsgSize, network.ReservationPriorityAlways); err != nil {
+ return nil, nil, 0, fmt.Errorf("error reserving memory for stream: %s", err)
+ }
+ defer str.Scope().ReleaseMemory(maxMsgSize)
+
+ w := pbio.NewDelimitedWriter(str)
+ rd := pbio.NewDelimitedReader(str, maxMsgSize)
+
+ str.SetDeadline(time.Now().Add(StreamTimeout))
+
+ // send a CONNECT and start RTT measurement.
+ obsAddrs := removeRelayAddrs(hp.listenAddrs())
+ if hp.filter != nil {
+ obsAddrs = hp.filter.FilterLocal(str.Conn().RemotePeer(), obsAddrs)
+ }
+ if len(obsAddrs) == 0 {
+ return nil, nil, 0, errors.New("aborting hole punch initiation as we have no public address")
+ }
+ log.Debug("initiating hole punch", "observed_addrs", obsAddrs)
+
+ start := time.Now()
+ if err := w.WriteMsg(&pb.HolePunch{
+ Type: pb.HolePunch_CONNECT.Enum(),
+ ObsAddrs: addrsToBytes(obsAddrs),
+ }); err != nil {
+ str.Reset()
+ return nil, nil, 0, err
+ }
+
+ // wait for a CONNECT message from the remote peer
+ var msg pb.HolePunch
+ if err := rd.ReadMsg(&msg); err != nil {
+ return nil, nil, 0, fmt.Errorf("failed to read CONNECT message from remote peer: %w", err)
+ }
+ rtt := time.Since(start)
+ if t := msg.GetType(); t != pb.HolePunch_CONNECT {
+		return nil, nil, 0, fmt.Errorf("expected CONNECT message, got %s", t)
+ }
+
+ addrs := removeRelayAddrs(addrsFromBytes(msg.ObsAddrs))
+ if hp.filter != nil {
+ addrs = hp.filter.FilterRemote(str.Conn().RemotePeer(), addrs)
+ }
+
+ if len(addrs) == 0 {
+ return nil, nil, 0, errors.New("didn't receive any public addresses in CONNECT")
+ }
+
+ if err := w.WriteMsg(&pb.HolePunch{Type: pb.HolePunch_SYNC.Enum()}); err != nil {
+ return nil, nil, 0, fmt.Errorf("failed to send SYNC message for hole punching: %w", err)
+ }
+ return addrs, obsAddrs, rtt, nil
+}
+
+func (hp *holePuncher) Close() error {
+ hp.closeMx.Lock()
+ hp.closed = true
+ hp.closeMx.Unlock()
+ hp.ctxCancel()
+ hp.refCount.Wait()
+ return nil
+}
+
+type netNotifiee holePuncher
+
+func (nn *netNotifiee) Connected(_ network.Network, conn network.Conn) {
+ hs := (*holePuncher)(nn)
+
+ // Hole punch if it's an inbound proxy connection.
+ // If we already have a direct connection with the remote peer, this will be a no-op.
+ if conn.Stat().Direction == network.DirInbound && isRelayAddress(conn.RemoteMultiaddr()) {
+ hs.refCount.Add(1)
+ go func() {
+ defer hs.refCount.Done()
+
+ select {
+ // waiting for Identify here will allow us to access the peer's public and observed addresses
+ // that we can dial to for a hole punch.
+ case <-hs.ids.IdentifyWait(conn):
+ case <-hs.ctx.Done():
+ return
+ }
+
+ err := hs.DirectConnect(conn.RemotePeer())
+ if err != nil {
+ log.Debug("attempt to perform DirectConnect failed", "remote_peer", conn.RemotePeer(), "err", err)
+ }
+ }()
+ }
+}
+
+func (nn *netNotifiee) Disconnected(_ network.Network, _ network.Conn) {}
+func (nn *netNotifiee) Listen(_ network.Network, _ ma.Multiaddr) {}
+func (nn *netNotifiee) ListenClose(_ network.Network, _ ma.Multiaddr) {}
diff --git a/p2p/protocol/holepunch/metrics.go b/p2p/protocol/holepunch/metrics.go
new file mode 100644
index 0000000000..92ed20b14d
--- /dev/null
+++ b/p2p/protocol/holepunch/metrics.go
@@ -0,0 +1,187 @@
+package holepunch
+
+import (
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/p2p/metricshelper"
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+const metricNamespace = "libp2p_holepunch"
+
+var (
+ directDialsTotal = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "direct_dials_total",
+ Help: "Direct Dials Total",
+ },
+ []string{"outcome"},
+ )
+ hpAddressOutcomesTotal = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "address_outcomes_total",
+ Help: "Hole Punch outcomes by Transport",
+ },
+ []string{"side", "num_attempts", "ipv", "transport", "outcome"},
+ )
+ hpOutcomesTotal = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "outcomes_total",
+ Help: "Hole Punch outcomes overall",
+ },
+ []string{"side", "num_attempts", "outcome"},
+ )
+
+ collectors = []prometheus.Collector{
+ directDialsTotal,
+ hpAddressOutcomesTotal,
+ hpOutcomesTotal,
+ }
+)
+
+type MetricsTracer interface {
+ HolePunchFinished(side string, attemptNum int, theirAddrs []ma.Multiaddr, ourAddr []ma.Multiaddr, directConn network.ConnMultiaddrs)
+ DirectDialFinished(success bool)
+}
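+
+// NewMetricsTracer returns the default Prometheus-backed MetricsTracer; a
+// custom implementation can be supplied to the service via the
+// WithMetricsTracer option.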
+
+type metricsTracer struct{}
+
+var _ MetricsTracer = &metricsTracer{}
+
+type metricsTracerSetting struct {
+ reg prometheus.Registerer
+}
+
+type MetricsTracerOption func(*metricsTracerSetting)
+
+func WithRegisterer(reg prometheus.Registerer) MetricsTracerOption {
+ return func(s *metricsTracerSetting) {
+ if reg != nil {
+ s.reg = reg
+ }
+ }
+}
+
+func NewMetricsTracer(opts ...MetricsTracerOption) MetricsTracer {
+ setting := &metricsTracerSetting{reg: prometheus.DefaultRegisterer}
+ for _, opt := range opts {
+ opt(setting)
+ }
+ metricshelper.RegisterCollectors(setting.reg, collectors...)
+	// initialise the metrics' labels so that the first data point is handled correctly
+ for _, side := range []string{"initiator", "receiver"} {
+ for _, numAttempts := range []string{"1", "2", "3", "4"} {
+ for _, outcome := range []string{"success", "failed", "cancelled", "no_suitable_address"} {
+ for _, ipv := range []string{"ip4", "ip6"} {
+ for _, transport := range []string{"quic", "quic-v1", "tcp", "webtransport"} {
+ hpAddressOutcomesTotal.WithLabelValues(side, numAttempts, ipv, transport, outcome)
+ }
+ }
+ if outcome == "cancelled" {
+ // not a valid outcome for the overall holepunch metric
+ continue
+ }
+ hpOutcomesTotal.WithLabelValues(side, numAttempts, outcome)
+ }
+ }
+ }
+ return &metricsTracer{}
+}
+
+// HolePunchFinished tracks metrics on completion of a hole punch. Metrics are
+// tracked both for the hole punch attempt as a whole and for each individual
+// address involved in it.
+//
+// outcome for an address is computed as:
+//
+// - success:
+// A direct connection was established with the peer using this address
+// - cancelled:
+// A direct connection was established with the peer but not using this address
+// - failed:
+// No direct connection was made to the peer and the peer reported an address
+// with the same transport as this address
+// - no_suitable_address:
+// The peer reported no address with the same transport as this address
+func (mt *metricsTracer) HolePunchFinished(side string, numAttempts int,
+ remoteAddrs []ma.Multiaddr, localAddrs []ma.Multiaddr, directConn network.ConnMultiaddrs) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ *tags = append(*tags, side, getNumAttemptString(numAttempts))
+ var dipv, dtransport string
+ if directConn != nil {
+ dipv = metricshelper.GetIPVersion(directConn.LocalMultiaddr())
+ dtransport = metricshelper.GetTransport(directConn.LocalMultiaddr())
+ }
+
+ matchingAddressCount := 0
+ // calculate holepunch outcome for all the addresses involved
+ for _, la := range localAddrs {
+ lipv := metricshelper.GetIPVersion(la)
+ ltransport := metricshelper.GetTransport(la)
+
+ matchingAddress := false
+ for _, ra := range remoteAddrs {
+ ripv := metricshelper.GetIPVersion(ra)
+ rtransport := metricshelper.GetTransport(ra)
+ if ripv == lipv && rtransport == ltransport {
+ // the peer reported an address with the same transport
+ matchingAddress = true
+ matchingAddressCount++
+
+ *tags = append(*tags, ripv, rtransport)
+ if directConn != nil && dipv == ripv && dtransport == rtransport {
+ // the connection was made using this address
+ *tags = append(*tags, "success")
+ } else if directConn != nil {
+ // connection was made but not using this address
+ *tags = append(*tags, "cancelled")
+ } else {
+ // no connection was made
+ *tags = append(*tags, "failed")
+ }
+ hpAddressOutcomesTotal.WithLabelValues(*tags...).Inc()
+ *tags = (*tags)[:2] // 2 because we want to keep (side, numAttempts)
+ break
+ }
+ }
+ if !matchingAddress {
+ *tags = append(*tags, lipv, ltransport, "no_suitable_address")
+ hpAddressOutcomesTotal.WithLabelValues(*tags...).Inc()
+ *tags = (*tags)[:2] // 2 because we want to keep (side, numAttempts)
+ }
+ }
+
+ outcome := "failed"
+ if directConn != nil {
+ outcome = "success"
+ } else if matchingAddressCount == 0 {
+ // there were no matching addresses, this attempt was going to fail
+ outcome = "no_suitable_address"
+ }
+
+ *tags = append(*tags, outcome)
+ hpOutcomesTotal.WithLabelValues(*tags...).Inc()
+}
+
+func getNumAttemptString(numAttempt int) string {
+ var attemptStr = [...]string{"0", "1", "2", "3", "4", "5"}
+ if numAttempt > 5 {
+ return "> 5"
+ }
+ return attemptStr[numAttempt]
+}
+
+func (mt *metricsTracer) DirectDialFinished(success bool) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+ if success {
+ *tags = append(*tags, "success")
+ } else {
+ *tags = append(*tags, "failed")
+ }
+ directDialsTotal.WithLabelValues(*tags...).Inc()
+}
diff --git a/p2p/protocol/holepunch/metrics_noalloc_test.go b/p2p/protocol/holepunch/metrics_noalloc_test.go
new file mode 100644
index 0000000000..eb04eedff8
--- /dev/null
+++ b/p2p/protocol/holepunch/metrics_noalloc_test.go
@@ -0,0 +1,49 @@
+//go:build nocover
+
+package holepunch
+
+import (
+ "math/rand"
+ "testing"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+func TestNoCoverNoAllocMetrics(t *testing.T) {
+ addrs1 := [][]ma.Multiaddr{
+ {
+ ma.StringCast("/ip4/0.0.0.0/tcp/1"),
+ ma.StringCast("/ip4/1.2.3.4/udp/2/quic-v1"),
+ },
+ nil,
+ }
+ addrs2 := [][]ma.Multiaddr{
+ {
+ ma.StringCast("/ip4/1.2.3.4/tcp/3"),
+ ma.StringCast("/ip4/1.2.3.4/udp/4/quic-v1"),
+ },
+ nil,
+ }
+ conns := []network.ConnMultiaddrs{
+ &mockConnMultiaddrs{local: addrs1[0][0], remote: addrs2[0][0]},
+ nil,
+ }
+ sides := []string{"initiator", "receiver"}
+ mt := NewMetricsTracer()
+ testcases := map[string]func(){
+ "DirectDialFinished": func() { mt.DirectDialFinished(rand.Intn(2) == 1) },
+ "HolePunchFinished": func() {
+ mt.HolePunchFinished(sides[rand.Intn(len(sides))], rand.Intn(maxRetries), addrs1[rand.Intn(len(addrs1))],
+ addrs2[rand.Intn(len(addrs2))], conns[rand.Intn(len(conns))])
+ },
+ }
+ for method, f := range testcases {
+ t.Run(method, func(t *testing.T) {
+ cnt := testing.AllocsPerRun(1000, f)
+ if cnt > 0 {
+ t.Errorf("%s Failed: expected 0 allocs got %0.2f", method, cnt)
+ }
+ })
+ }
+}
diff --git a/p2p/protocol/holepunch/metrics_test.go b/p2p/protocol/holepunch/metrics_test.go
new file mode 100644
index 0000000000..86cb59d64c
--- /dev/null
+++ b/p2p/protocol/holepunch/metrics_test.go
@@ -0,0 +1,99 @@
+package holepunch
+
+import (
+ "testing"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/prometheus/client_golang/prometheus"
+ dto "github.com/prometheus/client_model/go"
+)
+
+func getCounterValue(t *testing.T, counter *prometheus.CounterVec, labels ...string) int {
+ t.Helper()
+ m := &dto.Metric{}
+ if err := counter.WithLabelValues(labels...).Write(m); err != nil {
+ t.Errorf("failed to extract counter value %s", err)
+ return 0
+ }
+ return int(*m.Counter.Value)
+}
+
+func TestHolePunchOutcomeCounter(t *testing.T) {
+ t1 := ma.StringCast("/ip4/1.2.3.4/tcp/1")
+ t2 := ma.StringCast("/ip4/1.2.3.4/tcp/2")
+
+ q1v1 := ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1")
+ q2v1 := ma.StringCast("/ip4/1.2.3.4/udp/2/quic-v1")
+
+ type testcase struct {
+ name string
+ theirAddrs []ma.Multiaddr
+ ourAddrs []ma.Multiaddr
+ conn network.ConnMultiaddrs
+ result map[[3]string]int
+ }
+ testcases := []testcase{
+ {
+ name: "connection success",
+ theirAddrs: []ma.Multiaddr{t1, q1v1},
+ ourAddrs: []ma.Multiaddr{t2, q2v1},
+ conn: &mockConnMultiaddrs{local: t1, remote: t2},
+ result: map[[3]string]int{
+ [...]string{"ip4", "tcp", "success"}: 1,
+ [...]string{"ip4", "quic-v1", "cancelled"}: 1,
+ },
+ },
+ {
+ name: "connection failed",
+ theirAddrs: []ma.Multiaddr{t1},
+ ourAddrs: []ma.Multiaddr{t2, q2v1},
+ conn: nil,
+ result: map[[3]string]int{
+ [...]string{"ip4", "tcp", "failed"}: 1,
+ [...]string{"ip4", "quic-v1", "no_suitable_address"}: 1,
+ },
+ },
+ {
+ name: "no_suitable_address",
+ theirAddrs: []ma.Multiaddr{t1, q1v1},
+ ourAddrs: []ma.Multiaddr{t2, q2v1},
+ conn: &mockConnMultiaddrs{local: q1v1, remote: q2v1},
+ result: map[[3]string]int{
+ [...]string{"ip4", "tcp", "cancelled"}: 1,
+ [...]string{"ip4", "quic-v1", "failed"}: 0,
+ [...]string{"ip4", "quic-v1", "success"}: 1,
+ [...]string{"ip4", "tcp", "success"}: 0,
+ },
+ },
+ }
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ reg := prometheus.NewRegistry()
+ hpAddressOutcomesTotal.Reset()
+ mt := NewMetricsTracer(WithRegisterer(reg))
+ for _, side := range []string{"receiver", "initiator"} {
+ mt.HolePunchFinished(side, 1, tc.theirAddrs, tc.ourAddrs, tc.conn)
+ for labels, value := range tc.result {
+ v := getCounterValue(t, hpAddressOutcomesTotal, side, "1", labels[0], labels[1], labels[2])
+ if v != value {
+ t.Errorf("Invalid metric value %s: expected: %d got: %d", labels, value, v)
+ }
+ }
+ }
+ })
+ }
+}
+
+type mockConnMultiaddrs struct {
+ local, remote ma.Multiaddr
+}
+
+func (cma *mockConnMultiaddrs) LocalMultiaddr() ma.Multiaddr {
+ return cma.local
+}
+
+func (cma *mockConnMultiaddrs) RemoteMultiaddr() ma.Multiaddr {
+ return cma.remote
+}
diff --git a/p2p/protocol/holepunch/pb/holepunch.pb.go b/p2p/protocol/holepunch/pb/holepunch.pb.go
new file mode 100644
index 0000000000..83bf57d677
--- /dev/null
+++ b/p2p/protocol/holepunch/pb/holepunch.pb.go
@@ -0,0 +1,195 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.6
+// protoc v5.29.2
+// source: p2p/protocol/holepunch/pb/holepunch.proto
+
+package pb
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+ unsafe "unsafe"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type HolePunch_Type int32
+
+const (
+ HolePunch_CONNECT HolePunch_Type = 100
+ HolePunch_SYNC HolePunch_Type = 300
+)
+
+// Enum value maps for HolePunch_Type.
+var (
+ HolePunch_Type_name = map[int32]string{
+ 100: "CONNECT",
+ 300: "SYNC",
+ }
+ HolePunch_Type_value = map[string]int32{
+ "CONNECT": 100,
+ "SYNC": 300,
+ }
+)
+
+func (x HolePunch_Type) Enum() *HolePunch_Type {
+ p := new(HolePunch_Type)
+ *p = x
+ return p
+}
+
+func (x HolePunch_Type) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (HolePunch_Type) Descriptor() protoreflect.EnumDescriptor {
+ return file_p2p_protocol_holepunch_pb_holepunch_proto_enumTypes[0].Descriptor()
+}
+
+func (HolePunch_Type) Type() protoreflect.EnumType {
+ return &file_p2p_protocol_holepunch_pb_holepunch_proto_enumTypes[0]
+}
+
+func (x HolePunch_Type) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *HolePunch_Type) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+ if err != nil {
+ return err
+ }
+ *x = HolePunch_Type(num)
+ return nil
+}
+
+// Deprecated: Use HolePunch_Type.Descriptor instead.
+func (HolePunch_Type) EnumDescriptor() ([]byte, []int) {
+ return file_p2p_protocol_holepunch_pb_holepunch_proto_rawDescGZIP(), []int{0, 0}
+}
+
+// spec: https://github.com/libp2p/specs/blob/master/relay/DCUtR.md
+type HolePunch struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Type *HolePunch_Type `protobuf:"varint,1,req,name=type,enum=holepunch.pb.HolePunch_Type" json:"type,omitempty"`
+ ObsAddrs [][]byte `protobuf:"bytes,2,rep,name=ObsAddrs" json:"ObsAddrs,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *HolePunch) Reset() {
+ *x = HolePunch{}
+ mi := &file_p2p_protocol_holepunch_pb_holepunch_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *HolePunch) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HolePunch) ProtoMessage() {}
+
+func (x *HolePunch) ProtoReflect() protoreflect.Message {
+ mi := &file_p2p_protocol_holepunch_pb_holepunch_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HolePunch.ProtoReflect.Descriptor instead.
+func (*HolePunch) Descriptor() ([]byte, []int) {
+ return file_p2p_protocol_holepunch_pb_holepunch_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *HolePunch) GetType() HolePunch_Type {
+ if x != nil && x.Type != nil {
+ return *x.Type
+ }
+ return HolePunch_CONNECT
+}
+
+func (x *HolePunch) GetObsAddrs() [][]byte {
+ if x != nil {
+ return x.ObsAddrs
+ }
+ return nil
+}
+
+var File_p2p_protocol_holepunch_pb_holepunch_proto protoreflect.FileDescriptor
+
+const file_p2p_protocol_holepunch_pb_holepunch_proto_rawDesc = "" +
+ "\n" +
+ ")p2p/protocol/holepunch/pb/holepunch.proto\x12\fholepunch.pb\"y\n" +
+ "\tHolePunch\x120\n" +
+ "\x04type\x18\x01 \x02(\x0e2\x1c.holepunch.pb.HolePunch.TypeR\x04type\x12\x1a\n" +
+ "\bObsAddrs\x18\x02 \x03(\fR\bObsAddrs\"\x1e\n" +
+ "\x04Type\x12\v\n" +
+ "\aCONNECT\x10d\x12\t\n" +
+ "\x04SYNC\x10\xac\x02B7Z5github.com/libp2p/go-libp2p/p2p/protocol/holepunch/pb"
+
+var (
+ file_p2p_protocol_holepunch_pb_holepunch_proto_rawDescOnce sync.Once
+ file_p2p_protocol_holepunch_pb_holepunch_proto_rawDescData []byte
+)
+
+func file_p2p_protocol_holepunch_pb_holepunch_proto_rawDescGZIP() []byte {
+ file_p2p_protocol_holepunch_pb_holepunch_proto_rawDescOnce.Do(func() {
+ file_p2p_protocol_holepunch_pb_holepunch_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_p2p_protocol_holepunch_pb_holepunch_proto_rawDesc), len(file_p2p_protocol_holepunch_pb_holepunch_proto_rawDesc)))
+ })
+ return file_p2p_protocol_holepunch_pb_holepunch_proto_rawDescData
+}
+
+var file_p2p_protocol_holepunch_pb_holepunch_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_p2p_protocol_holepunch_pb_holepunch_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_p2p_protocol_holepunch_pb_holepunch_proto_goTypes = []any{
+ (HolePunch_Type)(0), // 0: holepunch.pb.HolePunch.Type
+ (*HolePunch)(nil), // 1: holepunch.pb.HolePunch
+}
+var file_p2p_protocol_holepunch_pb_holepunch_proto_depIdxs = []int32{
+ 0, // 0: holepunch.pb.HolePunch.type:type_name -> holepunch.pb.HolePunch.Type
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_p2p_protocol_holepunch_pb_holepunch_proto_init() }
+func file_p2p_protocol_holepunch_pb_holepunch_proto_init() {
+ if File_p2p_protocol_holepunch_pb_holepunch_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_p2p_protocol_holepunch_pb_holepunch_proto_rawDesc), len(file_p2p_protocol_holepunch_pb_holepunch_proto_rawDesc)),
+ NumEnums: 1,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_p2p_protocol_holepunch_pb_holepunch_proto_goTypes,
+ DependencyIndexes: file_p2p_protocol_holepunch_pb_holepunch_proto_depIdxs,
+ EnumInfos: file_p2p_protocol_holepunch_pb_holepunch_proto_enumTypes,
+ MessageInfos: file_p2p_protocol_holepunch_pb_holepunch_proto_msgTypes,
+ }.Build()
+ File_p2p_protocol_holepunch_pb_holepunch_proto = out.File
+ file_p2p_protocol_holepunch_pb_holepunch_proto_goTypes = nil
+ file_p2p_protocol_holepunch_pb_holepunch_proto_depIdxs = nil
+}
diff --git a/p2p/protocol/holepunch/pb/holepunch.proto b/p2p/protocol/holepunch/pb/holepunch.proto
new file mode 100644
index 0000000000..cf697f94dd
--- /dev/null
+++ b/p2p/protocol/holepunch/pb/holepunch.proto
@@ -0,0 +1,16 @@
+syntax = "proto2";
+
+package holepunch.pb;
+
+option go_package = "github.com/libp2p/go-libp2p/p2p/protocol/holepunch/pb";
+
+// spec: https://github.com/libp2p/specs/blob/master/relay/DCUtR.md
+message HolePunch {
+ enum Type {
+ CONNECT = 100;
+ SYNC = 300;
+ }
+
+  required Type type = 1;
+ repeated bytes ObsAddrs = 2;
+}
diff --git a/p2p/protocol/holepunch/svc.go b/p2p/protocol/holepunch/svc.go
new file mode 100644
index 0000000000..ff0b56ca0c
--- /dev/null
+++ b/p2p/protocol/holepunch/svc.go
@@ -0,0 +1,306 @@
+package holepunch
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/protocol"
+ logging "github.com/libp2p/go-libp2p/gologshim"
+ "github.com/libp2p/go-libp2p/p2p/protocol/holepunch/pb"
+ "github.com/libp2p/go-libp2p/p2p/protocol/identify"
+ "github.com/libp2p/go-msgio/pbio"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+const defaultDirectDialTimeout = 10 * time.Second
+
+// Protocol is the libp2p protocol for Hole Punching.
+const Protocol protocol.ID = "/libp2p/dcutr"
+
+var log = logging.Logger("p2p-holepunch")
+
+// StreamTimeout is the timeout for the hole punch protocol stream.
+var StreamTimeout = 1 * time.Minute
+
+const (
+ ServiceName = "libp2p.holepunch"
+
+ maxMsgSize = 4 * 1024 // 4K
+)
+
+// ErrClosed is returned when the hole punching is closed
+var ErrClosed = errors.New("hole punching service closing")
+
+type Option func(*Service) error
+
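+// DirectDialTimeout sets the timeout used for the initial direct dial attempt
+// and for each subsequent hole punch dial.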
+func DirectDialTimeout(timeout time.Duration) Option {
+ return func(s *Service) error {
+ s.directDialTimeout = timeout
+ return nil
+ }
+}
+
+// The Service runs on every node that supports the DCUtR protocol.
+type Service struct {
+ ctx context.Context
+ ctxCancel context.CancelFunc
+
+ host host.Host
+ // ids helps with connection reversal. We wait for identify to complete and attempt
+ // a direct connection to the peer if it's publicly reachable.
+ ids identify.IDService
+	// listenAddrs provides the addresses for the host to be used for hole punching. We use this
+	// and not host.Addrs because host.Addrs might remove public, unreachable addresses and
+	// advertise only the publicly reachable relay addresses.
+ listenAddrs func() []ma.Multiaddr
+
+ directDialTimeout time.Duration
+ holePuncherMx sync.Mutex
+ holePuncher *holePuncher
+
+ hasPublicAddrsChan chan struct{}
+
+ tracer *tracer
+ filter AddrFilter
+
+ refCount sync.WaitGroup
+
+ // Prior to https://github.com/libp2p/go-libp2p/pull/3044, go-libp2p would
+ // pick the opposite roles for client/server a hole punch. Setting this to
+ // true preserves that behavior
+ legacyBehavior bool
+}
+
+// SetLegacyBehavior is only exposed for testing purposes.
+// Do not set this unless you know what you are doing.
+func (s *Service) SetLegacyBehavior(legacyBehavior bool) {
+ s.legacyBehavior = legacyBehavior
+}
+
+// NewService creates a new service that can be used for hole punching.
+// The Service runs on all hosts that support the DCUtR protocol,
+// no matter if they are behind a NAT / firewall or not.
+// The Service handles DCUtR streams (which are initiated by the node behind
+// a NAT / firewall once we establish a connection to it through a relay).
+//
+// listenAddrs MUST only return public addresses.
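+//
+// Most applications enable this service through the libp2p.EnableHolePunching
+// host option rather than constructing it directly. A direct construction
+// (sketch) looks like:
+//
+//	ids, err := identify.NewIDService(h)
+//	// handle err ...
+//	svc, err := holepunch.NewService(h, ids, listenAddrs)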
+func NewService(h host.Host, ids identify.IDService, listenAddrs func() []ma.Multiaddr, opts ...Option) (*Service, error) {
+ if ids == nil {
+ return nil, errors.New("identify service can't be nil")
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+ s := &Service{
+ ctx: ctx,
+ ctxCancel: cancel,
+ host: h,
+ ids: ids,
+ listenAddrs: listenAddrs,
+ hasPublicAddrsChan: make(chan struct{}),
+ directDialTimeout: defaultDirectDialTimeout,
+ legacyBehavior: true,
+ }
+
+ for _, opt := range opts {
+ if err := opt(s); err != nil {
+ cancel()
+ return nil, err
+ }
+ }
+ s.tracer.Start()
+
+ s.refCount.Add(1)
+ go s.waitForPublicAddr()
+
+ return s, nil
+}
+
+func (s *Service) waitForPublicAddr() {
+ defer s.refCount.Done()
+
+ log.Debug("waiting until we have at least one public address", "peer", s.host.ID())
+
+ // TODO: We should have an event here that fires when identify discovers a new
+ // address.
+ // As we currently don't have an event like this, just check our observed addresses
+ // regularly (exponential backoff starting at 250 ms, capped at 5s).
+ duration := 250 * time.Millisecond
+ const maxDuration = 5 * time.Second
+ t := time.NewTimer(duration)
+ defer t.Stop()
+ for {
+ if len(s.listenAddrs()) > 0 {
+			log.Debug("host now has a public address", "hostID", s.host.ID(), "addresses", s.host.Addrs())
+ s.host.SetStreamHandler(Protocol, s.handleNewStream)
+ break
+ }
+
+ select {
+ case <-s.ctx.Done():
+ return
+ case <-t.C:
+ duration *= 2
+ if duration > maxDuration {
+ duration = maxDuration
+ }
+ t.Reset(duration)
+ }
+ }
+
+	s.holePuncherMx.Lock()
+	if s.ctx.Err() != nil {
+		// service is closed
+		s.holePuncherMx.Unlock()
+		return
+	}
+ s.holePuncher = newHolePuncher(s.host, s.ids, s.listenAddrs, s.tracer, s.filter)
+ s.holePuncher.directDialTimeout = s.directDialTimeout
+ s.holePuncher.legacyBehavior = s.legacyBehavior
+ s.holePuncherMx.Unlock()
+ close(s.hasPublicAddrsChan)
+}
+
+// Close closes the Hole Punch Service.
+func (s *Service) Close() error {
+ var err error
+ s.ctxCancel()
+ s.holePuncherMx.Lock()
+ if s.holePuncher != nil {
+ err = s.holePuncher.Close()
+ }
+ s.holePuncherMx.Unlock()
+ s.tracer.Close()
+ s.host.RemoveStreamHandler(Protocol)
+ s.refCount.Wait()
+ return err
+}
+
+func (s *Service) incomingHolePunch(str network.Stream) (rtt time.Duration, remoteAddrs []ma.Multiaddr, ownAddrs []ma.Multiaddr, err error) {
+ // sanity check: a hole punch request should only come from peers behind a relay
+ if !isRelayAddress(str.Conn().RemoteMultiaddr()) {
+		return 0, nil, nil, fmt.Errorf("received hole punch stream on a non-relayed connection: %s", str.Conn().RemoteMultiaddr())
+ }
+ ownAddrs = s.listenAddrs()
+ if s.filter != nil {
+ ownAddrs = s.filter.FilterLocal(str.Conn().RemotePeer(), ownAddrs)
+ }
+
+ // If we can't tell the peer where to dial us, there's no point in starting the hole punching.
+ if len(ownAddrs) == 0 {
+ return 0, nil, nil, errors.New("rejecting hole punch request, as we don't have any public addresses")
+ }
+
+ if err := str.Scope().ReserveMemory(maxMsgSize, network.ReservationPriorityAlways); err != nil {
+ log.Debug("error reserving memory for stream", "err", err)
+ return 0, nil, nil, err
+ }
+ defer str.Scope().ReleaseMemory(maxMsgSize)
+
+ wr := pbio.NewDelimitedWriter(str)
+ rd := pbio.NewDelimitedReader(str, maxMsgSize)
+
+ // Read Connect message
+ msg := new(pb.HolePunch)
+
+ str.SetDeadline(time.Now().Add(StreamTimeout))
+
+ if err := rd.ReadMsg(msg); err != nil {
+ return 0, nil, nil, fmt.Errorf("failed to read message from initiator: %w", err)
+ }
+ if t := msg.GetType(); t != pb.HolePunch_CONNECT {
+ return 0, nil, nil, fmt.Errorf("expected CONNECT message from initiator but got %d", t)
+ }
+
+ obsDial := removeRelayAddrs(addrsFromBytes(msg.ObsAddrs))
+ if s.filter != nil {
+ obsDial = s.filter.FilterRemote(str.Conn().RemotePeer(), obsDial)
+ }
+
+ log.Debug("received hole punch request", "peer", str.Conn().RemotePeer(), "addrs", obsDial)
+ if len(obsDial) == 0 {
+ return 0, nil, nil, errors.New("expected CONNECT message to contain at least one address")
+ }
+
+ // Write CONNECT message
+ msg.Reset()
+ msg.Type = pb.HolePunch_CONNECT.Enum()
+ msg.ObsAddrs = addrsToBytes(ownAddrs)
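+	// The time between sending our CONNECT and receiving the initiator's SYNC
+	// is one round trip; the initiator makes an equivalent measurement on its
+	// side to time its dial.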
+ tstart := time.Now()
+ if err := wr.WriteMsg(msg); err != nil {
+ return 0, nil, nil, fmt.Errorf("failed to write CONNECT message to initiator: %w", err)
+ }
+
+ // Read SYNC message
+ msg.Reset()
+ if err := rd.ReadMsg(msg); err != nil {
+ return 0, nil, nil, fmt.Errorf("failed to read message from initiator: %w", err)
+ }
+ if t := msg.GetType(); t != pb.HolePunch_SYNC {
+ return 0, nil, nil, fmt.Errorf("expected SYNC message from initiator but got %d", t)
+ }
+ return time.Since(tstart), obsDial, ownAddrs, nil
+}
+
+func (s *Service) handleNewStream(str network.Stream) {
+ // Check directionality of the underlying connection.
+ // Peer A receives an inbound connection from peer B.
+ // Peer A opens a new hole punch stream to peer B.
+ // Peer B receives this stream, calling this function.
+ // Peer B sees the underlying connection as an outbound connection.
+ if str.Conn().Stat().Direction == network.DirInbound {
+ str.Reset()
+ return
+ }
+
+ if err := str.Scope().SetService(ServiceName); err != nil {
+ log.Debug("error attaching stream to holepunch service", "err", err)
+ str.Reset()
+ return
+ }
+
+ rp := str.Conn().RemotePeer()
+ rtt, addrs, ownAddrs, err := s.incomingHolePunch(str)
+ if err != nil {
+ s.tracer.ProtocolError(rp, err)
+ log.Debug("error handling holepunching stream", "peer", rp, "err", err)
+ str.Reset()
+ return
+ }
+ str.Close()
+
+ // Hole punch now by forcing a connect
+ pi := peer.AddrInfo{
+ ID: rp,
+ Addrs: addrs,
+ }
+ s.tracer.StartHolePunch(rp, addrs, rtt)
+ log.Debug("starting hole punch", "peer", rp)
+ start := time.Now()
+ s.tracer.HolePunchAttempt(pi.ID)
+ ctx, cancel := context.WithTimeout(s.ctx, s.directDialTimeout)
+ isClient := false
+ if s.legacyBehavior {
+ isClient = true
+ }
+ err = holePunchConnect(ctx, s.host, pi, isClient)
+ cancel()
+ dt := time.Since(start)
+ s.tracer.EndHolePunch(rp, dt, err)
+ s.tracer.HolePunchFinished("receiver", 1, addrs, ownAddrs, getDirectConnection(s.host, rp))
+}
+
+// DirectConnect is only exposed for testing purposes.
+// TODO: find a solution for this.
+func (s *Service) DirectConnect(p peer.ID) error {
+ <-s.hasPublicAddrsChan
+ s.holePuncherMx.Lock()
+ holePuncher := s.holePuncher
+ s.holePuncherMx.Unlock()
+ return holePuncher.DirectConnect(p)
+}
diff --git a/p2p/protocol/holepunch/tracer.go b/p2p/protocol/holepunch/tracer.go
new file mode 100644
index 0000000000..3ba06f653d
--- /dev/null
+++ b/p2p/protocol/holepunch/tracer.go
@@ -0,0 +1,290 @@
+package holepunch
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+const (
+ tracerGCInterval = 2 * time.Minute
+ tracerCacheDuration = 5 * time.Minute
+)
+
+// WithTracer enables holepunch tracing with EventTracer et
+func WithTracer(et EventTracer) Option {
+ return func(hps *Service) error {
+ hps.tracer = &tracer{
+ et: et,
+ mt: nil,
+ self: hps.host.ID(),
+ peers: make(map[peer.ID]peerInfo),
+ }
+ return nil
+ }
+}
+
+// WithMetricsTracer enables holepunch Tracing with MetricsTracer mt
+func WithMetricsTracer(mt MetricsTracer) Option {
+ return func(hps *Service) error {
+ hps.tracer = &tracer{
+ et: nil,
+ mt: mt,
+ self: hps.host.ID(),
+ peers: make(map[peer.ID]peerInfo),
+ }
+ return nil
+ }
+}
+
+// WithMetricsAndEventTracer enables holepunch tracking with MetricsTracer and EventTracer
+func WithMetricsAndEventTracer(mt MetricsTracer, et EventTracer) Option {
+ return func(hps *Service) error {
+ hps.tracer = &tracer{
+ et: et,
+ mt: mt,
+ self: hps.host.ID(),
+ peers: make(map[peer.ID]peerInfo),
+ }
+ return nil
+ }
+}
+
+type tracer struct {
+ et EventTracer
+ mt MetricsTracer
+ self peer.ID
+
+ refCount sync.WaitGroup
+ ctx context.Context
+ ctxCancel context.CancelFunc
+
+ mutex sync.Mutex
+ peers map[peer.ID]peerInfo
+}
+
+type peerInfo struct {
+ counter int
+ last time.Time
+}
+
+type EventTracer interface {
+ Trace(evt *Event)
+}
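+
+// A minimal EventTracer implementation (sketch) only needs a Trace method,
+// e.g. one that logs every event:
+//
+//	type loggingTracer struct{}
+//
+//	func (loggingTracer) Trace(evt *holepunch.Event) { log.Printf("%+v", evt) }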
+
+type Event struct {
+ Timestamp int64 // UNIX nanos
+ Peer peer.ID // local peer ID
+ Remote peer.ID // remote peer ID
+ Type string // event type
+ Evt interface{} // the actual event
+}
+
+// Event Types
+const (
+ DirectDialEvtT = "DirectDial"
+ ProtocolErrorEvtT = "ProtocolError"
+ StartHolePunchEvtT = "StartHolePunch"
+ EndHolePunchEvtT = "EndHolePunch"
+ HolePunchAttemptEvtT = "HolePunchAttempt"
+)
+
+// Event Objects
+type DirectDialEvt struct {
+ Success bool
+ EllapsedTime time.Duration
+ Error string `json:",omitempty"`
+}
+
+type ProtocolErrorEvt struct {
+ Error string
+}
+
+type StartHolePunchEvt struct {
+ RemoteAddrs []string
+ RTT time.Duration
+}
+
+type EndHolePunchEvt struct {
+ Success bool
+ EllapsedTime time.Duration
+ Error string `json:",omitempty"`
+}
+
+type HolePunchAttemptEvt struct {
+ Attempt int
+}
+
+// tracer interface
+func (t *tracer) DirectDialSuccessful(p peer.ID, dt time.Duration) {
+ if t == nil {
+ return
+ }
+
+ if t.et != nil {
+ t.et.Trace(&Event{
+ Timestamp: time.Now().UnixNano(),
+ Peer: t.self,
+ Remote: p,
+ Type: DirectDialEvtT,
+ Evt: &DirectDialEvt{
+ Success: true,
+ EllapsedTime: dt,
+ },
+ })
+ }
+
+ if t.mt != nil {
+ t.mt.DirectDialFinished(true)
+ }
+}
+
+func (t *tracer) DirectDialFailed(p peer.ID, dt time.Duration, err error) {
+ if t == nil {
+ return
+ }
+
+ if t.et != nil {
+ t.et.Trace(&Event{
+ Timestamp: time.Now().UnixNano(),
+ Peer: t.self,
+ Remote: p,
+ Type: DirectDialEvtT,
+ Evt: &DirectDialEvt{
+ Success: false,
+ EllapsedTime: dt,
+ Error: err.Error(),
+ },
+ })
+ }
+
+ if t.mt != nil {
+ t.mt.DirectDialFinished(false)
+ }
+}
+
+func (t *tracer) ProtocolError(p peer.ID, err error) {
+ if t != nil && t.et != nil {
+ t.et.Trace(&Event{
+ Timestamp: time.Now().UnixNano(),
+ Peer: t.self,
+ Remote: p,
+ Type: ProtocolErrorEvtT,
+ Evt: &ProtocolErrorEvt{
+ Error: err.Error(),
+ },
+ })
+ }
+}
+
+func (t *tracer) StartHolePunch(p peer.ID, obsAddrs []ma.Multiaddr, rtt time.Duration) {
+ if t != nil && t.et != nil {
+ addrs := make([]string, 0, len(obsAddrs))
+ for _, a := range obsAddrs {
+ addrs = append(addrs, a.String())
+ }
+
+ t.et.Trace(&Event{
+ Timestamp: time.Now().UnixNano(),
+ Peer: t.self,
+ Remote: p,
+ Type: StartHolePunchEvtT,
+ Evt: &StartHolePunchEvt{
+ RemoteAddrs: addrs,
+ RTT: rtt,
+ },
+ })
+ }
+}
+
+func (t *tracer) EndHolePunch(p peer.ID, dt time.Duration, err error) {
+ if t != nil && t.et != nil {
+ evt := &EndHolePunchEvt{
+ Success: err == nil,
+ EllapsedTime: dt,
+ }
+ if err != nil {
+ evt.Error = err.Error()
+ }
+
+ t.et.Trace(&Event{
+ Timestamp: time.Now().UnixNano(),
+ Peer: t.self,
+ Remote: p,
+ Type: EndHolePunchEvtT,
+ Evt: evt,
+ })
+ }
+}
+
+func (t *tracer) HolePunchFinished(side string, numAttempts int, theirAddrs []ma.Multiaddr, ourAddrs []ma.Multiaddr, directConn network.Conn) {
+ if t != nil && t.mt != nil {
+ t.mt.HolePunchFinished(side, numAttempts, theirAddrs, ourAddrs, directConn)
+ }
+}
+
+func (t *tracer) HolePunchAttempt(p peer.ID) {
+ if t != nil && t.et != nil {
+ now := time.Now()
+ t.mutex.Lock()
+ attempt := t.peers[p]
+ attempt.counter++
+ counter := attempt.counter
+ attempt.last = now
+ t.peers[p] = attempt
+ t.mutex.Unlock()
+
+ t.et.Trace(&Event{
+ Timestamp: now.UnixNano(),
+ Peer: t.self,
+ Remote: p,
+ Type: HolePunchAttemptEvtT,
+ Evt: &HolePunchAttemptEvt{Attempt: counter},
+ })
+ }
+}
+
+// gc cleans up the peers map. This is only run when the tracer is initialised
+// with a non-nil EventTracer.
+func (t *tracer) gc() {
+ defer t.refCount.Done()
+ timer := time.NewTicker(tracerGCInterval)
+ defer timer.Stop()
+
+ for {
+ select {
+ case <-timer.C:
+ now := time.Now()
+ t.mutex.Lock()
+ for id, entry := range t.peers {
+ if entry.last.Before(now.Add(-tracerCacheDuration)) {
+ delete(t.peers, id)
+ }
+ }
+ t.mutex.Unlock()
+ case <-t.ctx.Done():
+ return
+ }
+ }
+}
+
+func (t *tracer) Start() {
+ if t != nil && t.et != nil {
+ t.ctx, t.ctxCancel = context.WithCancel(context.Background())
+ t.refCount.Add(1)
+ go t.gc()
+ }
+}
+
+func (t *tracer) Close() error {
+ if t != nil && t.et != nil {
+ t.ctxCancel()
+ t.refCount.Wait()
+ }
+ return nil
+}
diff --git a/p2p/protocol/holepunch/util.go b/p2p/protocol/holepunch/util.go
new file mode 100644
index 0000000000..2990cbb6f3
--- /dev/null
+++ b/p2p/protocol/holepunch/util.go
@@ -0,0 +1,62 @@
+package holepunch
+
+import (
+ "context"
+ "slices"
+
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+func removeRelayAddrs(addrs []ma.Multiaddr) []ma.Multiaddr {
+ return slices.DeleteFunc(addrs, isRelayAddress)
+}
+
+func isRelayAddress(a ma.Multiaddr) bool {
+ _, err := a.ValueForProtocol(ma.P_CIRCUIT)
+ return err == nil
+}
+
+func addrsToBytes(as []ma.Multiaddr) [][]byte {
+ bzs := make([][]byte, 0, len(as))
+ for _, a := range as {
+ bzs = append(bzs, a.Bytes())
+ }
+ return bzs
+}
+
+func addrsFromBytes(bzs [][]byte) []ma.Multiaddr {
+ addrs := make([]ma.Multiaddr, 0, len(bzs))
+ for _, bz := range bzs {
+ a, err := ma.NewMultiaddrBytes(bz)
+ if err == nil {
+ addrs = append(addrs, a)
+ }
+ }
+ return addrs
+}
+
+func getDirectConnection(h host.Host, p peer.ID) network.Conn {
+ for _, c := range h.Network().ConnsToPeer(p) {
+ if !isRelayAddress(c.RemoteMultiaddr()) {
+ return c
+ }
+ }
+ return nil
+}
+
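+// holePunchConnect dials the peer with simultaneous-connect semantics. Both
+// peers call this at (roughly) the same time; isClient determines which side
+// takes the client role during the handshake of the resulting connection.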
+func holePunchConnect(ctx context.Context, host host.Host, pi peer.AddrInfo, isClient bool) error {
+ holePunchCtx := network.WithSimultaneousConnect(ctx, isClient, "hole-punching")
+ forceDirectConnCtx := network.WithForceDirectDial(holePunchCtx, "hole-punching")
+
+ log.Debug("holepunchConnect", "source_peer", host.ID(), "destination_peer", pi.ID, "addrs", pi.Addrs)
+ if err := host.Connect(forceDirectConnCtx, pi); err != nil {
+ log.Debug("hole punch attempt with peer failed", "destination_peer", pi.ID, "err", err)
+ return err
+ }
+ log.Debug("hole punch successful", "destination_peer", pi.ID)
+ return nil
+}
diff --git a/p2p/protocol/identify/id.go b/p2p/protocol/identify/id.go
index 66930a3b46..ca56c364f9 100644
--- a/p2p/protocol/identify/id.go
+++ b/p2p/protocol/identify/id.go
@@ -1,208 +1,752 @@
package identify
import (
+ "bytes"
"context"
- "strings"
+ "errors"
+ "fmt"
+ "io"
+ "net/netip"
+ "slices"
"sync"
-
- pb "github.com/libp2p/go-libp2p/p2p/protocol/identify/pb"
-
- semver "github.com/coreos/go-semver/semver"
- ggio "github.com/gogo/protobuf/io"
- logging "github.com/ipfs/go-log"
- ic "github.com/libp2p/go-libp2p-crypto"
- host "github.com/libp2p/go-libp2p-host"
- lgbl "github.com/libp2p/go-libp2p-loggables"
- metrics "github.com/libp2p/go-libp2p-metrics"
- mstream "github.com/libp2p/go-libp2p-metrics/stream"
- inet "github.com/libp2p/go-libp2p-net"
- peer "github.com/libp2p/go-libp2p-peer"
- pstore "github.com/libp2p/go-libp2p-peerstore"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/event"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/libp2p/go-libp2p/core/protocol"
+ "github.com/libp2p/go-libp2p/core/record"
+ "github.com/libp2p/go-libp2p/p2p/host/eventbus"
+ useragent "github.com/libp2p/go-libp2p/p2p/protocol/identify/internal/user-agent"
+ "github.com/libp2p/go-libp2p/p2p/protocol/identify/pb"
+ "github.com/libp2p/go-libp2p/x/rate"
+
+ logging "github.com/libp2p/go-libp2p/gologshim"
+ "github.com/libp2p/go-msgio/pbio"
ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
msmux "github.com/multiformats/go-multistream"
+ "google.golang.org/protobuf/proto"
)
var log = logging.Logger("net/identify")
-// ID is the protocol.ID of the Identify Service.
-const ID = "/ipfs/id/1.0.0"
+const (
+ // ID is the protocol.ID of version 1.0.0 of the identify service.
+ ID = "/ipfs/id/1.0.0"
+ // IDPush is the protocol.ID of the Identify push protocol.
+ // It sends full identify messages containing the current state of the peer.
+ IDPush = "/ipfs/id/push/1.0.0"
+ // DefaultTimeout for all id interactions, incoming / outgoing, id / id-push.
+ DefaultTimeout = 5 * time.Second
+ // ServiceName is the default identify service name
+ ServiceName = "libp2p.identify"
+
+ legacyIDSize = 2 * 1024
+ signedIDSize = 8 * 1024
+ maxOwnIdentifyMsgSize = 4 * 1024 // smaller than what we accept. This is 4k to be compatible with rust-libp2p
+ maxMessages = 10
+ maxPushConcurrency = 32
+ // number of addresses to keep for peers we have disconnected from, for peerstore.RecentlyConnectedAddrTTL time.
+ // This number can be small, as we already filter peer addresses based on whether the peer is connected to us over
+ // localhost, a private IP or a public IP address.
+ recentlyConnectedPeerMaxAddrs = 20
+ connectedPeerMaxAddrs = 500
+)
+
+var (
+ defaultNetworkPrefixRateLimits = []rate.PrefixLimit{
+ {Prefix: netip.MustParsePrefix("127.0.0.0/8"), Limit: rate.Limit{}}, // inf
+ {Prefix: netip.MustParsePrefix("::1/128"), Limit: rate.Limit{}}, // inf
+ }
+ defaultGlobalRateLimit = rate.Limit{RPS: 2000, Burst: 3000}
+ defaultIPv4SubnetRateLimits = []rate.SubnetLimit{
+ {PrefixLength: 24, Limit: rate.Limit{RPS: 0.2, Burst: 10}}, // 1 every 5 seconds
+ }
+ defaultIPv6SubnetRateLimits = []rate.SubnetLimit{
+ {PrefixLength: 56, Limit: rate.Limit{RPS: 0.2, Burst: 10}}, // 1 every 5 seconds
+ {PrefixLength: 48, Limit: rate.Limit{RPS: 0.5, Burst: 20}}, // 1 every 2 seconds
+ }
+)
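+
+// To illustrate the defaults above: a single /24 IPv4 subnet gets a burst of
+// 10 identify-push streams and then refills at 0.2 requests per second (one
+// every 5s), while loopback traffic is never limited. A rough sketch of the
+// token-bucket accounting (not the actual limiter implementation):
+//
+//	tokens = min(burst, tokens+elapsed*rps)
+//	if tokens < 1 { reject() } else { tokens-- }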
+
+type identifySnapshot struct {
+ seq uint64
+ protocols []protocol.ID
+ addrs []ma.Multiaddr
+ record *record.Envelope
+}
+
+// Equal reports whether two snapshots are identical.
+// It does NOT compare the sequence number.
+func (s identifySnapshot) Equal(other *identifySnapshot) bool {
+ hasRecord := s.record != nil
+ otherHasRecord := other.record != nil
+ if hasRecord != otherHasRecord {
+ return false
+ }
+ if hasRecord && !s.record.Equal(other.record) {
+ return false
+ }
+ if !slices.Equal(s.protocols, other.protocols) {
+ return false
+ }
+ if len(s.addrs) != len(other.addrs) {
+ return false
+ }
+ for i, a := range s.addrs {
+ if !a.Equal(other.addrs[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+type IDService interface {
+ // IdentifyConn synchronously triggers an identify request on the connection and
+ // waits for it to complete. If the connection is being identified by another
+ // caller, this call will wait. If the connection has already been identified,
+ // it will return immediately.
+ IdentifyConn(network.Conn)
+ // IdentifyWait triggers an identify (if the connection has not already been
+ // identified) and returns a channel that is closed when the identify protocol
+ // completes.
+ IdentifyWait(network.Conn) <-chan struct{}
+ Start()
+ io.Closer
+}
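+
+// A typical consumer blocks until identify has completed before relying on
+// the peerstore. A minimal sketch, assuming ids, conn and ctx are in scope:
+//
+//	select {
+//	case <-ids.IdentifyWait(conn):
+//		// the peerstore now has the peer's addresses and protocols
+//	case <-ctx.Done():
+//	}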
+
+type identifyPushSupport uint8
+
+const (
+ identifyPushSupportUnknown identifyPushSupport = iota
+ identifyPushSupported
+ identifyPushUnsupported
+)
-// LibP2PVersion holds the current protocol version for a client running this code
-// TODO(jbenet): fix the versioning mess.
-const LibP2PVersion = "ipfs/0.1.0"
+type entry struct {
+ // The IdentifyWaitChan is created when IdentifyWait is called for the first time.
+ // IdentifyWait closes this channel when the Identify request completes, or when it fails.
+ IdentifyWaitChan chan struct{}
-var ClientVersion = "go-libp2p/3.3.4"
+ // PushSupport saves our knowledge about the peer's support of the Identify Push protocol.
+ // Before the identify request returns, we don't know yet if the peer supports Identify Push.
+ PushSupport identifyPushSupport
+ // Sequence is the sequence number of the last snapshot we sent to this peer.
+ Sequence uint64
+}
-// IDService is a structure that implements ProtocolIdentify.
+// idService is a structure that implements ProtocolIdentify.
// It is a trivial service that gives the other peer some
// useful information about the local peer. A sort of hello.
//
-// The IDService sends:
-// * Our IPFS Protocol Version
-// * Our IPFS Agent Version
-// * Our public Listen Addresses
-type IDService struct {
- Host host.Host
-
- Reporter metrics.Reporter
- // connections undergoing identification
- // for wait purposes
- currid map[inet.Conn]chan struct{}
- currmu sync.RWMutex
+// The idService sends:
+// - Our libp2p Protocol Version
+// - Our libp2p Agent Version
+// - Our public Listen Addresses
+type idService struct {
+ Host host.Host
+ UserAgent string
+ ProtocolVersion string
+
+ metricsTracer MetricsTracer
+
+ setupCompleted chan struct{} // is closed when Start has finished setting up
+ ctx context.Context
+ ctxCancel context.CancelFunc
+ // track resources that need to be shut down before we shut down
+ refCount sync.WaitGroup
+
+ disableSignedPeerRecord bool
+ timeout time.Duration
+
+ connsMu sync.RWMutex
+ // The conns map contains all connections we're currently handling.
+ // Connections are inserted as soon as they're available in the swarm,
+ // and removed from the map when the connection disconnects.
+ conns map[network.Conn]entry
addrMu sync.Mutex
- // our own observed addresses.
- // TODO: instead of expiring, remove these when we disconnect
- observedAddrs ObservedAddrSet
+ emitters struct {
+ evtPeerProtocolsUpdated event.Emitter
+ evtPeerIdentificationCompleted event.Emitter
+ evtPeerIdentificationFailed event.Emitter
+ }
+
+ currentSnapshot struct {
+ sync.Mutex
+ snapshot identifySnapshot
+ }
+
+ rateLimiter *rate.Limiter
}
-// NewIDService constructs a new *IDService and activates it by
+// NewIDService constructs a new *idService and activates it by
// attaching its stream handler to the given host.Host.
-func NewIDService(h host.Host) *IDService {
- s := &IDService{
- Host: h,
- currid: make(map[inet.Conn]chan struct{}),
+func NewIDService(h host.Host, opts ...Option) (*idService, error) {
+ cfg := config{
+ timeout: DefaultTimeout,
}
- h.SetStreamHandler(ID, s.RequestHandler)
- h.Network().Notify((*netNotifiee)(s))
- return s
+ for _, opt := range opts {
+ opt(&cfg)
+ }
+
+ userAgent := useragent.DefaultUserAgent()
+ if cfg.userAgent != "" {
+ userAgent = cfg.userAgent
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+ s := &idService{
+ Host: h,
+ UserAgent: userAgent,
+ ProtocolVersion: cfg.protocolVersion,
+ ctx: ctx,
+ ctxCancel: cancel,
+ conns: make(map[network.Conn]entry),
+ disableSignedPeerRecord: cfg.disableSignedPeerRecord,
+ setupCompleted: make(chan struct{}),
+ metricsTracer: cfg.metricsTracer,
+ timeout: cfg.timeout,
+ rateLimiter: &rate.Limiter{
+ GlobalLimit: defaultGlobalRateLimit,
+ NetworkPrefixLimits: defaultNetworkPrefixRateLimits,
+ SubnetRateLimiter: rate.SubnetLimiter{
+ IPv4SubnetLimits: defaultIPv4SubnetRateLimits,
+ IPv6SubnetLimits: defaultIPv6SubnetRateLimits,
+ GracePeriod: 1 * time.Minute,
+ },
+ },
+ }
+
+ var err error
+ s.emitters.evtPeerProtocolsUpdated, err = h.EventBus().Emitter(&event.EvtPeerProtocolsUpdated{})
+ if err != nil {
+ log.Warn("identify service not emitting peer protocol updates", "err", err)
+ }
+ s.emitters.evtPeerIdentificationCompleted, err = h.EventBus().Emitter(&event.EvtPeerIdentificationCompleted{})
+ if err != nil {
+ log.Warn("identify service not emitting identification completed events", "err", err)
+ }
+ s.emitters.evtPeerIdentificationFailed, err = h.EventBus().Emitter(&event.EvtPeerIdentificationFailed{})
+ if err != nil {
+ log.Warn("identify service not emitting identification failed events", "err", err)
+ }
+ return s, nil
}
-// OwnObservedAddrs returns the addresses peers have reported we've dialed from
-func (ids *IDService) OwnObservedAddrs() []ma.Multiaddr {
- return ids.observedAddrs.Addrs()
+func (ids *idService) Start() {
+ ids.Host.Network().Notify((*netNotifiee)(ids))
+ ids.Host.SetStreamHandler(ID, ids.handleIdentifyRequest)
+ ids.Host.SetStreamHandler(IDPush, ids.rateLimiter.Limit(ids.handlePush))
+ ids.updateSnapshot()
+ close(ids.setupCompleted)
+
+ ids.refCount.Add(1)
+ go ids.loop(ids.ctx)
}
-func (ids *IDService) IdentifyConn(c inet.Conn) {
- ids.currmu.Lock()
- if wait, found := ids.currid[c]; found {
- ids.currmu.Unlock()
- log.Debugf("IdentifyConn called twice on: %s", c)
- <-wait // already identifying it. wait for it.
+func (ids *idService) loop(ctx context.Context) {
+ defer ids.refCount.Done()
+
+ sub, err := ids.Host.EventBus().Subscribe(
+ []any{&event.EvtLocalProtocolsUpdated{}, &event.EvtLocalAddressesUpdated{}},
+ eventbus.BufSize(256),
+ eventbus.Name("identify (loop)"),
+ )
+ if err != nil {
+ log.Error("failed to subscribe to events on the bus", "err", err)
return
}
- ch := make(chan struct{})
- ids.currid[c] = ch
- ids.currmu.Unlock()
+ defer sub.Close()
+
+ // Send pushes from a separate goroutine.
+ // That way, we can end up with
+ // * this goroutine busy looping over all peers in sendPushes
+ // * another push being queued in the triggerPush channel
+ triggerPush := make(chan struct{}, 1)
+ ids.refCount.Add(1)
+ go func() {
+ defer ids.refCount.Done()
+
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-triggerPush:
+ ids.sendPushes(ctx)
+ }
+ }
+ }()
+
+ for {
+ select {
+ case e, ok := <-sub.Out():
+ if !ok {
+ return
+ }
+ if updated := ids.updateSnapshot(); !updated {
+ continue
+ }
+ if ids.metricsTracer != nil {
+ ids.metricsTracer.TriggeredPushes(e)
+ }
+ select {
+ case triggerPush <- struct{}{}:
+ default: // we already have one more push queued, no need to queue another one
+ }
+ case <-ctx.Done():
+ return
+ }
+ }
+}
+
+func (ids *idService) sendPushes(ctx context.Context) {
+ ids.connsMu.RLock()
+ conns := make([]network.Conn, 0, len(ids.conns))
+ for c, e := range ids.conns {
+ // Push even if we don't know whether push is supported.
+ // That is only the case while the initial identify request is still in flight.
+ if e.PushSupport == identifyPushSupported || e.PushSupport == identifyPushSupportUnknown {
+ conns = append(conns, c)
+ }
+ }
+ ids.connsMu.RUnlock()
+
+ sem := make(chan struct{}, maxPushConcurrency)
+ var wg sync.WaitGroup
+ for _, c := range conns {
+ // check if the connection is still alive
+ ids.connsMu.RLock()
+ e, ok := ids.conns[c]
+ ids.connsMu.RUnlock()
+ if !ok {
+ continue
+ }
+ // check if we already sent the current snapshot to this peer
+ ids.currentSnapshot.Lock()
+ snapshot := ids.currentSnapshot.snapshot
+ ids.currentSnapshot.Unlock()
+ if e.Sequence >= snapshot.seq {
+ log.Debug("already sent this snapshot to peer", "peer", c.RemotePeer(), "seq", snapshot.seq)
+ continue
+ }
+ // we haven't, send it now
+ sem <- struct{}{}
+ wg.Add(1)
+ go func(c network.Conn) {
+ defer wg.Done()
+ defer func() { <-sem }()
+ ctx, cancel := context.WithTimeout(ctx, ids.timeout)
+ defer cancel()
+
+ str, err := newStreamAndNegotiate(ctx, c, IDPush, ids.timeout)
+ if err != nil { // connection might have been closed recently
+ return
+ }
+ // TODO: find out if the peer supports push if we didn't have any information about push support
+ if err := ids.sendIdentifyResp(str, true); err != nil {
+ log.Debug("failed to send identify push", "peer", c.RemotePeer(), "error", err)
+ return
+ }
+ }(c)
+ }
+ wg.Wait()
+}
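+
+// sendPushes bounds concurrency with a buffered channel used as a counting
+// semaphore: sending into sem blocks once maxPushConcurrency goroutines are
+// in flight, and each goroutine frees its slot when it returns. A
+// stripped-down sketch of the same pattern (push is a placeholder):
+//
+//	sem := make(chan struct{}, maxPushConcurrency)
+//	for _, c := range conns {
+//		sem <- struct{}{} // acquire a slot
+//		go func(c network.Conn) {
+//			defer func() { <-sem }() // release the slot
+//			push(c)
+//		}(c)
+//	}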
+
+// Close shuts down the idService
+func (ids *idService) Close() error {
+ ids.ctxCancel()
+ ids.refCount.Wait()
+ return nil
+}
+
+// IdentifyConn runs the Identify protocol on a connection.
+// It returns when we've received the peer's Identify message (or the request fails).
+// If successful, the peer store will contain the peer's addresses and supported protocols.
+func (ids *idService) IdentifyConn(c network.Conn) {
+ <-ids.IdentifyWait(c)
+}
+
+// IdentifyWait runs the Identify protocol on a connection.
+// It doesn't block and returns a channel that is closed when we receive
+// the peer's Identify message (or the request fails).
+// If successful, the peer store will contain the peer's addresses and supported protocols.
+func (ids *idService) IdentifyWait(c network.Conn) <-chan struct{} {
+ ids.connsMu.Lock()
+ defer ids.connsMu.Unlock()
+
+ e, found := ids.conns[c]
+ if !found {
+ // No entry found. We may have received an out-of-order notification. Check whether we should be tracking this conn (i.e. we're still connected).
+ // We hold the ids.connsMu lock, so this is safe: if we are connected, a disconnect event will be processed after we release it.
+ if c.IsClosed() {
+ log.Debug("connection not found in identify service", "peer", c.RemotePeer())
+ ch := make(chan struct{})
+ close(ch)
+ return ch
+ } else {
+ ids.addConnWithLock(c)
+ }
+ }
+
+ if e.IdentifyWaitChan != nil {
+ return e.IdentifyWaitChan
+ }
+ // First call to IdentifyWait for this connection. Create the channel.
+ e.IdentifyWaitChan = make(chan struct{})
+ ids.conns[c] = e
+
+ // Spawn an identify. The connection may actually be closed
+ // already, but that doesn't really matter. We'll fail to open a
+ // stream and then forget the connection.
+ go func() {
+ defer close(e.IdentifyWaitChan)
+ if err := ids.identifyConn(c); err != nil {
+ log.Warn("failed to identify peer", "peer", c.RemotePeer(), "error", err)
+ ids.emitters.evtPeerIdentificationFailed.Emit(event.EvtPeerIdentificationFailed{Peer: c.RemotePeer(), Reason: err})
+ return
+ }
+ }()
- defer close(ch)
+ return e.IdentifyWaitChan
+}
- s, err := c.NewStream()
+// newStreamAndNegotiate opens a new stream on the given connection and negotiates the given protocol.
+func newStreamAndNegotiate(ctx context.Context, c network.Conn, proto protocol.ID, timeout time.Duration) (network.Stream, error) {
+ s, err := c.NewStream(network.WithAllowLimitedConn(ctx, "identify"))
if err != nil {
- log.Debugf("error opening initial stream for %s: %s", ID, err)
- log.Event(context.TODO(), "IdentifyOpenFailed", c.RemotePeer())
- c.Close()
- return
+ log.Debug("error opening identify stream", "peer", c.RemotePeer(), "error", err)
+ return nil, fmt.Errorf("failed to open new stream: %w", err)
}
- defer s.Close()
- s.SetProtocol(ID)
+ // Ignore the error. Consistent with our previous behavior. (See https://github.com/libp2p/go-libp2p/issues/3109)
+ _ = s.SetDeadline(time.Now().Add(timeout))
- if ids.Reporter != nil {
- s = mstream.WrapStream(s, ids.Reporter)
+ if err := s.SetProtocol(proto); err != nil {
+ log.Warn("error setting identify protocol for stream", "err", err)
+ _ = s.Reset()
+ return nil, fmt.Errorf("failed to set protocol: %w", err)
}
// ok give the response to our handler.
- if err := msmux.SelectProtoOrFail(ID, s); err != nil {
- log.Event(context.TODO(), "IdentifyOpenFailed", c.RemotePeer(), logging.Metadata{"error": err})
- return
+ if err := msmux.SelectProtoOrFail(proto, s); err != nil {
+ log.Info("failed negotiate identify protocol with peer", "peer", c.RemotePeer(), "error", err)
+ _ = s.Reset()
+ return nil, fmt.Errorf("multistream mux select protocol failed: %w", err)
}
+ return s, nil
+}
- ids.ResponseHandler(s)
+func (ids *idService) identifyConn(c network.Conn) error {
+ ctx, cancel := context.WithTimeout(context.Background(), ids.timeout)
+ defer cancel()
+ s, err := newStreamAndNegotiate(network.WithAllowLimitedConn(ctx, "identify"), c, ID, ids.timeout)
+ if err != nil {
+ log.Debug("error opening identify stream", "peer", c.RemotePeer(), "error", err)
+ return err
+ }
- ids.currmu.Lock()
- _, found := ids.currid[c]
- delete(ids.currid, c)
- ids.currmu.Unlock()
+ return ids.handleIdentifyResponse(s, false)
+}
- if !found {
- log.Errorf("IdentifyConn failed to find channel (programmer error) for %s", c)
- return
+// handlePush handles incoming identify push streams
+func (ids *idService) handlePush(s network.Stream) {
+ s.SetDeadline(time.Now().Add(ids.timeout))
+ if err := ids.handleIdentifyResponse(s, true); err != nil {
+ log.Debug("failed to handle identify push", "err", err)
}
}
-func (ids *IDService) RequestHandler(s inet.Stream) {
+func (ids *idService) handleIdentifyRequest(s network.Stream) {
+ _ = ids.sendIdentifyResp(s, false)
+}
+
+func (ids *idService) sendIdentifyResp(s network.Stream, isPush bool) error {
+ if err := s.Scope().SetService(ServiceName); err != nil {
+ s.Reset()
+ return fmt.Errorf("failed to attaching stream to identify service: %w", err)
+ }
defer s.Close()
- c := s.Conn()
- if ids.Reporter != nil {
- s = mstream.WrapStream(s, ids.Reporter)
+ ids.currentSnapshot.Lock()
+ snapshot := ids.currentSnapshot.snapshot
+ ids.currentSnapshot.Unlock()
+
+ log.Debug("sending snapshot", "seq", snapshot.seq, "protocols", snapshot.protocols, "addrs", snapshot.addrs)
+
+ mes := ids.createBaseIdentifyResponse(s.Conn(), &snapshot)
+ mes.SignedPeerRecord = ids.getSignedRecord(&snapshot)
+
+ log.Debug("sending identify message", "id", ID, "remote_peer", s.Conn().RemotePeer(), "remote_multiaddr", s.Conn().RemoteMultiaddr())
+ if err := ids.writeChunkedIdentifyMsg(s, mes); err != nil {
+ return err
}
- w := ggio.NewDelimitedWriter(s)
- mes := pb.Identify{}
- ids.populateMessage(&mes, s.Conn())
- w.WriteMsg(&mes)
+ if ids.metricsTracer != nil {
+ ids.metricsTracer.IdentifySent(isPush, len(mes.Protocols), len(mes.ListenAddrs))
+ }
- log.Debugf("%s sent message to %s %s", ID,
- c.RemotePeer(), c.RemoteMultiaddr())
+ ids.connsMu.Lock()
+ defer ids.connsMu.Unlock()
+ e, ok := ids.conns[s.Conn()]
+ // The connection might already have been closed.
+ // We *should* receive the Connected notification from the swarm before we're able to accept the peer's
+ // Identify stream, but if that for some reason doesn't work, we also wouldn't have a map entry here.
+ // The only consequence would be that we send a spurious Push to that peer later.
+ if !ok {
+ return nil
+ }
+ e.Sequence = snapshot.seq
+ ids.conns[s.Conn()] = e
+ return nil
}
-func (ids *IDService) ResponseHandler(s inet.Stream) {
- defer s.Close()
+func (ids *idService) handleIdentifyResponse(s network.Stream, isPush bool) error {
+ if err := s.Scope().SetService(ServiceName); err != nil {
+ log.Warn("error attaching stream to identify service", "err", err)
+ s.Reset()
+ return err
+ }
+
+ if err := s.Scope().ReserveMemory(signedIDSize, network.ReservationPriorityAlways); err != nil {
+ log.Warn("error reserving memory for identify stream", "err", err)
+ s.Reset()
+ return err
+ }
+ defer s.Scope().ReleaseMemory(signedIDSize)
+
c := s.Conn()
- r := ggio.NewDelimitedReader(s, 2048)
- mes := pb.Identify{}
- if err := r.ReadMsg(&mes); err != nil {
- log.Warning("error reading identify message: ", err)
- return
+ r := pbio.NewDelimitedReader(s, signedIDSize)
+ mes := &pb.Identify{}
+
+ if err := readAllIDMessages(r, mes); err != nil {
+ log.Warn("error reading identify message", "err", err)
+ s.Reset()
+ return err
+ }
+
+ defer s.Close()
+
+ log.Debug("received identify message", "protocol", s.Protocol(), "remote_peer", c.RemotePeer(), "remote_multiaddr", c.RemoteMultiaddr())
+
+ ids.consumeMessage(mes, c, isPush)
+
+ if ids.metricsTracer != nil {
+ ids.metricsTracer.IdentifyReceived(isPush, len(mes.Protocols), len(mes.ListenAddrs))
+ }
+
+ ids.connsMu.Lock()
+ defer ids.connsMu.Unlock()
+ e, ok := ids.conns[c]
+ if !ok { // might already have disconnected
+ return nil
+ }
+ sup, err := ids.Host.Peerstore().SupportsProtocols(c.RemotePeer(), IDPush)
+ if supportsIdentifyPush := err == nil && len(sup) > 0; supportsIdentifyPush {
+ e.PushSupport = identifyPushSupported
+ } else {
+ e.PushSupport = identifyPushUnsupported
+ }
+
+ if ids.metricsTracer != nil {
+ ids.metricsTracer.ConnPushSupport(e.PushSupport)
}
- ids.consumeMessage(&mes, c)
- log.Debugf("%s received message from %s %s", ID,
- c.RemotePeer(), c.RemoteMultiaddr())
+ ids.conns[c] = e
+ return nil
}
-func (ids *IDService) populateMessage(mes *pb.Identify, c inet.Conn) {
+func readAllIDMessages(r pbio.Reader, finalMsg proto.Message) error {
+ mes := &pb.Identify{}
+ for i := 0; i < maxMessages; i++ {
+ switch err := r.ReadMsg(mes); err {
+ case io.EOF:
+ return nil
+ case nil:
+ proto.Merge(finalMsg, mes)
+ default:
+ return err
+ }
+ }
+
+ return fmt.Errorf("too many parts")
+}
- // set protocols this node is currently handling
+func (ids *idService) updateSnapshot() (updated bool) {
protos := ids.Host.Mux().Protocols()
- mes.Protocols = make([]string, len(protos))
- for i, p := range protos {
- mes.Protocols[i] = string(p)
+ slices.Sort(protos)
+
+ addrs := ids.Host.Addrs()
+ slices.SortFunc(addrs, func(a, b ma.Multiaddr) int { return bytes.Compare(a.Bytes(), b.Bytes()) })
+
+ usedSpace := len(ids.ProtocolVersion) + len(ids.UserAgent)
+ for i := 0; i < len(protos); i++ {
+ usedSpace += len(protos[i])
}
+ addrs = trimHostAddrList(addrs, maxOwnIdentifyMsgSize-usedSpace-256) // 256 bytes of buffer
- // observed address so other side is informed of their
- // "public" address, at least in relation to us.
- mes.ObservedAddr = c.RemoteMultiaddr().Bytes()
+ snapshot := identifySnapshot{
+ addrs: addrs,
+ protocols: protos,
+ }
+
+ if !ids.disableSignedPeerRecord {
+ if cab, ok := peerstore.GetCertifiedAddrBook(ids.Host.Peerstore()); ok {
+ snapshot.record = cab.GetPeerRecord(ids.Host.ID())
+ }
+ }
+
+ ids.currentSnapshot.Lock()
+ defer ids.currentSnapshot.Unlock()
+
+ if ids.currentSnapshot.snapshot.Equal(&snapshot) {
+ return false
+ }
- // set listen addrs, get our latest addrs from Host.
- laddrs := ids.Host.Addrs()
- mes.ListenAddrs = make([][]byte, len(laddrs))
- for i, addr := range laddrs {
- mes.ListenAddrs[i] = addr.Bytes()
+ snapshot.seq = ids.currentSnapshot.snapshot.seq + 1
+ ids.currentSnapshot.snapshot = snapshot
+
+ log.Debug("updating snapshot", "seq", snapshot.seq, "addrs", snapshot.addrs)
+ return true
+}
+
+func (ids *idService) writeChunkedIdentifyMsg(s network.Stream, mes *pb.Identify) error {
+ writer := pbio.NewDelimitedWriter(s)
+
+ if mes.SignedPeerRecord == nil || proto.Size(mes) <= legacyIDSize {
+ return writer.WriteMsg(mes)
}
- log.Debugf("%s sent listen addrs to %s: %s", c.LocalPeer(), c.RemotePeer(), laddrs)
+ sr := mes.SignedPeerRecord
+ mes.SignedPeerRecord = nil
+ if err := writer.WriteMsg(mes); err != nil {
+ return err
+ }
+ // then write just the signed record
+ return writer.WriteMsg(&pb.Identify{SignedPeerRecord: sr})
+}
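+
+// writeChunkedIdentifyMsg is the counterpart of readAllIDMessages: when the
+// message including the signed peer record would exceed legacyIDSize, it is
+// written as two delimited protobufs on the same stream, and the reader
+// proto.Merges up to maxMessages parts back into a single pb.Identify:
+//
+//	part 1: {ListenAddrs, Protocols, ...}
+//	part 2: {SignedPeerRecord: sr}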
+
+func (ids *idService) createBaseIdentifyResponse(conn network.Conn, snapshot *identifySnapshot) *pb.Identify {
+ mes := &pb.Identify{}
+
+ remoteAddr := conn.RemoteMultiaddr()
+ localAddr := conn.LocalMultiaddr()
+
+ // set protocols this node is currently handling
+ mes.Protocols = protocol.ConvertToStrings(snapshot.protocols)
+
+ // observed address so other side is informed of their
+ // "public" address, at least in relation to us.
+ mes.ObservedAddr = remoteAddr.Bytes()
+
+ // populate unsigned addresses.
+ // peers that do not yet support signed addresses will need this.
+ // Note: LocalMultiaddr is sometimes 0.0.0.0
+ viaLoopback := manet.IsIPLoopback(localAddr) || manet.IsIPLoopback(remoteAddr)
+ mes.ListenAddrs = make([][]byte, 0, len(snapshot.addrs))
+ for _, addr := range snapshot.addrs {
+ if !viaLoopback && manet.IsIPLoopback(addr) {
+ continue
+ }
+ mes.ListenAddrs = append(mes.ListenAddrs, addr.Bytes())
+ }
// set our public key
ownKey := ids.Host.Peerstore().PubKey(ids.Host.ID())
+
+ // check if we even have a public key.
if ownKey == nil {
- log.Errorf("did not have own public key in Peerstore")
+ // public key is nil. We are either using an insecure transport, or something erratic happened.
+ // check if we're even operating in "secure mode"
+ if ids.Host.Peerstore().PrivKey(ids.Host.ID()) != nil {
+ // private key is present. But NO public key. Something bad happened.
+ log.Error("did not have own public key in Peerstore")
+ }
+ // if neither key is present, it is safe to assume that we are using an insecure transport.
} else {
- if kb, err := ownKey.Bytes(); err != nil {
- log.Errorf("failed to convert key to bytes")
+ // public key is present. Safe to proceed.
+ if kb, err := crypto.MarshalPublicKey(ownKey); err != nil {
+ log.Error("failed to convert key to bytes")
} else {
mes.PublicKey = kb
}
}
// set protocol versions
- pv := LibP2PVersion
- av := ClientVersion
- mes.ProtocolVersion = &pv
- mes.AgentVersion = &av
+ mes.ProtocolVersion = &ids.ProtocolVersion
+ mes.AgentVersion = &ids.UserAgent
+
+ return mes
+}
+
+func (ids *idService) getSignedRecord(snapshot *identifySnapshot) []byte {
+ if ids.disableSignedPeerRecord || snapshot.record == nil {
+ return nil
+ }
+
+ recBytes, err := snapshot.record.Marshal()
+ if err != nil {
+ log.Error("failed to marshal signed record", "err", err)
+ return nil
+ }
+
+ return recBytes
+}
+
+// diff takes two slices of strings (a and b) and computes which elements were added and removed in b
+func diff(a, b []protocol.ID) (added, removed []protocol.ID) {
+ // This is O(n^2), but it's fine because the slices are small.
+ for _, x := range b {
+ var found bool
+ for _, y := range a {
+ if x == y {
+ found = true
+ break
+ }
+ }
+ if !found {
+ added = append(added, x)
+ }
+ }
+ for _, x := range a {
+ var found bool
+ for _, y := range b {
+ if x == y {
+ found = true
+ break
+ }
+ }
+ if !found {
+ removed = append(removed, x)
+ }
+ }
+ return
}
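+
+// For example, with hypothetical protocol IDs, diff([x y], [y z]) yields
+// added=[z] and removed=[x]: z appears only in b, x only in a.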
-func (ids *IDService) consumeMessage(mes *pb.Identify, c inet.Conn) {
+func (ids *idService) consumeMessage(mes *pb.Identify, c network.Conn, isPush bool) {
p := c.RemotePeer()
- // mes.Protocols
- ids.Host.Peerstore().SetProtocols(p, mes.Protocols...)
+ supported, _ := ids.Host.Peerstore().GetProtocols(p)
+ mesProtocols := protocol.ConvertFromStrings(mes.Protocols)
+ added, removed := diff(supported, mesProtocols)
+ ids.Host.Peerstore().SetProtocols(p, mesProtocols...)
+ if isPush {
+ ids.emitters.evtPeerProtocolsUpdated.Emit(event.EvtPeerProtocolsUpdated{
+ Peer: p,
+ Added: added,
+ Removed: removed,
+ })
+ }
- // mes.ObservedAddr
- ids.consumeObservedAddress(mes.GetObservedAddr(), c)
+ obsAddr, err := ma.NewMultiaddrBytes(mes.GetObservedAddr())
+ if err != nil {
+ log.Debug("error parsing received observed addr", "connection", c, "err", err)
+ obsAddr = nil
+ }
// mes.ListenAddrs
laddrs := mes.GetListenAddrs()
@@ -210,71 +754,149 @@ func (ids *IDService) consumeMessage(mes *pb.Identify, c inet.Conn) {
for _, addr := range laddrs {
maddr, err := ma.NewMultiaddrBytes(addr)
if err != nil {
- log.Debugf("%s failed to parse multiaddr from %s %s", ID,
- p, c.RemoteMultiaddr())
+ log.Debug("failed to parse multiaddr", "id", ID, "peer", p, "remote_multiaddr", c.RemoteMultiaddr())
continue
}
lmaddrs = append(lmaddrs, maddr)
}
- // if the address reported by the connection roughly matches their annoucned
- // listener addresses, its likely to be an external NAT address
- if HasConsistentTransport(c.RemoteMultiaddr(), lmaddrs) {
- lmaddrs = append(lmaddrs, c.RemoteMultiaddr())
+ // NOTE: Do not add `c.RemoteMultiaddr()` to the peerstore if the remote
+ // peer doesn't tell us to do so. Otherwise, we'll advertise it.
+ //
+ // This can cause an "addr-splosion" issue where the network will slowly
+ // gossip and collect observed but unadvertised addresses. Given a NAT
+ // that picks random source ports, this can cause DHT nodes to collect
+ // many undialable addresses for other peers.
+
+ // add certified addresses for the peer, if they sent us a signed peer record
+ // otherwise use the unsigned addresses.
+ signedPeerRecord, err := signedPeerRecordFromMessage(mes)
+ if err != nil {
+ log.Debug("error getting peer record from Identify message", "err", err)
}
// Extend the TTLs on the known (probably) good addresses.
// Taking the lock ensures that we don't concurrently process a disconnect.
ids.addrMu.Lock()
+ ttl := peerstore.RecentlyConnectedAddrTTL
switch ids.Host.Network().Connectedness(p) {
- case inet.Connected:
- ids.Host.Peerstore().AddAddrs(p, lmaddrs, pstore.ConnectedAddrTTL)
- default:
- ids.Host.Peerstore().AddAddrs(p, lmaddrs, pstore.RecentlyConnectedAddrTTL)
+ case network.Limited, network.Connected:
+ ttl = peerstore.ConnectedAddrTTL
+ }
+
+ // Downgrade connected and recently connected addrs to a temporary TTL.
+ for _, ttl := range []time.Duration{
+ peerstore.RecentlyConnectedAddrTTL,
+ peerstore.ConnectedAddrTTL,
+ } {
+ ids.Host.Peerstore().UpdateAddrs(p, ttl, peerstore.TempAddrTTL)
+ }
+
+ var addrs []ma.Multiaddr
+ if signedPeerRecord != nil {
+ signedAddrs, err := ids.consumeSignedPeerRecord(c.RemotePeer(), signedPeerRecord)
+ if err != nil {
+ log.Debug("failed to consume signed peer record", "err", err)
+ signedPeerRecord = nil
+ } else {
+ addrs = signedAddrs
+ }
+ } else {
+ addrs = lmaddrs
+ }
+ addrs = filterAddrs(addrs, c.RemoteMultiaddr())
+ if len(addrs) > connectedPeerMaxAddrs {
+ addrs = addrs[:connectedPeerMaxAddrs]
}
+
+ ids.Host.Peerstore().AddAddrs(p, addrs, ttl)
+
+ // Finally, expire all temporary addrs.
+ ids.Host.Peerstore().UpdateAddrs(p, peerstore.TempAddrTTL, 0)
ids.addrMu.Unlock()
- log.Debugf("%s received listen addrs for %s: %s", c.LocalPeer(), c.RemotePeer(), lmaddrs)
+ log.Debug("received listen addresses",
+ "local_peer", c.LocalPeer(),
+ "remote_peer", c.RemotePeer(),
+ "addresses", addrs)
// get protocol versions
pv := mes.GetProtocolVersion()
av := mes.GetAgentVersion()
- // version check. if we shouldn't talk, bail.
- // TODO: at this point, we've already exchanged information.
- // move this into a first handshake before the connection can open streams.
- if !protocolVersionsAreCompatible(pv, LibP2PVersion) {
- logProtocolMismatchDisconnect(c, pv, av)
- c.Close()
- return
- }
-
ids.Host.Peerstore().Put(p, "ProtocolVersion", pv)
ids.Host.Peerstore().Put(p, "AgentVersion", av)
// get the key from the other side. we may not have it (no-auth transport)
ids.consumeReceivedPubKey(c, mes.PublicKey)
+
+ ids.emitters.evtPeerIdentificationCompleted.Emit(event.EvtPeerIdentificationCompleted{
+ Peer: c.RemotePeer(),
+ Conn: c,
+ ListenAddrs: lmaddrs,
+ Protocols: mesProtocols,
+ SignedPeerRecord: signedPeerRecord,
+ ObservedAddr: obsAddr,
+ ProtocolVersion: pv,
+ AgentVersion: av,
+ })
}
-func (ids *IDService) consumeReceivedPubKey(c inet.Conn, kb []byte) {
+func (ids *idService) consumeSignedPeerRecord(p peer.ID, signedPeerRecord *record.Envelope) ([]ma.Multiaddr, error) {
+ if signedPeerRecord.PublicKey == nil {
+ return nil, errors.New("missing pubkey")
+ }
+ id, err := peer.IDFromPublicKey(signedPeerRecord.PublicKey)
+ if err != nil {
+ return nil, fmt.Errorf("failed to derive peer ID: %s", err)
+ }
+ if id != p {
+ return nil, fmt.Errorf("received signed peer record envelope for unexpected peer ID. expected %s, got %s", p, id)
+ }
+ r, err := signedPeerRecord.Record()
+ if err != nil {
+ return nil, fmt.Errorf("failed to obtain record: %w", err)
+ }
+ rec, ok := r.(*peer.PeerRecord)
+ if !ok {
+ return nil, errors.New("not a peer record")
+ }
+ if rec.PeerID != p {
+ return nil, fmt.Errorf("received signed peer record for unexpected peer ID. expected %s, got %s", p, rec.PeerID)
+ }
+ // Don't put the signed peer record into the peer store.
+ // They're not used anywhere.
+ // All we care about are the addresses.
+ return rec.Addrs, nil
+}
+
+func (ids *idService) consumeReceivedPubKey(c network.Conn, kb []byte) {
lp := c.LocalPeer()
rp := c.RemotePeer()
if kb == nil {
- log.Debugf("%s did not receive public key for remote peer: %s", lp, rp)
+ log.Debug("did not receive public key for remote peer",
+ "local_peer", lp,
+ "remote_peer", rp)
return
}
- newKey, err := ic.UnmarshalPublicKey(kb)
+ newKey, err := crypto.UnmarshalPublicKey(kb)
if err != nil {
- log.Errorf("%s cannot unmarshal key from remote peer: %s", lp, rp)
+ log.Warn("cannot unmarshal key from remote peer",
+ "local_peer", lp,
+ "remote_peer", rp,
+ "err", err)
return
}
// verify key matches peer.ID
np, err := peer.IDFromPublicKey(newKey)
if err != nil {
- log.Debugf("%s cannot get peer.ID from key of remote peer: %s, %s", lp, rp, err)
+ log.Debug("cannot get peer.ID from key of remote peer",
+ "local_peer", lp,
+ "remote_peer", rp,
+ "err", err)
return
}
@@ -285,12 +907,18 @@ func (ids *IDService) consumeReceivedPubKey(c inet.Conn, kb []byte) {
// if local peerid is empty, then use the new, sent key.
err := ids.Host.Peerstore().AddPubKey(rp, newKey)
if err != nil {
- log.Debugf("%s could not add key for %s to peerstore: %s", lp, rp, err)
+ log.Debug("could not add key for peer to peerstore",
+ "local_peer", lp,
+ "remote_peer", rp,
+ "err", err)
}
} else {
// we have a local peer.ID and it does not match the sent key... error.
- log.Errorf("%s received key for remote peer %s mismatch: %s", lp, rp, np)
+ log.Error("received key for remote peer mismatch",
+ "local_peer", lp,
+ "remote_peer", rp,
+ "peer_id", np)
}
return
}
@@ -300,7 +928,10 @@ func (ids *IDService) consumeReceivedPubKey(c inet.Conn, kb []byte) {
// no key? no auth transport. set this one.
err := ids.Host.Peerstore().AddPubKey(rp, newKey)
if err != nil {
- log.Debugf("%s could not add key for %s to peerstore: %s", lp, rp, err)
+ log.Debug("could not add key for peer to peerstore",
+ "local_peer", lp,
+ "remote_peer", rp,
+ "err", err)
}
return
}
@@ -313,21 +944,31 @@ func (ids *IDService) consumeReceivedPubKey(c inet.Conn, kb []byte) {
// weird, got a different key... but the different key MATCHES the peer.ID.
// this odd. let's log error and investigate. this should basically never happen
// and it means we have something funky going on and possibly a bug.
- log.Errorf("%s identify got a different key for: %s", lp, rp)
+ log.Error("identify got a different key",
+ "local_peer", lp,
+ "remote_peer", rp)
// okay... does ours NOT match the remote peer.ID?
cp, err := peer.IDFromPublicKey(currKey)
if err != nil {
- log.Errorf("%s cannot get peer.ID from local key of remote peer: %s, %s", lp, rp, err)
+ log.Error("cannot get peer.ID from local key of remote peer",
+ "local_peer", lp,
+ "remote_peer", rp,
+ "err", err)
return
}
if cp != rp {
- log.Errorf("%s local key for remote peer %s yields different peer.ID: %s", lp, rp, cp)
+ log.Error("local key for remote peer yields different peer.ID",
+ "local_peer", lp,
+ "remote_peer", rp,
+ "calculated_peer_id", cp)
return
}
// okay... curr key DOES NOT match new key. both match peer.ID. wat?
- log.Errorf("%s local key and received key for %s do not match, but match peer.ID", lp, rp)
+ log.Error("local key and received key do not match, but match peer.ID",
+ "local_peer", lp,
+ "remote_peer", rp)
}
// HasConsistentTransport returns true if the address 'a' shares a
@@ -359,128 +1000,146 @@ func HasConsistentTransport(a ma.Multiaddr, green []ma.Multiaddr) bool {
return false
}
-// IdentifyWait returns a channel which will be closed once
-// "ProtocolIdentify" (handshake3) finishes on given conn.
-// This happens async so the connection can start to be used
-// even if handshake3 knowledge is not necesary.
-// Users **MUST** call IdentifyWait _after_ IdentifyConn
-func (ids *IDService) IdentifyWait(c inet.Conn) <-chan struct{} {
- ids.currmu.Lock()
- ch, found := ids.currid[c]
- ids.currmu.Unlock()
- if found {
- return ch
+// addConnWithLock assumes the caller holds the connsMu lock
+func (ids *idService) addConnWithLock(c network.Conn) {
+ _, found := ids.conns[c]
+ if !found {
+ <-ids.setupCompleted
+ ids.conns[c] = entry{}
}
-
- // if not found, it means we are already done identifying it, or
- // haven't even started. either way, return a new channel closed.
- ch = make(chan struct{})
- close(ch)
- return ch
}
-func (ids *IDService) consumeObservedAddress(observed []byte, c inet.Conn) {
- if observed == nil {
- return
- }
-
- maddr, err := ma.NewMultiaddrBytes(observed)
- if err != nil {
- log.Debugf("error parsing received observed addr for %s: %s", c, err)
- return
- }
-
- // we should only use ObservedAddr when our connection's LocalAddr is one
- // of our ListenAddrs. If we Dial out using an ephemeral addr, knowing that
- // address's external mapping is not very useful because the port will not be
- // the same as the listen addr.
- ifaceaddrs, err := ids.Host.Network().InterfaceListenAddresses()
- if err != nil {
- log.Infof("failed to get interface listen addrs", err)
- return
- }
-
- log.Debugf("identify identifying observed multiaddr: %s %s", c.LocalMultiaddr(), ifaceaddrs)
- if !addrInAddrs(c.LocalMultiaddr(), ifaceaddrs) {
- // not in our list
- return
+func signedPeerRecordFromMessage(msg *pb.Identify) (*record.Envelope, error) {
+ if len(msg.SignedPeerRecord) == 0 {
+ return nil, nil
}
-
- // ok! we have the observed version of one of our ListenAddresses!
- log.Debugf("added own observed listen addr: %s --> %s", c.LocalMultiaddr(), maddr)
- ids.observedAddrs.Add(maddr, c.RemoteMultiaddr())
+ env, _, err := record.ConsumeEnvelope(msg.SignedPeerRecord, peer.PeerRecordEnvelopeDomain)
+ return env, err
}
-func addrInAddrs(a ma.Multiaddr, as []ma.Multiaddr) bool {
- for _, b := range as {
- if a.Equal(b) {
- return true
- }
- }
- return false
-}
+// netNotifiee defines methods to be used with the swarm
+type netNotifiee idService
-// protocolVersionsAreCompatible checks that the two implementations
-// can talk to each other. It will use semver, but for now while
-// we're in tight development, we will return false for minor version
-// changes too.
-func protocolVersionsAreCompatible(v1, v2 string) bool {
- if strings.HasPrefix(v1, "ipfs/") {
- v1 = v1[5:]
- }
- if strings.HasPrefix(v2, "ipfs/") {
- v2 = v2[5:]
- }
-
- v1s, err := semver.NewVersion(v1)
- if err != nil {
- return false
- }
-
- v2s, err := semver.NewVersion(v2)
- if err != nil {
- return false
- }
-
- return v1s.Major == v2s.Major && v1s.Minor == v2s.Minor
+func (nn *netNotifiee) IDService() *idService {
+ return (*idService)(nn)
}
-// netNotifiee defines methods to be used with the IpfsDHT
-type netNotifiee IDService
+func (nn *netNotifiee) Connected(_ network.Network, c network.Conn) {
+ ids := nn.IDService()
-func (nn *netNotifiee) IDService() *IDService {
- return (*IDService)(nn)
-}
+ ids.connsMu.Lock()
+ ids.addConnWithLock(c)
+ ids.connsMu.Unlock()
-func (nn *netNotifiee) Connected(n inet.Network, v inet.Conn) {
- // TODO: deprecate the setConnHandler hook, and kick off
- // identification here.
+ nn.IDService().IdentifyWait(c)
}
-func (nn *netNotifiee) Disconnected(n inet.Network, v inet.Conn) {
- // undo the setting of addresses to peer.ConnectedAddrTTL we did
+func (nn *netNotifiee) Disconnected(_ network.Network, c network.Conn) {
ids := nn.IDService()
+
+ // Stop tracking the connection.
+ ids.connsMu.Lock()
+ delete(ids.conns, c)
+ ids.connsMu.Unlock()
+
+ // Last disconnect.
+ // Undo the setting of addresses to peer.ConnectedAddrTTL we did
ids.addrMu.Lock()
defer ids.addrMu.Unlock()
- if ids.Host.Network().Connectedness(v.RemotePeer()) != inet.Connected {
- // Last disconnect.
- ps := ids.Host.Peerstore()
- ps.UpdateAddrs(v.RemotePeer(), pstore.ConnectedAddrTTL, pstore.RecentlyConnectedAddrTTL)
+ // This check MUST happen after acquiring the Lock as identify on a different connection
+ // might be trying to add addresses.
+ switch ids.Host.Network().Connectedness(c.RemotePeer()) {
+ case network.Connected, network.Limited:
+ return
+ }
+ // peerstore returns the elements in a random order as it uses a map to store the addresses
+ addrs := ids.Host.Peerstore().Addrs(c.RemotePeer())
+ n := len(addrs)
+ if n > recentlyConnectedPeerMaxAddrs {
+ // We want to always save the address we are connected to
+ for i, a := range addrs {
+ if a.Equal(c.RemoteMultiaddr()) {
+ addrs[i], addrs[0] = addrs[0], addrs[i]
+ }
+ }
+ n = recentlyConnectedPeerMaxAddrs
+ }
+ ids.Host.Peerstore().UpdateAddrs(c.RemotePeer(), peerstore.ConnectedAddrTTL, peerstore.TempAddrTTL)
+ ids.Host.Peerstore().AddAddrs(c.RemotePeer(), addrs[:n], peerstore.RecentlyConnectedAddrTTL)
+ ids.Host.Peerstore().UpdateAddrs(c.RemotePeer(), peerstore.TempAddrTTL, 0)
+}
+
+func (nn *netNotifiee) Listen(_ network.Network, _ ma.Multiaddr) {}
+func (nn *netNotifiee) ListenClose(_ network.Network, _ ma.Multiaddr) {}
+
+// filterAddrs filters the address slice based on the remote multiaddr:
+// - if it's a localhost address, no filtering is applied
+// - if it's a private network address, all localhost addresses are filtered out
+// - if it's a public address, all non-public addresses are filtered out
+// - if none of the above, (e.g. discard prefix), no filtering is applied.
+// We can't do anything meaningful here so we do nothing.
+func filterAddrs(addrs []ma.Multiaddr, remote ma.Multiaddr) []ma.Multiaddr {
+ switch {
+ case manet.IsIPLoopback(remote):
+ return addrs
+ case manet.IsPrivateAddr(remote):
+ return ma.FilterAddrs(addrs, func(a ma.Multiaddr) bool { return !manet.IsIPLoopback(a) })
+ case manet.IsPublicAddr(remote):
+ return ma.FilterAddrs(addrs, manet.IsPublicAddr)
+ default:
+ return addrs
}
}
-func (nn *netNotifiee) OpenedStream(n inet.Network, v inet.Stream) {}
-func (nn *netNotifiee) ClosedStream(n inet.Network, v inet.Stream) {}
-func (nn *netNotifiee) Listen(n inet.Network, a ma.Multiaddr) {}
-func (nn *netNotifiee) ListenClose(n inet.Network, a ma.Multiaddr) {}
+func trimHostAddrList(addrs []ma.Multiaddr, maxSize int) []ma.Multiaddr {
+ totalSize := 0
+ for _, a := range addrs {
+ totalSize += len(a.Bytes())
+ }
+ if totalSize <= maxSize {
+ return addrs
+ }
-func logProtocolMismatchDisconnect(c inet.Conn, protocol, agent string) {
- lm := make(lgbl.DeferredMap)
- lm["remotePeer"] = func() interface{} { return c.RemotePeer().Pretty() }
- lm["remoteAddr"] = func() interface{} { return c.RemoteMultiaddr().String() }
- lm["protocolVersion"] = protocol
- lm["agentVersion"] = agent
- log.Event(context.TODO(), "IdentifyProtocolMismatch", lm)
- log.Debugf("IdentifyProtocolMismatch %s %s %s (disconnected)", c.RemotePeer(), protocol, agent)
+ score := func(addr ma.Multiaddr) int {
+ var res int
+ if manet.IsPublicAddr(addr) {
+ res |= 1 << 12
+ } else if !manet.IsIPLoopback(addr) {
+ res |= 1 << 11
+ }
+ var protocolWeight int
+ ma.ForEach(addr, func(c ma.Component) bool {
+ switch c.Protocol().Code {
+ case ma.P_QUIC_V1:
+ protocolWeight = 5
+ case ma.P_TCP:
+ protocolWeight = 4
+ case ma.P_WSS:
+ protocolWeight = 3
+ case ma.P_WEBTRANSPORT:
+ protocolWeight = 2
+ case ma.P_WEBRTC_DIRECT:
+ protocolWeight = 1
+ case ma.P_P2P:
+ return false
+ }
+ return true
+ })
+ res |= 1 << protocolWeight
+ return res
+ }
+
+ slices.SortStableFunc(addrs, func(a, b ma.Multiaddr) int {
+ return score(b) - score(a) // b-a for reverse order
+ })
+ totalSize = 0
+ for i, a := range addrs {
+ totalSize += len(a.Bytes())
+ if totalSize > maxSize {
+ addrs = addrs[:i]
+ break
+ }
+ }
+ return addrs
}
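+
+// The score packs reachability and transport preference into a single int:
+// for example a public QUIC-v1 address scores (1<<12)|(1<<5) = 4128 and a
+// private TCP address (1<<11)|(1<<4) = 2064, so public QUIC sorts first and
+// the low-scoring tail is dropped once maxSize is exceeded.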
diff --git a/p2p/protocol/identify/id_glass_test.go b/p2p/protocol/identify/id_glass_test.go
new file mode 100644
index 0000000000..3eec26cb75
--- /dev/null
+++ b/p2p/protocol/identify/id_glass_test.go
@@ -0,0 +1,208 @@
+package identify
+
+import (
+ "context"
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ recordPb "github.com/libp2p/go-libp2p/core/record/pb"
+ blhost "github.com/libp2p/go-libp2p/p2p/host/blank"
+ swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
+ ma "github.com/multiformats/go-multiaddr"
+ "google.golang.org/protobuf/proto"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestFastDisconnect(t *testing.T) {
+ // This test checks to see if we correctly abort sending an identify
+ // response if the peer disconnects before we handle the request.
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
+ target := blhost.NewBlankHost(swarmt.GenSwarm(t))
+ defer target.Close()
+ ids, err := NewIDService(target)
+ require.NoError(t, err)
+ defer ids.Close()
+ ids.Start()
+
+ sync := make(chan struct{})
+ target.SetStreamHandler(ID, func(s network.Stream) {
+ // Wait till the stream is set up on both sides.
+ select {
+ case <-sync:
+ case <-ctx.Done():
+ return
+ }
+
+ // Kill the connection, and make sure we're completely disconnected.
+ assert.Eventually(t,
+ func() bool {
+ for _, conn := range target.Network().ConnsToPeer(s.Conn().RemotePeer()) {
+ conn.Close()
+ }
+ return target.Network().Connectedness(s.Conn().RemotePeer()) != network.Connected
+ },
+ 2*time.Second,
+ time.Millisecond,
+ )
+ // Now try to handle the request.
+ // This should not block indefinitely, or panic, or anything like that.
+ //
+ // However, if we have a bug, that _could_ happen.
+ ids.handleIdentifyRequest(s)
+
+ // Ok, allow the outer test to continue.
+ select {
+ case <-sync:
+ case <-ctx.Done():
+ return
+ }
+ })
+
+ source := blhost.NewBlankHost(swarmt.GenSwarm(t))
+ defer source.Close()
+
+ // only connect to the first address, to make sure we only end up with one connection
+ require.NoError(t, source.Connect(ctx, peer.AddrInfo{ID: target.ID(), Addrs: target.Addrs()}))
+ s, err := source.NewStream(ctx, target.ID(), ID)
+ require.NoError(t, err)
+ select {
+ case sync <- struct{}{}:
+ case <-ctx.Done():
+ t.Fatal(ctx.Err())
+ }
+ s.Reset()
+ select {
+ case sync <- struct{}{}:
+ case <-ctx.Done():
+ t.Fatal(ctx.Err())
+ }
+ // double-check to make sure we didn't actually timeout somewhere.
+ require.NoError(t, ctx.Err())
+}
+
+func TestWrongSignedPeerRecord(t *testing.T) {
+ h1 := blhost.NewBlankHost(swarmt.GenSwarm(t))
+ defer h1.Close()
+ ids, err := NewIDService(h1)
+ require.NoError(t, err)
+ ids.Start()
+ defer ids.Close()
+
+ h2 := blhost.NewBlankHost(swarmt.GenSwarm(t))
+ defer h2.Close()
+ ids2, err := NewIDService(h2)
+ require.NoError(t, err)
+ ids2.Start()
+ defer ids2.Close()
+
+ h3 := blhost.NewBlankHost(swarmt.GenSwarm(t))
+ defer h2.Close()
+ ids3, err := NewIDService(h3)
+ require.NoError(t, err)
+ ids3.Start()
+ defer ids3.Close()
+
+ h2.Connect(context.Background(), peer.AddrInfo{ID: h1.ID(), Addrs: h1.Addrs()})
+ s, err := h2.NewStream(context.Background(), h1.ID(), IDPush)
+ require.NoError(t, err)
+
+ err = ids3.sendIdentifyResp(s, true)
+ // Sending itself succeeds; h1 should nevertheless discard the record, since it is signed by h3, not h2.
+ require.NoError(t, err)
+ time.Sleep(time.Second)
+
+ require.Empty(t, h1.Peerstore().Addrs(h3.ID()), "h1 should not know about h3 since it was relayed over h2")
+}
+
+func TestInvalidSignedPeerRecord(t *testing.T) {
+ h1 := blhost.NewBlankHost(swarmt.GenSwarm(t))
+ defer h1.Close()
+ ids, err := NewIDService(h1)
+ require.NoError(t, err)
+ ids.Start()
+ defer ids.Close()
+
+ h2 := blhost.NewBlankHost(swarmt.GenSwarm(t))
+ defer h2.Close()
+ ids2, err := NewIDService(h2)
+ require.NoError(t, err)
+ // We don't want to start the identify service; we'll manage the messages h2
+ // sends manually so we can tweak them.
+ // ids2.Start()
+
+ h2.Connect(context.Background(), peer.AddrInfo{ID: h1.ID(), Addrs: h1.Addrs()})
+ require.Empty(t, h1.Peerstore().Addrs(h2.ID()))
+
+ s, err := h2.NewStream(context.Background(), h1.ID(), IDPush)
+ require.NoError(t, err)
+
+ ids2.updateSnapshot()
+ ids2.currentSnapshot.Lock()
+ snapshot := ids2.currentSnapshot.snapshot
+ ids2.currentSnapshot.Unlock()
+ mes := ids2.createBaseIdentifyResponse(s.Conn(), &snapshot)
+ fmt.Println("Signed record is", snapshot.record)
+ marshalled, err := snapshot.record.Marshal()
+ require.NoError(t, err)
+
+ var envPb recordPb.Envelope
+ err = proto.Unmarshal(marshalled, &envPb)
+ require.NoError(t, err)
+
+ envPb.Signature = []byte("invalid")
+
+ mes.SignedPeerRecord, err = proto.Marshal(&envPb)
+ require.NoError(t, err)
+
+ err = ids2.writeChunkedIdentifyMsg(s, mes)
+ require.NoError(t, err)
+ fmt.Println("Done sending msg")
+ s.Close()
+
+ // Wait a bit for h1 to process the message
+ time.Sleep(1 * time.Second)
+
+ cab, ok := h1.Peerstore().(peerstore.CertifiedAddrBook)
+ require.True(t, ok)
+ require.Nil(t, cab.GetPeerRecord(h2.ID()))
+}
+
+func TestIncomingAddrFilter(t *testing.T) {
+ lhAddr := ma.StringCast("/ip4/127.0.0.1/udp/123/quic-v1")
+ privAddr := ma.StringCast("/ip4/192.168.1.101/tcp/123")
+ pubAddr := ma.StringCast("/ip6/2001::1/udp/123/quic-v1")
+ pubDNSAddr := ma.StringCast("/dns/example.com/udp/123/quic-v1")
+ privDNSAddr := ma.StringCast("/dns4/localhost/udp/123/quic-v1")
+ tests := []struct {
+ output []ma.Multiaddr
+ remote ma.Multiaddr
+ }{
+ {
+ output: []ma.Multiaddr{lhAddr, privAddr, pubAddr, pubDNSAddr, privDNSAddr},
+ remote: lhAddr,
+ },
+ {
+ output: []ma.Multiaddr{privAddr, pubAddr, pubDNSAddr, privDNSAddr},
+ remote: privAddr,
+ },
+ {
+ output: []ma.Multiaddr{pubAddr, pubDNSAddr},
+ remote: pubAddr,
+ },
+ }
+ for _, tc := range tests {
+ t.Run(fmt.Sprintf("remote:%s", tc.remote), func(t *testing.T) {
+ input := []ma.Multiaddr{lhAddr, privAddr, pubAddr, pubDNSAddr, privDNSAddr}
+ got := filterAddrs(input, tc.remote)
+ require.ElementsMatch(t, tc.output, got, "%s\n%s", tc.output, got)
+ })
+ }
+}
diff --git a/p2p/protocol/identify/id_test.go b/p2p/protocol/identify/id_test.go
index 7e82f9528c..62bb26f94a 100644
--- a/p2p/protocol/identify/id_test.go
+++ b/p2p/protocol/identify/id_test.go
@@ -2,183 +2,960 @@ package identify_test
import (
"context"
+ "errors"
+ "fmt"
+ "math/rand"
+ "slices"
+ "sync"
"testing"
"time"
- ic "github.com/libp2p/go-libp2p-crypto"
- testutil "github.com/libp2p/go-libp2p-netutil"
- peer "github.com/libp2p/go-libp2p-peer"
- pstore "github.com/libp2p/go-libp2p-peerstore"
- identify "github.com/libp2p/go-libp2p/p2p/protocol/identify"
-
- blhost "github.com/libp2p/go-libp2p-blankhost"
- host "github.com/libp2p/go-libp2p-host"
+ "github.com/libp2p/go-libp2p"
+ ic "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/event"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/libp2p/go-libp2p/core/protocol"
+ "github.com/libp2p/go-libp2p/core/record"
+ coretest "github.com/libp2p/go-libp2p/core/test"
+ blhost "github.com/libp2p/go-libp2p/p2p/host/blank"
+ "github.com/libp2p/go-libp2p/p2p/host/eventbus"
+ "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem"
+ mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
+ "github.com/libp2p/go-libp2p/p2p/net/swarm"
+ swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
+ "github.com/libp2p/go-libp2p/p2p/protocol/identify"
+ useragent "github.com/libp2p/go-libp2p/p2p/protocol/identify/internal/user-agent"
+ "github.com/libp2p/go-libp2p/p2p/protocol/identify/pb"
+
+ mockClock "github.com/benbjohnson/clock"
+ "github.com/libp2p/go-libp2p-testing/race"
+ "github.com/libp2p/go-msgio/pbio"
ma "github.com/multiformats/go-multiaddr"
+ matest "github.com/multiformats/go-multiaddr/matest"
+ "github.com/stretchr/testify/require"
)
-func subtestIDService(t *testing.T) {
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+func testKnowsAddrs(t *testing.T, h host.Host, p peer.ID, expected []ma.Multiaddr) {
+ t.Helper()
+ require.True(t, matest.AssertMultiaddrsMatch(t, expected, h.Peerstore().Addrs(p)), fmt.Sprintf("%s did not have addr for %s", h.ID(), p))
+}
+
+func testHasAgentVersion(t *testing.T, h host.Host, p peer.ID) {
+ v, err := h.Peerstore().Get(p, "AgentVersion")
+ require.NoError(t, err, "fetching agent version")
+ require.Equal(t, useragent.DefaultUserAgent(), v, "agent version")
+}
- h1 := blhost.NewBlankHost(testutil.GenSwarmNetwork(t, ctx))
- h2 := blhost.NewBlankHost(testutil.GenSwarmNetwork(t, ctx))
+func testHasPublicKey(t *testing.T, h host.Host, p peer.ID, shouldBe ic.PubKey) {
+ k := h.Peerstore().PubKey(p)
+ if k == nil {
+ t.Error("no public key")
+ return
+ }
+ if !k.Equals(shouldBe) {
+ t.Error("key mismatch")
+ return
+ }
+
+ p2, err := peer.IDFromPublicKey(k)
+ if err != nil {
+ t.Error("could not make key")
+ } else if p != p2 {
+ t.Error("key does not match peerid")
+ }
+}
+
+// we're using BlankHost in our tests, which doesn't automatically generate peer records
+// and emit address change events on the bus like BasicHost.
+// This generates a record, puts it in the peerstore and emits an addr change event
+// which will cause the identify service to push it to all peers it's connected to.
+func emitAddrChangeEvt(t *testing.T, h host.Host) {
+ t.Helper()
+
+ key := h.Peerstore().PrivKey(h.ID())
+ if key == nil {
+ t.Fatal("no private key for host")
+ }
+
+ rec := peer.NewPeerRecord()
+ rec.PeerID = h.ID()
+ rec.Addrs = h.Addrs()
+ signed, err := record.Seal(rec, key)
+ if err != nil {
+ t.Fatalf("error generating peer record: %s", err)
+ }
+
+ cab, ok := peerstore.GetCertifiedAddrBook(h.Peerstore())
+ require.True(t, ok)
+ _, err = cab.ConsumePeerRecord(signed, peerstore.PermanentAddrTTL)
+ require.NoError(t, err)
+
+ evt := event.EvtLocalAddressesUpdated{}
+ emitter, err := h.EventBus().Emitter(new(event.EvtLocalAddressesUpdated), eventbus.Stateful)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = emitter.Emit(evt)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+// TestIDService gives the ID service 1s to finish after dialing.
+// This is because it used to be concurrent. Now, Dial waits until the
+// id service is done.
+func TestIDService(t *testing.T) {
+ if race.WithRace() {
+ t.Skip("This test modifies peerstore.RecentlyConnectedAddrTTL, which is racy.")
+ }
+ // This test is highly timing dependent, waiting on timeouts/expiration.
+ oldTTL := peerstore.RecentlyConnectedAddrTTL
+ oldTempTTL := peerstore.TempAddrTTL
+ peerstore.RecentlyConnectedAddrTTL = 500 * time.Millisecond
+ peerstore.TempAddrTTL = 50 * time.Millisecond
+ t.Cleanup(func() {
+ peerstore.RecentlyConnectedAddrTTL = oldTTL
+ peerstore.TempAddrTTL = oldTempTTL
+ })
+
+ clk := mockClock.NewMock()
+ swarm1 := swarmt.GenSwarm(t, swarmt.WithClock(clk))
+ swarm2 := swarmt.GenSwarm(t, swarmt.WithClock(clk))
+ h1 := blhost.NewBlankHost(swarm1)
+ h2 := blhost.NewBlankHost(swarm2)
h1p := h1.ID()
h2p := h2.ID()
- ids1 := identify.NewIDService(h1)
- ids2 := identify.NewIDService(h2)
+ ids1, err := identify.NewIDService(h1)
+ require.NoError(t, err)
+ defer ids1.Close()
+ ids1.Start()
+
+ ids2, err := identify.NewIDService(h2)
+ require.NoError(t, err)
+ defer ids2.Close()
+ ids2.Start()
+
+ sub, err := ids1.Host.EventBus().Subscribe(new(event.EvtPeerIdentificationCompleted))
+ if err != nil {
+ t.Fatal(err)
+ }
testKnowsAddrs(t, h1, h2p, []ma.Multiaddr{}) // nothing
testKnowsAddrs(t, h2, h1p, []ma.Multiaddr{}) // nothing
+ // the forgetMe addr represents an address for h1 that h2 has learned out of band
+ // (not via identify protocol). During the identify exchange, it will be
+ // forgotten and replaced by the addrs h1 sends.
forgetMe, _ := ma.NewMultiaddr("/ip4/1.2.3.4/tcp/1234")
- h2.Peerstore().AddAddr(h1p, forgetMe, pstore.RecentlyConnectedAddrTTL)
- time.Sleep(500 * time.Millisecond)
-
+ h2.Peerstore().AddAddr(h1p, forgetMe, peerstore.RecentlyConnectedAddrTTL)
h2pi := h2.Peerstore().PeerInfo(h2p)
- if err := h1.Connect(ctx, h2pi); err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, h1.Connect(context.Background(), h2pi))
h1t2c := h1.Network().ConnsToPeer(h2p)
- if len(h1t2c) == 0 {
- t.Fatal("should have a conn here")
- }
+ require.NotEmpty(t, h1t2c, "should have a conn here")
ids1.IdentifyConn(h1t2c[0])
- // the IDService should be opened automatically, by the network.
+ // the idService should be opened automatically, by the network.
// what we should see now is that both peers know about each others listen addresses.
t.Log("test peer1 has peer2 addrs correctly")
- testKnowsAddrs(t, h1, h2p, h2.Peerstore().Addrs(h2p)) // has them
- testHasProtocolVersions(t, h1, h2p)
+ testKnowsAddrs(t, h1, h2p, h2.Addrs()) // has them
+ testHasAgentVersion(t, h1, h2p)
testHasPublicKey(t, h1, h2p, h2.Peerstore().PubKey(h2p)) // h1 should have h2's public key
 // now, this wait we do have to do: it's the wait for the listening side
 // to finish identifying the connection.
c := h2.Network().ConnsToPeer(h1.ID())
- if len(c) < 1 {
- t.Fatal("should have connection by now at least.")
- }
+ require.NotEmpty(t, c, "should have connection by now at least.")
ids2.IdentifyConn(c[0])
- addrs := h1.Peerstore().Addrs(h1p)
- addrs = append(addrs, c[0].RemoteMultiaddr(), forgetMe)
-
// and the protocol versions.
t.Log("test peer2 has peer1 addrs correctly")
- testKnowsAddrs(t, h2, h1p, addrs) // has them
- testHasProtocolVersions(t, h2, h1p)
+ testKnowsAddrs(t, h2, h1p, h1.Addrs()) // has them
+ testHasAgentVersion(t, h2, h1p)
testHasPublicKey(t, h2, h1p, h1.Peerstore().PubKey(h1p)) // h1 should have h2's public key
// Need both sides to actually notice that the connection has been closed.
+ sentDisconnect1 := waitForDisconnectNotification(swarm1)
+ sentDisconnect2 := waitForDisconnectNotification(swarm2)
h1.Network().ClosePeer(h2p)
h2.Network().ClosePeer(h1p)
if len(h2.Network().ConnsToPeer(h1.ID())) != 0 || len(h1.Network().ConnsToPeer(h2.ID())) != 0 {
t.Fatal("should have no connections")
}
- testKnowsAddrs(t, h2, h1p, addrs)
- testKnowsAddrs(t, h1, h2p, h2.Peerstore().Addrs(h2p))
-
- time.Sleep(500 * time.Millisecond)
-
- // Forget the first one.
- testKnowsAddrs(t, h2, h1p, addrs[:len(addrs)-1])
+ t.Log("testing addrs just after disconnect")
+ // addresses don't immediately expire on disconnect, so we should still have them
+ testKnowsAddrs(t, h2, h1p, h1.Addrs())
+ testKnowsAddrs(t, h1, h2p, h2.Addrs())
- time.Sleep(500 * time.Millisecond)
+ <-sentDisconnect1
+ <-sentDisconnect2
- // Forget the rest.
+ // the addrs had their TTLs reduced on disconnect, and
+ // will be forgotten soon after
+ t.Log("testing addrs after TTL expiration")
+ clk.Add(time.Second)
testKnowsAddrs(t, h1, h2p, []ma.Multiaddr{})
testKnowsAddrs(t, h2, h1p, []ma.Multiaddr{})
+
+ // test that we received the "identify completed" event.
+ select {
+ case evtAny := <-sub.Out():
+ assertCorrectEvtPeerIdentificationCompleted(t, evtAny, h2)
+ case <-time.After(3 * time.Second):
+ t.Fatalf("expected EvtPeerIdentificationCompleted event within 10 seconds; none received")
+ }
}
-func testKnowsAddrs(t *testing.T, h host.Host, p peer.ID, expected []ma.Multiaddr) {
+func assertCorrectEvtPeerIdentificationCompleted(t *testing.T, evtAny interface{}, other host.Host) {
t.Helper()
+ evt := evtAny.(event.EvtPeerIdentificationCompleted)
+ require.NotNil(t, evt.Conn)
+ require.Equal(t, other.ID(), evt.Peer)
+
+ require.Equal(t, len(other.Addrs()), len(evt.ListenAddrs))
+ if len(other.Addrs()) == len(evt.ListenAddrs) {
+ otherAddrsStrings := make([]string, len(other.Addrs()))
+ evtAddrStrings := make([]string, len(evt.ListenAddrs))
+ for i, a := range other.Addrs() {
+ otherAddrsStrings[i] = a.String()
+ evtAddrStrings[i] = evt.ListenAddrs[i].String()
+ }
+ slices.Sort(otherAddrsStrings)
+ slices.Sort(evtAddrStrings)
+ require.Equal(t, otherAddrsStrings, evtAddrStrings)
+ }
+
+ otherProtos := other.Mux().Protocols()
+ slices.Sort(otherProtos)
+ evtProtos := evt.Protocols
+ slices.Sort(evtProtos)
+ require.Equal(t, otherProtos, evtProtos)
+ idFromSignedRecord, err := peer.IDFromPublicKey(evt.SignedPeerRecord.PublicKey)
+ require.NoError(t, err)
+ require.Equal(t, other.ID(), idFromSignedRecord)
+ require.Equal(t, peer.PeerRecordEnvelopePayloadType, evt.SignedPeerRecord.PayloadType)
+ var peerRecord peer.PeerRecord
+ require.NoError(t, evt.SignedPeerRecord.TypedRecord(&peerRecord))
+ require.Equal(t, other.ID(), peerRecord.PeerID)
+ matest.AssertMultiaddrsMatch(t, other.Addrs(), peerRecord.Addrs)
+}
+
+func TestProtoMatching(t *testing.T) {
+ tcp1, _ := ma.NewMultiaddr("/ip4/1.2.3.4/tcp/1234")
+ tcp2, _ := ma.NewMultiaddr("/ip4/1.2.3.4/tcp/2345")
+ tcp3, _ := ma.NewMultiaddr("/ip4/1.2.3.4/tcp/4567")
+ utp, _ := ma.NewMultiaddr("/ip4/1.2.3.4/udp/1234/utp")
+
+ if !identify.HasConsistentTransport(tcp1, []ma.Multiaddr{tcp2, tcp3, utp}) {
+ t.Fatal("expected match")
+ }
+
+ if identify.HasConsistentTransport(utp, []ma.Multiaddr{tcp2, tcp3}) {
+ t.Fatal("expected mismatch")
+ }
+}
- actual := h.Peerstore().Addrs(p)
+func TestLocalhostAddrFiltering(t *testing.T) {
+ t.Skip("need to fix this test")
+ mn := mocknet.New()
+ defer mn.Close()
+ id1 := coretest.RandPeerIDFatal(t)
+ ps1, err := pstoremem.NewPeerstore()
+ if err != nil {
+ t.Fatal(err)
+ }
+ p1addr1, _ := ma.NewMultiaddr("/ip4/1.2.3.4/tcp/1234")
+ p1addr2, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/2345")
+ ps1.AddAddrs(id1, []ma.Multiaddr{p1addr1, p1addr2}, peerstore.PermanentAddrTTL)
+ p1, err := mn.AddPeerWithPeerstore(id1, ps1)
+ if err != nil {
+ t.Fatal(err)
+ }
- if len(actual) != len(expected) {
- t.Errorf("expected: %s", expected)
- t.Errorf("actual: %s", actual)
- t.Fatal("dont have the same addresses")
+ id2 := coretest.RandPeerIDFatal(t)
+ ps2, err := pstoremem.NewPeerstore()
+ if err != nil {
+ t.Fatal(err)
+ }
+ p2addr1, _ := ma.NewMultiaddr("/ip4/1.2.3.5/tcp/1234")
+ p2addr2, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/3456")
+ p2addrs := []ma.Multiaddr{p2addr1, p2addr2}
+ ps2.AddAddrs(id2, p2addrs, peerstore.PermanentAddrTTL)
+ p2, err := mn.AddPeerWithPeerstore(id2, ps2)
+ if err != nil {
+ t.Fatal(err)
}
- have := map[string]struct{}{}
- for _, addr := range actual {
- have[addr.String()] = struct{}{}
+ id3 := coretest.RandPeerIDFatal(t)
+ ps3, err := pstoremem.NewPeerstore()
+ if err != nil {
+ t.Fatal(err)
}
- for _, addr := range expected {
- if _, found := have[addr.String()]; !found {
- t.Errorf("%s did not have addr for %s: %s", h.ID(), p, addr)
- // panic("ahhhhhhh")
+ p3addr1, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/4567")
+ ps3.AddAddrs(id3, []ma.Multiaddr{p3addr1}, peerstore.PermanentAddrTTL)
+ p3, err := mn.AddPeerWithPeerstore(id3, ps3)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = mn.LinkAll()
+ if err != nil {
+ t.Fatal(err)
+ }
+ p1.Connect(context.Background(), peer.AddrInfo{
+ ID: id2,
+ Addrs: p2addrs[0:1],
+ })
+ p3.Connect(context.Background(), peer.AddrInfo{
+ ID: id2,
+ Addrs: p2addrs[1:],
+ })
+
+ ids1, err := identify.NewIDService(p1)
+ require.NoError(t, err)
+ ids1.Start()
+
+ ids2, err := identify.NewIDService(p2)
+ require.NoError(t, err)
+ ids2.Start()
+
+ ids3, err := identify.NewIDService(p3)
+ require.NoError(t, err)
+ ids3.Start()
+
+ defer func() {
+ ids1.Close()
+ ids2.Close()
+ ids3.Close()
+ }()
+
+ conns := p2.Network().ConnsToPeer(id1)
+ if len(conns) == 0 {
+ t.Fatal("no conns")
+ }
+ conn := conns[0]
+ ids2.IdentifyConn(conn)
+ addrs := p2.Peerstore().Addrs(id1)
+ if len(addrs) != 1 {
+ t.Fatalf("expected one addr, found %s", addrs)
+ }
+
+ conns = p3.Network().ConnsToPeer(id2)
+ if len(conns) == 0 {
+ t.Fatal("no conns")
+ }
+ conn = conns[0]
+ ids3.IdentifyConn(conn)
+ addrs = p3.Peerstore().Addrs(id2)
+ if len(addrs) != 2 {
+ t.Fatalf("expected 2 addrs for %s, found %d: %s", id2, len(addrs), addrs)
+ }
+}
+
+// TestIdentifyPushWhileIdentifyingConn tests that the host waits to push updates if an identify is ongoing.
+func TestIdentifyPushWhileIdentifyingConn(t *testing.T) {
+ t.Skip()
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ h1 := blhost.NewBlankHost(swarmt.GenSwarm(t))
+ h2 := blhost.NewBlankHost(swarmt.GenSwarm(t))
+ defer h2.Close()
+ defer h1.Close()
+ t.Log("h1:", h1.ID())
+ t.Log("h2:", h2.ID())
+
+ ids1, err := identify.NewIDService(h1)
+ require.NoError(t, err)
+ ids1.Start()
+
+ ids2, err := identify.NewIDService(h2)
+ require.NoError(t, err)
+ ids2.Start()
+
+ defer ids1.Close()
+ defer ids2.Close()
+
+ // replace the original identify handler by one that blocks until we close the block channel.
+ // this allows us to control how long identify runs.
+ block := make(chan struct{})
+ handler := func(s network.Stream) {
+ <-block
+ w := pbio.NewDelimitedWriter(s)
+ w.WriteMsg(&pb.Identify{Protocols: protocol.ConvertToStrings(h1.Mux().Protocols())})
+ s.Close()
+ }
+ h1.RemoveStreamHandler(identify.ID)
+ h1.SetStreamHandler(identify.ID, handler)
+
+ // from h2 connect to h1.
+ if err := h2.Connect(ctx, peer.AddrInfo{ID: h1.ID(), Addrs: h1.Addrs()}); err != nil {
+ t.Fatal(err)
+ }
+
+ // from h2, identify h1.
+ conn := h2.Network().ConnsToPeer(h1.ID())[0]
+ go ids2.IdentifyConn(conn)
+
+ <-time.After(500 * time.Millisecond)
+
+ // subscribe to events in h1; after identify h1 should receive the update from h2 and publish an event in the bus.
+ sub, err := h1.EventBus().Subscribe(&event.EvtPeerProtocolsUpdated{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer sub.Close()
+
+ // add a handler in h2; the update to h1 will queue until we're done identifying h1.
+ h2.SetStreamHandler(protocol.TestingID, func(_ network.Stream) {})
+ <-time.After(500 * time.Millisecond)
+
+ // make sure we haven't received any events yet.
+ if q := len(sub.Out()); q > 0 {
+ t.Fatalf("expected no events yet; queued: %d", q)
+ }
+
+ close(block)
+ select {
+ case evt := <-sub.Out():
+ e := evt.(event.EvtPeerProtocolsUpdated)
+ if e.Peer != h2.ID() || len(e.Added) != 1 || e.Added[0] != protocol.TestingID {
+ t.Fatalf("expected an event for protocol changes in h2, with the testing protocol added; instead got: %v", evt)
}
+ case <-time.After(2 * time.Second):
+ t.Fatalf("timed out while waiting for an event for the protocol changes in h2")
}
}
-func testHasProtocolVersions(t *testing.T, h host.Host, p peer.ID) {
- v, err := h.Peerstore().Get(p, "ProtocolVersion")
- if v == nil {
- t.Error("no protocol version")
- return
+func TestIdentifyPushOnAddrChange(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ h1 := blhost.NewBlankHost(swarmt.GenSwarm(t))
+ h2 := blhost.NewBlankHost(swarmt.GenSwarm(t))
+
+ h1p := h1.ID()
+ h2p := h2.ID()
+
+ ids1, err := identify.NewIDService(h1)
+ require.NoError(t, err)
+ defer ids1.Close()
+ ids1.Start()
+
+ ids2, err := identify.NewIDService(h2)
+ require.NoError(t, err)
+ defer ids2.Close()
+ ids2.Start()
+
+ testKnowsAddrs(t, h1, h2p, []ma.Multiaddr{}) // nothing
+ testKnowsAddrs(t, h2, h1p, []ma.Multiaddr{}) // nothing
+
+ require.NoError(t, h1.Connect(ctx, h2.Peerstore().PeerInfo(h2p)))
+ // h1 should immediately see a connection from h2
+ require.NotEmpty(t, h1.Network().ConnsToPeer(h2p))
+ // wait for h2 to Identify itself so we are sure h2 has seen the connection.
+ ids1.IdentifyConn(h1.Network().ConnsToPeer(h2p)[0])
+
+ // h2 should now see the connection and we should wait for h1 to Identify itself to h2.
+ require.NotEmpty(t, h2.Network().ConnsToPeer(h1p))
+ ids2.IdentifyConn(h2.Network().ConnsToPeer(h1p)[0])
+
+ testKnowsAddrs(t, h1, h2p, h2.Peerstore().Addrs(h2p))
+ testKnowsAddrs(t, h2, h1p, h1.Peerstore().Addrs(h1p))
+
+ // change addr on host 1 and ensure host2 gets a push
+ lad := ma.StringCast("/ip4/127.0.0.1/tcp/1234")
+ require.NoError(t, h1.Network().Listen(lad))
+ matest.AssertMultiaddrsContain(t, h1.Addrs(), lad)
+
+ h2AddrStream := h2.Peerstore().AddrStream(ctx, h1p)
+
+ emitAddrChangeEvt(t, h1)
+
+ // Wait for h2 to process the new addr
+ waitForAddrInStream(t, h2AddrStream, lad, 10*time.Second, "h2 did not receive addr change")
+
+ require.True(t, ma.Contains(h2.Peerstore().Addrs(h1p), lad))
+
+ // change addr on host2 and ensure host 1 gets a push
+ lad = ma.StringCast("/ip4/127.0.0.1/tcp/1235")
+ require.NoError(t, h2.Network().Listen(lad))
+ matest.AssertMultiaddrsContain(t, h2.Addrs(), lad)
+ h1AddrStream := h1.Peerstore().AddrStream(ctx, h2p)
+ emitAddrChangeEvt(t, h2)
+
+ // Wait for h1 to process the new addr
+ waitForAddrInStream(t, h1AddrStream, lad, 10*time.Second, "h1 did not receive addr change")
+
+ require.True(t, ma.Contains(h1.Peerstore().Addrs(h2p), lad))
+
+ // change addr on host2 again
+ lad2 := ma.StringCast("/ip4/127.0.0.1/tcp/1236")
+ require.NoError(t, h2.Network().Listen(lad2))
+ matest.AssertMultiaddrsContain(t, h2.Addrs(), lad2)
+ emitAddrChangeEvt(t, h2)
+
+ // Wait for h1 to process the new addr
+ waitForAddrInStream(t, h1AddrStream, lad2, 10*time.Second, "h1 did not receive addr change")
+
+ require.True(t, ma.Contains(h1.Peerstore().Addrs(h2p), lad2))
+}
+
+func TestUserAgent(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ h1, err := libp2p.New(libp2p.UserAgent("foo"), libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer h1.Close()
+
+ h2, err := libp2p.New(libp2p.UserAgent("bar"), libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0"))
+ if err != nil {
+ t.Fatal(err)
}
- if v.(string) != identify.LibP2PVersion {
- t.Error("protocol mismatch", err)
+ defer h2.Close()
+
+ err = h1.Connect(ctx, peer.AddrInfo{ID: h2.ID(), Addrs: h2.Addrs()})
+ if err != nil {
+ t.Fatal(err)
}
- v, err = h.Peerstore().Get(p, "AgentVersion")
- if v.(string) != identify.ClientVersion {
- t.Error("agent version mismatch", err)
+ av, err := h1.Peerstore().Get(h2.ID(), "AgentVersion")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if ver, ok := av.(string); !ok || ver != "bar" {
+ t.Errorf("expected agent version %q, got %q", "bar", av)
}
}
-func testHasPublicKey(t *testing.T, h host.Host, p peer.ID, shouldBe ic.PubKey) {
- k := h.Peerstore().PubKey(p)
- if k == nil {
- t.Error("no public key")
- return
+func TestNotListening(t *testing.T) {
+ // Make sure we don't panic if we're not listening on any addresses.
+ //
+ // https://github.com/libp2p/go-libp2p/issues/939
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ h1, err := libp2p.New(libp2p.NoListenAddrs)
+ if err != nil {
+ t.Fatal(err)
}
- if !k.Equals(shouldBe) {
- t.Error("key mismatch")
- return
+ defer h1.Close()
+
+ h2, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0"))
+ if err != nil {
+ t.Fatal(err)
}
+ defer h2.Close()
- p2, err := peer.IDFromPublicKey(k)
+ err = h1.Connect(ctx, peer.AddrInfo{ID: h2.ID(), Addrs: h2.Addrs()})
if err != nil {
- t.Error("could not make key")
- } else if p != p2 {
- t.Error("key does not match peerid")
+ t.Fatal(err)
}
}
-// TestIDServiceWait gives the ID service 1s to finish after dialing
-// this is becasue it used to be concurrent. Now, Dial wait till the
-// id service is done.
-func TestIDService(t *testing.T) {
- oldTTL := pstore.RecentlyConnectedAddrTTL
- pstore.RecentlyConnectedAddrTTL = time.Second
- defer func() {
- pstore.RecentlyConnectedAddrTTL = oldTTL
- }()
+func TestSendPush(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ h1 := blhost.NewBlankHost(swarmt.GenSwarm(t))
+ h2 := blhost.NewBlankHost(swarmt.GenSwarm(t))
+ defer h2.Close()
+ defer h1.Close()
+
+ ids1, err := identify.NewIDService(h1)
+ require.NoError(t, err)
+ defer ids1.Close()
+ ids1.Start()
+
+ ids2, err := identify.NewIDService(h2)
+ require.NoError(t, err)
+ defer ids2.Close()
+ ids2.Start()
+
+ err = h1.Connect(ctx, peer.AddrInfo{ID: h2.ID(), Addrs: h2.Addrs()})
+ require.NoError(t, err)
+
+ // wait for them to Identify each other
+ ids1.IdentifyConn(h1.Network().ConnsToPeer(h2.ID())[0])
+ ids2.IdentifyConn(h2.Network().ConnsToPeer(h1.ID())[0])
+
+ // h1 starts listening on a new protocol and h2 finds out about that through a push
+ h1.SetStreamHandler("rand", func(network.Stream) {})
+ require.Eventually(t, func() bool {
+ sup, err := h2.Peerstore().SupportsProtocols(h1.ID(), []protocol.ID{"rand"}...)
+ return err == nil && len(sup) == 1 && sup[0] == "rand"
+ }, time.Second, 10*time.Millisecond)
+
+ // h1 stops listening on a protocol and h2 finds out about it via a push
+ h1.RemoveStreamHandler("rand")
+ require.Eventually(t, func() bool {
+ sup, err := h2.Peerstore().SupportsProtocols(h1.ID(), []protocol.ID{"rand"}...)
+ return err == nil && len(sup) == 0
+ }, time.Second, 10*time.Millisecond)
+}
+
+func TestLargeIdentifyMessage(t *testing.T) {
+ if race.WithRace() {
+ t.Skip("setting peerstore.RecentlyConnectedAddrTTL is racy")
+ }
+ oldTTL := peerstore.RecentlyConnectedAddrTTL
+ oldTempTTL := peerstore.TempAddrTTL
+ peerstore.RecentlyConnectedAddrTTL = 500 * time.Millisecond
+ peerstore.TempAddrTTL = 50 * time.Millisecond
+ t.Cleanup(func() {
+ peerstore.RecentlyConnectedAddrTTL = oldTTL
+ peerstore.TempAddrTTL = oldTempTTL
+ })
+
+ clk := mockClock.NewMock()
+ swarm1 := swarmt.GenSwarm(t, swarmt.WithClock(clk))
+ swarm2 := swarmt.GenSwarm(t, swarmt.WithClock(clk))
+ h1 := blhost.NewBlankHost(swarm1)
+ h2 := blhost.NewBlankHost(swarm2)
+
+ // add protocol strings to make the message larger
+ // about 2K of protocol strings
+ for i := 0; i < 500; i++ {
+ r := protocol.ID(fmt.Sprintf("rand%d", i))
+ h1.SetStreamHandler(r, func(network.Stream) {})
+ h2.SetStreamHandler(r, func(network.Stream) {})
+ }
+
+ h1p := h1.ID()
+ h2p := h2.ID()
+
+ ids1, err := identify.NewIDService(h1)
+ require.NoError(t, err)
+ defer ids1.Close()
+ ids1.Start()
+
+ ids2, err := identify.NewIDService(h2)
+ require.NoError(t, err)
+ defer ids2.Close()
+ ids2.Start()
+
+ sub, err := ids1.Host.EventBus().Subscribe(new(event.EvtPeerIdentificationCompleted))
+ require.NoError(t, err)
+
+ testKnowsAddrs(t, h1, h2p, []ma.Multiaddr{}) // nothing
+ testKnowsAddrs(t, h2, h1p, []ma.Multiaddr{}) // nothing
+
+ // the forgetMe addr represents an address for h1 that h2 has learned out of band
+ // (not via identify protocol). During the identify exchange, it will be
+ // forgotten and replaced by the addrs h1 sends.
+ forgetMe, _ := ma.NewMultiaddr("/ip4/1.2.3.4/tcp/1234")
+ h2.Peerstore().AddAddr(h1p, forgetMe, peerstore.RecentlyConnectedAddrTTL)
+
+ h2pi := h2.Peerstore().PeerInfo(h2p)
+ h2pi.Addrs = h2pi.Addrs[:1] // dial over a single address only
+ require.NoError(t, h1.Connect(context.Background(), h2pi))
+
+ h1t2c := h1.Network().ConnsToPeer(h2p)
+ require.Len(t, h1t2c, 1, "should have a conn here")
+
+ ids1.IdentifyConn(h1t2c[0])
+
+ // the idService should be opened automatically by the network.
+ // what we should see now is that both peers know about each others listen addresses.
+ t.Log("test peer1 has peer2 addrs correctly")
+ testKnowsAddrs(t, h1, h2p, h2.Addrs()) // has them
+ testHasAgentVersion(t, h1, h2p)
+ testHasPublicKey(t, h1, h2p, h2.Peerstore().PubKey(h2p)) // h1 should have h2's public key
+
+ // now, this wait we do have to do: it's the wait for the listening side
+ // to finish identifying the connection.
+ c := h2.Network().ConnsToPeer(h1.ID())
+ if len(c) != 1 {
+ t.Fatal("should have connection by now at least.")
+ }
+ ids2.IdentifyConn(c[0])
+
+ // and the protocol versions.
+ t.Log("test peer2 has peer1 addrs correctly")
+ testKnowsAddrs(t, h2, h1p, h1.Addrs()) // has them
+ testHasAgentVersion(t, h2, h1p)
+ testHasPublicKey(t, h2, h1p, h1.Peerstore().PubKey(h1p)) // h1 should have h2's public key
+
+ // Need both sides to actually notice that the connection has been closed.
+ sentDisconnect1 := waitForDisconnectNotification(swarm1)
+ sentDisconnect2 := waitForDisconnectNotification(swarm2)
+ h1.Network().ClosePeer(h2p)
+ h2.Network().ClosePeer(h1p)
+ if len(h2.Network().ConnsToPeer(h1.ID())) != 0 || len(h1.Network().ConnsToPeer(h2.ID())) != 0 {
+ t.Fatal("should have no connections")
+ }
+
+ t.Log("testing addrs just after disconnect")
+ // addresses don't immediately expire on disconnect, so we should still have them
+ testKnowsAddrs(t, h2, h1p, h1.Addrs())
+ testKnowsAddrs(t, h1, h2p, h2.Addrs())
+
+ <-sentDisconnect1
+ <-sentDisconnect2
+
+ // the addrs had their TTLs reduced on disconnect, and
+ // will be forgotten soon after
+ t.Log("testing addrs after TTL expiration")
+ clk.Add(time.Second)
+ testKnowsAddrs(t, h1, h2p, []ma.Multiaddr{})
+ testKnowsAddrs(t, h2, h1p, []ma.Multiaddr{})
- N := 3
- for i := 0; i < N; i++ {
- subtestIDService(t)
+ // test that we received the "identify completed" event.
+ select {
+ case evtAny := <-sub.Out():
+ assertCorrectEvtPeerIdentificationCompleted(t, evtAny, h2)
+ case <-time.After(3 * time.Second):
+ t.Fatalf("expected EvtPeerIdentificationCompleted event within 3 seconds; none received")
}
}
-func TestProtoMatching(t *testing.T) {
- tcp1, _ := ma.NewMultiaddr("/ip4/1.2.3.4/tcp/1234")
- tcp2, _ := ma.NewMultiaddr("/ip4/1.2.3.4/tcp/2345")
- tcp3, _ := ma.NewMultiaddr("/ip4/1.2.3.4/tcp/4567")
- utp, _ := ma.NewMultiaddr("/ip4/1.2.3.4/udp/1234/utp")
+func randString(n int) string {
+ chars := "abcdefghijklmnopqrstuvwxyz"
+ buf := make([]byte, n)
+ for i := 0; i < n; i++ {
+ buf[i] = chars[rand.Intn(len(chars))]
+ }
+ return string(buf)
+}
- if !identify.HasConsistentTransport(tcp1, []ma.Multiaddr{tcp2, tcp3, utp}) {
- t.Fatal("expected match")
+func TestLargePushMessage(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ h1 := blhost.NewBlankHost(swarmt.GenSwarm(t))
+ h2 := blhost.NewBlankHost(swarmt.GenSwarm(t))
+
+ // add protocol strings to make the message larger
+ // about 3K of protocol strings
+ for i := 0; i < 100; i++ {
+ r := protocol.ID(fmt.Sprintf("%s-%d", randString(30), i))
+ h1.SetStreamHandler(r, func(network.Stream) {})
+ h2.SetStreamHandler(r, func(network.Stream) {})
}
- if identify.HasConsistentTransport(utp, []ma.Multiaddr{tcp2, tcp3}) {
- t.Fatal("expected mismatch")
+ h1p := h1.ID()
+ h2p := h2.ID()
+
+ ids1, err := identify.NewIDService(h1)
+ require.NoError(t, err)
+ defer ids1.Close()
+ ids1.Start()
+
+ ids2, err := identify.NewIDService(h2)
+ require.NoError(t, err)
+ defer ids2.Close()
+ ids2.Start()
+
+ testKnowsAddrs(t, h1, h2p, []ma.Multiaddr{}) // nothing
+ testKnowsAddrs(t, h2, h1p, []ma.Multiaddr{}) // nothing
+
+ h2pi := h2.Peerstore().PeerInfo(h2p)
+ require.NoError(t, h1.Connect(ctx, h2pi))
+ // h1 should immediately see a connection from h2
+ require.NotEmpty(t, h1.Network().ConnsToPeer(h2p))
+ // wait for h2 to Identify itself so we are sure h2 has seen the connection.
+ ids1.IdentifyConn(h1.Network().ConnsToPeer(h2p)[0])
+
+ // h2 should now see the connection and we should wait for h1 to Identify itself to h2.
+ require.NotEmpty(t, h2.Network().ConnsToPeer(h1p))
+ ids2.IdentifyConn(h2.Network().ConnsToPeer(h1p)[0])
+
+ testKnowsAddrs(t, h1, h2p, h2.Peerstore().Addrs(h2p))
+ testKnowsAddrs(t, h2, h1p, h1.Peerstore().Addrs(h1p))
+
+ // change addr on host 1 and ensure host2 gets a push
+ lad := ma.StringCast("/ip4/127.0.0.1/tcp/1234")
+ require.NoError(t, h1.Network().Listen(lad))
+ matest.AssertMultiaddrsContain(t, h1.Addrs(), lad)
+ emitAddrChangeEvt(t, h1)
+
+ require.Eventually(t, func() bool {
+ return ma.Contains(h2.Peerstore().Addrs(h1p), lad)
+ }, time.Second, 10*time.Millisecond)
+
+ // change addr on host2 and ensure host 1 gets a push
+ lad = ma.StringCast("/ip4/127.0.0.1/tcp/1235")
+ require.NoError(t, h2.Network().Listen(lad))
+ matest.AssertMultiaddrsContain(t, h2.Addrs(), lad)
+ emitAddrChangeEvt(t, h2)
+
+ require.Eventually(t, func() bool {
+ return ma.Contains(h1.Peerstore().Addrs(h2p), lad)
+ }, time.Second, 10*time.Millisecond)
+
+ // change addr on host2 again
+ lad2 := ma.StringCast("/ip4/127.0.0.1/tcp/1236")
+ require.NoError(t, h2.Network().Listen(lad2))
+ matest.AssertMultiaddrsContain(t, h2.Addrs(), lad2)
+ emitAddrChangeEvt(t, h2)
+
+ require.Eventually(t, func() bool {
+ return ma.Contains(h1.Peerstore().Addrs(h2p), lad2)
+ }, time.Second, 10*time.Millisecond)
+}
+
+func TestIdentifyResponseReadTimeout(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ h1 := blhost.NewBlankHost(swarmt.GenSwarm(t))
+ h2 := blhost.NewBlankHost(swarmt.GenSwarm(t))
+ defer h1.Close()
+ defer h2.Close()
+
+ h2p := h2.ID()
+ ids1, err := identify.NewIDService(h1, identify.WithTimeout(100*time.Millisecond))
+ require.NoError(t, err)
+ defer ids1.Close()
+ ids1.Start()
+
+ ids2, err := identify.NewIDService(h2, identify.WithTimeout(100*time.Millisecond))
+ require.NoError(t, err)
+ defer ids2.Close()
+ ids2.Start()
+
+ // remote stream handler will just hang and not send back an identify response
+ h2.SetStreamHandler(identify.ID, func(_ network.Stream) {
+ time.Sleep(100 * time.Second)
+ })
+
+ sub, err := ids1.Host.EventBus().Subscribe(new(event.EvtPeerIdentificationFailed))
+ require.NoError(t, err)
+
+ h2pi := h2.Peerstore().PeerInfo(h2p)
+ require.NoError(t, h1.Connect(ctx, h2pi))
+
+ select {
+ case ev := <-sub.Out():
+ fev := ev.(event.EvtPeerIdentificationFailed)
+ require.Contains(t, fev.Reason.Error(), "deadline")
+ case <-time.After(5 * time.Second):
+ t.Fatal("did not receive identify failure event")
+ }
+}
+
+func TestIncomingIDStreamsTimeout(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ protocols := []protocol.ID{identify.IDPush}
+
+ for _, p := range protocols {
+ h1 := blhost.NewBlankHost(swarmt.GenSwarm(t))
+ h2 := blhost.NewBlankHost(swarmt.GenSwarm(t))
+ defer h1.Close()
+ defer h2.Close()
+
+ ids1, err := identify.NewIDService(h1, identify.WithTimeout(100*time.Millisecond))
+ require.NoError(t, err)
+ defer ids1.Close()
+ ids1.Start()
+
+ ids2, err := identify.NewIDService(h2, identify.WithTimeout(100*time.Millisecond))
+ require.NoError(t, err)
+ defer ids2.Close()
+ ids2.Start()
+
+ h2p := h2.ID()
+ h2pi := h2.Peerstore().PeerInfo(h2p)
+ require.NoError(t, h1.Connect(ctx, h2pi))
+
+ _, err = h1.NewStream(ctx, h2p, p)
+ require.NoError(t, err)
+
+ // remote peer should eventually reset stream
+ require.Eventually(t, func() bool {
+ for _, c := range h2.Network().ConnsToPeer(h1.ID()) {
+ if len(c.GetStreams()) > 0 {
+ return false
+ }
+ }
+ return true
+ }, 5*time.Second, 200*time.Millisecond)
+ }
+}
+
+func TestOutOfOrderConnectedNotifs(t *testing.T) {
+ h1, err := libp2p.New(libp2p.NoListenAddrs)
+ require.NoError(t, err)
+ defer h1.Close()
+ h2, err := libp2p.New(libp2p.ListenAddrs(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1")))
+ require.NoError(t, err)
+ defer h2.Close()
+
+ doneCh := make(chan struct{})
+ errCh := make(chan error)
+
+ // This callback may be called before identify's Connected callback completes. If it does, IdentifyWait should still finish successfully.
+ h1.Network().Notify(&network.NotifyBundle{
+ ConnectedF: func(_ network.Network, c network.Conn) {
+ idChan := h1.(interface{ IDService() identify.IDService }).IDService().IdentifyWait(c)
+ go func() {
+ <-idChan
+ protos, err := h1.Peerstore().GetProtocols(h2.ID())
+ if err != nil {
+ errCh <- err
+ }
+ if len(protos) == 0 {
+ errCh <- errors.New("no protocols found. Identify did not complete")
+ }
+
+ close(doneCh)
+ }()
+ },
+ })
+
+ h1.Connect(context.Background(), peer.AddrInfo{ID: h2.ID(), Addrs: h2.Addrs()})
+
+ select {
+ case <-doneCh:
+ case err := <-errCh:
+ t.Fatalf("err: %v", err)
+ case <-time.After(3 * time.Second):
+ t.Fatalf("identify wait never completed")
}
}
+
+func waitForAddrInStream(t *testing.T, s <-chan ma.Multiaddr, expected ma.Multiaddr, timeout time.Duration, failMsg string) {
+ t.Helper()
+ for {
+ select {
+ case addr := <-s:
+ if addr.Equal(expected) {
+ return
+ }
+ continue
+ case <-time.After(timeout):
+ t.Fatal(failMsg)
+ }
+ }
+}
+
+func waitForDisconnectNotification(swarm *swarm.Swarm) <-chan struct{} {
+ done := make(chan struct{})
+ var once sync.Once
+ var nb *network.NotifyBundle
+ nb = &network.NotifyBundle{
+ DisconnectedF: func(_ network.Network, _ network.Conn) {
+ once.Do(func() {
+ go func() {
+ swarm.StopNotify(nb)
+ close(done)
+ }()
+ })
+ },
+ }
+ swarm.Notify(nb)
+
+ return done
+}
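The select on sub.Out() used throughout these tests is the general pattern for consuming identify events from a host's event bus. A minimal sketch outside the test harness, assuming an already-running host h; watchIdentify is an illustrative name:

```go
package example

import (
	"fmt"
	"time"

	"github.com/libp2p/go-libp2p/core/event"
	"github.com/libp2p/go-libp2p/core/host"
)

// watchIdentify logs the first identify completion seen on the host's event
// bus, or gives up after five seconds.
func watchIdentify(h host.Host) error {
	sub, err := h.EventBus().Subscribe(new(event.EvtPeerIdentificationCompleted))
	if err != nil {
		return err
	}
	defer sub.Close()

	select {
	case e := <-sub.Out():
		evt := e.(event.EvtPeerIdentificationCompleted)
		fmt.Printf("identified %s via %s\n", evt.Peer, evt.Conn.RemoteMultiaddr())
	case <-time.After(5 * time.Second):
		return fmt.Errorf("identify did not complete in time")
	}
	return nil
}
```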
diff --git a/p2p/protocol/identify/internal/user-agent/user_agent.go b/p2p/protocol/identify/internal/user-agent/user_agent.go
new file mode 100644
index 0000000000..a639d4c5f5
--- /dev/null
+++ b/p2p/protocol/identify/internal/user-agent/user_agent.go
@@ -0,0 +1,49 @@
+package useragent
+
+import (
+ "fmt"
+ "runtime/debug"
+)
+
+func DefaultUserAgent() string {
+ return defaultUserAgent
+}
+
+var defaultUserAgent = "github.com/libp2p/go-libp2p"
+
+func init() {
+ bi, ok := debug.ReadBuildInfo()
+ if !ok {
+ return
+ }
+ version := bi.Main.Version
+ // version will only be non-empty if built as a dependency of another module
+ if version == "" {
+ return
+ }
+
+ if version != "(devel)" {
+ defaultUserAgent = fmt.Sprintf("%s@%s", bi.Main.Path, bi.Main.Version)
+ return
+ }
+
+ var revision string
+ var dirty bool
+ for _, bs := range bi.Settings {
+ switch bs.Key {
+ case "vcs.revision":
+ revision = bs.Value
+ if len(revision) > 9 {
+ revision = revision[:9]
+ }
+ case "vcs.modified":
+ if bs.Value == "true" {
+ dirty = true
+ }
+ }
+ }
+ defaultUserAgent = fmt.Sprintf("%s@%s", bi.Main.Path, revision)
+ if dirty {
+ defaultUserAgent += "-dirty"
+ }
+}
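To illustrate what this init produces (the package is internal, so only go-libp2p itself can import it), a sketch with made-up module paths and revisions:

```go
package identify

import (
	"fmt"

	useragent "github.com/libp2p/go-libp2p/p2p/protocol/identify/internal/user-agent"
)

// printDefaultAgent is a hypothetical helper showing the output format.
// Example values (module path and revision are made up):
//
//	tagged dependency build:  "github.com/example/app@v1.2.3"
//	clean "(devel)" checkout: "github.com/example/app@0123abcd4"
//	dirty checkout:           "github.com/example/app@0123abcd4-dirty"
func printDefaultAgent() {
	fmt.Println(useragent.DefaultUserAgent())
}
```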
diff --git a/p2p/protocol/identify/metrics.go b/p2p/protocol/identify/metrics.go
new file mode 100644
index 0000000000..28598fa33b
--- /dev/null
+++ b/p2p/protocol/identify/metrics.go
@@ -0,0 +1,206 @@
+package identify
+
+import (
+ "github.com/libp2p/go-libp2p/core/event"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/p2p/metricshelper"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+const metricNamespace = "libp2p_identify"
+
+var (
+ pushesTriggered = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "identify_pushes_triggered_total",
+ Help: "Pushes Triggered",
+ },
+ []string{"trigger"},
+ )
+ identify = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "identify_total",
+ Help: "Identify",
+ },
+ []string{"dir"},
+ )
+ identifyPush = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "identify_push_total",
+ Help: "Identify Push",
+ },
+ []string{"dir"},
+ )
+ connPushSupportTotal = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: metricNamespace,
+ Name: "conn_push_support_total",
+ Help: "Identify Connection Push Support",
+ },
+ []string{"support"},
+ )
+ protocolsCount = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "protocols_count",
+ Help: "Protocols Count",
+ },
+ )
+ addrsCount = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: metricNamespace,
+ Name: "addrs_count",
+ Help: "Address Count",
+ },
+ )
+ numProtocolsReceived = prometheus.NewHistogram(
+ prometheus.HistogramOpts{
+ Namespace: metricNamespace,
+ Name: "protocols_received",
+ Help: "Number of Protocols received",
+ Buckets: buckets,
+ },
+ )
+ numAddrsReceived = prometheus.NewHistogram(
+ prometheus.HistogramOpts{
+ Namespace: metricNamespace,
+ Name: "addrs_received",
+ Help: "Number of addrs received",
+ Buckets: buckets,
+ },
+ )
+ collectors = []prometheus.Collector{
+ pushesTriggered,
+ identify,
+ identifyPush,
+ connPushSupportTotal,
+ protocolsCount,
+ addrsCount,
+ numProtocolsReceived,
+ numAddrsReceived,
+ }
+ // buckets from 1 to 20 in steps of 1, then 25 to 100 in steps of 5
+ buckets = append(
+ prometheus.LinearBuckets(1, 1, 20),
+ prometheus.LinearBuckets(25, 5, 16)...,
+ )
+)
+
+type MetricsTracer interface {
+ // TriggeredPushes counts IdentifyPushes triggered by event
+ TriggeredPushes(event any)
+
+ // ConnPushSupport counts peers by Push Support
+ ConnPushSupport(identifyPushSupport)
+
+ // IdentifyReceived tracks metrics on receiving an identify response
+ IdentifyReceived(isPush bool, numProtocols int, numAddrs int)
+
+ // IdentifySent tracks metrics on sending an identify response
+ IdentifySent(isPush bool, numProtocols int, numAddrs int)
+}
+
+type metricsTracer struct{}
+
+var _ MetricsTracer = &metricsTracer{}
+
+type metricsTracerSetting struct {
+ reg prometheus.Registerer
+}
+
+type MetricsTracerOption func(*metricsTracerSetting)
+
+func WithRegisterer(reg prometheus.Registerer) MetricsTracerOption {
+ return func(s *metricsTracerSetting) {
+ if reg != nil {
+ s.reg = reg
+ }
+ }
+}
+
+func NewMetricsTracer(opts ...MetricsTracerOption) MetricsTracer {
+ setting := &metricsTracerSetting{reg: prometheus.DefaultRegisterer}
+ for _, opt := range opts {
+ opt(setting)
+ }
+ metricshelper.RegisterCollectors(setting.reg, collectors...)
+ return &metricsTracer{}
+}
+
+func (t *metricsTracer) TriggeredPushes(ev any) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ typ := "unknown"
+ switch ev.(type) {
+ case event.EvtLocalProtocolsUpdated:
+ typ = "protocols_updated"
+ case event.EvtLocalAddressesUpdated:
+ typ = "addresses_updated"
+ }
+ *tags = append(*tags, typ)
+ pushesTriggered.WithLabelValues(*tags...).Inc()
+}
+
+func (t *metricsTracer) IdentifySent(isPush bool, numProtocols int, numAddrs int) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ if isPush {
+ *tags = append(*tags, metricshelper.GetDirection(network.DirOutbound))
+ identifyPush.WithLabelValues(*tags...).Inc()
+ } else {
+ *tags = append(*tags, metricshelper.GetDirection(network.DirInbound))
+ identify.WithLabelValues(*tags...).Inc()
+ }
+
+ protocolsCount.Set(float64(numProtocols))
+ addrsCount.Set(float64(numAddrs))
+}
+
+func (t *metricsTracer) IdentifyReceived(isPush bool, numProtocols int, numAddrs int) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ if isPush {
+ *tags = append(*tags, metricshelper.GetDirection(network.DirInbound))
+ identifyPush.WithLabelValues(*tags...).Inc()
+ } else {
+ *tags = append(*tags, metricshelper.GetDirection(network.DirOutbound))
+ identify.WithLabelValues(*tags...).Inc()
+ }
+
+ numProtocolsReceived.Observe(float64(numProtocols))
+ numAddrsReceived.Observe(float64(numAddrs))
+}
+
+func (t *metricsTracer) ConnPushSupport(support identifyPushSupport) {
+ tags := metricshelper.GetStringSlice()
+ defer metricshelper.PutStringSlice(tags)
+
+ *tags = append(*tags, getPushSupport(support))
+ connPushSupportTotal.WithLabelValues(*tags...).Inc()
+}
+
+func getPushSupport(s identifyPushSupport) string {
+ switch s {
+ case identifyPushSupported:
+ return "supported"
+ case identifyPushUnsupported:
+ return "not supported"
+ default:
+ return "unknown"
+ }
+}
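A sketch of how this tracer gets wired into the identify service, using the WithMetricsTracer option added in opts.go below and a caller-supplied Prometheus registry; newInstrumentedIdentify is a hypothetical helper:

```go
package example

import (
	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/p2p/protocol/identify"

	"github.com/prometheus/client_golang/prometheus"
)

// newInstrumentedIdentify attaches an identify service whose metrics land on
// the given registry instead of prometheus.DefaultRegisterer.
func newInstrumentedIdentify(h host.Host, reg prometheus.Registerer) (identify.IDService, error) {
	tr := identify.NewMetricsTracer(identify.WithRegisterer(reg))
	ids, err := identify.NewIDService(h, identify.WithMetricsTracer(tr))
	if err != nil {
		return nil, err
	}
	ids.Start()
	return ids, nil
}
```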
diff --git a/p2p/protocol/identify/metrics_test.go b/p2p/protocol/identify/metrics_test.go
new file mode 100644
index 0000000000..2cf5a209a1
--- /dev/null
+++ b/p2p/protocol/identify/metrics_test.go
@@ -0,0 +1,38 @@
+//go:build nocover
+
+package identify
+
+import (
+ "math/rand"
+ "testing"
+
+ "github.com/libp2p/go-libp2p/core/event"
+)
+
+func TestMetricsNoAllocNoCover(t *testing.T) {
+ events := []any{
+ event.EvtLocalAddressesUpdated{},
+ event.EvtLocalProtocolsUpdated{},
+ event.EvtNATDeviceTypeChanged{},
+ }
+
+ pushSupport := []identifyPushSupport{
+ identifyPushSupportUnknown,
+ identifyPushSupported,
+ identifyPushUnsupported,
+ }
+
+ tr := NewMetricsTracer()
+ tests := map[string]func(){
+ "TriggeredPushes": func() { tr.TriggeredPushes(events[rand.Intn(len(events))]) },
+ "ConnPushSupport": func() { tr.ConnPushSupport(pushSupport[rand.Intn(len(pushSupport))]) },
+ "IdentifyReceived": func() { tr.IdentifyReceived(rand.Intn(2) == 0, rand.Intn(20), rand.Intn(20)) },
+ "IdentifySent": func() { tr.IdentifySent(rand.Intn(2) == 0, rand.Intn(20), rand.Intn(20)) },
+ }
+ for method, f := range tests {
+ allocs := testing.AllocsPerRun(1000, f)
+ if allocs > 0 {
+ t.Fatalf("Alloc Test: %s, got: %0.2f, expected: 0 allocs", method, allocs)
+ }
+ }
+}
diff --git a/p2p/protocol/identify/obsaddr.go b/p2p/protocol/identify/obsaddr.go
deleted file mode 100644
index 0e72ab3433..0000000000
--- a/p2p/protocol/identify/obsaddr.go
+++ /dev/null
@@ -1,129 +0,0 @@
-package identify
-
-import (
- "sync"
- "time"
-
- pstore "github.com/libp2p/go-libp2p-peerstore"
- ma "github.com/multiformats/go-multiaddr"
-)
-
-const ActivationThresh = 4
-
-// ObservedAddr is an entry for an address reported by our peers.
-// We only use addresses that:
-// - have been observed at least 4 times in last 1h. (counter symmetric nats)
-// - have been observed at least once recently (1h), because our position in the
-// network, or network port mapppings, may have changed.
-type ObservedAddr struct {
- Addr ma.Multiaddr
- SeenBy map[string]time.Time
- LastSeen time.Time
- Activated bool
-}
-
-func (oa *ObservedAddr) TryActivate(ttl time.Duration) bool {
- // cleanup SeenBy set
- now := time.Now()
- for k, t := range oa.SeenBy {
- if now.Sub(t) > ttl*ActivationThresh {
- delete(oa.SeenBy, k)
- }
- }
-
- // We only activate if in the TTL other peers observed the same address
- // of ours at least 4 times.
- return len(oa.SeenBy) >= ActivationThresh
-}
-
-// ObservedAddrSet keeps track of a set of ObservedAddrs
-// the zero-value is ready to be used.
-type ObservedAddrSet struct {
- sync.Mutex // guards whole datastruct.
-
- addrs map[string]*ObservedAddr
- ttl time.Duration
-}
-
-func (oas *ObservedAddrSet) Addrs() []ma.Multiaddr {
- oas.Lock()
- defer oas.Unlock()
-
- // for zero-value.
- if oas.addrs == nil {
- return nil
- }
-
- now := time.Now()
- addrs := make([]ma.Multiaddr, 0, len(oas.addrs))
- for s, a := range oas.addrs {
- // remove timed out addresses.
- if now.Sub(a.LastSeen) > oas.ttl {
- delete(oas.addrs, s)
- continue
- }
-
- if a.Activated || a.TryActivate(oas.ttl) {
- addrs = append(addrs, a.Addr)
- }
- }
- return addrs
-}
-
-func (oas *ObservedAddrSet) Add(addr ma.Multiaddr, observer ma.Multiaddr) {
- oas.Lock()
- defer oas.Unlock()
-
- // for zero-value.
- if oas.addrs == nil {
- oas.addrs = make(map[string]*ObservedAddr)
- oas.ttl = pstore.OwnObservedAddrTTL
- }
-
- s := addr.String()
- oa, found := oas.addrs[s]
-
- // first time seeing address.
- if !found {
- oa = &ObservedAddr{
- Addr: addr,
- SeenBy: make(map[string]time.Time),
- }
- oas.addrs[s] = oa
- }
-
- // mark the observer
- oa.SeenBy[observerGroup(observer)] = time.Now()
- oa.LastSeen = time.Now()
-}
-
-// observerGroup is a function that determines what part of
-// a multiaddr counts as a different observer. for example,
-// two ipfs nodes at the same IP/TCP transport would get
-// the exact same NAT mapping; they would count as the
-// same observer. This may protect against NATs who assign
-// different ports to addresses at different IP hosts, but
-// not TCP ports.
-//
-// Here, we use the root multiaddr address. This is mostly
-// IP addresses. In practice, this is what we want.
-func observerGroup(m ma.Multiaddr) string {
- //TODO: If IPv6 rolls out we should mark /64 routing zones as one group
- return ma.Split(m)[0].String()
-}
-
-func (oas *ObservedAddrSet) SetTTL(ttl time.Duration) {
- oas.Lock()
- defer oas.Unlock()
- oas.ttl = ttl
-}
-
-func (oas *ObservedAddrSet) TTL() time.Duration {
- oas.Lock()
- defer oas.Unlock()
- // for zero-value.
- if oas.addrs == nil {
- oas.ttl = pstore.OwnObservedAddrTTL
- }
- return oas.ttl
-}
diff --git a/p2p/protocol/identify/obsaddr_test.go b/p2p/protocol/identify/obsaddr_test.go
deleted file mode 100644
index acf3d30d00..0000000000
--- a/p2p/protocol/identify/obsaddr_test.go
+++ /dev/null
@@ -1,111 +0,0 @@
-package identify
-
-import (
- "testing"
- "time"
-
- ma "github.com/multiformats/go-multiaddr"
-)
-
-// TestObsAddrSet
-func TestObsAddrSet(t *testing.T) {
- m := func(s string) ma.Multiaddr {
- m, err := ma.NewMultiaddr(s)
- if err != nil {
- t.Error(err)
- }
- return m
- }
-
- addrsMarch := func(a, b []ma.Multiaddr) bool {
- if len(a) != len(b) {
- return false
- }
-
- for _, aa := range a {
- found := false
- for _, bb := range b {
- if aa.Equal(bb) {
- found = true
- break
- }
- }
- if !found {
- return false
- }
- }
- return true
- }
-
- a1 := m("/ip4/1.2.3.4/tcp/1231")
- a2 := m("/ip4/1.2.3.4/tcp/1232")
- a3 := m("/ip4/1.2.3.4/tcp/1233")
- a4 := m("/ip4/1.2.3.4/tcp/1234")
- a5 := m("/ip4/1.2.3.4/tcp/1235")
-
- b1 := m("/ip4/1.2.3.6/tcp/1236")
- b2 := m("/ip4/1.2.3.7/tcp/1237")
- b3 := m("/ip4/1.2.3.8/tcp/1237")
- b4 := m("/ip4/1.2.3.9/tcp/1237")
- b5 := m("/ip4/1.2.3.10/tcp/1237")
-
- oas := ObservedAddrSet{}
-
- if !addrsMarch(oas.Addrs(), nil) {
- t.Error("addrs should be empty")
- }
-
- oas.Add(a1, a4)
- oas.Add(a2, a4)
- oas.Add(a3, a4)
-
- // these are all different so we should not yet get them.
- if !addrsMarch(oas.Addrs(), nil) {
- t.Error("addrs should _still_ be empty (once)")
- }
-
- // same observer, so should not yet get them.
- oas.Add(a1, a4)
- oas.Add(a2, a4)
- oas.Add(a3, a4)
- if !addrsMarch(oas.Addrs(), nil) {
- t.Error("addrs should _still_ be empty (same obs)")
- }
-
- // different observer, but same observer group.
- oas.Add(a1, a5)
- oas.Add(a2, a5)
- oas.Add(a3, a5)
- if !addrsMarch(oas.Addrs(), nil) {
- t.Error("addrs should _still_ be empty (same obs group)")
- }
-
- oas.Add(a1, b1)
- oas.Add(a1, b2)
- oas.Add(a1, b3)
- if !addrsMarch(oas.Addrs(), []ma.Multiaddr{a1}) {
- t.Error("addrs should only have a1")
- }
-
- oas.Add(a2, a5)
- oas.Add(a1, a5)
- oas.Add(a1, a5)
- oas.Add(a2, b1)
- oas.Add(a1, b1)
- oas.Add(a1, b1)
- oas.Add(a2, b2)
- oas.Add(a1, b2)
- oas.Add(a1, b2)
- oas.Add(a2, b4)
- oas.Add(a2, b5)
- if !addrsMarch(oas.Addrs(), []ma.Multiaddr{a1, a2}) {
- t.Error("addrs should only have a1, a2")
- }
-
- // change the timeout constant so we can time it out.
- oas.SetTTL(time.Millisecond * 200)
- <-time.After(time.Millisecond * 210)
- if !addrsMarch(oas.Addrs(), nil) {
- t.Error("addrs should have timed out")
- }
-}
diff --git a/p2p/protocol/identify/opts.go b/p2p/protocol/identify/opts.go
new file mode 100644
index 0000000000..f71777ddac
--- /dev/null
+++ b/p2p/protocol/identify/opts.go
@@ -0,0 +1,50 @@
+package identify
+
+import "time"
+
+type config struct {
+ protocolVersion string
+ userAgent string
+ disableSignedPeerRecord bool
+ metricsTracer MetricsTracer
+ timeout time.Duration
+}
+
+// Option is an option function for identify.
+type Option func(*config)
+
+// ProtocolVersion sets the protocol version string that will be used to
+// identify the family of protocols used by the peer.
+func ProtocolVersion(s string) Option {
+ return func(cfg *config) {
+ cfg.protocolVersion = s
+ }
+}
+
+// UserAgent sets the user agent this node will identify itself with to peers.
+func UserAgent(ua string) Option {
+ return func(cfg *config) {
+ cfg.userAgent = ua
+ }
+}
+
+// DisableSignedPeerRecord disables populating signed peer records on the outgoing Identify response
+// and ONLY sends the unsigned addresses.
+func DisableSignedPeerRecord() Option {
+ return func(cfg *config) {
+ cfg.disableSignedPeerRecord = true
+ }
+}
+
+// WithMetricsTracer sets the metrics tracer used to instrument the identify service.
+func WithMetricsTracer(tr MetricsTracer) Option {
+ return func(cfg *config) {
+ cfg.metricsTracer = tr
+ }
+}
+
+// WithTimeout sets the timeout for identify interactions.
+func WithTimeout(timeout time.Duration) Option {
+ return func(cfg *config) {
+ cfg.timeout = timeout
+ }
+}
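A usage sketch combining several of these options; all values are illustrative, and h is assumed to be an existing host:

```go
package example

import (
	"time"

	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/p2p/protocol/identify"
)

// newIdentify shows the option surface with illustrative values.
func newIdentify(h host.Host) (identify.IDService, error) {
	ids, err := identify.NewIDService(h,
		identify.ProtocolVersion("myapp/1.0.0"),
		identify.UserAgent("myapp/0.3.1"),
		identify.WithTimeout(30*time.Second),
	)
	if err != nil {
		return nil, err
	}
	ids.Start()
	return ids, nil
}
```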
diff --git a/p2p/protocol/identify/pb/Makefile b/p2p/protocol/identify/pb/Makefile
deleted file mode 100644
index d08f1c3ebb..0000000000
--- a/p2p/protocol/identify/pb/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-
-PB = $(wildcard *.proto)
-GO = $(PB:.proto=.pb.go)
-
-all: $(GO)
-
-%.pb.go: %.proto
- protoc --gogo_out=. --proto_path=../../../../../../:/usr/local/opt/protobuf/include:. $<
-
-clean:
- rm *.pb.go
diff --git a/p2p/protocol/identify/pb/identify.pb.go b/p2p/protocol/identify/pb/identify.pb.go
index cc4cba4975..fc92dc9a05 100644
--- a/p2p/protocol/identify/pb/identify.pb.go
+++ b/p2p/protocol/identify/pb/identify.pb.go
@@ -1,31 +1,33 @@
-// Code generated by protoc-gen-gogo.
-// source: identify.proto
-// DO NOT EDIT!
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.6
+// protoc v5.29.2
+// source: p2p/protocol/identify/pb/identify.proto
-/*
-Package identify_pb is a generated protocol buffer package.
+package pb
-It is generated from these files:
- identify.proto
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+ unsafe "unsafe"
+)
-It has these top-level messages:
- Identify
-*/
-package identify_pb
-
-import proto "github.com/gogo/protobuf/proto"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = math.Inf
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
type Identify struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
// protocolVersion determines compatibility between peers
- ProtocolVersion *string `protobuf:"bytes,5,opt,name=protocolVersion" json:"protocolVersion,omitempty"`
+ ProtocolVersion *string `protobuf:"bytes,5,opt,name=protocolVersion" json:"protocolVersion,omitempty"` // e.g. ipfs/1.0.0
// agentVersion is like a UserAgent string in browsers, or client version in bittorrent
// includes the client name and client.
- AgentVersion *string `protobuf:"bytes,6,opt,name=agentVersion" json:"agentVersion,omitempty"`
+ AgentVersion *string `protobuf:"bytes,6,opt,name=agentVersion" json:"agentVersion,omitempty"` // e.g. go-ipfs/0.1.0
// publicKey is this node's public key (which also gives its node.ID)
// - may not need to be sent, as secure channel implies it has been sent.
// - then again, if we change / disable secure channel, may still want it.
@@ -37,55 +39,154 @@ type Identify struct {
// determine whether its connection to the local peer goes through NAT.
ObservedAddr []byte `protobuf:"bytes,4,opt,name=observedAddr" json:"observedAddr,omitempty"`
// protocols are the services this node is running
- Protocols []string `protobuf:"bytes,3,rep,name=protocols" json:"protocols,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Protocols []string `protobuf:"bytes,3,rep,name=protocols" json:"protocols,omitempty"`
+ // signedPeerRecord contains a serialized SignedEnvelope containing a PeerRecord,
+ // signed by the sending node. It contains the same addresses as the listenAddrs field, but
+ // in a form that lets us share authenticated addrs with other peers.
+ // see github.com/libp2p/go-libp2p/core/record/pb/envelope.proto and
+ // github.com/libp2p/go-libp2p/core/peer/pb/peer_record.proto for message definitions.
+ SignedPeerRecord []byte `protobuf:"bytes,8,opt,name=signedPeerRecord" json:"signedPeerRecord,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Identify) Reset() {
+ *x = Identify{}
+ mi := &file_p2p_protocol_identify_pb_identify_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Identify) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Identify) Reset() { *m = Identify{} }
-func (m *Identify) String() string { return proto.CompactTextString(m) }
-func (*Identify) ProtoMessage() {}
+func (*Identify) ProtoMessage() {}
-func (m *Identify) GetProtocolVersion() string {
- if m != nil && m.ProtocolVersion != nil {
- return *m.ProtocolVersion
+func (x *Identify) ProtoReflect() protoreflect.Message {
+ mi := &file_p2p_protocol_identify_pb_identify_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Identify.ProtoReflect.Descriptor instead.
+func (*Identify) Descriptor() ([]byte, []int) {
+ return file_p2p_protocol_identify_pb_identify_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Identify) GetProtocolVersion() string {
+ if x != nil && x.ProtocolVersion != nil {
+ return *x.ProtocolVersion
}
return ""
}
-func (m *Identify) GetAgentVersion() string {
- if m != nil && m.AgentVersion != nil {
- return *m.AgentVersion
+func (x *Identify) GetAgentVersion() string {
+ if x != nil && x.AgentVersion != nil {
+ return *x.AgentVersion
}
return ""
}
-func (m *Identify) GetPublicKey() []byte {
- if m != nil {
- return m.PublicKey
+func (x *Identify) GetPublicKey() []byte {
+ if x != nil {
+ return x.PublicKey
+ }
+ return nil
+}
+
+func (x *Identify) GetListenAddrs() [][]byte {
+ if x != nil {
+ return x.ListenAddrs
}
return nil
}
-func (m *Identify) GetListenAddrs() [][]byte {
- if m != nil {
- return m.ListenAddrs
+func (x *Identify) GetObservedAddr() []byte {
+ if x != nil {
+ return x.ObservedAddr
}
return nil
}
-func (m *Identify) GetObservedAddr() []byte {
- if m != nil {
- return m.ObservedAddr
+func (x *Identify) GetProtocols() []string {
+ if x != nil {
+ return x.Protocols
}
return nil
}
-func (m *Identify) GetProtocols() []string {
- if m != nil {
- return m.Protocols
+func (x *Identify) GetSignedPeerRecord() []byte {
+ if x != nil {
+ return x.SignedPeerRecord
}
return nil
}
-func init() {
+var File_p2p_protocol_identify_pb_identify_proto protoreflect.FileDescriptor
+
+const file_p2p_protocol_identify_pb_identify_proto_rawDesc = "" +
+ "\n" +
+ "'p2p/protocol/identify/pb/identify.proto\x12\videntify.pb\"\x86\x02\n" +
+ "\bIdentify\x12(\n" +
+ "\x0fprotocolVersion\x18\x05 \x01(\tR\x0fprotocolVersion\x12\"\n" +
+ "\fagentVersion\x18\x06 \x01(\tR\fagentVersion\x12\x1c\n" +
+ "\tpublicKey\x18\x01 \x01(\fR\tpublicKey\x12 \n" +
+ "\vlistenAddrs\x18\x02 \x03(\fR\vlistenAddrs\x12\"\n" +
+ "\fobservedAddr\x18\x04 \x01(\fR\fobservedAddr\x12\x1c\n" +
+ "\tprotocols\x18\x03 \x03(\tR\tprotocols\x12*\n" +
+ "\x10signedPeerRecord\x18\b \x01(\fR\x10signedPeerRecordB6Z4github.com/libp2p/go-libp2p/p2p/protocol/identify/pb"
+
+var (
+ file_p2p_protocol_identify_pb_identify_proto_rawDescOnce sync.Once
+ file_p2p_protocol_identify_pb_identify_proto_rawDescData []byte
+)
+
+func file_p2p_protocol_identify_pb_identify_proto_rawDescGZIP() []byte {
+ file_p2p_protocol_identify_pb_identify_proto_rawDescOnce.Do(func() {
+ file_p2p_protocol_identify_pb_identify_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_p2p_protocol_identify_pb_identify_proto_rawDesc), len(file_p2p_protocol_identify_pb_identify_proto_rawDesc)))
+ })
+ return file_p2p_protocol_identify_pb_identify_proto_rawDescData
+}
+
+var file_p2p_protocol_identify_pb_identify_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_p2p_protocol_identify_pb_identify_proto_goTypes = []any{
+ (*Identify)(nil), // 0: identify.pb.Identify
+}
+var file_p2p_protocol_identify_pb_identify_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_p2p_protocol_identify_pb_identify_proto_init() }
+func file_p2p_protocol_identify_pb_identify_proto_init() {
+ if File_p2p_protocol_identify_pb_identify_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_p2p_protocol_identify_pb_identify_proto_rawDesc), len(file_p2p_protocol_identify_pb_identify_proto_rawDesc)),
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_p2p_protocol_identify_pb_identify_proto_goTypes,
+ DependencyIndexes: file_p2p_protocol_identify_pb_identify_proto_depIdxs,
+ MessageInfos: file_p2p_protocol_identify_pb_identify_proto_msgTypes,
+ }.Build()
+ File_p2p_protocol_identify_pb_identify_proto = out.File
+ file_p2p_protocol_identify_pb_identify_proto_goTypes = nil
+ file_p2p_protocol_identify_pb_identify_proto_depIdxs = nil
}
diff --git a/p2p/protocol/identify/pb/identify.proto b/p2p/protocol/identify/pb/identify.proto
index 7d31e0474a..113438708a 100644
--- a/p2p/protocol/identify/pb/identify.proto
+++ b/p2p/protocol/identify/pb/identify.proto
@@ -1,5 +1,9 @@
+syntax = "proto2";
+
package identify.pb;
+option go_package = "github.com/libp2p/go-libp2p/p2p/protocol/identify/pb";
+
message Identify {
// protocolVersion determines compatibility between peers
@@ -24,4 +28,11 @@ message Identify {
// protocols are the services this node is running
repeated string protocols = 3;
+
+ // signedPeerRecord contains a serialized SignedEnvelope containing a PeerRecord,
+ // signed by the sending node. It contains the same addresses as the listenAddrs field, but
+ // in a form that lets us share authenticated addrs with other peers.
+ // see github.com/libp2p/go-libp2p/core/record/pb/envelope.proto and
+ // github.com/libp2p/go-libp2p/core/peer/pb/peer_record.proto for message definitions.
+ optional bytes signedPeerRecord = 8;
}
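For reference, a minimal sketch of building and encoding this message with the regenerated bindings above; since this is proto2, scalar fields are set through pointer helpers like proto.String, and the values are illustrative:

```go
package main

import (
	"fmt"

	pb "github.com/libp2p/go-libp2p/p2p/protocol/identify/pb"
	"google.golang.org/protobuf/proto"
)

func main() {
	msg := &pb.Identify{
		ProtocolVersion: proto.String("ipfs/0.1.0"),
		AgentVersion:    proto.String("go-libp2p/example"),
		Protocols:       []string{"/ipfs/id/1.0.0", "/ipfs/ping/1.0.0"},
	}
	data, err := proto.Marshal(msg)
	if err != nil {
		panic(err)
	}
	// On the wire, identify frames this with a varint length prefix (pbio);
	// the raw proto.Marshal output is shown here for simplicity.
	fmt.Printf("encoded %d bytes\n", len(data))
}
```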
diff --git a/p2p/protocol/identify/snapshot_test.go b/p2p/protocol/identify/snapshot_test.go
new file mode 100644
index 0000000000..55354a49a6
--- /dev/null
+++ b/p2p/protocol/identify/snapshot_test.go
@@ -0,0 +1,47 @@
+package identify
+
+import (
+ "crypto/rand"
+ "testing"
+
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/protocol"
+ "github.com/libp2p/go-libp2p/core/record"
+
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/stretchr/testify/require"
+)
+
+func TestSnapshotEquality(t *testing.T) {
+ addr1 := ma.StringCast("/ip4/127.0.0.1/tcp/1234")
+ addr2 := ma.StringCast("/ip4/127.0.0.1/udp/1234/quic-v1")
+
+ _, pubKey1, err := crypto.GenerateEd25519Key(rand.Reader)
+ require.NoError(t, err)
+ _, pubKey2, err := crypto.GenerateEd25519Key(rand.Reader)
+ require.NoError(t, err)
+ record1 := &record.Envelope{PublicKey: pubKey1}
+ record2 := &record.Envelope{PublicKey: pubKey2}
+
+ for _, tc := range []struct {
+ s1, s2 *identifySnapshot
+ result bool
+ }{
+ {s1: &identifySnapshot{record: record1}, s2: &identifySnapshot{record: record1}, result: true},
+ {s1: &identifySnapshot{record: record1}, s2: &identifySnapshot{record: record2}, result: false},
+ {s1: &identifySnapshot{addrs: []ma.Multiaddr{addr1}}, s2: &identifySnapshot{addrs: []ma.Multiaddr{addr1}}, result: true},
+ {s1: &identifySnapshot{addrs: []ma.Multiaddr{addr1}}, s2: &identifySnapshot{addrs: []ma.Multiaddr{addr2}}, result: false},
+ {s1: &identifySnapshot{addrs: []ma.Multiaddr{addr1, addr2}}, s2: &identifySnapshot{addrs: []ma.Multiaddr{addr2}}, result: false},
+ {s1: &identifySnapshot{addrs: []ma.Multiaddr{addr1}}, s2: &identifySnapshot{addrs: []ma.Multiaddr{addr1, addr2}}, result: false},
+ {s1: &identifySnapshot{protocols: []protocol.ID{"/foo"}}, s2: &identifySnapshot{protocols: []protocol.ID{"/foo"}}, result: true},
+ {s1: &identifySnapshot{protocols: []protocol.ID{"/foo"}}, s2: &identifySnapshot{protocols: []protocol.ID{"/bar"}}, result: false},
+ {s1: &identifySnapshot{protocols: []protocol.ID{"/foo", "/bar"}}, s2: &identifySnapshot{protocols: []protocol.ID{"/bar"}}, result: false},
+ {s1: &identifySnapshot{protocols: []protocol.ID{"/foo"}}, s2: &identifySnapshot{protocols: []protocol.ID{"/foo", "/bar"}}, result: false},
+ } {
+ if tc.result {
+ require.Truef(t, tc.s1.Equal(tc.s2), "expected equal: %+v and %+v", tc.s1, tc.s2)
+ } else {
+ require.Falsef(t, tc.s1.Equal(tc.s2), "expected unequal: %+v and %+v", tc.s1, tc.s2)
+ }
+ }
+}
diff --git a/p2p/protocol/ping/ping.go b/p2p/protocol/ping/ping.go
index 54464f3080..d6a62e6114 100644
--- a/p2p/protocol/ping/ping.go
+++ b/p2p/protocol/ping/ping.go
@@ -3,24 +3,31 @@ package ping
import (
"bytes"
"context"
+ "crypto/rand"
+ "encoding/binary"
"errors"
"io"
+ mrand "math/rand"
"time"
- u "github.com/ipfs/go-ipfs-util"
- logging "github.com/ipfs/go-log"
- host "github.com/libp2p/go-libp2p-host"
- inet "github.com/libp2p/go-libp2p-net"
- peer "github.com/libp2p/go-libp2p-peer"
+ pool "github.com/libp2p/go-buffer-pool"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ logging "github.com/libp2p/go-libp2p/gologshim"
)
var log = logging.Logger("ping")
-const PingSize = 32
+const (
+ PingSize = 32
+ pingTimeout = 10 * time.Second
+ pingDuration = 30 * time.Second
-const ID = "/ipfs/ping/1.0.0"
+ ID = "/ipfs/ping/1.0.0"
-const pingTimeout = time.Second * 60
+ ServiceName = "libp2p.ping"
+)
type PingService struct {
Host host.Host
@@ -32,8 +39,24 @@ func NewPingService(h host.Host) *PingService {
return ps
}
-func (p *PingService) PingHandler(s inet.Stream) {
- buf := make([]byte, PingSize)
+func (p *PingService) PingHandler(s network.Stream) {
+ if err := s.Scope().SetService(ServiceName); err != nil {
+ log.Debug("error attaching stream to ping service", "err", err)
+ s.Reset()
+ return
+ }
+
+ if err := s.Scope().ReserveMemory(PingSize, network.ReservationPriorityAlways); err != nil {
+ log.Debug("error reserving memory for ping stream", "err", err)
+ s.Reset()
+ return
+ }
+ defer s.Scope().ReleaseMemory(PingSize)
+
+ s.SetDeadline(time.Now().Add(pingDuration))
+
+ buf := pool.Get(PingSize)
+ defer pool.Put(buf)
errCh := make(chan error, 1)
defer close(errCh)
@@ -44,19 +67,14 @@ func (p *PingService) PingHandler(s inet.Stream) {
select {
case <-timer.C:
log.Debug("ping timeout")
- s.Reset()
case err, ok := <-errCh:
if ok {
- log.Debug(err)
- if err == io.EOF {
- s.Close()
- } else {
- s.Reset()
- }
+ log.Debug("ping error", "err", err)
} else {
log.Error("ping loop failed without error")
}
}
+ s.Close()
}()
for {
@@ -76,59 +94,110 @@ func (p *PingService) PingHandler(s inet.Stream) {
}
}
-func (ps *PingService) Ping(ctx context.Context, p peer.ID) (<-chan time.Duration, error) {
- s, err := ps.Host.NewStream(ctx, p, ID)
+// Result is the outcome of a single ping attempt: either a measured RTT or an error.
+type Result struct {
+ RTT time.Duration
+ Error error
+}
+
+func (ps *PingService) Ping(ctx context.Context, p peer.ID) <-chan Result {
+ return Ping(ctx, ps.Host, p)
+}
+
+func pingError(err error) chan Result {
+ ch := make(chan Result, 1)
+ ch <- Result{Error: err}
+ close(ch)
+ return ch
+}
+
+// Ping pings the remote peer until the context is canceled, returning a stream
+// of RTTs or errors.
+func Ping(ctx context.Context, h host.Host, p peer.ID) <-chan Result {
+ s, err := h.NewStream(network.WithAllowLimitedConn(ctx, "ping"), p, ID)
if err != nil {
- return nil, err
+ return pingError(err)
+ }
+
+ if err := s.Scope().SetService(ServiceName); err != nil {
+ log.Debug("error attaching stream to ping service", "err", err)
+ s.Reset()
+ return pingError(err)
+ }
+
+ b := make([]byte, 8)
+ if _, err := rand.Read(b); err != nil {
+ log.Error("failed to get cryptographic random", "err", err)
+ s.Reset()
+ return pingError(err)
}
+ ra := mrand.New(mrand.NewSource(int64(binary.BigEndian.Uint64(b))))
+
+ ctx, cancel := context.WithCancel(ctx)
- out := make(chan time.Duration)
+ out := make(chan Result)
go func() {
defer close(out)
- defer s.Close()
- for {
+ defer cancel()
+
+ for ctx.Err() == nil {
+ var res Result
+ res.RTT, res.Error = ping(s, ra)
+
+ // canceled, ignore everything.
+ if ctx.Err() != nil {
+ return
+ }
+
+ // No error, record the RTT.
+ if res.Error == nil {
+ h.Peerstore().RecordLatency(p, res.RTT)
+ }
+
select {
+ case out <- res:
case <-ctx.Done():
return
- default:
- t, err := ping(s)
- if err != nil {
- s.Reset()
- log.Debugf("ping error: %s", err)
- return
- }
-
- ps.Host.Peerstore().RecordLatency(p, t)
- select {
- case out <- t:
- case <-ctx.Done():
- return
- }
}
}
}()
+ context.AfterFunc(ctx, func() {
+ // forces the ping to abort.
+ s.Reset()
+ })
- return out, nil
+ return out
}
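+
+// A minimal usage sketch (illustrative only; h and p are assumed to be an
+// existing host.Host and peer.ID, not names introduced by this change):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	for res := range Ping(ctx, h, p) {
+//		if res.Error != nil {
+//			break
+//		}
+//		fmt.Println("RTT:", res.RTT)
+//	}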
-func ping(s inet.Stream) (time.Duration, error) {
- buf := make([]byte, PingSize)
- u.NewTimeSeededRand().Read(buf)
+func ping(s network.Stream, randReader io.Reader) (time.Duration, error) {
+ if err := s.Scope().ReserveMemory(2*PingSize, network.ReservationPriorityAlways); err != nil {
+ log.Debug("error reserving memory for ping stream", "err", err)
+ s.Reset()
+ return 0, err
+ }
+ defer s.Scope().ReleaseMemory(2 * PingSize)
+
+ buf := pool.Get(PingSize)
+ defer pool.Put(buf)
+
+ if _, err := io.ReadFull(randReader, buf); err != nil {
+ return 0, err
+ }
before := time.Now()
- _, err := s.Write(buf)
- if err != nil {
+ if _, err := s.Write(buf); err != nil {
return 0, err
}
- rbuf := make([]byte, PingSize)
- _, err = io.ReadFull(s, rbuf)
- if err != nil {
+ rbuf := pool.Get(PingSize)
+ defer pool.Put(rbuf)
+
+ if _, err := io.ReadFull(s, rbuf); err != nil {
return 0, err
}
if !bytes.Equal(buf, rbuf) {
- return 0, errors.New("ping packet was incorrect!")
+ return 0, errors.New("ping packet was incorrect")
}
return time.Since(before), nil
diff --git a/p2p/protocol/ping/ping_test.go b/p2p/protocol/ping/ping_test.go
index a260cb81d4..d33662ee91 100644
--- a/p2p/protocol/ping/ping_test.go
+++ b/p2p/protocol/ping/ping_test.go
@@ -1,50 +1,54 @@
-package ping
+package ping_test
import (
"context"
"testing"
"time"
- netutil "github.com/libp2p/go-libp2p-netutil"
- peer "github.com/libp2p/go-libp2p-peer"
- pstore "github.com/libp2p/go-libp2p-peerstore"
+ "github.com/libp2p/go-libp2p/core/peer"
bhost "github.com/libp2p/go-libp2p/p2p/host/basic"
+ swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
+ "github.com/libp2p/go-libp2p/p2p/protocol/ping"
+
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/stretchr/testify/require"
)
func TestPing(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- h1 := bhost.New(netutil.GenSwarmNetwork(t, ctx))
- h2 := bhost.New(netutil.GenSwarmNetwork(t, ctx))
-
- err := h1.Connect(ctx, pstore.PeerInfo{
+ h1, err := bhost.NewHost(swarmt.GenSwarm(t), nil)
+ require.NoError(t, err)
+ defer h1.Close()
+ h1.Start()
+ h2, err := bhost.NewHost(swarmt.GenSwarm(t), nil)
+ require.NoError(t, err)
+ defer h2.Close()
+ h2.Start()
+
+ err = h1.Connect(ctx, peer.AddrInfo{
ID: h2.ID(),
- Addrs: h2.Addrs(),
+ Addrs: []ma.Multiaddr{h2.Addrs()[0]},
})
+ require.NoError(t, err)
- if err != nil {
- t.Fatal(err)
- }
-
- ps1 := NewPingService(h1)
- ps2 := NewPingService(h2)
+ ps1 := ping.NewPingService(h1)
+ ps2 := ping.NewPingService(h2)
testPing(t, ps1, h2.ID())
testPing(t, ps2, h1.ID())
}
-func testPing(t *testing.T, ps *PingService, p peer.ID) {
+func testPing(t *testing.T, ps *ping.PingService, p peer.ID) {
pctx, cancel := context.WithCancel(context.Background())
defer cancel()
- ts, err := ps.Ping(pctx, p)
- if err != nil {
- t.Fatal(err)
- }
+ ts := ps.Ping(pctx, p)
for i := 0; i < 5; i++ {
select {
- case took := <-ts:
- t.Log("ping took: ", took)
+ case res := <-ts:
+ require.NoError(t, res.Error)
+ t.Log("ping took: ", res.RTT)
case <-time.After(time.Second * 4):
t.Fatal("failed to receive ping")
}
diff --git a/p2p/security/noise/benchmark_test.go b/p2p/security/noise/benchmark_test.go
new file mode 100644
index 0000000000..d59a1cb979
--- /dev/null
+++ b/p2p/security/noise/benchmark_test.go
@@ -0,0 +1,251 @@
+package noise
+
+import (
+ "context"
+ "io"
+ "math/rand"
+ "net"
+ "testing"
+ "time"
+
+ "golang.org/x/crypto/chacha20poly1305"
+
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/sec"
+)
+
+type testMode int
+
+const (
+ readBufferGtEncMsg testMode = iota
+ readBufferLtPlainText
+)
+
+var bcs = map[string]struct {
+ m testMode
+}{
+ "readBuffer > encrypted message": {
+ readBufferGtEncMsg,
+ },
+ "readBuffer < decrypted plaintext": {
+ readBufferLtPlainText,
+ },
+}
+
+func makeTransport(b *testing.B) *Transport {
+ b.Helper()
+
+ priv, _, err := crypto.GenerateKeyPair(crypto.Ed25519, 256)
+ if err != nil {
+ b.Fatal(err)
+ }
+ tpt, err := New(ID, priv, nil)
+ if err != nil {
+ b.Fatalf("error constructing transport: %v", err)
+ }
+ return tpt
+}
+
+type benchenv struct {
+ *testing.B
+
+ initTpt *Transport
+ respTpt *Transport
+ rndSrc rand.Source
+}
+
+func setupEnv(b *testing.B) *benchenv {
+ b.StopTimer()
+ defer b.StartTimer()
+ initTpt := makeTransport(b)
+ respTpt := makeTransport(b)
+
+ return &benchenv{
+ B: b,
+ initTpt: initTpt,
+ respTpt: respTpt,
+ rndSrc: rand.NewSource(42),
+ }
+}
+
+func (b benchenv) connect(stopTimer bool) (*secureSession, *secureSession) {
+ initConn, respConn := net.Pipe()
+
+ if stopTimer {
+ b.StopTimer()
+ defer b.StartTimer()
+ }
+
+ var initSession sec.SecureConn
+ var initErr error
+ done := make(chan struct{})
+ go func() {
+ defer close(done)
+ initSession, initErr = b.initTpt.SecureOutbound(context.TODO(), initConn, b.respTpt.localID)
+ }()
+
+ respSession, respErr := b.respTpt.SecureInbound(context.TODO(), respConn, "")
+ <-done
+
+ if initErr != nil {
+ b.Fatal(initErr)
+ }
+
+ if respErr != nil {
+ b.Fatal(respErr)
+ }
+
+ return initSession.(*secureSession), respSession.(*secureSession)
+}
+
+func drain(r io.Reader, done chan<- error, writeTo io.Writer) {
+ _, err := io.Copy(writeTo, r)
+ done <- err
+}
+
+type discardWithBuffer struct {
+ buf []byte
+ io.Writer
+}
+
+func (d *discardWithBuffer) ReadFrom(r io.Reader) (n int64, err error) {
+ readSize := 0
+ for {
+ readSize, err = r.Read(d.buf)
+ n += int64(readSize)
+ if err != nil {
+ if err == io.EOF {
+ return n, nil
+ }
+ return
+ }
+ }
+}
+
+func sink(dst io.WriteCloser, src io.Reader, done chan<- error, buf []byte) {
+ _, err := io.CopyBuffer(dst, src, buf)
+ if err != nil {
+ done <- err
+ }
+ done <- dst.Close()
+}
+
+func pipeRandom(src rand.Source, w io.WriteCloser, r io.Reader, n int64, plainTextBuf []byte,
+ writeTo io.Writer) error {
+ rnd := rand.New(src)
+ lr := io.LimitReader(rnd, n)
+
+ writeCh := make(chan error, 1)
+ readCh := make(chan error, 1)
+
+ go sink(w, lr, writeCh, plainTextBuf)
+ go drain(r, readCh, writeTo)
+
+ writeDone := false
+ readDone := false
+ for !(readDone && writeDone) {
+ select {
+ case err := <-readCh:
+ if err != nil && err != io.EOF {
+ return err
+ }
+ readDone = true
+ case err := <-writeCh:
+ if err != nil && err != io.EOF {
+ return err
+ }
+ writeDone = true
+ }
+ }
+
+ return nil
+}
+
+func benchDataTransfer(b *benchenv, dataSize int64, m testMode) {
+ var totalBytes int64
+ var totalTime time.Duration
+
+ plainTextBufs := make([][]byte, 61)
+ writeTos := make(map[int]io.Writer)
+ for i := 0; i < len(plainTextBufs); i++ {
+ var rbuf []byte
+ // plaintext will be 2 KB to 62 KB
+ plainTextBufs[i] = make([]byte, (i+2)*1024)
+ switch m {
+ case readBufferGtEncMsg:
+ rbuf = make([]byte, len(plainTextBufs[i])+chacha20poly1305.Overhead+1)
+ case readBufferLtPlainText:
+ rbuf = make([]byte, len(plainTextBufs[i])-2)
+ }
+ writeTos[i] = &discardWithBuffer{rbuf, io.Discard}
+ }
+
+ b.ResetTimer()
+ b.ReportAllocs()
+
+ for i := 0; i < b.N; i++ {
+ initSession, respSession := b.connect(true)
+
+ start := time.Now()
+
+ bufi := i % len(plainTextBufs)
+ err := pipeRandom(b.rndSrc, initSession, respSession, dataSize, plainTextBufs[bufi], writeTos[bufi])
+ if err != nil {
+ b.Fatalf("error sending random data: %s", err)
+ }
+ elapsed := time.Since(start)
+ totalTime += elapsed
+ totalBytes += dataSize
+ }
+ bytesPerSec := float64(totalBytes) / totalTime.Seconds()
+ b.ReportMetric(bytesPerSec, "bytes/sec")
+}
+
+func BenchmarkTransfer1MB(b *testing.B) {
+ for n, bc := range bcs {
+ b.Run(n, func(b *testing.B) {
+ benchDataTransfer(setupEnv(b), 1024*1024, bc.m)
+ })
+ }
+}
+
+func BenchmarkTransfer100MB(b *testing.B) {
+ for n, bc := range bcs {
+ b.Run(n, func(b *testing.B) {
+ benchDataTransfer(setupEnv(b), 1024*1024*100, bc.m)
+ })
+ }
+}
+
+func BenchmarkTransfer500MB(b *testing.B) {
+ for n, bc := range bcs {
+ b.Run(n, func(b *testing.B) {
+ benchDataTransfer(setupEnv(b), 1024*1024*500, bc.m)
+ })
+ }
+}
+
+func (b benchenv) benchHandshake() {
+ b.ResetTimer()
+ b.ReportAllocs()
+
+ for i := 0; i < b.N; i++ {
+		initSession, respSession := b.connect(false)
+		b.StopTimer()
+		err := initSession.Close()
+		if err != nil {
+			b.Errorf("error closing session: %s", err)
+		}
+		err = respSession.Close()
+		if err != nil {
+			b.Errorf("error closing session: %s", err)
+		}
+ b.StartTimer()
+ }
+}
+
+func BenchmarkHandshakeXX(b *testing.B) {
+ env := setupEnv(b)
+ env.benchHandshake()
+}
diff --git a/p2p/security/noise/crypto.go b/p2p/security/noise/crypto.go
new file mode 100644
index 0000000000..2b1c5afef4
--- /dev/null
+++ b/p2p/security/noise/crypto.go
@@ -0,0 +1,46 @@
+package noise
+
+import (
+ "errors"
+)
+
+// encrypt calls the cipher's encryption. It encrypts the provided plaintext,
+// slice-appending the ciphertext on out.
+//
+// Usually you want to pass a zero-length slice with enough capacity to
+// accommodate the ciphertext, in order to avoid allocations.
+//
+// encrypt returns a new slice header, whose len is the length of the resulting
+// ciphertext, including the authentication tag.
+//
+// This method will not allocate if the supplied slice is large enough to
+// accommodate the encrypted data + authentication tag. If so, the returned
+// slice header should be a view of the original slice.
+//
+// With the poly1305 MAC function that noise-libp2p uses, the authentication tag
+// adds an overhead of 16 bytes.
+func (s *secureSession) encrypt(out, plaintext []byte) ([]byte, error) {
+ if s.enc == nil {
+ return nil, errors.New("cannot encrypt, handshake incomplete")
+ }
+ return s.enc.Encrypt(out, nil, plaintext)
+}
+
+// decrypt calls the cipher's decryption. It decrypts the provided ciphertext,
+// slice-appending the plaintext on out.
+//
+// Usually you want to pass a zero-length slice with enough capacity to
+// accommodate the plaintext, in order to avoid allocations.
+//
+// decrypt returns a new slice header, whose len is the length of the resulting
+// plaintext, without the authentication tag.
+//
+// This method will not allocate if the supplied slice is large enough to
+// accommodate the plaintext. If so, the returned slice header should be a view
+// of the original slice.
+func (s *secureSession) decrypt(out, ciphertext []byte) ([]byte, error) {
+ if s.dec == nil {
+ return nil, errors.New("cannot decrypt, handshake incomplete")
+ }
+ return s.dec.Decrypt(out, nil, ciphertext)
+}
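+
+// A buffer-reuse sketch (illustrative only, under the assumptions stated in
+// the doc comments above): a zero-length slice with pre-sized capacity lets
+// both calls append in place without allocating.
+//
+//	pt := []byte("hello")
+//	ct, err := s.encrypt(make([]byte, 0, len(pt)+16), pt) // room for the 16-byte Poly1305 tag
+//	if err != nil { /* handle */ }
+//	out, err := s.decrypt(make([]byte, 0, len(ct)), ct) // plaintext is strictly shorter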
diff --git a/p2p/security/noise/crypto_test.go b/p2p/security/noise/crypto_test.go
new file mode 100644
index 0000000000..5c40a5ef61
--- /dev/null
+++ b/p2p/security/noise/crypto_test.go
@@ -0,0 +1,105 @@
+package noise
+
+import (
+ "bytes"
+ "context"
+ "net"
+ "testing"
+
+ "github.com/libp2p/go-libp2p/core/crypto"
+)
+
+func TestEncryptAndDecrypt_InitToResp(t *testing.T) {
+ initTransport := newTestTransport(t, crypto.Ed25519, 2048)
+ respTransport := newTestTransport(t, crypto.Ed25519, 2048)
+
+ initConn, respConn := connect(t, initTransport, respTransport)
+ defer initConn.Close()
+ defer respConn.Close()
+
+ plaintext := []byte("helloworld")
+ ciphertext, err := initConn.encrypt(nil, plaintext)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+	result, err := respConn.decrypt(nil, ciphertext)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !bytes.Equal(plaintext, result) {
+		t.Fatalf("got %x expected %x", result, plaintext)
+	}
+
+ plaintext = []byte("goodbye")
+ ciphertext, err = initConn.encrypt(nil, plaintext)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+	result, err = respConn.decrypt(nil, ciphertext)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !bytes.Equal(plaintext, result) {
+		t.Fatalf("got %x expected %x", result, plaintext)
+	}
+}
+
+func TestEncryptAndDecrypt_RespToInit(t *testing.T) {
+ initTransport := newTestTransport(t, crypto.Ed25519, 2048)
+ respTransport := newTestTransport(t, crypto.Ed25519, 2048)
+
+ initConn, respConn := connect(t, initTransport, respTransport)
+ defer initConn.Close()
+ defer respConn.Close()
+
+ plaintext := []byte("helloworld")
+ ciphertext, err := respConn.encrypt(nil, plaintext)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+	result, err := initConn.decrypt(nil, ciphertext)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !bytes.Equal(plaintext, result) {
+		t.Fatalf("got %x expected %x", result, plaintext)
+	}
+}
+
+func TestCryptoFailsIfCiphertextIsAltered(t *testing.T) {
+ initTransport := newTestTransport(t, crypto.Ed25519, 2048)
+ respTransport := newTestTransport(t, crypto.Ed25519, 2048)
+
+ initConn, respConn := connect(t, initTransport, respTransport)
+ defer initConn.Close()
+ defer respConn.Close()
+
+ plaintext := []byte("helloworld")
+ ciphertext, err := respConn.encrypt(nil, plaintext)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ciphertext[0] = ^ciphertext[0]
+
+ _, err = initConn.decrypt(nil, ciphertext)
+ if err == nil {
+ t.Fatal("expected decryption to fail when ciphertext altered")
+ }
+}
+
+func TestCryptoFailsIfHandshakeIncomplete(t *testing.T) {
+ initTransport := newTestTransport(t, crypto.Ed25519, 2048)
+ init, resp := net.Pipe()
+ _ = resp.Close()
+
+	// The handshake cannot complete (the responder side is already closed);
+	// we only need the resulting incomplete session.
+	session, _ := newSecureSession(initTransport, context.TODO(), init, "remote-peer", nil, nil, nil, true, true)
+ _, err := session.encrypt(nil, []byte("hi"))
+ if err == nil {
+ t.Error("expected encryption error when handshake incomplete")
+ }
+ _, err = session.decrypt(nil, []byte("it's a secret"))
+ if err == nil {
+ t.Error("expected decryption error when handshake incomplete")
+ }
+}
diff --git a/p2p/security/noise/handshake.go b/p2p/security/noise/handshake.go
new file mode 100644
index 0000000000..d81aa72fb4
--- /dev/null
+++ b/p2p/security/noise/handshake.go
@@ -0,0 +1,286 @@
+package noise
+
+import (
+ "context"
+ "crypto/rand"
+ "encoding/binary"
+ "fmt"
+ "os"
+ "runtime/debug"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/sec"
+ "github.com/libp2p/go-libp2p/p2p/security/noise/pb"
+
+ "github.com/flynn/noise"
+ pool "github.com/libp2p/go-buffer-pool"
+ "google.golang.org/protobuf/proto"
+)
+
+// payloadSigPrefix is prepended to our Noise static key before signing with
+// our libp2p identity key.
+const payloadSigPrefix = "noise-libp2p-static-key:"
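+
+// A sketch of the signing rule applied below (illustrative; the actual
+// implementation lives in generateHandshakePayload):
+//
+//	msg := append([]byte(payloadSigPrefix), noiseStaticPubKey...)
+//	sig, err := libp2pPrivKey.Sign(msg)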
+
+// All Noise sessions share a fixed cipher suite.
+var cipherSuite = noise.NewCipherSuite(noise.DH25519, noise.CipherChaChaPoly, noise.HashSHA256)
+
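+// For reference, the Noise XX message pattern driven below (a sketch of the
+// standard pattern, not code introduced by this change):
+//
+//	-> e
+//	<- e, ee, s, es   (+ responder payload: identity key, sig, extensions)
+//	-> s, se          (+ initiator payload: identity key, sig, extensions)
+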
+// runHandshake exchanges handshake messages with the remote peer to establish
+// a noise-libp2p session. It blocks until the handshake completes or fails.
+func (s *secureSession) runHandshake(ctx context.Context) (err error) {
+ defer func() {
+ if rerr := recover(); rerr != nil {
+ fmt.Fprintf(os.Stderr, "caught panic: %s\n%s\n", rerr, debug.Stack())
+ err = fmt.Errorf("panic in Noise handshake: %s", rerr)
+ }
+ }()
+
+ kp, err := noise.DH25519.GenerateKeypair(rand.Reader)
+ if err != nil {
+ return fmt.Errorf("error generating static keypair: %w", err)
+ }
+
+ cfg := noise.Config{
+ CipherSuite: cipherSuite,
+ Pattern: noise.HandshakeXX,
+ Initiator: s.initiator,
+ StaticKeypair: kp,
+ Prologue: s.prologue,
+ }
+
+ hs, err := noise.NewHandshakeState(cfg)
+ if err != nil {
+ return fmt.Errorf("error initializing handshake state: %w", err)
+ }
+
+ // set a deadline to complete the handshake, if one has been supplied.
+ // clear it after we're done.
+ if deadline, ok := ctx.Deadline(); ok {
+ if err := s.SetDeadline(deadline); err == nil {
+ // schedule the deadline removal once we're done handshaking.
+ defer s.SetDeadline(time.Time{})
+ }
+ }
+
+ // We can re-use this buffer for all handshake messages.
+ hbuf := pool.Get(2 << 10)
+ defer pool.Put(hbuf)
+
+ if s.initiator {
+ // stage 0 //
+ // Handshake Msg Len = len(DH ephemeral key)
+ if err := s.sendHandshakeMessage(hs, nil, hbuf); err != nil {
+ return fmt.Errorf("error sending handshake message: %w", err)
+ }
+
+ // stage 1 //
+ plaintext, err := s.readHandshakeMessage(hs)
+ if err != nil {
+ return fmt.Errorf("error reading handshake message: %w", err)
+ }
+ rcvdEd, err := s.handleRemoteHandshakePayload(plaintext, hs.PeerStatic())
+ if err != nil {
+ return err
+ }
+ if s.initiatorEarlyDataHandler != nil {
+ if err := s.initiatorEarlyDataHandler.Received(ctx, s.insecureConn, rcvdEd); err != nil {
+ return err
+ }
+ }
+
+ // stage 2 //
+		// Handshake Msg Len = len(DH static key) + MAC(static key is encrypted) + len(Payload) + MAC(payload is encrypted)
+ var ed *pb.NoiseExtensions
+ if s.initiatorEarlyDataHandler != nil {
+ ed = s.initiatorEarlyDataHandler.Send(ctx, s.insecureConn, s.remoteID)
+ }
+ payload, err := s.generateHandshakePayload(kp, ed)
+ if err != nil {
+ return err
+ }
+ if err := s.sendHandshakeMessage(hs, payload, hbuf); err != nil {
+ return fmt.Errorf("error sending handshake message: %w", err)
+ }
+ return nil
+ } else {
+ // stage 0 //
+ if _, err := s.readHandshakeMessage(hs); err != nil {
+ return fmt.Errorf("error reading handshake message: %w", err)
+ }
+
+ // stage 1 //
+		// Handshake Msg Len = len(DH ephemeral key) + len(DH static key) + MAC(static key is encrypted) + len(Payload) +
+		// MAC(payload is encrypted)
+ var ed *pb.NoiseExtensions
+ if s.responderEarlyDataHandler != nil {
+ ed = s.responderEarlyDataHandler.Send(ctx, s.insecureConn, s.remoteID)
+ }
+ payload, err := s.generateHandshakePayload(kp, ed)
+ if err != nil {
+ return err
+ }
+ if err := s.sendHandshakeMessage(hs, payload, hbuf); err != nil {
+ return fmt.Errorf("error sending handshake message: %w", err)
+ }
+
+ // stage 2 //
+ plaintext, err := s.readHandshakeMessage(hs)
+ if err != nil {
+ return fmt.Errorf("error reading handshake message: %w", err)
+ }
+ rcvdEd, err := s.handleRemoteHandshakePayload(plaintext, hs.PeerStatic())
+ if err != nil {
+ return err
+ }
+ if s.responderEarlyDataHandler != nil {
+ if err := s.responderEarlyDataHandler.Received(ctx, s.insecureConn, rcvdEd); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+}
+
+// setCipherStates sets the initial cipher states that will be used to protect
+// traffic after the handshake.
+//
+// It is called when the final handshake message is processed by
+// either sendHandshakeMessage or readHandshakeMessage.
+func (s *secureSession) setCipherStates(cs1, cs2 *noise.CipherState) {
+ if s.initiator {
+ s.enc = cs1
+ s.dec = cs2
+ } else {
+ s.enc = cs2
+ s.dec = cs1
+ }
+}
+
+// sendHandshakeMessage sends the next handshake message in the sequence.
+//
+// If payload is non-empty, it will be included in the handshake message.
+// If this is the final message in the sequence, calls setCipherStates
+// to initialize cipher states.
+func (s *secureSession) sendHandshakeMessage(hs *noise.HandshakeState, payload []byte, hbuf []byte) error {
+ // the first two bytes will be the length of the noise handshake message.
+ bz, cs1, cs2, err := hs.WriteMessage(hbuf[:LengthPrefixLength], payload)
+ if err != nil {
+ return err
+ }
+
+	// bz will also include the length prefix, because we passed a slice of
+	// length LengthPrefixLength to hs.WriteMessage().
+ binary.BigEndian.PutUint16(bz, uint16(len(bz)-LengthPrefixLength))
+
+ _, err = s.writeMsgInsecure(bz)
+ if err != nil {
+ return err
+ }
+
+ if cs1 != nil && cs2 != nil {
+ s.setCipherStates(cs1, cs2)
+ }
+ return nil
+}
+
+// readHandshakeMessage reads a message from the insecure conn and tries to
+// process it as the expected next message in the handshake sequence.
+//
+// If the message contains a payload, it will be decrypted and returned.
+//
+// If this is the final message in the sequence, it calls setCipherStates
+// to initialize cipher states.
+func (s *secureSession) readHandshakeMessage(hs *noise.HandshakeState) ([]byte, error) {
+ l, err := s.readNextInsecureMsgLen()
+ if err != nil {
+ return nil, err
+ }
+
+ buf := pool.Get(l)
+ defer pool.Put(buf)
+
+ if err := s.readNextMsgInsecure(buf); err != nil {
+ return nil, err
+ }
+
+ msg, cs1, cs2, err := hs.ReadMessage(nil, buf)
+ if err != nil {
+ return nil, err
+ }
+ if cs1 != nil && cs2 != nil {
+ s.setCipherStates(cs1, cs2)
+ }
+ return msg, nil
+}
+
+// generateHandshakePayload creates a libp2p handshake payload with a
+// signature of our static noise key.
+func (s *secureSession) generateHandshakePayload(localStatic noise.DHKey, ext *pb.NoiseExtensions) ([]byte, error) {
+ // obtain the public key from the handshake session, so we can sign it with
+ // our libp2p secret key.
+ localKeyRaw, err := crypto.MarshalPublicKey(s.LocalPublicKey())
+ if err != nil {
+ return nil, fmt.Errorf("error serializing libp2p identity key: %w", err)
+ }
+
+ // prepare payload to sign; perform signature.
+ toSign := append([]byte(payloadSigPrefix), localStatic.Public...)
+ signedPayload, err := s.localKey.Sign(toSign)
+ if err != nil {
+		return nil, fmt.Errorf("error signing handshake payload: %w", err)
+ }
+
+ // create payload
+ payloadEnc, err := proto.Marshal(&pb.NoiseHandshakePayload{
+ IdentityKey: localKeyRaw,
+ IdentitySig: signedPayload,
+ Extensions: ext,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("error marshaling handshake payload: %w", err)
+ }
+ return payloadEnc, nil
+}
+
+// handleRemoteHandshakePayload unmarshals the handshake payload object sent
+// by the remote peer and validates the signature against the peer's static Noise key.
+// It returns the data attached to the payload.
+func (s *secureSession) handleRemoteHandshakePayload(payload []byte, remoteStatic []byte) (*pb.NoiseExtensions, error) {
+ // unmarshal payload
+ nhp := new(pb.NoiseHandshakePayload)
+ err := proto.Unmarshal(payload, nhp)
+ if err != nil {
+ return nil, fmt.Errorf("error unmarshaling remote handshake payload: %w", err)
+ }
+
+ // unpack remote peer's public libp2p key
+ remotePubKey, err := crypto.UnmarshalPublicKey(nhp.GetIdentityKey())
+ if err != nil {
+ return nil, err
+ }
+ id, err := peer.IDFromPublicKey(remotePubKey)
+ if err != nil {
+ return nil, err
+ }
+
+ // check the peer ID if enabled
+ if s.checkPeerID && s.remoteID != id {
+ return nil, sec.ErrPeerIDMismatch{Expected: s.remoteID, Actual: id}
+ }
+
+ // verify payload is signed by asserted remote libp2p key.
+ sig := nhp.GetIdentitySig()
+ msg := append([]byte(payloadSigPrefix), remoteStatic...)
+ ok, err := remotePubKey.Verify(msg, sig)
+ if err != nil {
+ return nil, fmt.Errorf("error verifying signature: %w", err)
+ } else if !ok {
+ return nil, fmt.Errorf("handshake signature invalid")
+ }
+
+ // set remote peer key and id
+ s.remoteID = id
+ s.remoteKey = remotePubKey
+ return nhp.Extensions, nil
+}
diff --git a/p2p/security/noise/pb/payload.pb.go b/p2p/security/noise/pb/payload.pb.go
new file mode 100644
index 0000000000..dcadad7d49
--- /dev/null
+++ b/p2p/security/noise/pb/payload.pb.go
@@ -0,0 +1,199 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.6
+// protoc v5.29.2
+// source: p2p/security/noise/pb/payload.proto
+
+package pb
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+ unsafe "unsafe"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type NoiseExtensions struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ WebtransportCerthashes [][]byte `protobuf:"bytes,1,rep,name=webtransport_certhashes,json=webtransportCerthashes" json:"webtransport_certhashes,omitempty"`
+ StreamMuxers []string `protobuf:"bytes,2,rep,name=stream_muxers,json=streamMuxers" json:"stream_muxers,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *NoiseExtensions) Reset() {
+ *x = NoiseExtensions{}
+ mi := &file_p2p_security_noise_pb_payload_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *NoiseExtensions) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NoiseExtensions) ProtoMessage() {}
+
+func (x *NoiseExtensions) ProtoReflect() protoreflect.Message {
+ mi := &file_p2p_security_noise_pb_payload_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use NoiseExtensions.ProtoReflect.Descriptor instead.
+func (*NoiseExtensions) Descriptor() ([]byte, []int) {
+ return file_p2p_security_noise_pb_payload_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *NoiseExtensions) GetWebtransportCerthashes() [][]byte {
+ if x != nil {
+ return x.WebtransportCerthashes
+ }
+ return nil
+}
+
+func (x *NoiseExtensions) GetStreamMuxers() []string {
+ if x != nil {
+ return x.StreamMuxers
+ }
+ return nil
+}
+
+type NoiseHandshakePayload struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ IdentityKey []byte `protobuf:"bytes,1,opt,name=identity_key,json=identityKey" json:"identity_key,omitempty"`
+ IdentitySig []byte `protobuf:"bytes,2,opt,name=identity_sig,json=identitySig" json:"identity_sig,omitempty"`
+ Extensions *NoiseExtensions `protobuf:"bytes,4,opt,name=extensions" json:"extensions,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *NoiseHandshakePayload) Reset() {
+ *x = NoiseHandshakePayload{}
+ mi := &file_p2p_security_noise_pb_payload_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *NoiseHandshakePayload) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NoiseHandshakePayload) ProtoMessage() {}
+
+func (x *NoiseHandshakePayload) ProtoReflect() protoreflect.Message {
+ mi := &file_p2p_security_noise_pb_payload_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use NoiseHandshakePayload.ProtoReflect.Descriptor instead.
+func (*NoiseHandshakePayload) Descriptor() ([]byte, []int) {
+ return file_p2p_security_noise_pb_payload_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *NoiseHandshakePayload) GetIdentityKey() []byte {
+ if x != nil {
+ return x.IdentityKey
+ }
+ return nil
+}
+
+func (x *NoiseHandshakePayload) GetIdentitySig() []byte {
+ if x != nil {
+ return x.IdentitySig
+ }
+ return nil
+}
+
+func (x *NoiseHandshakePayload) GetExtensions() *NoiseExtensions {
+ if x != nil {
+ return x.Extensions
+ }
+ return nil
+}
+
+var File_p2p_security_noise_pb_payload_proto protoreflect.FileDescriptor
+
+const file_p2p_security_noise_pb_payload_proto_rawDesc = "" +
+ "\n" +
+ "#p2p/security/noise/pb/payload.proto\x12\x02pb\"o\n" +
+ "\x0fNoiseExtensions\x127\n" +
+ "\x17webtransport_certhashes\x18\x01 \x03(\fR\x16webtransportCerthashes\x12#\n" +
+ "\rstream_muxers\x18\x02 \x03(\tR\fstreamMuxers\"\x92\x01\n" +
+ "\x15NoiseHandshakePayload\x12!\n" +
+ "\fidentity_key\x18\x01 \x01(\fR\videntityKey\x12!\n" +
+ "\fidentity_sig\x18\x02 \x01(\fR\videntitySig\x123\n" +
+ "\n" +
+ "extensions\x18\x04 \x01(\v2\x13.pb.NoiseExtensionsR\n" +
+ "extensionsB3Z1github.com/libp2p/go-libp2p/p2p/security/noise/pb"
+
+var (
+ file_p2p_security_noise_pb_payload_proto_rawDescOnce sync.Once
+ file_p2p_security_noise_pb_payload_proto_rawDescData []byte
+)
+
+func file_p2p_security_noise_pb_payload_proto_rawDescGZIP() []byte {
+ file_p2p_security_noise_pb_payload_proto_rawDescOnce.Do(func() {
+ file_p2p_security_noise_pb_payload_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_p2p_security_noise_pb_payload_proto_rawDesc), len(file_p2p_security_noise_pb_payload_proto_rawDesc)))
+ })
+ return file_p2p_security_noise_pb_payload_proto_rawDescData
+}
+
+var file_p2p_security_noise_pb_payload_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_p2p_security_noise_pb_payload_proto_goTypes = []any{
+ (*NoiseExtensions)(nil), // 0: pb.NoiseExtensions
+ (*NoiseHandshakePayload)(nil), // 1: pb.NoiseHandshakePayload
+}
+var file_p2p_security_noise_pb_payload_proto_depIdxs = []int32{
+ 0, // 0: pb.NoiseHandshakePayload.extensions:type_name -> pb.NoiseExtensions
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_p2p_security_noise_pb_payload_proto_init() }
+func file_p2p_security_noise_pb_payload_proto_init() {
+ if File_p2p_security_noise_pb_payload_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_p2p_security_noise_pb_payload_proto_rawDesc), len(file_p2p_security_noise_pb_payload_proto_rawDesc)),
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_p2p_security_noise_pb_payload_proto_goTypes,
+ DependencyIndexes: file_p2p_security_noise_pb_payload_proto_depIdxs,
+ MessageInfos: file_p2p_security_noise_pb_payload_proto_msgTypes,
+ }.Build()
+ File_p2p_security_noise_pb_payload_proto = out.File
+ file_p2p_security_noise_pb_payload_proto_goTypes = nil
+ file_p2p_security_noise_pb_payload_proto_depIdxs = nil
+}
diff --git a/p2p/security/noise/pb/payload.proto b/p2p/security/noise/pb/payload.proto
new file mode 100644
index 0000000000..edcacd3bb5
--- /dev/null
+++ b/p2p/security/noise/pb/payload.proto
@@ -0,0 +1,15 @@
+syntax = "proto2";
+package pb;
+
+option go_package = "github.com/libp2p/go-libp2p/p2p/security/noise/pb";
+
+message NoiseExtensions {
+ repeated bytes webtransport_certhashes = 1;
+ repeated string stream_muxers = 2;
+}
+
+message NoiseHandshakePayload {
+ optional bytes identity_key = 1;
+ optional bytes identity_sig = 2;
+ optional NoiseExtensions extensions = 4;
+}
diff --git a/p2p/security/noise/rw.go b/p2p/security/noise/rw.go
new file mode 100644
index 0000000000..d52768f485
--- /dev/null
+++ b/p2p/security/noise/rw.go
@@ -0,0 +1,155 @@
+package noise
+
+import (
+ "encoding/binary"
+ "io"
+
+ pool "github.com/libp2p/go-buffer-pool"
+ "golang.org/x/crypto/chacha20poly1305"
+)
+
+// MaxTransportMsgLength is the Noise-imposed maximum transport message length,
+// inclusive of the MAC size (16 bytes, Poly1305 for noise-libp2p).
+const MaxTransportMsgLength = 0xffff
+
+// MaxPlaintextLength is the maximum payload size. It is MaxTransportMsgLength
+// minus the MAC size. Payloads over this size will be automatically chunked.
+const MaxPlaintextLength = MaxTransportMsgLength - chacha20poly1305.Overhead
+
+// LengthPrefixLength is the length of the length prefix itself, which precedes
+// all transport messages in order to delimit them. In bytes.
+const LengthPrefixLength = 2
+
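+// A framing sketch (illustrative arithmetic, not code from this change): an
+// n-byte plaintext write costs one length prefix and one Poly1305 tag per
+// Noise message.
+//
+//	msgs := (n + MaxPlaintextLength - 1) / MaxPlaintextLength
+//	wireBytes := n + msgs*(LengthPrefixLength+chacha20poly1305.Overhead)
+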
+// Read reads from the secure connection, returning plaintext data in `buf`.
+//
+// It honours the io.Reader contract.
+func (s *secureSession) Read(buf []byte) (int, error) {
+ s.readLock.Lock()
+ defer s.readLock.Unlock()
+
+	// 1. If we have queued received bytes:
+	//   1a. If len(buf) < len(queued), saturate buf, update the seek pointer, return.
+	//   1b. If len(buf) >= len(queued), copy the remainder to buf, release the queued buffer back into the pool, return.
+	//
+	// 2. Else, read the next message off the wire; next_len is the length prefix.
+	//   2a. If len(buf) >= next_len, read the message into buf and decrypt it in place (zero-alloc path), and return.
+	//   2b. If len(buf) < next_len, obtain a buffer from the pool, read and decrypt the entire message into it,
+	//       saturate buf, and update the seek pointer.
+ if s.qbuf != nil {
+ // we have queued bytes; copy as much as we can.
+ copied := copy(buf, s.qbuf[s.qseek:])
+ s.qseek += copied
+ if s.qseek == len(s.qbuf) {
+ // queued buffer is now empty, reset and release.
+ pool.Put(s.qbuf)
+ s.qseek, s.qbuf = 0, nil
+ }
+ return copied, nil
+ }
+
+ // length of the next encrypted message.
+ nextMsgLen, err := s.readNextInsecureMsgLen()
+ if err != nil {
+ return 0, err
+ }
+
+	// If the buffer is at least as big as the encrypted message size,
+	// we can read AND decrypt in place.
+ if len(buf) >= nextMsgLen {
+ if err := s.readNextMsgInsecure(buf[:nextMsgLen]); err != nil {
+ return 0, err
+ }
+
+ dbuf, err := s.decrypt(buf[:0], buf[:nextMsgLen])
+ if err != nil {
+ return 0, err
+ }
+
+ return len(dbuf), nil
+ }
+
+ // otherwise, we get a buffer from the pool so we can read the message into it
+ // and then decrypt in place, since we're retaining the buffer (or a view thereof).
+ cbuf := pool.Get(nextMsgLen)
+ if err := s.readNextMsgInsecure(cbuf); err != nil {
+ return 0, err
+ }
+
+ if s.qbuf, err = s.decrypt(cbuf[:0], cbuf); err != nil {
+ return 0, err
+ }
+
+ // copy as many bytes as we can; update seek pointer.
+ s.qseek = copy(buf, s.qbuf)
+
+ return s.qseek, nil
+}
+
+// Write encrypts the plaintext data and sends it on the
+// secure connection.
+func (s *secureSession) Write(data []byte) (int, error) {
+ s.writeLock.Lock()
+ defer s.writeLock.Unlock()
+
+ var (
+ written int
+ cbuf []byte
+ total = len(data)
+ )
+
+ if total < MaxPlaintextLength {
+ cbuf = pool.Get(total + chacha20poly1305.Overhead + LengthPrefixLength)
+ } else {
+ cbuf = pool.Get(MaxTransportMsgLength + LengthPrefixLength)
+ }
+
+ defer pool.Put(cbuf)
+
+ for written < total {
+ end := written + MaxPlaintextLength
+ if end > total {
+ end = total
+ }
+
+ b, err := s.encrypt(cbuf[:LengthPrefixLength], data[written:end])
+ if err != nil {
+ return 0, err
+ }
+
+ binary.BigEndian.PutUint16(b, uint16(len(b)-LengthPrefixLength))
+
+ _, err = s.writeMsgInsecure(b)
+ if err != nil {
+ return written, err
+ }
+ written = end
+ }
+ return written, nil
+}
+
+// readNextInsecureMsgLen reads the length of the next message on the insecure connection.
+func (s *secureSession) readNextInsecureMsgLen() (int, error) {
+ _, err := io.ReadFull(s.insecureReader, s.rlen[:])
+ if err != nil {
+ return 0, err
+ }
+
+ return int(binary.BigEndian.Uint16(s.rlen[:])), err
+}
+
+// readNextMsgInsecure tries to read exactly len(buf) bytes from the insecure
+// connection into buf and returns the error, if any.
+// Callers should first call readNextInsecureMsgLen to determine the size of the
+// next message, and then call this function with a buffer of exactly that size.
+func (s *secureSession) readNextMsgInsecure(buf []byte) error {
+ _, err := io.ReadFull(s.insecureReader, buf)
+ return err
+}
+
+// writeMsgInsecure writes data to the insecure conn.
+// Callers are expected to prefix data with its length in bytes, written as a 16-bit uint in network order.
+func (s *secureSession) writeMsgInsecure(data []byte) (int, error) {
+ return s.insecureConn.Write(data)
+}
diff --git a/p2p/security/noise/session.go b/p2p/security/noise/session.go
new file mode 100644
index 0000000000..fa32ab8fa4
--- /dev/null
+++ b/p2p/security/noise/session.go
@@ -0,0 +1,140 @@
+package noise
+
+import (
+ "bufio"
+ "context"
+ "net"
+ "sync"
+ "time"
+
+ "github.com/flynn/noise"
+
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/protocol"
+)
+
+type secureSession struct {
+ initiator bool
+ checkPeerID bool
+
+ localID peer.ID
+ localKey crypto.PrivKey
+ remoteID peer.ID
+ remoteKey crypto.PubKey
+
+ readLock sync.Mutex
+ writeLock sync.Mutex
+
+ insecureConn net.Conn
+ insecureReader *bufio.Reader // to cushion io read syscalls
+ // we don't buffer writes to avoid introducing latency; optimisation possible. // TODO revisit
+
+ qseek int // queued bytes seek value.
+ qbuf []byte // queued bytes buffer.
+ rlen [2]byte // work buffer to read in the incoming message length.
+
+ enc *noise.CipherState
+ dec *noise.CipherState
+
+ // noise prologue
+ prologue []byte
+
+ initiatorEarlyDataHandler, responderEarlyDataHandler EarlyDataHandler
+
+	// connectionState holds state information related to the secureSession entity.
+ connectionState network.ConnectionState
+}
+
+// newSecureSession creates a Noise session over the given insecureConn Conn, using
+// the libp2p identity keypair from the given Transport.
+func newSecureSession(tpt *Transport, ctx context.Context, insecure net.Conn, remote peer.ID, prologue []byte, initiatorEDH, responderEDH EarlyDataHandler, initiator, checkPeerID bool) (*secureSession, error) {
+ s := &secureSession{
+ insecureConn: insecure,
+ insecureReader: bufio.NewReader(insecure),
+ initiator: initiator,
+ localID: tpt.localID,
+ localKey: tpt.privateKey,
+ remoteID: remote,
+ prologue: prologue,
+ initiatorEarlyDataHandler: initiatorEDH,
+ responderEarlyDataHandler: responderEDH,
+ checkPeerID: checkPeerID,
+ }
+
+ // the go-routine we create to run the handshake will
+ // write the result of the handshake to the respCh.
+ respCh := make(chan error, 1)
+ go func() {
+ respCh <- s.runHandshake(ctx)
+ }()
+
+ select {
+ case err := <-respCh:
+ if err != nil {
+ _ = s.insecureConn.Close()
+ }
+ return s, err
+
+ case <-ctx.Done():
+		// If the context has been cancelled, close the underlying connection.
+		// Then wait for the handshake goroutine to return (it fails on the
+		// closed connection), so we don't leak it.
+ _ = s.insecureConn.Close()
+ <-respCh
+ return nil, ctx.Err()
+ }
+}
+
+func (s *secureSession) LocalAddr() net.Addr {
+ return s.insecureConn.LocalAddr()
+}
+
+func (s *secureSession) LocalPeer() peer.ID {
+ return s.localID
+}
+
+func (s *secureSession) LocalPublicKey() crypto.PubKey {
+ return s.localKey.GetPublic()
+}
+
+func (s *secureSession) RemoteAddr() net.Addr {
+ return s.insecureConn.RemoteAddr()
+}
+
+func (s *secureSession) RemotePeer() peer.ID {
+ return s.remoteID
+}
+
+func (s *secureSession) RemotePublicKey() crypto.PubKey {
+ return s.remoteKey
+}
+
+func (s *secureSession) ConnState() network.ConnectionState {
+ return s.connectionState
+}
+
+func (s *secureSession) SetDeadline(t time.Time) error {
+ return s.insecureConn.SetDeadline(t)
+}
+
+func (s *secureSession) SetReadDeadline(t time.Time) error {
+ return s.insecureConn.SetReadDeadline(t)
+}
+
+func (s *secureSession) SetWriteDeadline(t time.Time) error {
+ return s.insecureConn.SetWriteDeadline(t)
+}
+
+func (s *secureSession) Close() error {
+ return s.insecureConn.Close()
+}
+
+func SessionWithConnState(s *secureSession, muxer protocol.ID) *secureSession {
+ if s != nil {
+ s.connectionState.StreamMultiplexer = muxer
+ s.connectionState.UsedEarlyMuxerNegotiation = muxer != ""
+ }
+ return s
+}
diff --git a/p2p/security/noise/session_test.go b/p2p/security/noise/session_test.go
new file mode 100644
index 0000000000..85de01b2ba
--- /dev/null
+++ b/p2p/security/noise/session_test.go
@@ -0,0 +1,26 @@
+package noise
+
+import (
+ "context"
+ "testing"
+
+ "github.com/libp2p/go-libp2p/core/crypto"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestContextCancellationRespected(t *testing.T) {
+ initTransport := newTestTransport(t, crypto.Ed25519, 2048)
+ respTransport := newTestTransport(t, crypto.Ed25519, 2048)
+
+ init, resp := newConnPair(t)
+ defer init.Close()
+ defer resp.Close()
+
+ ctx, cancel := context.WithCancel(context.Background())
+ cancel()
+
+ _, err := initTransport.SecureOutbound(ctx, init, respTransport.localID)
+ require.Error(t, err)
+ require.Equal(t, ctx.Err(), err)
+}
diff --git a/p2p/security/noise/session_transport.go b/p2p/security/noise/session_transport.go
new file mode 100644
index 0000000000..0f26f3fa8e
--- /dev/null
+++ b/p2p/security/noise/session_transport.go
@@ -0,0 +1,101 @@
+package noise
+
+import (
+ "context"
+ "net"
+
+ "github.com/libp2p/go-libp2p/core/canonicallog"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/protocol"
+ "github.com/libp2p/go-libp2p/core/sec"
+ "github.com/libp2p/go-libp2p/p2p/security/noise/pb"
+
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
+type SessionOption = func(*SessionTransport) error
+
+// Prologue sets a prologue for the Noise session.
+// The handshake will only complete successfully if both parties set the same prologue.
+// See https://noiseprotocol.org/noise.html#prologue for details.
+func Prologue(prologue []byte) SessionOption {
+ return func(s *SessionTransport) error {
+ s.prologue = prologue
+ return nil
+ }
+}
+
+// EarlyDataHandler defines what the application payload is for either the second
+// (if responder) or third (if initiator) handshake message, and defines the
+// logic for handling the other side's early data. Note the early data in the
+// second handshake message is encrypted, but the peer is not authenticated at that point.
+type EarlyDataHandler interface {
+	// Send for the initiator is called before sending the third handshake
+	// message, and defines the application payload for that message.
+	// Send for the responder is called before sending the second handshake message.
+ Send(context.Context, net.Conn, peer.ID) *pb.NoiseExtensions
+ // Received for the initiator is called when the second handshake message
+ // from the responder is received.
+ // Received for the responder is called when the third handshake message
+ // from the initiator is received.
+ Received(context.Context, net.Conn, *pb.NoiseExtensions) error
+}
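+
+// A minimal EarlyDataHandler sketch (hypothetical, for illustration only): a
+// handler that advertises no extensions and accepts whatever the peer sends.
+//
+//	type noopEDH struct{}
+//
+//	func (noopEDH) Send(context.Context, net.Conn, peer.ID) *pb.NoiseExtensions { return nil }
+//	func (noopEDH) Received(context.Context, net.Conn, *pb.NoiseExtensions) error { return nil }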
+
+// EarlyData sets the `EarlyDataHandler` for the initiator and responder roles.
+// See `EarlyDataHandler` for more details.
+func EarlyData(initiator, responder EarlyDataHandler) SessionOption {
+ return func(s *SessionTransport) error {
+ s.initiatorEarlyDataHandler = initiator
+ s.responderEarlyDataHandler = responder
+ return nil
+ }
+}
+
+// DisablePeerIDCheck disables checking the remote peer ID for a noise connection.
+// For outbound connections, this is the equivalent of calling `SecureInbound` with an empty
+// peer ID. This is susceptible to MITM attacks since we do not verify the identity of the remote
+// peer.
+func DisablePeerIDCheck() SessionOption {
+ return func(s *SessionTransport) error {
+ s.disablePeerIDCheck = true
+ return nil
+ }
+}
+
+var _ sec.SecureTransport = &SessionTransport{}
+
+// SessionTransport can be used to provide per-connection options.
+type SessionTransport struct {
+ t *Transport
+ // options
+ prologue []byte
+ disablePeerIDCheck bool
+
+ protocolID protocol.ID
+
+ initiatorEarlyDataHandler, responderEarlyDataHandler EarlyDataHandler
+}
+
+// SecureInbound runs the Noise handshake as the responder.
+// If p is empty, connections from any peer are accepted.
+func (i *SessionTransport) SecureInbound(ctx context.Context, insecure net.Conn, p peer.ID) (sec.SecureConn, error) {
+ checkPeerID := !i.disablePeerIDCheck && p != ""
+ c, err := newSecureSession(i.t, ctx, insecure, p, i.prologue, i.initiatorEarlyDataHandler, i.responderEarlyDataHandler, false, checkPeerID)
+ if err != nil {
+ addr, maErr := manet.FromNetAddr(insecure.RemoteAddr())
+ if maErr == nil {
+ canonicallog.LogPeerStatus(100, p, addr, "handshake_failure", "noise", "err", err.Error())
+ }
+ }
+ return c, err
+}
+
+// SecureOutbound runs the Noise handshake as the initiator.
+func (i *SessionTransport) SecureOutbound(ctx context.Context, insecure net.Conn, p peer.ID) (sec.SecureConn, error) {
+ return newSecureSession(i.t, ctx, insecure, p, i.prologue, i.initiatorEarlyDataHandler, i.responderEarlyDataHandler, true, !i.disablePeerIDCheck)
+}
+
+func (i *SessionTransport) ID() protocol.ID {
+ return i.protocolID
+}
diff --git a/p2p/security/noise/transport.go b/p2p/security/noise/transport.go
new file mode 100644
index 0000000000..e42cea1bf7
--- /dev/null
+++ b/p2p/security/noise/transport.go
@@ -0,0 +1,131 @@
+package noise
+
+import (
+ "context"
+ "net"
+
+ "github.com/libp2p/go-libp2p/core/canonicallog"
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/protocol"
+ "github.com/libp2p/go-libp2p/core/sec"
+ tptu "github.com/libp2p/go-libp2p/p2p/net/upgrader"
+ "github.com/libp2p/go-libp2p/p2p/security/noise/pb"
+
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
+// ID is the protocol ID for noise
+const ID = "/noise"
+const maxProtoNum = 100
+
+type Transport struct {
+ protocolID protocol.ID
+ localID peer.ID
+ privateKey crypto.PrivKey
+ muxers []protocol.ID
+}
+
+var _ sec.SecureTransport = &Transport{}
+
+// New creates a new Noise transport using the given private key as its
+// libp2p identity key.
+func New(id protocol.ID, privkey crypto.PrivKey, muxers []tptu.StreamMuxer) (*Transport, error) {
+ localID, err := peer.IDFromPrivateKey(privkey)
+ if err != nil {
+ return nil, err
+ }
+
+ muxerIDs := make([]protocol.ID, 0, len(muxers))
+ for _, m := range muxers {
+ muxerIDs = append(muxerIDs, m.ID)
+ }
+
+ return &Transport{
+ protocolID: id,
+ localID: localID,
+ privateKey: privkey,
+ muxers: muxerIDs,
+ }, nil
+}
+
+// SecureInbound runs the Noise handshake as the responder.
+// If p is empty, connections from any peer are accepted.
+func (t *Transport) SecureInbound(ctx context.Context, insecure net.Conn, p peer.ID) (sec.SecureConn, error) {
+ responderEDH := newTransportEDH(t)
+ c, err := newSecureSession(t, ctx, insecure, p, nil, nil, responderEDH, false, p != "")
+ if err != nil {
+ addr, maErr := manet.FromNetAddr(insecure.RemoteAddr())
+ if maErr == nil {
+ canonicallog.LogPeerStatus(100, p, addr, "handshake_failure", "noise", "err", err.Error())
+ }
+ }
+ return SessionWithConnState(c, responderEDH.MatchMuxers(false)), err
+}
+
+// SecureOutbound runs the Noise handshake as the initiator.
+func (t *Transport) SecureOutbound(ctx context.Context, insecure net.Conn, p peer.ID) (sec.SecureConn, error) {
+ initiatorEDH := newTransportEDH(t)
+ c, err := newSecureSession(t, ctx, insecure, p, nil, initiatorEDH, nil, true, true)
+ if err != nil {
+ return c, err
+ }
+ return SessionWithConnState(c, initiatorEDH.MatchMuxers(true)), err
+}
+
+func (t *Transport) WithSessionOptions(opts ...SessionOption) (*SessionTransport, error) {
+ st := &SessionTransport{t: t, protocolID: t.protocolID}
+ for _, opt := range opts {
+ if err := opt(st); err != nil {
+ return nil, err
+ }
+ }
+ return st, nil
+}
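+
+// A usage sketch (illustrative only; priv, ctx, raw, and remoteID are assumed
+// to exist in the caller's scope):
+//
+//	tpt, err := New(ID, priv, nil)
+//	if err != nil { /* handle */ }
+//	st, err := tpt.WithSessionOptions(Prologue([]byte("my-app-v1")))
+//	if err != nil { /* handle */ }
+//	conn, err := st.SecureOutbound(ctx, raw, remoteID)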
+
+func (t *Transport) ID() protocol.ID {
+ return t.protocolID
+}
+
+func matchMuxers(initiatorMuxers, responderMuxers []protocol.ID) protocol.ID {
+ for _, initMuxer := range initiatorMuxers {
+ for _, respMuxer := range responderMuxers {
+ if initMuxer == respMuxer {
+ return initMuxer
+ }
+ }
+ }
+ return ""
+}
+
+type transportEarlyDataHandler struct {
+ transport *Transport
+ receivedMuxers []protocol.ID
+}
+
+var _ EarlyDataHandler = &transportEarlyDataHandler{}
+
+func newTransportEDH(t *Transport) *transportEarlyDataHandler {
+ return &transportEarlyDataHandler{transport: t}
+}
+
+func (i *transportEarlyDataHandler) Send(context.Context, net.Conn, peer.ID) *pb.NoiseExtensions {
+ return &pb.NoiseExtensions{
+ StreamMuxers: protocol.ConvertToStrings(i.transport.muxers),
+ }
+}
+
+func (i *transportEarlyDataHandler) Received(_ context.Context, _ net.Conn, extension *pb.NoiseExtensions) error {
+	// For security, ignore the announced stream muxers if their number exceeds maxProtoNum.
+ if extension != nil && len(extension.StreamMuxers) <= maxProtoNum {
+ i.receivedMuxers = protocol.ConvertFromStrings(extension.GetStreamMuxers())
+ }
+ return nil
+}
+
+func (i *transportEarlyDataHandler) MatchMuxers(isInitiator bool) protocol.ID {
+ if isInitiator {
+ return matchMuxers(i.transport.muxers, i.receivedMuxers)
+ }
+ return matchMuxers(i.receivedMuxers, i.transport.muxers)
+}
diff --git a/p2p/security/noise/transport_test.go b/p2p/security/noise/transport_test.go
new file mode 100644
index 0000000000..b80019d71e
--- /dev/null
+++ b/p2p/security/noise/transport_test.go
@@ -0,0 +1,715 @@
+package noise
+
+import (
+ "bytes"
+ "context"
+ "encoding/binary"
+ "errors"
+ "io"
+ "math/rand"
+ "net"
+ "testing"
+ "time"
+
+ "golang.org/x/crypto/chacha20poly1305"
+
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/protocol"
+ "github.com/libp2p/go-libp2p/core/sec"
+ "github.com/libp2p/go-libp2p/p2p/security/noise/pb"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func newTestTransport(t *testing.T, typ, bits int) *Transport {
+ priv, pub, err := crypto.GenerateKeyPair(typ, bits)
+ if err != nil {
+ t.Fatal(err)
+ }
+ id, err := peer.IDFromPublicKey(pub)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return &Transport{
+ localID: id,
+ privateKey: priv,
+ }
+}
+
+func newTestTransportWithMuxers(t *testing.T, typ, bits int, muxers []protocol.ID) *Transport {
+ transport := newTestTransport(t, typ, bits)
+ transport.muxers = muxers
+ return transport
+}
+
+// Create a new pair of connected TCP sockets.
+func newConnPair(t *testing.T) (net.Conn, net.Conn) {
+ lstnr, err := net.Listen("tcp", "localhost:0")
+ if err != nil {
+ t.Fatalf("Failed to listen: %v", err)
+ return nil, nil
+ }
+
+ var clientErr error
+ var client net.Conn
+ addr := lstnr.Addr()
+ done := make(chan struct{})
+
+ go func() {
+ defer close(done)
+ client, clientErr = net.Dial(addr.Network(), addr.String())
+ }()
+
+ server, err := lstnr.Accept()
+ <-done
+
+ lstnr.Close()
+
+ if err != nil {
+ t.Fatalf("Failed to accept: %v", err)
+ }
+
+ if clientErr != nil {
+ t.Fatalf("Failed to connect: %v", clientErr)
+ }
+
+ return client, server
+}
+
+func connect(t *testing.T, initTransport, respTransport *Transport) (*secureSession, *secureSession) {
+ init, resp := newConnPair(t)
+
+ var initConn sec.SecureConn
+ var initErr error
+ done := make(chan struct{})
+ go func() {
+ defer close(done)
+ initConn, initErr = initTransport.SecureOutbound(context.Background(), init, respTransport.localID)
+ }()
+
+ respConn, respErr := respTransport.SecureInbound(context.Background(), resp, "")
+ <-done
+
+ if initErr != nil {
+ t.Fatal(initErr)
+ }
+
+ if respErr != nil {
+ t.Fatal(respErr)
+ }
+
+ return initConn.(*secureSession), respConn.(*secureSession)
+}
+
+func TestDeadlines(t *testing.T) {
+ initTransport := newTestTransport(t, crypto.Ed25519, 2048)
+ respTransport := newTestTransport(t, crypto.Ed25519, 2048)
+
+ init, resp := newConnPair(t)
+ defer init.Close()
+ defer resp.Close()
+
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
+ defer cancel()
+
+ _, err := initTransport.SecureOutbound(ctx, init, respTransport.localID)
+	if err == nil {
+		t.Fatal("expected i/o timeout err; got nil")
+	}
+
+ var neterr net.Error
+ if ok := errors.As(err, &neterr); !ok || !neterr.Timeout() {
+ t.Fatalf("expected i/o timeout err; got: %s", err)
+ }
+}
+
+func TestIDs(t *testing.T) {
+ initTransport := newTestTransport(t, crypto.Ed25519, 2048)
+ respTransport := newTestTransport(t, crypto.Ed25519, 2048)
+
+ initConn, respConn := connect(t, initTransport, respTransport)
+ defer initConn.Close()
+ defer respConn.Close()
+
+ if initConn.LocalPeer() != initTransport.localID {
+ t.Fatal("Initiator Local Peer ID mismatch.")
+ }
+
+ if respConn.RemotePeer() != initTransport.localID {
+ t.Fatal("Responder Remote Peer ID mismatch.")
+ }
+
+ if initConn.LocalPeer() != respConn.RemotePeer() {
+ t.Fatal("Responder Local Peer ID mismatch.")
+ }
+
+ // TODO: check after stage 0 of handshake if updated
+ if initConn.RemotePeer() != respTransport.localID {
+ t.Errorf("Initiator Remote Peer ID mismatch. expected %x got %x", respTransport.localID, initConn.RemotePeer())
+ }
+}
+
+func TestKeys(t *testing.T) {
+ initTransport := newTestTransport(t, crypto.Ed25519, 2048)
+ respTransport := newTestTransport(t, crypto.Ed25519, 2048)
+
+ initConn, respConn := connect(t, initTransport, respTransport)
+ defer initConn.Close()
+ defer respConn.Close()
+
+ pk1 := respConn.RemotePublicKey()
+ pk2 := initTransport.privateKey.GetPublic()
+ if !pk1.Equals(pk2) {
+ t.Errorf("Public key mismatch. expected %x got %x", pk1, pk2)
+ }
+
+ pk3 := initConn.RemotePublicKey()
+ pk4 := respTransport.privateKey.GetPublic()
+ if !pk3.Equals(pk4) {
+ t.Errorf("Public key mismatch. expected %x got %x", pk3, pk4)
+ }
+}
+
+func TestPeerIDMatch(t *testing.T) {
+ initTransport := newTestTransport(t, crypto.Ed25519, 2048)
+ respTransport := newTestTransport(t, crypto.Ed25519, 2048)
+ init, resp := newConnPair(t)
+
+ done := make(chan struct{})
+ go func() {
+ defer close(done)
+ conn, err := initTransport.SecureOutbound(context.Background(), init, respTransport.localID)
+ assert.NoError(t, err)
+ assert.Equal(t, respTransport.localID, conn.RemotePeer())
+ b := make([]byte, 6)
+ _, err = conn.Read(b)
+ assert.NoError(t, err)
+ assert.Equal(t, []byte("foobar"), b)
+ }()
+
+ conn, err := respTransport.SecureInbound(context.Background(), resp, initTransport.localID)
+ require.NoError(t, err)
+ require.Equal(t, initTransport.localID, conn.RemotePeer())
+ _, err = conn.Write([]byte("foobar"))
+ require.NoError(t, err)
+}
+
+func TestPeerIDMismatchOutboundFailsHandshake(t *testing.T) {
+ initTransport := newTestTransport(t, crypto.Ed25519, 2048)
+ respTransport := newTestTransport(t, crypto.Ed25519, 2048)
+ init, resp := newConnPair(t)
+
+ errChan := make(chan error)
+ go func() {
+ _, err := initTransport.SecureOutbound(context.Background(), init, "a-random-peer-id")
+ errChan <- err
+ }()
+
+ _, err := respTransport.SecureInbound(context.Background(), resp, "")
+ require.Error(t, err)
+
+ initErr := <-errChan
+ require.Error(t, initErr, "expected initiator to fail with peer ID mismatch error")
+ var mismatchErr sec.ErrPeerIDMismatch
+ require.ErrorAs(t, initErr, &mismatchErr)
+ require.Equal(t, mismatchErr.Expected, peer.ID("a-random-peer-id"))
+ require.Equal(t, mismatchErr.Actual, respTransport.localID)
+}
+
+func TestPeerIDMismatchInboundFailsHandshake(t *testing.T) {
+ initTransport := newTestTransport(t, crypto.Ed25519, 2048)
+ respTransport := newTestTransport(t, crypto.Ed25519, 2048)
+ init, resp := newConnPair(t)
+
+ done := make(chan struct{})
+ go func() {
+ defer close(done)
+ conn, err := initTransport.SecureOutbound(context.Background(), init, respTransport.localID)
+ assert.NoError(t, err)
+ _, err = conn.Read([]byte{0})
+ assert.Error(t, err)
+ }()
+
+ _, err := respTransport.SecureInbound(context.Background(), resp, "a-random-peer-id")
+ require.Error(t, err, "expected responder to fail with peer ID mismatch error")
+ var mismatchErr sec.ErrPeerIDMismatch
+ require.ErrorAs(t, err, &mismatchErr)
+	require.Equal(t, peer.ID("a-random-peer-id"), mismatchErr.Expected)
+	require.Equal(t, initTransport.localID, mismatchErr.Actual)
+ <-done
+}
+
+func TestPeerIDInboundCheckDisabled(t *testing.T) {
+ initTransport := newTestTransport(t, crypto.Ed25519, 2048)
+ respTransport := newTestTransport(t, crypto.Ed25519, 2048)
+ init, resp := newConnPair(t)
+
+ initSessionTransport, err := initTransport.WithSessionOptions(DisablePeerIDCheck())
+ require.NoError(t, err)
+ errChan := make(chan error)
+ go func() {
+ _, err := initSessionTransport.SecureInbound(context.Background(), init, "test")
+ errChan <- err
+ }()
+ _, err = respTransport.SecureOutbound(context.Background(), resp, initTransport.localID)
+ require.NoError(t, err)
+ initErr := <-errChan
+ require.NoError(t, initErr)
+}
+
+func TestPeerIDOutboundNoCheck(t *testing.T) {
+ initTransport := newTestTransport(t, crypto.Ed25519, 2048)
+ respTransport := newTestTransport(t, crypto.Ed25519, 2048)
+ init, resp := newConnPair(t)
+
+ initSessionTransport, err := initTransport.WithSessionOptions(DisablePeerIDCheck())
+ require.NoError(t, err)
+
+ errChan := make(chan error)
+ go func() {
+ _, err := initSessionTransport.SecureOutbound(context.Background(), init, "test")
+ errChan <- err
+ }()
+
+ _, err = respTransport.SecureInbound(context.Background(), resp, "")
+ require.NoError(t, err)
+ initErr := <-errChan
+ require.NoError(t, initErr)
+}
+
+func TestLargePayloads(t *testing.T) {
+ initTransport := newTestTransport(t, crypto.Ed25519, 2048)
+ respTransport := newTestTransport(t, crypto.Ed25519, 2048)
+
+ initConn, respConn := connect(t, initTransport, respTransport)
+ defer initConn.Close()
+ defer respConn.Close()
+
+ // enough to require a couple Noise messages, with a size that
+ // isn't a neat multiple of Noise message size, just in case
+ rnd := rand.New(rand.NewSource(1234))
+ const size = 100000
+ before := make([]byte, size)
+ rnd.Read(before)
+
+ if _, err := initConn.Write(before); err != nil {
+ t.Fatal(err)
+ }
+
+ after := make([]byte, len(before))
+ afterLen, err := io.ReadFull(respConn, after)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(before) != afterLen {
+ t.Errorf("expected to read same amount of data as written. written=%d read=%d", len(before), afterLen)
+ }
+ if !bytes.Equal(before, after) {
+ t.Error("Message mismatch.")
+ }
+}
+
+// Tests XX handshake
+func TestHandshakeXX(t *testing.T) {
+ initTransport := newTestTransport(t, crypto.Ed25519, 2048)
+ respTransport := newTestTransport(t, crypto.Ed25519, 2048)
+
+ initConn, respConn := connect(t, initTransport, respTransport)
+ defer initConn.Close()
+ defer respConn.Close()
+
+ before := []byte("hello world")
+ _, err := initConn.Write(before)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ after := make([]byte, len(before))
+ _, err = respConn.Read(after)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !bytes.Equal(before, after) {
+ t.Errorf("Message mismatch. %v != %v", before, after)
+ }
+}
+
+func TestBufferEqEncPayload(t *testing.T) {
+ initTransport := newTestTransport(t, crypto.Ed25519, 2048)
+ respTransport := newTestTransport(t, crypto.Ed25519, 2048)
+
+ initConn, respConn := connect(t, initTransport, respTransport)
+ defer initConn.Close()
+ defer respConn.Close()
+
+ before := []byte("hello world")
+ _, err := initConn.Write(before)
+ require.NoError(t, err)
+
+ after := make([]byte, len(before)+chacha20poly1305.Overhead)
+ afterLen, err := respConn.Read(after)
+ require.NoError(t, err)
+
+ require.Len(t, before, afterLen)
+ require.Equal(t, before, after[:len(before)])
+}
+
+func TestBufferEqDecryptedPayload(t *testing.T) {
+ initTransport := newTestTransport(t, crypto.Ed25519, 2048)
+ respTransport := newTestTransport(t, crypto.Ed25519, 2048)
+
+ initConn, respConn := connect(t, initTransport, respTransport)
+ defer initConn.Close()
+ defer respConn.Close()
+
+ before := []byte("hello world")
+ _, err := initConn.Write(before)
+ require.NoError(t, err)
+
+ after := make([]byte, len(before)+1)
+ afterLen, err := respConn.Read(after)
+ require.NoError(t, err)
+
+ require.Len(t, before, afterLen)
+ require.Equal(t, before, after[:len(before)])
+}
+
+func TestReadUnencryptedFails(t *testing.T) {
+	// case 1: buffer > len(msg)
+ initTransport := newTestTransport(t, crypto.Ed25519, 2048)
+ respTransport := newTestTransport(t, crypto.Ed25519, 2048)
+
+ initConn, respConn := connect(t, initTransport, respTransport)
+ defer initConn.Close()
+ defer respConn.Close()
+
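+	// Hand-build a Noise frame: a big-endian uint16 length prefix followed by
+	// the (unencrypted) payload. The responder must fail to decrypt it.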
+ before := []byte("hello world")
+ msg := make([]byte, len(before)+LengthPrefixLength)
+ binary.BigEndian.PutUint16(msg, uint16(len(before)))
+ copy(msg[LengthPrefixLength:], before)
+ n, err := initConn.insecureConn.Write(msg)
+ require.NoError(t, err)
+ require.Len(t, msg, n)
+
+ after := make([]byte, len(msg)+1)
+ afterLen, err := respConn.Read(after)
+ require.Error(t, err)
+ require.Equal(t, 0, afterLen)
+
+	// case 2: buffer < len(msg)
+ initTransport = newTestTransport(t, crypto.Ed25519, 2048)
+ respTransport = newTestTransport(t, crypto.Ed25519, 2048)
+
+ initConn, respConn = connect(t, initTransport, respTransport)
+ defer initConn.Close()
+ defer respConn.Close()
+
+ before = []byte("hello world")
+ msg = make([]byte, len(before)+LengthPrefixLength)
+ binary.BigEndian.PutUint16(msg, uint16(len(before)))
+ copy(msg[LengthPrefixLength:], before)
+ n, err = initConn.insecureConn.Write(msg)
+ require.NoError(t, err)
+ require.Len(t, msg, n)
+
+ after = make([]byte, 1)
+ afterLen, err = respConn.Read(after)
+ require.Error(t, err)
+ require.Equal(t, 0, afterLen)
+}
+
+func TestPrologueMatches(t *testing.T) {
+ commonPrologue := []byte("test")
+ initTransport := newTestTransport(t, crypto.Ed25519, 2048)
+ respTransport := newTestTransport(t, crypto.Ed25519, 2048)
+
+ initConn, respConn := newConnPair(t)
+
+ done := make(chan struct{})
+
+	go func() {
+		defer close(done)
+		tpt, err := initTransport.WithSessionOptions(Prologue(commonPrologue))
+		if !assert.NoError(t, err) {
+			return
+		}
+		conn, err := tpt.SecureOutbound(context.Background(), initConn, respTransport.localID)
+		if !assert.NoError(t, err) {
+			return
+		}
+		defer conn.Close()
+	}()
+
+	tpt, err := respTransport.WithSessionOptions(Prologue(commonPrologue))
+ require.NoError(t, err)
+ conn, err := tpt.SecureInbound(context.Background(), respConn, "")
+ require.NoError(t, err)
+ defer conn.Close()
+ <-done
+}
+
+func TestPrologueDoesNotMatchFailsHandshake(t *testing.T) {
+ initPrologue, respPrologue := []byte("initPrologue"), []byte("respPrologue")
+ initTransport := newTestTransport(t, crypto.Ed25519, 2048)
+ respTransport := newTestTransport(t, crypto.Ed25519, 2048)
+
+ initConn, respConn := newConnPair(t)
+
+ done := make(chan struct{})
+
+	go func() {
+		defer close(done)
+		tpt, err := initTransport.WithSessionOptions(Prologue(initPrologue))
+		if !assert.NoError(t, err) {
+			return
+		}
+		_, err = tpt.SecureOutbound(context.Background(), initConn, respTransport.localID)
+		assert.Error(t, err)
+	}()
+
+ tpt, err := respTransport.WithSessionOptions(Prologue(respPrologue))
+ require.NoError(t, err)
+
+ _, err = tpt.SecureInbound(context.Background(), respConn, "")
+ require.Error(t, err)
+ <-done
+}
+
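+// earlyDataHandler implements EarlyDataHandler by delegating to the optional
+// send and received callbacks; a nil callback acts as a no-op.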
+type earlyDataHandler struct {
+ send func(context.Context, net.Conn, peer.ID) *pb.NoiseExtensions
+ received func(context.Context, net.Conn, *pb.NoiseExtensions) error
+}
+
+func (e *earlyDataHandler) Send(ctx context.Context, conn net.Conn, id peer.ID) *pb.NoiseExtensions {
+ if e.send == nil {
+ return nil
+ }
+ return e.send(ctx, conn, id)
+}
+
+func (e *earlyDataHandler) Received(ctx context.Context, conn net.Conn, ext *pb.NoiseExtensions) error {
+ if e.received == nil {
+ return nil
+ }
+ return e.received(ctx, conn, ext)
+}
+
+func TestEarlyDataAccepted(t *testing.T) {
+ handshake := func(t *testing.T, client, server EarlyDataHandler) {
+ t.Helper()
+ initTransport, err := newTestTransport(t, crypto.Ed25519, 2048).WithSessionOptions(EarlyData(client, nil))
+ require.NoError(t, err)
+ tpt := newTestTransport(t, crypto.Ed25519, 2048)
+ respTransport, err := tpt.WithSessionOptions(EarlyData(nil, server))
+ require.NoError(t, err)
+
+ initConn, respConn := newConnPair(t)
+
+ errChan := make(chan error)
+ go func() {
+ _, err := respTransport.SecureInbound(context.Background(), initConn, "")
+ errChan <- err
+ }()
+
+ conn, err := initTransport.SecureOutbound(context.Background(), respConn, tpt.localID)
+ require.NoError(t, err)
+ select {
+ case <-time.After(500 * time.Millisecond):
+ t.Fatal("timeout")
+ case err := <-errChan:
+ require.NoError(t, err)
+ }
+ defer conn.Close()
+ }
+
+ var receivedExtensions *pb.NoiseExtensions
+ receivingEDH := &earlyDataHandler{
+ received: func(_ context.Context, _ net.Conn, ext *pb.NoiseExtensions) error {
+ receivedExtensions = ext
+ return nil
+ },
+ }
+ sendingEDH := &earlyDataHandler{
+ send: func(context.Context, net.Conn, peer.ID) *pb.NoiseExtensions {
+ return &pb.NoiseExtensions{WebtransportCerthashes: [][]byte{[]byte("foobar")}}
+ },
+ }
+
+ t.Run("client sending", func(t *testing.T) {
+ handshake(t, sendingEDH, receivingEDH)
+ require.Equal(t, [][]byte{[]byte("foobar")}, receivedExtensions.WebtransportCerthashes)
+ receivedExtensions = nil
+ })
+
+ t.Run("server sending", func(t *testing.T) {
+ handshake(t, receivingEDH, sendingEDH)
+ require.Equal(t, [][]byte{[]byte("foobar")}, receivedExtensions.WebtransportCerthashes)
+ receivedExtensions = nil
+ })
+}
+
+func TestEarlyDataRejected(t *testing.T) {
+ handshake := func(t *testing.T, client, server EarlyDataHandler) (clientErr, serverErr error) {
+ initTransport, err := newTestTransport(t, crypto.Ed25519, 2048).WithSessionOptions(EarlyData(client, nil))
+ require.NoError(t, err)
+ tpt := newTestTransport(t, crypto.Ed25519, 2048)
+ respTransport, err := tpt.WithSessionOptions(EarlyData(nil, server))
+ require.NoError(t, err)
+
+ initConn, respConn := newConnPair(t)
+
+ errChan := make(chan error)
+ go func() {
+ _, err := respTransport.SecureInbound(context.Background(), initConn, "")
+ errChan <- err
+ }()
+
+ // As early data is sent with the last handshake message, the handshake will appear
+ // to succeed for the client.
+ var conn sec.SecureConn
+ conn, clientErr = initTransport.SecureOutbound(context.Background(), respConn, tpt.localID)
+ if clientErr == nil {
+ _, clientErr = conn.Read([]byte{0})
+ }
+
+ select {
+ case <-time.After(500 * time.Millisecond):
+ t.Fatal("timeout")
+ case err := <-errChan:
+ serverErr = err
+ }
+ return
+ }
+
+ receivingEDH := &earlyDataHandler{
+ received: func(context.Context, net.Conn, *pb.NoiseExtensions) error { return errors.New("nope") },
+ }
+ sendingEDH := &earlyDataHandler{
+ send: func(context.Context, net.Conn, peer.ID) *pb.NoiseExtensions {
+ return &pb.NoiseExtensions{WebtransportCerthashes: [][]byte{[]byte("foobar")}}
+ },
+ }
+
+ t.Run("client sending", func(t *testing.T) {
+ clientErr, serverErr := handshake(t, sendingEDH, receivingEDH)
+ require.Error(t, clientErr)
+ require.EqualError(t, serverErr, "nope")
+ })
+
+ t.Run("server sending", func(t *testing.T) {
+ clientErr, serverErr := handshake(t, receivingEDH, sendingEDH)
+ require.Error(t, serverErr)
+ require.EqualError(t, clientErr, "nope")
+ })
+}
+
+func TestEarlyDataAcceptedWithNoHandler(t *testing.T) {
+ clientEDH := &earlyDataHandler{
+ send: func(_ context.Context, _ net.Conn, _ peer.ID) *pb.NoiseExtensions {
+ return &pb.NoiseExtensions{WebtransportCerthashes: [][]byte{[]byte("foobar")}}
+ },
+ }
+ initTransport, err := newTestTransport(t, crypto.Ed25519, 2048).WithSessionOptions(EarlyData(clientEDH, nil))
+ require.NoError(t, err)
+ respTransport := newTestTransport(t, crypto.Ed25519, 2048)
+
+ initConn, respConn := newConnPair(t)
+
+ errChan := make(chan error)
+ go func() {
+ _, err := respTransport.SecureInbound(context.Background(), initConn, "")
+ errChan <- err
+ }()
+
+ conn, err := initTransport.SecureOutbound(context.Background(), respConn, respTransport.localID)
+ require.NoError(t, err)
+ defer conn.Close()
+
+ select {
+ case <-time.After(500 * time.Millisecond):
+ t.Fatal("timeout")
+ case err := <-errChan:
+ require.NoError(t, err)
+ }
+}
+
+type noiseEarlyDataTestCase struct {
+ clientProtos []protocol.ID
+ serverProtos []protocol.ID
+ expectedResult protocol.ID
+}
+
+func TestHandshakeWithTransportEarlyData(t *testing.T) {
+ tests := []noiseEarlyDataTestCase{
+ {
+ clientProtos: nil,
+ serverProtos: nil,
+ expectedResult: "",
+ },
+ {
+ clientProtos: []protocol.ID{"muxer1"},
+ serverProtos: []protocol.ID{"muxer1"},
+ expectedResult: "muxer1",
+ },
+ {
+ clientProtos: []protocol.ID{"muxer1"},
+ serverProtos: []protocol.ID{},
+ expectedResult: "",
+ },
+ {
+ clientProtos: []protocol.ID{},
+ serverProtos: []protocol.ID{"muxer2"},
+ expectedResult: "",
+ },
+ {
+ clientProtos: []protocol.ID{"muxer2"},
+ serverProtos: []protocol.ID{"muxer1"},
+ expectedResult: "",
+ },
+ {
+ clientProtos: []protocol.ID{"muxer1", "muxer2"},
+ serverProtos: []protocol.ID{"muxer2", "muxer1"},
+ expectedResult: "muxer1",
+ },
+ {
+ clientProtos: []protocol.ID{"muxer3", "muxer2", "muxer1"},
+ serverProtos: []protocol.ID{"muxer2", "muxer1"},
+ expectedResult: "muxer2",
+ },
+ {
+ clientProtos: []protocol.ID{"muxer1", "muxer2"},
+ serverProtos: []protocol.ID{"muxer3"},
+ expectedResult: "",
+ },
+ }
+
+ noiseHandshake := func(t *testing.T, initProtos, respProtos []protocol.ID, expectedProto protocol.ID) {
+ initTransport := newTestTransportWithMuxers(t, crypto.Ed25519, 2048, initProtos)
+ respTransport := newTestTransportWithMuxers(t, crypto.Ed25519, 2048, respProtos)
+
+ initConn, respConn := connect(t, initTransport, respTransport)
+ defer initConn.Close()
+ defer respConn.Close()
+
+ require.Equal(t, expectedProto, initConn.connectionState.StreamMultiplexer)
+ require.Equal(t, expectedProto != "", initConn.connectionState.UsedEarlyMuxerNegotiation)
+ require.Equal(t, expectedProto, respConn.connectionState.StreamMultiplexer)
+ require.Equal(t, expectedProto != "", respConn.connectionState.UsedEarlyMuxerNegotiation)
+
+ initData := []byte("Test data for noise transport")
+ _, err := initConn.Write(initData)
+ require.NoError(t, err)
+
+ respData := make([]byte, len(initData))
+ _, err = respConn.Read(respData)
+ require.NoError(t, err)
+ require.Equal(t, initData, respData)
+ }
+
+ for _, test := range tests {
+ t.Run("Transport EarlyData Test", func(t *testing.T) {
+ noiseHandshake(t, test.clientProtos, test.serverProtos, test.expectedResult)
+ })
+ }
+}
diff --git a/p2p/security/tls/cmd/README.md b/p2p/security/tls/cmd/README.md
new file mode 100644
index 0000000000..d0efa12ce6
--- /dev/null
+++ b/p2p/security/tls/cmd/README.md
@@ -0,0 +1,6 @@
+# TLS handshake example
+
+Start the server by running
+```bash
+go run cmd/tlsdiag.go server
+```
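+
+The server prints its peer ID and the exact command to start the client in a
+separate terminal, e.g.
+```bash
+go run cmd/tlsdiag.go client -p 5533 -id <peer ID>
+```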
diff --git a/p2p/security/tls/cmd/tlsdiag.go b/p2p/security/tls/cmd/tlsdiag.go
new file mode 100644
index 0000000000..d6f7bac674
--- /dev/null
+++ b/p2p/security/tls/cmd/tlsdiag.go
@@ -0,0 +1,33 @@
+package main
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/libp2p/go-libp2p/p2p/security/tls/cmd/tlsdiag"
+)
+
+func main() {
+ if len(os.Args) <= 1 {
+ fmt.Println("missing argument: client / server")
+ return
+ }
+
+ role := os.Args[1]
+ // remove the role argument from os.Args
+ os.Args = append([]string{os.Args[0]}, os.Args[2:]...)
+
+ var err error
+ switch role {
+ case "client":
+ err = tlsdiag.StartClient()
+ case "server":
+ err = tlsdiag.StartServer()
+ default:
+ fmt.Println("invalid argument. Expected client / server")
+ return
+ }
+ if err != nil {
+ panic(err)
+ }
+}
diff --git a/p2p/security/tls/cmd/tlsdiag/client.go b/p2p/security/tls/cmd/tlsdiag/client.go
new file mode 100644
index 0000000000..a29189a375
--- /dev/null
+++ b/p2p/security/tls/cmd/tlsdiag/client.go
@@ -0,0 +1,63 @@
+package tlsdiag
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "io"
+ "net"
+ "time"
+
+ libp2ptls "github.com/libp2p/go-libp2p/p2p/security/tls"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+)
+
+func StartClient() error {
+ port := flag.Int("p", 5533, "port")
+ peerIDString := flag.String("id", "", "peer ID")
+ keyType := flag.String("key", "ecdsa", "rsa, ecdsa, ed25519 or secp256k1")
+ flag.Parse()
+
+ priv, err := generateKey(*keyType)
+ if err != nil {
+ return err
+ }
+
+ peerID, err := peer.Decode(*peerIDString)
+ if err != nil {
+ return err
+ }
+
+ id, err := peer.IDFromPrivateKey(priv)
+ if err != nil {
+ return err
+ }
+ fmt.Printf(" Peer ID: %s\n", id)
+ tp, err := libp2ptls.New(libp2ptls.ID, priv, nil)
+ if err != nil {
+ return err
+ }
+
+ remoteAddr := fmt.Sprintf("localhost:%d", *port)
+ fmt.Printf("Dialing %s\n", remoteAddr)
+ conn, err := net.Dial("tcp", remoteAddr)
+ if err != nil {
+ return err
+ }
+ fmt.Printf("Dialed raw connection to %s\n", conn.RemoteAddr())
+
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ sconn, err := tp.SecureOutbound(ctx, conn, peerID)
+ if err != nil {
+ return err
+ }
+ fmt.Printf("Authenticated server: %s\n", sconn.RemotePeer())
+ data, err := io.ReadAll(sconn)
+ if err != nil {
+ return err
+ }
+ fmt.Printf("Received message from server: %s\n", string(data))
+ return nil
+}
diff --git a/p2p/security/tls/cmd/tlsdiag/key.go b/p2p/security/tls/cmd/tlsdiag/key.go
new file mode 100644
index 0000000000..c192d221e6
--- /dev/null
+++ b/p2p/security/tls/cmd/tlsdiag/key.go
@@ -0,0 +1,28 @@
+package tlsdiag
+
+import (
+ "crypto/rand"
+ "fmt"
+
+ ic "github.com/libp2p/go-libp2p/core/crypto"
+)
+
+func generateKey(keyType string) (priv ic.PrivKey, err error) {
+	switch keyType {
+	case "rsa":
+		fmt.Println("Generated new peer with an RSA key.")
+		priv, _, err = ic.GenerateRSAKeyPair(2048, rand.Reader)
+	case "ecdsa":
+		fmt.Println("Generated new peer with an ECDSA key.")
+		priv, _, err = ic.GenerateECDSAKeyPair(rand.Reader)
+	case "ed25519":
+		fmt.Println("Generated new peer with an Ed25519 key.")
+		priv, _, err = ic.GenerateEd25519Key(rand.Reader)
+	case "secp256k1":
+		fmt.Println("Generated new peer with a Secp256k1 key.")
+		priv, _, err = ic.GenerateSecp256k1Key(rand.Reader)
+	default:
+		return nil, fmt.Errorf("unknown key type: %s", keyType)
+	}
+ return
+}
diff --git a/p2p/security/tls/cmd/tlsdiag/server.go b/p2p/security/tls/cmd/tlsdiag/server.go
new file mode 100644
index 0000000000..cd702a7334
--- /dev/null
+++ b/p2p/security/tls/cmd/tlsdiag/server.go
@@ -0,0 +1,68 @@
+package tlsdiag
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "net"
+ "time"
+
+ libp2ptls "github.com/libp2p/go-libp2p/p2p/security/tls"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+)
+
+func StartServer() error {
+ port := flag.Int("p", 5533, "port")
+ keyType := flag.String("key", "ecdsa", "rsa, ecdsa, ed25519 or secp256k1")
+ flag.Parse()
+
+ priv, err := generateKey(*keyType)
+ if err != nil {
+ return err
+ }
+
+ id, err := peer.IDFromPrivateKey(priv)
+ if err != nil {
+ return err
+ }
+ fmt.Printf(" Peer ID: %s\n", id)
+ tp, err := libp2ptls.New(libp2ptls.ID, priv, nil)
+ if err != nil {
+ return err
+ }
+
+ ln, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", *port))
+ if err != nil {
+ return err
+ }
+ fmt.Printf("Listening for new connections on %s\n", ln.Addr())
+ fmt.Printf("Now run the following command in a separate terminal:\n")
+ fmt.Printf("\tgo run cmd/tlsdiag.go client -p %d -id %s\n", *port, id)
+
+ for {
+ conn, err := ln.Accept()
+ if err != nil {
+ return err
+ }
+ fmt.Printf("Accepted raw connection from %s\n", conn.RemoteAddr())
+ go func() {
+ if err := handleConn(tp, conn); err != nil {
+ fmt.Printf("Error handling connection from %s: %s\n", conn.RemoteAddr(), err)
+ }
+ }()
+ }
+}
+
+func handleConn(tp *libp2ptls.Transport, conn net.Conn) error {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ sconn, err := tp.SecureInbound(ctx, conn, "")
+ if err != nil {
+ return err
+ }
+ fmt.Printf("Authenticated client: %s\n", sconn.RemotePeer())
+ fmt.Fprintf(sconn, "Hello client!")
+ fmt.Printf("Closing connection to %s\n", conn.RemoteAddr())
+ return sconn.Close()
+}
diff --git a/p2p/security/tls/conn.go b/p2p/security/tls/conn.go
new file mode 100644
index 0000000000..143da3921c
--- /dev/null
+++ b/p2p/security/tls/conn.go
@@ -0,0 +1,37 @@
+package libp2ptls
+
+import (
+ "crypto/tls"
+
+ ci "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/sec"
+)
+
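+// conn is a sec.SecureConn that wraps the underlying tls.Conn together with
+// the libp2p identity information established during the handshake.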
+type conn struct {
+ *tls.Conn
+
+ localPeer peer.ID
+ remotePeer peer.ID
+ remotePubKey ci.PubKey
+ connectionState network.ConnectionState
+}
+
+var _ sec.SecureConn = &conn{}
+
+func (c *conn) LocalPeer() peer.ID {
+ return c.localPeer
+}
+
+func (c *conn) RemotePeer() peer.ID {
+ return c.remotePeer
+}
+
+func (c *conn) RemotePublicKey() ci.PubKey {
+ return c.remotePubKey
+}
+
+func (c *conn) ConnState() network.ConnectionState {
+ return c.connectionState
+}
diff --git a/p2p/security/tls/crypto.go b/p2p/security/tls/crypto.go
new file mode 100644
index 0000000000..70a594d060
--- /dev/null
+++ b/p2p/security/tls/crypto.go
@@ -0,0 +1,286 @@
+package libp2ptls
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/tls"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+ "errors"
+ "fmt"
+ "io"
+ "math/big"
+ "os"
+ "runtime/debug"
+ "time"
+
+ ic "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/sec"
+)
+
+const certValidityPeriod = 100 * 365 * 24 * time.Hour // ~100 years
+const certificatePrefix = "libp2p-tls-handshake:"
+const alpn string = "libp2p"
+
+var extensionID = getPrefixedExtensionID([]int{1, 1})
+var extensionCritical bool // so we can mark the extension critical in tests
+
+type signedKey struct {
+ PubKey []byte
+ Signature []byte
+}
+
+// Identity is used to secure connections
+type Identity struct {
+ config tls.Config
+}
+
+// IdentityConfig is used to configure an Identity
+type IdentityConfig struct {
+ CertTemplate *x509.Certificate
+ KeyLogWriter io.Writer
+}
+
+// IdentityOption transforms an IdentityConfig to apply optional settings.
+type IdentityOption func(r *IdentityConfig)
+
+// WithCertTemplate specifies the template to use when generating a new certificate.
+func WithCertTemplate(template *x509.Certificate) IdentityOption {
+ return func(c *IdentityConfig) {
+ c.CertTemplate = template
+ }
+}
+
+// WithKeyLogWriter optionally specifies a destination for TLS master secrets
+// in NSS key log format that can be used to allow external programs
+// such as Wireshark to decrypt TLS connections.
+// See https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format.
+// Use of KeyLogWriter compromises security and should only be
+// used for debugging.
+func WithKeyLogWriter(w io.Writer) IdentityOption {
+ return func(c *IdentityConfig) {
+ c.KeyLogWriter = w
+ }
+}
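+
+// A debugging-only sketch (the file name is illustrative, not part of the API):
+//
+//	f, err := os.Create("keylog.txt") // never enable this in production
+//	if err != nil { /* handle error */ }
+//	id, err := NewIdentity(priv, WithKeyLogWriter(f))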
+
+// NewIdentity creates a new identity
+func NewIdentity(privKey ic.PrivKey, opts ...IdentityOption) (*Identity, error) {
+ config := IdentityConfig{}
+ for _, opt := range opts {
+ opt(&config)
+ }
+
+ var err error
+ if config.CertTemplate == nil {
+ config.CertTemplate, err = certTemplate()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ cert, err := keyToCertificate(privKey, config.CertTemplate)
+ if err != nil {
+ return nil, err
+ }
+ return &Identity{
+ config: tls.Config{
+ MinVersion: tls.VersionTLS13,
+ InsecureSkipVerify: true, // This is not insecure here. We will verify the cert chain ourselves.
+ ClientAuth: tls.RequireAnyClientCert,
+ Certificates: []tls.Certificate{*cert},
+ VerifyPeerCertificate: func(_ [][]byte, _ [][]*x509.Certificate) error {
+ panic("tls config not specialized for peer")
+ },
+ NextProtos: []string{alpn},
+ SessionTicketsDisabled: true,
+ KeyLogWriter: config.KeyLogWriter,
+ },
+ }, nil
+}
+
+// ConfigForPeer creates a new single-use tls.Config that verifies the peer's
+// certificate chain and returns the peer's public key via the channel. If the
+// peer ID is empty, the returned config will accept any peer.
+//
+// It should be used to create a new tls.Config before securing either an
+// incoming or outgoing connection.
+func (i *Identity) ConfigForPeer(remote peer.ID) (*tls.Config, <-chan ic.PubKey) {
+ keyCh := make(chan ic.PubKey, 1)
+	// We need to check the peer ID in the VerifyPeerCertificate callback.
+	// The tls.Config is also used for listening, and we might have concurrent dials.
+	// Clone it so we can check for the specific peer ID we're dialing here.
+ conf := i.config.Clone()
+ // We're using InsecureSkipVerify, so the verifiedChains parameter will always be empty.
+ // We need to parse the certificates ourselves from the raw certs.
+ conf.VerifyPeerCertificate = func(rawCerts [][]byte, _ [][]*x509.Certificate) (err error) {
+ defer func() {
+ if rerr := recover(); rerr != nil {
+ fmt.Fprintf(os.Stderr, "panic when processing peer certificate in TLS handshake: %s\n%s\n", rerr, debug.Stack())
+ err = fmt.Errorf("panic when processing peer certificate in TLS handshake: %s", rerr)
+ }
+ }()
+
+ defer close(keyCh)
+
+ chain := make([]*x509.Certificate, len(rawCerts))
+ for i := 0; i < len(rawCerts); i++ {
+ cert, err := x509.ParseCertificate(rawCerts[i])
+ if err != nil {
+ return err
+ }
+ chain[i] = cert
+ }
+
+ pubKey, err := PubKeyFromCertChain(chain)
+ if err != nil {
+ return err
+ }
+ if remote != "" && !remote.MatchesPublicKey(pubKey) {
+ peerID, err := peer.IDFromPublicKey(pubKey)
+ if err != nil {
+ peerID = peer.ID(fmt.Sprintf("(not determined: %s)", err.Error()))
+ }
+ return sec.ErrPeerIDMismatch{Expected: remote, Actual: peerID}
+ }
+ keyCh <- pubKey
+ return nil
+ }
+ return conf, keyCh
+}
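+
+// A minimal dial-side sketch (assumes a pre-established net.Conn "raw" and a
+// context "ctx"; error handling elided):
+//
+//	conf, keyCh := identity.ConfigForPeer(remotePeerID)
+//	tlsConn := tls.Client(raw, conf)
+//	if err := tlsConn.HandshakeContext(ctx); err != nil { /* handle error */ }
+//	remotePubKey := <-keyCh // filled (and closed) during certificate verification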
+
+// PubKeyFromCertChain verifies the certificate chain and extracts the remote's public key.
+func PubKeyFromCertChain(chain []*x509.Certificate) (ic.PubKey, error) {
+ if len(chain) != 1 {
+		return nil, errors.New("expected exactly one certificate in the chain")
+ }
+ cert := chain[0]
+ pool := x509.NewCertPool()
+ pool.AddCert(cert)
+ var found bool
+ var keyExt pkix.Extension
+ // find the libp2p key extension, skipping all unknown extensions
+ for _, ext := range cert.Extensions {
+ if extensionIDEqual(ext.Id, extensionID) {
+ keyExt = ext
+ found = true
+ for i, oident := range cert.UnhandledCriticalExtensions {
+ if oident.Equal(ext.Id) {
+ // delete the extension from UnhandledCriticalExtensions
+ cert.UnhandledCriticalExtensions = append(cert.UnhandledCriticalExtensions[:i], cert.UnhandledCriticalExtensions[i+1:]...)
+ break
+ }
+ }
+ break
+ }
+ }
+ if !found {
+ return nil, errors.New("expected certificate to contain the key extension")
+ }
+ if _, err := cert.Verify(x509.VerifyOptions{Roots: pool}); err != nil {
+ // If we return an x509 error here, it will be sent on the wire.
+ // Wrap the error to avoid that.
+ return nil, fmt.Errorf("certificate verification failed: %s", err)
+ }
+
+ var sk signedKey
+ if _, err := asn1.Unmarshal(keyExt.Value, &sk); err != nil {
+ return nil, fmt.Errorf("unmarshalling signed certificate failed: %s", err)
+ }
+ pubKey, err := ic.UnmarshalPublicKey(sk.PubKey)
+ if err != nil {
+ return nil, fmt.Errorf("unmarshalling public key failed: %s", err)
+ }
+ certKeyPub, err := x509.MarshalPKIXPublicKey(cert.PublicKey)
+ if err != nil {
+ return nil, err
+ }
+ valid, err := pubKey.Verify(append([]byte(certificatePrefix), certKeyPub...), sk.Signature)
+ if err != nil {
+ return nil, fmt.Errorf("signature verification failed: %s", err)
+ }
+ if !valid {
+ return nil, errors.New("signature invalid")
+ }
+ return pubKey, nil
+}
+
+// GenerateSignedExtension uses the provided private key to sign the public key, and returns the
+// signature within a pkix.Extension.
+// This extension is included in a certificate to cryptographically tie it to the libp2p private key.
+func GenerateSignedExtension(sk ic.PrivKey, pubKey crypto.PublicKey) (pkix.Extension, error) {
+ keyBytes, err := ic.MarshalPublicKey(sk.GetPublic())
+ if err != nil {
+ return pkix.Extension{}, err
+ }
+ certKeyPub, err := x509.MarshalPKIXPublicKey(pubKey)
+ if err != nil {
+ return pkix.Extension{}, err
+ }
+ signature, err := sk.Sign(append([]byte(certificatePrefix), certKeyPub...))
+ if err != nil {
+ return pkix.Extension{}, err
+ }
+ value, err := asn1.Marshal(signedKey{
+ PubKey: keyBytes,
+ Signature: signature,
+ })
+ if err != nil {
+ return pkix.Extension{}, err
+ }
+
+ return pkix.Extension{Id: extensionID, Critical: extensionCritical, Value: value}, nil
+}
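+
+// The extension round-trips as follows (sketch): keyToCertificate below embeds
+// it in a self-signed certificate via ExtraExtensions, and the remote side
+// recovers and verifies the libp2p key with PubKeyFromCertChain.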
+
+// keyToCertificate generates a new ECDSA private key and corresponding x509 certificate.
+// The certificate includes an extension that cryptographically ties it to the provided libp2p
+// private key to authenticate TLS connections.
+func keyToCertificate(sk ic.PrivKey, certTmpl *x509.Certificate) (*tls.Certificate, error) {
+ certKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ if err != nil {
+ return nil, err
+ }
+
+	// after calling CreateCertificate, this extension will end up in Certificate.Extensions
+ extension, err := GenerateSignedExtension(sk, certKey.Public())
+ if err != nil {
+ return nil, err
+ }
+ certTmpl.ExtraExtensions = append(certTmpl.ExtraExtensions, extension)
+
+ certDER, err := x509.CreateCertificate(rand.Reader, certTmpl, certTmpl, certKey.Public(), certKey)
+ if err != nil {
+ return nil, err
+ }
+ return &tls.Certificate{
+ Certificate: [][]byte{certDER},
+ PrivateKey: certKey,
+ }, nil
+}
+
+// certTemplate returns the template for generating an Identity's TLS certificates.
+func certTemplate() (*x509.Certificate, error) {
+ bigNum := big.NewInt(1 << 62)
+ sn, err := rand.Int(rand.Reader, bigNum)
+ if err != nil {
+ return nil, err
+ }
+
+ subjectSN, err := rand.Int(rand.Reader, bigNum)
+ if err != nil {
+ return nil, err
+ }
+
+ return &x509.Certificate{
+ SerialNumber: sn,
+ NotBefore: time.Now().Add(-time.Hour),
+ NotAfter: time.Now().Add(certValidityPeriod),
+		// According to RFC 3280, the issuer field must be set,
+		// see https://datatracker.ietf.org/doc/html/rfc3280#section-4.1.2.4.
+		// Since the certificate is self-signed, the subject doubles as the issuer.
+		Subject: pkix.Name{SerialNumber: subjectSN.String()},
+ }, nil
+}
diff --git a/p2p/security/tls/crypto_test.go b/p2p/security/tls/crypto_test.go
new file mode 100644
index 0000000000..6171d178cd
--- /dev/null
+++ b/p2p/security/tls/crypto_test.go
@@ -0,0 +1,112 @@
+package libp2ptls
+
+import (
+ "crypto/x509"
+ "encoding/hex"
+ "testing"
+
+ "github.com/libp2p/go-libp2p/core/crypto/pb"
+ "github.com/libp2p/go-libp2p/core/peer"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestNewIdentityCertificates(t *testing.T) {
+ _, key := createPeer(t)
+ cn := "a.test.name"
+ email := "unittest@example.com"
+
+ t.Run("NewIdentity with default template", func(t *testing.T) {
+ // Generate an identity using the default template
+ id, err := NewIdentity(key)
+ require.NoError(t, err)
+
+ // Extract the x509 certificate
+ x509Cert, err := x509.ParseCertificate(id.config.Certificates[0].Certificate[0])
+ require.NoError(t, err)
+
+ // verify the common name and email are not set
+ require.Empty(t, x509Cert.Subject.CommonName)
+ require.Empty(t, x509Cert.EmailAddresses)
+ })
+
+ t.Run("NewIdentity with custom template", func(t *testing.T) {
+ tmpl, err := certTemplate()
+ require.NoError(t, err)
+
+ tmpl.Subject.CommonName = cn
+ tmpl.EmailAddresses = []string{email}
+
+ // Generate an identity using the custom template
+ id, err := NewIdentity(key, WithCertTemplate(tmpl))
+ require.NoError(t, err)
+
+ // Extract the x509 certificate
+ x509Cert, err := x509.ParseCertificate(id.config.Certificates[0].Certificate[0])
+ require.NoError(t, err)
+
+ // verify the common name and email are set
+ assert.Equal(t, cn, x509Cert.Subject.CommonName)
+ assert.Equal(t, email, x509Cert.EmailAddresses[0])
+ })
+}
+
+func TestVectors(t *testing.T) {
+ type testcase struct {
+ name string
+ data string
+ peerID string
+ keyType pb.KeyType
+ error string
+ }
+
+ testcases := []testcase{
+ {
+ name: "ED25519 Peer ID",
+ data: "308201ae30820156a0030201020204499602d2300a06082a8648ce3d040302302031123010060355040a13096c69627032702e696f310a300806035504051301313020170d3735303130313133303030305a180f34303936303130313133303030305a302031123010060355040a13096c69627032702e696f310a300806035504051301313059301306072a8648ce3d020106082a8648ce3d030107034200040c901d423c831ca85e27c73c263ba132721bb9d7a84c4f0380b2a6756fd601331c8870234dec878504c174144fa4b14b66a651691606d8173e55bd37e381569ea37c307a3078060a2b0601040183a25a0101046a3068042408011220a77f1d92fedb59dddaea5a1c4abd1ac2fbde7d7b879ed364501809923d7c11b90440d90d2769db992d5e6195dbb08e706b6651e024fda6cfb8846694a435519941cac215a8207792e42849cccc6cd8136c6e4bde92a58c5e08cfd4206eb5fe0bf909300a06082a8648ce3d0403020346003043021f50f6b6c52711a881778718238f650c9fb48943ae6ee6d28427dc6071ae55e702203625f116a7a454db9c56986c82a25682f7248ea1cb764d322ea983ed36a31b77",
+ peerID: "12D3KooWM6CgA9iBFZmcYAHA6A2qvbAxqfkmrYiRQuz3XEsk4Ksv",
+ keyType: pb.KeyType_Ed25519,
+ },
+ {
+ name: "ECDSA Peer ID",
+ data: "308201f63082019da0030201020204499602d2300a06082a8648ce3d040302302031123010060355040a13096c69627032702e696f310a300806035504051301313020170d3735303130313133303030305a180f34303936303130313133303030305a302031123010060355040a13096c69627032702e696f310a300806035504051301313059301306072a8648ce3d020106082a8648ce3d030107034200040c901d423c831ca85e27c73c263ba132721bb9d7a84c4f0380b2a6756fd601331c8870234dec878504c174144fa4b14b66a651691606d8173e55bd37e381569ea381c23081bf3081bc060a2b0601040183a25a01010481ad3081aa045f0803125b3059301306072a8648ce3d020106082a8648ce3d03010703420004bf30511f909414ebdd3242178fd290f093a551cf75c973155de0bb5a96fedf6cb5d52da7563e794b512f66e60c7f55ba8a3acf3dd72a801980d205e8a1ad29f2044730450220064ea8124774caf8f50e57f436aa62350ce652418c019df5d98a3ac666c9386a022100aa59d704a931b5f72fb9222cb6cc51f954d04a4e2e5450f8805fe8918f71eaae300a06082a8648ce3d04030203470030440220799395b0b6c1e940a7e4484705f610ab51ed376f19ff9d7c16757cfbf61b8d4302206205c03fbb0f95205c779be86581d3e31c01871ad5d1f3435bcf375cb0e5088a",
+ peerID: "QmfXbAwNjJLXfesgztEHe8HwgVDCMMpZ9Eax1HYq6hn9uE",
+ keyType: pb.KeyType_ECDSA,
+ },
+ {
+ name: "secp256k1 Peer ID",
+ data: "308201ba3082015fa0030201020204499602d2300a06082a8648ce3d040302302031123010060355040a13096c69627032702e696f310a300806035504051301313020170d3735303130313133303030305a180f34303936303130313133303030305a302031123010060355040a13096c69627032702e696f310a300806035504051301313059301306072a8648ce3d020106082a8648ce3d030107034200040c901d423c831ca85e27c73c263ba132721bb9d7a84c4f0380b2a6756fd601331c8870234dec878504c174144fa4b14b66a651691606d8173e55bd37e381569ea38184308181307f060a2b0601040183a25a01010471306f0425080212210206dc6968726765b820f050263ececf7f71e4955892776c0970542efd689d2382044630440220145e15a991961f0d08cd15425bb95ec93f6ffa03c5a385eedc34ecf464c7a8ab022026b3109b8a3f40ef833169777eb2aa337cfb6282f188de0666d1bcec2a4690dd300a06082a8648ce3d0403020349003046022100e1a217eeef9ec9204b3f774a08b70849646b6a1e6b8b27f93dc00ed58545d9fe022100b00dafa549d0f03547878338c7b15e7502888f6d45db387e5ae6b5d46899cef0",
+ peerID: "16Uiu2HAkutTMoTzDw1tCvSRtu6YoixJwS46S1ZFxW8hSx9fWHiPs",
+ keyType: pb.KeyType_Secp256k1,
+ },
+ {
+ name: "Invalid certificate",
+ data: "308201f73082019da0030201020204499602d2300a06082a8648ce3d040302302031123010060355040a13096c69627032702e696f310a300806035504051301313020170d3735303130313133303030305a180f34303936303130313133303030305a302031123010060355040a13096c69627032702e696f310a300806035504051301313059301306072a8648ce3d020106082a8648ce3d030107034200040c901d423c831ca85e27c73c263ba132721bb9d7a84c4f0380b2a6756fd601331c8870234dec878504c174144fa4b14b66a651691606d8173e55bd37e381569ea381c23081bf3081bc060a2b0601040183a25a01010481ad3081aa045f0803125b3059301306072a8648ce3d020106082a8648ce3d03010703420004bf30511f909414ebdd3242178fd290f093a551cf75c973155de0bb5a96fedf6cb5d52da7563e794b512f66e60c7f55ba8a3acf3dd72a801980d205e8a1ad29f204473045022100bb6e03577b7cc7a3cd1558df0da2b117dfdcc0399bc2504ebe7de6f65cade72802206de96e2a5be9b6202adba24ee0362e490641ac45c240db71fe955f2c5cf8df6e300a06082a8648ce3d0403020348003045022100e847f267f43717358f850355bdcabbefb2cfbf8a3c043b203a14788a092fe8db022027c1d04a2d41fd6b57a7e8b3989e470325de4406e52e084e34a3fd56eef0d0df",
+ error: "signature invalid",
+ },
+ }
+
+ for _, tc := range testcases {
+ t.Run(tc.name, func(t *testing.T) {
+ data, err := hex.DecodeString(tc.data)
+ require.NoError(t, err)
+
+ cert, err := x509.ParseCertificate(data)
+ require.NoError(t, err)
+ key, err := PubKeyFromCertChain([]*x509.Certificate{cert})
+ if tc.error != "" {
+ require.Error(t, err)
+ require.Contains(t, err.Error(), tc.error)
+ return
+ }
+ require.NoError(t, err)
+ require.Equal(t, tc.keyType, key.Type())
+ id, err := peer.IDFromPublicKey(key)
+ require.NoError(t, err)
+ expectedID, err := peer.Decode(tc.peerID)
+ require.NoError(t, err)
+ require.Equal(t, expectedID, id)
+ })
+ }
+}
diff --git a/p2p/security/tls/extension.go b/p2p/security/tls/extension.go
new file mode 100644
index 0000000000..9472c77e83
--- /dev/null
+++ b/p2p/security/tls/extension.go
@@ -0,0 +1,22 @@
+package libp2ptls
+
+var extensionPrefix = []int{1, 3, 6, 1, 4, 1, 53594}
+
+// getPrefixedExtensionID returns an Object Identifier
+// that can be used in x509 Certificates.
+func getPrefixedExtensionID(suffix []int) []int {
+ return append(extensionPrefix, suffix...)
+}
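+
+// For example, getPrefixedExtensionID([]int{1, 1}) yields 1.3.6.1.4.1.53594.1.1,
+// the OID crypto.go uses for the libp2p public key extension.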
+
+// extensionIDEqual compares two extension IDs.
+func extensionIDEqual(a, b []int) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i := range a {
+ if a[i] != b[i] {
+ return false
+ }
+ }
+ return true
+}
diff --git a/p2p/security/tls/extension_test.go b/p2p/security/tls/extension_test.go
new file mode 100644
index 0000000000..6bed72590d
--- /dev/null
+++ b/p2p/security/tls/extension_test.go
@@ -0,0 +1,18 @@
+package libp2ptls
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestExtensionGenerating(t *testing.T) {
+ require.Equal(t, []int{1, 3, 6, 1, 4, 1, 53594, 13, 37}, getPrefixedExtensionID([]int{13, 37}))
+}
+
+func TestExtensionComparison(t *testing.T) {
+ require.True(t, extensionIDEqual([]int{1, 2, 3, 4}, []int{1, 2, 3, 4}))
+ require.False(t, extensionIDEqual([]int{1, 2, 3, 4}, []int{1, 2, 3}))
+ require.False(t, extensionIDEqual([]int{1, 2, 3}, []int{1, 2, 3, 4}))
+ require.False(t, extensionIDEqual([]int{1, 2, 3, 4}, []int{4, 3, 2, 1}))
+}
diff --git a/p2p/security/tls/transport.go b/p2p/security/tls/transport.go
new file mode 100644
index 0000000000..0c494a7fdc
--- /dev/null
+++ b/p2p/security/tls/transport.go
@@ -0,0 +1,182 @@
+package libp2ptls
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "net"
+ "os"
+ "runtime/debug"
+
+ "github.com/libp2p/go-libp2p/core/canonicallog"
+ ci "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/protocol"
+ "github.com/libp2p/go-libp2p/core/sec"
+ tptu "github.com/libp2p/go-libp2p/p2p/net/upgrader"
+
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
+// ID is the protocol ID (used when negotiating with multistream)
+const ID = "/tls/1.0.0"
+
+// Transport constructs secure communication sessions for a peer.
+type Transport struct {
+ identity *Identity
+
+ localPeer peer.ID
+ privKey ci.PrivKey
+ muxers []protocol.ID
+ protocolID protocol.ID
+}
+
+var _ sec.SecureTransport = &Transport{}
+
+// New creates a TLS encrypted transport
+func New(id protocol.ID, key ci.PrivKey, muxers []tptu.StreamMuxer) (*Transport, error) {
+ localPeer, err := peer.IDFromPrivateKey(key)
+ if err != nil {
+ return nil, err
+ }
+ muxerIDs := make([]protocol.ID, 0, len(muxers))
+ for _, m := range muxers {
+ muxerIDs = append(muxerIDs, m.ID)
+ }
+ t := &Transport{
+ protocolID: id,
+ localPeer: localPeer,
+ privKey: key,
+ muxers: muxerIDs,
+ }
+
+ identity, err := NewIdentity(key)
+ if err != nil {
+ return nil, err
+ }
+ t.identity = identity
+ return t, nil
+}
+
+// SecureInbound runs the TLS handshake as a server.
+// If p is empty, connections from any peer are accepted.
+func (t *Transport) SecureInbound(ctx context.Context, insecure net.Conn, p peer.ID) (sec.SecureConn, error) {
+ config, keyCh := t.identity.ConfigForPeer(p)
+ muxers := make([]string, 0, len(t.muxers))
+ for _, muxer := range t.muxers {
+ muxers = append(muxers, string(muxer))
+ }
+	// TLS' ALPN mechanism lets the server pick the protocol, based on the server's preference order.
+	// We want to defer to the client's preference instead.
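+	// For example, if the client offers [muxer1 muxer2 libp2p] and we support
+	// [muxer2 muxer1], the callback below selects muxer1, the client's first choice.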
+ getConfigForClient := config.GetConfigForClient
+ config.GetConfigForClient = func(info *tls.ClientHelloInfo) (*tls.Config, error) {
+ alpnLoop:
+ for _, proto := range info.SupportedProtos {
+ for _, m := range muxers {
+ if m == proto {
+ // Match found. Select this muxer, as it's the client's preference.
+ // There's no need to add the "libp2p" entry here.
+ config.NextProtos = []string{proto}
+ break alpnLoop
+ }
+ }
+ }
+ if getConfigForClient != nil {
+ return getConfigForClient(info)
+ }
+ return config, nil
+ }
+ config.NextProtos = append(muxers, config.NextProtos...)
+ cs, err := t.handshake(ctx, tls.Server(insecure, config), keyCh)
+ if err != nil {
+ addr, maErr := manet.FromNetAddr(insecure.RemoteAddr())
+ if maErr == nil {
+ canonicallog.LogPeerStatus(100, p, addr, "handshake_failure", "tls", "err", err.Error())
+ }
+ insecure.Close()
+ }
+ return cs, err
+}
+
+// SecureOutbound runs the TLS handshake as a client.
+// Note that SecureOutbound will not return an error if the server doesn't
+// accept the certificate. This is due to the fact that in TLS 1.3, the client
+// sends its certificate and the ClientFinished in the same flight, and can send
+// application data immediately afterwards.
+// If the handshake fails, the server will close the connection. The client will
+// notice this after 1 RTT when calling Read.
+func (t *Transport) SecureOutbound(ctx context.Context, insecure net.Conn, p peer.ID) (sec.SecureConn, error) {
+ config, keyCh := t.identity.ConfigForPeer(p)
+ muxers := make([]string, 0, len(t.muxers))
+ for _, muxer := range t.muxers {
+		muxers = append(muxers, string(muxer))
+ }
+ // Prepend the preferred muxers list to TLS config.
+ config.NextProtos = append(muxers, config.NextProtos...)
+ cs, err := t.handshake(ctx, tls.Client(insecure, config), keyCh)
+ if err != nil {
+ insecure.Close()
+ }
+ return cs, err
+}
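+
+// Callers that need to detect a server-side certificate rejection right away
+// can follow the handshake with a Read, e.g. (sketch):
+//
+//	conn, err := tp.SecureOutbound(ctx, raw, p)
+//	if err == nil {
+//		_, err = conn.Read([]byte{0}) // surfaces the rejection after 1 RTT
+//	}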
+
+func (t *Transport) handshake(ctx context.Context, tlsConn *tls.Conn, keyCh <-chan ci.PubKey) (_sconn sec.SecureConn, err error) {
+ defer func() {
+ if rerr := recover(); rerr != nil {
+ fmt.Fprintf(os.Stderr, "panic in TLS handshake: %s\n%s\n", rerr, debug.Stack())
+ err = fmt.Errorf("panic in TLS handshake: %s", rerr)
+ }
+ }()
+
+ // handshaking...
+ if err := tlsConn.HandshakeContext(ctx); err != nil {
+ return nil, err
+ }
+
+ // Should be ready by this point, don't block.
+ var remotePubKey ci.PubKey
+ select {
+ case remotePubKey = <-keyCh:
+ default:
+ }
+ if remotePubKey == nil {
+ return nil, errors.New("go-libp2p tls BUG: expected remote pub key to be set")
+ }
+
+ return t.setupConn(tlsConn, remotePubKey)
+}
+
+func (t *Transport) setupConn(tlsConn *tls.Conn, remotePubKey ci.PubKey) (sec.SecureConn, error) {
+ remotePeerID, err := peer.IDFromPublicKey(remotePubKey)
+ if err != nil {
+ return nil, err
+ }
+
+ nextProto := tlsConn.ConnectionState().NegotiatedProtocol
+	// The special ALPN extension value "libp2p" is used by libp2p versions
+	// that don't support early muxer negotiation. If we see this special
+	// value selected, we are handshaking with a version that does not
+	// support early muxer negotiation. In this case, return an empty
+	// nextProto to indicate that no muxer was selected.
+ if nextProto == "libp2p" {
+ nextProto = ""
+ }
+
+ return &conn{
+ Conn: tlsConn,
+ localPeer: t.localPeer,
+ remotePeer: remotePeerID,
+ remotePubKey: remotePubKey,
+ connectionState: network.ConnectionState{
+ StreamMultiplexer: protocol.ID(nextProto),
+ UsedEarlyMuxerNegotiation: nextProto != "",
+ },
+ }, nil
+}
+
+func (t *Transport) ID() protocol.ID {
+ return t.protocolID
+}
diff --git a/p2p/security/tls/transport_test.go b/p2p/security/tls/transport_test.go
new file mode 100644
index 0000000000..b53d9bf0f6
--- /dev/null
+++ b/p2p/security/tls/transport_test.go
@@ -0,0 +1,711 @@
+package libp2ptls
+
+import (
+ "context"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/tls"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+ "fmt"
+ "math/big"
+ mrand "math/rand"
+ "net"
+ "runtime"
+ "strings"
+ "testing"
+ "time"
+
+ ic "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/protocol"
+ "github.com/libp2p/go-libp2p/core/sec"
+ tptu "github.com/libp2p/go-libp2p/p2p/net/upgrader"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func createPeer(t *testing.T) (peer.ID, ic.PrivKey) {
+ var priv ic.PrivKey
+ var err error
+ switch mrand.Int() % 4 {
+ case 0:
+ priv, _, err = ic.GenerateECDSAKeyPair(rand.Reader)
+ case 1:
+ priv, _, err = ic.GenerateRSAKeyPair(2048, rand.Reader)
+ case 2:
+ priv, _, err = ic.GenerateEd25519Key(rand.Reader)
+ case 3:
+ priv, _, err = ic.GenerateSecp256k1Key(rand.Reader)
+ }
+ require.NoError(t, err)
+ id, err := peer.IDFromPrivateKey(priv)
+ require.NoError(t, err)
+ t.Logf("using a %s key: %s", priv.Type(), id)
+ return id, priv
+}
+
+func connect(t *testing.T) (net.Conn, net.Conn) {
+ ln, err := net.ListenTCP("tcp", nil)
+ require.NoError(t, err)
+ defer ln.Close()
+ serverConnChan := make(chan *net.TCPConn)
+ go func() {
+ conn, err := ln.Accept()
+ assert.NoError(t, err)
+ sconn := conn.(*net.TCPConn)
+ serverConnChan <- sconn
+ }()
+ conn, err := net.DialTCP("tcp", nil, ln.Addr().(*net.TCPAddr))
+ require.NoError(t, err)
+ sconn := <-serverConnChan
+ // On Windows we have to set linger to 0, otherwise we'll occasionally run into errors like the following:
+ // "connectex: Only one usage of each socket address (protocol/network address/port) is normally permitted."
+ // See https://github.com/libp2p/go-libp2p/issues/1529.
+ conn.SetLinger(0)
+ sconn.SetLinger(0)
+ t.Cleanup(func() {
+ conn.Close()
+ sconn.Close()
+ })
+ return conn, sconn
+}
+
+func isWindowsTCPCloseError(err error) bool {
+ if runtime.GOOS != "windows" {
+ return false
+ }
+ return strings.Contains(err.Error(), "wsarecv: An existing connection was forcibly closed by the remote host")
+}
+
+func TestHandshakeSucceeds(t *testing.T) {
+ clientID, clientKey := createPeer(t)
+ serverID, serverKey := createPeer(t)
+
+ handshake := func(t *testing.T, clientTransport *Transport, serverTransport *Transport) {
+ clientInsecureConn, serverInsecureConn := connect(t)
+
+ serverConnChan := make(chan sec.SecureConn)
+ go func() {
+ serverConn, err := serverTransport.SecureInbound(context.Background(), serverInsecureConn, "")
+ require.NoError(t, err)
+ serverConnChan <- serverConn
+ }()
+
+ clientConn, err := clientTransport.SecureOutbound(context.Background(), clientInsecureConn, serverID)
+ require.NoError(t, err)
+ defer clientConn.Close()
+
+ var serverConn sec.SecureConn
+ select {
+ case serverConn = <-serverConnChan:
+ case <-time.After(250 * time.Millisecond):
+ t.Fatal("expected the server to accept a connection")
+ }
+ defer serverConn.Close()
+
+		require.Equal(t, clientID, clientConn.LocalPeer())
+		require.Equal(t, serverID, serverConn.LocalPeer())
+		require.Equal(t, serverID, clientConn.RemotePeer())
+		require.Equal(t, clientID, serverConn.RemotePeer())
+ require.True(t, clientConn.RemotePublicKey().Equals(serverKey.GetPublic()), "server public key mismatch")
+ require.True(t, serverConn.RemotePublicKey().Equals(clientKey.GetPublic()), "client public key mismatch")
+ // exchange some data
+ _, err = serverConn.Write([]byte("foobar"))
+ require.NoError(t, err)
+ b := make([]byte, 6)
+ _, err = clientConn.Read(b)
+ require.NoError(t, err)
+ require.Equal(t, "foobar", string(b))
+ }
+
+ // Use standard transports with default TLS configuration
+ clientTransport, err := New(ID, clientKey, nil)
+ require.NoError(t, err)
+ serverTransport, err := New(ID, serverKey, nil)
+ require.NoError(t, err)
+
+ t.Run("standard TLS with extension not critical", func(t *testing.T) {
+ handshake(t, clientTransport, serverTransport)
+ })
+
+ t.Run("standard TLS with extension critical", func(t *testing.T) {
+ extensionCritical = true
+ t.Cleanup(func() { extensionCritical = false })
+
+ handshake(t, clientTransport, serverTransport)
+ })
+
+ // Use transports with custom TLS certificates
+
+ // override client identity to use a custom certificate
+ clientCertTmlp, err := certTemplate()
+ require.NoError(t, err)
+
+ clientCertTmlp.Subject.CommonName = "client.test.name"
+ clientCertTmlp.EmailAddresses = []string{"client-unittest@example.com"}
+
+ clientTransport.identity, err = NewIdentity(clientKey, WithCertTemplate(clientCertTmlp))
+ require.NoError(t, err)
+
+ // override server identity to use a custom certificate
+ serverCertTmpl, err := certTemplate()
+ require.NoError(t, err)
+
+ serverCertTmpl.Subject.CommonName = "server.test.name"
+ serverCertTmpl.EmailAddresses = []string{"server-unittest@example.com"}
+
+ serverTransport.identity, err = NewIdentity(serverKey, WithCertTemplate(serverCertTmpl))
+ require.NoError(t, err)
+
+ t.Run("custom TLS with extension not critical", func(t *testing.T) {
+ handshake(t, clientTransport, serverTransport)
+ })
+
+ t.Run("custom TLS with extension critical", func(t *testing.T) {
+ extensionCritical = true
+ t.Cleanup(func() { extensionCritical = false })
+
+ handshake(t, clientTransport, serverTransport)
+ })
+}
+
+type testcase struct {
+ clientProtos []protocol.ID
+ serverProtos []protocol.ID
+ expectedResult protocol.ID
+}
+
+func TestHandshakeWithNextProtoSucceeds(t *testing.T) {
+ tests := []testcase{
+ {
+ clientProtos: []protocol.ID{"muxer1", "muxer2"},
+ serverProtos: []protocol.ID{"muxer2", "muxer1"},
+ expectedResult: "muxer1",
+ },
+ {
+ clientProtos: []protocol.ID{"muxer1", "muxer2", "libp2p"},
+ serverProtos: []protocol.ID{"muxer2", "muxer1", "libp2p"},
+ expectedResult: "muxer1",
+ },
+ {
+ clientProtos: []protocol.ID{"muxer1", "libp2p"},
+ serverProtos: []protocol.ID{"libp2p"},
+ expectedResult: "",
+ },
+ {
+ clientProtos: []protocol.ID{"libp2p"},
+ serverProtos: []protocol.ID{"libp2p"},
+ expectedResult: "",
+ },
+ {
+ clientProtos: []protocol.ID{"muxer1"},
+ serverProtos: []protocol.ID{},
+ expectedResult: "",
+ },
+ {
+ clientProtos: []protocol.ID{},
+ serverProtos: []protocol.ID{"muxer1"},
+ expectedResult: "",
+ },
+ {
+ clientProtos: []protocol.ID{"muxer2"},
+ serverProtos: []protocol.ID{"muxer1"},
+ expectedResult: "",
+ },
+ }
+
+ clientID, clientKey := createPeer(t)
+ serverID, serverKey := createPeer(t)
+
+ handshake := func(t *testing.T, clientTransport *Transport, serverTransport *Transport, expectedMuxer protocol.ID) {
+ clientInsecureConn, serverInsecureConn := connect(t)
+
+ serverConnChan := make(chan sec.SecureConn)
+ go func() {
+ serverConn, err := serverTransport.SecureInbound(context.Background(), serverInsecureConn, "")
+ require.NoError(t, err)
+ serverConnChan <- serverConn
+ }()
+
+ clientConn, err := clientTransport.SecureOutbound(context.Background(), clientInsecureConn, serverID)
+ require.NoError(t, err)
+ defer clientConn.Close()
+
+ var serverConn sec.SecureConn
+ select {
+ case serverConn = <-serverConnChan:
+ case <-time.After(250 * time.Millisecond):
+ t.Fatal("expected the server to accept a connection")
+ }
+ defer serverConn.Close()
+
+ require.Equal(t, clientID, clientConn.LocalPeer())
+ require.Equal(t, serverID, serverConn.LocalPeer())
+ require.Equal(t, serverID, clientConn.RemotePeer())
+ require.Equal(t, clientID, serverConn.RemotePeer())
+ require.True(t, clientConn.RemotePublicKey().Equals(serverKey.GetPublic()), "server public key mismatch")
+ require.True(t, serverConn.RemotePublicKey().Equals(clientKey.GetPublic()), "client public key mismatch")
+ require.Equal(t, expectedMuxer, clientConn.ConnState().StreamMultiplexer)
+ require.Equal(t, expectedMuxer != "", clientConn.ConnState().UsedEarlyMuxerNegotiation)
+ // exchange some data
+ _, err = serverConn.Write([]byte("foobar"))
+ require.NoError(t, err)
+ b := make([]byte, 6)
+ _, err = clientConn.Read(b)
+ require.NoError(t, err)
+ require.Equal(t, "foobar", string(b))
+ }
+
+ // Iterate through the StreamMultiplexer combinations.
+ for _, test := range tests {
+ clientMuxers := make([]tptu.StreamMuxer, 0, len(test.clientProtos))
+ for _, id := range test.clientProtos {
+ clientMuxers = append(clientMuxers, tptu.StreamMuxer{ID: id})
+ }
+ clientTransport, err := New(ID, clientKey, clientMuxers)
+ require.NoError(t, err)
+		serverMuxers := make([]tptu.StreamMuxer, 0, len(test.serverProtos))
+ for _, id := range test.serverProtos {
+ serverMuxers = append(serverMuxers, tptu.StreamMuxer{ID: id})
+ }
+ serverTransport, err := New(ID, serverKey, serverMuxers)
+ require.NoError(t, err)
+
+ t.Run("TLS handshake with ALPN extension", func(t *testing.T) {
+ handshake(t, clientTransport, serverTransport, test.expectedResult)
+ })
+ }
+}
+
+// crypto/tls' cancellation logic works by spinning up a separate goroutine that watches the ctx.
+// If the ctx is canceled, it kills the handshake.
+// We need to make sure that the handshake doesn't complete before that goroutine picks up the cancellation.
+type delayedConn struct {
+ net.Conn
+ delay time.Duration
+}
+
+func (c *delayedConn) Read(b []byte) (int, error) {
+ time.Sleep(c.delay)
+ return c.Conn.Read(b)
+}
+
+func TestHandshakeConnectionCancellations(t *testing.T) {
+ _, clientKey := createPeer(t)
+ serverID, serverKey := createPeer(t)
+
+ clientTransport, err := New(ID, clientKey, nil)
+ require.NoError(t, err)
+ serverTransport, err := New(ID, serverKey, nil)
+ require.NoError(t, err)
+
+ t.Run("cancel outgoing connection", func(t *testing.T) {
+ clientInsecureConn, serverInsecureConn := connect(t)
+
+ errChan := make(chan error)
+ go func() {
+ conn, err := serverTransport.SecureInbound(context.Background(), serverInsecureConn, "")
+			// crypto/tls' context handling works by spinning up a separate goroutine that watches the context,
+ // and closes the underlying connection when that context is canceled.
+ // It is therefore not guaranteed (but very likely) that this happens _during_ the TLS handshake.
+ if err == nil {
+ _, err = conn.Read([]byte{0})
+ }
+ errChan <- err
+ }()
+ ctx, cancel := context.WithCancel(context.Background())
+ cancel()
+ _, err = clientTransport.SecureOutbound(ctx, &delayedConn{Conn: clientInsecureConn, delay: 5 * time.Millisecond}, serverID)
+ require.ErrorIs(t, err, context.Canceled)
+ require.Error(t, <-errChan)
+ })
+
+ t.Run("cancel incoming connection", func(t *testing.T) {
+ clientInsecureConn, serverInsecureConn := connect(t)
+
+ errChan := make(chan error)
+ go func() {
+ ctx, cancel := context.WithCancel(context.Background())
+ cancel()
+ conn, err := serverTransport.SecureInbound(ctx, &delayedConn{Conn: serverInsecureConn, delay: 5 * time.Millisecond}, "")
+			// crypto/tls' context handling works by spinning up a separate goroutine that watches the context,
+ // and closes the underlying connection when that context is canceled.
+ // It is therefore not guaranteed (but very likely) that this happens _during_ the TLS handshake.
+ if err == nil {
+ _, err = conn.Read([]byte{0})
+ }
+ errChan <- err
+ }()
+ _, err = clientTransport.SecureOutbound(context.Background(), clientInsecureConn, serverID)
+ require.Error(t, err)
+ require.ErrorIs(t, <-errChan, context.Canceled)
+ })
+}
+
+func TestPeerIDMismatch(t *testing.T) {
+ _, clientKey := createPeer(t)
+ serverID, serverKey := createPeer(t)
+
+ serverTransport, err := New(ID, serverKey, nil)
+ require.NoError(t, err)
+ clientTransport, err := New(ID, clientKey, nil)
+ require.NoError(t, err)
+
+ t.Run("for outgoing connections", func(t *testing.T) {
+ clientInsecureConn, serverInsecureConn := connect(t)
+
+ errChan := make(chan error)
+ go func() {
+ conn, err := serverTransport.SecureInbound(context.Background(), serverInsecureConn, "")
+			// crypto/tls' context handling works by spinning up a separate goroutine that watches the context,
+ // and closes the underlying connection when that context is canceled.
+ // It is therefore not guaranteed (but very likely) that this happens _during_ the TLS handshake.
+ if err == nil {
+ _, err = conn.Read([]byte{0})
+ }
+ errChan <- err
+ }()
+
+ // dial, but expect the wrong peer ID
+ thirdPartyID, _ := createPeer(t)
+ _, err = clientTransport.SecureOutbound(context.Background(), clientInsecureConn, thirdPartyID)
+ require.Error(t, err)
+ var mismatchErr sec.ErrPeerIDMismatch
+ require.ErrorAs(t, err, &mismatchErr)
+ require.Equal(t, mismatchErr.Expected, thirdPartyID)
+ require.Equal(t, mismatchErr.Actual, serverID)
+
+ var serverErr error
+ select {
+ case serverErr = <-errChan:
+ case <-time.After(250 * time.Millisecond):
+ t.Fatal("expected handshake to return on the server side")
+ }
+ require.Error(t, serverErr)
+ require.Contains(t, serverErr.Error(), "tls: bad certificate")
+ })
+
+ t.Run("for incoming connections", func(t *testing.T) {
+ clientInsecureConn, serverInsecureConn := connect(t)
+
+ errChan := make(chan error)
+ thirdPartyID, _ := createPeer(t)
+ go func() {
+ // expect the wrong peer ID
+ _, err := serverTransport.SecureInbound(context.Background(), serverInsecureConn, thirdPartyID)
+ errChan <- err
+ }()
+
+ conn, err := clientTransport.SecureOutbound(context.Background(), clientInsecureConn, serverID)
+ require.NoError(t, err)
+ _, err = conn.Read([]byte{0})
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "tls: bad certificate")
+
+ var serverErr error
+ select {
+ case serverErr = <-errChan:
+ case <-time.After(250 * time.Millisecond):
+ t.Fatal("expected handshake to return on the server side")
+ }
+ require.Error(t, serverErr)
+ var mismatchErr sec.ErrPeerIDMismatch
+ require.ErrorAs(t, serverErr, &mismatchErr)
+ require.Equal(t, mismatchErr.Expected, thirdPartyID)
+ require.Equal(t, mismatchErr.Actual, clientTransport.localPeer)
+ })
+}
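+
+// For callers, the typed error asserted above can be unpacked the same way;
+// a usage sketch (assuming only the fields exercised in this test):
+//
+//	var mismatch sec.ErrPeerIDMismatch
+//	if errors.As(err, &mismatch) {
+//		fmt.Printf("dialed %s but got %s\n", mismatch.Expected, mismatch.Actual)
+//	}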
+
+func TestInvalidCerts(t *testing.T) {
+ _, clientKey := createPeer(t)
+ serverID, serverKey := createPeer(t)
+
+ type transform struct {
+ name string
+ apply func(*Identity)
+ checkErr func(*testing.T, error) // the error that the side validating the chain gets
+ }
+
+ invalidateCertChain := func(identity *Identity) {
+ switch identity.config.Certificates[0].PrivateKey.(type) {
+ case *rsa.PrivateKey:
+ key, err := rsa.GenerateKey(rand.Reader, 2048)
+ require.NoError(t, err)
+ identity.config.Certificates[0].PrivateKey = key
+ case *ecdsa.PrivateKey:
+ key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ require.NoError(t, err)
+ identity.config.Certificates[0].PrivateKey = key
+ default:
+ t.Fatal("unexpected private key type")
+ }
+ }
+
+ twoCerts := func(identity *Identity) {
+ tmpl := &x509.Certificate{SerialNumber: big.NewInt(1)}
+ key1, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ require.NoError(t, err)
+ key2, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ require.NoError(t, err)
+ cert1DER, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key1.Public(), key1)
+ require.NoError(t, err)
+ cert1, err := x509.ParseCertificate(cert1DER)
+ require.NoError(t, err)
+ cert2DER, err := x509.CreateCertificate(rand.Reader, tmpl, cert1, key2.Public(), key1)
+ require.NoError(t, err)
+ identity.config.Certificates = []tls.Certificate{{
+ Certificate: [][]byte{cert2DER, cert1DER},
+ PrivateKey: key2,
+ }}
+ }
+
+ getCertWithKey := func(key crypto.Signer, tmpl *x509.Certificate) tls.Certificate {
+ cert, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key)
+ require.NoError(t, err)
+ return tls.Certificate{
+ Certificate: [][]byte{cert},
+ PrivateKey: key,
+ }
+ }
+
+ getCert := func(tmpl *x509.Certificate) tls.Certificate {
+ key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ require.NoError(t, err)
+ return getCertWithKey(key, tmpl)
+ }
+
+ expiredCert := func(identity *Identity) {
+ cert := getCert(&x509.Certificate{
+ SerialNumber: big.NewInt(1),
+ NotBefore: time.Now().Add(-time.Hour),
+ NotAfter: time.Now().Add(-time.Minute),
+ ExtraExtensions: []pkix.Extension{
+ {Id: extensionID, Value: []byte("foobar")},
+ },
+ })
+ identity.config.Certificates = []tls.Certificate{cert}
+ }
+
+ noKeyExtension := func(identity *Identity) {
+ cert := getCert(&x509.Certificate{
+ SerialNumber: big.NewInt(1),
+ NotBefore: time.Now().Add(-time.Hour),
+ NotAfter: time.Now().Add(time.Hour),
+ })
+ identity.config.Certificates = []tls.Certificate{cert}
+ }
+
+ unparseableKeyExtension := func(identity *Identity) {
+ cert := getCert(&x509.Certificate{
+ SerialNumber: big.NewInt(1),
+ NotBefore: time.Now().Add(-time.Hour),
+ NotAfter: time.Now().Add(time.Hour),
+ ExtraExtensions: []pkix.Extension{
+ {Id: extensionID, Value: []byte("foobar")},
+ },
+ })
+ identity.config.Certificates = []tls.Certificate{cert}
+ }
+
+ unparseableKey := func(identity *Identity) {
+ data, err := asn1.Marshal(signedKey{PubKey: []byte("foobar")})
+ require.NoError(t, err)
+ cert := getCert(&x509.Certificate{
+ SerialNumber: big.NewInt(1),
+ NotBefore: time.Now().Add(-time.Hour),
+ NotAfter: time.Now().Add(time.Hour),
+ ExtraExtensions: []pkix.Extension{
+ {Id: extensionID, Value: data},
+ },
+ })
+ identity.config.Certificates = []tls.Certificate{cert}
+ }
+
+ tooShortSignature := func(identity *Identity) {
+ key, _, err := ic.GenerateSecp256k1Key(rand.Reader)
+ require.NoError(t, err)
+ keyBytes, err := ic.MarshalPublicKey(key.GetPublic())
+ require.NoError(t, err)
+ data, err := asn1.Marshal(signedKey{
+ PubKey: keyBytes,
+ Signature: []byte("foobar"),
+ })
+ require.NoError(t, err)
+ cert := getCert(&x509.Certificate{
+ SerialNumber: big.NewInt(1),
+ NotBefore: time.Now().Add(-time.Hour),
+ NotAfter: time.Now().Add(time.Hour),
+ ExtraExtensions: []pkix.Extension{
+ {Id: extensionID, Value: data},
+ },
+ })
+ identity.config.Certificates = []tls.Certificate{cert}
+ }
+
+ invalidSignature := func(identity *Identity) {
+ key, _, err := ic.GenerateSecp256k1Key(rand.Reader)
+ require.NoError(t, err)
+ keyBytes, err := ic.MarshalPublicKey(key.GetPublic())
+ require.NoError(t, err)
+ signature, err := key.Sign([]byte("foobar"))
+ require.NoError(t, err)
+ data, err := asn1.Marshal(signedKey{
+ PubKey: keyBytes,
+ Signature: signature,
+ })
+ require.NoError(t, err)
+ cert := getCert(&x509.Certificate{
+ SerialNumber: big.NewInt(1),
+ NotBefore: time.Now().Add(-time.Hour),
+ NotAfter: time.Now().Add(time.Hour),
+ ExtraExtensions: []pkix.Extension{
+ {Id: extensionID, Value: data},
+ },
+ })
+ identity.config.Certificates = []tls.Certificate{cert}
+ }
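+
+ // For contrast with the broken variants above: per the libp2p TLS spec (an
+ // assumption about the format, not shown in this diff), a valid extension
+ // carries the host's public key plus that key's signature over the string
+ // "libp2p-tls-handshake:" followed by the certificate's DER-encoded public
+ // key, roughly:
+ //
+ //	certKeyDER, _ := x509.MarshalPKIXPublicKey(certKey.Public())
+ //	msg := append([]byte("libp2p-tls-handshake:"), certKeyDER...)
+ //	sig, _ := hostKey.Sign(msg)
+ //	data, _ := asn1.Marshal(signedKey{PubKey: hostKeyBytes, Signature: sig})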
+
+ transforms := []transform{
+ {
+ name: "private key used in the TLS handshake doesn't match the public key in the cert",
+ apply: invalidateCertChain,
+ checkErr: func(t *testing.T, err error) {
+ if err.Error() != "tls: invalid signature by the client certificate: ECDSA verification failure" &&
+ err.Error() != "tls: invalid signature by the server certificate: ECDSA verification failure" {
+ t.Fatalf("unexpected error message: %s", err)
+ }
+ },
+ },
+ {
+ name: "certificate chain contains 2 certs",
+ apply: twoCerts,
+ checkErr: func(t *testing.T, err error) {
+ require.EqualError(t, err, "expected one certificates in the chain")
+ },
+ },
+ {
+ name: "cert is expired",
+ apply: expiredCert,
+ checkErr: func(t *testing.T, err error) {
+ require.Contains(t, err.Error(), "certificate has expired or is not yet valid")
+ },
+ },
+ {
+ name: "cert doesn't have the key extension",
+ apply: noKeyExtension,
+ checkErr: func(t *testing.T, err error) {
+ require.EqualError(t, err, "expected certificate to contain the key extension")
+ },
+ },
+ {
+ name: "key extension not parseable",
+ apply: unparseableKeyExtension,
+ checkErr: func(t *testing.T, err error) { require.Contains(t, err.Error(), "asn1") },
+ },
+ {
+ name: "key protobuf not parseable",
+ apply: unparseableKey,
+ checkErr: func(t *testing.T, err error) {
+ require.Contains(t, err.Error(), "unmarshalling public key failed: proto:")
+ },
+ },
+ {
+ name: "signature is malformed",
+ apply: tooShortSignature,
+ checkErr: func(t *testing.T, err error) {
+ require.Contains(t, err.Error(), "signature verification failed:")
+ },
+ },
+ {
+ name: "signature is invalid",
+ apply: invalidSignature,
+ checkErr: func(t *testing.T, err error) {
+ require.Contains(t, err.Error(), "signature invalid")
+ },
+ },
+ }
+
+ for i := range transforms {
+ tr := transforms[i]
+
+ t.Run(fmt.Sprintf("client offending: %s", tr.name), func(t *testing.T) {
+ serverTransport, err := New(ID, serverKey, nil)
+ require.NoError(t, err)
+ clientTransport, err := New(ID, clientKey, nil)
+ require.NoError(t, err)
+ tr.apply(clientTransport.identity)
+
+ clientInsecureConn, serverInsecureConn := connect(t)
+
+ serverErrChan := make(chan error)
+ go func() {
+ _, err := serverTransport.SecureInbound(context.Background(), serverInsecureConn, "")
+ serverErrChan <- err
+ }()
+
+ conn, err := clientTransport.SecureOutbound(context.Background(), clientInsecureConn, serverID)
+ require.NoError(t, err)
+ clientErrChan := make(chan error)
+ go func() {
+ _, err := conn.Read([]byte{0})
+ clientErrChan <- err
+ }()
+ select {
+ case err := <-clientErrChan:
+ require.Error(t, err)
+ if err.Error() != "remote error: tls: error decrypting message" &&
+ err.Error() != "remote error: tls: bad certificate" &&
+ !isWindowsTCPCloseError(err) {
+ t.Errorf("unexpected error: %s", err.Error())
+ }
+ case <-time.After(250 * time.Millisecond):
+ t.Fatal("expected the server handshake to return")
+ }
+
+ select {
+ case err := <-serverErrChan:
+ require.Error(t, err)
+ tr.checkErr(t, err)
+ case <-time.After(250 * time.Millisecond):
+ t.Fatal("expected the server handshake to return")
+ }
+ })
+
+ t.Run(fmt.Sprintf("server offending: %s", tr.name), func(t *testing.T) {
+ serverTransport, err := New(ID, serverKey, nil)
+ require.NoError(t, err)
+ tr.apply(serverTransport.identity)
+ clientTransport, err := New(ID, clientKey, nil)
+ require.NoError(t, err)
+
+ clientInsecureConn, serverInsecureConn := connect(t)
+
+ errChan := make(chan error)
+ go func() {
+ _, err := serverTransport.SecureInbound(context.Background(), serverInsecureConn, "")
+ errChan <- err
+ }()
+
+ _, err = clientTransport.SecureOutbound(context.Background(), clientInsecureConn, serverID)
+ require.Error(t, err)
+ tr.checkErr(t, err)
+
+ var serverErr error
+ select {
+ case serverErr = <-errChan:
+ case <-time.After(250 * time.Millisecond):
+ t.Fatal("expected the server handshake to return")
+ }
+ require.Error(t, serverErr)
+ if !isWindowsTCPCloseError(serverErr) {
+ require.Contains(t, serverErr.Error(), "remote error: tls:")
+ }
+ })
+ }
+}
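+
+// isWindowsTCPCloseError (defined elsewhere in this package) is assumed to
+// match the "wsarecv: An existing connection was forcibly closed" errors that
+// Windows surfaces in place of a clean TLS alert when the peer tears down the
+// TCP connection.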
diff --git a/p2p/test/backpressure/backpressure_test.go b/p2p/test/backpressure/backpressure_test.go
index 8e783118db..b60f15d152 100644
--- a/p2p/test/backpressure/backpressure_test.go
+++ b/p2p/test/backpressure/backpressure_test.go
@@ -2,380 +2,59 @@ package backpressure_tests
import (
"context"
- "io"
- "math/rand"
+ "os"
"testing"
"time"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/protocol"
bhost "github.com/libp2p/go-libp2p/p2p/host/basic"
+ swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
- u "github.com/ipfs/go-ipfs-util"
- logging "github.com/ipfs/go-log"
- host "github.com/libp2p/go-libp2p-host"
- inet "github.com/libp2p/go-libp2p-net"
- testutil "github.com/libp2p/go-libp2p-netutil"
- peer "github.com/libp2p/go-libp2p-peer"
- protocol "github.com/libp2p/go-libp2p-protocol"
+ logging "github.com/libp2p/go-libp2p/gologshim"
+ "github.com/stretchr/testify/require"
)
var log = logging.Logger("backpressure")
-// TestBackpressureStreamHandler tests whether mux handler
-// ratelimiting works. Meaning, since the handler is sequential
-// it should block senders.
-//
-// Important note: spdystream (which peerstream uses) has a set
-// of n workers (n=spdsystream.FRAME_WORKERS) which handle new
-// frames, including those starting new streams. So all of them
-// can be in the handler at one time. Also, the sending side
-// does not rate limit unless we call stream.Wait()
-//
-//
-// Note: right now, this happens muxer-wide. the muxer should
-// learn to flow control, so handlers cant block each other.
-func TestBackpressureStreamHandler(t *testing.T) {
- t.Skip(`Sadly, as cool as this test is, it doesn't work
-Because spdystream doesnt handle stream open backpressure
-well IMO. I'll see about rewriting that part when it becomes
-a problem.
-`)
-
- // a number of concurrent request handlers
- limit := 10
-
- // our way to signal that we're done with 1 request
- requestHandled := make(chan struct{})
-
- // handler rate limiting
- receiverRatelimit := make(chan struct{}, limit)
- for i := 0; i < limit; i++ {
- receiverRatelimit <- struct{}{}
- }
-
- // sender counter of successfully opened streams
- senderOpened := make(chan struct{}, limit*100)
-
- // sender signals it's done (errored out)
- senderDone := make(chan struct{})
-
- // the receiver handles requests with some rate limiting
- receiver := func(s inet.Stream) {
- log.Debug("receiver received a stream")
-
- <-receiverRatelimit // acquire
- go func() {
- // our request handler. can do stuff here. we
- // simulate something taking time by waiting
- // on requestHandled
- log.Debug("request worker handling...")
- <-requestHandled
- log.Debug("request worker done!")
- receiverRatelimit <- struct{}{} // release
- }()
- }
-
- // the sender opens streams as fast as possible
- sender := func(host host.Host, remote peer.ID) {
- var s inet.Stream
- var err error
- defer func() {
- t.Error(err)
- log.Debug("sender error. exiting.")
- senderDone <- struct{}{}
- }()
-
- for {
- s, err = host.NewStream(context.Background(), remote, protocol.TestingID)
- if err != nil {
- return
- }
-
- _ = s
- // if err = s.SwarmStream().Stream().Wait(); err != nil {
- // return
- // }
-
- // "count" another successfully opened stream
- // (large buffer so shouldn't block in normal operation)
- log.Debug("sender opened another stream!")
- senderOpened <- struct{}{}
- }
- }
-
- // count our senderOpened events
- countStreamsOpenedBySender := func(min int) int {
- opened := 0
- for opened < min {
- log.Debugf("countStreamsOpenedBySender got %d (min %d)", opened, min)
- select {
- case <-senderOpened:
- opened++
- case <-time.After(10 * time.Millisecond):
- }
- }
- return opened
- }
-
- // count our received events
- // waitForNReceivedStreams := func(n int) {
- // for n > 0 {
- // log.Debugf("waiting for %d received streams...", n)
- // select {
- // case <-receiverRatelimit:
- // n--
- // }
- // }
- // }
-
- testStreamsOpened := func(expected int) {
- log.Debugf("testing rate limited to %d streams", expected)
- if n := countStreamsOpenedBySender(expected); n != expected {
- t.Fatalf("rate limiting did not work :( -- %d != %d", expected, n)
- }
- }
-
- // ok that's enough setup. let's do it!
-
- ctx := context.Background()
- h1 := bhost.New(testutil.GenSwarmNetwork(t, ctx))
- h2 := bhost.New(testutil.GenSwarmNetwork(t, ctx))
-
- // setup receiver handler
- h1.SetStreamHandler(protocol.TestingID, receiver)
-
- h2pi := h2.Peerstore().PeerInfo(h2.ID())
- log.Debugf("dialing %s", h2pi.Addrs)
- if err := h1.Connect(ctx, h2pi); err != nil {
- t.Fatal("Failed to connect:", err)
- }
-
- // launch sender!
- go sender(h2, h1.ID())
-
- // ok, what do we expect to happen? the receiver should
- // receive 10 requests and stop receiving, blocking the sender.
- // we can test this by counting 10x senderOpened requests
-
- <-senderOpened // wait for the sender to successfully open some.
- testStreamsOpened(limit - 1)
-
- // let's "handle" 3 requests.
- <-requestHandled
- <-requestHandled
- <-requestHandled
- // the sender should've now been able to open exactly 3 more.
-
- testStreamsOpened(3)
-
- // shouldn't have opened anything more
- testStreamsOpened(0)
-
- // let's "handle" 100 requests in batches of 5
- for i := 0; i < 20; i++ {
- <-requestHandled
- <-requestHandled
- <-requestHandled
- <-requestHandled
- <-requestHandled
- testStreamsOpened(5)
- }
-
- // success!
-
- // now for the sugar on top: let's tear down the receiver. it should
- // exit the sender.
- h1.Close()
-
- // shouldn't have opened anything more
- testStreamsOpened(0)
-
- select {
- case <-time.After(100 * time.Millisecond):
- t.Error("receiver shutdown failed to exit sender")
- case <-senderDone:
- log.Info("handler backpressure works!")
- }
-}
-
// TestStBackpressureStreamWrite tests whether streams see proper
// backpressure when writing data over the network streams.
func TestStBackpressureStreamWrite(t *testing.T) {
-
- // senderWrote signals that the sender wrote bytes to remote.
- // the value is the count of bytes written.
- senderWrote := make(chan int, 10000)
-
- // sender signals it's done (errored out)
- senderDone := make(chan struct{})
-
- // writeStats lets us listen to all the writes and return
- // how many happened and how much was written
- writeStats := func() (int, int) {
- writes := 0
- bytes := 0
- for {
- select {
- case n := <-senderWrote:
- writes++
- bytes = bytes + n
- default:
- log.Debugf("stats: sender wrote %d bytes, %d writes", bytes, writes)
- return bytes, writes
- }
- }
- }
-
- // sender attempts to write as fast as possible, signaling on the
- // completion of every write. This makes it possible to see how
- // fast it's actually writing. We pair this with a receiver
- // that waits for a signal to read.
- sender := func(s inet.Stream) {
- defer func() {
- s.Close()
- senderDone <- struct{}{}
- }()
-
- // ready a buffer of random data
- buf := make([]byte, 65536)
- u.NewTimeSeededRand().Read(buf)
-
- for {
- // send a randomly sized subchunk
- from := rand.Intn(len(buf) / 2)
- to := rand.Intn(len(buf) / 2)
- sendbuf := buf[from : from+to]
-
- n, err := s.Write(sendbuf)
- if err != nil {
- log.Debug("sender error. exiting:", err)
- return
- }
-
- log.Debugf("sender wrote %d bytes", n)
- senderWrote <- n
- }
- }
-
- // receive a number of bytes from a stream.
- // returns the number of bytes written.
- receive := func(s inet.Stream, expect int) {
- log.Debugf("receiver to read %d bytes", expect)
- rbuf := make([]byte, expect)
- n, err := io.ReadFull(s, rbuf)
- if err != nil {
- t.Error("read failed:", err)
- }
- if expect != n {
- t.Errorf("read len differs: %d != %d", expect, n)
- }
- }
-
- // ok let's do it!
-
- // setup the networks
- ctx := context.Background()
- h1 := bhost.New(testutil.GenSwarmNetwork(t, ctx))
- h2 := bhost.New(testutil.GenSwarmNetwork(t, ctx))
-
- // setup sender handler on 1
- h1.SetStreamHandler(protocol.TestingID, sender)
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ h1, err := bhost.NewHost(swarmt.GenSwarm(t), nil)
+ require.NoError(t, err)
+ h1.Start()
+ h2, err := bhost.NewHost(swarmt.GenSwarm(t), nil)
+ require.NoError(t, err)
+ h2.Start()
+
+ // set up a handler on h2 that never reads, so h1's writes eventually back up
+ h2.SetStreamHandler(protocol.TestingID, func(s network.Stream) {
+ defer s.Reset()
+ <-ctx.Done()
+ })
h2pi := h2.Peerstore().PeerInfo(h2.ID())
- log.Debugf("dialing %s", h2pi.Addrs)
+ log.Debug("dialing", "addrs", h2pi.Addrs)
if err := h1.Connect(ctx, h2pi); err != nil {
t.Fatal("Failed to connect:", err)
}
- // open a stream, from 2->1, this is our reader
- s, err := h2.NewStream(context.Background(), h1.ID(), protocol.TestingID)
- if err != nil {
- t.Fatal(err)
- }
-
- // let's make sure r/w works.
- testSenderWrote := func(bytesE int) {
- bytesA, writesA := writeStats()
- if bytesA != bytesE {
- t.Errorf("numbers failed: %d =?= %d bytes, via %d writes", bytesA, bytesE, writesA)
- }
- }
-
- // trigger lazy connection handshaking
- _, err = s.Read(nil)
- if err != nil {
- t.Fatal(err)
- }
-
- // 500ms rounds of lockstep write + drain
- roundsStart := time.Now()
- roundsTotal := 0
- for roundsTotal < (2 << 20) {
- // let the sender fill its buffers, it will stop sending.
- <-time.After(300 * time.Millisecond)
- b, _ := writeStats()
- testSenderWrote(0)
- testSenderWrote(0)
-
- // drain it all, wait again
- receive(s, b)
- roundsTotal = roundsTotal + b
- }
- roundsTime := time.Since(roundsStart)
-
- // now read continously, while we measure stats.
- stop := make(chan struct{})
- contStart := time.Now()
+ // open a stream from 1->2; this is the stream we write to
+ s, err := h1.NewStream(ctx, h2.ID(), protocol.TestingID)
+ require.NoError(t, err)
+ defer s.Reset()
- go func() {
- for {
- select {
- case <-stop:
- return
- default:
- receive(s, 2<<15)
- }
+ // If nobody is reading, we should eventually time out.
+ require.NoError(t, s.SetWriteDeadline(time.Now().Add(100*time.Millisecond)))
+ data := make([]byte, 16*1024)
+ for i := 0; i < 5*1024; i++ { // write at most 80 MiB (5*1024 writes of 16 KiB each)
+ if _, err := s.Write(data); err != nil {
+ require.True(t, os.IsTimeout(err), err)
+ return
}
- }()
-
- contTotal := 0
- for contTotal < (2 << 20) {
- n := <-senderWrote
- contTotal += n
- }
- stop <- struct{}{}
- contTime := time.Since(contStart)
-
- // now compare! continuous should've been faster AND larger
- if roundsTime < contTime {
- t.Error("continuous should have been faster")
- }
-
- if roundsTotal < contTotal {
- t.Error("continuous should have been larger, too!")
}
-
- // and a couple rounds more for good measure ;)
- for i := 0; i < 3; i++ {
- // let the sender fill its buffers, it will stop sending.
- <-time.After(300 * time.Millisecond)
- b, _ := writeStats()
- testSenderWrote(0)
- testSenderWrote(0)
-
- // drain it all, wait again
- receive(s, b)
- }
-
- // this doesn't work :(:
- // // now for the sugar on top: let's tear down the receiver. it should
- // // exit the sender.
- // n1.Close()
- // testSenderWrote(0)
- // testSenderWrote(0)
- // select {
- // case <-time.After(2 * time.Second):
- // t.Error("receiver shutdown failed to exit sender")
- // case <-senderDone:
- // log.Info("handler backpressure works!")
- // }
+ t.Fatal("should have timed out")
}
diff --git a/p2p/test/basichost/basic_host_test.go b/p2p/test/basichost/basic_host_test.go
new file mode 100644
index 0000000000..0197387b1b
--- /dev/null
+++ b/p2p/test/basichost/basic_host_test.go
@@ -0,0 +1,270 @@
+package basichost
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/libp2p/go-libp2p/p2p/net/swarm"
+ "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client"
+ "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay"
+ libp2pquic "github.com/libp2p/go-libp2p/p2p/transport/quic"
+ libp2pwebrtc "github.com/libp2p/go-libp2p/p2p/transport/webrtc"
+ libp2pwebtransport "github.com/libp2p/go-libp2p/p2p/transport/webtransport"
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestNoStreamOverTransientConnection(t *testing.T) {
+ h1, err := libp2p.New(
+ libp2p.NoListenAddrs,
+ libp2p.EnableRelay(),
+ libp2p.ResourceManager(&network.NullResourceManager{}),
+ )
+ require.NoError(t, err)
+
+ h2, err := libp2p.New(
+ libp2p.NoListenAddrs,
+ libp2p.EnableRelay(),
+ libp2p.ResourceManager(&network.NullResourceManager{}),
+ )
+ require.NoError(t, err)
+
+ relay1, err := libp2p.New()
+ require.NoError(t, err)
+
+ _, err = relay.New(relay1)
+ require.NoError(t, err)
+
+ relay1info := peer.AddrInfo{
+ ID: relay1.ID(),
+ Addrs: relay1.Addrs(),
+ }
+ err = h1.Connect(context.Background(), relay1info)
+ require.NoError(t, err)
+
+ err = h2.Connect(context.Background(), relay1info)
+ require.NoError(t, err)
+
+ h2.SetStreamHandler("/testprotocol", func(s network.Stream) {
+ fmt.Println("testprotocol")
+
+ // close the stream to end the exchange
+ s.Close()
+ })
+
+ _, err = client.Reserve(context.Background(), h2, relay1info)
+ require.NoError(t, err)
+
+ relayaddr := ma.StringCast("/p2p/" + relay1info.ID.String() + "/p2p-circuit/p2p/" + h2.ID().String())
+
+ h2Info := peer.AddrInfo{
+ ID: h2.ID(),
+ Addrs: []ma.Multiaddr{relayaddr},
+ }
+ err = h1.Connect(context.Background(), h2Info)
+ require.NoError(t, err)
+
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+ defer cancel()
+ ctx = network.WithNoDial(ctx, "test")
+ _, err = h1.NewStream(ctx, h2.ID(), "/testprotocol")
+
+ require.Error(t, err)
+
+ _, err = h1.NewStream(network.WithAllowLimitedConn(context.Background(), "test"), h2.ID(), "/testprotocol")
+ require.NoError(t, err)
+}
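+
+// The asymmetry above is the point: relayed connections are "limited", so
+// NewStream refuses to use them unless the caller opts in explicitly via
+// network.WithAllowLimitedConn, while network.WithNoDial additionally rules
+// out upgrading to a direct connection.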
+
+func TestNewStreamTransientConnection(t *testing.T) {
+ h1, err := libp2p.New(
+ libp2p.ListenAddrStrings("/ip4/127.0.0.1/udp/0/quic-v1"),
+ libp2p.EnableRelay(),
+ libp2p.ResourceManager(&network.NullResourceManager{}),
+ )
+ require.NoError(t, err)
+
+ h2, err := libp2p.New(
+ libp2p.ListenAddrStrings("/ip4/127.0.0.1/udp/0/quic-v1"),
+ libp2p.EnableRelay(),
+ libp2p.ResourceManager(&network.NullResourceManager{}),
+ )
+ require.NoError(t, err)
+
+ relay1, err := libp2p.New()
+ require.NoError(t, err)
+
+ _, err = relay.New(relay1)
+ require.NoError(t, err)
+
+ relay1info := peer.AddrInfo{
+ ID: relay1.ID(),
+ Addrs: relay1.Addrs(),
+ }
+ err = h1.Connect(context.Background(), relay1info)
+ require.NoError(t, err)
+
+ err = h2.Connect(context.Background(), relay1info)
+ require.NoError(t, err)
+
+ h2.SetStreamHandler("/testprotocol", func(s network.Stream) {
+ fmt.Println("testprotocol")
+
+ // close the stream to end the exchange
+ s.Close()
+ })
+
+ _, err = client.Reserve(context.Background(), h2, relay1info)
+ require.NoError(t, err)
+
+ relayaddr := ma.StringCast("/p2p/" + relay1info.ID.String() + "/p2p-circuit/p2p/" + h2.ID().String())
+
+ h1.Peerstore().AddAddr(h2.ID(), relayaddr, peerstore.TempAddrTTL)
+
+ // NewStream should block on a transient (relayed) connection until we have a direct connection
+ ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
+ defer cancel()
+ s, err := h1.NewStream(ctx, h2.ID(), "/testprotocol")
+ require.ErrorIs(t, err, context.DeadlineExceeded)
+ require.Nil(t, s)
+
+ // NewStream should return a stream if a direct connection is established
+ // while waiting
+ done := make(chan bool, 2)
+ go func() {
+ h1.Peerstore().AddAddrs(h2.ID(), h2.Addrs(), peerstore.TempAddrTTL)
+ ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ ctx = network.WithNoDial(ctx, "test")
+ s, err = h1.NewStream(ctx, h2.ID(), "/testprotocol")
+ require.NoError(t, err)
+ require.NotNil(t, s)
+ defer s.Close()
+ require.Equal(t, network.DirInbound, s.Conn().Stat().Direction)
+ done <- true
+ }()
+ go func() {
+ // connect h2 to h1 simulating connection reversal
+ h2.Peerstore().AddAddrs(h1.ID(), h1.Addrs(), peerstore.TempAddrTTL)
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
+ defer cancel()
+ ctx = network.WithForceDirectDial(ctx, "test")
+ err := h2.Connect(ctx, peer.AddrInfo{ID: h1.ID()})
+ assert.NoError(t, err)
+ done <- true
+ }()
+ <-done
+ <-done
+}
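+
+// A sketch of the ordering assumed above: h1's NewStream parks on the
+// transient relay connection until the swarm sees a direct connection (here
+// produced by h2's forced direct dial) and then attaches the stream to it,
+// which is why the stream counts as inbound from h1's perspective.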
+
+func TestAddrFactorCertHashAppend(t *testing.T) {
+ wtAddr := "/ip4/1.2.3.4/udp/1/quic-v1/webtransport"
+ webrtcAddr := "/ip4/1.2.3.4/udp/2/webrtc-direct"
+ addrsFactory := func(addrs []ma.Multiaddr) []ma.Multiaddr {
+ return append(addrs,
+ ma.StringCast(wtAddr),
+ ma.StringCast(webrtcAddr),
+ )
+ }
+ h, err := libp2p.New(
+ libp2p.AddrsFactory(addrsFactory),
+ libp2p.Transport(libp2pwebrtc.New),
+ libp2p.Transport(libp2pwebtransport.New),
+ libp2p.ListenAddrStrings(
+ "/ip4/0.0.0.0/udp/0/quic-v1/webtransport",
+ "/ip4/0.0.0.0/udp/0/webrtc-direct",
+ ),
+ )
+ require.NoError(t, err)
+ require.Eventually(t, func() bool {
+ addrs := h.Addrs()
+ var hasWebRTC, hasWebTransport bool
+ for _, addr := range addrs {
+ if strings.HasPrefix(addr.String(), webrtcAddr) {
+ if _, err := addr.ValueForProtocol(ma.P_CERTHASH); err == nil {
+ hasWebRTC = true
+ }
+ }
+ if strings.HasPrefix(addr.String(), wtAddr) {
+ if _, err := addr.ValueForProtocol(ma.P_CERTHASH); err == nil {
+ hasWebTransport = true
+ }
+ }
+ }
+ return hasWebRTC && hasWebTransport
+ }, 5*time.Second, 100*time.Millisecond)
+}
+
+func TestOnlyWebRTCDirectDialNoDelay(t *testing.T) {
+ // This tests that dials to a peer that only has webrtc-direct addresses
+ // are made immediately, not delayed by the dial ranker.
+ h1, err := libp2p.New(
+ libp2p.Transport(libp2pwebrtc.New),
+ libp2p.ListenAddrStrings(
+ "/ip4/0.0.0.0/udp/0/webrtc-direct",
+ ),
+ )
+ require.NoError(t, err)
+ h2, err := libp2p.New(
+ libp2p.Transport(libp2pwebrtc.New),
+ libp2p.NoListenAddrs,
+ )
+ require.NoError(t, err)
+
+ ctx, cancel := context.WithTimeout(context.Background(), swarm.PrivateOtherDelay-10*time.Millisecond)
+ defer cancel()
+ err = h2.Connect(ctx, peer.AddrInfo{ID: h1.ID(), Addrs: h1.Addrs()})
+ require.NoError(t, err)
+}
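+
+// The timeout above is deliberately just under swarm.PrivateOtherDelay: if
+// the webrtc-direct dial were queued behind the ranker's delay instead of
+// fired immediately, Connect could not finish inside this budget.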
+
+func TestWebRTCWithQUICManyConnections(t *testing.T) {
+ // Regression test for: https://github.com/libp2p/js-libp2p/issues/2805
+
+ // The server has both /quic-v1 and /webrtc-direct listen addresses
+ h, err := libp2p.New(
+ libp2p.Transport(libp2pquic.NewTransport),
+ libp2p.Transport(libp2pwebrtc.New),
+ libp2p.ListenAddrStrings("/ip4/0.0.0.0/udp/0/quic-v1"),
+ libp2p.ListenAddrStrings("/ip4/0.0.0.0/udp/0/webrtc-direct"),
+ libp2p.ResourceManager(&network.NullResourceManager{}),
+ )
+ require.NoError(t, err)
+ defer h.Close()
+
+ const N = 200
+ // These N dialers have both /quic-v1 and /webrtc-direct transports
+ var dialers [N]host.Host
+ for i := 0; i < N; i++ {
+ dialers[i], err = libp2p.New(libp2p.NoListenAddrs)
+ require.NoError(t, err)
+ defer dialers[i].Close()
+ }
+ // This dialer has only /webrtc-direct transport
+ d, err := libp2p.New(libp2p.Transport(libp2pwebrtc.New), libp2p.NoListenAddrs)
+ require.NoError(t, err)
+ defer d.Close()
+
+ for i := 0; i < N; i++ {
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ // With happy eyeballs these dialers will connect over only /quic-v1
+ // and not stall the /webrtc-direct handshake goroutines.
+ // It is fine if the dial fails; we just want to ensure that there's space
+ // in the /webrtc-direct listen queue.
+ _ = dialers[i].Connect(ctx, peer.AddrInfo{ID: h.ID(), Addrs: h.Addrs()})
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ // The webrtc only dialer should be able to connect to the peer
+ err = d.Connect(ctx, peer.AddrInfo{ID: h.ID(), Addrs: h.Addrs()})
+ require.NoError(t, err)
+}
diff --git a/p2p/test/negotiation/muxer_test.go b/p2p/test/negotiation/muxer_test.go
new file mode 100644
index 0000000000..52c662236c
--- /dev/null
+++ b/p2p/test/negotiation/muxer_test.go
@@ -0,0 +1,125 @@
+package negotiation
+
+import (
+ "context"
+ "crypto/rand"
+ "fmt"
+ "testing"
+
+ "github.com/libp2p/go-libp2p"
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/protocol"
+ "github.com/libp2p/go-libp2p/core/sec/insecure"
+ "github.com/libp2p/go-libp2p/p2p/muxer/yamux"
+ "github.com/libp2p/go-libp2p/p2p/security/noise"
+ tls "github.com/libp2p/go-libp2p/p2p/security/tls"
+ "github.com/libp2p/go-libp2p/p2p/transport/tcp"
+
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ yamuxOpt = libp2p.Muxer("/yamux", yamux.DefaultTransport)
+ anotherYamuxOpt = libp2p.Muxer("/another-yamux", yamux.DefaultTransport)
+)
+
+type testcase struct {
+ Name string
+ ServerPreference []libp2p.Option
+ ClientPreference []libp2p.Option
+
+ Error string
+ Expected protocol.ID
+}
+
+type security struct {
+ Name string
+ Option libp2p.Option
+}
+
+func TestMuxerNegotiation(t *testing.T) {
+ testcases := []testcase{
+ {
+ Name: "server and client have the same preference",
+ ServerPreference: []libp2p.Option{yamuxOpt, anotherYamuxOpt},
+ ClientPreference: []libp2p.Option{yamuxOpt, anotherYamuxOpt},
+ Expected: "/yamux",
+ },
+ {
+ Name: "client only supports one muxer",
+ ServerPreference: []libp2p.Option{yamuxOpt, anotherYamuxOpt},
+ ClientPreference: []libp2p.Option{yamuxOpt},
+ Expected: "/yamux",
+ },
+ {
+ Name: "server only supports one muxer",
+ ServerPreference: []libp2p.Option{yamuxOpt},
+ ClientPreference: []libp2p.Option{anotherYamuxOpt, yamuxOpt},
+ Expected: "/yamux",
+ },
+ {
+ Name: "client preference preferred",
+ ServerPreference: []libp2p.Option{yamuxOpt, anotherYamuxOpt},
+ ClientPreference: []libp2p.Option{anotherYamuxOpt, yamuxOpt},
+ Expected: "/another-yamux",
+ },
+ {
+ Name: "no preference overlap",
+ ServerPreference: []libp2p.Option{yamuxOpt},
+ ClientPreference: []libp2p.Option{anotherYamuxOpt},
+ Error: "failed to negotiate stream multiplexer: protocols not supported",
+ },
+ }
+
+ clientID, _, err := crypto.GenerateEd25519Key(rand.Reader)
+ require.NoError(t, err)
+ serverID, _, err := crypto.GenerateEd25519Key(rand.Reader)
+ require.NoError(t, err)
+
+ securities := []security{
+ {Name: "noise", Option: libp2p.Security("/noise", noise.New)},
+ {Name: "tls", Option: libp2p.Security("/tls", tls.New)},
+ {Name: "insecure", Option: libp2p.Security("/insecure", insecure.NewWithIdentity)},
+ }
+
+ for _, tc := range testcases {
+ tc := tc
+
+ for _, sec := range securities {
+ sec := sec
+
+ t.Run(fmt.Sprintf("%s: %s", sec.Name, tc.Name), func(t *testing.T) {
+ server, err := libp2p.New(
+ libp2p.Identity(serverID),
+ sec.Option,
+ libp2p.ChainOptions(tc.ServerPreference...),
+ libp2p.Transport(tcp.NewTCPTransport),
+ libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0"),
+ )
+ require.NoError(t, err)
+
+ client, err := libp2p.New(
+ libp2p.Identity(clientID),
+ sec.Option,
+ libp2p.ChainOptions(tc.ClientPreference...),
+ libp2p.Transport(tcp.NewTCPTransport),
+ libp2p.NoListenAddrs,
+ )
+ require.NoError(t, err)
+
+ err = client.Connect(context.Background(), peer.AddrInfo{ID: server.ID(), Addrs: server.Addrs()})
+ if tc.Error != "" {
+ require.Error(t, err)
+ require.ErrorContains(t, err, tc.Error)
+ return
+ }
+
+ require.NoError(t, err)
+ conns := client.Network().ConnsToPeer(server.ID())
+ require.Len(t, conns, 1, "expected exactly one connection")
+ require.Equal(t, tc.Expected, conns[0].ConnState().StreamMultiplexer)
+ })
+ }
+ }
+}
diff --git a/p2p/test/negotiation/security_test.go b/p2p/test/negotiation/security_test.go
new file mode 100644
index 0000000000..b7324744bf
--- /dev/null
+++ b/p2p/test/negotiation/security_test.go
@@ -0,0 +1,89 @@
+package negotiation
+
+import (
+ "context"
+ "crypto/rand"
+ "testing"
+
+ "github.com/libp2p/go-libp2p"
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/p2p/security/noise"
+ tls "github.com/libp2p/go-libp2p/p2p/security/tls"
+ "github.com/libp2p/go-libp2p/p2p/transport/tcp"
+
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ noiseOpt = libp2p.Security("/noise", noise.New)
+ tlsOpt = libp2p.Security("/tls", tls.New)
+)
+
+func TestSecurityNegotiation(t *testing.T) {
+ testcases := []testcase{
+ {
+ Name: "server and client have the same preference",
+ ServerPreference: []libp2p.Option{tlsOpt, noiseOpt},
+ ClientPreference: []libp2p.Option{tlsOpt, noiseOpt},
+ Expected: "/tls",
+ },
+ {
+ Name: "client only supports one security",
+ ServerPreference: []libp2p.Option{tlsOpt, noiseOpt},
+ ClientPreference: []libp2p.Option{noiseOpt},
+ Expected: "/noise",
+ },
+ {
+ Name: "server only supports one security",
+ ServerPreference: []libp2p.Option{noiseOpt},
+ ClientPreference: []libp2p.Option{tlsOpt, noiseOpt},
+ Expected: "/noise",
+ },
+ {
+ Name: "no overlap",
+ ServerPreference: []libp2p.Option{noiseOpt},
+ ClientPreference: []libp2p.Option{tlsOpt},
+ Error: "failed to negotiate security protocol: protocols not supported",
+ },
+ }
+
+ clientID, _, err := crypto.GenerateEd25519Key(rand.Reader)
+ require.NoError(t, err)
+ serverID, _, err := crypto.GenerateEd25519Key(rand.Reader)
+ require.NoError(t, err)
+
+ for _, tc := range testcases {
+ tc := tc
+
+ t.Run(tc.Name, func(t *testing.T) {
+ server, err := libp2p.New(
+ libp2p.Identity(serverID),
+ libp2p.ChainOptions(tc.ServerPreference...),
+ libp2p.Transport(tcp.NewTCPTransport),
+ libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0"),
+ )
+ require.NoError(t, err)
+
+ client, err := libp2p.New(
+ libp2p.Identity(clientID),
+ libp2p.ChainOptions(tc.ClientPreference...),
+ libp2p.Transport(tcp.NewTCPTransport),
+ libp2p.NoListenAddrs,
+ )
+ require.NoError(t, err)
+
+ err = client.Connect(context.Background(), peer.AddrInfo{ID: server.ID(), Addrs: server.Addrs()})
+ if tc.Error != "" {
+ require.Error(t, err)
+ require.ErrorContains(t, err, tc.Error)
+ return
+ }
+
+ require.NoError(t, err)
+ conns := client.Network().ConnsToPeer(server.ID())
+ require.Len(t, conns, 1, "expected exactly one connection")
+ require.Equal(t, tc.Expected, conns[0].ConnState().Security)
+ })
+ }
+}
diff --git a/p2p/test/notifications/notification_test.go b/p2p/test/notifications/notification_test.go
new file mode 100644
index 0000000000..eb26a3fe5e
--- /dev/null
+++ b/p2p/test/notifications/notification_test.go
@@ -0,0 +1,90 @@
+package notifications
+
+import (
+ "strconv"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p"
+ "github.com/libp2p/go-libp2p/core/event"
+ libp2pquic "github.com/libp2p/go-libp2p/p2p/transport/quic"
+ "github.com/libp2p/go-libp2p/p2p/transport/tcp"
+
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/stretchr/testify/require"
+)
+
+func portFromString(t *testing.T, s string) int {
+ t.Helper()
+ p, err := strconv.ParseInt(s, 10, 32)
+ require.NoError(t, err)
+ return int(p)
+}
+
+func TestListenAddressNotif(t *testing.T) {
+ h, err := libp2p.New(
+ libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0"),
+ libp2p.Transport(tcp.NewTCPTransport),
+ libp2p.Transport(libp2pquic.NewTransport),
+ libp2p.DisableRelay(),
+ )
+ require.NoError(t, err)
+ defer h.Close()
+ sub, err := h.EventBus().Subscribe(&event.EvtLocalAddressesUpdated{})
+ require.NoError(t, err)
+ defer sub.Close()
+
+ var initialAddr ma.Multiaddr
+ // make sure the event is emitted for the initial listen address
+ select {
+ case e := <-sub.Out():
+ ev := e.(event.EvtLocalAddressesUpdated)
+ require.Empty(t, ev.Removed)
+ require.Len(t, ev.Current, 1)
+ require.Equal(t, event.Added, ev.Current[0].Action)
+ initialAddr = ev.Current[0].Address
+ portStr, err := initialAddr.ValueForProtocol(ma.P_TCP)
+ require.NoError(t, err)
+ require.NotZero(t, portFromString(t, portStr))
+ case <-time.After(500 * time.Millisecond):
+ t.Fatal("timeout")
+ }
+ listenAddrs, err := h.Network().InterfaceListenAddresses()
+ require.NoError(t, err)
+ require.Equal(t, []ma.Multiaddr{initialAddr}, listenAddrs)
+
+ // now start listening on another address
+ require.NoError(t, h.Network().Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1")))
+ var addedAddr ma.Multiaddr
+ select {
+ case e := <-sub.Out():
+ ev := e.(event.EvtLocalAddressesUpdated)
+ require.Empty(t, ev.Removed)
+ require.Len(t, ev.Current, 2)
+ var maintainedAddr ma.Multiaddr
+ for _, e := range ev.Current {
+ switch e.Action {
+ case event.Added:
+ addedAddr = e.Address
+ case event.Maintained:
+ maintainedAddr = e.Address
+ default:
+ t.Fatal("unexpected action")
+ }
+ }
+ require.Equal(t, initialAddr, maintainedAddr)
+ _, err = addedAddr.ValueForProtocol(ma.P_QUIC_V1)
+ require.NoError(t, err)
+ portStr, err := addedAddr.ValueForProtocol(ma.P_UDP)
+ require.NoError(t, err)
+ require.NotZero(t, portFromString(t, portStr))
+ case <-time.After(500 * time.Millisecond):
+ t.Fatal("timeout")
+ }
+
+ listenAddrs, err = h.Network().InterfaceListenAddresses()
+ require.NoError(t, err)
+ require.Len(t, listenAddrs, 2)
+ require.Contains(t, listenAddrs, initialAddr)
+ require.Contains(t, listenAddrs, addedAddr)
+}
diff --git a/p2p/test/quic/quic_test.go b/p2p/test/quic/quic_test.go
new file mode 100644
index 0000000000..fe52119b89
--- /dev/null
+++ b/p2p/test/quic/quic_test.go
@@ -0,0 +1,90 @@
+package quic_test
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ libp2pquic "github.com/libp2p/go-libp2p/p2p/transport/quic"
+ "github.com/libp2p/go-libp2p/p2p/transport/quicreuse"
+ webtransport "github.com/libp2p/go-libp2p/p2p/transport/webtransport"
+
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/stretchr/testify/require"
+)
+
+func getQUICMultiaddrCode(addr ma.Multiaddr) int {
+ if _, err := addr.ValueForProtocol(ma.P_QUIC); err == nil {
+ return ma.P_QUIC
+ }
+ if _, err := addr.ValueForProtocol(ma.P_QUIC_V1); err == nil {
+ return ma.P_QUIC_V1
+ }
+ return 0
+}
+
+func TestQUICAndWebTransport(t *testing.T) {
+ h1, err := libp2p.New(
+ libp2p.QUICReuse(quicreuse.NewConnManager),
+ libp2p.Transport(libp2pquic.NewTransport),
+ libp2p.Transport(webtransport.New),
+ libp2p.ListenAddrStrings(
+ "/ip4/127.0.0.1/udp/12347/quic-v1",
+ "/ip4/127.0.0.1/udp/12347/quic-v1/webtransport",
+ ),
+ )
+ require.NoError(t, err)
+ defer h1.Close()
+
+ addrs := h1.Addrs()
+ require.Len(t, addrs, 2)
+ var quicV1Addr, webtransportAddr ma.Multiaddr
+ for _, addr := range addrs {
+ if _, err := addr.ValueForProtocol(ma.P_WEBTRANSPORT); err == nil {
+ webtransportAddr = addr
+ } else if _, err := addr.ValueForProtocol(ma.P_QUIC_V1); err == nil {
+ quicV1Addr = addr
+ }
+ }
+ require.NotNil(t, webtransportAddr, "expected to have a WebTransport address")
+ require.NotNil(t, quicV1Addr, "expected to have a QUIC v1 address")
+
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+ // first test that we can dial a QUIC v1
+ h2, err := libp2p.New(
+ libp2p.Transport(libp2pquic.NewTransport),
+ libp2p.NoListenAddrs,
+ )
+ require.NoError(t, err)
+ require.NoError(t, h2.Connect(ctx, peer.AddrInfo{ID: h1.ID(), Addrs: h1.Addrs()}))
+ for _, conns := range [][]network.Conn{h2.Network().ConnsToPeer(h1.ID()), h1.Network().ConnsToPeer(h2.ID())} {
+ require.Len(t, conns, 1)
+ if _, err := conns[0].LocalMultiaddr().ValueForProtocol(ma.P_WEBTRANSPORT); err == nil {
+ t.Fatalf("expected a QUIC connection, got a WebTransport connection (%s <-> %s)", conns[0].LocalMultiaddr(), conns[0].RemoteMultiaddr())
+ }
+ require.Equal(t, ma.P_QUIC_V1, getQUICMultiaddrCode(conns[0].LocalMultiaddr()))
+ require.Equal(t, ma.P_QUIC_V1, getQUICMultiaddrCode(conns[0].RemoteMultiaddr()))
+ }
+ h2.Close()
+
+ // finally, test that we can dial a WebTransport connection
+ h3, err := libp2p.New(
+ libp2p.Transport(webtransport.New),
+ libp2p.NoListenAddrs,
+ )
+ require.NoError(t, err)
+ require.NoError(t, h3.Connect(ctx, peer.AddrInfo{ID: h1.ID(), Addrs: h1.Addrs()}))
+ for _, conns := range [][]network.Conn{h3.Network().ConnsToPeer(h1.ID()), h1.Network().ConnsToPeer(h3.ID())} {
+ require.Len(t, conns, 1)
+ if _, err := conns[0].LocalMultiaddr().ValueForProtocol(ma.P_WEBTRANSPORT); err != nil {
+ t.Fatalf("expected a WebTransport connection, got a QUIC connection (%s <-> %s)", conns[0].LocalMultiaddr(), conns[0].RemoteMultiaddr())
+ }
+ require.Equal(t, ma.P_QUIC_V1, getQUICMultiaddrCode(conns[0].LocalMultiaddr()))
+ require.Equal(t, ma.P_QUIC_V1, getQUICMultiaddrCode(conns[0].RemoteMultiaddr()))
+ }
+ h3.Close()
+}
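+
+// Both listen addresses above deliberately share UDP port 12347: the
+// quicreuse connection manager demultiplexes plain QUIC and WebTransport over
+// a single socket, which is what the libp2p.QUICReuse option wires up (the
+// fixed port just makes the sharing visible in this test).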
diff --git a/p2p/test/reconnects/reconnect_test.go b/p2p/test/reconnects/reconnect_test.go
index e8851efe8c..cf05c80f37 100644
--- a/p2p/test/reconnects/reconnect_test.go
+++ b/p2p/test/reconnects/reconnect_test.go
@@ -4,236 +4,116 @@ import (
"context"
"io"
"math/rand"
+ "runtime"
"sync"
"testing"
"time"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/protocol"
bhost "github.com/libp2p/go-libp2p/p2p/host/basic"
+ swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
- u "github.com/ipfs/go-ipfs-util"
- logging "github.com/ipfs/go-log"
- host "github.com/libp2p/go-libp2p-host"
- inet "github.com/libp2p/go-libp2p-net"
- testutil "github.com/libp2p/go-libp2p-netutil"
- protocol "github.com/libp2p/go-libp2p-protocol"
- swarm "github.com/libp2p/go-libp2p-swarm"
- ps "github.com/libp2p/go-peerstream"
+ "github.com/stretchr/testify/require"
)
-func init() {
- // change the garbage collect timeout for testing.
- ps.GarbageCollectTimeout = 10 * time.Millisecond
-}
-
-var log = logging.Logger("reconnect")
-
-func EchoStreamHandler(stream inet.Stream) {
- c := stream.Conn()
- log.Debugf("%s echoing %s", c.LocalPeer(), c.RemotePeer())
- go func() {
- _, err := io.Copy(stream, stream)
- if err == nil {
- stream.Close()
- } else {
- stream.Reset()
- }
- }()
-}
-
-type sendChans struct {
- send chan struct{}
- sent chan struct{}
- read chan struct{}
- close_ chan struct{}
- closed chan struct{}
-}
-
-func newSendChans() sendChans {
- return sendChans{
- send: make(chan struct{}),
- sent: make(chan struct{}),
- read: make(chan struct{}),
- close_: make(chan struct{}),
- closed: make(chan struct{}),
+func EchoStreamHandler(stream network.Stream) {
+ _, err := io.CopyBuffer(stream, stream, make([]byte, 64)) // use a small buffer here to avoid problems with flow control
+ if err == nil {
+ stream.Close()
+ } else {
+ stream.Reset()
}
}
-func newSender() (chan sendChans, func(s inet.Stream)) {
- scc := make(chan sendChans)
- return scc, func(s inet.Stream) {
- sc := newSendChans()
- scc <- sc
-
- defer func() {
- s.Close()
- sc.closed <- struct{}{}
- }()
-
- buf := make([]byte, 65536)
- buf2 := make([]byte, 65536)
- u.NewTimeSeededRand().Read(buf)
-
- for {
- select {
- case <-sc.close_:
- return
- case <-sc.send:
- }
-
- // send a randomly sized subchunk
- from := rand.Intn(len(buf) / 2)
- to := rand.Intn(len(buf) / 2)
- sendbuf := buf[from : from+to]
-
- log.Debugf("sender sending %d bytes", len(sendbuf))
- n, err := s.Write(sendbuf)
- if err != nil {
- log.Debug("sender error. exiting:", err)
- return
- }
-
- log.Debugf("sender wrote %d bytes", n)
- sc.sent <- struct{}{}
-
- if n, err = io.ReadFull(s, buf2[:len(sendbuf)]); err != nil {
- log.Debug("sender error. failed to read:", err)
- return
- }
-
- log.Debugf("sender read %d bytes", n)
- sc.read <- struct{}{}
+func TestReconnect5(t *testing.T) {
+ runTest := func(t *testing.T, swarmOpt swarmt.Option) {
+ t.Helper()
+ const num = 5
+ hosts := make([]host.Host, 0, num)
+
+ for i := 0; i < num; i++ {
+ h, err := bhost.NewHost(swarmt.GenSwarm(t, swarmOpt), nil)
+ require.NoError(t, err)
+ defer h.Close()
+ h.Start()
+ hosts = append(hosts, h)
+ h.SetStreamHandler(protocol.TestingID, EchoStreamHandler)
}
- }
-}
-
-// TestReconnect tests whether hosts are able to disconnect and reconnect.
-func TestReconnect2(t *testing.T) {
- ctx := context.Background()
- h1 := bhost.New(testutil.GenSwarmNetwork(t, ctx))
- h2 := bhost.New(testutil.GenSwarmNetwork(t, ctx))
- hosts := []host.Host{h1, h2}
- h1.SetStreamHandler(protocol.TestingID, EchoStreamHandler)
- h2.SetStreamHandler(protocol.TestingID, EchoStreamHandler)
-
- rounds := 8
- if testing.Short() {
- rounds = 4
- }
- for i := 0; i < rounds; i++ {
- log.Debugf("TestReconnect: %d/%d\n", i, rounds)
- SubtestConnSendDisc(t, hosts)
+ for i := 0; i < 4; i++ {
+ runRound(t, hosts)
+ }
}
-}
-
-// TestReconnect tests whether hosts are able to disconnect and reconnect.
-func TestReconnect5(t *testing.T) {
- ctx := context.Background()
- h1 := bhost.New(testutil.GenSwarmNetwork(t, ctx))
- h2 := bhost.New(testutil.GenSwarmNetwork(t, ctx))
- h3 := bhost.New(testutil.GenSwarmNetwork(t, ctx))
- h4 := bhost.New(testutil.GenSwarmNetwork(t, ctx))
- h5 := bhost.New(testutil.GenSwarmNetwork(t, ctx))
- hosts := []host.Host{h1, h2, h3, h4, h5}
- h1.SetStreamHandler(protocol.TestingID, EchoStreamHandler)
- h2.SetStreamHandler(protocol.TestingID, EchoStreamHandler)
- h3.SetStreamHandler(protocol.TestingID, EchoStreamHandler)
- h4.SetStreamHandler(protocol.TestingID, EchoStreamHandler)
- h5.SetStreamHandler(protocol.TestingID, EchoStreamHandler)
+ t.Run("using TCP", func(t *testing.T) {
+ if runtime.GOOS == "darwin" {
+ t.Skip("TCP RST handling is flaky in OSX, see https://github.com/golang/go/issues/50254")
+ }
+ runTest(t, swarmt.OptDisableQUIC)
+ })
- rounds := 4
- if testing.Short() {
- rounds = 2
- }
- for i := 0; i < rounds; i++ {
- log.Debugf("TestReconnect: %d/%d\n", i, rounds)
- SubtestConnSendDisc(t, hosts)
- }
+ t.Run("using QUIC", func(t *testing.T) {
+ runTest(t, swarmt.OptDisableTCP)
+ })
}
-func SubtestConnSendDisc(t *testing.T, hosts []host.Host) {
-
- ctx := context.Background()
- numStreams := 3 * len(hosts)
- numMsgs := 10
+func runRound(t *testing.T, hosts []host.Host) {
+ for _, h1 := range hosts {
+ h1.SetStreamHandler(protocol.TestingID, EchoStreamHandler)
- if testing.Short() {
- numStreams = 5 * len(hosts)
- numMsgs = 4
+ for _, h2 := range hosts {
+ h1.Peerstore().AddAddrs(h2.ID(), h2.Addrs(), time.Hour)
+ }
}
- ss, sF := newSender()
-
+ const (
+ numStreams = 5
+ maxDataLen = 64 << 10
+ )
+ rnd := rand.New(rand.NewSource(12345))
+ // exchange some data
for _, h1 := range hosts {
for _, h2 := range hosts {
- if h1.ID() >= h2.ID() {
+ if h1 == h2 {
continue
}
-
- h2pi := h2.Peerstore().PeerInfo(h2.ID())
- log.Debugf("dialing %s", h2pi.Addrs)
- if err := h1.Connect(ctx, h2pi); err != nil {
- t.Fatal("Failed to connect:", err)
+ var wg sync.WaitGroup
+ wg.Add(numStreams)
+ for i := 0; i < numStreams; i++ {
+ data := make([]byte, rnd.Intn(maxDataLen)+1) // use the seeded source so sizes are reproducible
+ rnd.Read(data)
+ go func() {
+ defer wg.Done()
+ str, err := h1.NewStream(context.Background(), h2.ID(), protocol.TestingID)
+ require.NoError(t, err)
+ defer str.Close()
+ _, err = str.Write(data)
+ require.NoError(t, err)
+ }()
}
+ wg.Wait()
}
}
- var wg sync.WaitGroup
- for i := 0; i < numStreams; i++ {
- h1 := hosts[i%len(hosts)]
- h2 := hosts[(i+1)%len(hosts)]
- s, err := h1.NewStream(context.Background(), h2.ID(), protocol.TestingID)
- if err != nil {
- t.Error(err)
- }
-
- wg.Add(1)
- go func(j int) {
- defer wg.Done()
-
- go sF(s)
- log.Debugf("getting handle %d", j)
- sc := <-ss // wait to get handle.
- log.Debugf("spawning worker %d", j)
-
- for k := 0; k < numMsgs; k++ {
- sc.send <- struct{}{}
- <-sc.sent
- log.Debugf("%d sent %d", j, k)
- <-sc.read
- log.Debugf("%d read %d", j, k)
- }
- sc.close_ <- struct{}{}
- <-sc.closed
- log.Debugf("closed %d", j)
- }(i)
- }
- wg.Wait()
-
- for i, h1 := range hosts {
- log.Debugf("host %d has %d conns", i, len(h1.Network().Conns()))
- }
-
+ // disconnect all hosts
for _, h1 := range hosts {
// close connection
cs := h1.Network().Conns()
for _, c := range cs {
- sc := c.(*swarm.Conn)
- if sc.LocalPeer() > sc.RemotePeer() {
- continue // only close it on one side.
- }
-
- log.Debugf("closing: %s", sc.RawConn())
- sc.Close()
+ c.Close()
}
}
- <-time.After(20 * time.Millisecond)
-
- for i, h := range hosts {
- if len(h.Network().Conns()) > 0 {
- t.Fatalf("host %d %s has %d conns! not zero.", i, h.ID(), len(h.Network().Conns()))
+ require.Eventually(t, func() bool {
+ for _, h1 := range hosts {
+ for _, h2 := range hosts {
+ if len(h1.Network().ConnsToPeer(h2.ID())) > 0 {
+ return false
+ }
+ }
}
- }
+ return true
+ }, 5*time.Second, 10*time.Millisecond)
}
diff --git a/p2p/test/resource-manager/echo.go b/p2p/test/resource-manager/echo.go
new file mode 100644
index 0000000000..c6a8ed4632
--- /dev/null
+++ b/p2p/test/resource-manager/echo.go
@@ -0,0 +1,293 @@
+package itest
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "sync"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+
+ logging "github.com/libp2p/go-libp2p/gologshim"
+)
+
+const (
+ EchoService = "test.echo"
+ EchoProtoID = "/test/echo"
+)
+
+var (
+ echoLog = logging.Logger("echo")
+)
+
+type Echo struct {
+ Host host.Host
+
+ mx sync.Mutex
+ status EchoStatus
+
+ beforeReserve, beforeRead, beforeWrite, beforeDone func() error
+ done func()
+}
+
+type EchoStatus struct {
+ StreamsIn int
+ EchosIn, EchosOut int
+ IOErrors int
+ ResourceServiceErrors int
+ ResourceReservationErrors int
+}
+
+func NewEcho(h host.Host) *Echo {
+ e := &Echo{Host: h}
+ h.SetStreamHandler(EchoProtoID, e.handleStream)
+ return e
+}
+
+func (e *Echo) Status() EchoStatus {
+ e.mx.Lock()
+ defer e.mx.Unlock()
+
+ return e.status
+}
+
+func (e *Echo) BeforeReserve(f func() error) {
+ e.mx.Lock()
+ defer e.mx.Unlock()
+
+ e.beforeReserve = f
+}
+
+func (e *Echo) BeforeRead(f func() error) {
+ e.mx.Lock()
+ defer e.mx.Unlock()
+
+ e.beforeRead = f
+}
+
+func (e *Echo) BeforeWrite(f func() error) {
+ e.mx.Lock()
+ defer e.mx.Unlock()
+
+ e.beforeWrite = f
+}
+
+func (e *Echo) BeforeDone(f func() error) {
+ e.mx.Lock()
+ defer e.mx.Unlock()
+
+ e.beforeDone = f
+}
+
+func (e *Echo) Done(f func()) {
+ e.mx.Lock()
+ defer e.mx.Unlock()
+
+ e.done = f
+}
+
+func (e *Echo) getBeforeReserve() func() error {
+ e.mx.Lock()
+ defer e.mx.Unlock()
+
+ return e.beforeReserve
+}
+
+func (e *Echo) getBeforeRead() func() error {
+ e.mx.Lock()
+ defer e.mx.Unlock()
+
+ return e.beforeRead
+}
+
+func (e *Echo) getBeforeWrite() func() error {
+ e.mx.Lock()
+ defer e.mx.Unlock()
+
+ return e.beforeWrite
+}
+
+func (e *Echo) getBeforeDone() func() error {
+ e.mx.Lock()
+ defer e.mx.Unlock()
+
+ return e.beforeDone
+}
+
+func (e *Echo) getDone() func() {
+ e.mx.Lock()
+ defer e.mx.Unlock()
+
+ return e.done
+}
+
+func (e *Echo) handleStream(s network.Stream) {
+ defer s.Close()
+
+ if done := e.getDone(); done != nil {
+ defer done()
+ }
+
+ e.mx.Lock()
+ e.status.StreamsIn++
+ e.mx.Unlock()
+
+ if beforeReserve := e.getBeforeReserve(); beforeReserve != nil {
+ if err := beforeReserve(); err != nil {
+ echoLog.Debug("error syncing before reserve", "err", err)
+
+ s.Reset()
+ return
+ }
+ }
+
+ if err := s.Scope().SetService(EchoService); err != nil {
+ echoLog.Debug("error attaching stream to echo service", "err", err)
+
+ e.mx.Lock()
+ e.status.ResourceServiceErrors++
+ e.mx.Unlock()
+
+ s.Reset()
+ return
+ }
+
+ if err := s.Scope().ReserveMemory(4096, network.ReservationPriorityAlways); err != nil {
+ echoLog.Debug("error reserving memory", "err", err)
+
+ e.mx.Lock()
+ e.status.ResourceReservationErrors++
+ e.mx.Unlock()
+
+ s.Reset()
+ return
+ }
+
+ if beforeRead := e.getBeforeRead(); beforeRead != nil {
+ if err := beforeRead(); err != nil {
+ echoLog.Debug("error syncing before read", "err", err)
+
+ s.Reset()
+ return
+ }
+ }
+
+ buf := make([]byte, 4096)
+
+ s.SetReadDeadline(time.Now().Add(5 * time.Second))
+ n, err := s.Read(buf)
+ switch {
+ case err == io.EOF:
+ if n == 0 {
+ return
+ }
+
+ case err != nil:
+ echoLog.Debug("I/O error", "err", err)
+
+ e.mx.Lock()
+ e.status.IOErrors++
+ e.mx.Unlock()
+
+ s.Reset()
+ return
+ }
+
+ e.mx.Lock()
+ e.status.EchosIn++
+ e.mx.Unlock()
+
+ if beforeWrite := e.getBeforeWrite(); beforeWrite != nil {
+ if err := beforeWrite(); err != nil {
+ echoLog.Debug("error syncing before write", "err", err)
+
+ s.Reset()
+ return
+ }
+ }
+
+ s.SetWriteDeadline(time.Now().Add(5 * time.Second))
+ _, err = s.Write(buf[:n])
+ if err != nil {
+ echoLog.Debug("I/O error", "err", err)
+
+ e.mx.Lock()
+ e.status.IOErrors++
+ e.mx.Unlock()
+
+ s.Reset()
+ return
+ }
+
+ e.mx.Lock()
+ e.status.EchosOut++
+ e.mx.Unlock()
+
+ s.CloseWrite()
+
+ if beforeDone := e.getBeforeDone(); beforeDone != nil {
+ if err := beforeDone(); err != nil {
+ echoLog.Debug("error syncing before done", "err", err)
+
+ s.Reset()
+ }
+ }
+}
+
+func (e *Echo) Echo(p peer.ID, what string) error {
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+ defer cancel()
+
+ s, err := e.Host.NewStream(ctx, p, EchoProtoID)
+ if err != nil {
+ return err
+ }
+ defer s.Close()
+
+ if err := s.Scope().SetService(EchoService); err != nil {
+ echoLog.Debug("error attaching stream to echo service", "err", err)
+
+ s.Reset()
+ return err
+ }
+
+ if err := s.Scope().ReserveMemory(4096, network.ReservationPriorityAlways); err != nil {
+ echoLog.Debug("error reserving memory", "err", err)
+
+ s.Reset()
+ return err
+ }
+
+ s.SetWriteDeadline(time.Now().Add(5 * time.Second))
+ _, err = s.Write([]byte(what))
+ if err != nil {
+ return err
+ }
+ s.CloseWrite()
+
+ buf := make([]byte, 4096)
+
+ s.SetReadDeadline(time.Now().Add(5 * time.Second))
+ n, err := s.Read(buf)
+ switch {
+ case err == io.EOF:
+ if n == 0 {
+ return err
+ }
+
+ case err != nil:
+ echoLog.Debug("I/O error", "err", err)
+
+ s.Reset()
+ return err
+ }
+
+ if what != string(buf[:n]) {
+ return fmt.Errorf("echo output doesn't match input")
+ }
+
+ return nil
+}
diff --git a/p2p/test/resource-manager/echo_test.go b/p2p/test/resource-manager/echo_test.go
new file mode 100644
index 0000000000..d6896fbdef
--- /dev/null
+++ b/p2p/test/resource-manager/echo_test.go
@@ -0,0 +1,76 @@
+package itest
+
+import (
+ "context"
+ "testing"
+
+ "github.com/libp2p/go-libp2p"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/libp2p/go-libp2p/p2p/transport/tcp"
+
+ "github.com/stretchr/testify/require"
+)
+
+func createEchos(t *testing.T, count int, makeOpts ...func(int) libp2p.Option) []*Echo {
+ result := make([]*Echo, 0, count)
+
+ for i := 0; i < count; i++ {
+ opts := make([]libp2p.Option, 0, len(makeOpts)+2)
+ // only use a single transport, otherwise we might end up with a TCP and a QUIC connection to the same host
+ opts = append(opts, libp2p.Transport(tcp.NewTCPTransport), libp2p.DefaultListenAddrs)
+ for _, makeOpt := range makeOpts {
+ opts = append(opts, makeOpt(i))
+ }
+
+ h, err := libp2p.New(opts...)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ e := NewEcho(h)
+ result = append(result, e)
+ }
+
+ for i := 0; i < count; i++ {
+ for j := 0; j < count; j++ {
+ if i == j {
+ continue
+ }
+
+ result[i].Host.Peerstore().AddAddrs(result[j].Host.ID(), result[j].Host.Addrs(), peerstore.PermanentAddrTTL)
+ }
+ }
+
+ return result
+}
+
+func closeEchos(echos []*Echo) {
+ for _, e := range echos {
+ e.Host.Close()
+ }
+}
+
+func checkEchoStatus(t *testing.T, e *Echo, expected EchoStatus) {
+ t.Helper()
+ require.Equal(t, expected, e.Status())
+}
+
+func TestEcho(t *testing.T) {
+ echos := createEchos(t, 2)
+ defer closeEchos(echos)
+
+ if err := echos[0].Host.Connect(context.TODO(), peer.AddrInfo{ID: echos[1].Host.ID()}); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := echos[0].Echo(echos[1].Host.ID(), "hello libp2p"); err != nil {
+ t.Fatal(err)
+ }
+
+ checkEchoStatus(t, echos[1], EchoStatus{
+ StreamsIn: 1,
+ EchosIn: 1,
+ EchosOut: 1,
+ })
+}
diff --git a/p2p/test/resource-manager/rcmgr_test.go b/p2p/test/resource-manager/rcmgr_test.go
new file mode 100644
index 0000000000..816c58da0a
--- /dev/null
+++ b/p2p/test/resource-manager/rcmgr_test.go
@@ -0,0 +1,334 @@
+package itest
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
+
+ "github.com/stretchr/testify/require"
+)
+
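+// makeRcmgrOption returns a per-host libp2p option that installs a resource
+// manager with the given fixed limits. Setting LIBP2P_TEST_RCMGR_TRACE=1
+// additionally writes a gzipped JSON trace file per host.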
+func makeRcmgrOption(t *testing.T, cfg rcmgr.ConcreteLimitConfig) func(int) libp2p.Option {
+ return func(i int) libp2p.Option {
+ var opts []rcmgr.Option
+ if os.Getenv("LIBP2P_TEST_RCMGR_TRACE") == "1" {
+ opts = append(opts, rcmgr.WithTrace(fmt.Sprintf("%s-%d.json.gz", t.Name(), i)))
+ }
+
+ mgr, err := rcmgr.NewResourceManager(rcmgr.NewFixedLimiter(cfg), opts...)
+ require.NoError(t, err)
+ return libp2p.ResourceManager(mgr)
+ }
+}
+
+func closeRcmgrs(echos []*Echo) {
+ for _, e := range echos {
+ e.Host.Network().ResourceManager().Close()
+ }
+}
+
+func waitForConnection(t *testing.T, src, dest *Echo) {
+ require.Eventually(t, func() bool {
+ return src.Host.Network().Connectedness(dest.Host.ID()) == network.Connected &&
+ dest.Host.Network().Connectedness(src.Host.ID()) == network.Connected
+ }, time.Second, 10*time.Millisecond)
+}
+
+func TestResourceManagerConnInbound(t *testing.T) {
+ // this test checks that we cannot exceed the inbound conn limit at the system level
+ // we specify: 1 conn per peer, 3 conns total, and we try to create 4 conns
+ cfg := rcmgr.PartialLimitConfig{
+ System: rcmgr.ResourceLimits{
+ ConnsInbound: 3,
+ ConnsOutbound: 1024,
+ Conns: 1024,
+ StreamsOutbound: rcmgr.Unlimited,
+ },
+ PeerDefault: rcmgr.ResourceLimits{
+ ConnsInbound: 1,
+ ConnsOutbound: 1,
+ Conns: 1,
+ },
+ }.Build(rcmgr.DefaultLimits.AutoScale())
+
+ echos := createEchos(t, 5, makeRcmgrOption(t, cfg))
+ defer closeEchos(echos)
+ defer closeRcmgrs(echos)
+
+ for i := 1; i < 4; i++ {
+ err := echos[i].Host.Connect(context.Background(), peer.AddrInfo{ID: echos[0].Host.ID()})
+ if err != nil {
+ t.Fatal(err)
+ }
+ waitForConnection(t, echos[i], echos[0])
+ }
+
+ for i := 1; i < 4; i++ {
+ count := len(echos[i].Host.Network().ConnsToPeer(echos[0].Host.ID()))
+ if count != 1 {
+ t.Fatalf("expected %d connections to peer, got %d", 1, count)
+ }
+ }
+
+ err := echos[4].Host.Connect(context.Background(), peer.AddrInfo{ID: echos[0].Host.ID()})
+ if err == nil {
+ t.Fatal("expected ResourceManager to block incoming connection")
+ }
+}
+
+func TestResourceManagerConnOutbound(t *testing.T) {
+ // this test checks that we cannot exceed the outbound conn limit at the system level
+ // we specify: 1 conn per peer, 3 conns total, and we try to create 4 conns
+ cfg := rcmgr.PartialLimitConfig{
+ System: rcmgr.ResourceLimits{
+ ConnsInbound: 1024,
+ ConnsOutbound: 3,
+ Conns: 1024,
+ },
+ PeerDefault: rcmgr.ResourceLimits{
+ ConnsInbound: 1,
+ ConnsOutbound: 1,
+ Conns: 1,
+ },
+ }.Build(rcmgr.DefaultLimits.AutoScale())
+ echos := createEchos(t, 5, makeRcmgrOption(t, cfg))
+ defer closeEchos(echos)
+ defer closeRcmgrs(echos)
+
+ for i := 1; i < 4; i++ {
+ err := echos[0].Host.Connect(context.Background(), peer.AddrInfo{ID: echos[i].Host.ID()})
+ if err != nil {
+ t.Fatal(err)
+ }
+ waitForConnection(t, echos[0], echos[i])
+ }
+
+ for i := 1; i < 4; i++ {
+ count := len(echos[i].Host.Network().ConnsToPeer(echos[0].Host.ID()))
+ if count != 1 {
+ t.Fatalf("expected %d connections to peer, got %d", 1, count)
+ }
+ }
+
+ err := echos[0].Host.Connect(context.Background(), peer.AddrInfo{ID: echos[4].Host.ID()})
+ if err == nil {
+ t.Fatal("expected ResourceManager to block incoming connection")
+ }
+}
+
+func TestResourceManagerServiceInbound(t *testing.T) {
+ // this test checks that we cannot exceed the inbound stream limit at the service level
+ // we specify: 3 streams for the service, and we try to create 4 streams
+ cfg := rcmgr.PartialLimitConfig{
+ ServiceDefault: rcmgr.ResourceLimits{
+ StreamsInbound: 3,
+ StreamsOutbound: 1024,
+ Streams: 1024,
+ },
+ }.Build(rcmgr.DefaultLimits.AutoScale())
+ echos := createEchos(t, 5, makeRcmgrOption(t, cfg))
+ defer closeEchos(echos)
+ defer closeRcmgrs(echos)
+
+ for i := 1; i < 5; i++ {
+ err := echos[i].Host.Connect(context.Background(), peer.AddrInfo{ID: echos[0].Host.ID()})
+ if err != nil {
+ t.Fatal(err)
+ }
+ waitForConnection(t, echos[i], echos[0])
+ }
+
+ ready := make(chan struct{})
+ echos[0].BeforeDone(waitForChannel(ready, time.Minute))
+
+ var eg sync.WaitGroup
+ echos[0].Done(eg.Done)
+
+ var once sync.Once
+ var wg sync.WaitGroup
+ for i := 1; i < 5; i++ {
+ eg.Add(1)
+ wg.Add(1)
+ go func(i int) {
+ defer wg.Done()
+
+ err := echos[i].Echo(echos[0].Host.ID(), "hello libp2p")
+ if err != nil {
+ t.Log(err)
+ once.Do(func() {
+ close(ready)
+ })
+ }
+ }(i)
+ }
+ wg.Wait()
+ eg.Wait()
+
+ checkEchoStatus(t, echos[0], EchoStatus{
+ StreamsIn: 4,
+ EchosIn: 3,
+ EchosOut: 3,
+ ResourceServiceErrors: 1,
+ })
+}
+
+func TestResourceManagerServicePeerInbound(t *testing.T) {
+ // this test checks that we cannot exceed the per-peer inbound stream limit at the service level
+ // we specify: 2 streams per peer for echo, and we try to create 3 streams
+ cfg := rcmgr.DefaultLimits
+ cfg.AddServicePeerLimit(
+ EchoService,
+ rcmgr.BaseLimit{StreamsInbound: 2, StreamsOutbound: 1024, Streams: 1024, Memory: 9999999},
+ rcmgr.BaseLimitIncrease{},
+ )
+ limits := cfg.AutoScale()
+
+ echos := createEchos(t, 5, makeRcmgrOption(t, limits))
+ defer closeEchos(echos)
+ defer closeRcmgrs(echos)
+
+ for i := 1; i < 5; i++ {
+ err := echos[i].Host.Connect(context.Background(), peer.AddrInfo{ID: echos[0].Host.ID()})
+ if err != nil {
+ t.Fatal(err)
+ }
+ waitForConnection(t, echos[i], echos[0])
+ }
+
+ echos[0].BeforeDone(waitForBarrier(4, time.Minute))
+
+ var eg sync.WaitGroup
+ echos[0].Done(eg.Done)
+
+ var wg sync.WaitGroup
+ for i := 1; i < 5; i++ {
+ eg.Add(1)
+ wg.Add(1)
+ go func(i int) {
+ defer wg.Done()
+
+ err := echos[i].Echo(echos[0].Host.ID(), "hello libp2p")
+ if err != nil {
+ t.Log(err)
+ }
+ }(i)
+ }
+ wg.Wait()
+ eg.Wait()
+
+ checkEchoStatus(t, echos[0], EchoStatus{
+ StreamsIn: 4,
+ EchosIn: 4,
+ EchosOut: 4,
+ ResourceServiceErrors: 0,
+ })
+
+ ready := make(chan struct{})
+ echos[0].BeforeDone(waitForChannel(ready, time.Minute))
+
+ var once sync.Once
+ for i := 0; i < 3; i++ {
+ eg.Add(1)
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+
+ err := echos[2].Echo(echos[0].Host.ID(), "hello libp2p")
+ if err != nil {
+ t.Log(err)
+ once.Do(func() {
+ close(ready)
+ })
+ }
+ }()
+ }
+ wg.Wait()
+ eg.Wait()
+
+ checkEchoStatus(t, echos[0], EchoStatus{
+ StreamsIn: 7,
+ EchosIn: 6,
+ EchosOut: 6,
+ ResourceServiceErrors: 1,
+ })
+}
+
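+// waitForBarrier returns a callback that blocks until it has been invoked
+// count times in total (across goroutines), or until the timeout elapses.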
+func waitForBarrier(count int32, timeout time.Duration) func() error {
+ ready := make(chan struct{})
+ var wait atomic.Int32
+ wait.Store(count)
+ return func() error {
+ if wait.Add(-1) == 0 {
+ close(ready)
+ }
+
+ select {
+ case <-ready:
+ return nil
+ case <-time.After(timeout):
+ return fmt.Errorf("timeout")
+ }
+ }
+}
+
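+// waitForChannel returns a callback that blocks until ready is closed, or
+// until the timeout elapses.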
+func waitForChannel(ready chan struct{}, timeout time.Duration) func() error {
+ return func() error {
+ select {
+ case <-ready:
+ return nil
+ case <-time.After(timeout):
+ return fmt.Errorf("timeout")
+ }
+ }
+}
+
+func TestReadmeExample(_ *testing.T) {
+ // Start with the default scaling limits.
+ scalingLimits := rcmgr.DefaultLimits
+
+ // Add limits around included libp2p protocols
+ libp2p.SetDefaultServiceLimits(&scalingLimits)
+
+ // Turn the scaling limits into a concrete set of limits using `.AutoScale`. This
+ // scales the limits in proportion to your system memory.
+ scaledDefaultLimits := scalingLimits.AutoScale()
+
+ // Tweak certain settings
+ cfg := rcmgr.PartialLimitConfig{
+ System: rcmgr.ResourceLimits{
+ // Allow unlimited outbound streams
+ StreamsOutbound: rcmgr.Unlimited,
+ },
+ // Everything else is default. The exact values will come from `scaledDefaultLimits` above.
+ }
+
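+ // A hedged aside, not part of the README text: per-service overrides slot
+ // into the same struct, for example via the Service map on PartialLimitConfig
+ // (assuming that field exists in your rcmgr version):
+ //
+ //   cfg.Service = map[string]rcmgr.ResourceLimits{
+ //       "libp2p.identify": {StreamsInbound: 32},
+ //   }
+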
+ // Create our limits by using our cfg and replacing the default values with values from `scaledDefaultLimits`
+ limits := cfg.Build(scaledDefaultLimits)
+
+ // The resource manager expects a limiter, so we create one from our limits.
+ limiter := rcmgr.NewFixedLimiter(limits)
+
+ // Metrics are enabled by default. If you want to disable metrics, use the
+ // WithMetricsDisabled option
+ // Initialize the resource manager
+ rm, err := rcmgr.NewResourceManager(limiter, rcmgr.WithMetricsDisabled())
+ if err != nil {
+ panic(err)
+ }
+
+ // Create a libp2p host
+ host, err := libp2p.New(libp2p.ResourceManager(rm))
+ if err != nil {
+ panic(err)
+ }
+ host.Close()
+}
diff --git a/p2p/test/security/bench_test.go b/p2p/test/security/bench_test.go
new file mode 100644
index 0000000000..fb50e3fa99
--- /dev/null
+++ b/p2p/test/security/bench_test.go
@@ -0,0 +1,130 @@
+package benchmark
+
+import (
+ "context"
+ crand "crypto/rand"
+ "io"
+ "net"
+ "sync"
+ "testing"
+
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/sec"
+ "github.com/libp2p/go-libp2p/p2p/security/noise"
+ tls "github.com/libp2p/go-libp2p/p2p/security/tls"
+ "github.com/stretchr/testify/assert"
+)
+
+type Factory func(*testing.B, crypto.PrivKey) sec.SecureTransport
+
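+// benchmarkThroughput measures one-directional throughput of size-byte writes
+// over a secured connection between two in-memory endpoints.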
+func benchmarkThroughput(b *testing.B, size int, factory Factory) {
+ privA, pubA, err := crypto.GenerateEd25519Key(crand.Reader)
+ assert.NoError(b, err)
+ idA, err := peer.IDFromPublicKey(pubA)
+ assert.NoError(b, err)
+ tptA := factory(b, privA)
+
+ privB, pubB, err := crypto.GenerateEd25519Key(crand.Reader)
+ assert.NoError(b, err)
+ idB, err := peer.IDFromPublicKey(pubB)
+ assert.NoError(b, err)
+ tptB := factory(b, privB)
+
+ // The pipe serializes encryption and decryption; we might want both parallelized
+ // to reduce the impact of context switching on the benchmark.
+ // https://github.com/golang/go/issues/34502 would be ideal for the parallel use case.
+ p1, p2 := net.Pipe()
+ var ready sync.Mutex // wait for completed handshake
+ var finished sync.Mutex // wait until all data has been received
+ ready.Lock()
+ finished.Lock()
+ go func() {
+ defer finished.Unlock()
+ conn, err := tptB.SecureInbound(context.Background(), p2, idA)
+ assert.NoError(b, err)
+ ready.Unlock()
+
+ _, err = io.Copy(io.Discard, conn)
+ assert.NoError(b, err)
+ }()
+
+ conn, err := tptA.SecureOutbound(context.Background(), p1, idB)
+ assert.NoError(b, err)
+ ready.Lock()
+
+ buf := make([]byte, size)
+ b.SetBytes(int64(len(buf)))
+ b.ResetTimer()
+
+ for i := b.N; i != 0; i-- {
+ _, err = conn.Write(buf[:])
+ assert.NoError(b, err)
+ }
+ conn.Close()
+
+ finished.Lock()
+}
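+
+// benchmarkHandshakes measures the cost of repeated secure-channel handshakes
+// over fresh in-memory pipes.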
+func benchmarkHandshakes(b *testing.B, factory Factory) {
+ privA, pubA, err := crypto.GenerateEd25519Key(crand.Reader)
+ assert.NoError(b, err)
+ idA, err := peer.IDFromPublicKey(pubA)
+ assert.NoError(b, err)
+ tptA := factory(b, privA)
+
+ privB, pubB, err := crypto.GenerateEd25519Key(crand.Reader)
+ assert.NoError(b, err)
+ idB, err := peer.IDFromPublicKey(pubB)
+ assert.NoError(b, err)
+ tptB := factory(b, privB)
+
+ pipes := make(chan net.Conn, 1)
+
+ var finished sync.Mutex // wait until all data has been transferred
+ finished.Lock()
+ go func() {
+ defer finished.Unlock()
+ var throwAway [1]byte
+ for p := range pipes {
+ conn, err := tptB.SecureInbound(context.Background(), p, idA)
+ assert.NoError(b, err)
+ _, err = conn.Read(throwAway[:]) // read because the TLS transport currently only completes its handshake when Read is called.
+ assert.ErrorIs(b, err, io.EOF)
+ }
+ }()
+ b.ResetTimer()
+
+ for i := b.N; i != 0; i-- {
+ p1, p2 := net.Pipe()
+ pipes <- p2
+ conn, err := tptA.SecureOutbound(context.Background(), p1, idB)
+ assert.NoError(b, err)
+ assert.NoError(b, conn.Close())
+ }
+ close(pipes)
+
+ finished.Lock()
+}
+
+func bench(b *testing.B, factory Factory) {
+ b.Run("throughput", func(b *testing.B) {
+ b.Run("32KiB", func(b *testing.B) { benchmarkThroughput(b, 32*1024, factory) })
+ b.Run("1MiB", func(b *testing.B) { benchmarkThroughput(b, 1024*1024, factory) })
+ })
+ b.Run("handshakes", func(b *testing.B) { benchmarkHandshakes(b, factory) })
+}
+
+func BenchmarkNoise(b *testing.B) {
+ bench(b, func(b *testing.B, priv crypto.PrivKey) sec.SecureTransport {
+ tpt, err := noise.New("", priv, nil)
+ assert.NoError(b, err)
+ return tpt
+ })
+}
+
+func BenchmarkTLS(b *testing.B) {
+ bench(b, func(b *testing.B, priv crypto.PrivKey) sec.SecureTransport {
+ tpt, err := tls.New("", priv, nil)
+ assert.NoError(b, err)
+ return tpt
+ })
+}
diff --git a/p2p/test/swarm/swarm_test.go b/p2p/test/swarm/swarm_test.go
new file mode 100644
index 0000000000..10298f5139
--- /dev/null
+++ b/p2p/test/swarm/swarm_test.go
@@ -0,0 +1,245 @@
+package swarm_test
+
+import (
+ "context"
+ "io"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
+ "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client"
+ "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay"
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestDialPeerTransientConnection(t *testing.T) {
+ h1, err := libp2p.New(
+ libp2p.NoListenAddrs,
+ libp2p.EnableRelay(),
+ )
+ require.NoError(t, err)
+
+ h2, err := libp2p.New(
+ libp2p.NoListenAddrs,
+ libp2p.EnableRelay(),
+ )
+ require.NoError(t, err)
+
+ relay1, err := libp2p.New()
+ require.NoError(t, err)
+
+ _, err = relay.New(relay1)
+ require.NoError(t, err)
+
+ relay1info := peer.AddrInfo{
+ ID: relay1.ID(),
+ Addrs: relay1.Addrs(),
+ }
+ err = h1.Connect(context.Background(), relay1info)
+ require.NoError(t, err)
+
+ err = h2.Connect(context.Background(), relay1info)
+ require.NoError(t, err)
+
+ _, err = client.Reserve(context.Background(), h2, relay1info)
+ require.NoError(t, err)
+
+ relayaddr := ma.StringCast("/p2p/" + relay1info.ID.String() + "/p2p-circuit/p2p/" + h2.ID().String())
+
+ h1.Peerstore().AddAddr(h2.ID(), relayaddr, peerstore.TempAddrTTL)
+
+ // swarm.DialPeer should connect over transient connections
+ conn1, err := h1.Network().DialPeer(context.Background(), h2.ID())
+ require.NoError(t, err)
+ require.NotNil(t, conn1)
+
+ // Test that repeated calls return the same connection.
+ conn2, err := h1.Network().DialPeer(context.Background(), h2.ID())
+ require.NoError(t, err)
+ require.NotNil(t, conn2)
+
+ require.Equal(t, conn1, conn2)
+
+ // swarm.DialPeer should fail if forceDirect is used
+ ctx := network.WithForceDirectDial(context.Background(), "test")
+ conn, err := h1.Network().DialPeer(ctx, h2.ID())
+ require.Error(t, err)
+ require.Nil(t, conn)
+}
+
+func TestNewStreamTransientConnection(t *testing.T) {
+ h1, err := libp2p.New(
+ libp2p.ListenAddrStrings("/ip4/127.0.0.1/udp/0/quic-v1"),
+ libp2p.EnableRelay(),
+ )
+ require.NoError(t, err)
+
+ h2, err := libp2p.New(
+ libp2p.ListenAddrStrings("/ip4/127.0.0.1/udp/0/quic-v1"),
+ libp2p.EnableRelay(),
+ )
+ require.NoError(t, err)
+
+ relay1, err := libp2p.New()
+ require.NoError(t, err)
+
+ _, err = relay.New(relay1)
+ require.NoError(t, err)
+
+ relay1info := peer.AddrInfo{
+ ID: relay1.ID(),
+ Addrs: relay1.Addrs(),
+ }
+ err = h1.Connect(context.Background(), relay1info)
+ require.NoError(t, err)
+
+ err = h2.Connect(context.Background(), relay1info)
+ require.NoError(t, err)
+
+ _, err = client.Reserve(context.Background(), h2, relay1info)
+ require.NoError(t, err)
+
+ relayaddr := ma.StringCast("/p2p/" + relay1info.ID.String() + "/p2p-circuit/p2p/" + h2.ID().String())
+
+ h1.Peerstore().AddAddr(h2.ID(), relayaddr, peerstore.TempAddrTTL)
+
+ // WithAllowLimitedConn should succeed
+ ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
+ defer cancel()
+ ctx = network.WithAllowLimitedConn(ctx, "test")
+ s, err := h1.Network().NewStream(ctx, h2.ID())
+ require.NoError(t, err)
+ require.NotNil(t, s)
+ defer s.Close()
+
+ // Without WithAllowLimitedConn, NewStream should fail with context deadline exceeded
+ ctx, cancel = context.WithTimeout(context.Background(), 200*time.Millisecond)
+ defer cancel()
+ s, err = h1.Network().NewStream(ctx, h2.ID())
+ require.ErrorIs(t, err, context.DeadlineExceeded)
+ require.Nil(t, s)
+
+ // Provide h2's direct address to h1.
+ h1.Peerstore().AddAddrs(h2.ID(), h2.Addrs(), peerstore.TempAddrTTL)
+ // network.NoDial should also fail
+ ctx, cancel = context.WithTimeout(context.Background(), 200*time.Millisecond)
+ defer cancel()
+ ctx = network.WithNoDial(ctx, "test")
+ s, err = h1.Network().NewStream(ctx, h2.ID())
+ require.ErrorIs(t, err, context.DeadlineExceeded)
+ require.Nil(t, s)
+
+ done := make(chan bool, 2)
+ // NewStream should return a stream if an incoming direct connection is established
+ go func() {
+ ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ ctx = network.WithNoDial(ctx, "test")
+ s, err = h1.Network().NewStream(ctx, h2.ID())
+ assert.NoError(t, err)
+ assert.NotNil(t, s)
+ defer s.Close()
+ assert.Equal(t, network.DirInbound, s.Conn().Stat().Direction)
+ done <- true
+ }()
+ go func() {
+ // connect h2 to h1 simulating connection reversal
+ h2.Peerstore().AddAddrs(h1.ID(), h1.Addrs(), peerstore.TempAddrTTL)
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
+ defer cancel()
+ ctx = network.WithForceDirectDial(ctx, "test")
+ err := h2.Connect(ctx, peer.AddrInfo{ID: h1.ID()})
+ assert.NoError(t, err)
+ done <- true
+ }()
+
+ <-done
+ <-done
+}
+
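+// TestLimitStreamsWhenHangingHandlers verifies that streams held open by a
+// hanging handler count against the system stream limit, and that capacity is
+// freed once the handlers return.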
+func TestLimitStreamsWhenHangingHandlers(t *testing.T) {
+ var partial rcmgr.PartialLimitConfig
+ const streamLimit = 10
+ partial.System.Streams = streamLimit
+ mgr, err := rcmgr.NewResourceManager(rcmgr.NewFixedLimiter(partial.Build(rcmgr.InfiniteLimits)))
+ require.NoError(t, err)
+
+ maddr, err := ma.NewMultiaddr("/ip4/127.0.0.1/udp/0/quic-v1")
+ require.NoError(t, err)
+
+ receiver, err := libp2p.New(
+ libp2p.ResourceManager(mgr),
+ libp2p.ListenAddrs(maddr),
+ )
+ require.NoError(t, err)
+ t.Cleanup(func() { receiver.Close() })
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+
+ const pid = "/test"
+ receiver.SetStreamHandler(pid, func(s network.Stream) {
+ defer s.Close()
+ s.Write([]byte{42})
+ wg.Wait()
+ })
+
+ // Open streamLimit streams
+ success := 0
+ // we try many times because identify and identify push take up a few streams
+ for i := 0; i < 1000 && success < streamLimit; i++ {
+ mgr, err = rcmgr.NewResourceManager(rcmgr.NewFixedLimiter(rcmgr.InfiniteLimits))
+ require.NoError(t, err)
+
+ sender, err := libp2p.New(libp2p.ResourceManager(mgr))
+ require.NoError(t, err)
+ t.Cleanup(func() { sender.Close() })
+
+ sender.Peerstore().AddAddrs(receiver.ID(), receiver.Addrs(), peerstore.PermanentAddrTTL)
+
+ s, err := sender.NewStream(context.Background(), receiver.ID(), pid)
+ if err != nil {
+ continue
+ }
+
+ var b [1]byte
+ _, err = io.ReadFull(s, b[:])
+ if err == nil {
+ success++
+ }
+ sender.Close()
+ }
+ require.Equal(t, streamLimit, success)
+ // We have the maximum number of streams open. Next call should fail.
+ mgr, err = rcmgr.NewResourceManager(rcmgr.NewFixedLimiter(rcmgr.InfiniteLimits))
+ require.NoError(t, err)
+
+ sender, err := libp2p.New(libp2p.ResourceManager(mgr))
+ require.NoError(t, err)
+ t.Cleanup(func() { sender.Close() })
+
+ sender.Peerstore().AddAddrs(receiver.ID(), receiver.Addrs(), peerstore.PermanentAddrTTL)
+
+ _, err = sender.NewStream(context.Background(), receiver.ID(), pid)
+ require.Error(t, err)
+
+ // Close the open streams
+ wg.Done()
+
+ // Next call should succeed
+ require.Eventually(t, func() bool {
+ s, err := sender.NewStream(context.Background(), receiver.ID(), pid)
+ if err == nil {
+ s.Close()
+ return true
+ }
+ return false
+ }, 5*time.Second, 100*time.Millisecond)
+}
diff --git a/p2p/test/transport/deadline_test.go b/p2p/test/transport/deadline_test.go
new file mode 100644
index 0000000000..55fa7a4fbc
--- /dev/null
+++ b/p2p/test/transport/deadline_test.go
@@ -0,0 +1,94 @@
+package transport_integration
+
+import (
+ "context"
+ "net"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/stretchr/testify/require"
+)
+
+func TestReadWriteDeadlines(t *testing.T) {
+ // Send a lot of data so that writes have to flush (can't just buffer it all)
+ sendBuf := make([]byte, 10<<20)
+ for _, tc := range transportsToTest {
+ t.Run(tc.Name, func(t *testing.T) {
+ listener := tc.HostGenerator(t, TransportTestCaseOpts{})
+ defer listener.Close()
+ dialer := tc.HostGenerator(t, TransportTestCaseOpts{NoListen: true})
+ defer dialer.Close()
+
+ require.NoError(t, dialer.Connect(context.Background(), peer.AddrInfo{
+ ID: listener.ID(),
+ Addrs: listener.Addrs(),
+ }))
+
+ // This simply stalls
+ listener.SetStreamHandler("/stall", func(s network.Stream) {
+ time.Sleep(time.Hour)
+ s.Close()
+ })
+
+ t.Run("ReadDeadline", func(t *testing.T) {
+ s, err := dialer.NewStream(context.Background(), listener.ID(), "/stall")
+ require.NoError(t, err)
+ defer s.Close()
+
+ start := time.Now()
+ // Set a deadline
+ s.SetReadDeadline(time.Now().Add(10 * time.Millisecond))
+ buf := make([]byte, 1)
+ _, err = s.Read(buf)
+ require.Error(t, err)
+ var nerr net.Error
+ require.ErrorAs(t, err, &nerr)
+ require.True(t, nerr.Timeout())
+ require.Less(t, time.Since(start), 1*time.Second)
+ })
+
+ t.Run("WriteDeadline", func(t *testing.T) {
+ s, err := dialer.NewStream(context.Background(), listener.ID(), "/stall")
+ require.NoError(t, err)
+ defer s.Close()
+
+ // Set a deadline
+ s.SetWriteDeadline(time.Now().Add(10 * time.Millisecond))
+ start := time.Now()
+ _, err = s.Write(sendBuf)
+ require.Error(t, err)
+ require.True(t, err.(net.Error).Timeout())
+ require.Less(t, time.Since(start), 1*time.Second)
+ })
+
+ // Like the above, but with SetDeadline
+ t.Run("SetDeadline", func(t *testing.T) {
+ for _, op := range []string{"Read", "Write"} {
+ t.Run(op, func(t *testing.T) {
+ s, err := dialer.NewStream(context.Background(), listener.ID(), "/stall")
+ require.NoError(t, err)
+ defer s.Close()
+
+ // Set a deadline
+ s.SetDeadline(time.Now().Add(10 * time.Millisecond))
+ start := time.Now()
+
+ if op == "Read" {
+ buf := make([]byte, 1)
+ _, err = s.Read(buf)
+ } else {
+ _, err = s.Write(sendBuf)
+ }
+ require.Error(t, err)
+ var nerr net.Error
+ require.ErrorAs(t, err, &nerr)
+ require.True(t, nerr.Timeout())
+ require.Less(t, time.Since(start), 1*time.Second)
+ })
+ }
+ })
+ })
+ }
+}
diff --git a/p2p/test/transport/gating_test.go b/p2p/test/transport/gating_test.go
new file mode 100644
index 0000000000..c9ae4fd80d
--- /dev/null
+++ b/p2p/test/transport/gating_test.go
@@ -0,0 +1,296 @@
+package transport_integration
+
+import (
+ "context"
+ "encoding/binary"
+ "net/netip"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/protocol"
+ "github.com/libp2p/go-libp2p/p2p/net/swarm"
+
+ "github.com/libp2p/go-libp2p-testing/race"
+
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/multiformats/go-multiaddr/matest"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/mock/gomock"
+)
+
+//go:generate go run go.uber.org/mock/mockgen -package transport_integration -destination mock_connection_gater_test.go github.com/libp2p/go-libp2p/core/connmgr ConnectionGater
+
+// normalize removes the certhash and replaces /wss with /tls/ws
+func normalize(addr ma.Multiaddr) ma.Multiaddr {
+ for {
+ if _, err := addr.ValueForProtocol(ma.P_CERTHASH); err != nil {
+ break
+ }
+ addr, _ = ma.SplitLast(addr)
+ }
+
+ // replace /wss with /tls/ws
+ var components ma.Multiaddr
+ ma.ForEach(addr, func(c ma.Component) bool {
+ if c.Protocol().Code == ma.P_WSS {
+ components = append(components, ma.StringCast("/tls/ws")...)
+ } else {
+ components = append(components, c)
+ }
+ return true
+ })
+ return components
+}
+
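+// addrPort extracts the IP address and TCP/UDP port from a multiaddr.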
+func addrPort(addr ma.Multiaddr) netip.AddrPort {
+ a := netip.Addr{}
+ p := uint16(0)
+ ma.ForEach(addr, func(c ma.Component) bool {
+ if c.Protocol().Code == ma.P_IP4 || c.Protocol().Code == ma.P_IP6 {
+ a, _ = netip.AddrFromSlice(c.RawValue())
+ return false
+ }
+ if c.Protocol().Code == ma.P_UDP || c.Protocol().Code == ma.P_TCP {
+ p = binary.BigEndian.Uint16(c.RawValue())
+ return true
+ }
+ return false
+ })
+ return netip.AddrPortFrom(a, p)
+}
+
+func TestInterceptPeerDial(t *testing.T) {
+ if race.WithRace() {
+ t.Skip("The upgrader spawns a new Go routine, which leads to race conditions when using GoMock.")
+ }
+ for _, tc := range transportsToTest {
+ t.Run(tc.Name, func(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+ connGater := NewMockConnectionGater(ctrl)
+
+ h1 := tc.HostGenerator(t, TransportTestCaseOpts{NoListen: true, ConnGater: connGater})
+ h2 := tc.HostGenerator(t, TransportTestCaseOpts{})
+ require.Len(t, h2.Addrs(), 1)
+
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+ defer cancel()
+ connGater.EXPECT().InterceptPeerDial(h2.ID())
+ require.ErrorIs(t, h1.Connect(ctx, peer.AddrInfo{ID: h2.ID(), Addrs: h2.Addrs()}), swarm.ErrGaterDisallowedConnection)
+ })
+ }
+}
+
+func TestInterceptAddrDial(t *testing.T) {
+ if race.WithRace() {
+ t.Skip("The upgrader spawns a new Go routine, which leads to race conditions when using GoMock.")
+ }
+ for _, tc := range transportsToTest {
+ t.Run(tc.Name, func(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+ connGater := NewMockConnectionGater(ctrl)
+
+ h1 := tc.HostGenerator(t, TransportTestCaseOpts{NoListen: true, ConnGater: connGater})
+ h2 := tc.HostGenerator(t, TransportTestCaseOpts{})
+ require.Len(t, h2.Addrs(), 1)
+
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+ defer cancel()
+ gomock.InOrder(
+ connGater.EXPECT().InterceptPeerDial(h2.ID()).Return(true),
+ connGater.EXPECT().InterceptAddrDial(h2.ID(), matest.MultiaddrMatcher{Multiaddr: h2.Addrs()[0]}),
+ )
+ require.ErrorIs(t, h1.Connect(ctx, peer.AddrInfo{ID: h2.ID(), Addrs: h2.Addrs()}), swarm.ErrNoGoodAddresses)
+ })
+ }
+}
+
+func TestInterceptSecuredOutgoing(t *testing.T) {
+ if race.WithRace() {
+ t.Skip("The upgrader spawns a new Go routine, which leads to race conditions when using GoMock.")
+ }
+ for _, tc := range transportsToTest {
+ t.Run(tc.Name, func(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+ connGater := NewMockConnectionGater(ctrl)
+
+ h1 := tc.HostGenerator(t, TransportTestCaseOpts{NoListen: true, ConnGater: connGater})
+ h2 := tc.HostGenerator(t, TransportTestCaseOpts{})
+ defer h1.Close()
+ defer h2.Close()
+ require.Len(t, h2.Addrs(), 1)
+
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+ defer cancel()
+ gomock.InOrder(
+ connGater.EXPECT().InterceptPeerDial(h2.ID()).Return(true),
+ connGater.EXPECT().InterceptAddrDial(h2.ID(), gomock.Any()).Return(true),
+ connGater.EXPECT().InterceptSecured(network.DirOutbound, h2.ID(), gomock.Any()).Do(func(_ network.Direction, _ peer.ID, addrs network.ConnMultiaddrs) {
+ require.Equal(t, normalize(h2.Addrs()[0]), normalize(addrs.RemoteMultiaddr()))
+ }),
+ )
+ err := h1.Connect(ctx, peer.AddrInfo{ID: h2.ID(), Addrs: h2.Addrs()})
+ require.Error(t, err)
+ require.NotErrorIs(t, err, context.DeadlineExceeded)
+ })
+ }
+}
+
+func TestInterceptUpgradedOutgoing(t *testing.T) {
+ if race.WithRace() {
+ t.Skip("The upgrader spawns a new Go routine, which leads to race conditions when using GoMock.")
+ }
+ for _, tc := range transportsToTest {
+ t.Run(tc.Name, func(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+ connGater := NewMockConnectionGater(ctrl)
+
+ h1 := tc.HostGenerator(t, TransportTestCaseOpts{NoListen: true, ConnGater: connGater})
+ h2 := tc.HostGenerator(t, TransportTestCaseOpts{})
+ defer h1.Close()
+ defer h2.Close()
+ require.Len(t, h2.Addrs(), 1)
+
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+ defer cancel()
+ gomock.InOrder(
+ connGater.EXPECT().InterceptPeerDial(h2.ID()).Return(true),
+ connGater.EXPECT().InterceptAddrDial(h2.ID(), gomock.Any()).Return(true),
+ connGater.EXPECT().InterceptSecured(network.DirOutbound, h2.ID(), gomock.Any()).Return(true),
+ connGater.EXPECT().InterceptUpgraded(gomock.Any()).Do(func(c network.Conn) {
+ // remove the certhash component from WebTransport addresses
+ require.Equal(t, normalize(h2.Addrs()[0]).String(), normalize(c.RemoteMultiaddr()).String())
+ require.Equal(t, h1.ID(), c.LocalPeer())
+ require.Equal(t, h2.ID(), c.RemotePeer())
+ }))
+ err := h1.Connect(ctx, peer.AddrInfo{ID: h2.ID(), Addrs: h2.Addrs()})
+ require.Error(t, err)
+ require.NotErrorIs(t, err, context.DeadlineExceeded)
+ })
+ }
+}
+
+func TestInterceptAccept(t *testing.T) {
+ if race.WithRace() {
+ t.Skip("The upgrader spawns a new Go routine, which leads to race conditions when using GoMock.")
+ }
+ for _, tc := range transportsToTest {
+ t.Run(tc.Name, func(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+ connGater := NewMockConnectionGater(ctrl)
+
+ h1 := tc.HostGenerator(t, TransportTestCaseOpts{NoListen: true})
+ h2 := tc.HostGenerator(t, TransportTestCaseOpts{ConnGater: connGater})
+ defer h1.Close()
+ defer h2.Close()
+ require.Len(t, h2.Addrs(), 1)
+
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+ defer cancel()
+ // The basic host dials the first connection.
+ if strings.Contains(tc.Name, "WebRTC") {
+ // In WebRTC, retransmissions of the STUN packet might cause us to create
+ // multiple connections if the first connection attempt is rejected.
+ connGater.EXPECT().InterceptAccept(gomock.Any()).Do(func(addrs network.ConnMultiaddrs) {
+ require.Equal(t, normalize(h2.Addrs()[0]), normalize(addrs.LocalMultiaddr()))
+ }).AnyTimes()
+ } else if strings.Contains(tc.Name, "WebSocket") {
+ connGater.EXPECT().InterceptAccept(gomock.Any()).Do(func(addrs network.ConnMultiaddrs) {
+ require.Equal(t, addrPort(h2.Addrs()[0]), addrPort(addrs.LocalMultiaddr()))
+ })
+ } else {
+ connGater.EXPECT().InterceptAccept(gomock.Any()).Do(func(addrs network.ConnMultiaddrs) {
+ // remove the certhash component from WebTransport addresses
+ matest.AssertEqualMultiaddr(t, normalize(h2.Addrs()[0]), normalize(addrs.LocalMultiaddr()))
+ })
+ }
+
+ h1.Peerstore().AddAddrs(h2.ID(), h2.Addrs(), time.Hour)
+ _, err := h1.NewStream(ctx, h2.ID(), protocol.TestingID)
+ require.Error(t, err)
+ if _, err := h2.Addrs()[0].ValueForProtocol(ma.P_WEBRTC_DIRECT); err != nil {
+ // WebRTC rejects connection attempt before an error can be sent to the client.
+ // This means that the connection attempt will time out.
+ require.NotErrorIs(t, err, context.DeadlineExceeded)
+ }
+ })
+ }
+}
+
+func TestInterceptSecuredIncoming(t *testing.T) {
+ if race.WithRace() {
+ t.Skip("The upgrader spawns a new Go routine, which leads to race conditions when using GoMock.")
+ }
+ for _, tc := range transportsToTest {
+ t.Run(tc.Name, func(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+ connGater := NewMockConnectionGater(ctrl)
+
+ h1 := tc.HostGenerator(t, TransportTestCaseOpts{NoListen: true})
+ h2 := tc.HostGenerator(t, TransportTestCaseOpts{ConnGater: connGater})
+ defer h1.Close()
+ defer h2.Close()
+ require.Len(t, h2.Addrs(), 1)
+
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+ defer cancel()
+ gomock.InOrder(
+ connGater.EXPECT().InterceptAccept(gomock.Any()).Return(true),
+ connGater.EXPECT().InterceptSecured(network.DirInbound, h1.ID(), gomock.Any()).Do(func(_ network.Direction, _ peer.ID, addrs network.ConnMultiaddrs) {
+ // remove the certhash component from WebTransport addresses
+ matest.AssertEqualMultiaddr(t, normalize(h2.Addrs()[0]), normalize(addrs.LocalMultiaddr()))
+ }),
+ )
+ h1.Peerstore().AddAddrs(h2.ID(), h2.Addrs(), time.Hour)
+ _, err := h1.NewStream(ctx, h2.ID(), protocol.TestingID)
+ require.Error(t, err)
+ require.NotErrorIs(t, err, context.DeadlineExceeded)
+ })
+ }
+}
+
+func TestInterceptUpgradedIncoming(t *testing.T) {
+ if race.WithRace() {
+ t.Skip("The upgrader spawns a new Go routine, which leads to race conditions when using GoMock.")
+ }
+ for _, tc := range transportsToTest {
+ t.Run(tc.Name, func(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+ connGater := NewMockConnectionGater(ctrl)
+
+ h1 := tc.HostGenerator(t, TransportTestCaseOpts{NoListen: true})
+ h2 := tc.HostGenerator(t, TransportTestCaseOpts{ConnGater: connGater})
+ defer h1.Close()
+ defer h2.Close()
+ require.Len(t, h2.Addrs(), 1)
+
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+ defer cancel()
+ gomock.InOrder(
+ connGater.EXPECT().InterceptAccept(gomock.Any()).Return(true),
+ connGater.EXPECT().InterceptSecured(network.DirInbound, h1.ID(), gomock.Any()).Return(true),
+ connGater.EXPECT().InterceptUpgraded(gomock.Any()).Do(func(c network.Conn) {
+ // remove the certhash component from WebTransport addresses
+ require.Equal(t, normalize(h2.Addrs()[0]).String(), normalize(c.LocalMultiaddr()).String())
+ require.Equal(t, h1.ID(), c.RemotePeer())
+ require.Equal(t, h2.ID(), c.LocalPeer())
+ }),
+ )
+ h1.Peerstore().AddAddrs(h2.ID(), h2.Addrs(), time.Hour)
+ _, err := h1.NewStream(ctx, h2.ID(), protocol.TestingID)
+ require.Error(t, err)
+ require.NotErrorIs(t, err, context.DeadlineExceeded)
+ })
+ }
+}
diff --git a/p2p/test/transport/mock_connection_gater_test.go b/p2p/test/transport/mock_connection_gater_test.go
new file mode 100644
index 0000000000..5e13863f54
--- /dev/null
+++ b/p2p/test/transport/mock_connection_gater_test.go
@@ -0,0 +1,115 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: github.com/libp2p/go-libp2p/core/connmgr (interfaces: ConnectionGater)
+//
+// Generated by this command:
+//
+// mockgen -package transport_integration -destination mock_connection_gater_test.go github.com/libp2p/go-libp2p/core/connmgr ConnectionGater
+//
+
+// Package transport_integration is a generated GoMock package.
+package transport_integration
+
+import (
+ reflect "reflect"
+
+ control "github.com/libp2p/go-libp2p/core/control"
+ network "github.com/libp2p/go-libp2p/core/network"
+ peer "github.com/libp2p/go-libp2p/core/peer"
+ multiaddr "github.com/multiformats/go-multiaddr"
+ gomock "go.uber.org/mock/gomock"
+)
+
+// MockConnectionGater is a mock of ConnectionGater interface.
+type MockConnectionGater struct {
+ ctrl *gomock.Controller
+ recorder *MockConnectionGaterMockRecorder
+ isgomock struct{}
+}
+
+// MockConnectionGaterMockRecorder is the mock recorder for MockConnectionGater.
+type MockConnectionGaterMockRecorder struct {
+ mock *MockConnectionGater
+}
+
+// NewMockConnectionGater creates a new mock instance.
+func NewMockConnectionGater(ctrl *gomock.Controller) *MockConnectionGater {
+ mock := &MockConnectionGater{ctrl: ctrl}
+ mock.recorder = &MockConnectionGaterMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockConnectionGater) EXPECT() *MockConnectionGaterMockRecorder {
+ return m.recorder
+}
+
+// InterceptAccept mocks base method.
+func (m *MockConnectionGater) InterceptAccept(arg0 network.ConnMultiaddrs) bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "InterceptAccept", arg0)
+ ret0, _ := ret[0].(bool)
+ return ret0
+}
+
+// InterceptAccept indicates an expected call of InterceptAccept.
+func (mr *MockConnectionGaterMockRecorder) InterceptAccept(arg0 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InterceptAccept", reflect.TypeOf((*MockConnectionGater)(nil).InterceptAccept), arg0)
+}
+
+// InterceptAddrDial mocks base method.
+func (m *MockConnectionGater) InterceptAddrDial(arg0 peer.ID, arg1 multiaddr.Multiaddr) bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "InterceptAddrDial", arg0, arg1)
+ ret0, _ := ret[0].(bool)
+ return ret0
+}
+
+// InterceptAddrDial indicates an expected call of InterceptAddrDial.
+func (mr *MockConnectionGaterMockRecorder) InterceptAddrDial(arg0, arg1 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InterceptAddrDial", reflect.TypeOf((*MockConnectionGater)(nil).InterceptAddrDial), arg0, arg1)
+}
+
+// InterceptPeerDial mocks base method.
+func (m *MockConnectionGater) InterceptPeerDial(p peer.ID) bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "InterceptPeerDial", p)
+ ret0, _ := ret[0].(bool)
+ return ret0
+}
+
+// InterceptPeerDial indicates an expected call of InterceptPeerDial.
+func (mr *MockConnectionGaterMockRecorder) InterceptPeerDial(p any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InterceptPeerDial", reflect.TypeOf((*MockConnectionGater)(nil).InterceptPeerDial), p)
+}
+
+// InterceptSecured mocks base method.
+func (m *MockConnectionGater) InterceptSecured(arg0 network.Direction, arg1 peer.ID, arg2 network.ConnMultiaddrs) bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "InterceptSecured", arg0, arg1, arg2)
+ ret0, _ := ret[0].(bool)
+ return ret0
+}
+
+// InterceptSecured indicates an expected call of InterceptSecured.
+func (mr *MockConnectionGaterMockRecorder) InterceptSecured(arg0, arg1, arg2 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InterceptSecured", reflect.TypeOf((*MockConnectionGater)(nil).InterceptSecured), arg0, arg1, arg2)
+}
+
+// InterceptUpgraded mocks base method.
+func (m *MockConnectionGater) InterceptUpgraded(arg0 network.Conn) (bool, control.DisconnectReason) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "InterceptUpgraded", arg0)
+ ret0, _ := ret[0].(bool)
+ ret1, _ := ret[1].(control.DisconnectReason)
+ return ret0, ret1
+}
+
+// InterceptUpgraded indicates an expected call of InterceptUpgraded.
+func (mr *MockConnectionGaterMockRecorder) InterceptUpgraded(arg0 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InterceptUpgraded", reflect.TypeOf((*MockConnectionGater)(nil).InterceptUpgraded), arg0)
+}
diff --git a/p2p/test/transport/rcmgr_test.go b/p2p/test/transport/rcmgr_test.go
new file mode 100644
index 0000000000..cd24a8e876
--- /dev/null
+++ b/p2p/test/transport/rcmgr_test.go
@@ -0,0 +1,149 @@
+package transport_integration
+
+import (
+ "context"
+ "fmt"
+ "regexp"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ mocknetwork "github.com/libp2p/go-libp2p/core/network/mocks"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/p2p/protocol/identify"
+ "github.com/libp2p/go-libp2p/p2p/protocol/ping"
+ "github.com/multiformats/go-multiaddr/matest"
+
+ "github.com/stretchr/testify/require"
+ "go.uber.org/mock/gomock"
+)
+
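+// TestResourceManagerIsUsed installs a mock resource manager on either the
+// dialer or the listener and asserts that connection, stream, and memory
+// scopes are opened, tagged, and released across a connect/identify/ping cycle.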
+func TestResourceManagerIsUsed(t *testing.T) {
+ for _, tc := range transportsToTest {
+ t.Run(tc.Name, func(t *testing.T) {
+ for _, testDialer := range []bool{true, false} {
+ t.Run(tc.Name+fmt.Sprintf(" test_dialer=%v", testDialer), func(t *testing.T) {
+
+ var reservedMemory, releasedMemory atomic.Int32
+ defer func() {
+ require.Equal(t, reservedMemory.Load(), releasedMemory.Load())
+ require.NotEqual(t, 0, reservedMemory.Load())
+ }()
+
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+ rcmgr := mocknetwork.NewMockResourceManager(ctrl)
+ rcmgr.EXPECT().Close()
+
+ var listener, dialer host.Host
+ var expectedPeer peer.ID
+ var expectedDir network.Direction
+ var expectedAddr gomock.Matcher
+ if testDialer {
+ listener = tc.HostGenerator(t, TransportTestCaseOpts{NoRcmgr: true})
+ dialer = tc.HostGenerator(t, TransportTestCaseOpts{NoListen: true, ResourceManager: rcmgr})
+ expectedPeer = listener.ID()
+ expectedDir = network.DirOutbound
+ expectedAddr = matest.MultiaddrMatcher{Multiaddr: listener.Addrs()[0]}
+ } else {
+ listener = tc.HostGenerator(t, TransportTestCaseOpts{ResourceManager: rcmgr})
+ dialer = tc.HostGenerator(t, TransportTestCaseOpts{NoListen: true, NoRcmgr: true})
+ expectedPeer = dialer.ID()
+ expectedDir = network.DirInbound
+ expectedAddr = gomock.Any()
+ }
+
+ peerScope := mocknetwork.NewMockPeerScope(ctrl)
+ peerScope.EXPECT().ReserveMemory(gomock.Any(), gomock.Any()).AnyTimes().Do(func(amount int, _ uint8) {
+ reservedMemory.Add(int32(amount))
+ })
+ peerScope.EXPECT().ReleaseMemory(gomock.Any()).AnyTimes().Do(func(amount int) {
+ releasedMemory.Add(int32(amount))
+ })
+ peerScope.EXPECT().BeginSpan().AnyTimes().DoAndReturn(func() (network.ResourceScopeSpan, error) {
+ s := mocknetwork.NewMockResourceScopeSpan(ctrl)
+ s.EXPECT().BeginSpan().AnyTimes().Return(mocknetwork.NewMockResourceScopeSpan(ctrl), nil)
+ // No need to track these memory reservations since we assert that Done is called
+ s.EXPECT().ReserveMemory(gomock.Any(), gomock.Any())
+ s.EXPECT().Done()
+ return s, nil
+ })
+ var calledSetPeer atomic.Bool
+
+ connScope := mocknetwork.NewMockConnManagementScope(ctrl)
+ connScope.EXPECT().SetPeer(expectedPeer).Do(func(peer.ID) {
+ calledSetPeer.Store(true)
+ })
+ connScope.EXPECT().PeerScope().AnyTimes().DoAndReturn(func() network.PeerScope {
+ if calledSetPeer.Load() {
+ return peerScope
+ }
+ return nil
+ })
+ if tc.Name == "WebRTC" {
+ // the WebRTC receive buffer is a fixed-size buffer allocated up front
+ connScope.EXPECT().ReserveMemory(gomock.Any(), gomock.Any())
+ }
+ connScope.EXPECT().Done().MinTimes(1)
+ // UDP-based transports don't consume a file descriptor per connection
+ udpTransportRegex := regexp.MustCompile(`QUIC|WebTransport|WebRTC`)
+ expectFd := !udpTransportRegex.MatchString(tc.Name)
+
+ if !testDialer && (strings.Contains(tc.Name, "QUIC") || strings.Contains(tc.Name, "WebTransport")) {
+ rcmgr.EXPECT().VerifySourceAddress(gomock.Any()).Return(false)
+ }
+ rcmgr.EXPECT().OpenConnection(expectedDir, expectFd, expectedAddr).Return(connScope, nil)
+
+ var allStreamsDone sync.WaitGroup
+ rcmgr.EXPECT().OpenStream(expectedPeer, gomock.Any()).AnyTimes().DoAndReturn(func(_ peer.ID, _ network.Direction) (network.StreamManagementScope, error) {
+ allStreamsDone.Add(1)
+ streamScope := mocknetwork.NewMockStreamManagementScope(ctrl)
+ // No need to track these memory reservations since we assert that Done is called
+ streamScope.EXPECT().ReserveMemory(gomock.Any(), gomock.Any()).AnyTimes()
+ streamScope.EXPECT().ReleaseMemory(gomock.Any()).AnyTimes()
+ streamScope.EXPECT().BeginSpan().AnyTimes().DoAndReturn(func() (network.ResourceScopeSpan, error) {
+ s := mocknetwork.NewMockResourceScopeSpan(ctrl)
+ s.EXPECT().BeginSpan().AnyTimes().Return(mocknetwork.NewMockResourceScopeSpan(ctrl), nil)
+ s.EXPECT().Done()
+ return s, nil
+ })
+
+ streamScope.EXPECT().SetService(gomock.Any()).MaxTimes(1)
+ streamScope.EXPECT().SetProtocol(gomock.Any())
+
+ streamScope.EXPECT().Done().Do(func() {
+ allStreamsDone.Done()
+ })
+ return streamScope, nil
+ })
+
+ require.NoError(t, dialer.Connect(context.Background(), peer.AddrInfo{
+ ID: listener.ID(),
+ Addrs: listener.Addrs(),
+ }))
+ // Wait for any in-progress identifies to finish. We shouldn't have to do
+ // this, but the basic host currently always runs identify.
+ <-dialer.(interface{ IDService() identify.IDService }).IDService().IdentifyWait(dialer.Network().ConnsToPeer(listener.ID())[0])
+ <-listener.(interface{ IDService() identify.IDService }).IDService().IdentifyWait(listener.Network().ConnsToPeer(dialer.ID())[0])
+ <-ping.Ping(context.Background(), dialer, listener.ID())
+ err := dialer.Network().ClosePeer(listener.ID())
+ require.NoError(t, err)
+
+ // Wait a bit for any pending .Adds before we call .Wait, to avoid a data race.
+ // This shouldn't be necessary, since it should be impossible for an OpenStream
+ // to happen *after* a ClosePeer; in practice, however, it does, and it leads
+ // to test flakiness.
+ time.Sleep(10 * time.Millisecond)
+ allStreamsDone.Wait()
+ dialer.Close()
+ listener.Close()
+ })
+ }
+ })
+ }
+}
diff --git a/p2p/test/transport/transport_test.go b/p2p/test/transport/transport_test.go
new file mode 100644
index 0000000000..f529488f05
--- /dev/null
+++ b/p2p/test/transport/transport_test.go
@@ -0,0 +1,1166 @@
+package transport_integration
+
+import (
+ "bytes"
+ "context"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/tls"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "errors"
+ "fmt"
+ "io"
+ "math/big"
+ "net"
+ "regexp"
+ "runtime"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ libp2ptls "github.com/libp2p/go-libp2p/p2p/security/tls"
+
+ "github.com/libp2p/go-libp2p"
+ "github.com/libp2p/go-libp2p/config"
+ "github.com/libp2p/go-libp2p/core/connmgr"
+ "github.com/libp2p/go-libp2p/core/event"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ mocknetwork "github.com/libp2p/go-libp2p/core/network/mocks"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ "github.com/libp2p/go-libp2p/core/sec"
+ rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
+ "github.com/libp2p/go-libp2p/p2p/muxer/yamux"
+ "github.com/libp2p/go-libp2p/p2p/net/swarm"
+ "github.com/libp2p/go-libp2p/p2p/protocol/ping"
+ "github.com/libp2p/go-libp2p/p2p/security/noise"
+ "github.com/libp2p/go-libp2p/p2p/transport/quicreuse"
+ "github.com/libp2p/go-libp2p/p2p/transport/tcp"
+ libp2pwebrtc "github.com/libp2p/go-libp2p/p2p/transport/webrtc"
+ "github.com/libp2p/go-libp2p/p2p/transport/websocket"
+ "go.uber.org/mock/gomock"
+
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+type TransportTestCase struct {
+ Name string
+ HostGenerator func(t *testing.T, opts TransportTestCaseOpts) host.Host
+}
+
+type TransportTestCaseOpts struct {
+ NoListen bool
+ NoRcmgr bool
+ ConnGater connmgr.ConnectionGater
+ ResourceManager network.ResourceManager
+}
+
+func transformOpts(opts TransportTestCaseOpts) []config.Option {
+ var libp2pOpts []libp2p.Option
+
+ if opts.NoRcmgr {
+ libp2pOpts = append(libp2pOpts, libp2p.ResourceManager(&network.NullResourceManager{}))
+ }
+ if opts.ConnGater != nil {
+ libp2pOpts = append(libp2pOpts, libp2p.ConnectionGater(opts.ConnGater))
+ }
+
+ if opts.ResourceManager != nil {
+ libp2pOpts = append(libp2pOpts, libp2p.ResourceManager(opts.ResourceManager))
+ }
+ return libp2pOpts
+}
+
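+// selfSignedTLSConfig generates an in-memory, self-signed ECDSA P-256
+// certificate, valid for one year, for the secured WebSocket test cases.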
+func selfSignedTLSConfig(t *testing.T) *tls.Config {
+ t.Helper()
+ priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ require.NoError(t, err)
+
+ notBefore := time.Now()
+ notAfter := notBefore.Add(365 * 24 * time.Hour)
+
+ serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
+ serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
+ require.NoError(t, err)
+
+ certTemplate := x509.Certificate{
+ SerialNumber: serialNumber,
+ Subject: pkix.Name{
+ Organization: []string{"Test"},
+ },
+ NotBefore: notBefore,
+ NotAfter: notAfter,
+ KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
+ ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
+ BasicConstraintsValid: true,
+ }
+
+ derBytes, err := x509.CreateCertificate(rand.Reader, &certTemplate, &certTemplate, &priv.PublicKey, priv)
+ require.NoError(t, err)
+
+ cert := tls.Certificate{
+ Certificate: [][]byte{derBytes},
+ PrivateKey: priv,
+ }
+
+ tlsConfig := &tls.Config{
+ Certificates: []tls.Certificate{cert},
+ }
+ return tlsConfig
+}
+
+var transportsToTest = []TransportTestCase{
+ {
+ Name: "TCP / Noise / Yamux",
+ HostGenerator: func(t *testing.T, opts TransportTestCaseOpts) host.Host {
+ libp2pOpts := transformOpts(opts)
+ libp2pOpts = append(libp2pOpts, libp2p.Security(noise.ID, noise.New))
+ libp2pOpts = append(libp2pOpts, libp2p.Muxer(yamux.ID, yamux.DefaultTransport))
+ if opts.NoListen {
+ libp2pOpts = append(libp2pOpts, libp2p.NoListenAddrs)
+ } else {
+ libp2pOpts = append(libp2pOpts, libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0"))
+ }
+ h, err := libp2p.New(libp2pOpts...)
+ require.NoError(t, err)
+ return h
+ },
+ },
+ {
+ Name: "TCP / TLS / Yamux",
+ HostGenerator: func(t *testing.T, opts TransportTestCaseOpts) host.Host {
+ libp2pOpts := transformOpts(opts)
+ libp2pOpts = append(libp2pOpts, libp2p.Security(libp2ptls.ID, libp2ptls.New))
+ libp2pOpts = append(libp2pOpts, libp2p.Muxer(yamux.ID, yamux.DefaultTransport))
+ if opts.NoListen {
+ libp2pOpts = append(libp2pOpts, libp2p.NoListenAddrs)
+ } else {
+ libp2pOpts = append(libp2pOpts, libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0"))
+ }
+ h, err := libp2p.New(libp2pOpts...)
+ require.NoError(t, err)
+ return h
+ },
+ },
+ {
+ Name: "TCP-Shared / TLS / Yamux",
+ HostGenerator: func(t *testing.T, opts TransportTestCaseOpts) host.Host {
+ libp2pOpts := transformOpts(opts)
+ libp2pOpts = append(libp2pOpts, libp2p.ShareTCPListener())
+ libp2pOpts = append(libp2pOpts, libp2p.Security(libp2ptls.ID, libp2ptls.New))
+ libp2pOpts = append(libp2pOpts, libp2p.Muxer(yamux.ID, yamux.DefaultTransport))
+ if opts.NoListen {
+ libp2pOpts = append(libp2pOpts, libp2p.NoListenAddrs)
+ } else {
+ libp2pOpts = append(libp2pOpts, libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0"))
+ }
+ h, err := libp2p.New(libp2pOpts...)
+ require.NoError(t, err)
+ return h
+ },
+ },
+ {
+ Name: "TCP-Shared-WithMetrics / TLS / Yamux",
+ HostGenerator: func(t *testing.T, opts TransportTestCaseOpts) host.Host {
+ libp2pOpts := transformOpts(opts)
+ libp2pOpts = append(libp2pOpts, libp2p.ShareTCPListener())
+ libp2pOpts = append(libp2pOpts, libp2p.Security(libp2ptls.ID, libp2ptls.New))
+ libp2pOpts = append(libp2pOpts, libp2p.Muxer(yamux.ID, yamux.DefaultTransport))
+ libp2pOpts = append(libp2pOpts, libp2p.Transport(tcp.NewTCPTransport, tcp.WithMetrics()))
+ if opts.NoListen {
+ libp2pOpts = append(libp2pOpts, libp2p.NoListenAddrs)
+ } else {
+ libp2pOpts = append(libp2pOpts, libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0"))
+ }
+ h, err := libp2p.New(libp2pOpts...)
+ require.NoError(t, err)
+ return h
+ },
+ },
+ {
+ Name: "TCP-WithMetrics / TLS / Yamux",
+ HostGenerator: func(t *testing.T, opts TransportTestCaseOpts) host.Host {
+ libp2pOpts := transformOpts(opts)
+ libp2pOpts = append(libp2pOpts, libp2p.Security(libp2ptls.ID, libp2ptls.New))
+ libp2pOpts = append(libp2pOpts, libp2p.Muxer(yamux.ID, yamux.DefaultTransport))
+ libp2pOpts = append(libp2pOpts, libp2p.Transport(tcp.NewTCPTransport, tcp.WithMetrics()))
+ if opts.NoListen {
+ libp2pOpts = append(libp2pOpts, libp2p.NoListenAddrs)
+ } else {
+ libp2pOpts = append(libp2pOpts, libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0"))
+ }
+ h, err := libp2p.New(libp2pOpts...)
+ require.NoError(t, err)
+ return h
+ },
+ },
+ {
+ Name: "WebSocket-Shared",
+ HostGenerator: func(t *testing.T, opts TransportTestCaseOpts) host.Host {
+ libp2pOpts := transformOpts(opts)
+ libp2pOpts = append(libp2pOpts, libp2p.ShareTCPListener())
+ if opts.NoListen {
+ libp2pOpts = append(libp2pOpts, libp2p.NoListenAddrs)
+ } else {
+ libp2pOpts = append(libp2pOpts, libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0/ws"))
+ }
+ h, err := libp2p.New(libp2pOpts...)
+ require.NoError(t, err)
+ return h
+ },
+ },
+ {
+ Name: "WebSocket-Secured-Shared",
+ HostGenerator: func(t *testing.T, opts TransportTestCaseOpts) host.Host {
+ libp2pOpts := transformOpts(opts)
+ libp2pOpts = append(libp2pOpts, libp2p.ShareTCPListener())
+ if opts.NoListen {
+ config := tls.Config{InsecureSkipVerify: true}
+ libp2pOpts = append(libp2pOpts, libp2p.NoListenAddrs, libp2p.Transport(websocket.New, websocket.WithTLSClientConfig(&config)))
+ } else {
+ config := selfSignedTLSConfig(t)
+ libp2pOpts = append(libp2pOpts, libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0/sni/localhost/tls/ws"), libp2p.Transport(websocket.New, websocket.WithTLSConfig(config)))
+ }
+ h, err := libp2p.New(libp2pOpts...)
+ require.NoError(t, err)
+ return h
+ },
+ },
+ {
+ Name: "WebSocket",
+ HostGenerator: func(t *testing.T, opts TransportTestCaseOpts) host.Host {
+ libp2pOpts := transformOpts(opts)
+ if opts.NoListen {
+ libp2pOpts = append(libp2pOpts, libp2p.NoListenAddrs)
+ } else {
+ libp2pOpts = append(libp2pOpts, libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0/ws"))
+ }
+ h, err := libp2p.New(libp2pOpts...)
+ require.NoError(t, err)
+ return h
+ },
+ },
+ {
+ Name: "WebSocket-Secured",
+ HostGenerator: func(t *testing.T, opts TransportTestCaseOpts) host.Host {
+ libp2pOpts := transformOpts(opts)
+ if opts.NoListen {
+ config := tls.Config{InsecureSkipVerify: true}
+ libp2pOpts = append(libp2pOpts, libp2p.NoListenAddrs, libp2p.Transport(websocket.New, websocket.WithTLSClientConfig(&config)))
+ } else {
+ config := selfSignedTLSConfig(t)
+ libp2pOpts = append(libp2pOpts, libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0/sni/localhost/tls/ws"), libp2p.Transport(websocket.New, websocket.WithTLSConfig(config)))
+ }
+ h, err := libp2p.New(libp2pOpts...)
+ require.NoError(t, err)
+ return h
+ },
+ },
+ {
+ Name: "QUIC",
+ HostGenerator: func(t *testing.T, opts TransportTestCaseOpts) host.Host {
+ libp2pOpts := transformOpts(opts)
+ if opts.NoListen {
+ libp2pOpts = append(libp2pOpts, libp2p.NoListenAddrs)
+ } else {
+ libp2pOpts = append(libp2pOpts, libp2p.ListenAddrStrings("/ip4/127.0.0.1/udp/0/quic-v1"))
+ }
+ h, err := libp2p.New(libp2pOpts...)
+ require.NoError(t, err)
+ return h
+ },
+ },
+ {
+ Name: "QUIC-CustomReuse",
+ HostGenerator: func(t *testing.T, opts TransportTestCaseOpts) host.Host {
+ libp2pOpts := transformOpts(opts)
+ if opts.NoListen {
+ libp2pOpts = append(libp2pOpts, libp2p.NoListenAddrs, libp2p.QUICReuse(quicreuse.NewConnManager))
+ } else {
+ qr := libp2p.QUICReuse(quicreuse.NewConnManager)
+ if !opts.NoRcmgr && opts.ResourceManager != nil {
+ qr = libp2p.QUICReuse(
+ quicreuse.NewConnManager,
+ quicreuse.VerifySourceAddress(opts.ResourceManager.VerifySourceAddress))
+ }
+ libp2pOpts = append(libp2pOpts,
+ qr,
+ libp2p.ListenAddrStrings("/ip4/127.0.0.1/udp/0/quic-v1"),
+ )
+ }
+ h, err := libp2p.New(libp2pOpts...)
+ require.NoError(t, err)
+ return h
+ },
+ },
+ {
+ Name: "WebTransport",
+ HostGenerator: func(t *testing.T, opts TransportTestCaseOpts) host.Host {
+ libp2pOpts := transformOpts(opts)
+ if opts.NoListen {
+ libp2pOpts = append(libp2pOpts, libp2p.NoListenAddrs)
+ } else {
+ libp2pOpts = append(libp2pOpts, libp2p.ListenAddrStrings("/ip4/127.0.0.1/udp/0/quic-v1/webtransport"))
+ }
+ h, err := libp2p.New(libp2pOpts...)
+ require.NoError(t, err)
+ return h
+ },
+ },
+ {
+ Name: "WebTransport-CustomReuse",
+ HostGenerator: func(t *testing.T, opts TransportTestCaseOpts) host.Host {
+ libp2pOpts := transformOpts(opts)
+ if opts.NoListen {
+ libp2pOpts = append(libp2pOpts, libp2p.NoListenAddrs, libp2p.QUICReuse(quicreuse.NewConnManager))
+ } else {
+ qr := libp2p.QUICReuse(quicreuse.NewConnManager)
+ if !opts.NoRcmgr && opts.ResourceManager != nil {
+ qr = libp2p.QUICReuse(
+ quicreuse.NewConnManager,
+ quicreuse.VerifySourceAddress(opts.ResourceManager.VerifySourceAddress),
+ )
+ }
+ libp2pOpts = append(libp2pOpts,
+ qr,
+ libp2p.ListenAddrStrings("/ip4/127.0.0.1/udp/0/quic-v1/webtransport"),
+ )
+ }
+ h, err := libp2p.New(libp2pOpts...)
+ require.NoError(t, err)
+ return h
+ },
+ },
+ {
+ Name: "WebRTC",
+ HostGenerator: func(t *testing.T, opts TransportTestCaseOpts) host.Host {
+ libp2pOpts := transformOpts(opts)
+ libp2pOpts = append(libp2pOpts, libp2p.Transport(libp2pwebrtc.New))
+ if opts.NoListen {
+ libp2pOpts = append(libp2pOpts, libp2p.NoListenAddrs)
+ } else {
+ libp2pOpts = append(libp2pOpts, libp2p.ListenAddrStrings("/ip4/127.0.0.1/udp/0/webrtc-direct"))
+ }
+ h, err := libp2p.New(libp2pOpts...)
+ require.NoError(t, err)
+ return h
+ },
+ },
+}
+
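+// Every test below iterates transportsToTest, creating a listening host and a
+// dialing host per transport and running the scenario as a subtest.
+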
+func TestPing(t *testing.T) {
+ for _, tc := range transportsToTest {
+ t.Run(tc.Name, func(t *testing.T) {
+ h1 := tc.HostGenerator(t, TransportTestCaseOpts{})
+ h2 := tc.HostGenerator(t, TransportTestCaseOpts{NoListen: true})
+ defer h1.Close()
+ defer h2.Close()
+
+ require.NoError(t, h2.Connect(context.Background(), peer.AddrInfo{
+ ID: h1.ID(),
+ Addrs: h1.Addrs(),
+ }))
+
+ ctx := context.Background()
+ res := <-ping.Ping(ctx, h2, h1.ID())
+ require.NoError(t, res.Error)
+ })
+ }
+}
+
+func TestBigPing(t *testing.T) {
+ // 64k buffers
+ sendBuf := make([]byte, 64<<10)
+ recvBuf := make([]byte, 64<<10)
+ const totalSends = 64
+
+ // Fill with random bytes
+ _, err := rand.Read(sendBuf)
+ require.NoError(t, err)
+
+ for _, tc := range transportsToTest {
+ t.Run(tc.Name, func(t *testing.T) {
+ h1 := tc.HostGenerator(t, TransportTestCaseOpts{})
+ h2 := tc.HostGenerator(t, TransportTestCaseOpts{NoListen: true})
+ defer h1.Close()
+ defer h2.Close()
+
+ require.NoError(t, h2.Connect(context.Background(), peer.AddrInfo{
+ ID: h1.ID(),
+ Addrs: h1.Addrs(),
+ }))
+
+ h1.SetStreamHandler("/big-ping", func(s network.Stream) {
+ io.Copy(s, s)
+ s.Close()
+ })
+
+ errCh := make(chan error, 1)
+ allocs := testing.AllocsPerRun(10, func() {
+ s, err := h2.NewStream(context.Background(), h1.ID(), "/big-ping")
+ require.NoError(t, err)
+ defer s.Close()
+
+ go func() {
+ for i := 0; i < totalSends; i++ {
+ _, err := io.ReadFull(s, recvBuf)
+ if err != nil {
+ errCh <- err
+ return
+ }
+ if !bytes.Equal(sendBuf, recvBuf) {
+ errCh <- fmt.Errorf("received data does not match sent data")
+ }
+
+ }
+ _, err = s.Read([]byte{0})
+ errCh <- err
+ }()
+
+ for i := 0; i < totalSends; i++ {
+ s.Write(sendBuf)
+ }
+ s.CloseWrite()
+ require.ErrorIs(t, <-errCh, io.EOF)
+ })
+
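+ // Rough heuristic: we expect far fewer than one allocation per four bytes
+ // echoed. Only log on failure, since allocation counts vary by transport.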
+ if int(allocs) > (len(sendBuf)*totalSends)/4 {
+ t.Logf("Expected fewer allocs, got: %f", allocs)
+ }
+ })
+ }
+}
+
+// TestLotsOfDataManyStreams tests sending a lot of data on multiple streams.
+func TestLotsOfDataManyStreams(t *testing.T) {
+ // Skip on windows because of https://github.com/libp2p/go-libp2p/issues/2341
+ if runtime.GOOS == "windows" {
+ t.Skip("Skipping on windows because of https://github.com/libp2p/go-libp2p/issues/2341")
+ }
+
+ // 64k buffer
+ const bufSize = 64 << 10
+ sendBuf := [bufSize]byte{}
+ const totalStreams = 500
+ const parallel = 8
+ // Total data sent exceeds 20 MiB
+ require.Greater(t, len(sendBuf)*totalStreams, 20<<20)
+ t.Log("Total sends:", len(sendBuf)*totalStreams)
+
+ // Fill with random bytes
+ _, err := rand.Read(sendBuf[:])
+ require.NoError(t, err)
+
+ for _, tc := range transportsToTest {
+ t.Run(tc.Name, func(t *testing.T) {
+ h1 := tc.HostGenerator(t, TransportTestCaseOpts{})
+ h2 := tc.HostGenerator(t, TransportTestCaseOpts{NoListen: true})
+ defer h1.Close()
+ defer h2.Close()
+ start := time.Now()
+ defer func() {
+ t.Log("Total time:", time.Since(start))
+ }()
+
+ require.NoError(t, h2.Connect(context.Background(), peer.AddrInfo{
+ ID: h1.ID(),
+ Addrs: h1.Addrs(),
+ }))
+
+ h1.SetStreamHandler("/big-ping", func(s network.Stream) {
+ io.Copy(s, s)
+ s.Close()
+ })
+
+ sem := make(chan struct{}, parallel)
+ var wg sync.WaitGroup
+ for i := 0; i < totalStreams; i++ {
+ wg.Add(1)
+ sem <- struct{}{}
+ go func() {
+ defer wg.Done()
+ recvBuf := [bufSize]byte{}
+ defer func() { <-sem }()
+
+ s, err := h2.NewStream(context.Background(), h1.ID(), "/big-ping")
+ require.NoError(t, err)
+ defer s.Close()
+
+ _, err = s.Write(sendBuf[:])
+ require.NoError(t, err)
+ s.CloseWrite()
+
+ _, err = io.ReadFull(s, recvBuf[:])
+ require.NoError(t, err)
+ require.Equal(t, sendBuf, recvBuf)
+
+ _, err = s.Read([]byte{0})
+ require.ErrorIs(t, err, io.EOF)
+ }()
+ }
+
+ wg.Wait()
+ })
+ }
+}
+
+func TestManyStreams(t *testing.T) {
+ const streamCount = 128
+ for _, tc := range transportsToTest {
+ t.Run(tc.Name, func(t *testing.T) {
+ h1 := tc.HostGenerator(t, TransportTestCaseOpts{NoRcmgr: true})
+ h2 := tc.HostGenerator(t, TransportTestCaseOpts{NoListen: true, NoRcmgr: true})
+ defer h1.Close()
+ defer h2.Close()
+
+ require.NoError(t, h2.Connect(context.Background(), peer.AddrInfo{
+ ID: h1.ID(),
+ Addrs: h1.Addrs(),
+ }))
+
+ h1.SetStreamHandler("echo", func(s network.Stream) {
+ io.Copy(s, s)
+ s.CloseWrite()
+ })
+
+ streams := make([]network.Stream, streamCount)
+ for i := 0; i < streamCount; i++ {
+ s, err := h2.NewStream(context.Background(), h1.ID(), "echo")
+ require.NoError(t, err)
+ streams[i] = s
+ }
+
+ wg := sync.WaitGroup{}
+ wg.Add(streamCount)
+ errCh := make(chan error, 1)
+ for _, s := range streams {
+ go func(s network.Stream) {
+ defer wg.Done()
+
+ s.Write([]byte("hello"))
+ s.CloseWrite()
+ b, err := io.ReadAll(s)
+ if err == nil {
+ if !bytes.Equal(b, []byte("hello")) {
+ err = fmt.Errorf("received data does not match sent data")
+ }
+ }
+ if err != nil {
+ select {
+ case errCh <- err:
+ default:
+ }
+ }
+ }(s)
+ }
+ wg.Wait()
+ close(errCh)
+
+ require.NoError(t, <-errCh)
+ for _, s := range streams {
+ require.NoError(t, s.Close())
+ }
+ })
+ }
+}
+
+// TestMoreStreamsThanOurLimits tests handling more streams than our and the
+// peer's resource limits allow. It spawns 1024 goroutines that try to open a
+// stream and send and receive data. If they encounter an error, they try
+// again after a sleep. If the transport is well behaved, eventually all
+// goroutines will have sent and received a message.
+func TestMoreStreamsThanOurLimits(t *testing.T) {
+ const streamCount = 1024
+ for _, tc := range transportsToTest {
+ t.Run(tc.Name, func(t *testing.T) {
+ if strings.Contains(tc.Name, "WebRTC") {
+ t.Skip("This test potentially exhausts the uint16 WebRTC stream ID space.")
+ }
+ listenerLimits := rcmgr.PartialLimitConfig{
+ PeerDefault: rcmgr.ResourceLimits{
+ Streams: 32,
+ StreamsInbound: 16,
+ StreamsOutbound: 16,
+ },
+ }
+ r, err := rcmgr.NewResourceManager(rcmgr.NewFixedLimiter(listenerLimits.Build(rcmgr.DefaultLimits.AutoScale())))
+ require.NoError(t, err)
+ listener := tc.HostGenerator(t, TransportTestCaseOpts{ResourceManager: r})
+ dialer := tc.HostGenerator(t, TransportTestCaseOpts{NoListen: true, NoRcmgr: true})
+ defer listener.Close()
+ defer dialer.Close()
+
+ require.NoError(t, dialer.Connect(context.Background(), peer.AddrInfo{
+ ID: listener.ID(),
+ Addrs: listener.Addrs(),
+ }))
+
+ var handledStreams atomic.Int32
+ var sawFirstErr atomic.Bool
+
+ workQueue := make(chan struct{}, streamCount)
+ for i := 0; i < streamCount; i++ {
+ workQueue <- struct{}{}
+ }
+ close(workQueue)
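+ // The queue is pre-filled with one token per stream and then closed, so
+ // each worker exits once all stream attempts have been claimed.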
+
+ listener.SetStreamHandler("echo", func(s network.Stream) {
+ // Wait a bit so that we have more parallel streams open at the same time
+ time.Sleep(time.Millisecond * 10)
+ io.Copy(s, s)
+ s.Close()
+ })
+
+ wg := sync.WaitGroup{}
+ errCh := make(chan error, 1)
+ var completedStreams atomic.Int32
+
+ const maxWorkerCount = streamCount
+ workerCount := 4
+
+ var startWorker func(workerIdx int)
+ startWorker = func(workerIdx int) {
+ wg.Add(1)
+ defer wg.Done()
+ for {
+ _, ok := <-workQueue
+ if !ok {
+ return
+ }
+
+ // Inline function so we can use defer
+ func() {
+ var didErr bool
+ defer completedStreams.Add(1)
+ defer func() {
+ // Only the first worker adds more workers
+ if workerIdx == 0 && !didErr && !sawFirstErr.Load() {
+ nextWorkerCount := workerCount * 2
+ if nextWorkerCount < maxWorkerCount {
+ for i := workerCount; i < nextWorkerCount; i++ {
+ go startWorker(i)
+ }
+ workerCount = nextWorkerCount
+ }
+ }
+ }()
+
+ var s network.Stream
+ var err error
+ // maxRetries caps how many times we retry after an error; the value is arbitrary.
+ maxRetries := streamCount * 4
+ shouldRetry := func(_ error) bool {
+ didErr = true
+ sawFirstErr.Store(true)
+ maxRetries--
+ if maxRetries == 0 || len(errCh) > 0 {
+ select {
+ case errCh <- errors.New("max retries exceeded"):
+ default:
+ }
+ return false
+ }
+ return true
+ }
+
+ for {
+ s, err = dialer.NewStream(context.Background(), listener.ID(), "echo")
+ if err != nil {
+ if shouldRetry(err) {
+ time.Sleep(50 * time.Millisecond)
+ continue
+ }
+ t.Logf("opening stream failed: %v", err)
+ return
+ }
+ err = func(s network.Stream) error {
+ defer s.Close()
+ err = s.SetDeadline(time.Now().Add(100 * time.Millisecond))
+ if err != nil {
+ return err
+ }
+
+ _, err = s.Write([]byte("hello"))
+ if err != nil {
+ return err
+ }
+
+ err = s.CloseWrite()
+ if err != nil {
+ return err
+ }
+
+ b, err := io.ReadAll(s)
+ if err != nil {
+ return err
+ }
+ if !bytes.Equal(b, []byte("hello")) {
+ return errors.New("received data does not match sent data")
+ }
+ handledStreams.Add(1)
+
+ return nil
+ }(s)
+ if err != nil && shouldRetry(err) {
+ time.Sleep(50 * time.Millisecond)
+ continue
+ }
+ return
+ }
+ }()
+ }
+ }
+
+ // Start the remaining initial workers in parallel; worker 0 runs below on this goroutine
+ for i := 1; i < workerCount; i++ {
+ go startWorker(i)
+ }
+
+ // Start the first worker
+ startWorker(0)
+
+ wg.Wait()
+ close(errCh)
+
+ require.NoError(t, <-errCh)
+ require.Equal(t, streamCount, int(handledStreams.Load()))
+ require.True(t, sawFirstErr.Load(), "Expected to see an error from the peer")
+ })
+ }
+}
+
+func TestListenerStreamResets(t *testing.T) {
+ for _, tc := range transportsToTest {
+ t.Run(tc.Name, func(t *testing.T) {
+ h1 := tc.HostGenerator(t, TransportTestCaseOpts{})
+ h2 := tc.HostGenerator(t, TransportTestCaseOpts{NoListen: true})
+ defer h1.Close()
+ defer h2.Close()
+
+ require.NoError(t, h2.Connect(context.Background(), peer.AddrInfo{
+ ID: h1.ID(),
+ Addrs: h1.Addrs(),
+ }))
+
+ h1.SetStreamHandler("reset", func(s network.Stream) {
+ s.Reset()
+ })
+
+ s, err := h2.NewStream(context.Background(), h1.ID(), "reset")
+ if err != nil {
+ require.ErrorIs(t, err, network.ErrReset)
+ return
+ }
+
+ _, err = s.Read([]byte{0})
+ require.ErrorIs(t, err, network.ErrReset)
+ })
+ }
+}
+
+func TestDialerStreamResets(t *testing.T) {
+ for _, tc := range transportsToTest {
+ t.Run(tc.Name, func(t *testing.T) {
+ h1 := tc.HostGenerator(t, TransportTestCaseOpts{})
+ h2 := tc.HostGenerator(t, TransportTestCaseOpts{NoListen: true})
+ defer h1.Close()
+ defer h2.Close()
+
+ require.NoError(t, h2.Connect(context.Background(), peer.AddrInfo{
+ ID: h1.ID(),
+ Addrs: h1.Addrs(),
+ }))
+
+ errCh := make(chan error, 1)
+ acceptedCh := make(chan struct{}, 1)
+ h1.SetStreamHandler("echo", func(s network.Stream) {
+ acceptedCh <- struct{}{}
+ _, err := io.Copy(s, s)
+ errCh <- err
+ })
+
+ s, err := h2.NewStream(context.Background(), h1.ID(), "echo")
+ require.NoError(t, err)
+ s.Write([]byte{})
+ <-acceptedCh
+ s.Reset()
+ require.ErrorIs(t, <-errCh, network.ErrReset)
+ })
+ }
+}
+
+func TestStreamReadDeadline(t *testing.T) {
+ for _, tc := range transportsToTest {
+ t.Run(tc.Name, func(t *testing.T) {
+ h1 := tc.HostGenerator(t, TransportTestCaseOpts{})
+ h2 := tc.HostGenerator(t, TransportTestCaseOpts{NoListen: true})
+ defer h1.Close()
+ defer h2.Close()
+
+ require.NoError(t, h2.Connect(context.Background(), peer.AddrInfo{
+ ID: h1.ID(),
+ Addrs: h1.Addrs(),
+ }))
+
+ h1.SetStreamHandler("echo", func(s network.Stream) {
+ io.Copy(s, s)
+ })
+
+ s, err := h2.NewStream(context.Background(), h1.ID(), "echo")
+ require.NoError(t, err)
+ require.NoError(t, s.SetReadDeadline(time.Now().Add(100*time.Millisecond)))
+ _, err = s.Read([]byte{0})
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "deadline")
+ var nerr net.Error
+ require.ErrorAs(t, err, &nerr, "expected a net.Error")
+ require.True(t, nerr.Timeout(), "expected net.Error.Timeout() == true")
+ // now test that the stream is still usable
+ s.SetReadDeadline(time.Time{})
+ _, err = s.Write([]byte("foobar"))
+ require.NoError(t, err)
+ b := make([]byte, 6)
+ _, err = s.Read(b)
+ require.Equal(t, "foobar", string(b))
+ require.NoError(t, err)
+ })
+ }
+}
+
+func TestDiscoverPeerIDFromSecurityNegotiation(t *testing.T) {
+ // extracts the peerID of the dialed peer from the error
+ extractPeerIDFromError := func(inputErr error) (peer.ID, error) {
+ var dialErr *swarm.DialError
+ if !errors.As(inputErr, &dialErr) {
+ return "", inputErr
+ }
+ innerErr := dialErr.DialErrors[0].Cause
+
+ var peerIDMismatchErr sec.ErrPeerIDMismatch
+ if errors.As(innerErr, &peerIDMismatchErr) {
+ return peerIDMismatchErr.Actual, nil
+ }
+
+ return "", inputErr
+ }
+
+ for _, tc := range transportsToTest {
+ t.Run(tc.Name, func(t *testing.T) {
+ h1 := tc.HostGenerator(t, TransportTestCaseOpts{})
+ h2 := tc.HostGenerator(t, TransportTestCaseOpts{NoListen: true})
+ defer h1.Close()
+ defer h2.Close()
+
+ // Verify that we can discover the target's real peer ID when we only know its address
+ t.Helper()
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ // Use a bogus peer ID so that when we connect to the target we get an error telling
+ // us the target's real peer ID
+ bogusPeerId, err := peer.Decode("QmadAdJ3f63JyNs65X7HHzqDwV53ynvCcKtNFvdNaz3nhk")
+ require.NoError(t, err, "the hard coded bogus peerID is invalid")
+
+ ai := &peer.AddrInfo{
+ ID: bogusPeerId,
+ Addrs: []ma.Multiaddr{h1.Addrs()[0]},
+ }
+
+ // Try connecting with the bogus peer ID
+ err = h2.Connect(ctx, *ai)
+ require.Error(t, err, "somehow we successfully connected to a bogus peerID!")
+
+ // Extract the actual peer ID from the error
+ newPeerId, err := extractPeerIDFromError(err)
+ require.NoError(t, err)
+ ai.ID = newPeerId
+ // Make sure the new ID is what we expected
+ require.Equal(t, h1.ID(), ai.ID)
+
+ // and just to double-check try connecting again to make sure it works
+ require.NoError(t, h2.Connect(ctx, *ai))
+ })
+ }
+}
+
+// TestCloseConnWhenBlocked tests that the server closes the connection when the rcmgr blocks it.
+func TestCloseConnWhenBlocked(t *testing.T) {
+ for _, tc := range transportsToTest {
+ // WebRTC doesn't have a connection when rcmgr blocks it, so there's nothing to close.
+ if tc.Name == "WebRTC" {
+ continue
+ }
+ t.Run(tc.Name, func(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+ mockRcmgr := mocknetwork.NewMockResourceManager(ctrl)
+ if matched, _ := regexp.MatchString(`^(QUIC|WebTransport)`, tc.Name); matched {
+ mockRcmgr.EXPECT().VerifySourceAddress(gomock.Any()).AnyTimes().Return(false)
+ // If the initial TLS ClientHello is split across two packets, quic-go might call the
+ // transport multiple times to open a connection. It will only be called multiple times
+ // if the connection is rejected; if we were to accept the connection, it would have
+ // been called only once.
+ mockRcmgr.EXPECT().OpenConnection(network.DirInbound, gomock.Any(), gomock.Any()).Return(nil, errors.New("connection blocked")).AnyTimes()
+ } else {
+ mockRcmgr.EXPECT().OpenConnection(network.DirInbound, gomock.Any(), gomock.Any()).Return(nil, errors.New("connection blocked"))
+ }
+ mockRcmgr.EXPECT().Close().AnyTimes()
+
+ server := tc.HostGenerator(t, TransportTestCaseOpts{ResourceManager: mockRcmgr})
+ client := tc.HostGenerator(t, TransportTestCaseOpts{NoListen: true})
+ defer server.Close()
+ defer client.Close()
+
+ client.Peerstore().AddAddrs(server.ID(), server.Addrs(), peerstore.PermanentAddrTTL)
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ _, err := client.NewStream(ctx, server.ID(), ping.ID)
+ require.Error(t, err)
+ require.False(t, errors.Is(err, context.DeadlineExceeded), "expected the error not to be context deadline exceeded")
+ })
+ }
+}
+
+// TestConnDroppedWhenBlocked is similar to TestCloseConnWhenBlocked, but for
+// transports like WebRTC there is no connection to close when the resource
+// manager blocks it; the connection attempt is simply ignored. This test
+// checks that the client hits its connection deadline and that neither server
+// nor client sees a successful connection attempt.
+func TestConnDroppedWhenBlocked(t *testing.T) {
+ for _, tc := range transportsToTest {
+ if tc.Name != "WebRTC" {
+ continue
+ }
+ t.Run(tc.Name, func(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+ mockRcmgr := mocknetwork.NewMockResourceManager(ctrl)
+ mockRcmgr.EXPECT().OpenConnection(network.DirInbound, gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn(func(network.Direction, bool, ma.Multiaddr) (network.ConnManagementScope, error) {
+ // Block the connection
+ return nil, fmt.Errorf("connections blocked")
+ })
+ mockRcmgr.EXPECT().Close().AnyTimes()
+
+ server := tc.HostGenerator(t, TransportTestCaseOpts{ResourceManager: mockRcmgr})
+ client := tc.HostGenerator(t, TransportTestCaseOpts{NoListen: true})
+ defer server.Close()
+ defer client.Close()
+
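+ // Subscribe to connectedness events on both ends so we can assert below
+ // that no connection is ever established in either direction.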
+ serverSub, err := server.EventBus().Subscribe(new(event.EvtPeerConnectednessChanged))
+ require.NoError(t, err)
+ clientSub, err := client.EventBus().Subscribe(new(event.EvtPeerConnectednessChanged))
+ require.NoError(t, err)
+
+ client.Peerstore().AddAddrs(server.ID(), server.Addrs(), peerstore.PermanentAddrTTL)
+ ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+ defer cancel()
+ _, err = client.NewStream(ctx, server.ID(), ping.ID)
+ require.Error(t, err)
+ require.True(t, errors.Is(err, context.DeadlineExceeded), "The client should have hit the deadline when connecting")
+ select {
+ case <-serverSub.Out():
+ t.Fatal("expected no connected event. Connection should have failed")
+ case <-clientSub.Out():
+ t.Fatal("expected no connected event. Connection should have failed")
+ case <-time.After(time.Second):
+ }
+ })
+ }
+}
+
+// TestConnClosedWhenRemoteCloses tests that a connection is closed locally when it's closed by remote
+func TestConnClosedWhenRemoteCloses(t *testing.T) {
+ for _, tc := range transportsToTest {
+ t.Run(tc.Name, func(t *testing.T) {
+ server := tc.HostGenerator(t, TransportTestCaseOpts{})
+ client := tc.HostGenerator(t, TransportTestCaseOpts{NoListen: true})
+ defer server.Close()
+ defer client.Close()
+
+ client.Peerstore().AddAddrs(server.ID(), server.Addrs(), peerstore.PermanentAddrTTL)
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ err := client.Connect(ctx, peer.AddrInfo{ID: server.ID(), Addrs: server.Addrs()})
+ require.NoError(t, err)
+
+ require.Eventually(t, func() bool {
+ return server.Network().Connectedness(client.ID()) != network.NotConnected
+ }, 5*time.Second, 50*time.Millisecond)
+ for _, c := range client.Network().ConnsToPeer(server.ID()) {
+ c.Close()
+ }
+ require.Eventually(t, func() bool {
+ return server.Network().Connectedness(client.ID()) == network.NotConnected
+ }, 5*time.Second, 50*time.Millisecond)
+ })
+ }
+}
+
+func TestErrorCodes(t *testing.T) {
+ assertStreamErrors := func(s network.Stream, expectedError error) {
+ buf := make([]byte, 10)
+ _, err := s.Read(buf)
+ require.ErrorIs(t, err, expectedError)
+
+ _, err = s.Write(buf)
+ require.ErrorIs(t, err, expectedError)
+ }
+
+ for _, tc := range transportsToTest {
+ t.Run(tc.Name, func(t *testing.T) {
+ // Skip inside the subtest: calling t.Skipf on the parent test would abort
+ // the remaining transports, and a continue after it would be unreachable.
+ if strings.HasPrefix(tc.Name, "WebTransport") {
+ t.Skipf("skipping: %s, not implemented", tc.Name)
+ }
+ server := tc.HostGenerator(t, TransportTestCaseOpts{})
+ client := tc.HostGenerator(t, TransportTestCaseOpts{NoListen: true})
+ defer server.Close()
+ defer client.Close()
+
+ client.Peerstore().AddAddrs(server.ID(), server.Addrs(), peerstore.PermanentAddrTTL)
+
+ // setup stream handler
+ remoteStreamQ := make(chan network.Stream)
+ server.SetStreamHandler("/test", func(s network.Stream) {
+ b := make([]byte, 10)
+ n, err := s.Read(b)
+ if !assert.NoError(t, err) {
+ return
+ }
+ _, err = s.Write(b[:n])
+ if !assert.NoError(t, err) {
+ return
+ }
+ remoteStreamQ <- s
+ })
+
+ // pingPong writes and reads "hello" on the stream
+ pingPong := func(s network.Stream) {
+ buf := []byte("hello")
+ _, err := s.Write(buf)
+ require.NoError(t, err)
+
+ _, err = s.Read(buf)
+ require.NoError(t, err)
+ require.Equal(t, buf, []byte("hello"))
+ }
+
+ t.Run("StreamResetWithError", func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ s, err := client.NewStream(ctx, server.ID(), "/test")
+ require.NoError(t, err)
+ pingPong(s)
+
+ remoteStream := <-remoteStreamQ
+ defer remoteStream.Reset()
+
+ err = s.ResetWithError(42)
+ require.NoError(t, err)
+ assertStreamErrors(s, &network.StreamError{
+ ErrorCode: 42,
+ Remote: false,
+ })
+
+ assertStreamErrors(remoteStream, &network.StreamError{
+ ErrorCode: 42,
+ Remote: true,
+ })
+ })
+ t.Run("StreamResetWithErrorByRemote", func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ s, err := client.NewStream(ctx, server.ID(), "/test")
+ require.NoError(t, err)
+ pingPong(s)
+
+ remoteStream := <-remoteStreamQ
+
+ err = remoteStream.ResetWithError(42)
+ require.NoError(t, err)
+
+ assertStreamErrors(s, &network.StreamError{
+ ErrorCode: 42,
+ Remote: true,
+ })
+
+ assertStreamErrors(remoteStream, &network.StreamError{
+ ErrorCode: 42,
+ Remote: false,
+ })
+ })
+
+ t.Run("StreamResetByConnCloseWithError", func(t *testing.T) {
+ if tc.Name == "WebRTC" {
+ t.Skipf("skipping: %s, not implemented", tc.Name)
+ return
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ s, err := client.NewStream(ctx, server.ID(), "/test")
+ require.NoError(t, err)
+ pingPong(s)
+
+ remoteStream := <-remoteStreamQ
+ defer remoteStream.Reset()
+
+ err = s.Conn().CloseWithError(42)
+ require.NoError(t, err)
+
+ assertStreamErrors(s, &network.ConnError{
+ ErrorCode: 42,
+ Remote: false,
+ })
+
+ assertStreamErrors(remoteStream, &network.ConnError{
+ ErrorCode: 42,
+ Remote: true,
+ })
+ })
+
+ t.Run("NewStreamErrorByConnCloseWithError", func(t *testing.T) {
+ if tc.Name == "WebRTC" {
+ t.Skipf("skipping: %s, not implemented", tc.Name)
+ return
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ s, err := client.NewStream(ctx, server.ID(), "/test")
+ require.NoError(t, err)
+ pingPong(s)
+
+ err = s.Conn().CloseWithError(42)
+ require.NoError(t, err)
+
+ remoteStream := <-remoteStreamQ
+ defer remoteStream.Reset()
+
+ localErr := &network.ConnError{
+ ErrorCode: 42,
+ Remote: false,
+ }
+
+ remoteErr := &network.ConnError{
+ ErrorCode: 42,
+ Remote: true,
+ }
+
+ // assert these first to ensure that remote has closed the connection
+ assertStreamErrors(remoteStream, remoteErr)
+
+ _, err = s.Conn().NewStream(ctx)
+ require.ErrorIs(t, err, localErr)
+
+ _, err = remoteStream.Conn().NewStream(ctx)
+ require.ErrorIs(t, err, remoteErr)
+ })
+ })
+ }
+}
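+
+// Note: the assertions above rely on the transport propagating application
+// error codes to both ends of the connection. A minimal (illustrative, not
+// part of this test suite) consumer of these codes might look like:
+//
+//	var se *network.StreamError
+//	if errors.As(err, &se) && se.Remote {
+//		// the remote peer reset the stream with code se.ErrorCode
+//	}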
diff --git a/p2p/test/webtransport/webtransport_test.go b/p2p/test/webtransport/webtransport_test.go
new file mode 100644
index 0000000000..e9c612baac
--- /dev/null
+++ b/p2p/test/webtransport/webtransport_test.go
@@ -0,0 +1,53 @@
+package webtransport_test
+
+import (
+ "testing"
+ "time"
+
+ "github.com/benbjohnson/clock"
+ "github.com/libp2p/go-libp2p"
+ ic "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/test"
+ libp2pwebtransport "github.com/libp2p/go-libp2p/p2p/transport/webtransport"
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/stretchr/testify/require"
+)
+
+func extractCertHashes(addr ma.Multiaddr) []string {
+ var certHashesStr []string
+ ma.ForEach(addr, func(c ma.Component) bool {
+ if c.Protocol().Code == ma.P_CERTHASH {
+ certHashesStr = append(certHashesStr, c.Value())
+ }
+ return true
+ })
+ return certHashesStr
+}
+
+func TestDeterministicCertsAfterReboot(t *testing.T) {
+ priv, _, err := test.RandTestKeyPair(ic.Ed25519, 256)
+ require.NoError(t, err)
+
+ cl := clock.NewMock()
+ // Move one year ahead to avoid edge cases around the epoch
+ cl.Add(time.Hour * 24 * 365)
+ h, err := libp2p.New(libp2p.NoTransports, libp2p.Transport(libp2pwebtransport.New, libp2pwebtransport.WithClock(cl)), libp2p.Identity(priv))
+ require.NoError(t, err)
+ err = h.Network().Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport"))
+ require.NoError(t, err)
+
+ prevCerthashes := extractCertHashes(h.Addrs()[0])
+ h.Close()
+
+ h, err = libp2p.New(libp2p.NoTransports, libp2p.Transport(libp2pwebtransport.New, libp2pwebtransport.WithClock(cl)), libp2p.Identity(priv))
+ require.NoError(t, err)
+ defer h.Close()
+ err = h.Network().Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport"))
+ require.NoError(t, err)
+
+ nextCertHashes := extractCertHashes(h.Addrs()[0])
+
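+ // With the same identity key and the same (mocked) clock, certificate
+ // generation is deterministic, so the hashes must match across restarts.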
+ for i := range prevCerthashes {
+ require.Equal(t, prevCerthashes[i], nextCertHashes[i])
+ }
+}
diff --git a/p2p/transport/quic/cmd/client/main.go b/p2p/transport/quic/cmd/client/main.go
new file mode 100644
index 0000000000..e9883d2a1d
--- /dev/null
+++ b/p2p/transport/quic/cmd/client/main.go
@@ -0,0 +1,19 @@
+package main
+
+import (
+ "fmt"
+ "log"
+ "os"
+
+ cmdlib "github.com/libp2p/go-libp2p/p2p/transport/quic/cmd/lib"
+)
+
+func main() {
+ if len(os.Args) != 3 {
+ fmt.Printf("Usage: %s ", os.Args[0])
+ return
+ }
+ if err := cmdlib.RunClient(os.Args[1], os.Args[2]); err != nil {
+ log.Fatal(err.Error())
+ }
+}
diff --git a/p2p/transport/quic/cmd/lib/lib.go b/p2p/transport/quic/cmd/lib/lib.go
new file mode 100644
index 0000000000..fd2a270af0
--- /dev/null
+++ b/p2p/transport/quic/cmd/lib/lib.go
@@ -0,0 +1,129 @@
+package cmdlib
+
+import (
+ "context"
+ "crypto/rand"
+ "fmt"
+ "io"
+ "log"
+
+ ic "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/peer"
+ tpt "github.com/libp2p/go-libp2p/core/transport"
+ libp2pquic "github.com/libp2p/go-libp2p/p2p/transport/quic"
+ "github.com/libp2p/go-libp2p/p2p/transport/quicreuse"
+
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/quic-go/quic-go"
+)
+
+func RunClient(raddr string, p string) error {
+ peerID, err := peer.Decode(p)
+ if err != nil {
+ return err
+ }
+ addr, err := ma.NewMultiaddr(raddr)
+ if err != nil {
+ return err
+ }
+ priv, _, err := ic.GenerateECDSAKeyPair(rand.Reader)
+ if err != nil {
+ return err
+ }
+
+ reuse, err := quicreuse.NewConnManager(quic.StatelessResetKey{}, quic.TokenGeneratorKey{})
+ if err != nil {
+ return err
+ }
+ t, err := libp2pquic.NewTransport(priv, reuse, nil, nil, nil)
+ if err != nil {
+ return err
+ }
+
+ log.Printf("Dialing %s\n", addr.String())
+ conn, err := t.Dial(context.Background(), addr, peerID)
+ if err != nil {
+ return err
+ }
+ defer conn.Close()
+ str, err := conn.OpenStream(context.Background())
+ if err != nil {
+ return err
+ }
+ defer str.Close()
+ const msg = "Hello world!"
+ log.Printf("Sending: %s\n", msg)
+ if _, err := str.Write([]byte(msg)); err != nil {
+ return err
+ }
+ if err := str.CloseWrite(); err != nil {
+ return err
+ }
+ data, err := io.ReadAll(str)
+ if err != nil {
+ return err
+ }
+ log.Printf("Received: %s\n", data)
+ return nil
+}
+
+func RunServer(port string, location chan peer.AddrInfo) error {
+ addr, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/0.0.0.0/udp/%s/quic-v1", port))
+ if err != nil {
+ return err
+ }
+ priv, _, err := ic.GenerateECDSAKeyPair(rand.Reader)
+ if err != nil {
+ return err
+ }
+ peerID, err := peer.IDFromPrivateKey(priv)
+ if err != nil {
+ return err
+ }
+
+ reuse, err := quicreuse.NewConnManager(quic.StatelessResetKey{}, quic.TokenGeneratorKey{})
+ if err != nil {
+ return err
+ }
+ t, err := libp2pquic.NewTransport(priv, reuse, nil, nil, nil)
+ if err != nil {
+ return err
+ }
+
+ ln, err := t.Listen(addr)
+ if err != nil {
+ return err
+ }
+ fmt.Printf("Listening. Now run: go run cmd/client/main.go %s %s\n", ln.Multiaddr(), peerID)
+ if location != nil {
+ location <- peer.AddrInfo{ID: peerID, Addrs: []ma.Multiaddr{ln.Multiaddr()}}
+ }
+ for {
+ conn, err := ln.Accept()
+ if err != nil {
+ return err
+ }
+ log.Printf("Accepted new connection from %s (%s)\n", conn.RemotePeer(), conn.RemoteMultiaddr())
+ go func() {
+ if err := handleConn(conn); err != nil {
+ log.Printf("handling conn failed: %s", err.Error())
+ }
+ }()
+ }
+}
+
+func handleConn(conn tpt.CapableConn) error {
+ str, err := conn.AcceptStream()
+ if err != nil {
+ return err
+ }
+ data, err := io.ReadAll(str)
+ if err != nil {
+ return err
+ }
+ log.Printf("Received: %s\n", data)
+ if _, err := str.Write(data); err != nil {
+ return err
+ }
+ return str.Close()
+}
diff --git a/p2p/transport/quic/cmd/lib/lib_test.go b/p2p/transport/quic/cmd/lib/lib_test.go
new file mode 100644
index 0000000000..2175874322
--- /dev/null
+++ b/p2p/transport/quic/cmd/lib/lib_test.go
@@ -0,0 +1,31 @@
+package cmdlib
+
+import (
+ "testing"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/multiformats/go-multiaddr"
+)
+
+func TestCmd(t *testing.T) {
+ serverLocation := make(chan peer.AddrInfo)
+ go RunServer("0", serverLocation)
+
+ l := <-serverLocation
+
+ ip, rest := multiaddr.SplitFirst(l.Addrs[0])
+ if ip.Protocol().Code == multiaddr.P_IP4 && ip.Value() == "0.0.0.0" {
+ // Windows can't dial to 0.0.0.0 so replace with localhost
+ c, err := multiaddr.NewComponent("ip4", "127.0.0.1")
+ if err != nil {
+ t.Fatal(err)
+ }
+ ip = c
+ }
+
+ err := RunClient(ip.Encapsulate(rest).String(), l.ID.String())
+ if err != nil {
+ t.Fatal(err)
+ }
+}
diff --git a/p2p/transport/quic/cmd/server/main.go b/p2p/transport/quic/cmd/server/main.go
new file mode 100644
index 0000000000..c478d34b22
--- /dev/null
+++ b/p2p/transport/quic/cmd/server/main.go
@@ -0,0 +1,19 @@
+package main
+
+import (
+ "fmt"
+ "log"
+ "os"
+
+ cmdlib "github.com/libp2p/go-libp2p/p2p/transport/quic/cmd/lib"
+)
+
+func main() {
+ if len(os.Args) != 2 {
+ fmt.Printf("Usage: %s ", os.Args[0])
+ return
+ }
+ if err := cmdlib.RunServer(os.Args[1], nil); err != nil {
+ log.Fatal(err.Error())
+ }
+}
diff --git a/p2p/transport/quic/conn.go b/p2p/transport/quic/conn.go
new file mode 100644
index 0000000000..a8dba723f5
--- /dev/null
+++ b/p2p/transport/quic/conn.go
@@ -0,0 +1,104 @@
+package libp2pquic
+
+import (
+ "context"
+
+ ic "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ tpt "github.com/libp2p/go-libp2p/core/transport"
+
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/quic-go/quic-go"
+)
+
+type conn struct {
+ quicConn *quic.Conn
+ transport *transport
+ scope network.ConnManagementScope
+
+ localPeer peer.ID
+ localMultiaddr ma.Multiaddr
+
+ remotePeerID peer.ID
+ remotePubKey ic.PubKey
+ remoteMultiaddr ma.Multiaddr
+}
+
+var _ tpt.CapableConn = &conn{}
+
+// Close closes the connection.
+// It must be called even if the peer closed the connection in order for
+// garbage collection to properly work in this package.
+func (c *conn) Close() error {
+ return c.closeWithError(0, "")
+}
+
+// CloseWithError closes the connection
+// It must be called even if the peer closed the connection in order for
+// garbage collection to properly work in this package.
+func (c *conn) CloseWithError(errCode network.ConnErrorCode) error {
+ return c.closeWithError(quic.ApplicationErrorCode(errCode), "")
+}
+
+func (c *conn) closeWithError(errCode quic.ApplicationErrorCode, errString string) error {
+ c.transport.removeConn(c.quicConn)
+ err := c.quicConn.CloseWithError(errCode, errString)
+ c.scope.Done()
+ return err
+}
+
+// IsClosed returns whether a connection is fully closed.
+func (c *conn) IsClosed() bool {
+ return c.quicConn.Context().Err() != nil
+}
+
+func (c *conn) allowWindowIncrease(size uint64) bool {
+ return c.scope.ReserveMemory(int(size), network.ReservationPriorityMedium) == nil
+}
+
+// OpenStream creates a new stream.
+func (c *conn) OpenStream(ctx context.Context) (network.MuxedStream, error) {
+ qstr, err := c.quicConn.OpenStreamSync(ctx)
+ if err != nil {
+ return nil, parseStreamError(err)
+ }
+ return &stream{Stream: qstr}, nil
+}
+
+// AcceptStream accepts a stream opened by the other side.
+func (c *conn) AcceptStream() (network.MuxedStream, error) {
+ qstr, err := c.quicConn.AcceptStream(context.Background())
+ if err != nil {
+ return nil, parseStreamError(err)
+ }
+ return &stream{Stream: qstr}, nil
+}
+
+// LocalPeer returns our peer ID
+func (c *conn) LocalPeer() peer.ID { return c.localPeer }
+
+// RemotePeer returns the peer ID of the remote peer.
+func (c *conn) RemotePeer() peer.ID { return c.remotePeerID }
+
+// RemotePublicKey returns the public key of the remote peer.
+func (c *conn) RemotePublicKey() ic.PubKey { return c.remotePubKey }
+
+// LocalMultiaddr returns the local Multiaddr associated
+func (c *conn) LocalMultiaddr() ma.Multiaddr { return c.localMultiaddr }
+
+// RemoteMultiaddr returns the remote Multiaddr associated
+func (c *conn) RemoteMultiaddr() ma.Multiaddr { return c.remoteMultiaddr }
+
+func (c *conn) Transport() tpt.Transport { return c.transport }
+
+func (c *conn) Scope() network.ConnScope { return c.scope }
+
+// ConnState is the state of security connection.
+func (c *conn) ConnState() network.ConnectionState {
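+ // Distinguish a draft-29 ("quic") connection from an RFC 9000 ("quic-v1")
+ // connection by inspecting the local multiaddr.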
+ t := "quic-v1"
+ if _, err := c.LocalMultiaddr().ValueForProtocol(ma.P_QUIC); err == nil {
+ t = "quic"
+ }
+ return network.ConnectionState{Transport: t}
+}
diff --git a/p2p/transport/quic/conn_test.go b/p2p/transport/quic/conn_test.go
new file mode 100644
index 0000000000..703255a8b4
--- /dev/null
+++ b/p2p/transport/quic/conn_test.go
@@ -0,0 +1,732 @@
+package libp2pquic
+
+import (
+ "bytes"
+ "context"
+ "crypto/rand"
+ "errors"
+ "io"
+ mrand "math/rand"
+ "net"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ ic "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/network"
+ mocknetwork "github.com/libp2p/go-libp2p/core/network/mocks"
+ "github.com/libp2p/go-libp2p/core/peer"
+ tpt "github.com/libp2p/go-libp2p/core/transport"
+ "github.com/libp2p/go-libp2p/p2p/transport/quicreuse"
+
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/quic-go/quic-go"
+ quicproxy "github.com/quic-go/quic-go/integrationtests/tools/proxy"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/mock/gomock"
+)
+
+//go:generate sh -c "go run go.uber.org/mock/mockgen -package libp2pquic -destination mock_connection_gater_test.go github.com/libp2p/go-libp2p/core/connmgr ConnectionGater && go run golang.org/x/tools/cmd/goimports -w mock_connection_gater_test.go"
+
+type connTestCase struct {
+ Name string
+ Options []quicreuse.Option
+}
+
+var connTestCases = []*connTestCase{
+ {"reuseport_on", []quicreuse.Option{}},
+ {"reuseport_off", []quicreuse.Option{quicreuse.DisableReuseport()}},
+}
+
+func createPeer(t *testing.T) (peer.ID, ic.PrivKey) {
+ var priv ic.PrivKey
+ var err error
+ switch mrand.Int() % 4 {
+ case 0:
+ priv, _, err = ic.GenerateECDSAKeyPair(rand.Reader)
+ case 1:
+ priv, _, err = ic.GenerateRSAKeyPair(2048, rand.Reader)
+ case 2:
+ priv, _, err = ic.GenerateEd25519Key(rand.Reader)
+ case 3:
+ priv, _, err = ic.GenerateSecp256k1Key(rand.Reader)
+ }
+ require.NoError(t, err)
+ id, err := peer.IDFromPrivateKey(priv)
+ require.NoError(t, err)
+ t.Logf("using a %s key: %s", priv.Type(), id)
+ return id, priv
+}
+
+func runServer(t *testing.T, tr tpt.Transport, addr string) tpt.Listener {
+ t.Helper()
+
+ ln, err := tr.Listen(ma.StringCast(addr))
+ require.NoError(t, err)
+ return ln
+}
+
+func newConnManager(t *testing.T, opts ...quicreuse.Option) *quicreuse.ConnManager {
+ t.Helper()
+ cm, err := quicreuse.NewConnManager(quic.StatelessResetKey{}, quic.TokenGeneratorKey{}, opts...)
+ require.NoError(t, err)
+ t.Cleanup(func() { cm.Close() })
+ return cm
+}
+
+func TestHandshake(t *testing.T) {
+ for _, tc := range connTestCases {
+ t.Run(tc.Name, func(t *testing.T) {
+ testHandshake(t, tc)
+ })
+ }
+}
+
+func testHandshake(t *testing.T, tc *connTestCase) {
+ serverID, serverKey := createPeer(t)
+ clientID, clientKey := createPeer(t)
+
+ serverTransport, err := NewTransport(serverKey, newConnManager(t, tc.Options...), nil, nil, nil)
+ require.NoError(t, err)
+ defer serverTransport.(io.Closer).Close()
+
+ handshake := func(t *testing.T, ln tpt.Listener) {
+ clientTransport, err := NewTransport(clientKey, newConnManager(t, tc.Options...), nil, nil, nil)
+ require.NoError(t, err)
+ defer clientTransport.(io.Closer).Close()
+ conn, err := clientTransport.Dial(context.Background(), ln.Multiaddr(), serverID)
+ require.NoError(t, err)
+ defer conn.Close()
+ serverConn, err := ln.Accept()
+ require.NoError(t, err)
+ defer serverConn.Close()
+
+ require.Equal(t, conn.LocalPeer(), clientID)
+ require.Equal(t, conn.RemotePeer(), serverID)
+ require.True(t, conn.RemotePublicKey().Equals(serverKey.GetPublic()), "remote public key doesn't match")
+
+ require.Equal(t, serverConn.LocalPeer(), serverID)
+ require.Equal(t, serverConn.RemotePeer(), clientID)
+ require.True(t, serverConn.RemotePublicKey().Equals(clientKey.GetPublic()), "remote public key doesn't match")
+ }
+
+ t.Run("on IPv4", func(t *testing.T) {
+ ln := runServer(t, serverTransport, "/ip4/127.0.0.1/udp/0/quic-v1")
+ defer ln.Close()
+ handshake(t, ln)
+ })
+
+ t.Run("on IPv6", func(t *testing.T) {
+ ln := runServer(t, serverTransport, "/ip6/::1/udp/0/quic-v1")
+ defer ln.Close()
+ handshake(t, ln)
+ })
+}
+
+func TestResourceManagerSuccess(t *testing.T) {
+ for _, tc := range connTestCases {
+ t.Run(tc.Name, func(t *testing.T) {
+ testResourceManagerSuccess(t, tc)
+ })
+ }
+}
+
+func testResourceManagerSuccess(t *testing.T, tc *connTestCase) {
+ serverID, serverKey := createPeer(t)
+ clientID, clientKey := createPeer(t)
+
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ serverRcmgr := mocknetwork.NewMockResourceManager(ctrl)
+ serverTransport, err := NewTransport(serverKey, newConnManager(t, tc.Options...), nil, nil, serverRcmgr)
+ require.NoError(t, err)
+ defer serverTransport.(io.Closer).Close()
+ ln, err := serverTransport.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1"))
+ require.NoError(t, err)
+ defer ln.Close()
+
+ clientRcmgr := mocknetwork.NewMockResourceManager(ctrl)
+ clientTransport, err := NewTransport(clientKey, newConnManager(t, tc.Options...), nil, nil, clientRcmgr)
+ require.NoError(t, err)
+ defer clientTransport.(io.Closer).Close()
+
+ connChan := make(chan tpt.CapableConn)
+ serverConnScope := mocknetwork.NewMockConnManagementScope(ctrl)
+ go func() {
+ serverRcmgr.EXPECT().OpenConnection(network.DirInbound, false, gomock.Not(ln.Multiaddr())).Return(serverConnScope, nil)
+ serverConnScope.EXPECT().SetPeer(clientID)
+ serverConn, err := ln.Accept()
+ require.NoError(t, err)
+ connChan <- serverConn
+ }()
+
+ connScope := mocknetwork.NewMockConnManagementScope(ctrl)
+ clientRcmgr.EXPECT().OpenConnection(network.DirOutbound, false, ln.Multiaddr()).Return(connScope, nil)
+ connScope.EXPECT().SetPeer(serverID)
+ conn, err := clientTransport.Dial(context.Background(), ln.Multiaddr(), serverID)
+ require.NoError(t, err)
+ serverConn := <-connChan
+ t.Log("received conn")
+ connScope.EXPECT().Done().MinTimes(1) // for dialed connections, we might call Done multiple times
+ conn.Close()
+ serverConnScope.EXPECT().Done()
+ serverConn.Close()
+}
+
+func TestResourceManagerDialDenied(t *testing.T) {
+ for _, tc := range connTestCases {
+ t.Run(tc.Name, func(t *testing.T) {
+ testResourceManagerDialDenied(t, tc)
+ })
+ }
+}
+
+func testResourceManagerDialDenied(t *testing.T, tc *connTestCase) {
+ _, clientKey := createPeer(t)
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ rcmgr := mocknetwork.NewMockResourceManager(ctrl)
+ clientTransport, err := NewTransport(clientKey, newConnManager(t, tc.Options...), nil, nil, rcmgr)
+ require.NoError(t, err)
+ defer clientTransport.(io.Closer).Close()
+
+ connScope := mocknetwork.NewMockConnManagementScope(ctrl)
+ target := ma.StringCast("/ip4/127.0.0.1/udp/1234/quic-v1")
+
+ rcmgr.EXPECT().OpenConnection(network.DirOutbound, false, target).Return(connScope, nil)
+ rerr := errors.New("nope")
+ p := peer.ID("server")
+ connScope.EXPECT().SetPeer(p).Return(rerr)
+ connScope.EXPECT().Done()
+
+ _, err = clientTransport.Dial(context.Background(), target, p)
+ require.ErrorIs(t, err, rerr)
+}
+
+func TestResourceManagerAcceptDenied(t *testing.T) {
+ for _, tc := range connTestCases {
+ t.Run(tc.Name, func(t *testing.T) {
+ testResourceManagerAcceptDenied(t, tc)
+ })
+ }
+}
+
+func testResourceManagerAcceptDenied(t *testing.T, tc *connTestCase) {
+ serverID, serverKey := createPeer(t)
+ clientID, clientKey := createPeer(t)
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ clientRcmgr := mocknetwork.NewMockResourceManager(ctrl)
+ clientTransport, err := NewTransport(clientKey, newConnManager(t, tc.Options...), nil, nil, clientRcmgr)
+ require.NoError(t, err)
+ defer clientTransport.(io.Closer).Close()
+
+ serverRcmgr := mocknetwork.NewMockResourceManager(ctrl)
+ serverConnScope := mocknetwork.NewMockConnManagementScope(ctrl)
+ rerr := errors.New("denied")
+ gomock.InOrder(
+ serverRcmgr.EXPECT().OpenConnection(network.DirInbound, false, gomock.Any()).Return(serverConnScope, nil),
+ serverConnScope.EXPECT().SetPeer(clientID).Return(rerr),
+ serverConnScope.EXPECT().Done(),
+ )
+ serverTransport, err := NewTransport(serverKey, newConnManager(t, tc.Options...), nil, nil, serverRcmgr)
+ require.NoError(t, err)
+ defer serverTransport.(io.Closer).Close()
+ ln, err := serverTransport.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1"))
+ require.NoError(t, err)
+ defer ln.Close()
+ connChan := make(chan tpt.CapableConn)
+ go func() {
+ ln.Accept()
+ close(connChan)
+ }()
+
+ clientConnScope := mocknetwork.NewMockConnManagementScope(ctrl)
+ clientRcmgr.EXPECT().OpenConnection(network.DirOutbound, false, ln.Multiaddr()).Return(clientConnScope, nil)
+ clientConnScope.EXPECT().SetPeer(serverID)
+ // In rare instances, the resource manager's denial will already surface on
+ // Dial. In that case, Done is called on the connection scope.
+ clientConnScope.EXPECT().Done().MaxTimes(1)
+ conn, err := clientTransport.Dial(context.Background(), ln.Multiaddr(), serverID)
+ // In rare instances, the resource manager's denial will already surface on Dial.
+ if err == nil {
+ _, err = conn.AcceptStream()
+ require.Error(t, err)
+ }
+ select {
+ case <-connChan:
+ t.Fatal("didn't expect to accept a connection")
+ default:
+ }
+}
+
+func TestStreams(t *testing.T) {
+ for _, tc := range connTestCases {
+ t.Run(tc.Name, func(t *testing.T) {
+ testStreams(t, tc)
+ })
+ t.Run(tc.Name, func(t *testing.T) {
+ testStreamsErrorCode(t, tc)
+ })
+ }
+}
+
+func testStreams(t *testing.T, tc *connTestCase) {
+ serverID, serverKey := createPeer(t)
+ _, clientKey := createPeer(t)
+
+ serverTransport, err := NewTransport(serverKey, newConnManager(t, tc.Options...), nil, nil, nil)
+ require.NoError(t, err)
+ defer serverTransport.(io.Closer).Close()
+ ln := runServer(t, serverTransport, "/ip4/127.0.0.1/udp/0/quic-v1")
+ defer ln.Close()
+
+ clientTransport, err := NewTransport(clientKey, newConnManager(t, tc.Options...), nil, nil, nil)
+ require.NoError(t, err)
+ defer clientTransport.(io.Closer).Close()
+ conn, err := clientTransport.Dial(context.Background(), ln.Multiaddr(), serverID)
+ require.NoError(t, err)
+ defer conn.Close()
+ serverConn, err := ln.Accept()
+ require.NoError(t, err)
+ defer serverConn.Close()
+
+ str, err := conn.OpenStream(context.Background())
+ require.NoError(t, err)
+ _, err = str.Write([]byte("foobar"))
+ require.NoError(t, err)
+ str.Close()
+ sstr, err := serverConn.AcceptStream()
+ require.NoError(t, err)
+ data, err := io.ReadAll(sstr)
+ require.NoError(t, err)
+ require.Equal(t, data, []byte("foobar"))
+}
+
+func testStreamsErrorCode(t *testing.T, tc *connTestCase) {
+ serverID, serverKey := createPeer(t)
+ _, clientKey := createPeer(t)
+
+ serverTransport, err := NewTransport(serverKey, newConnManager(t, tc.Options...), nil, nil, nil)
+ require.NoError(t, err)
+ defer serverTransport.(io.Closer).Close()
+ ln := runServer(t, serverTransport, "/ip4/127.0.0.1/udp/0/quic-v1")
+ defer ln.Close()
+
+ clientTransport, err := NewTransport(clientKey, newConnManager(t, tc.Options...), nil, nil, nil)
+ require.NoError(t, err)
+ defer clientTransport.(io.Closer).Close()
+ conn, err := clientTransport.Dial(context.Background(), ln.Multiaddr(), serverID)
+ require.NoError(t, err)
+ defer conn.Close()
+ serverConn, err := ln.Accept()
+ require.NoError(t, err)
+ defer serverConn.Close()
+
+ str, err := conn.OpenStream(context.Background())
+ require.NoError(t, err)
+ err = str.ResetWithError(42)
+ require.NoError(t, err)
+
+ sstr, err := serverConn.AcceptStream()
+ require.NoError(t, err)
+ _, err = io.ReadAll(sstr)
+ require.Error(t, err)
+ se := &network.StreamError{}
+ if errors.As(err, &se) {
+ require.Equal(t, se.ErrorCode, network.StreamErrorCode(42))
+ require.True(t, se.Remote)
+ } else {
+ t.Fatalf("expected error to be of network.StreamError type, got %T, %v", err, err)
+ }
+}
+
+func TestHandshakeFailPeerIDMismatch(t *testing.T) {
+ for _, tc := range connTestCases {
+ t.Run(tc.Name, func(t *testing.T) {
+ testHandshakeFailPeerIDMismatch(t, tc)
+ })
+ }
+}
+
+func testHandshakeFailPeerIDMismatch(t *testing.T, tc *connTestCase) {
+ _, serverKey := createPeer(t)
+ _, clientKey := createPeer(t)
+ thirdPartyID, _ := createPeer(t)
+
+ serverTransport, err := NewTransport(serverKey, newConnManager(t, tc.Options...), nil, nil, nil)
+ require.NoError(t, err)
+ defer serverTransport.(io.Closer).Close()
+ ln := runServer(t, serverTransport, "/ip4/127.0.0.1/udp/0/quic-v1")
+
+ clientTransport, err := NewTransport(clientKey, newConnManager(t, tc.Options...), nil, nil, nil)
+ require.NoError(t, err)
+ // dial, but expect the wrong peer ID
+ _, err = clientTransport.Dial(context.Background(), ln.Multiaddr(), thirdPartyID)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "CRYPTO_ERROR")
+ defer clientTransport.(io.Closer).Close()
+
+ acceptErr := make(chan error)
+ go func() {
+ _, err := ln.Accept()
+ acceptErr <- err
+ }()
+
+ select {
+ case <-acceptErr:
+ t.Fatal("didn't expect Accept to return before being closed")
+ case <-time.After(100 * time.Millisecond):
+ }
+
+ require.NoError(t, ln.Close())
+ require.Error(t, <-acceptErr)
+}
+
+func TestConnectionGating(t *testing.T) {
+ for _, tc := range connTestCases {
+ t.Run(tc.Name, func(t *testing.T) {
+ testConnectionGating(t, tc)
+ })
+ }
+}
+
+func testConnectionGating(t *testing.T, tc *connTestCase) {
+ serverID, serverKey := createPeer(t)
+ _, clientKey := createPeer(t)
+
+ mockCtrl := gomock.NewController(t)
+ defer mockCtrl.Finish()
+ cg := NewMockConnectionGater(mockCtrl)
+
+ t.Run("accepted connections", func(t *testing.T) {
+ serverTransport, err := NewTransport(serverKey, newConnManager(t, tc.Options...), nil, cg, nil)
+ defer serverTransport.(io.Closer).Close()
+ require.NoError(t, err)
+ ln := runServer(t, serverTransport, "/ip4/127.0.0.1/udp/0/quic-v1")
+ defer ln.Close()
+
+ cg.EXPECT().InterceptAccept(gomock.Any())
+
+ accepted := make(chan struct{})
+ go func() {
+ defer close(accepted)
+ _, err := ln.Accept()
+ require.NoError(t, err)
+ }()
+
+ clientTransport, err := NewTransport(clientKey, newConnManager(t, tc.Options...), nil, nil, nil)
+ require.NoError(t, err)
+ defer clientTransport.(io.Closer).Close()
+ // make sure that the connection attempt fails
+ conn, err := clientTransport.Dial(context.Background(), ln.Multiaddr(), serverID)
+ // In rare instances, the connection gating error will already occur on Dial.
+ // In most cases, it will be returned by AcceptStream.
+ if err == nil {
+ _, err = conn.AcceptStream()
+ }
+ require.Contains(t, err.Error(), "connection gated")
+
+ // now allow the address and make sure the connection goes through
+ cg.EXPECT().InterceptAccept(gomock.Any()).Return(true)
+ cg.EXPECT().InterceptSecured(gomock.Any(), gomock.Any(), gomock.Any()).Return(true)
+ conn, err = clientTransport.Dial(context.Background(), ln.Multiaddr(), serverID)
+ require.NoError(t, err)
+ defer conn.Close()
+ require.Eventually(t, func() bool {
+ select {
+ case <-accepted:
+ return true
+ default:
+ return false
+ }
+ }, time.Second, 10*time.Millisecond)
+ })
+
+ t.Run("secured connections", func(t *testing.T) {
+ serverTransport, err := NewTransport(serverKey, newConnManager(t, tc.Options...), nil, nil, nil)
+ require.NoError(t, err)
+ defer serverTransport.(io.Closer).Close()
+ ln := runServer(t, serverTransport, "/ip4/127.0.0.1/udp/0/quic-v1")
+ defer ln.Close()
+
+ cg := NewMockConnectionGater(mockCtrl)
+ cg.EXPECT().InterceptSecured(gomock.Any(), gomock.Any(), gomock.Any())
+
+ clientTransport, err := NewTransport(clientKey, newConnManager(t, tc.Options...), nil, cg, nil)
+ require.NoError(t, err)
+ defer clientTransport.(io.Closer).Close()
+
+ // make sure that the connection attempt fails
+ _, err = clientTransport.Dial(context.Background(), ln.Multiaddr(), serverID)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "connection gated")
+
+ // now allow the peer ID and make sure the connection goes through
+ cg.EXPECT().InterceptSecured(gomock.Any(), gomock.Any(), gomock.Any()).Return(true)
+ conn, err := clientTransport.Dial(context.Background(), ln.Multiaddr(), serverID)
+ require.NoError(t, err)
+ conn.Close()
+ })
+}
+
+func TestDialTwo(t *testing.T) {
+ for _, tc := range connTestCases {
+ t.Run(tc.Name, func(t *testing.T) {
+ testDialTwo(t, tc)
+ })
+ }
+}
+
+func testDialTwo(t *testing.T, tc *connTestCase) {
+ serverID, serverKey := createPeer(t)
+ _, clientKey := createPeer(t)
+ serverID2, serverKey2 := createPeer(t)
+
+ serverTransport, err := NewTransport(serverKey, newConnManager(t, tc.Options...), nil, nil, nil)
+ require.NoError(t, err)
+ defer serverTransport.(io.Closer).Close()
+ ln1 := runServer(t, serverTransport, "/ip4/127.0.0.1/udp/0/quic-v1")
+ defer ln1.Close()
+ serverTransport2, err := NewTransport(serverKey2, newConnManager(t, tc.Options...), nil, nil, nil)
+ require.NoError(t, err)
+ defer serverTransport2.(io.Closer).Close()
+ ln2 := runServer(t, serverTransport2, "/ip4/127.0.0.1/udp/0/quic-v1")
+ defer ln2.Close()
+
+ data := bytes.Repeat([]byte{'a'}, 5*1<<20) // 5 MB
+ // wait for both servers to accept a connection
+ // then send some data
+ go func() {
+ serverConn1, err := ln1.Accept()
+ require.NoError(t, err)
+ serverConn2, err := ln2.Accept()
+ require.NoError(t, err)
+
+ for _, c := range []tpt.CapableConn{serverConn1, serverConn2} {
+ go func(conn tpt.CapableConn) {
+ str, err := conn.OpenStream(context.Background())
+ require.NoError(t, err)
+ defer str.Close()
+ _, err = str.Write(data)
+ require.NoError(t, err)
+ }(c)
+ }
+ }()
+
+ clientTransport, err := NewTransport(clientKey, newConnManager(t, tc.Options...), nil, nil, nil)
+ require.NoError(t, err)
+ defer clientTransport.(io.Closer).Close()
+ c1, err := clientTransport.Dial(context.Background(), ln1.Multiaddr(), serverID)
+ require.NoError(t, err)
+ defer c1.Close()
+ c2, err := clientTransport.Dial(context.Background(), ln2.Multiaddr(), serverID2)
+ require.NoError(t, err)
+ defer c2.Close()
+
+ done := make(chan struct{}, 2)
+ // receive the data on both connections at the same time
+ for _, c := range []tpt.CapableConn{c1, c2} {
+ go func(conn tpt.CapableConn) {
+ str, err := conn.AcceptStream()
+ require.NoError(t, err)
+ str.CloseWrite()
+ d, err := io.ReadAll(str)
+ require.NoError(t, err)
+ require.Equal(t, d, data)
+ done <- struct{}{}
+ }(c)
+ }
+
+ for i := 0; i < 2; i++ {
+ require.Eventually(t, func() bool {
+ select {
+ case <-done:
+ return true
+ default:
+ return false
+ }
+ }, 15*time.Second, 50*time.Millisecond)
+ }
+}
+
+func TestStatelessReset(t *testing.T) {
+ for _, tc := range connTestCases {
+ t.Run(tc.Name, func(t *testing.T) {
+ testStatelessReset(t, tc)
+ })
+ }
+}
+
+func newUDPConnLocalhost(t testing.TB, port int) (*net.UDPConn, func()) {
+ t.Helper()
+ conn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: port})
+ require.NoError(t, err)
+ return conn, func() { conn.Close() }
+}
+
+func testStatelessReset(t *testing.T, tc *connTestCase) {
+ serverID, serverKey := createPeer(t)
+ _, clientKey := createPeer(t)
+
+ serverTransport, err := NewTransport(serverKey, newConnManager(t, tc.Options...), nil, nil, nil)
+ require.NoError(t, err)
+ defer serverTransport.(io.Closer).Close()
+ ln := runServer(t, serverTransport, "/ip4/127.0.0.1/udp/0/quic-v1")
+
+ var drop uint32
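+ // While drop is set, the proxy discards all packets, simulating the old
+ // server vanishing without sending a CONNECTION_CLOSE.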
+ dropCallback := func(quicproxy.Direction, net.Addr, net.Addr, []byte) bool { return atomic.LoadUint32(&drop) > 0 }
+ proxyConn, cleanup := newUDPConnLocalhost(t, 0)
+ proxy := quicproxy.Proxy{
+ Conn: proxyConn,
+ ServerAddr: ln.Addr().(*net.UDPAddr),
+ DropPacket: dropCallback,
+ }
+ err = proxy.Start()
+ require.NoError(t, err)
+
+ // establish a connection
+ clientTransport, err := NewTransport(clientKey, newConnManager(t, tc.Options...), nil, nil, nil)
+ require.NoError(t, err)
+ defer clientTransport.(io.Closer).Close()
+ proxyAddr, err := quicreuse.ToQuicMultiaddr(proxy.LocalAddr(), quic.Version1)
+ require.NoError(t, err)
+ conn, err := clientTransport.Dial(context.Background(), proxyAddr, serverID)
+ require.NoError(t, err)
+ connChan := make(chan tpt.CapableConn)
+ go func() {
+ conn, err := ln.Accept()
+ require.NoError(t, err)
+ str, err := conn.OpenStream(context.Background())
+ require.NoError(t, err)
+ _, err = conn.LocalMultiaddr().ValueForProtocol(ma.P_QUIC_V1)
+ require.NoError(t, err)
+ str.Write([]byte("foobar"))
+ connChan <- conn
+ }()
+
+ str, err := conn.AcceptStream()
+ require.NoError(t, err)
+ _, err = str.Read(make([]byte, 6))
+ require.NoError(t, err)
+
+ // Stop forwarding packets and close the server.
+ // This prevents the CONNECTION_CLOSE from reaching the client.
+ atomic.StoreUint32(&drop, 1)
+ ln.Close()
+ (<-connChan).Close()
+ proxyLocalPort := proxy.LocalAddr().(*net.UDPAddr).Port
+ proxy.Close()
+ cleanup()
+
+ // Start another listener (on a different port).
+ ln, err = serverTransport.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1"))
+ require.NoError(t, err)
+ defer ln.Close()
+ // Now that the new server is up, re-enable packet forwarding.
+ atomic.StoreUint32(&drop, 0)
+
+ proxyConn, cleanup = newUDPConnLocalhost(t, proxyLocalPort)
+ defer cleanup()
+ // Recreate the proxy, such that its client-facing port stays constant.
+ proxyBis := quicproxy.Proxy{
+ Conn: proxyConn,
+ ServerAddr: ln.Addr().(*net.UDPAddr),
+ DropPacket: dropCallback,
+ }
+ err = proxyBis.Start()
+ require.NoError(t, err)
+ defer proxyBis.Close()
+
+ // Trigger something (not too small) to be sent, so that we receive the stateless reset.
+ // The new server doesn't have any state for the previously established connection.
+ // We expect it to send a stateless reset.
+ _, rerr := str.Write([]byte("Lorem ipsum dolor sit amet."))
+ if rerr == nil {
+ _, rerr = str.Read([]byte{0, 0})
+ }
+ require.Error(t, rerr)
+ var statelessResetErr *quic.StatelessResetError
+ require.ErrorAs(t, rerr, &statelessResetErr)
+}
+
+// Hole punching is only expected to work with reuseport enabled,
+// so we don't need to test the DisableReuseport option.
+func TestHolePunching(t *testing.T) {
+ serverID, serverKey := createPeer(t)
+ clientID, clientKey := createPeer(t)
+
+ t1, err := NewTransport(serverKey, newConnManager(t), nil, nil, nil)
+ require.NoError(t, err)
+ defer t1.(io.Closer).Close()
+ laddr, err := ma.NewMultiaddr("/ip4/127.0.0.1/udp/0/quic-v1")
+ require.NoError(t, err)
+ ln1, err := t1.Listen(laddr)
+ require.NoError(t, err)
+ done1 := make(chan struct{})
+ go func() {
+ defer close(done1)
+ _, err := ln1.Accept()
+ require.Error(t, err, "didn't expect to accept any connections")
+ }()
+
+ t2, err := NewTransport(clientKey, newConnManager(t), nil, nil, nil)
+ require.NoError(t, err)
+ defer t2.(io.Closer).Close()
+ ln2, err := t2.Listen(laddr)
+ require.NoError(t, err)
+ done2 := make(chan struct{})
+ go func() {
+ defer close(done2)
+ _, err := ln2.Accept()
+ require.Error(t, err, "didn't expect to accept any connections")
+ }()
+ connChan := make(chan tpt.CapableConn)
+ go func() {
+ conn, err := t2.Dial(
+ network.WithSimultaneousConnect(context.Background(), false, ""),
+ ln1.Multiaddr(),
+ serverID,
+ )
+ require.NoError(t, err)
+ connChan <- conn
+ }()
+ // Make sure the server role (the dial on t2) has progressed far enough.
+ // If it hasn't created the hole punch map entry, the connection will be accepted as a regular connection,
+ // which would make this test fail.
+ require.Eventually(t, func() bool {
+ tr := t2.(*transport)
+ tr.holePunchingMx.Lock()
+ defer tr.holePunchingMx.Unlock()
+ return len(tr.holePunching) > 0
+ }, time.Second, 10*time.Millisecond)
+
+ conn1, err := t1.Dial(
+ network.WithSimultaneousConnect(context.Background(), true, ""),
+ ln2.Multiaddr(),
+ clientID,
+ )
+ require.NoError(t, err)
+ defer conn1.Close()
+ require.Equal(t, conn1.RemotePeer(), clientID)
+ var conn2 tpt.CapableConn
+ require.Eventually(t, func() bool {
+ select {
+ case conn2 = <-connChan:
+ return true
+ default:
+ return false
+ }
+ }, time.Second, 10*time.Millisecond)
+ defer conn2.Close()
+ require.Equal(t, conn2.RemotePeer(), serverID)
+ ln1.Close()
+ ln2.Close()
+ <-done1
+ <-done2
+}
diff --git a/p2p/transport/quic/listener.go b/p2p/transport/quic/listener.go
new file mode 100644
index 0000000000..009d33cfd3
--- /dev/null
+++ b/p2p/transport/quic/listener.go
@@ -0,0 +1,158 @@
+package libp2pquic
+
+import (
+ "context"
+ "errors"
+ "net"
+
+ ic "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ tpt "github.com/libp2p/go-libp2p/core/transport"
+ p2ptls "github.com/libp2p/go-libp2p/p2p/security/tls"
+ "github.com/libp2p/go-libp2p/p2p/transport/quicreuse"
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/quic-go/quic-go"
+)
+
+// A listener listens for QUIC connections.
+type listener struct {
+ reuseListener quicreuse.Listener
+ transport *transport
+ rcmgr network.ResourceManager
+ privKey ic.PrivKey
+ localPeer peer.ID
+ localMultiaddrs map[quic.Version]ma.Multiaddr
+}
+
+func newListener(ln quicreuse.Listener, t *transport, localPeer peer.ID, key ic.PrivKey, rcmgr network.ResourceManager) (listener, error) {
+ localMultiaddrs := make(map[quic.Version]ma.Multiaddr)
+ for _, addr := range ln.Multiaddrs() {
+ if _, err := addr.ValueForProtocol(ma.P_QUIC_V1); err == nil {
+ localMultiaddrs[quic.Version1] = addr
+ }
+ }
+
+ return listener{
+ reuseListener: ln,
+ transport: t,
+ rcmgr: rcmgr,
+ privKey: key,
+ localPeer: localPeer,
+ localMultiaddrs: localMultiaddrs,
+ }, nil
+}
+
+// Accept accepts new connections.
+func (l *listener) Accept() (tpt.CapableConn, error) {
+ for {
+ qconn, err := l.reuseListener.Accept(context.Background())
+ if err != nil {
+ return nil, err
+ }
+ c, err := l.wrapConn(qconn)
+ if err != nil {
+ log.Debug("failed to setup connection", "err", err)
+ qconn.CloseWithError(quic.ApplicationErrorCode(network.ConnResourceLimitExceeded), "")
+ continue
+ }
+ l.transport.addConn(qconn, c)
+ if l.transport.gater != nil && !(l.transport.gater.InterceptAccept(c) && l.transport.gater.InterceptSecured(network.DirInbound, c.remotePeerID, c)) {
+ c.closeWithError(quic.ApplicationErrorCode(network.ConnGated), "connection gated")
+ continue
+ }
+
+		// If this connection fulfills an active hole punch, hand it to the hole-punching dialer instead of returning it here.
+ key := holePunchKey{addr: qconn.RemoteAddr().String(), peer: c.remotePeerID}
+ var wasHolePunch bool
+ l.transport.holePunchingMx.Lock()
+ holePunch, ok := l.transport.holePunching[key]
+ if ok && !holePunch.fulfilled {
+ holePunch.connCh <- c
+ wasHolePunch = true
+ holePunch.fulfilled = true
+ }
+ l.transport.holePunchingMx.Unlock()
+ if wasHolePunch {
+ continue
+ }
+ return c, nil
+ }
+}
+
+// wrapConn wraps a QUIC connection into a libp2p [tpt.CapableConn].
+// If wrapping fails, the caller is responsible for cleaning up the
+// connection.
+func (l *listener) wrapConn(qconn *quic.Conn) (*conn, error) {
+ remoteMultiaddr, err := quicreuse.ToQuicMultiaddr(qconn.RemoteAddr(), qconn.ConnectionState().Version)
+ if err != nil {
+ return nil, err
+ }
+ connScope, err := network.UnwrapConnManagementScope(qconn.Context())
+ if err != nil {
+		connScope = nil
+		// Don't fail here. If quicreuse didn't provide a scope (e.g. because the
+		// quicreuse.ConnContext option wasn't wired up with the resource manager),
+		// we open a fresh connection scope below instead.
+ }
+ if connScope == nil {
+ connScope, err = l.rcmgr.OpenConnection(network.DirInbound, false, remoteMultiaddr)
+ if err != nil {
+ log.Debug("resource manager blocked incoming connection", "addr", qconn.RemoteAddr(), "err", err)
+ return nil, err
+ }
+ }
+ c, err := l.wrapConnWithScope(qconn, connScope, remoteMultiaddr)
+ if err != nil {
+ connScope.Done()
+ return nil, err
+ }
+
+ return c, nil
+}
+
+func (l *listener) wrapConnWithScope(qconn *quic.Conn, connScope network.ConnManagementScope, remoteMultiaddr ma.Multiaddr) (*conn, error) {
+ // The tls.Config used to establish this connection already verified the certificate chain.
+ // Since we don't have any way of knowing which tls.Config was used though,
+ // we have to re-determine the peer's identity here.
+ // Therefore, this is expected to never fail.
+ remotePubKey, err := p2ptls.PubKeyFromCertChain(qconn.ConnectionState().TLS.PeerCertificates)
+ if err != nil {
+ return nil, err
+ }
+ remotePeerID, err := peer.IDFromPublicKey(remotePubKey)
+ if err != nil {
+ return nil, err
+ }
+ if err := connScope.SetPeer(remotePeerID); err != nil {
+ log.Debug("resource manager blocked incoming connection for peer", "peer", remotePeerID, "addr", qconn.RemoteAddr(), "err", err)
+ return nil, err
+ }
+
+ localMultiaddr, found := l.localMultiaddrs[qconn.ConnectionState().Version]
+ if !found {
+ return nil, errors.New("unknown QUIC version:" + qconn.ConnectionState().Version.String())
+ }
+
+ return &conn{
+ quicConn: qconn,
+ transport: l.transport,
+ scope: connScope,
+ localPeer: l.localPeer,
+ localMultiaddr: localMultiaddr,
+ remoteMultiaddr: remoteMultiaddr,
+ remotePeerID: remotePeerID,
+ remotePubKey: remotePubKey,
+ }, nil
+}
+
+// Close closes the listener.
+func (l *listener) Close() error {
+ return l.reuseListener.Close()
+}
+
+// Addr returns the address of this listener.
+func (l *listener) Addr() net.Addr {
+ return l.reuseListener.Addr()
+}
diff --git a/p2p/transport/quic/listener_test.go b/p2p/transport/quic/listener_test.go
new file mode 100644
index 0000000000..53d6001d35
--- /dev/null
+++ b/p2p/transport/quic/listener_test.go
@@ -0,0 +1,169 @@
+package libp2pquic
+
+import (
+ "context"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "testing"
+ "time"
+
+ ic "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/network"
+ mocknetwork "github.com/libp2p/go-libp2p/core/network/mocks"
+ tpt "github.com/libp2p/go-libp2p/core/transport"
+ "github.com/libp2p/go-libp2p/p2p/transport/quicreuse"
+ "github.com/quic-go/quic-go"
+ "go.uber.org/mock/gomock"
+
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/stretchr/testify/require"
+)
+
+func newTransport(t *testing.T, rcmgr network.ResourceManager) tpt.Transport {
+ rsaKey, err := rsa.GenerateKey(rand.Reader, 2048)
+ require.NoError(t, err)
+ key, err := ic.UnmarshalRsaPrivateKey(x509.MarshalPKCS1PrivateKey(rsaKey))
+ require.NoError(t, err)
+ tr, err := NewTransport(key, newConnManager(t), nil, nil, rcmgr)
+ require.NoError(t, err)
+ return tr
+}
+
+func TestListenAddr(t *testing.T) {
+ tr := newTransport(t, nil)
+ defer tr.(io.Closer).Close()
+
+ t.Run("for IPv4", func(t *testing.T) {
+ localAddrV1 := ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1")
+ ln, err := tr.Listen(localAddrV1)
+ require.NoError(t, err)
+ defer ln.Close()
+ port := ln.Addr().(*net.UDPAddr).Port
+ require.NotZero(t, port)
+
+		require.Equal(t, fmt.Sprintf("/ip4/127.0.0.1/udp/%d/quic-v1", port), ln.Multiaddr().String())
+ })
+
+ t.Run("for IPv6", func(t *testing.T) {
+ localAddrV1 := ma.StringCast("/ip6/::/udp/0/quic-v1")
+ ln, err := tr.Listen(localAddrV1)
+ require.NoError(t, err)
+ defer ln.Close()
+ port := ln.Addr().(*net.UDPAddr).Port
+ require.NotZero(t, port)
+		require.Equal(t, fmt.Sprintf("/ip6/::/udp/%d/quic-v1", port), ln.Multiaddr().String())
+ })
+}
+
+func TestAccepting(t *testing.T) {
+ tr := newTransport(t, nil)
+ defer tr.(io.Closer).Close()
+ ln, err := tr.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1"))
+ require.NoError(t, err)
+ done := make(chan struct{})
+ go func() {
+ ln.Accept()
+ close(done)
+ }()
+ time.Sleep(100 * time.Millisecond)
+ select {
+ case <-done:
+ t.Fatal("Accept didn't block")
+ default:
+ }
+ require.NoError(t, ln.Close())
+ select {
+ case <-done:
+ case <-time.After(100 * time.Millisecond):
+ t.Fatal("Accept didn't return after the listener was closed")
+ }
+}
+
+func TestAcceptAfterClose(t *testing.T) {
+ tr := newTransport(t, nil)
+ defer tr.(io.Closer).Close()
+ ln, err := tr.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1"))
+ require.NoError(t, err)
+ require.NoError(t, ln.Close())
+ _, err = ln.Accept()
+ require.Error(t, err)
+}
+
+func TestCorrectNumberOfVirtualListeners(t *testing.T) {
+ tr := newTransport(t, nil)
+ tpt := tr.(*transport)
+ defer tr.(io.Closer).Close()
+
+ localAddrV1 := ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1")
+ ln, err := tr.Listen(localAddrV1)
+ require.NoError(t, err)
+ udpAddr, _, err := quicreuse.FromQuicMultiaddr(localAddrV1)
+ require.NoError(t, err)
+
+ require.Len(t, tpt.listeners[udpAddr.String()], 1)
+ ln.Close()
+ require.Empty(t, tpt.listeners[udpAddr.String()])
+}
+
+func TestCleanupConnWhenBlocked(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+ mockRcmgr := mocknetwork.NewMockResourceManager(ctrl)
+ mockRcmgr.EXPECT().OpenConnection(network.DirInbound, false, gomock.Any()).DoAndReturn(func(network.Direction, bool, ma.Multiaddr) (network.ConnManagementScope, error) {
+ // Block the connection
+ return nil, fmt.Errorf("connections blocked")
+ })
+
+ server := newTransport(t, mockRcmgr)
+ serverTpt := server.(*transport)
+ defer server.(io.Closer).Close()
+
+ localAddrV1 := ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1")
+ ln, err := server.Listen(localAddrV1)
+ require.NoError(t, err)
+ defer ln.Close()
+ go ln.Accept()
+
+ client := newTransport(t, nil)
+ ctx := context.Background()
+
+	quicErr := &quic.ApplicationError{}
+ conn, err := client.Dial(ctx, ln.Multiaddr(), serverTpt.localPeer)
+ if err != nil && errors.As(err, &quicErr) {
+ // We hit our expected application error
+ return
+ }
+
+ // No error yet, let's continue using the conn
+ s, err := conn.OpenStream(ctx)
+ if err != nil && errors.As(err, &quicErr) {
+ // We hit our expected application error
+ return
+ }
+
+ // No error yet, let's continue using the conn
+ s.SetReadDeadline(time.Now().Add(10 * time.Second))
+ b := [1]byte{}
+ _, err = s.Read(b[:])
+ connError := &network.ConnError{}
+ if err != nil && errors.As(err, &connError) {
+ // We hit our expected application error
+ return
+ }
+
+ t.Fatalf("expected network.ConnError, got %v", err)
+}
diff --git a/p2p/transport/quic/mock_connection_gater_test.go b/p2p/transport/quic/mock_connection_gater_test.go
new file mode 100644
index 0000000000..0dd3586bb6
--- /dev/null
+++ b/p2p/transport/quic/mock_connection_gater_test.go
@@ -0,0 +1,115 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: github.com/libp2p/go-libp2p/core/connmgr (interfaces: ConnectionGater)
+//
+// Generated by this command:
+//
+// mockgen -package libp2pquic -destination mock_connection_gater_test.go github.com/libp2p/go-libp2p/core/connmgr ConnectionGater
+//
+
+// Package libp2pquic is a generated GoMock package.
+package libp2pquic
+
+import (
+ reflect "reflect"
+
+ control "github.com/libp2p/go-libp2p/core/control"
+ network "github.com/libp2p/go-libp2p/core/network"
+ peer "github.com/libp2p/go-libp2p/core/peer"
+ multiaddr "github.com/multiformats/go-multiaddr"
+ gomock "go.uber.org/mock/gomock"
+)
+
+// MockConnectionGater is a mock of ConnectionGater interface.
+type MockConnectionGater struct {
+ ctrl *gomock.Controller
+ recorder *MockConnectionGaterMockRecorder
+ isgomock struct{}
+}
+
+// MockConnectionGaterMockRecorder is the mock recorder for MockConnectionGater.
+type MockConnectionGaterMockRecorder struct {
+ mock *MockConnectionGater
+}
+
+// NewMockConnectionGater creates a new mock instance.
+func NewMockConnectionGater(ctrl *gomock.Controller) *MockConnectionGater {
+ mock := &MockConnectionGater{ctrl: ctrl}
+ mock.recorder = &MockConnectionGaterMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockConnectionGater) EXPECT() *MockConnectionGaterMockRecorder {
+ return m.recorder
+}
+
+// InterceptAccept mocks base method.
+func (m *MockConnectionGater) InterceptAccept(arg0 network.ConnMultiaddrs) bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "InterceptAccept", arg0)
+ ret0, _ := ret[0].(bool)
+ return ret0
+}
+
+// InterceptAccept indicates an expected call of InterceptAccept.
+func (mr *MockConnectionGaterMockRecorder) InterceptAccept(arg0 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InterceptAccept", reflect.TypeOf((*MockConnectionGater)(nil).InterceptAccept), arg0)
+}
+
+// InterceptAddrDial mocks base method.
+func (m *MockConnectionGater) InterceptAddrDial(arg0 peer.ID, arg1 multiaddr.Multiaddr) bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "InterceptAddrDial", arg0, arg1)
+ ret0, _ := ret[0].(bool)
+ return ret0
+}
+
+// InterceptAddrDial indicates an expected call of InterceptAddrDial.
+func (mr *MockConnectionGaterMockRecorder) InterceptAddrDial(arg0, arg1 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InterceptAddrDial", reflect.TypeOf((*MockConnectionGater)(nil).InterceptAddrDial), arg0, arg1)
+}
+
+// InterceptPeerDial mocks base method.
+func (m *MockConnectionGater) InterceptPeerDial(p peer.ID) bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "InterceptPeerDial", p)
+ ret0, _ := ret[0].(bool)
+ return ret0
+}
+
+// InterceptPeerDial indicates an expected call of InterceptPeerDial.
+func (mr *MockConnectionGaterMockRecorder) InterceptPeerDial(p any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InterceptPeerDial", reflect.TypeOf((*MockConnectionGater)(nil).InterceptPeerDial), p)
+}
+
+// InterceptSecured mocks base method.
+func (m *MockConnectionGater) InterceptSecured(arg0 network.Direction, arg1 peer.ID, arg2 network.ConnMultiaddrs) bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "InterceptSecured", arg0, arg1, arg2)
+ ret0, _ := ret[0].(bool)
+ return ret0
+}
+
+// InterceptSecured indicates an expected call of InterceptSecured.
+func (mr *MockConnectionGaterMockRecorder) InterceptSecured(arg0, arg1, arg2 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InterceptSecured", reflect.TypeOf((*MockConnectionGater)(nil).InterceptSecured), arg0, arg1, arg2)
+}
+
+// InterceptUpgraded mocks base method.
+func (m *MockConnectionGater) InterceptUpgraded(arg0 network.Conn) (bool, control.DisconnectReason) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "InterceptUpgraded", arg0)
+ ret0, _ := ret[0].(bool)
+ ret1, _ := ret[1].(control.DisconnectReason)
+ return ret0, ret1
+}
+
+// InterceptUpgraded indicates an expected call of InterceptUpgraded.
+func (mr *MockConnectionGaterMockRecorder) InterceptUpgraded(arg0 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InterceptUpgraded", reflect.TypeOf((*MockConnectionGater)(nil).InterceptUpgraded), arg0)
+}
diff --git a/p2p/transport/quic/stream.go b/p2p/transport/quic/stream.go
new file mode 100644
index 0000000000..8f308e90f0
--- /dev/null
+++ b/p2p/transport/quic/stream.go
@@ -0,0 +1,91 @@
+package libp2pquic
+
+import (
+ "errors"
+ "math"
+
+ "github.com/libp2p/go-libp2p/core/network"
+
+ "github.com/quic-go/quic-go"
+)
+
+const reset quic.StreamErrorCode = 0
+
+type stream struct {
+ *quic.Stream
+}
+
+var _ network.MuxedStream = stream{}
+
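+// parseStreamError maps quic-go errors onto the corresponding network.StreamError
+// and network.ConnError types, preserving the error code and whether the error
+// was triggered by the remote side. For example, a remote RESET_STREAM with
+// code 42 would surface to callers roughly as (a sketch):
+//
+//	var se *network.StreamError
+//	if errors.As(err, &se) {
+//		_ = se.ErrorCode // network.StreamErrorCode(42)
+//		_ = se.Remote    // true
+//	}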
+func parseStreamError(err error) error {
+ if err == nil {
+ return err
+ }
+ se := &quic.StreamError{}
+ if errors.As(err, &se) {
+ var code network.StreamErrorCode
+ if se.ErrorCode > math.MaxUint32 {
+ code = network.StreamCodeOutOfRange
+ } else {
+ code = network.StreamErrorCode(se.ErrorCode)
+ }
+ err = &network.StreamError{
+ ErrorCode: code,
+ Remote: se.Remote,
+ TransportError: se,
+ }
+ }
+ ae := &quic.ApplicationError{}
+ if errors.As(err, &ae) {
+ var code network.ConnErrorCode
+ if ae.ErrorCode > math.MaxUint32 {
+ code = network.ConnCodeOutOfRange
+ } else {
+ code = network.ConnErrorCode(ae.ErrorCode)
+ }
+ err = &network.ConnError{
+ ErrorCode: code,
+ Remote: ae.Remote,
+ TransportError: ae,
+ }
+ }
+ return err
+}
+
+func (s stream) Read(b []byte) (n int, err error) {
+ n, err = s.Stream.Read(b)
+ return n, parseStreamError(err)
+}
+
+func (s stream) Write(b []byte) (n int, err error) {
+ n, err = s.Stream.Write(b)
+ return n, parseStreamError(err)
+}
+
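+// Reset aborts both directions of the stream with the default `reset` error
+// code: buffered and future incoming data is discarded, and pending writes are
+// abandoned.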
+func (s stream) Reset() error {
+ s.Stream.CancelRead(reset)
+ s.Stream.CancelWrite(reset)
+ return nil
+}
+
+func (s stream) ResetWithError(errCode network.StreamErrorCode) error {
+ s.Stream.CancelRead(quic.StreamErrorCode(errCode))
+ s.Stream.CancelWrite(quic.StreamErrorCode(errCode))
+ return nil
+}
+
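+// Close closes the stream for writing and discards any buffered or future
+// incoming data: QUIC closes each direction independently, so the read side is
+// cancelled with the `reset` error code while the write side is closed
+// cleanly. Use CloseWrite to keep the read side open.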
+func (s stream) Close() error {
+ s.Stream.CancelRead(reset)
+ return s.Stream.Close()
+}
+
+func (s stream) CloseRead() error {
+ s.Stream.CancelRead(reset)
+ return nil
+}
+
+func (s stream) CloseWrite() error {
+ return s.Stream.Close()
+}
diff --git a/p2p/transport/quic/transport.go b/p2p/transport/quic/transport.go
new file mode 100644
index 0000000000..0176409e48
--- /dev/null
+++ b/p2p/transport/quic/transport.go
@@ -0,0 +1,404 @@
+package libp2pquic
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "math/rand"
+ "net"
+ "sync"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/connmgr"
+ ic "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/pnet"
+ tpt "github.com/libp2p/go-libp2p/core/transport"
+ p2ptls "github.com/libp2p/go-libp2p/p2p/security/tls"
+ "github.com/libp2p/go-libp2p/p2p/transport/quicreuse"
+
+ logging "github.com/libp2p/go-libp2p/gologshim"
+ ma "github.com/multiformats/go-multiaddr"
+ mafmt "github.com/multiformats/go-multiaddr-fmt"
+ manet "github.com/multiformats/go-multiaddr/net"
+ "github.com/quic-go/quic-go"
+)
+
+// ListenOrder is the relative order in which this transport's listeners are
+// started; QUIC listens early so that transports reusing its socket (e.g.
+// WebTransport) can attach afterwards.
+const ListenOrder = 1
+
+var log = logging.Logger("quic-transport")
+
+// ErrHolePunching is returned when a hole punching attempt completes without
+// yielding a connection.
+var ErrHolePunching = errors.New("hole punching attempted; no active dial")
+
+// HolePunchTimeout is the maximum duration of a hole punching attempt.
+var HolePunchTimeout = 5 * time.Second
+
+// transport implements the tpt.Transport interface for QUIC connections.
+type transport struct {
+ privKey ic.PrivKey
+ localPeer peer.ID
+ identity *p2ptls.Identity
+ connManager *quicreuse.ConnManager
+ gater connmgr.ConnectionGater
+ rcmgr network.ResourceManager
+
+ holePunchingMx sync.Mutex
+ holePunching map[holePunchKey]*activeHolePunch
+
+ rndMx sync.Mutex
+ rnd rand.Rand
+
+ connMx sync.Mutex
+ conns map[*quic.Conn]*conn
+
+ listenersMu sync.Mutex
+	// maps a UDP address (as a string) to its virtual listeners
+ listeners map[string][]*virtualListener
+}
+
+var _ tpt.Transport = &transport{}
+
+type holePunchKey struct {
+ addr string
+ peer peer.ID
+}
+
+type activeHolePunch struct {
+ connCh chan tpt.CapableConn
+ fulfilled bool
+}
+
+// NewTransport creates a new QUIC transport.
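+//
+// A minimal construction sketch (error handling elided; see the tests for a
+// runnable variant):
+//
+//	priv, _, _ := ic.GenerateEd25519Key(rand.Reader) // crypto/rand's Reader
+//	cm, _ := quicreuse.NewConnManager(quic.StatelessResetKey{}, quic.TokenGeneratorKey{})
+//	tr, _ := NewTransport(priv, cm, nil, nil, nil)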
+func NewTransport(key ic.PrivKey, connManager *quicreuse.ConnManager, psk pnet.PSK, gater connmgr.ConnectionGater, rcmgr network.ResourceManager) (tpt.Transport, error) {
+ if len(psk) > 0 {
+ log.Error("QUIC doesn't support private networks yet.")
+ return nil, errors.New("QUIC doesn't support private networks yet")
+ }
+ localPeer, err := peer.IDFromPrivateKey(key)
+ if err != nil {
+ return nil, err
+ }
+ identity, err := p2ptls.NewIdentity(key)
+ if err != nil {
+ return nil, err
+ }
+
+ if rcmgr == nil {
+ rcmgr = &network.NullResourceManager{}
+ }
+
+ return &transport{
+ privKey: key,
+ localPeer: localPeer,
+ identity: identity,
+ connManager: connManager,
+ gater: gater,
+ rcmgr: rcmgr,
+ conns: make(map[*quic.Conn]*conn),
+ holePunching: make(map[holePunchKey]*activeHolePunch),
+ rnd: *rand.New(rand.NewSource(time.Now().UnixNano())),
+
+ listeners: make(map[string][]*virtualListener),
+ }, nil
+}
+
+func (t *transport) ListenOrder() int {
+ return ListenOrder
+}
+
+// Dial dials a new QUIC connection
+func (t *transport) Dial(ctx context.Context, raddr ma.Multiaddr, p peer.ID) (_c tpt.CapableConn, _err error) {
+ if ok, isClient, _ := network.GetSimultaneousConnect(ctx); ok && !isClient {
+ return t.holePunch(ctx, raddr, p)
+ }
+
+ scope, err := t.rcmgr.OpenConnection(network.DirOutbound, false, raddr)
+ if err != nil {
+ log.Debug("resource manager blocked outgoing connection", "peer", p, "addr", raddr, "err", err)
+ return nil, err
+ }
+
+ c, err := t.dialWithScope(ctx, raddr, p, scope)
+ if err != nil {
+ scope.Done()
+ return nil, err
+ }
+ return c, nil
+}
+
+func (t *transport) dialWithScope(ctx context.Context, raddr ma.Multiaddr, p peer.ID, scope network.ConnManagementScope) (tpt.CapableConn, error) {
+ if err := scope.SetPeer(p); err != nil {
+ log.Debug("resource manager blocked outgoing connection for peer", "peer", p, "addr", raddr, "err", err)
+ return nil, err
+ }
+
+ tlsConf, keyCh := t.identity.ConfigForPeer(p)
+ ctx = quicreuse.WithAssociation(ctx, t)
+ pconn, err := t.connManager.DialQUIC(ctx, raddr, tlsConf, t.allowWindowIncrease)
+ if err != nil {
+ return nil, err
+ }
+
+ // Should be ready by this point, don't block.
+ var remotePubKey ic.PubKey
+ select {
+ case remotePubKey = <-keyCh:
+ default:
+ }
+ if remotePubKey == nil {
+ pconn.CloseWithError(1, "")
+ return nil, errors.New("p2p/transport/quic BUG: expected remote pub key to be set")
+ }
+
+ localMultiaddr, err := quicreuse.ToQuicMultiaddr(pconn.LocalAddr(), pconn.ConnectionState().Version)
+ if err != nil {
+ pconn.CloseWithError(1, "")
+ return nil, err
+ }
+ c := &conn{
+ quicConn: pconn,
+ transport: t,
+ scope: scope,
+ localPeer: t.localPeer,
+ localMultiaddr: localMultiaddr,
+ remotePubKey: remotePubKey,
+ remotePeerID: p,
+ remoteMultiaddr: raddr,
+ }
+ if t.gater != nil && !t.gater.InterceptSecured(network.DirOutbound, p, c) {
+ pconn.CloseWithError(quic.ApplicationErrorCode(network.ConnGated), "connection gated")
+ return nil, fmt.Errorf("secured connection gated")
+ }
+ t.addConn(pconn, c)
+ return c, nil
+}
+
+func (t *transport) addConn(conn *quic.Conn, c *conn) {
+ t.connMx.Lock()
+ t.conns[conn] = c
+ t.connMx.Unlock()
+}
+
+func (t *transport) removeConn(conn *quic.Conn) {
+ t.connMx.Lock()
+ delete(t.conns, conn)
+ t.connMx.Unlock()
+}
+
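+// holePunch repeatedly sends random UDP payloads to raddr to open (and keep
+// open) the NAT mapping, while waiting for the peer's inbound QUIC connection
+// to be delivered through the holePunching map by the listener's Accept loop.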
+func (t *transport) holePunch(ctx context.Context, raddr ma.Multiaddr, p peer.ID) (tpt.CapableConn, error) {
+ network, saddr, err := manet.DialArgs(raddr)
+ if err != nil {
+ return nil, err
+ }
+ addr, err := net.ResolveUDPAddr(network, saddr)
+ if err != nil {
+ return nil, err
+ }
+ tr, err := t.connManager.TransportWithAssociationForDial(t, network, addr)
+ if err != nil {
+ return nil, err
+ }
+ defer tr.DecreaseCount()
+
+ ctx, cancel := context.WithTimeout(ctx, HolePunchTimeout)
+ defer cancel()
+
+ key := holePunchKey{addr: addr.String(), peer: p}
+ t.holePunchingMx.Lock()
+ if _, ok := t.holePunching[key]; ok {
+ t.holePunchingMx.Unlock()
+ return nil, fmt.Errorf("already punching hole for %s", addr)
+ }
+ connCh := make(chan tpt.CapableConn, 1)
+ t.holePunching[key] = &activeHolePunch{connCh: connCh}
+ t.holePunchingMx.Unlock()
+
+ var timer *time.Timer
+ defer func() {
+ if timer != nil {
+ timer.Stop()
+ }
+ }()
+
+ payload := make([]byte, 64)
+ var punchErr error
+loop:
+ for i := 0; ; i++ {
+ t.rndMx.Lock()
+ _, err := t.rnd.Read(payload)
+ t.rndMx.Unlock()
+ if err != nil {
+ punchErr = err
+ break
+ }
+ if _, err := tr.WriteTo(payload, addr); err != nil {
+ punchErr = err
+ break
+ }
+
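+		// Back off with jitter: attempt i waits 10ms plus a random duration of
+		// up to 10*(i+1)^2 ms, capped at 200ms (i=0: <10ms extra, i=3: <160ms).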
+ maxSleep := 10 * (i + 1) * (i + 1) // in ms
+ if maxSleep > 200 {
+ maxSleep = 200
+ }
+ d := 10*time.Millisecond + time.Duration(rand.Intn(maxSleep))*time.Millisecond
+ if timer == nil {
+ timer = time.NewTimer(d)
+ } else {
+ timer.Reset(d)
+ }
+ select {
+ case c := <-connCh:
+ t.holePunchingMx.Lock()
+ delete(t.holePunching, key)
+ t.holePunchingMx.Unlock()
+ return c, nil
+ case <-timer.C:
+ case <-ctx.Done():
+ punchErr = ErrHolePunching
+ break loop
+ }
+ }
+ // we only arrive here if punchErr != nil
+ t.holePunchingMx.Lock()
+ defer func() {
+ delete(t.holePunching, key)
+ t.holePunchingMx.Unlock()
+ }()
+ select {
+ case c := <-t.holePunching[key].connCh:
+ return c, nil
+ default:
+ return nil, punchErr
+ }
+}
+
+// Don't use mafmt.QUIC as we don't want to dial DNS addresses. Just /ip{4,6}/udp/quic-v1
+var dialMatcher = mafmt.And(mafmt.IP, mafmt.Base(ma.P_UDP), mafmt.Base(ma.P_QUIC_V1))
+
+// CanDial determines if we can dial to an address
+func (t *transport) CanDial(addr ma.Multiaddr) bool {
+ return dialMatcher.Matches(addr)
+}
+
+// Listen listens for new QUIC connections on the passed multiaddr.
+func (t *transport) Listen(addr ma.Multiaddr) (tpt.Listener, error) {
+ var tlsConf tls.Config
+ tlsConf.GetConfigForClient = func(_ *tls.ClientHelloInfo) (*tls.Config, error) {
+ // return a tls.Config that verifies the peer's certificate chain.
+ // Note that since we have no way of associating an incoming QUIC connection with
+ // the peer ID calculated here, we don't actually receive the peer's public key
+ // from the key chan.
+ conf, _ := t.identity.ConfigForPeer("")
+ return conf, nil
+ }
+ tlsConf.NextProtos = []string{"libp2p"}
+ udpAddr, version, err := quicreuse.FromQuicMultiaddr(addr)
+ if err != nil {
+ return nil, err
+ }
+
+ t.listenersMu.Lock()
+ defer t.listenersMu.Unlock()
+ listeners := t.listeners[udpAddr.String()]
+ var underlyingListener *listener
+ var acceptRunner *acceptLoopRunner
+ if len(listeners) != 0 {
+ // We already have an underlying listener, let's use it
+ underlyingListener = listeners[0].listener
+		acceptRunner = listeners[0].acceptRunner
+ // Make sure our underlying listener is listening on the specified QUIC version
+ if _, ok := underlyingListener.localMultiaddrs[version]; !ok {
+ return nil, fmt.Errorf("can't listen on quic version %v, underlying listener doesn't support it", version)
+ }
+ } else {
+ ln, err := t.connManager.ListenQUICAndAssociate(t, addr, &tlsConf, t.allowWindowIncrease)
+ if err != nil {
+ return nil, err
+ }
+ l, err := newListener(ln, t, t.localPeer, t.privKey, t.rcmgr)
+ if err != nil {
+ _ = ln.Close()
+ return nil, err
+ }
+ underlyingListener = &l
+
+ acceptRunner = &acceptLoopRunner{
+ acceptSem: make(chan struct{}, 1),
+ muxer: make(map[quic.Version]chan acceptVal),
+ }
+ }
+
+ l := &virtualListener{
+ listener: underlyingListener,
+ version: version,
+ udpAddr: udpAddr.String(),
+ t: t,
+		acceptRunner: acceptRunner,
+ acceptChan: acceptRunner.AcceptForVersion(version),
+ }
+
+ listeners = append(listeners, l)
+ t.listeners[udpAddr.String()] = listeners
+
+ return l, nil
+}
+
+func (t *transport) allowWindowIncrease(conn *quic.Conn, size uint64) bool {
+ // If the QUIC connection tries to increase the window before we've inserted it
+ // into our connections map (which we do right after dialing / accepting it),
+ // we have no way to account for that memory. This should be very rare.
+ // Block this attempt. The connection can request more memory later.
+ t.connMx.Lock()
+ c, ok := t.conns[conn]
+ t.connMx.Unlock()
+ if !ok {
+ return false
+ }
+ return c.allowWindowIncrease(size)
+}
+
+// Proxy returns true if this transport proxies.
+func (t *transport) Proxy() bool {
+ return false
+}
+
+// Protocols returns the set of protocols handled by this transport.
+func (t *transport) Protocols() []int {
+ return t.connManager.Protocols()
+}
+
+func (t *transport) String() string {
+ return "QUIC"
+}
+
+func (t *transport) Close() error {
+ return nil
+}
+
+func (t *transport) CloseVirtualListener(l *virtualListener) error {
+ t.listenersMu.Lock()
+ defer t.listenersMu.Unlock()
+
+ var err error
+ listeners := t.listeners[l.udpAddr]
+ if len(listeners) == 1 {
+ // This is the last virtual listener here, so we can close the underlying listener
+ err = l.listener.Close()
+ delete(t.listeners, l.udpAddr)
+ return err
+ }
+
+ for i := 0; i < len(listeners); i++ {
+ // Swap remove
+ if l == listeners[i] {
+ listeners[i] = listeners[len(listeners)-1]
+ listeners = listeners[:len(listeners)-1]
+ t.listeners[l.udpAddr] = listeners
+ break
+ }
+ }
+
+	return nil
+}
diff --git a/p2p/transport/quic/transport_test.go b/p2p/transport/quic/transport_test.go
new file mode 100644
index 0000000000..41e7e4e416
--- /dev/null
+++ b/p2p/transport/quic/transport_test.go
@@ -0,0 +1,69 @@
+package libp2pquic
+
+import (
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "io"
+ "testing"
+
+ ic "github.com/libp2p/go-libp2p/core/crypto"
+ tpt "github.com/libp2p/go-libp2p/core/transport"
+
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/stretchr/testify/require"
+)
+
+func getTransport(t *testing.T) tpt.Transport {
+ t.Helper()
+ rsaKey, err := rsa.GenerateKey(rand.Reader, 2048)
+ require.NoError(t, err)
+ key, err := ic.UnmarshalRsaPrivateKey(x509.MarshalPKCS1PrivateKey(rsaKey))
+ require.NoError(t, err)
+ tr, err := NewTransport(key, newConnManager(t), nil, nil, nil)
+ require.NoError(t, err)
+ return tr
+}
+
+func TestQUICProtocol(t *testing.T) {
+ tr := getTransport(t)
+ defer tr.(io.Closer).Close()
+
+ protocols := tr.Protocols()
+	if len(protocols) != 1 {
+		t.Fatalf("expected exactly one protocol, got %v", protocols)
+	}
+ if protocols[0] != ma.P_QUIC_V1 {
+ t.Fatalf("expected the supported protocol to be QUIC v1, got %d", protocols[0])
+ }
+}
+
+func TestCanDial(t *testing.T) {
+ tr := getTransport(t)
+ defer tr.(io.Closer).Close()
+
+ invalid := []string{
+ "/ip4/127.0.0.1/udp/1234",
+ "/ip4/5.5.5.5/tcp/1234",
+ "/dns/google.com/udp/443/quic-v1",
+ "/ip4/127.0.0.1/udp/1234/quic",
+ }
+ valid := []string{
+ "/ip4/127.0.0.1/udp/1234/quic-v1",
+ "/ip4/5.5.5.5/udp/0/quic-v1",
+ }
+ for _, s := range invalid {
+ invalidAddr, err := ma.NewMultiaddr(s)
+ require.NoError(t, err)
+ if tr.CanDial(invalidAddr) {
+ t.Errorf("didn't expect to be able to dial a non-quic address (%s)", invalidAddr)
+ }
+ }
+ for _, s := range valid {
+ validAddr, err := ma.NewMultiaddr(s)
+ require.NoError(t, err)
+ if !tr.CanDial(validAddr) {
+ t.Errorf("expected to be able to dial QUIC address (%s)", validAddr)
+ }
+ }
+}
diff --git a/p2p/transport/quic/virtuallistener.go b/p2p/transport/quic/virtuallistener.go
new file mode 100644
index 0000000000..5b23e4c507
--- /dev/null
+++ b/p2p/transport/quic/virtuallistener.go
@@ -0,0 +1,176 @@
+package libp2pquic
+
+import (
+ "sync"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ tpt "github.com/libp2p/go-libp2p/core/transport"
+ "github.com/libp2p/go-libp2p/p2p/transport/quicreuse"
+
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/quic-go/quic-go"
+)
+
+const acceptBufferPerVersion = 4
+
+// virtualListener is a listener that exposes a single multiaddr but uses another listener under the hood
+type virtualListener struct {
+ *listener
+ udpAddr string
+ version quic.Version
+ t *transport
+	acceptRunner  *acceptLoopRunner
+ acceptChan chan acceptVal
+}
+
+var _ tpt.Listener = &virtualListener{}
+
+func (l *virtualListener) Multiaddr() ma.Multiaddr {
+ return l.listener.localMultiaddrs[l.version]
+}
+
+func (l *virtualListener) Close() error {
+	l.acceptRunner.RmAcceptForVersion(l.version, tpt.ErrListenerClosed)
+ return l.t.CloseVirtualListener(l)
+}
+
+func (l *virtualListener) Accept() (tpt.CapableConn, error) {
+	return l.acceptRunner.Accept(l.listener, l.version, l.acceptChan)
+}
+
+type acceptVal struct {
+ conn tpt.CapableConn
+ err error
+}
+
+type acceptLoopRunner struct {
+ acceptSem chan struct{}
+
+ muxerMu sync.Mutex
+ muxer map[quic.Version]chan acceptVal
+ muxerClosed bool
+}
+
+func (r *acceptLoopRunner) AcceptForVersion(v quic.Version) chan acceptVal {
+ r.muxerMu.Lock()
+ defer r.muxerMu.Unlock()
+
+ ch := make(chan acceptVal, acceptBufferPerVersion)
+
+ if _, ok := r.muxer[v]; ok {
+ panic("unexpected chan already found in accept muxer")
+ }
+
+ r.muxer[v] = ch
+ return ch
+}
+
+func (r *acceptLoopRunner) RmAcceptForVersion(v quic.Version, err error) {
+ r.muxerMu.Lock()
+ defer r.muxerMu.Unlock()
+
+ if r.muxerClosed {
+ // Already closed, all versions are removed
+ return
+ }
+
+ ch, ok := r.muxer[v]
+ if !ok {
+ panic("expected chan in accept muxer")
+ }
+ ch <- acceptVal{err: err}
+ delete(r.muxer, v)
+}
+
+func (r *acceptLoopRunner) sendErrAndClose(err error) {
+ r.muxerMu.Lock()
+ defer r.muxerMu.Unlock()
+ r.muxerClosed = true
+ for k, ch := range r.muxer {
+ select {
+ case ch <- acceptVal{err: err}:
+ default:
+ }
+ delete(r.muxer, k)
+ close(ch)
+ }
+}
+
+// innerAccept contains the inner logic of the Accept loop. It assumes the
+// caller holds the acceptSem semaphore. It may return both a nil conn and a
+// nil error if it didn't find a conn with the expected version.
+func (r *acceptLoopRunner) innerAccept(l *listener, expectedVersion quic.Version, bufferedConnChan chan acceptVal) (tpt.CapableConn, error) {
+ select {
+ // Check if we have a buffered connection first from an earlier Accept call
+ case v, ok := <-bufferedConnChan:
+ if !ok {
+ return nil, tpt.ErrListenerClosed
+ }
+ return v.conn, v.err
+ default:
+ }
+
+ conn, err := l.Accept()
+
+ if err != nil {
+ r.sendErrAndClose(err)
+ return nil, err
+ }
+
+ _, version, err := quicreuse.FromQuicMultiaddr(conn.RemoteMultiaddr())
+ if err != nil {
+ r.sendErrAndClose(err)
+ return nil, err
+ }
+
+ if version == expectedVersion {
+ return conn, nil
+ }
+
+	// This wasn't the version we were expecting, let's queue it up for a
+	// future Accept call with a different version.
+ r.muxerMu.Lock()
+ ch, ok := r.muxer[version]
+ r.muxerMu.Unlock()
+
+ if !ok {
+		// No listener is accepting this connection's version. Close it.
+ conn.Close()
+ return nil, nil
+ }
+
+	// Non-blocking send: if the accept queue for this version is full,
+	// drop the connection.
+	select {
+	case ch <- acceptVal{conn: conn}:
+	default:
+		conn.CloseWithError(network.ConnRateLimited)
+		log.Warn("Accept queue filled. Dropping connection.")
+	}
+
+ return nil, nil
+}
+
+func (r *acceptLoopRunner) Accept(l *listener, expectedVersion quic.Version, bufferedConnChan chan acceptVal) (tpt.CapableConn, error) {
+ for {
+ var conn tpt.CapableConn
+ var err error
+ select {
+ case r.acceptSem <- struct{}{}:
+ conn, err = r.innerAccept(l, expectedVersion, bufferedConnChan)
+ <-r.acceptSem
+
+ if conn == nil && err == nil {
+			// Didn't find a conn for the expected version and there was no error, let's try again.
+ continue
+ }
+ case v, ok := <-bufferedConnChan:
+ if !ok {
+ return nil, tpt.ErrListenerClosed
+ }
+ conn = v.conn
+ err = v.err
+ }
+ return conn, err
+ }
+}
diff --git a/p2p/transport/quicreuse/config.go b/p2p/transport/quicreuse/config.go
new file mode 100644
index 0000000000..62f8919c8b
--- /dev/null
+++ b/p2p/transport/quicreuse/config.go
@@ -0,0 +1,18 @@
+package quicreuse
+
+import (
+ "time"
+
+ "github.com/quic-go/quic-go"
+)
+
+var quicConfig = &quic.Config{
+ MaxIncomingStreams: 256,
+ MaxIncomingUniStreams: 5, // allow some unidirectional streams, in case we speak WebTransport
+ MaxStreamReceiveWindow: 10 * (1 << 20), // 10 MB
+ MaxConnectionReceiveWindow: 15 * (1 << 20), // 15 MB
+ KeepAlivePeriod: 15 * time.Second,
+ Versions: []quic.Version{quic.Version1},
+ // We don't use datagrams (yet), but this is necessary for WebTransport
+ EnableDatagrams: true,
+}
diff --git a/p2p/transport/quicreuse/connmgr.go b/p2p/transport/quicreuse/connmgr.go
new file mode 100644
index 0000000000..e8ace83971
--- /dev/null
+++ b/p2p/transport/quicreuse/connmgr.go
@@ -0,0 +1,462 @@
+// Package quicreuse provides a ConnManager that reuses QUIC transports for
+// several purposes: listening and dialing on the same socket, running multiple
+// QUIC listeners with different ALPNs on the same address, and sharing that
+// address with non-QUIC transports like WebRTC.
+package quicreuse
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "io"
+ "net"
+ "sync"
+
+ "github.com/libp2p/go-netroute"
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/quic-go/quic-go"
+ quiclogging "github.com/quic-go/quic-go/logging"
+ quicmetrics "github.com/quic-go/quic-go/metrics"
+ "golang.org/x/time/rate"
+)
+
+type QUICListener interface {
+ Accept(ctx context.Context) (*quic.Conn, error)
+ Close() error
+ Addr() net.Addr
+}
+
+var _ QUICListener = &quic.Listener{}
+
+type QUICTransport interface {
+ Listen(tlsConf *tls.Config, conf *quic.Config) (QUICListener, error)
+ Dial(ctx context.Context, addr net.Addr, tlsConf *tls.Config, conf *quic.Config) (*quic.Conn, error)
+ WriteTo(b []byte, addr net.Addr) (int, error)
+ ReadNonQUICPacket(ctx context.Context, b []byte) (int, net.Addr, error)
+ io.Closer
+}
+
+// ConnManager enables QUIC and WebTransport transports to listen on the same port, reusing
+// listen addresses for dialing, and provides a PacketConn for sharing the listen address
+// with other protocols like WebRTC.
+// Reusing the listen address for dialing helps with address discovery and hole punching. For details
+// of the reuse logic see `ListenQUICAndAssociate` and `DialQUIC`.
+// If reuseport is disabled using the `DisableReuseport` option, listen addresses are not used for
+// dialing.
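+//
+// A minimal usage sketch (error handling elided; the keys would normally be
+// random and kept stable across restarts):
+//
+//	cm, _ := NewConnManager(quic.StatelessResetKey{}, quic.TokenGeneratorKey{})
+//	defer cm.Close()
+//	ln, _ := cm.ListenQUIC(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1"),
+//		&tls.Config{NextProtos: []string{"libp2p"}}, nil)
+//	defer ln.Close()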
+type ConnManager struct {
+ reuseUDP4 *reuse
+ reuseUDP6 *reuse
+ enableReuseport bool
+
+ listenUDP listenUDP
+ sourceIPSelectorFn func() (SourceIPSelector, error)
+
+ enableMetrics bool
+ registerer prometheus.Registerer
+
+ serverConfig *quic.Config
+ clientConfig *quic.Config
+
+ quicListenersMu sync.Mutex
+ quicListeners map[string]quicListenerEntry
+
+ srk quic.StatelessResetKey
+ tokenKey quic.TokenGeneratorKey
+ connContext connContextFunc
+
+ verifySourceAddress func(addr net.Addr) bool
+
+ qlogTracerDir string
+}
+
+type quicListenerEntry struct {
+ refCount int
+ ln *quicListener
+}
+
+func defaultListenUDP(network string, laddr *net.UDPAddr) (net.PacketConn, error) {
+ return net.ListenUDP(network, laddr)
+}
+
+func defaultSourceIPSelectorFn() (SourceIPSelector, error) {
+ r, err := netroute.New()
+ return &netrouteSourceIPSelector{routes: r}, err
+}
+
+const (
+ unverifiedAddressNewConnectionRPS = 1000
+ unverifiedAddressNewConnectionBurst = 1000
+)
+
+// NewConnManager returns a new ConnManager
+func NewConnManager(statelessResetKey quic.StatelessResetKey, tokenKey quic.TokenGeneratorKey, opts ...Option) (*ConnManager, error) {
+ cm := &ConnManager{
+ enableReuseport: true,
+ quicListeners: make(map[string]quicListenerEntry),
+ srk: statelessResetKey,
+ tokenKey: tokenKey,
+ registerer: prometheus.DefaultRegisterer,
+ listenUDP: defaultListenUDP,
+ sourceIPSelectorFn: defaultSourceIPSelectorFn,
+ }
+ for _, o := range opts {
+ if err := o(cm); err != nil {
+ return nil, err
+ }
+ }
+
+ quicConf := quicConfig.Clone()
+ quicConf.Tracer = cm.getTracer()
+ serverConfig := quicConf.Clone()
+
+ cm.clientConfig = quicConf
+ cm.serverConfig = serverConfig
+
+	// Verify source addresses when under high load.
+	// This ensures that the number of spoofed/unverified addresses passed to downstream rate limiters
+	// is limited, which enables IP address based rate limiting.
+ sourceAddrRateLimiter := rate.NewLimiter(unverifiedAddressNewConnectionRPS, unverifiedAddressNewConnectionBurst)
+ vsa := cm.verifySourceAddress
+ cm.verifySourceAddress = func(addr net.Addr) bool {
+ if sourceAddrRateLimiter.Allow() {
+ if vsa != nil {
+ return vsa(addr)
+ }
+ return false
+ }
+ return true
+ }
+ if cm.enableReuseport {
+ cm.reuseUDP4 = newReuse(&statelessResetKey, &tokenKey, cm.listenUDP, cm.sourceIPSelectorFn, cm.connContext, cm.verifySourceAddress)
+ cm.reuseUDP6 = newReuse(&statelessResetKey, &tokenKey, cm.listenUDP, cm.sourceIPSelectorFn, cm.connContext, cm.verifySourceAddress)
+ }
+ return cm, nil
+}
+
+func (c *ConnManager) getTracer() func(context.Context, quiclogging.Perspective, quic.ConnectionID) *quiclogging.ConnectionTracer {
+ return func(_ context.Context, p quiclogging.Perspective, ci quic.ConnectionID) *quiclogging.ConnectionTracer {
+ var promTracer *quiclogging.ConnectionTracer
+ if c.enableMetrics {
+ switch p {
+ case quiclogging.PerspectiveClient:
+ promTracer = quicmetrics.NewClientConnectionTracerWithRegisterer(c.registerer)
+ case quiclogging.PerspectiveServer:
+ promTracer = quicmetrics.NewServerConnectionTracerWithRegisterer(c.registerer)
+ default:
+ log.Error("invalid logging perspective", "peer", p)
+ }
+ }
+ var tracer *quiclogging.ConnectionTracer
+		tracerDir := c.qlogTracerDir
+ if tracerDir == "" {
+ // Fallback to the global qlogTracerDir
+ tracerDir = qlogTracerDir
+ }
+
+ if tracerDir != "" {
+ tracer = qloggerForDir(tracerDir, p, ci)
+ if promTracer != nil {
+ tracer = quiclogging.NewMultiplexedConnectionTracer(promTracer,
+ tracer)
+ }
+ }
+ return tracer
+ }
+}
+
+func (c *ConnManager) getReuse(network string) (*reuse, error) {
+ switch network {
+ case "udp4":
+ return c.reuseUDP4, nil
+ case "udp6":
+ return c.reuseUDP6, nil
+ default:
+ return nil, errors.New("invalid network: must be either udp4 or udp6")
+ }
+}
+
+// LendTransport is an advanced method used to lend an existing QUICTransport
+// to the ConnManager. The ConnManager will close the returned channel when it
+// is done with the transport, so that the owner may safely close the transport.
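+//
+// Sketch, mirroring its use in the tests (error handling elided):
+//
+//	udpConn, _ := net.ListenUDP("udp4", nil)
+//	tr := &quic.Transport{Conn: udpConn}
+//	done, _ := cm.LendTransport("udp4", &wrappedQUICTransport{tr}, udpConn)
+//	// ... once done is closed, it is safe to close tr and udpConn.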
+func (c *ConnManager) LendTransport(network string, tr QUICTransport, conn net.PacketConn) (<-chan struct{}, error) {
+ c.quicListenersMu.Lock()
+ defer c.quicListenersMu.Unlock()
+
+ localAddr, ok := conn.LocalAddr().(*net.UDPAddr)
+ if !ok {
+ return nil, errors.New("expected a conn.LocalAddr() to return a *net.UDPAddr")
+ }
+
+ refCountedTr := &refcountedTransport{
+ QUICTransport: tr,
+ packetConn: conn,
+ borrowDoneSignal: make(chan struct{}),
+ }
+
+	reuse, err := c.getReuse(network)
+ if err != nil {
+ return nil, err
+ }
+ return refCountedTr.borrowDoneSignal, reuse.AddTransport(refCountedTr, localAddr)
+}
+
+// ListenQUIC listens for QUIC connections with the provided `tlsConf.NextProtos` ALPNs on `addr`. The same addr can be shared between
+// different ALPNs.
+func (c *ConnManager) ListenQUIC(addr ma.Multiaddr, tlsConf *tls.Config, allowWindowIncrease func(conn *quic.Conn, delta uint64) bool) (Listener, error) {
+ return c.ListenQUICAndAssociate(nil, addr, tlsConf, allowWindowIncrease)
+}
+
+// ListenQUICAndAssociate listens for QUIC connections with the provided `tlsConf.NextProtos` ALPNs on `addr`. The same addr can be shared between
+// different ALPNs.
+// The QUIC Transport used for listening is tagged with the `association`. Any subsequent `TransportWithAssociationForDial`,
+// or `DialQUIC` calls with the same `association` will reuse the QUIC Transport used by this method.
+// A common use of associations is to ensure /quic dials use the quic listening address and /webtransport dials use the
+// WebTransport listening address.
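+//
+// For example (a sketch; `assoc` is any comparable value identifying the
+// transport, such as the transport pointer itself):
+//
+//	ln, _ := cm.ListenQUICAndAssociate(assoc, addr, tlsConf, nil)
+//	// Dials carrying the same association reuse ln's UDP socket:
+//	conn, _ := cm.DialQUIC(WithAssociation(context.Background(), assoc), raddr, tlsConf, nil)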
+func (c *ConnManager) ListenQUICAndAssociate(association any, addr ma.Multiaddr, tlsConf *tls.Config, allowWindowIncrease func(conn *quic.Conn, delta uint64) bool) (Listener, error) {
+ netw, host, err := manet.DialArgs(addr)
+ if err != nil {
+ return nil, err
+ }
+ laddr, err := net.ResolveUDPAddr(netw, host)
+ if err != nil {
+ return nil, err
+ }
+
+ c.quicListenersMu.Lock()
+ defer c.quicListenersMu.Unlock()
+
+ key := laddr.String()
+ entry, ok := c.quicListeners[key]
+ if !ok {
+ tr, err := c.transportForListen(netw, laddr)
+ if err != nil {
+ return nil, err
+ }
+ ln, err := newQuicListener(tr, c.serverConfig)
+ if err != nil {
+ return nil, err
+ }
+ key = tr.LocalAddr().String()
+ entry = quicListenerEntry{ln: ln}
+ }
+ if c.enableReuseport && association != nil {
+ if _, ok := entry.ln.transport.(*refcountedTransport); !ok {
+ log.Warn("reuseport is enabled, association is non-nil, but the transport is not a refcountedTransport.")
+ }
+ }
+ l, err := entry.ln.Add(association, tlsConf, allowWindowIncrease, func() {
+ c.onListenerClosed(key)
+ })
+ if err != nil {
+ if entry.refCount <= 0 {
+ entry.ln.Close()
+ }
+ return nil, err
+ }
+ entry.refCount++
+ c.quicListeners[key] = entry
+ return l, nil
+}
+
+func (c *ConnManager) onListenerClosed(key string) {
+ c.quicListenersMu.Lock()
+ defer c.quicListenersMu.Unlock()
+
+ entry := c.quicListeners[key]
+	entry.refCount--
+ if entry.refCount <= 0 {
+ delete(c.quicListeners, key)
+ entry.ln.Close()
+ } else {
+ c.quicListeners[key] = entry
+ }
+}
+
+// SharedNonQUICPacketConn returns a `net.PacketConn` for `laddr` for non-QUIC uses.
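+// The returned conn only delivers packets that are not QUIC, e.g. WebRTC's
+// STUN and DTLS traffic. Sketch (assumes a QUIC listener is already bound to
+// laddr):
+//
+//	pc, _ := cm.SharedNonQUICPacketConn("udp4", laddr)
+//	defer pc.Close()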
+func (c *ConnManager) SharedNonQUICPacketConn(_ string, laddr *net.UDPAddr) (net.PacketConn, error) {
+ c.quicListenersMu.Lock()
+ defer c.quicListenersMu.Unlock()
+ key := laddr.String()
+ entry, ok := c.quicListeners[key]
+ if !ok {
+ return nil, errors.New("expected to be able to share with a QUIC listener, but no QUIC listener found. The QUIC listener should start first")
+ }
+ t := entry.ln.transport
+ if t, ok := t.(*refcountedTransport); ok {
+ t.IncreaseCount()
+ ctx, cancel := context.WithCancel(context.Background())
+ return &nonQUICPacketConn{
+ ctx: ctx,
+ ctxCancel: cancel,
+ owningTransport: t,
+ tr: t.QUICTransport,
+ }, nil
+ }
+ return nil, errors.New("expected to be able to share with a QUIC listener, but the QUIC listener is not using a refcountedTransport. `DisableReuseport` should not be set")
+}
+
+func (c *ConnManager) transportForListen(network string, laddr *net.UDPAddr) (RefCountedQUICTransport, error) {
+ if c.enableReuseport {
+ reuse, err := c.getReuse(network)
+ if err != nil {
+ return nil, err
+ }
+ tr, err := reuse.TransportForListen(network, laddr)
+ if err != nil {
+ return nil, err
+ }
+ return tr, nil
+ }
+
+ conn, err := c.listenUDP(network, laddr)
+ if err != nil {
+ return nil, err
+ }
+ return c.newSingleOwnerTransport(conn), nil
+}
+
+type associationKey struct{}
+
+// WithAssociation returns a new context with the given association. Used in
+// DialQUIC to prefer a transport that has the given association.
+func WithAssociation(ctx context.Context, association any) context.Context {
+ return context.WithValue(ctx, associationKey{}, association)
+}
+
+// DialQUIC dials `raddr`. Use `WithAssociation` to prefer a transport that was previously used for listening.
+// See the documentation for `ListenQUICAndAssociate` for details on associations.
+// The priority order for reusing the transport is as follows:
+// - Listening transport with the same association
+// - Any other listening transport
+// - Any transport previously used for dialing
+// If none of these are available, a new transport is created.
+func (c *ConnManager) DialQUIC(ctx context.Context, raddr ma.Multiaddr, tlsConf *tls.Config, allowWindowIncrease func(conn *quic.Conn, delta uint64) bool) (*quic.Conn, error) {
+ naddr, v, err := FromQuicMultiaddr(raddr)
+ if err != nil {
+ return nil, err
+ }
+ netw, _, err := manet.DialArgs(raddr)
+ if err != nil {
+ return nil, err
+ }
+
+ quicConf := c.clientConfig.Clone()
+ quicConf.AllowConnectionWindowIncrease = allowWindowIncrease
+
+ if v == quic.Version1 {
+ // The endpoint has explicit support for QUIC v1, so we'll only use that version.
+ quicConf.Versions = []quic.Version{quic.Version1}
+ } else {
+ return nil, errors.New("unknown QUIC version")
+ }
+
+	association := ctx.Value(associationKey{})
+	tr, err := c.TransportWithAssociationForDial(association, netw, naddr)
+ if err != nil {
+ return nil, err
+ }
+ conn, err := tr.Dial(ctx, naddr, tlsConf, quicConf)
+ if err != nil {
+ tr.DecreaseCount()
+ return nil, err
+ }
+ return conn, nil
+}
+
+// TransportForDial returns a transport for dialing `raddr`.
+// If reuseport is enabled, it attempts to reuse the QUIC Transport used for
+// previous listens or dials.
+func (c *ConnManager) TransportForDial(network string, raddr *net.UDPAddr) (RefCountedQUICTransport, error) {
+ return c.TransportWithAssociationForDial(nil, network, raddr)
+}
+
+// TransportWithAssociationForDial returns a transport for dialing `raddr`.
+// If reuseport is enabled, it attempts to reuse the QUIC Transport previously used for listening via `ListenQUICAndAssociate`
+// with the same `association`. If it fails to do so, it uses any other previously used transport.
+func (c *ConnManager) TransportWithAssociationForDial(association any, network string, raddr *net.UDPAddr) (RefCountedQUICTransport, error) {
+ if c.enableReuseport {
+ reuse, err := c.getReuse(network)
+ if err != nil {
+ return nil, err
+ }
+ return reuse.TransportWithAssociationForDial(association, network, raddr)
+ }
+
+ var laddr *net.UDPAddr
+ switch network {
+ case "udp4":
+ laddr = &net.UDPAddr{IP: net.IPv4zero, Port: 0}
+ case "udp6":
+ laddr = &net.UDPAddr{IP: net.IPv6zero, Port: 0}
+ }
+ conn, err := c.listenUDP(network, laddr)
+ if err != nil {
+ return nil, err
+ }
+ return c.newSingleOwnerTransport(conn), nil
+}
+
+func (c *ConnManager) newSingleOwnerTransport(conn net.PacketConn) *singleOwnerTransport {
+ return &singleOwnerTransport{
+ Transport: &wrappedQUICTransport{
+ Transport: newQUICTransport(
+ conn,
+ &c.tokenKey,
+ &c.srk,
+ c.connContext,
+ c.verifySourceAddress,
+ ),
+ },
+		packetConn: conn,
+	}
+}
+
+// Protocols returns the supported QUIC protocols. The only supported protocol at the moment is /quic-v1.
+func (c *ConnManager) Protocols() []int {
+ return []int{ma.P_QUIC_V1}
+}
+
+func (c *ConnManager) Close() error {
+ if !c.enableReuseport {
+ return nil
+ }
+ if err := c.reuseUDP6.Close(); err != nil {
+ return err
+ }
+ return c.reuseUDP4.Close()
+}
+
+// ClientConfig returns the quic.Config used for outgoing connections.
+func (c *ConnManager) ClientConfig() *quic.Config {
+ return c.clientConfig
+}
+
+// wrappedQUICTransport wraps a `quic.Transport` to conform to `QUICTransport`.
+type wrappedQUICTransport struct {
+ *quic.Transport
+}
+
+var _ QUICTransport = (*wrappedQUICTransport)(nil)
+
+func (t *wrappedQUICTransport) Listen(tlsConf *tls.Config, conf *quic.Config) (QUICListener, error) {
+ return t.Transport.Listen(tlsConf, conf)
+}
+
+func newQUICTransport(
+ conn net.PacketConn,
+ tokenGeneratorKey *quic.TokenGeneratorKey,
+ statelessResetKey *quic.StatelessResetKey,
+ connContext connContextFunc,
+ verifySourceAddress func(addr net.Addr) bool,
+) *quic.Transport {
+ return &quic.Transport{
+ Conn: conn,
+ TokenGeneratorKey: tokenGeneratorKey,
+ StatelessResetKey: statelessResetKey,
+ ConnContext: connContext,
+ VerifySourceAddress: verifySourceAddress,
+ }
+}
diff --git a/p2p/transport/quicreuse/connmgr_test.go b/p2p/transport/quicreuse/connmgr_test.go
new file mode 100644
index 0000000000..f99646cdce
--- /dev/null
+++ b/p2p/transport/quicreuse/connmgr_test.go
@@ -0,0 +1,627 @@
+package quicreuse
+
+import (
+ "context"
+ "crypto/rand"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "net"
+ "runtime"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/peer"
+ libp2ptls "github.com/libp2p/go-libp2p/p2p/security/tls"
+
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+ "github.com/quic-go/quic-go"
+ "github.com/stretchr/testify/require"
+)
+
+func checkClosed(t *testing.T, cm *ConnManager) {
+ for _, r := range []*reuse{cm.reuseUDP4, cm.reuseUDP6} {
+ if r == nil {
+ continue
+ }
+ r.mutex.Lock()
+ for _, tr := range r.globalListeners {
+ require.Zero(t, tr.GetCount())
+ }
+ for _, trs := range r.unicast {
+ for _, tr := range trs {
+ require.Zero(t, tr.GetCount())
+ }
+ }
+ r.mutex.Unlock()
+ }
+ require.Eventually(t, func() bool { return !isGarbageCollectorRunning() }, 200*time.Millisecond, 10*time.Millisecond)
+}
+
+func TestListenOnSameProto(t *testing.T) {
+ t.Run("with reuseport", func(t *testing.T) {
+ testListenOnSameProto(t, true)
+ })
+
+ t.Run("without reuseport", func(t *testing.T) {
+ testListenOnSameProto(t, false)
+ })
+}
+
+func testListenOnSameProto(t *testing.T, enableReuseport bool) {
+ var opts []Option
+ if !enableReuseport {
+ opts = append(opts, DisableReuseport())
+ }
+ cm, err := NewConnManager(quic.StatelessResetKey{}, quic.TokenGeneratorKey{}, opts...)
+ require.NoError(t, err)
+ defer checkClosed(t, cm)
+ defer cm.Close()
+
+ const alpn = "proto"
+
+ ln1, err := cm.ListenQUIC(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1"), &tls.Config{NextProtos: []string{alpn}}, nil)
+ require.NoError(t, err)
+ defer func() { _ = ln1.Close() }()
+
+ addr := ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/udp/%d/quic-v1", ln1.Addr().(*net.UDPAddr).Port))
+ _, err = cm.ListenQUIC(addr, &tls.Config{NextProtos: []string{alpn}}, nil)
+ require.EqualError(t, err, "already listening for protocol "+alpn)
+
+ // listening on a different address works
+ ln2, err := cm.ListenQUIC(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1"), &tls.Config{NextProtos: []string{alpn}}, nil)
+ require.NoError(t, err)
+ defer func() { _ = ln2.Close() }()
+}
+
+// The conn passed to quic-go should be type-assertable to a *net.UDPConn, so
+// that quic-go can use all kinds of optimizations.
+func TestConnectionPassedToQUICForListening(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("skipping on windows. Windows doesn't support these optimizations")
+ }
+ cm, err := NewConnManager(quic.StatelessResetKey{}, quic.TokenGeneratorKey{}, DisableReuseport())
+ require.NoError(t, err)
+ defer cm.Close()
+
+ raddr := ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1")
+
+ naddr, _, err := FromQuicMultiaddr(raddr)
+ require.NoError(t, err)
+ netw, _, err := manet.DialArgs(raddr)
+ require.NoError(t, err)
+
+ _, err = cm.ListenQUIC(raddr, &tls.Config{NextProtos: []string{"proto"}}, nil)
+ require.NoError(t, err)
+ quicTr, err := cm.transportForListen(netw, naddr)
+ require.NoError(t, err)
+ defer quicTr.Close()
+ if _, ok := quicTr.(*singleOwnerTransport).Transport.(*wrappedQUICTransport).Conn.(quic.OOBCapablePacketConn); !ok {
+ t.Fatal("connection passed to quic-go cannot be type asserted to a *net.UDPConn")
+ }
+}
+
+func TestAcceptErrorGetCleanedUp(t *testing.T) {
+ raddr := ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1")
+
+ cm, err := NewConnManager(quic.StatelessResetKey{}, quic.TokenGeneratorKey{}, DisableReuseport())
+ require.NoError(t, err)
+ defer cm.Close()
+
+ originalNumberOfGoroutines := runtime.NumGoroutine()
+ t.Log("num goroutines:", originalNumberOfGoroutines)
+
+ // This spawns a background goroutine for the listener
+ l, err := cm.ListenQUIC(raddr, &tls.Config{NextProtos: []string{"proto"}}, nil)
+ require.NoError(t, err)
+
+ // We spawned a goroutine for the listener
+ require.Greater(t, runtime.NumGoroutine(), originalNumberOfGoroutines)
+ l.Close()
+
+	// Now make sure we have fewer goroutines than before.
+	// Manually doing the same as require.Eventually, except avoiding adding a goroutine.
+ goRoutinesCleanedUp := false
+ for i := 0; i < 50; i++ {
+ t.Log("num goroutines:", runtime.NumGoroutine())
+ if runtime.NumGoroutine() <= originalNumberOfGoroutines {
+ goRoutinesCleanedUp = true
+ break
+ }
+ time.Sleep(100 * time.Millisecond)
+ }
+
+ require.True(t, goRoutinesCleanedUp, "goroutines were not cleaned up")
+}
+
+// The connection passed to quic-go needs to be type-assertable to a net.UDPConn,
+// in order to enable features like batch processing and ECN.
+func TestConnectionPassedToQUICForDialing(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("skipping on windows. Windows doesn't support these optimizations")
+ }
+ for _, reuse := range []bool{true, false} {
+ t.Run(fmt.Sprintf("reuseport: %t", reuse), func(t *testing.T) {
+ var cm *ConnManager
+ var err error
+ if reuse {
+ cm, err = NewConnManager(quic.StatelessResetKey{}, quic.TokenGeneratorKey{})
+ } else {
+ cm, err = NewConnManager(quic.StatelessResetKey{}, quic.TokenGeneratorKey{}, DisableReuseport())
+ }
+ require.NoError(t, err)
+ defer func() { _ = cm.Close() }()
+
+ raddr := ma.StringCast("/ip4/127.0.0.1/udp/1234/quic-v1")
+
+ naddr, _, err := FromQuicMultiaddr(raddr)
+ require.NoError(t, err)
+ netw, _, err := manet.DialArgs(raddr)
+ require.NoError(t, err)
+
+ quicTr, err := cm.TransportForDial(netw, naddr)
+
+ require.NoError(t, err, "dial error")
+ defer func() { _ = quicTr.Close() }()
+ if reuse {
+ if _, ok := quicTr.(*refcountedTransport).QUICTransport.(*wrappedQUICTransport).Conn.(quic.OOBCapablePacketConn); !ok {
+ t.Fatal("connection passed to quic-go cannot be type asserted to a *net.UDPConn")
+ }
+ } else {
+ if _, ok := quicTr.(*singleOwnerTransport).Transport.(*wrappedQUICTransport).Conn.(quic.OOBCapablePacketConn); !ok {
+ t.Fatal("connection passed to quic-go cannot be type asserted to a *net.UDPConn")
+ }
+ }
+ })
+ }
+}
+
+func getTLSConfForProto(t *testing.T, alpn string) (peer.ID, *tls.Config) {
+ t.Helper()
+ priv, _, err := crypto.GenerateEd25519Key(rand.Reader)
+ require.NoError(t, err)
+ id, err := peer.IDFromPrivateKey(priv)
+ require.NoError(t, err)
+ // We use the libp2p TLS certificate here, just because it's convenient.
+ identity, err := libp2ptls.NewIdentity(priv)
+ require.NoError(t, err)
+ var tlsConf tls.Config
+ tlsConf.NextProtos = []string{alpn}
+ tlsConf.GetConfigForClient = func(_ *tls.ClientHelloInfo) (*tls.Config, error) {
+ c, _ := identity.ConfigForPeer("")
+ c.NextProtos = tlsConf.NextProtos
+ return c, nil
+ }
+ return id, &tlsConf
+}
+
+func connectWithProtocol(t *testing.T, addr net.Addr, alpn string) (peer.ID, error) {
+ t.Helper()
+ clientKey, _, err := crypto.GenerateEd25519Key(rand.Reader)
+ require.NoError(t, err)
+ clientIdentity, err := libp2ptls.NewIdentity(clientKey)
+ require.NoError(t, err)
+ tlsConf, peerChan := clientIdentity.ConfigForPeer("")
+ cconn, err := net.ListenUDP("udp4", nil)
+ require.NoError(t, err)
+ tlsConf.NextProtos = []string{alpn}
+ ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
+ c, err := quic.Dial(ctx, cconn, addr, tlsConf, nil)
+ cancel()
+ if err != nil {
+ return "", err
+ }
+ defer c.CloseWithError(0, "")
+ require.Equal(t, alpn, c.ConnectionState().TLS.NegotiatedProtocol)
+ serverID, err := peer.IDFromPublicKey(<-peerChan)
+ require.NoError(t, err)
+ return serverID, nil
+}
+
+func TestListener(t *testing.T) {
+ t.Run("with reuseport", func(t *testing.T) {
+ testListener(t, true)
+ })
+
+ t.Run("without reuseport", func(t *testing.T) {
+ testListener(t, false)
+ })
+}
+
+func testListener(t *testing.T, enableReuseport bool) {
+ var opts []Option
+ if !enableReuseport {
+ opts = append(opts, DisableReuseport())
+ }
+ cm, err := NewConnManager(quic.StatelessResetKey{}, quic.TokenGeneratorKey{}, opts...)
+ require.NoError(t, err)
+
+ id1, tlsConf1 := getTLSConfForProto(t, "proto1")
+ ln1, err := cm.ListenQUIC(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1"), tlsConf1, nil)
+ require.NoError(t, err)
+
+ id2, tlsConf2 := getTLSConfForProto(t, "proto2")
+ ln2, err := cm.ListenQUIC(
+ ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/udp/%d/quic-v1", ln1.Addr().(*net.UDPAddr).Port)),
+ tlsConf2,
+ nil,
+ )
+ require.NoError(t, err)
+ require.Equal(t, ln1.Addr(), ln2.Addr())
+
+ // Test that the right certificate is served.
+ id, err := connectWithProtocol(t, ln1.Addr(), "proto1")
+ require.NoError(t, err)
+ require.Equal(t, id1, id)
+ id, err = connectWithProtocol(t, ln1.Addr(), "proto2")
+ require.NoError(t, err)
+ require.Equal(t, id2, id)
+ // No such protocol registered.
+ _, err = connectWithProtocol(t, ln1.Addr(), "proto3")
+ require.Error(t, err)
+
+ // Now close the first listener to test that it's properly deregistered.
+ require.NoError(t, ln1.Close())
+ _, err = connectWithProtocol(t, ln1.Addr(), "proto1")
+ require.Error(t, err)
+ // Connecting to the other listener should still be possible.
+ id, err = connectWithProtocol(t, ln1.Addr(), "proto2")
+ require.NoError(t, err)
+ require.Equal(t, id2, id)
+
+ ln2.Close()
+ cm.Close()
+
+ checkClosed(t, cm)
+}
+
+func TestExternalTransport(t *testing.T) {
+ conn, err := net.ListenUDP("udp4", &net.UDPAddr{IP: net.IPv4zero})
+ require.NoError(t, err)
+ defer conn.Close()
+ port := conn.LocalAddr().(*net.UDPAddr).Port
+ tr := &quic.Transport{Conn: conn}
+ defer tr.Close()
+
+ cm, err := NewConnManager(quic.StatelessResetKey{}, quic.TokenGeneratorKey{})
+ require.NoError(t, err)
+ doneWithTr, err := cm.LendTransport("udp4", &wrappedQUICTransport{tr}, conn)
+ require.NoError(t, err)
+
+ // make sure this transport is used when listening on the same port
+ ln, err := cm.ListenQUICAndAssociate(
+ "quic",
+ ma.StringCast(fmt.Sprintf("/ip4/0.0.0.0/udp/%d", port)),
+ &tls.Config{NextProtos: []string{"libp2p"}},
+ func(*quic.Conn, uint64) bool { return false },
+ )
+ require.NoError(t, err)
+ defer ln.Close()
+ require.Equal(t, port, ln.Addr().(*net.UDPAddr).Port)
+
+ // make sure this transport is used when dialing out
+ udpLn, err := net.ListenUDP("udp4", &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)})
+ require.NoError(t, err)
+ defer udpLn.Close()
+ addrChan := make(chan net.Addr, 1)
+ go func() {
+ _, addr, _ := udpLn.ReadFrom(make([]byte, 2000))
+ addrChan <- addr
+ }()
+ ctx, cancel := context.WithTimeout(context.Background(), 20*time.Millisecond)
+ defer cancel()
+ _, err = cm.DialQUIC(
+ ctx,
+ ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/udp/%d/quic-v1", udpLn.LocalAddr().(*net.UDPAddr).Port)),
+ &tls.Config{NextProtos: []string{"libp2p"}},
+ func(*quic.Conn, uint64) bool { return false },
+ )
+ require.ErrorIs(t, err, context.DeadlineExceeded)
+
+ select {
+ case addr := <-addrChan:
+ require.Equal(t, port, addr.(*net.UDPAddr).Port)
+ case <-time.After(time.Second):
+ t.Fatal("timeout")
+ }
+
+ cm.Close()
+ select {
+ case <-doneWithTr:
+ default:
+ t.Fatal("doneWithTr not closed")
+ }
+}
+
+func TestAssociate(t *testing.T) {
+ testAssociate := func(lnAddr1, lnAddr2 ma.Multiaddr, dialAddr *net.UDPAddr) {
+ cm, err := NewConnManager(quic.StatelessResetKey{}, quic.TokenGeneratorKey{})
+ require.NoError(t, err)
+ defer cm.Close()
+
+ lp2pTLS := &tls.Config{NextProtos: []string{"libp2p"}}
+ assoc1 := "test-1"
+ ln1, err := cm.ListenQUICAndAssociate(assoc1, lnAddr1, lp2pTLS, nil)
+ require.NoError(t, err)
+ defer ln1.Close()
+ addrs := ln1.Multiaddrs()
+ require.Len(t, addrs, 1)
+
+ addr := addrs[0]
+ assoc2 := "test-2"
+ h3TLS := &tls.Config{NextProtos: []string{"h3"}}
+ ln2, err := cm.ListenQUICAndAssociate(assoc2, addr, h3TLS, nil)
+ require.NoError(t, err)
+ defer ln2.Close()
+
+ tr1, err := cm.TransportWithAssociationForDial(assoc1, "udp4", dialAddr)
+ require.NoError(t, err)
+ defer tr1.Close()
+ require.Equal(t, tr1.LocalAddr().String(), ln1.Addr().String())
+
+ tr2, err := cm.TransportWithAssociationForDial(assoc2, "udp4", dialAddr)
+ require.NoError(t, err)
+ defer tr2.Close()
+ require.Equal(t, tr2.LocalAddr().String(), ln2.Addr().String())
+
+ ln3, err := cm.ListenQUICAndAssociate(assoc1, lnAddr2, lp2pTLS, nil)
+ require.NoError(t, err)
+ defer ln3.Close()
+
+ // An unused association should also return one of the same transports:
+ // an association is only a preference for a specific transport, not an exclusion criterion.
+ tr3, err := cm.TransportWithAssociationForDial("unused", "udp4", dialAddr)
+ require.NoError(t, err)
+ defer tr3.Close()
+ require.Contains(t, []string{ln2.Addr().String(), ln3.Addr().String()}, tr3.LocalAddr().String())
+ }
+
+ t.Run("MultipleUnspecifiedListeners", func(_ *testing.T) {
+ testAssociate(ma.StringCast("/ip4/0.0.0.0/udp/0/quic-v1"),
+ ma.StringCast("/ip4/0.0.0.0/udp/0/quic-v1"),
+ &net.UDPAddr{IP: net.IPv4(1, 1, 1, 1), Port: 1})
+ })
+ t.Run("MultipleSpecificListeners", func(_ *testing.T) {
+ testAssociate(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1"),
+ ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1"),
+ &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 1},
+ )
+ })
+}
+
+func TestConnContext(t *testing.T) {
+ for _, reuse := range []bool{true, false} {
+ t.Run(fmt.Sprintf("reuseport:%t_error", reuse), func(t *testing.T) {
+ opts := []Option{
+ ConnContext(func(ctx context.Context, _ *quic.ClientInfo) (context.Context, error) {
+ return ctx, errors.New("test error")
+ })}
+ if !reuse {
+ opts = append(opts, DisableReuseport())
+ }
+ cm, err := NewConnManager(
+ quic.StatelessResetKey{},
+ quic.TokenGeneratorKey{},
+ opts...,
+ )
+ require.NoError(t, err)
+ defer func() { _ = cm.Close() }()
+
+ proto1 := "proto1"
+ _, proto1TLS := getTLSConfForProto(t, proto1)
+ ln1, err := cm.ListenQUIC(
+ ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1"),
+ proto1TLS,
+ nil,
+ )
+ require.NoError(t, err)
+ defer ln1.Close()
+ proto2 := "proto2"
+ _, proto2TLS := getTLSConfForProto(t, proto2)
+ ln2, err := cm.ListenQUIC(
+ ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/udp/%d/quic-v1", ln1.Addr().(*net.UDPAddr).Port)),
+ proto2TLS,
+ nil,
+ )
+ require.NoError(t, err)
+ defer ln2.Close()
+
+ _, err = connectWithProtocol(t, ln1.Addr(), proto1)
+ require.ErrorContains(t, err, "CONNECTION_REFUSED")
+
+ _, err = connectWithProtocol(t, ln1.Addr(), proto2)
+ require.ErrorContains(t, err, "CONNECTION_REFUSED")
+ })
+ t.Run(fmt.Sprintf("reuseport:%t_success", reuse), func(t *testing.T) {
+ type ctxKey struct{}
+ opts := []Option{
+ ConnContext(func(ctx context.Context, _ *quic.ClientInfo) (context.Context, error) {
+ return context.WithValue(ctx, ctxKey{}, "success"), nil
+ })}
+ if !reuse {
+ opts = append(opts, DisableReuseport())
+ }
+ cm, err := NewConnManager(
+ quic.StatelessResetKey{},
+ quic.TokenGeneratorKey{},
+ opts...,
+ )
+ require.NoError(t, err)
+ defer func() { _ = cm.Close() }()
+
+ proto1 := "proto1"
+ _, proto1TLS := getTLSConfForProto(t, proto1)
+ ln1, err := cm.ListenQUIC(
+ ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1"),
+ proto1TLS,
+ nil,
+ )
+ require.NoError(t, err)
+ defer ln1.Close()
+
+ clientKey, _, err := crypto.GenerateEd25519Key(rand.Reader)
+ require.NoError(t, err)
+ clientIdentity, err := libp2ptls.NewIdentity(clientKey)
+ require.NoError(t, err)
+ tlsConf, peerChan := clientIdentity.ConfigForPeer("")
+ cconn, err := net.ListenUDP("udp4", nil)
+ require.NoError(t, err)
+ tlsConf.NextProtos = []string{proto1}
+ ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
+ conn, err := quic.Dial(ctx, cconn, ln1.Addr(), tlsConf, nil)
+ cancel()
+ require.NoError(t, err)
+ defer conn.CloseWithError(0, "")
+
+ require.Equal(t, proto1, conn.ConnectionState().TLS.NegotiatedProtocol)
+ _, err = peer.IDFromPublicKey(<-peerChan)
+ require.NoError(t, err)
+
+ acceptCtx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+ c, err := ln1.Accept(acceptCtx)
+ cancel()
+ require.NoError(t, err)
+ defer c.CloseWithError(0, "")
+
+ require.Equal(t, "success", c.Context().Value(ctxKey{}))
+ })
+ }
+}
+
+func TestAssociationCleanup(t *testing.T) {
+ cm, err := NewConnManager(quic.StatelessResetKey{}, quic.TokenGeneratorKey{})
+ require.NoError(t, err)
+ defer cm.Close()
+
+ // Create 3 listeners with 3 different associations
+ lp2pTLS := &tls.Config{NextProtos: []string{"libp2p"}}
+ assoc1 := "test-association-1"
+ assoc2 := "test-association-2"
+ assoc3 := "test-association-3"
+
+ ln1, err := cm.ListenQUICAndAssociate(assoc1, ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1"), lp2pTLS, nil)
+ require.NoError(t, err)
+ defer ln1.Close()
+
+ addr := ln1.Multiaddrs()[0]
+ port, err := addr.ValueForProtocol(ma.P_UDP)
+ require.NoError(t, err)
+
+ h3TLS := &tls.Config{NextProtos: []string{"h3"}}
+ ln2, err := cm.ListenQUICAndAssociate(assoc2, ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/udp/%s/quic-v1", port)), h3TLS, nil)
+ require.NoError(t, err)
+ defer ln2.Close()
+
+ ln3, err := cm.ListenQUICAndAssociate(assoc3, ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1"), lp2pTLS, nil)
+ require.NoError(t, err)
+ defer ln3.Close()
+
+ // Get the listen addresses for verification
+ addr1 := ln1.Addr().String()
+ addr2 := ln2.Addr().String()
+ addr3 := ln3.Addr().String()
+ require.Equal(t, addr1, addr2)
+
+ // Test that dialing with assoc1 uses the first listener's address
+ dialAddr := &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 1234}
+
+ numTries := 100
+
+ for i := 0; i < numTries; i++ {
+ tr, err := cm.TransportWithAssociationForDial(assoc1, "udp4", dialAddr)
+ require.NoError(t, err)
+ require.Equal(t, addr1, tr.LocalAddr().String(), "assoc1 should use addr1")
+ }
+
+ // Close the first listener
+ ln1.Close()
+
+ // Call TransportWithAssociationForDial up to numTries times with assoc1 and check that we get at least one different address
+ foundDifferentAddr := false
+ for i := 0; i < numTries; i++ {
+ tr, err := cm.TransportWithAssociationForDial(assoc1, "udp4", dialAddr)
+ require.NoError(t, err)
+ actualAddr := tr.LocalAddr().String()
+ if actualAddr != addr1 {
+ foundDifferentAddr = true
+ break
+ }
+ }
+ require.True(t, foundDifferentAddr, "assoc1 should use a different address than addr1 at least once after ln1 is closed")
+
+ for i := 0; i < numTries; i++ {
+ // Test that dialing with assoc2 still uses the second listener's address
+ tr2Still, err := cm.TransportWithAssociationForDial(assoc2, "udp4", dialAddr)
+ require.NoError(t, err)
+ require.Equal(t, addr2, tr2Still.LocalAddr().String(), "assoc2 should still use addr2")
+ }
+
+ // Close the second listener
+ ln2.Close()
+
+ // Call TransportWithAssociationForDial up to numTries times with assoc2 and check that we get at least one different address
+ foundDifferentAddr2 := false
+ for i := 0; i < numTries; i++ {
+ tr, err := cm.TransportWithAssociationForDial(assoc2, "udp4", dialAddr)
+ require.NoError(t, err)
+ actualAddr := tr.LocalAddr().String()
+ if actualAddr != addr2 {
+ foundDifferentAddr2 = true
+ }
+ }
+ require.True(t, foundDifferentAddr2, "assoc2 should use a different address than addr2 at least once after ln2 is closed")
+
+ for i := 0; i < numTries; i++ {
+ // Test that dialing with assoc3 still uses the third listener's address
+ tr3Still, err := cm.TransportWithAssociationForDial(assoc3, "udp4", dialAddr)
+ require.NoError(t, err)
+ require.Equal(t, addr3, tr3Still.LocalAddr().String(), "assoc3 should still use addr3")
+ }
+}
+
+func TestConnManagerIsolation(t *testing.T) {
+ // Create two separate ConnManager instances
+ cm1, err := NewConnManager(quic.StatelessResetKey{}, quic.TokenGeneratorKey{})
+ require.NoError(t, err)
+ defer cm1.Close()
+
+ cm2, err := NewConnManager(quic.StatelessResetKey{}, quic.TokenGeneratorKey{})
+ require.NoError(t, err)
+ defer cm2.Close()
+
+ // Create listeners in both ConnManagers
+ lp2pTLS := &tls.Config{NextProtos: []string{"libp2p"}}
+ assoc1 := "cm1-association"
+ assoc2 := "cm2-association"
+
+ ln1, err := cm1.ListenQUICAndAssociate(assoc1, ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1"), lp2pTLS, nil)
+ require.NoError(t, err)
+ defer ln1.Close()
+
+ ln2, err := cm2.ListenQUICAndAssociate(assoc2, ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1"), lp2pTLS, nil)
+ require.NoError(t, err)
+ defer ln2.Close()
+
+ // Verify that each ConnManager has its own isolated associations.
+ cm1.quicListenersMu.Lock()
+ key1 := ln1.Addr().String()
+ entry1 := cm1.quicListeners[key1]
+ tr1, ok := entry1.ln.transport.(*refcountedTransport)
+ require.True(t, ok)
+ require.True(t, tr1.hasAssociation(assoc1))
+ require.False(t, tr1.hasAssociation(assoc2))
+ cm1.quicListenersMu.Unlock()
+
+ cm2.quicListenersMu.Lock()
+ key2 := ln2.Addr().String()
+ entry2 := cm2.quicListeners[key2]
+ tr2, ok := entry2.ln.transport.(*refcountedTransport)
+ require.True(t, ok)
+ require.True(t, tr2.hasAssociation(assoc2))
+ require.False(t, tr2.hasAssociation(assoc1))
+ cm2.quicListenersMu.Unlock()
+}
diff --git a/p2p/transport/quicreuse/listener.go b/p2p/transport/quicreuse/listener.go
new file mode 100644
index 0000000000..71a896b773
--- /dev/null
+++ b/p2p/transport/quicreuse/listener.go
@@ -0,0 +1,228 @@
+package quicreuse
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "strings"
+ "sync"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/transport"
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/quic-go/quic-go"
+)
+
+type Listener interface {
+ Accept(context.Context) (*quic.Conn, error)
+ Addr() net.Addr
+ Multiaddrs() []ma.Multiaddr
+ io.Closer
+}
+
+type protoConf struct {
+ ln *listener
+ tlsConf *tls.Config
+ allowWindowIncrease func(conn *quic.Conn, delta uint64) bool
+}
+
+type quicListener struct {
+ l QUICListener
+ transport RefCountedQUICTransport
+ running chan struct{}
+ addrs []ma.Multiaddr
+
+ protocolsMu sync.Mutex
+ protocols map[string]protoConf
+}
+
+func newQuicListener(tr RefCountedQUICTransport, quicConfig *quic.Config) (*quicListener, error) {
+ localMultiaddrs := make([]ma.Multiaddr, 0, 2)
+ a, err := ToQuicMultiaddr(tr.LocalAddr(), quic.Version1)
+ if err != nil {
+ return nil, err
+ }
+ localMultiaddrs = append(localMultiaddrs, a)
+ cl := &quicListener{
+ protocols: map[string]protoConf{},
+ running: make(chan struct{}),
+ transport: tr,
+ addrs: localMultiaddrs,
+ }
+ tlsConf := &tls.Config{
+ SessionTicketsDisabled: true, // Already set on the client config, but set it here as well: https://github.com/quic-go/quic-go/issues/4029
+ GetConfigForClient: func(info *tls.ClientHelloInfo) (*tls.Config, error) {
+ cl.protocolsMu.Lock()
+ defer cl.protocolsMu.Unlock()
+ for _, proto := range info.SupportedProtos {
+ if entry, ok := cl.protocols[proto]; ok {
+ conf := entry.tlsConf
+ if conf.GetConfigForClient != nil {
+ return conf.GetConfigForClient(info)
+ }
+ return conf, nil
+ }
+ }
+ return nil, fmt.Errorf("no supported protocol found. offered: %+v", info.SupportedProtos)
+ },
+ }
+ quicConf := quicConfig.Clone()
+ quicConf.AllowConnectionWindowIncrease = cl.allowWindowIncrease
+ ln, err := tr.Listen(tlsConf, quicConf)
+ if err != nil {
+ return nil, err
+ }
+ cl.l = ln
+ go cl.Run() // This goroutine shuts down once the underlying quic.Listener is closed (or returns an error).
+ return cl, nil
+}
+
+func (l *quicListener) allowWindowIncrease(conn *quic.Conn, delta uint64) bool {
+ l.protocolsMu.Lock()
+ defer l.protocolsMu.Unlock()
+
+ conf, ok := l.protocols[conn.ConnectionState().TLS.NegotiatedProtocol]
+ if !ok {
+ return false
+ }
+ return conf.allowWindowIncrease(conn, delta)
+}
+
+func (l *quicListener) Add(association any, tlsConf *tls.Config, allowWindowIncrease func(conn *quic.Conn, delta uint64) bool, onRemove func()) (*listener, error) {
+ l.protocolsMu.Lock()
+ defer l.protocolsMu.Unlock()
+
+ if len(tlsConf.NextProtos) == 0 {
+ return nil, errors.New("no ALPN found in tls.Config")
+ }
+
+ for _, proto := range tlsConf.NextProtos {
+ if _, ok := l.protocols[proto]; ok {
+ return nil, fmt.Errorf("already listening for protocol %s", proto)
+ }
+ }
+
+ ln := &listener{
+ queue: make(chan *quic.Conn, queueLen),
+ acceptLoopRunning: l.running,
+ addr: l.l.Addr(),
+ addrs: l.addrs,
+ }
+ if association != nil {
+ if tr, ok := l.transport.(*refcountedTransport); ok {
+ tr.associateForListener(association, ln)
+ }
+ }
+
+ ln.remove = func() {
+ if association != nil {
+ if tr, ok := l.transport.(*refcountedTransport); ok {
+ tr.RemoveAssociationsForListener(ln)
+ }
+ }
+ l.protocolsMu.Lock()
+ for _, proto := range tlsConf.NextProtos {
+ delete(l.protocols, proto)
+ }
+ l.protocolsMu.Unlock()
+ onRemove()
+ }
+
+ for _, proto := range tlsConf.NextProtos {
+ l.protocols[proto] = protoConf{
+ ln: ln,
+ tlsConf: tlsConf,
+ allowWindowIncrease: allowWindowIncrease,
+ }
+ }
+ return ln, nil
+}
+
+func (l *quicListener) Run() error {
+ defer close(l.running)
+ defer l.transport.DecreaseCount()
+ for {
+ conn, err := l.l.Accept(context.Background())
+ if err != nil {
+ if errors.Is(err, quic.ErrServerClosed) || strings.Contains(err.Error(), "use of closed network connection") {
+ return transport.ErrListenerClosed
+ }
+ return err
+ }
+ proto := conn.ConnectionState().TLS.NegotiatedProtocol
+
+ l.protocolsMu.Lock()
+ ln, ok := l.protocols[proto]
+ if !ok {
+ l.protocolsMu.Unlock()
+ return fmt.Errorf("negotiated unknown protocol: %s", proto)
+ }
+ ln.ln.add(conn)
+ l.protocolsMu.Unlock()
+ }
+}
+
+func (l *quicListener) Close() error {
+ err := l.l.Close()
+ <-l.running // wait for Run to return
+ return err
+}
+
+const queueLen = 16
+
+// A listener for a single ALPN protocol (set).
+type listener struct {
+ queue chan *quic.Conn
+ acceptLoopRunning chan struct{}
+ addr net.Addr
+ addrs []ma.Multiaddr
+ remove func()
+ closeOnce sync.Once
+}
+
+var _ Listener = &listener{}
+
+func (l *listener) add(c *quic.Conn) {
+ select {
+ case l.queue <- c:
+ default:
+ c.CloseWithError(1, "queue full")
+ }
+}
+
+func (l *listener) Accept(ctx context.Context) (*quic.Conn, error) {
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ case <-l.acceptLoopRunning:
+ return nil, transport.ErrListenerClosed
+ case c, ok := <-l.queue:
+ if !ok {
+ return nil, transport.ErrListenerClosed
+ }
+ return c, nil
+ }
+}
+
+func (l *listener) Addr() net.Addr {
+ return l.addr
+}
+
+func (l *listener) Multiaddrs() []ma.Multiaddr {
+ return l.addrs
+}
+
+func (l *listener) Close() error {
+ l.closeOnce.Do(func() {
+ l.remove()
+ close(l.queue)
+ // drain the queue
+ for conn := range l.queue {
+ conn.CloseWithError(quic.ApplicationErrorCode(network.ConnShutdown), "closing")
+ }
+ })
+ return nil
+}
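+
+// A minimal usage sketch (illustrative only, mirroring the tests in this
+// package): two listeners with different ALPNs can share one UDP socket, and
+// GetConfigForClient picks the matching tls.Config per incoming connection.
+//
+//	cm, _ := NewConnManager(quic.StatelessResetKey{}, quic.TokenGeneratorKey{})
+//	ln1, _ := cm.ListenQUIC(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1"), &tls.Config{NextProtos: []string{"libp2p"}}, nil)
+//	// Listen on the same port with a second ALPN:
+//	addr := ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/udp/%d/quic-v1", ln1.Addr().(*net.UDPAddr).Port))
+//	ln2, _ := cm.ListenQUIC(addr, &tls.Config{NextProtos: []string{"h3"}}, nil)
+//	defer ln1.Close()
+//	defer ln2.Close()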
diff --git a/p2p/transport/quicreuse/nonquic_packetconn.go b/p2p/transport/quicreuse/nonquic_packetconn.go
new file mode 100644
index 0000000000..3fc5ad7cc5
--- /dev/null
+++ b/p2p/transport/quicreuse/nonquic_packetconn.go
@@ -0,0 +1,72 @@
+package quicreuse
+
+import (
+ "context"
+ "net"
+ "time"
+)
+
+// nonQUICPacketConn is a net.PacketConn that can be used to read and write
+// non-QUIC packets on a quic.Transport. This lets us reuse this UDP port for
+// other transports like WebRTC.
+type nonQUICPacketConn struct {
+ owningTransport RefCountedQUICTransport
+ tr QUICTransport
+ ctx context.Context
+ ctxCancel context.CancelFunc
+ readCtx context.Context
+ readCancel context.CancelFunc
+}
+
+// Close implements net.PacketConn.
+func (n *nonQUICPacketConn) Close() error {
+ n.ctxCancel()
+
+ // Don't actually close the underlying transport since someone else might be using it.
+ // reuse has its own GC to close unused transports.
+ n.owningTransport.DecreaseCount()
+ return nil
+}
+
+// LocalAddr implements net.PacketConn.
+func (n *nonQUICPacketConn) LocalAddr() net.Addr {
+ return n.owningTransport.LocalAddr()
+}
+
+// ReadFrom implements net.PacketConn.
+func (n *nonQUICPacketConn) ReadFrom(p []byte) (int, net.Addr, error) {
+ ctx := n.readCtx
+ if ctx == nil {
+ ctx = n.ctx
+ }
+ return n.tr.ReadNonQUICPacket(ctx, p)
+}
+
+// SetDeadline implements net.PacketConn.
+func (n *nonQUICPacketConn) SetDeadline(t time.Time) error {
+ // Only used for reads.
+ return n.SetReadDeadline(t)
+}
+
+// SetReadDeadline implements net.PacketConn.
+func (n *nonQUICPacketConn) SetReadDeadline(t time.Time) error {
+ // Cancel any previously set deadline context so it doesn't leak.
+ if n.readCancel != nil {
+ n.readCancel()
+ n.readCtx, n.readCancel = nil, nil
+ }
+ if t.IsZero() {
+ // A zero value clears the deadline; reads fall back to n.ctx.
+ return nil
+ }
+ n.readCtx, n.readCancel = context.WithDeadline(n.ctx, t)
+ return nil
+}
+
+// SetWriteDeadline implements net.PacketConn.
+func (n *nonQUICPacketConn) SetWriteDeadline(_ time.Time) error {
+ // Unused. quic-go doesn't support deadlines for writes.
+ return nil
+}
+
+// WriteTo implements net.PacketConn.
+func (n *nonQUICPacketConn) WriteTo(p []byte, addr net.Addr) (int, error) {
+ return n.tr.WriteTo(p, addr)
+}
+
+var _ net.PacketConn = &nonQUICPacketConn{}
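+
+// Deadline semantics sketch (illustrative only): reads are bounded by a context
+// derived from the transport's context, so deadlines compose with Close.
+//
+//	var pc net.PacketConn // a *nonQUICPacketConn handed out by the ConnManager
+//	_ = pc.SetReadDeadline(time.Now().Add(time.Second)) // ReadFrom fails once the deadline context expires
+//	_ = pc.SetReadDeadline(time.Time{})                 // zero value clears the deadline; ReadFrom blocks again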
diff --git a/p2p/transport/quicreuse/options.go b/p2p/transport/quicreuse/options.go
new file mode 100644
index 0000000000..d6c146565c
--- /dev/null
+++ b/p2p/transport/quicreuse/options.go
@@ -0,0 +1,75 @@
+package quicreuse
+
+import (
+ "context"
+ "errors"
+ "net"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/quic-go/quic-go"
+)
+
+type Option func(*ConnManager) error
+
+type listenUDP func(network string, laddr *net.UDPAddr) (net.PacketConn, error)
+
+func OverrideListenUDP(f listenUDP) Option {
+ return func(m *ConnManager) error {
+ m.listenUDP = f
+ return nil
+ }
+}
+
+func OverrideSourceIPSelector(f func() (SourceIPSelector, error)) Option {
+ return func(m *ConnManager) error {
+ m.sourceIPSelectorFn = f
+ return nil
+ }
+}
+
+func WithQlogTracerDir(dir string) Option {
+ return func(m *ConnManager) error {
+ m.qlogTracerDir = dir
+ return nil
+ }
+}
+
+func DisableReuseport() Option {
+ return func(m *ConnManager) error {
+ m.enableReuseport = false
+ return nil
+ }
+}
+
+// ConnContext sets the context for all connections accepted by listeners. This doesn't affect the
+// context for dialed connections. To reject a connection, return a non-nil error.
+func ConnContext(f func(ctx context.Context, clientInfo *quic.ClientInfo) (context.Context, error)) Option {
+ return func(m *ConnManager) error {
+ if m.connContext != nil {
+ return errors.New("cannot set ConnContext more than once")
+ }
+ m.connContext = f
+ return nil
+ }
+}
+
+// VerifySourceAddress sets a callback that decides whether to verify the source address
+// for incoming connection requests. For more details see: `quic.Transport.VerifySourceAddress`
+func VerifySourceAddress(f func(addr net.Addr) bool) Option {
+ return func(m *ConnManager) error {
+ m.verifySourceAddress = f
+ return nil
+ }
+}
+
+// EnableMetrics enables Prometheus metrics collection. If reg is nil,
+// prometheus.DefaultRegisterer will be used as the registerer.
+func EnableMetrics(reg prometheus.Registerer) Option {
+ return func(m *ConnManager) error {
+ m.enableMetrics = true
+ if reg != nil {
+ m.registerer = reg
+ }
+ return nil
+ }
+}
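+
+// A construction sketch (illustrative only) combining several options. The
+// zero-value keys here are placeholders; real callers should pass random keys.
+//
+//	cm, err := NewConnManager(
+//		quic.StatelessResetKey{},
+//		quic.TokenGeneratorKey{},
+//		DisableReuseport(),
+//		EnableMetrics(prometheus.DefaultRegisterer),
+//		VerifySourceAddress(func(net.Addr) bool { return true }),
+//	)
+//	if err != nil {
+//		// handle the error
+//	}
+//	defer cm.Close()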
diff --git a/p2p/transport/quicreuse/quic_multiaddr.go b/p2p/transport/quicreuse/quic_multiaddr.go
new file mode 100644
index 0000000000..af16547357
--- /dev/null
+++ b/p2p/transport/quicreuse/quic_multiaddr.go
@@ -0,0 +1,58 @@
+package quicreuse
+
+import (
+ "errors"
+ "net"
+
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+ "github.com/quic-go/quic-go"
+)
+
+var (
+ quicV1MA = ma.StringCast("/quic-v1")
+)
+
+func ToQuicMultiaddr(na net.Addr, version quic.Version) (ma.Multiaddr, error) {
+ udpMA, err := manet.FromNetAddr(na)
+ if err != nil {
+ return nil, err
+ }
+ switch version {
+ case quic.Version1:
+ return udpMA.Encapsulate(quicV1MA), nil
+ default:
+ return nil, errors.New("unknown QUIC version")
+ }
+}
+
+func FromQuicMultiaddr(addr ma.Multiaddr) (*net.UDPAddr, quic.Version, error) {
+ var version quic.Version
+ partsBeforeQUIC := make([]ma.Component, 0, 2)
+loop:
+ for _, c := range addr {
+ switch c.Protocol().Code {
+ case ma.P_QUIC_V1:
+ version = quic.Version1
+ break loop
+ default:
+ partsBeforeQUIC = append(partsBeforeQUIC, c)
+ }
+ }
+ if len(partsBeforeQUIC) == 0 {
+ return nil, version, errors.New("no addr before QUIC component")
+ }
+ if version == 0 {
+ // Not found
+ return nil, version, errors.New("unknown QUIC version")
+ }
+ netAddr, err := manet.ToNetAddr(partsBeforeQUIC)
+ if err != nil {
+ return nil, version, err
+ }
+ udpAddr, ok := netAddr.(*net.UDPAddr)
+ if !ok {
+ return nil, 0, errors.New("not a *net.UDPAddr")
+ }
+ return udpAddr, version, nil
+}
diff --git a/p2p/transport/quicreuse/quic_multiaddr_test.go b/p2p/transport/quicreuse/quic_multiaddr_test.go
new file mode 100644
index 0000000000..a6242e2674
--- /dev/null
+++ b/p2p/transport/quicreuse/quic_multiaddr_test.go
@@ -0,0 +1,34 @@
+package quicreuse
+
+import (
+ "net"
+ "testing"
+
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/quic-go/quic-go"
+ "github.com/stretchr/testify/require"
+)
+
+func TestConvertToQuicV1Multiaddr(t *testing.T) {
+ addr := &net.UDPAddr{IP: net.IPv4(192, 168, 0, 42), Port: 1337}
+ maddr, err := ToQuicMultiaddr(addr, quic.Version1)
+ require.NoError(t, err)
+ require.Equal(t, "/ip4/192.168.0.42/udp/1337/quic-v1", maddr.String())
+}
+
+func TestConvertFromQuicV1Multiaddr(t *testing.T) {
+ maddr, err := ma.NewMultiaddr("/ip4/192.168.0.42/udp/1337/quic-v1")
+ require.NoError(t, err)
+ udpAddr, v, err := FromQuicMultiaddr(maddr)
+ require.NoError(t, err)
+ require.Equal(t, net.IPv4(192, 168, 0, 42), udpAddr.IP)
+ require.Equal(t, 1337, udpAddr.Port)
+ require.Equal(t, quic.Version1, v)
+}
diff --git a/p2p/transport/quicreuse/reuse.go b/p2p/transport/quicreuse/reuse.go
new file mode 100644
index 0000000000..2176d24e7b
--- /dev/null
+++ b/p2p/transport/quicreuse/reuse.go
@@ -0,0 +1,483 @@
+package quicreuse
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "net"
+ "sync"
+ "time"
+
+ "github.com/google/gopacket/routing"
+ "github.com/quic-go/quic-go"
+)
+
+type RefCountedQUICTransport interface {
+ LocalAddr() net.Addr
+
+ // Used to send packets directly around QUIC. Useful for hole punching.
+ WriteTo([]byte, net.Addr) (int, error)
+
+ Close() error
+
+ // Reference counting for the transport.
+ DecreaseCount()
+ IncreaseCount()
+
+ Dial(ctx context.Context, addr net.Addr, tlsConf *tls.Config, conf *quic.Config) (*quic.Conn, error)
+ Listen(tlsConf *tls.Config, conf *quic.Config) (QUICListener, error)
+}
+
+type singleOwnerTransport struct {
+ Transport QUICTransport
+
+ // Used to write packets directly around QUIC.
+ packetConn net.PacketConn
+}
+
+var _ QUICTransport = &singleOwnerTransport{}
+var _ RefCountedQUICTransport = (*singleOwnerTransport)(nil)
+
+func (c *singleOwnerTransport) IncreaseCount() {}
+func (c *singleOwnerTransport) DecreaseCount() { c.Transport.Close() }
+func (c *singleOwnerTransport) LocalAddr() net.Addr {
+ return c.packetConn.LocalAddr()
+}
+
+func (c *singleOwnerTransport) Dial(ctx context.Context, addr net.Addr, tlsConf *tls.Config, conf *quic.Config) (*quic.Conn, error) {
+ return c.Transport.Dial(ctx, addr, tlsConf, conf)
+}
+
+func (c *singleOwnerTransport) ReadNonQUICPacket(ctx context.Context, b []byte) (int, net.Addr, error) {
+ return c.Transport.ReadNonQUICPacket(ctx, b)
+}
+
+func (c *singleOwnerTransport) Close() error {
+ return errors.Join(c.Transport.Close(), c.packetConn.Close())
+}
+
+func (c *singleOwnerTransport) WriteTo(b []byte, addr net.Addr) (int, error) {
+ return c.Transport.WriteTo(b, addr)
+}
+
+func (c *singleOwnerTransport) Listen(tlsConf *tls.Config, conf *quic.Config) (QUICListener, error) {
+ return c.Transport.Listen(tlsConf, conf)
+}
+
+// Constants. Defined as variables to simplify testing.
+var (
+ garbageCollectInterval = 30 * time.Second
+ maxUnusedDuration = 10 * time.Second
+)
+
+type refcountedTransport struct {
+ QUICTransport
+
+ // Used to write packets directly around QUIC.
+ packetConn net.PacketConn
+
+ mutex sync.Mutex
+ refCount int
+ unusedSince time.Time
+
+ // Only set for transports we are borrowing.
+ // If set, we will _never_ close the underlying transport. We only close this
+ // channel to signal to the owner that we are done with it.
+ borrowDoneSignal chan struct{}
+
+ // Store associations as association -> set of listener objects
+ associations map[any]map[*listener]struct{}
+}
+
+type connContextFunc = func(context.Context, *quic.ClientInfo) (context.Context, error)
+
+// associateForListener associates an arbitrary value with this transport for a specific listener.
+// This lets us "tag" the refcountedTransport when listening so we can use it
+// later for dialing. The listener parameter allows proper cleanup when the listener closes.
+// Necessary for holepunching and learning about our own observed listening address.
+func (c *refcountedTransport) associateForListener(a any, ln *listener) {
+ if a == nil {
+ return
+ }
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+ if c.associations == nil {
+ c.associations = make(map[any]map[*listener]struct{})
+ }
+ if c.associations[a] == nil {
+ c.associations[a] = make(map[*listener]struct{})
+ }
+ c.associations[a][ln] = struct{}{}
+}
+
+// RemoveAssociationsForListener removes ALL associations added by a specific listener
+func (c *refcountedTransport) RemoveAssociationsForListener(ln *listener) {
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+
+ // Remove this listener from all associations
+ for association, listeners := range c.associations {
+ delete(listeners, ln)
+ // If no listeners remain for this association, remove the association entirely
+ if len(listeners) == 0 {
+ delete(c.associations, association)
+ }
+ }
+}
+
+// hasAssociation returns true if the transport has the given association.
+// If it is a nil association, it will always return true.
+func (c *refcountedTransport) hasAssociation(a any) bool {
+ if a == nil {
+ return true
+ }
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+ listeners, ok := c.associations[a]
+ return ok && len(listeners) > 0
+}
+
+func (c *refcountedTransport) IncreaseCount() {
+ c.mutex.Lock()
+ c.refCount++
+ c.unusedSince = time.Time{}
+ c.mutex.Unlock()
+}
+
+func (c *refcountedTransport) Close() error {
+ if c.borrowDoneSignal != nil {
+ close(c.borrowDoneSignal)
+ return nil
+ }
+
+ return errors.Join(c.QUICTransport.Close(), c.packetConn.Close())
+}
+
+func (c *refcountedTransport) WriteTo(b []byte, addr net.Addr) (int, error) {
+ return c.QUICTransport.WriteTo(b, addr)
+}
+
+func (c *refcountedTransport) LocalAddr() net.Addr {
+ return c.packetConn.LocalAddr()
+}
+
+func (c *refcountedTransport) Listen(tlsConf *tls.Config, conf *quic.Config) (QUICListener, error) {
+ return c.QUICTransport.Listen(tlsConf, conf)
+}
+
+func (c *refcountedTransport) DecreaseCount() {
+ c.mutex.Lock()
+ c.refCount--
+ if c.refCount == 0 {
+ c.unusedSince = time.Now()
+ }
+ c.mutex.Unlock()
+}
+
+func (c *refcountedTransport) ShouldGarbageCollect(now time.Time) bool {
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+ return !c.unusedSince.IsZero() && c.unusedSince.Add(maxUnusedDuration).Before(now)
+}
+
+type reuse struct {
+ mutex sync.Mutex
+
+ closeChan chan struct{}
+ gcStopChan chan struct{}
+
+ listenUDP listenUDP
+
+ sourceIPSelectorFn func() (SourceIPSelector, error)
+
+ routes SourceIPSelector
+ unicast map[string] /* IP.String() */ map[int] /* port */ *refcountedTransport
+ // globalListeners contains transports that are listening on 0.0.0.0 / ::
+ globalListeners map[int]*refcountedTransport
+ // globalDialers contains transports that we've dialed out from. These transports are listening on 0.0.0.0 / ::
+ // On Dial, transports are reused from this map if no transport is available in the globalListeners
+ // On Listen, transports are reused from this map if the requested port is 0, and then moved to globalListeners
+ globalDialers map[int]*refcountedTransport
+
+ statelessResetKey *quic.StatelessResetKey
+ tokenGeneratorKey *quic.TokenGeneratorKey
+ connContext connContextFunc
+ verifySourceAddress func(addr net.Addr) bool
+}
+
+func newReuse(srk *quic.StatelessResetKey, tokenKey *quic.TokenGeneratorKey, listenUDP listenUDP, sourceIPSelectorFn func() (SourceIPSelector, error),
+ connContext connContextFunc, verifySourceAddress func(addr net.Addr) bool) *reuse {
+ r := &reuse{
+ unicast: make(map[string]map[int]*refcountedTransport),
+ globalListeners: make(map[int]*refcountedTransport),
+ globalDialers: make(map[int]*refcountedTransport),
+ closeChan: make(chan struct{}),
+ gcStopChan: make(chan struct{}),
+ listenUDP: listenUDP,
+ sourceIPSelectorFn: sourceIPSelectorFn,
+ statelessResetKey: srk,
+ tokenGeneratorKey: tokenKey,
+ connContext: connContext,
+ verifySourceAddress: verifySourceAddress,
+ }
+ go r.gc()
+ return r
+}
+
+func (r *reuse) gc() {
+ defer func() {
+ r.mutex.Lock()
+ for _, tr := range r.globalListeners {
+ tr.Close()
+ }
+ for _, tr := range r.globalDialers {
+ tr.Close()
+ }
+ for _, trs := range r.unicast {
+ for _, tr := range trs {
+ tr.Close()
+ }
+ }
+ r.mutex.Unlock()
+ close(r.gcStopChan)
+ }()
+ ticker := time.NewTicker(garbageCollectInterval)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-r.closeChan:
+ return
+ case <-ticker.C:
+ now := time.Now()
+ r.mutex.Lock()
+ for key, tr := range r.globalListeners {
+ if tr.ShouldGarbageCollect(now) {
+ tr.Close()
+ delete(r.globalListeners, key)
+ }
+ }
+ for key, tr := range r.globalDialers {
+ if tr.ShouldGarbageCollect(now) {
+ tr.Close()
+ delete(r.globalDialers, key)
+ }
+ }
+ for ukey, trs := range r.unicast {
+ for key, tr := range trs {
+ if tr.ShouldGarbageCollect(now) {
+ tr.Close()
+ delete(trs, key)
+ }
+ }
+ if len(trs) == 0 {
+ delete(r.unicast, ukey)
+ // If we've dropped all transports with a unicast binding,
+ // assume our routes may have changed.
+ if len(r.unicast) == 0 {
+ r.routes = nil
+ } else {
+ // Ignore the error, there's nothing we can do about
+ // it.
+ r.routes, _ = r.sourceIPSelectorFn()
+ }
+ }
+ }
+ r.mutex.Unlock()
+ }
+ }
+}
+
+func (r *reuse) TransportWithAssociationForDial(association any, network string, raddr *net.UDPAddr) (*refcountedTransport, error) {
+ var ip *net.IP
+
+ // Only bother looking up the source address if we actually _have_ non-0.0.0.0 listeners.
+ // Otherwise, save some time.
+
+ r.mutex.Lock()
+ router := r.routes
+ r.mutex.Unlock()
+
+ if router != nil {
+ src, err := router.PreferredSourceIPForDestination(raddr)
+ if err == nil && !src.IsUnspecified() {
+ ip = &src
+ }
+ }
+
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+
+ tr, err := r.transportForDialLocked(association, network, ip)
+ if err != nil {
+ return nil, err
+ }
+ tr.IncreaseCount()
+ return tr, nil
+}
+
+func (r *reuse) transportForDialLocked(association any, network string, source *net.IP) (*refcountedTransport, error) {
+ if source != nil {
+ // We already have at least one suitable transport...
+ if trs, ok := r.unicast[source.String()]; ok {
+ // Prefer a transport that has the given association. We want to
+ // reuse the transport the association used for listening.
+ for _, tr := range trs {
+ if tr.hasAssociation(association) {
+ return tr, nil
+ }
+ }
+ // We don't have a transport with the association, use any one
+ for _, tr := range trs {
+ return tr, nil
+ }
+ }
+ }
+
+ // Use a transport listening on 0.0.0.0 (or ::).
+ // Again, prefer a transport that has the given association.
+ for _, tr := range r.globalListeners {
+ if tr.hasAssociation(association) {
+ return tr, nil
+ }
+ }
+ // We don't have a transport with the association, use any one
+ for _, tr := range r.globalListeners {
+ return tr, nil
+ }
+
+ // Use a transport we've previously dialed from
+ for _, tr := range r.globalDialers {
+ return tr, nil
+ }
+
+ // We don't have a transport that we can use for dialing.
+ // Dial a new connection from a random port.
+ var addr *net.UDPAddr
+ switch network {
+ case "udp4":
+ addr = &net.UDPAddr{IP: net.IPv4zero, Port: 0}
+ case "udp6":
+ addr = &net.UDPAddr{IP: net.IPv6zero, Port: 0}
+ }
+ conn, err := r.listenUDP(network, addr)
+ if err != nil {
+ return nil, err
+ }
+ tr := r.newTransport(conn)
+ r.globalDialers[conn.LocalAddr().(*net.UDPAddr).Port] = tr
+ return tr, nil
+}
+
+func (r *reuse) AddTransport(tr *refcountedTransport, laddr *net.UDPAddr) error {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+
+ if !laddr.IP.IsUnspecified() {
+ return errors.New("adding transport for specific IP not supported")
+ }
+ if _, ok := r.globalDialers[laddr.Port]; ok {
+ return fmt.Errorf("already have global dialer for port %d", laddr.Port)
+ }
+ r.globalDialers[laddr.Port] = tr
+ return nil
+}
+
+func (r *reuse) TransportForListen(network string, laddr *net.UDPAddr) (*refcountedTransport, error) {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+
+ // Check if we can reuse a transport we have already dialed out from.
+ // We reuse a transport from globalDialers when the requested port is 0 or the requested
+ // port is already in the globalDialers.
+ // If we are reusing a transport from globalDialers, we move the globalDialers entry to
+ // globalListeners
+ if laddr.IP.IsUnspecified() {
+ var rTr *refcountedTransport
+ var localAddr *net.UDPAddr
+
+ if laddr.Port == 0 {
+ // the requested port is 0, we can reuse any transport
+ for _, tr := range r.globalDialers {
+ rTr = tr
+ localAddr = rTr.LocalAddr().(*net.UDPAddr)
+ delete(r.globalDialers, localAddr.Port)
+ break
+ }
+ } else if _, ok := r.globalDialers[laddr.Port]; ok {
+ rTr = r.globalDialers[laddr.Port]
+ localAddr = rTr.LocalAddr().(*net.UDPAddr)
+ delete(r.globalDialers, localAddr.Port)
+ }
+ // found a match
+ if rTr != nil {
+ rTr.IncreaseCount()
+ r.globalListeners[localAddr.Port] = rTr
+ return rTr, nil
+ }
+ }
+
+ conn, err := r.listenUDP(network, laddr)
+ if err != nil {
+ return nil, err
+ }
+ tr := r.newTransport(conn)
+ tr.IncreaseCount()
+
+ localAddr := conn.LocalAddr().(*net.UDPAddr)
+ // Deal with listen on a global address
+ if localAddr.IP.IsUnspecified() {
+ // The kernel already checked that laddr wasn't already in use when we
+ // created the conn via ListenUDP, so we don't need to check again here.
+ r.globalListeners[localAddr.Port] = tr
+ return tr, nil
+ }
+
+ // Deal with listen on a unicast address
+ if _, ok := r.unicast[localAddr.IP.String()]; !ok {
+ r.unicast[localAddr.IP.String()] = make(map[int]*refcountedTransport)
+ // Assume the system's routes may have changed if we're adding a new listener.
+ // Ignore the error, there's nothing we can do.
+ r.routes, _ = r.sourceIPSelectorFn()
+ }
+
+ // The kernel already checked that laddr wasn't already in use when we
+ // created the conn via ListenUDP, so we don't need to check again here.
+ r.unicast[localAddr.IP.String()][localAddr.Port] = tr
+ return tr, nil
+}
+
+func (r *reuse) newTransport(conn net.PacketConn) *refcountedTransport {
+ return &refcountedTransport{
+ QUICTransport: &wrappedQUICTransport{
+ Transport: newQUICTransport(
+ conn,
+ r.tokenGeneratorKey,
+ r.statelessResetKey,
+ r.connContext,
+ r.verifySourceAddress,
+ ),
+ },
+ packetConn: conn,
+ }
+}
+
+func (r *reuse) Close() error {
+ close(r.closeChan)
+ <-r.gcStopChan
+ return nil
+}
+
+type SourceIPSelector interface {
+ PreferredSourceIPForDestination(dst *net.UDPAddr) (net.IP, error)
+}
+
+type netrouteSourceIPSelector struct {
+ routes routing.Router
+}
+
+func (s *netrouteSourceIPSelector) PreferredSourceIPForDestination(dst *net.UDPAddr) (net.IP, error) {
+ _, _, src, err := s.routes.Route(dst.IP)
+ return src, err
+}
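+
+// A port-reuse sketch (illustrative only, using the package-internal API the
+// tests exercise): dialing first creates a transport in globalDialers;
+// listening on a wildcard address with port 0 then adopts that transport and
+// moves it to globalListeners, so dial and listen share one UDP socket.
+//
+//	r := newReuse(nil, nil, defaultListenUDP, defaultSourceIPSelectorFn, nil, nil)
+//	raddr, _ := net.ResolveUDPAddr("udp4", "1.1.1.1:1234")
+//	dTr, _ := r.TransportWithAssociationForDial(nil, "udp4", raddr)        // refCount 1, in globalDialers
+//	lTr, _ := r.TransportForListen("udp4", &net.UDPAddr{IP: net.IPv4zero}) // adopts dTr, refCount 2
+//	_ = dTr == lTr // true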
diff --git a/p2p/transport/quicreuse/reuse_test.go b/p2p/transport/quicreuse/reuse_test.go
new file mode 100644
index 0000000000..82860e88fc
--- /dev/null
+++ b/p2p/transport/quicreuse/reuse_test.go
@@ -0,0 +1,240 @@
+package quicreuse
+
+import (
+ "bytes"
+ "net"
+ "os"
+ "runtime/pprof"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-netroute"
+ "github.com/stretchr/testify/require"
+)
+
+func (c *refcountedTransport) GetCount() int {
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+ return c.refCount
+}
+
+func closeAllConns(reuse *reuse) {
+ reuse.mutex.Lock()
+ for _, tr := range reuse.globalListeners {
+ for tr.GetCount() > 0 {
+ tr.DecreaseCount()
+ }
+ }
+ for _, tr := range reuse.globalDialers {
+ for tr.GetCount() > 0 {
+ tr.DecreaseCount()
+ }
+ }
+ for _, trs := range reuse.unicast {
+ for _, tr := range trs {
+ for tr.GetCount() > 0 {
+ tr.DecreaseCount()
+ }
+ }
+ }
+ reuse.mutex.Unlock()
+}
+
+func platformHasRoutingTables() bool {
+ _, err := netroute.New()
+ return err == nil
+}
+
+func isGarbageCollectorRunning() bool {
+ var b bytes.Buffer
+ pprof.Lookup("goroutine").WriteTo(&b, 1)
+ return strings.Contains(b.String(), "quicreuse.(*reuse).gc")
+}
+
+func cleanup(t *testing.T, reuse *reuse) {
+ t.Cleanup(func() {
+ closeAllConns(reuse)
+ reuse.Close()
+ require.False(t, isGarbageCollectorRunning(), "reuse gc still running")
+ })
+}
+
+func TestReuseListenOnAllIPv4(t *testing.T) {
+ reuse := newReuse(nil, nil, defaultListenUDP, defaultSourceIPSelectorFn, nil, nil)
+ require.Eventually(t, isGarbageCollectorRunning, 500*time.Millisecond, 50*time.Millisecond, "expected garbage collector to be running")
+ cleanup(t, reuse)
+
+ addr, err := net.ResolveUDPAddr("udp4", "0.0.0.0:0")
+ require.NoError(t, err)
+ conn, err := reuse.TransportForListen("udp4", addr)
+ require.NoError(t, err)
+ require.Equal(t, 1, conn.GetCount())
+}
+
+func TestReuseListenOnAllIPv6(t *testing.T) {
+ reuse := newReuse(nil, nil, defaultListenUDP, defaultSourceIPSelectorFn, nil, nil)
+ require.Eventually(t, isGarbageCollectorRunning, 500*time.Millisecond, 50*time.Millisecond, "expected garbage collector to be running")
+ cleanup(t, reuse)
+
+ addr, err := net.ResolveUDPAddr("udp6", "[::]:1234")
+ require.NoError(t, err)
+ tr, err := reuse.TransportForListen("udp6", addr)
+ require.NoError(t, err)
+ defer tr.Close()
+ require.Equal(t, 1, tr.GetCount())
+}
+
+func TestReuseCreateNewGlobalConnOnDial(t *testing.T) {
+ reuse := newReuse(nil, nil, defaultListenUDP, defaultSourceIPSelectorFn, nil, nil)
+ cleanup(t, reuse)
+
+ addr, err := net.ResolveUDPAddr("udp4", "1.1.1.1:1234")
+ require.NoError(t, err)
+ conn, err := reuse.TransportWithAssociationForDial(nil, "udp4", addr)
+ require.NoError(t, err)
+ require.Equal(t, 1, conn.GetCount())
+ laddr := conn.LocalAddr().(*net.UDPAddr)
+ require.Equal(t, "0.0.0.0", laddr.IP.String())
+ require.NotEqual(t, 0, laddr.Port)
+}
+
+func TestReuseConnectionWhenDialing(t *testing.T) {
+ reuse := newReuse(nil, nil, defaultListenUDP, defaultSourceIPSelectorFn, nil, nil)
+ cleanup(t, reuse)
+
+ addr, err := net.ResolveUDPAddr("udp4", "0.0.0.0:0")
+ require.NoError(t, err)
+ ltr, err := reuse.TransportForListen("udp4", addr)
+ require.NoError(t, err)
+ require.Equal(t, 1, ltr.GetCount())
+ // dial
+ raddr, err := net.ResolveUDPAddr("udp4", "1.1.1.1:1234")
+ require.NoError(t, err)
+ tr, err := reuse.TransportWithAssociationForDial(nil, "udp4", raddr)
+ require.NoError(t, err)
+ require.Equal(t, 2, tr.GetCount())
+}
+
+func TestReuseConnectionWhenListening(t *testing.T) {
+ reuse := newReuse(nil, nil, defaultListenUDP, defaultSourceIPSelectorFn, nil, nil)
+ cleanup(t, reuse)
+
+ raddr, err := net.ResolveUDPAddr("udp4", "1.1.1.1:1234")
+ require.NoError(t, err)
+ tr, err := reuse.TransportWithAssociationForDial(nil, "udp4", raddr)
+ require.NoError(t, err)
+ laddr := &net.UDPAddr{IP: net.IPv4zero, Port: tr.LocalAddr().(*net.UDPAddr).Port}
+ lconn, err := reuse.TransportForListen("udp4", laddr)
+ require.NoError(t, err)
+ require.Equal(t, 2, lconn.GetCount())
+ require.Equal(t, 2, tr.GetCount())
+}
+
+func TestReuseConnectionWhenDialBeforeListen(t *testing.T) {
+ reuse := newReuse(nil, nil, defaultListenUDP, defaultSourceIPSelectorFn, nil, nil)
+ cleanup(t, reuse)
+
+ // dial any address
+ raddr, err := net.ResolveUDPAddr("udp4", "1.1.1.1:1234")
+ require.NoError(t, err)
+ rTr, err := reuse.TransportWithAssociationForDial(nil, "udp4", raddr)
+ require.NoError(t, err)
+
+ // open a listener
+ laddr := &net.UDPAddr{IP: net.IPv4zero, Port: 1234}
+ lTr, err := reuse.TransportForListen("udp4", laddr)
+ require.NoError(t, err)
+
+ // new dials should go via the listener connection
+ raddr, err = net.ResolveUDPAddr("udp4", "1.1.1.1:1235")
+ require.NoError(t, err)
+ tr, err := reuse.TransportWithAssociationForDial(nil, "udp4", raddr)
+ require.NoError(t, err)
+ require.Equal(t, lTr, tr)
+ require.Equal(t, 2, tr.GetCount())
+
+ // a listener on an unspecified port should reuse the dialer
+ laddr2 := &net.UDPAddr{IP: net.IPv4zero, Port: 0}
+ lconn2, err := reuse.TransportForListen("udp4", laddr2)
+ require.NoError(t, err)
+ require.Equal(t, rTr, lconn2)
+ require.Equal(t, 2, lconn2.GetCount())
+}
+
+func TestReuseListenOnSpecificInterface(t *testing.T) {
+ if !platformHasRoutingTables() {
+ t.Skip("this test only works on platforms that support routing tables")
+ }
+ reuse := newReuse(nil, nil, defaultListenUDP, defaultSourceIPSelectorFn, nil, nil)
+ cleanup(t, reuse)
+
+ router, err := netroute.New()
+ require.NoError(t, err)
+
+ raddr, err := net.ResolveUDPAddr("udp4", "1.1.1.1:1234")
+ require.NoError(t, err)
+ _, _, ip, err := router.Route(raddr.IP)
+ require.NoError(t, err)
+ // listen
+ addr, err := net.ResolveUDPAddr("udp4", ip.String()+":0")
+ require.NoError(t, err)
+ lconn, err := reuse.TransportForListen("udp4", addr)
+ require.NoError(t, err)
+ require.Equal(t, 1, lconn.GetCount())
+ // dial
+ conn, err := reuse.TransportWithAssociationForDial(nil, "udp4", raddr)
+ require.NoError(t, err)
+ require.Equal(t, 1, conn.GetCount())
+}
+
+func TestReuseGarbageCollect(t *testing.T) {
+ maxUnusedDurationOrig := maxUnusedDuration
+ garbageCollectIntervalOrig := garbageCollectInterval
+ t.Cleanup(func() {
+ maxUnusedDuration = maxUnusedDurationOrig
+ garbageCollectInterval = garbageCollectIntervalOrig
+ })
+ garbageCollectInterval = 50 * time.Millisecond
+ maxUnusedDuration = 100 * time.Millisecond
+ if os.Getenv("CI") != "" {
+ // Increase these timeouts if in CI
+ garbageCollectInterval = 10 * garbageCollectInterval
+ maxUnusedDuration = 10 * maxUnusedDuration
+ }
+
+ reuse := newReuse(nil, nil, defaultListenUDP, defaultSourceIPSelectorFn, nil, nil)
+ cleanup(t, reuse)
+
+ numGlobals := func() int {
+ reuse.mutex.Lock()
+ defer reuse.mutex.Unlock()
+ return len(reuse.globalListeners) + len(reuse.globalDialers)
+ }
+
+ raddr, err := net.ResolveUDPAddr("udp4", "1.2.3.4:1234")
+ require.NoError(t, err)
+ dTr, err := reuse.TransportWithAssociationForDial(nil, "udp4", raddr)
+ require.NoError(t, err)
+ require.Equal(t, 1, dTr.GetCount())
+
+ addr, err := net.ResolveUDPAddr("udp4", "0.0.0.0:1234")
+ require.NoError(t, err)
+ lTr, err := reuse.TransportForListen("udp4", addr)
+ require.NoError(t, err)
+ require.Equal(t, 1, lTr.GetCount())
+
+ closeTime := time.Now()
+ lTr.DecreaseCount()
+ dTr.DecreaseCount()
+
+ for {
+ num := numGlobals()
+ if closeTime.Add(maxUnusedDuration).Before(time.Now()) {
+ break
+ }
+ require.Equal(t, 2, num)
+ time.Sleep(2 * time.Millisecond)
+ }
+ require.Eventually(t, func() bool { return numGlobals() == 0 }, 4*garbageCollectInterval, 10*time.Millisecond)
+}
diff --git a/p2p/transport/quicreuse/tracer.go b/p2p/transport/quicreuse/tracer.go
new file mode 100644
index 0000000000..ce8c2802f2
--- /dev/null
+++ b/p2p/transport/quicreuse/tracer.go
@@ -0,0 +1,94 @@
+package quicreuse
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "time"
+
+ "github.com/klauspost/compress/zstd"
+ golog "github.com/libp2p/go-libp2p/gologshim"
+ "github.com/quic-go/quic-go"
+ "github.com/quic-go/quic-go/logging"
+ "github.com/quic-go/quic-go/qlog"
+)
+
+var log = golog.Logger("quic-utils")
+
+// qlogTracerDir is the directory to write qlog files to, if qlogging is enabled
+// via the QLOGDIR environment variable. Otherwise it is an empty string.
+var qlogTracerDir string
+
+func init() {
+ qlogTracerDir = os.Getenv("QLOGDIR")
+}
+
+func qloggerForDir(qlogDir string, p logging.Perspective, ci quic.ConnectionID) *logging.ConnectionTracer {
+ // create the QLOGDIR, if it doesn't exist
+ if err := os.MkdirAll(qlogDir, 0777); err != nil {
+ log.Error("creating the QLOGDIR failed", "err", err)
+ return nil
+ }
+ return qlog.NewConnectionTracer(newQlogger(qlogDir, p, ci), p, ci)
+}
+
+// The qlogger logs qlog events to a temporary file: .<name>.qlog.swp.
+// When it is closed, it compresses the temporary file and saves it as <name>.qlog.zst.
+// It is not possible to compress on the fly, as compression algorithms keep a lot of internal state,
+// which can easily exhaust the host system's memory when running a few hundred QUIC connections in parallel.
+type qlogger struct {
+ f *os.File // QLOGDIR/.log_xxx.qlog.swp
+ filename string // QLOGDIR/log_xxx.qlog.zst
+ *bufio.Writer // buffers writes to f
+}
+
+func newQlogger(qlogDir string, role logging.Perspective, connID quic.ConnectionID) io.WriteCloser {
+ t := time.Now().UTC().Format("2006-01-02T15-04-05.999999999UTC")
+ r := "server"
+ if role == logging.PerspectiveClient {
+ r = "client"
+ }
+ finalFilename := fmt.Sprintf("%s%clog_%s_%s_%s.qlog.zst", qlogDir, os.PathSeparator, t, r, connID)
+ filename := fmt.Sprintf("%s%c.log_%s_%s_%s.qlog.swp", qlogDir, os.PathSeparator, t, r, connID)
+ f, err := os.Create(filename)
+ if err != nil {
+ log.Error("unable to create qlog file", "filename", filename, "error", err)
+ return nil
+ }
+ return &qlogger{
+ f: f,
+ filename: finalFilename,
+ // The size of a qlog file for a raw file download is ~2/3 of the amount of data transferred.
+ // bufio.NewWriter creates a buffer of only 4 kB, leading to a large number of syscalls.
+ Writer: bufio.NewWriterSize(f, 128<<10),
+ }
+}
+
+func (l *qlogger) Close() error {
+ defer os.Remove(l.f.Name())
+ defer l.f.Close()
+ if err := l.Writer.Flush(); err != nil {
+ return err
+ }
+ if _, err := l.f.Seek(0, io.SeekStart); err != nil { // set the read position to the beginning of the file
+ return err
+ }
+ f, err := os.Create(l.filename)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ buf := bufio.NewWriterSize(f, 128<<10)
+ c, err := zstd.NewWriter(buf, zstd.WithEncoderLevel(zstd.SpeedFastest), zstd.WithWindowSize(32*1024))
+ if err != nil {
+ return err
+ }
+ if _, err := io.Copy(c, l.f); err != nil {
+ return err
+ }
+ if err := c.Close(); err != nil {
+ return err
+ }
+ return buf.Flush()
+}
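+
+// A lifecycle sketch (illustrative only, matching the tests below): writes go
+// to the buffered .swp file, and only Close produces the compressed .qlog.zst.
+//
+//	l := newQlogger(qlogDir, logging.PerspectiveServer, connID) // creates qlogDir/.log_..._<connID>.qlog.swp
+//	_, _ = l.Write(event)                                       // buffered, not yet compressed
+//	_ = l.Close()                                               // flush, compress to qlogDir/log_..._<connID>.qlog.zst, remove the .swp file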
diff --git a/p2p/transport/quicreuse/tracer_test.go b/p2p/transport/quicreuse/tracer_test.go
new file mode 100644
index 0000000000..bea6b91dbf
--- /dev/null
+++ b/p2p/transport/quicreuse/tracer_test.go
@@ -0,0 +1,68 @@
+package quicreuse
+
+import (
+ "bytes"
+ "io"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/klauspost/compress/zstd"
+ "github.com/quic-go/quic-go"
+ "github.com/quic-go/quic-go/logging"
+ "github.com/stretchr/testify/require"
+)
+
+func getFile(t *testing.T, dir string) os.FileInfo {
+ files, err := os.ReadDir(dir)
+ require.NoError(t, err)
+ require.Len(t, files, 1)
+ info, err := files[0].Info()
+ require.NoError(t, err)
+ return info
+}
+
+func TestSaveQlog(t *testing.T) {
+ qlogDir := t.TempDir()
+ logger := newQlogger(qlogDir, logging.PerspectiveServer, quic.ConnectionIDFromBytes([]byte{0xde, 0xad, 0xbe, 0xef}))
+ file := getFile(t, qlogDir)
+ require.Equal(t, ".", string(file.Name()[0]))
+ require.Truef(t, strings.HasSuffix(file.Name(), ".qlog.swp"), "expected %s to have the .qlog.swp file ending", file.Name())
+ // close the logger. This should move the file.
+ require.NoError(t, logger.Close())
+ file = getFile(t, qlogDir)
+ require.NotEqual(t, ".", string(file.Name()[0]))
+ require.Truef(t, strings.HasSuffix(file.Name(), ".qlog.zst"), "expected %s to have the .qlog.zst file ending", file.Name())
+ require.Contains(t, file.Name(), "server")
+ require.Contains(t, file.Name(), "deadbeef")
+}
+
+func TestQlogBuffering(t *testing.T) {
+ qlogDir := t.TempDir()
+ logger := newQlogger(qlogDir, logging.PerspectiveServer, quic.ConnectionIDFromBytes([]byte("connid")))
+ initialSize := getFile(t, qlogDir).Size()
+ // Do a small write.
+ // Since the writer is buffered, this should not be written to disk yet.
+ logger.Write([]byte("foobar"))
+ require.Equal(t, getFile(t, qlogDir).Size(), initialSize)
+ // Close the logger. This should flush the buffer to disk.
+ require.NoError(t, logger.Close())
+ finalSize := getFile(t, qlogDir).Size()
+ t.Logf("initial log file size: %d, final log file size: %d\n", initialSize, finalSize)
+ require.Greater(t, finalSize, initialSize)
+}
+
+func TestQlogCompression(t *testing.T) {
+ qlogDir := t.TempDir()
+ logger := newQlogger(qlogDir, logging.PerspectiveServer, quic.ConnectionIDFromBytes([]byte("connid")))
+ logger.Write([]byte("foobar"))
+ require.NoError(t, logger.Close())
+ compressed, err := os.ReadFile(qlogDir + "/" + getFile(t, qlogDir).Name())
+ require.NoError(t, err)
+ require.NotEqual(t, "foobar", compressed)
+ c, err := zstd.NewReader(bytes.NewReader(compressed))
+ require.NoError(t, err)
+ data, err := io.ReadAll(c)
+ require.NoError(t, err)
+ require.Equal(t, []byte("foobar"), data)
+}
diff --git a/p2p/transport/tcp/metrics.go b/p2p/transport/tcp/metrics.go
new file mode 100644
index 0000000000..1f8d75f6be
--- /dev/null
+++ b/p2p/transport/tcp/metrics.go
@@ -0,0 +1,297 @@
+//go:build !windows && !riscv64 && !loong64
+
+package tcp
+
+import (
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/transport"
+ "github.com/marten-seemann/tcp"
+ "github.com/mikioh/tcpinfo"
+ manet "github.com/multiformats/go-multiaddr/net"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+ newConns *prometheus.CounterVec
+ closedConns *prometheus.CounterVec
+ segsSentDesc *prometheus.Desc
+ segsRcvdDesc *prometheus.Desc
+ bytesSentDesc *prometheus.Desc
+ bytesRcvdDesc *prometheus.Desc
+)
+
+const collectFrequency = 10 * time.Second
+
+var defaultCollector *aggregatingCollector
+
+var initMetricsOnce sync.Once
+
+func initMetrics() {
+ segsSentDesc = prometheus.NewDesc("tcp_sent_segments_total", "TCP segments sent", nil, nil)
+ segsRcvdDesc = prometheus.NewDesc("tcp_rcvd_segments_total", "TCP segments received", nil, nil)
+ bytesSentDesc = prometheus.NewDesc("tcp_sent_bytes", "TCP bytes sent", nil, nil)
+ bytesRcvdDesc = prometheus.NewDesc("tcp_rcvd_bytes", "TCP bytes received", nil, nil)
+
+ defaultCollector = newAggregatingCollector()
+ prometheus.MustRegister(defaultCollector)
+
+ const direction = "direction"
+
+ newConns = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "tcp_connections_new_total",
+ Help: "TCP new connections",
+ },
+ []string{direction},
+ )
+ prometheus.MustRegister(newConns)
+ closedConns = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "tcp_connections_closed_total",
+ Help: "TCP connections closed",
+ },
+ []string{direction},
+ )
+ prometheus.MustRegister(closedConns)
+}
+
+type aggregatingCollector struct {
+ cronOnce sync.Once
+
+ mutex sync.Mutex
+ highestID uint64
+ conns map[uint64] /* id */ *tracingConn
+ rtts prometheus.Histogram
+ connDurations prometheus.Histogram
+ segsSent, segsRcvd uint64
+ bytesSent, bytesRcvd uint64
+}
+
+var _ prometheus.Collector = &aggregatingCollector{}
+
+func newAggregatingCollector() *aggregatingCollector {
+ c := &aggregatingCollector{
+ conns: make(map[uint64]*tracingConn),
+ rtts: prometheus.NewHistogram(prometheus.HistogramOpts{
+ Name: "tcp_rtt",
+ Help: "TCP round trip time",
+ Buckets: prometheus.ExponentialBuckets(0.001, 1.25, 40), // 1ms to ~6000ms
+ }),
+ connDurations: prometheus.NewHistogram(prometheus.HistogramOpts{
+ Name: "tcp_connection_duration",
+ Help: "TCP Connection Duration",
+ Buckets: prometheus.ExponentialBuckets(1, 1.5, 40), // 1s to ~12 weeks
+ }),
+ }
+ return c
+}
+
+func (c *aggregatingCollector) AddConn(t *tracingConn) uint64 {
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+ c.highestID++
+ c.conns[c.highestID] = t
+ return c.highestID
+}
+
+func (c *aggregatingCollector) removeConn(id uint64) {
+ delete(c.conns, id)
+}
+
+func (c *aggregatingCollector) Describe(descs chan<- *prometheus.Desc) {
+ descs <- c.rtts.Desc()
+ descs <- c.connDurations.Desc()
+ if hasSegmentCounter {
+ descs <- segsSentDesc
+ descs <- segsRcvdDesc
+ }
+ if hasByteCounter {
+ descs <- bytesSentDesc
+ descs <- bytesRcvdDesc
+ }
+}
+
+func (c *aggregatingCollector) cron() {
+ ticker := time.NewTicker(collectFrequency)
+ defer ticker.Stop()
+
+ for now := range ticker.C {
+ c.gatherMetrics(now)
+ }
+}
+
+func (c *aggregatingCollector) gatherMetrics(now time.Time) {
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+
+ c.segsSent = 0
+ c.segsRcvd = 0
+ c.bytesSent = 0
+ c.bytesRcvd = 0
+ for _, conn := range c.conns {
+ info, err := conn.getTCPInfo()
+ if err != nil {
+ if strings.Contains(err.Error(), "use of closed network connection") {
+ continue
+ }
+ log.Error("Failed to get TCP info", "error", err)
+ continue
+ }
+ if hasSegmentCounter {
+ c.segsSent += getSegmentsSent(info)
+ c.segsRcvd += getSegmentsRcvd(info)
+ }
+ if hasByteCounter {
+ c.bytesSent += getBytesSent(info)
+ c.bytesRcvd += getBytesRcvd(info)
+ }
+ c.rtts.Observe(info.RTT.Seconds())
+ c.connDurations.Observe(now.Sub(conn.startTime).Seconds())
+ }
+}
+
+func (c *aggregatingCollector) Collect(metrics chan<- prometheus.Metric) {
+ // Start the periodic metrics collection the first time Collect is called.
+ c.cronOnce.Do(func() {
+ c.gatherMetrics(time.Now())
+ go c.cron()
+ })
+
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+
+ metrics <- c.rtts
+ metrics <- c.connDurations
+ if hasSegmentCounter {
+ segsSentMetric, err := prometheus.NewConstMetric(segsSentDesc, prometheus.CounterValue, float64(c.segsSent))
+ if err != nil {
+ log.Error("creating tcp_sent_segments_total metric failed", "error", err)
+ return
+ }
+ segsRcvdMetric, err := prometheus.NewConstMetric(segsRcvdDesc, prometheus.CounterValue, float64(c.segsRcvd))
+ if err != nil {
+ log.Error("creating tcp_rcvd_segments_total metric failed", "error", err)
+ return
+ }
+ metrics <- segsSentMetric
+ metrics <- segsRcvdMetric
+ }
+ if hasByteCounter {
+ bytesSentMetric, err := prometheus.NewConstMetric(bytesSentDesc, prometheus.CounterValue, float64(c.bytesSent))
+ if err != nil {
+ log.Error("creating tcp_sent_bytes metric failed", "error", err)
+ return
+ }
+ bytesRcvdMetric, err := prometheus.NewConstMetric(bytesRcvdDesc, prometheus.CounterValue, float64(c.bytesRcvd))
+ if err != nil {
+ log.Error("creating tcp_rcvd_bytes metric failed", "error", err)
+ return
+ }
+ metrics <- bytesSentMetric
+ metrics <- bytesRcvdMetric
+ }
+}
+
+func (c *aggregatingCollector) ClosedConn(conn *tracingConn, direction string) {
+ c.mutex.Lock()
+ c.removeConn(conn.id)
+ c.mutex.Unlock()
+ closedConns.WithLabelValues(direction).Inc()
+}
+
+type tracingConn struct {
+ id uint64
+
+ collector *aggregatingCollector
+
+ startTime time.Time
+ isClient bool
+
+ manet.Conn
+ tcpConn *tcp.Conn
+ closeOnce sync.Once
+ closeErr error
+}
+
+// newTracingConn wraps a manet.Conn with a tracingConn. A nil collector will use the default collector.
+func newTracingConn(c manet.Conn, collector *aggregatingCollector, isClient bool) (*tracingConn, error) {
+ initMetricsOnce.Do(func() { initMetrics() })
+ conn, err := tcp.NewConn(c)
+ if err != nil {
+ return nil, err
+ }
+ tc := &tracingConn{
+ startTime: time.Now(),
+ isClient: isClient,
+ Conn: c,
+ tcpConn: conn,
+ collector: collector,
+ }
+ if tc.collector == nil {
+ tc.collector = defaultCollector
+ }
+ tc.id = tc.collector.AddConn(tc)
+ newConns.WithLabelValues(tc.getDirection()).Inc()
+ return tc, nil
+}
+
+func (c *tracingConn) getDirection() string {
+ if c.isClient {
+ return "outgoing"
+ }
+ return "incoming"
+}
+
+func (c *tracingConn) Close() error {
+ c.closeOnce.Do(func() {
+ c.collector.ClosedConn(c, c.getDirection())
+ c.closeErr = c.Conn.Close()
+ })
+ return c.closeErr
+}
+
+func (c *tracingConn) getTCPInfo() (*tcpinfo.Info, error) {
+ var o tcpinfo.Info
+ var b [256]byte
+ i, err := c.tcpConn.Option(o.Level(), o.Name(), b[:])
+ if err != nil {
+ return nil, err
+ }
+ info := i.(*tcpinfo.Info)
+ return info, nil
+}
+
+type tracingListener struct {
+ transport.GatedMaListener
+ collector *aggregatingCollector
+}
+
+// newTracingListener wraps a transport.GatedMaListener with a tracingListener. A nil collector will use the default collector.
+func newTracingListener(l transport.GatedMaListener, collector *aggregatingCollector) *tracingListener {
+ return &tracingListener{GatedMaListener: l, collector: collector}
+}
+
+func (l *tracingListener) Accept() (manet.Conn, network.ConnManagementScope, error) {
+ conn, scope, err := l.GatedMaListener.Accept()
+ if err != nil {
+ if scope != nil {
+ scope.Done()
+ log.Error("BUG: got non-nil scope but also an error", "error", err)
+ }
+ return nil, nil, err
+ }
+
+ tc, err := newTracingConn(conn, l.collector, false)
+ if err != nil {
+ log.Error("failed to create tracingConn", "conn_type", fmt.Sprintf("%T", conn), "error", err)
+ conn.Close()
+ scope.Done()
+ return nil, nil, err
+ }
+ return tc, scope, nil
+}
diff --git a/p2p/transport/tcp/metrics_darwin.go b/p2p/transport/tcp/metrics_darwin.go
new file mode 100644
index 0000000000..32cc0288d1
--- /dev/null
+++ b/p2p/transport/tcp/metrics_darwin.go
@@ -0,0 +1,15 @@
+//go:build darwin
+
+package tcp
+
+import "github.com/mikioh/tcpinfo"
+
+const (
+ hasSegmentCounter = true
+ hasByteCounter = true
+)
+
+func getSegmentsSent(info *tcpinfo.Info) uint64 { return info.Sys.SegsSent }
+func getSegmentsRcvd(info *tcpinfo.Info) uint64 { return info.Sys.SegsReceived }
+func getBytesSent(info *tcpinfo.Info) uint64 { return info.Sys.BytesSent }
+func getBytesRcvd(info *tcpinfo.Info) uint64 { return info.Sys.BytesReceived }
diff --git a/p2p/transport/tcp/metrics_general.go b/p2p/transport/tcp/metrics_general.go
new file mode 100644
index 0000000000..07ebd1a589
--- /dev/null
+++ b/p2p/transport/tcp/metrics_general.go
@@ -0,0 +1,15 @@
+//go:build !linux && !darwin && !windows && !riscv64 && !loong64
+
+package tcp
+
+import "github.com/mikioh/tcpinfo"
+
+const (
+ hasSegmentCounter = false
+ hasByteCounter = false
+)
+
+func getSegmentsSent(_ *tcpinfo.Info) uint64 { return 0 }
+func getSegmentsRcvd(_ *tcpinfo.Info) uint64 { return 0 }
+func getBytesSent(_ *tcpinfo.Info) uint64 { return 0 }
+func getBytesRcvd(_ *tcpinfo.Info) uint64 { return 0 }
diff --git a/p2p/transport/tcp/metrics_linux.go b/p2p/transport/tcp/metrics_linux.go
new file mode 100644
index 0000000000..a1a10ee353
--- /dev/null
+++ b/p2p/transport/tcp/metrics_linux.go
@@ -0,0 +1,15 @@
+//go:build linux
+
+package tcp
+
+import "github.com/mikioh/tcpinfo"
+
+const (
+ hasSegmentCounter = true
+ hasByteCounter = false
+)
+
+func getSegmentsSent(info *tcpinfo.Info) uint64 { return uint64(info.Sys.SegsOut) }
+func getSegmentsRcvd(info *tcpinfo.Info) uint64 { return uint64(info.Sys.SegsIn) }
+func getBytesSent(_ *tcpinfo.Info) uint64 { return 0 }
+func getBytesRcvd(_ *tcpinfo.Info) uint64 { return 0 }
diff --git a/p2p/transport/tcp/metrics_none.go b/p2p/transport/tcp/metrics_none.go
new file mode 100644
index 0000000000..2e561fb6cb
--- /dev/null
+++ b/p2p/transport/tcp/metrics_none.go
@@ -0,0 +1,19 @@
+// riscv64: see https://github.com/marten-seemann/tcp/pull/1
+
+//go:build windows || riscv64 || loong64
+
+package tcp
+
+import (
+ "github.com/libp2p/go-libp2p/core/transport"
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
+type aggregatingCollector struct{}
+
+func newTracingConn(c manet.Conn, collector *aggregatingCollector, isClient bool) (manet.Conn, error) {
+ return c, nil
+}
+func newTracingListener(l transport.GatedMaListener, collector *aggregatingCollector) transport.GatedMaListener {
+ return l
+}
diff --git a/p2p/transport/tcp/metrics_test.go b/p2p/transport/tcp/metrics_test.go
new file mode 100644
index 0000000000..7645abc8df
--- /dev/null
+++ b/p2p/transport/tcp/metrics_test.go
@@ -0,0 +1,54 @@
+package tcp
+
+import (
+ "testing"
+
+ tptu "github.com/libp2p/go-libp2p/p2p/net/upgrader"
+ "github.com/libp2p/go-libp2p/p2p/transport/tcpreuse"
+ ttransport "github.com/libp2p/go-libp2p/p2p/transport/testsuite"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestTcpTransportCollectsMetricsWithSharedTcpSocket(t *testing.T) {
+
+ peerA, ia := makeInsecureMuxer(t)
+ _, ib := makeInsecureMuxer(t)
+
+ upg, err := tptu.New(ia, muxers, nil, nil, nil)
+ require.NoError(t, err)
+ sharedTCPSocketA := tcpreuse.NewConnMgr(false, upg)
+ sharedTCPSocketB := tcpreuse.NewConnMgr(false, upg)
+
+ ua, err := tptu.New(ia, muxers, nil, nil, nil)
+ require.NoError(t, err)
+ ta, err := NewTCPTransport(ua, nil, sharedTCPSocketA, WithMetrics())
+ require.NoError(t, err)
+ ub, err := tptu.New(ib, muxers, nil, nil, nil)
+ require.NoError(t, err)
+ tb, err := NewTCPTransport(ub, nil, sharedTCPSocketB, WithMetrics())
+ require.NoError(t, err)
+
+ zero := "/ip4/127.0.0.1/tcp/0"
+
+ // Not running any test that needs more than 1 conn because the testsuite
+ // opens multiple conns via multiple listeners, which is not expected to work
+ // with the shared TCP socket.
+ subtestsToRun := []ttransport.TransportSubTestFn{
+ ttransport.SubtestProtocols,
+ ttransport.SubtestBasic,
+ ttransport.SubtestCancel,
+ ttransport.SubtestPingPong,
+
+ // Stolen from the stream muxer test suite.
+ ttransport.SubtestStress1Conn1Stream1Msg,
+ ttransport.SubtestStress1Conn1Stream100Msg,
+ ttransport.SubtestStress1Conn100Stream100Msg,
+ ttransport.SubtestStress1Conn1000Stream10Msg,
+ ttransport.SubtestStress1Conn100Stream100Msg10MB,
+ ttransport.SubtestStreamOpenStress,
+ ttransport.SubtestStreamReset,
+ }
+
+ ttransport.SubtestTransportWithFs(t, ta, tb, zero, peerA, subtestsToRun)
+}
diff --git a/p2p/transport/tcp/tcp.go b/p2p/transport/tcp/tcp.go
new file mode 100644
index 0000000000..05b867ee37
--- /dev/null
+++ b/p2p/transport/tcp/tcp.go
@@ -0,0 +1,358 @@
+package tcp
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net"
+ "os"
+ "runtime"
+ "syscall"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/transport"
+ "github.com/libp2p/go-libp2p/p2p/net/reuseport"
+ "github.com/libp2p/go-libp2p/p2p/transport/tcpreuse"
+
+ logging "github.com/libp2p/go-libp2p/gologshim"
+ ma "github.com/multiformats/go-multiaddr"
+ mafmt "github.com/multiformats/go-multiaddr-fmt"
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
+const defaultConnectTimeout = 5 * time.Second
+
+var log = logging.Logger("tcp-tpt")
+
+const keepAlivePeriod = 30 * time.Second
+
+type canKeepAlive interface {
+ SetKeepAlive(bool) error
+ SetKeepAlivePeriod(time.Duration) error
+}
+
+var _ canKeepAlive = &net.TCPConn{}
+
+// Deprecated: Use tcpreuse.ReuseportIsAvailable
+var ReuseportIsAvailable = tcpreuse.ReuseportIsAvailable
+
+func tryKeepAlive(conn net.Conn, keepAlive bool) {
+ keepAliveConn, ok := conn.(canKeepAlive)
+ if !ok {
+ log.Error("can't set TCP keepalives. net.Conn doesn't support SetKeepAlive", "conn_type", fmt.Sprintf("%T", conn))
+ return
+ }
+ if err := keepAliveConn.SetKeepAlive(keepAlive); err != nil {
+ // Sometimes we seem to get "invalid argument" results from this function on Darwin.
+ // This might be due to a closed connection, but I can't reproduce that on Linux.
+ //
+ // But there's nothing we can do about invalid arguments, so we'll drop this to a
+ // debug.
+ if errors.Is(err, os.ErrInvalid) || errors.Is(err, syscall.EINVAL) {
+ log.Debug("failed to enable TCP keepalive", "error", err)
+ } else {
+ log.Error("failed to enable TCP keepalive", "error", err)
+ }
+ return
+ }
+
+ if runtime.GOOS != "openbsd" {
+ if err := keepAliveConn.SetKeepAlivePeriod(keepAlivePeriod); err != nil {
+ log.Error("failed set keepalive period", "error", err)
+ }
+ }
+}
+
+// try to set linger on the connection, if possible.
+func tryLinger(conn net.Conn, sec int) {
+ type canLinger interface {
+ SetLinger(int) error
+ }
+
+ if lingerConn, ok := conn.(canLinger); ok {
+ _ = lingerConn.SetLinger(sec)
+ }
+}
+
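+// tcpGatedMaListener wraps a gated listener and applies TCP-level socket
+// options (linger, keepalive) to every accepted connection.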
+type tcpGatedMaListener struct {
+ transport.GatedMaListener
+ sec int
+}
+
+func (ll *tcpGatedMaListener) Accept() (manet.Conn, network.ConnManagementScope, error) {
+ c, scope, err := ll.GatedMaListener.Accept()
+ if err != nil {
+ if scope != nil {
+ log.Error("BUG: got non-nil scope but also an error", "error", err)
+ scope.Done()
+ }
+ return nil, nil, err
+ }
+ tryLinger(c, ll.sec)
+ tryKeepAlive(c, true)
+ return c, scope, nil
+}
+
+type Option func(*TcpTransport) error
+
+func DisableReuseport() Option {
+ return func(tr *TcpTransport) error {
+ tr.disableReuseport = true
+ return nil
+ }
+}
+
+func WithConnectionTimeout(d time.Duration) Option {
+ return func(tr *TcpTransport) error {
+ tr.connectTimeout = d
+ return nil
+ }
+}
+
+func WithMetrics() Option {
+ return func(tr *TcpTransport) error {
+ tr.enableMetrics = true
+ return nil
+ }
+}
+
+// WithDialerForAddr sets a custom dialer for the given address.
+// If set, it will be the *ONLY* dialer used.
+func WithDialerForAddr(d DialerForAddr) Option {
+ return func(tr *TcpTransport) error {
+ tr.overrideDialerForAddr = d
+ return nil
+ }
+}
+
+type ContextDialer interface {
+ DialContext(ctx context.Context, network, address string) (net.Conn, error)
+}
+
+// DialerForAddr is a function that returns a dialer for a given address.
+// Implementations must return either a ContextDialer or an error. It is
+// invalid to return nil, nil.
+type DialerForAddr func(raddr ma.Multiaddr) (ContextDialer, error)
+
+// TcpTransport is the TCP transport.
+type TcpTransport struct {
+ // Connection upgrader for upgrading insecure stream connections to
+ // secure multiplex connections.
+ upgrader transport.Upgrader
+
+ // optional custom dialer to use for dialing. If set, it will be the *ONLY* dialer
+ // used. The transport will not attempt to reuse the listen port to
+ // dial or the shared TCP transport for dialing.
+ overrideDialerForAddr DialerForAddr
+
+ disableReuseport bool // Explicitly disable reuseport.
+ enableMetrics bool
+
+ // share and demultiplex TCP listeners across multiple transports
+ sharedTcp *tcpreuse.ConnMgr
+
+ // TCP connect timeout
+ connectTimeout time.Duration
+
+ rcmgr network.ResourceManager
+
+ reuse reuseport.Transport
+
+ metricsCollector *aggregatingCollector
+}
+
+var _ transport.Transport = &TcpTransport{}
+var _ transport.DialUpdater = &TcpTransport{}
+
+// NewTCPTransport creates a tcp transport object that tracks dialers and listeners
+// created.
+func NewTCPTransport(upgrader transport.Upgrader, rcmgr network.ResourceManager, sharedTCP *tcpreuse.ConnMgr, opts ...Option) (*TcpTransport, error) {
+ if rcmgr == nil {
+ rcmgr = &network.NullResourceManager{}
+ }
+ tr := &TcpTransport{
+ upgrader: upgrader,
+ connectTimeout: defaultConnectTimeout, // can be set by using the WithConnectionTimeout option
+ rcmgr: rcmgr,
+ sharedTcp: sharedTCP,
+ }
+ for _, o := range opts {
+ if err := o(tr); err != nil {
+ return nil, err
+ }
+ }
+ return tr, nil
+}
+
+var dialMatcher = mafmt.And(mafmt.IP, mafmt.Base(ma.P_TCP))
+
+// CanDial returns true if this transport believes it can dial the given
+// multiaddr.
+func (t *TcpTransport) CanDial(addr ma.Multiaddr) bool {
+ return dialMatcher.Matches(addr)
+}
+
+func (t *TcpTransport) customDial(ctx context.Context, raddr ma.Multiaddr) (manet.Conn, error) {
+ // get the net.Dial friendly arguments from the remote addr
+ rnet, rnaddr, err := manet.DialArgs(raddr)
+ if err != nil {
+ return nil, err
+ }
+ dialer, err := t.overrideDialerForAddr(raddr)
+ if err != nil {
+ return nil, err
+ }
+ if dialer == nil {
+ return nil, fmt.Errorf("dialer for address %s is nil", raddr)
+ }
+
+ // ok, Dial!
+ var nconn net.Conn
+ switch rnet {
+ case "tcp", "tcp4", "tcp6", "udp", "udp4", "udp6", "unix":
+ nconn, err = dialer.DialContext(ctx, rnet, rnaddr)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, fmt.Errorf("unrecognized network: %s", rnet)
+ }
+
+ return manet.WrapNetConn(nconn)
+}
+
+func (t *TcpTransport) maDial(ctx context.Context, raddr ma.Multiaddr) (manet.Conn, error) {
+ // Apply the connect timeout as a deadline, if one is configured.
+ if t.connectTimeout > 0 {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeout(ctx, t.connectTimeout)
+ defer cancel()
+ }
+
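+ // Dialer precedence: a custom override dialer wins, then the shared TCP
+ // conn manager, then the reuseport dialer, then a plain manet.Dialer.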
+ if t.overrideDialerForAddr != nil {
+ return t.customDial(ctx, raddr)
+ }
+
+ if t.sharedTcp != nil {
+ return t.sharedTcp.DialContext(ctx, raddr)
+ }
+
+ if t.UseReuseport() {
+ return t.reuse.DialContext(ctx, raddr)
+ }
+ var d manet.Dialer
+ return d.DialContext(ctx, raddr)
+}
+
+// Dial dials the peer at the remote address.
+func (t *TcpTransport) Dial(ctx context.Context, raddr ma.Multiaddr, p peer.ID) (transport.CapableConn, error) {
+ return t.DialWithUpdates(ctx, raddr, p, nil)
+}
+
+func (t *TcpTransport) DialWithUpdates(ctx context.Context, raddr ma.Multiaddr, p peer.ID, updateChan chan<- transport.DialUpdate) (transport.CapableConn, error) {
+ connScope, err := t.rcmgr.OpenConnection(network.DirOutbound, true, raddr)
+ if err != nil {
+ log.Debug("resource manager blocked outgoing connection", "peer", p, "addr", raddr, "error", err)
+ return nil, err
+ }
+
+ c, err := t.dialWithScope(ctx, raddr, p, connScope, updateChan)
+ if err != nil {
+ connScope.Done()
+ return nil, err
+ }
+ return c, nil
+}
+
+func (t *TcpTransport) dialWithScope(ctx context.Context, raddr ma.Multiaddr, p peer.ID, connScope network.ConnManagementScope, updateChan chan<- transport.DialUpdate) (transport.CapableConn, error) {
+ if err := connScope.SetPeer(p); err != nil {
+ log.Debug("resource manager blocked outgoing connection for peer", "peer", p, "addr", raddr, "error", err)
+ return nil, err
+ }
+ conn, err := t.maDial(ctx, raddr)
+ if err != nil {
+ return nil, err
+ }
+ // Set linger to 0 so we never get stuck in the TIME-WAIT state. When
+ // linger is 0, connections are _reset_ instead of closed with a FIN.
+ // This means we can immediately reuse the 5-tuple and reconnect.
+ tryLinger(conn, 0)
+ tryKeepAlive(conn, true)
+ c := conn
+ if t.enableMetrics {
+ var err error
+ c, err = newTracingConn(conn, t.metricsCollector, true)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if updateChan != nil {
+ select {
+ case updateChan <- transport.DialUpdate{Kind: transport.UpdateKindHandshakeProgressed, Addr: raddr}:
+ default:
+ // It is better to skip the update than to delay upgrading the connection
+ }
+ }
+ direction := network.DirOutbound
+ if ok, isClient, _ := network.GetSimultaneousConnect(ctx); ok && !isClient {
+ direction = network.DirInbound
+ }
+ return t.upgrader.Upgrade(ctx, t, c, direction, p, connScope)
+}
+
+// UseReuseport returns true if reuseport is enabled and available.
+func (t *TcpTransport) UseReuseport() bool {
+ return !t.disableReuseport && tcpreuse.ReuseportIsAvailable()
+}
+
+func (t *TcpTransport) unsharedMAListen(laddr ma.Multiaddr) (manet.Listener, error) {
+ if t.UseReuseport() {
+ return t.reuse.Listen(laddr)
+ }
+ return manet.Listen(laddr)
+}
+
+// Listen listens on the given multiaddr.
+func (t *TcpTransport) Listen(laddr ma.Multiaddr) (transport.Listener, error) {
+ var list transport.GatedMaListener
+ var err error
+ if t.sharedTcp != nil {
+ list, err = t.sharedTcp.DemultiplexedListen(laddr, tcpreuse.DemultiplexedConnType_MultistreamSelect)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ mal, err := t.unsharedMAListen(laddr)
+ if err != nil {
+ return nil, err
+ }
+ list = t.upgrader.GateMaListener(mal)
+ }
+
+ // Always wrap the listener with tcpGatedMaListener to apply TCP-specific configurations
+ tcpList := &tcpGatedMaListener{list, 0}
+
+ if t.enableMetrics {
+ // Wrap with tracing listener if metrics are enabled
+ return t.upgrader.UpgradeGatedMaListener(t, newTracingListener(tcpList, t.metricsCollector)), nil
+ }
+
+ // Regular path without metrics
+ return t.upgrader.UpgradeGatedMaListener(t, tcpList), nil
+}
+
+// Protocols returns the list of terminal protocols this transport can dial.
+func (t *TcpTransport) Protocols() []int {
+ return []int{ma.P_TCP}
+}
+
+// Proxy always returns false for the TCP transport.
+func (t *TcpTransport) Proxy() bool {
+ return false
+}
+
+func (t *TcpTransport) String() string {
+ return "TCP"
+}
diff --git a/p2p/transport/tcp/tcp_test.go b/p2p/transport/tcp/tcp_test.go
new file mode 100644
index 0000000000..8fff3ce46f
--- /dev/null
+++ b/p2p/transport/tcp/tcp_test.go
@@ -0,0 +1,280 @@
+package tcp
+
+import (
+ "context"
+ "errors"
+ "net"
+ "testing"
+
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/network"
+ mocknetwork "github.com/libp2p/go-libp2p/core/network/mocks"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/sec"
+ "github.com/libp2p/go-libp2p/core/sec/insecure"
+ "github.com/libp2p/go-libp2p/core/transport"
+ "github.com/libp2p/go-libp2p/p2p/muxer/yamux"
+ tptu "github.com/libp2p/go-libp2p/p2p/net/upgrader"
+ "github.com/libp2p/go-libp2p/p2p/transport/tcpreuse"
+ ttransport "github.com/libp2p/go-libp2p/p2p/transport/testsuite"
+
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/mock/gomock"
+)
+
+var muxers = []tptu.StreamMuxer{{ID: "/yamux", Muxer: yamux.DefaultTransport}}
+
+func TestTcpTransport(t *testing.T) {
+ for i := 0; i < 2; i++ {
+ peerA, ia := makeInsecureMuxer(t)
+ _, ib := makeInsecureMuxer(t)
+
+ ua, err := tptu.New(ia, muxers, nil, nil, nil)
+ require.NoError(t, err)
+ ta, err := NewTCPTransport(ua, nil, nil)
+ require.NoError(t, err)
+ ub, err := tptu.New(ib, muxers, nil, nil, nil)
+ require.NoError(t, err)
+ tb, err := NewTCPTransport(ub, nil, nil)
+ require.NoError(t, err)
+
+ zero := "/ip4/127.0.0.1/tcp/0"
+ ttransport.SubtestTransport(t, ta, tb, zero, peerA)
+
+ tcpreuse.EnvReuseportVal = false
+ }
+ tcpreuse.EnvReuseportVal = true
+}
+
+func TestTcpTransportWithMetrics(t *testing.T) {
+ peerA, ia := makeInsecureMuxer(t)
+ _, ib := makeInsecureMuxer(t)
+
+ ua, err := tptu.New(ia, muxers, nil, nil, nil)
+ require.NoError(t, err)
+ ta, err := NewTCPTransport(ua, nil, nil, WithMetrics())
+ require.NoError(t, err)
+ ub, err := tptu.New(ib, muxers, nil, nil, nil)
+ require.NoError(t, err)
+ tb, err := NewTCPTransport(ub, nil, nil, WithMetrics())
+ require.NoError(t, err)
+
+ zero := "/ip4/127.0.0.1/tcp/0"
+ ttransport.SubtestTransport(t, ta, tb, zero, peerA)
+}
+
+func TestResourceManager(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+
+ peerA, ia := makeInsecureMuxer(t)
+ _, ib := makeInsecureMuxer(t)
+
+ ua, err := tptu.New(ia, muxers, nil, nil, nil)
+ require.NoError(t, err)
+ ta, err := NewTCPTransport(ua, nil, nil)
+ require.NoError(t, err)
+ ln, err := ta.Listen(ma.StringCast("/ip4/127.0.0.1/tcp/0"))
+ require.NoError(t, err)
+ defer ln.Close()
+
+ ub, err := tptu.New(ib, muxers, nil, nil, nil)
+ require.NoError(t, err)
+ rcmgr := mocknetwork.NewMockResourceManager(ctrl)
+ tb, err := NewTCPTransport(ub, rcmgr, nil)
+ require.NoError(t, err)
+
+ t.Run("success", func(t *testing.T) {
+ scope := mocknetwork.NewMockConnManagementScope(ctrl)
+ rcmgr.EXPECT().OpenConnection(network.DirOutbound, true, ln.Multiaddr()).Return(scope, nil)
+ scope.EXPECT().SetPeer(peerA)
+ scope.EXPECT().PeerScope().Return(&network.NullScope{}).AnyTimes() // called by the upgrader
+ conn, err := tb.Dial(context.Background(), ln.Multiaddr(), peerA)
+ require.NoError(t, err)
+ scope.EXPECT().Done()
+ defer conn.Close()
+ })
+
+ t.Run("connection denied", func(t *testing.T) {
+ rerr := errors.New("nope")
+ rcmgr.EXPECT().OpenConnection(network.DirOutbound, true, ln.Multiaddr()).Return(nil, rerr)
+ _, err = tb.Dial(context.Background(), ln.Multiaddr(), peerA)
+ require.ErrorIs(t, err, rerr)
+ })
+
+ t.Run("peer denied", func(t *testing.T) {
+ scope := mocknetwork.NewMockConnManagementScope(ctrl)
+ rcmgr.EXPECT().OpenConnection(network.DirOutbound, true, ln.Multiaddr()).Return(scope, nil)
+ rerr := errors.New("nope")
+ scope.EXPECT().SetPeer(peerA).Return(rerr)
+ scope.EXPECT().Done()
+ _, err = tb.Dial(context.Background(), ln.Multiaddr(), peerA)
+ require.ErrorIs(t, err, rerr)
+ })
+}
+
+func TestTcpTransportCantDialDNS(t *testing.T) {
+ for i := 0; i < 2; i++ {
+ dnsa, err := ma.NewMultiaddr("/dns4/example.com/tcp/1234")
+ require.NoError(t, err)
+
+ var u transport.Upgrader
+ tpt, err := NewTCPTransport(u, nil, nil)
+ require.NoError(t, err)
+
+ if tpt.CanDial(dnsa) {
+ t.Fatal("shouldn't be able to dial dns")
+ }
+
+ tcpreuse.EnvReuseportVal = false
+ }
+ tcpreuse.EnvReuseportVal = true
+}
+
+func TestTcpTransportCantListenUtp(t *testing.T) {
+ for i := 0; i < 2; i++ {
+ utpa, err := ma.NewMultiaddr("/ip4/127.0.0.1/udp/0/utp")
+ require.NoError(t, err)
+
+ var u transport.Upgrader
+ tpt, err := NewTCPTransport(u, nil, nil)
+ require.NoError(t, err)
+
+ _, err = tpt.Listen(utpa)
+ require.Error(t, err, "shouldn't be able to listen on utp addr with tcp transport")
+
+ tcpreuse.EnvReuseportVal = false
+ }
+ tcpreuse.EnvReuseportVal = true
+}
+
+func TestDialWithUpdates(t *testing.T) {
+ peerA, ia := makeInsecureMuxer(t)
+ _, ib := makeInsecureMuxer(t)
+
+ ua, err := tptu.New(ia, muxers, nil, nil, nil)
+ require.NoError(t, err)
+ ta, err := NewTCPTransport(ua, nil, nil)
+ require.NoError(t, err)
+ ln, err := ta.Listen(ma.StringCast("/ip4/127.0.0.1/tcp/0"))
+ require.NoError(t, err)
+ defer ln.Close()
+
+ ub, err := tptu.New(ib, muxers, nil, nil, nil)
+ require.NoError(t, err)
+ tb, err := NewTCPTransport(ub, nil, nil)
+ require.NoError(t, err)
+
+ updCh := make(chan transport.DialUpdate, 1)
+ conn, err := tb.DialWithUpdates(context.Background(), ln.Multiaddr(), peerA, updCh)
+ upd := <-updCh
+ require.Equal(t, transport.UpdateKindHandshakeProgressed, upd.Kind)
+ require.NotNil(t, conn)
+ require.NoError(t, err)
+
+ acceptAndClose := func() manet.Listener {
+ li, err := manet.Listen(ma.StringCast("/ip4/127.0.0.1/tcp/0"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ go func() {
+ conn, err := li.Accept()
+ if err != nil {
+ return
+ }
+ conn.Close()
+ }()
+ return li
+ }
+ li := acceptAndClose()
+ defer li.Close()
+ // This dial will fail as acceptAndClose will not upgrade the connection
+ conn, err = tb.DialWithUpdates(context.Background(), li.Multiaddr(), peerA, updCh)
+ upd = <-updCh
+ require.Equal(t, transport.UpdateKindHandshakeProgressed, upd.Kind)
+ require.Nil(t, conn)
+ require.Error(t, err)
+}
+
+func makeInsecureMuxer(t *testing.T) (peer.ID, []sec.SecureTransport) {
+ t.Helper()
+ priv, _, err := crypto.GenerateKeyPair(crypto.Ed25519, 256)
+ require.NoError(t, err)
+ id, err := peer.IDFromPrivateKey(priv)
+ require.NoError(t, err)
+ return id, []sec.SecureTransport{insecure.NewWithIdentity(insecure.ID, id, priv)}
+}
+
+type errDialer struct {
+ err error
+}
+
+func (d errDialer) DialContext(_ context.Context, _, _ string) (net.Conn, error) {
+ return nil, d.err
+}
+
+func TestCustomOverrideTCPDialer(t *testing.T) {
+ t.Run("success", func(t *testing.T) {
+ peerA, ia := makeInsecureMuxer(t)
+ ua, err := tptu.New(ia, muxers, nil, nil, nil)
+ require.NoError(t, err)
+ ta, err := NewTCPTransport(ua, nil, nil)
+ require.NoError(t, err)
+ ln, err := ta.Listen(ma.StringCast("/ip4/127.0.0.1/tcp/0"))
+ require.NoError(t, err)
+ defer ln.Close()
+
+ _, ib := makeInsecureMuxer(t)
+ ub, err := tptu.New(ib, muxers, nil, nil, nil)
+ require.NoError(t, err)
+ called := false
+ customDialer := func(_ ma.Multiaddr) (ContextDialer, error) {
+ called = true
+ return &net.Dialer{}, nil
+ }
+ tb, err := NewTCPTransport(ub, nil, nil, WithDialerForAddr(customDialer))
+ require.NoError(t, err)
+
+ conn, err := tb.Dial(context.Background(), ln.Multiaddr(), peerA)
+ require.NoError(t, err)
+ require.NotNil(t, conn)
+ require.True(t, called, "custom dialer should have been called")
+ conn.Close()
+ })
+
+ t.Run("errors", func(t *testing.T) {
+ peerA, ia := makeInsecureMuxer(t)
+ ua, err := tptu.New(ia, muxers, nil, nil, nil)
+ require.NoError(t, err)
+ ta, err := NewTCPTransport(ua, nil, nil)
+ require.NoError(t, err)
+ ln, err := ta.Listen(ma.StringCast("/ip4/127.0.0.1/tcp/0"))
+ require.NoError(t, err)
+ defer ln.Close()
+
+ for _, test := range []string{"error in factory", "error in custom dialer"} {
+ t.Run(test, func(t *testing.T) {
+ _, ib := makeInsecureMuxer(t)
+ ub, err := tptu.New(ib, muxers, nil, nil, nil)
+ require.NoError(t, err)
+ customErr := errors.New("custom dialer error")
+ customDialer := func(_ ma.Multiaddr) (ContextDialer, error) {
+ if test == "error in factory" {
+ return nil, customErr
+ } else {
+ return errDialer{err: customErr}, nil
+ }
+ }
+ tb, err := NewTCPTransport(ub, nil, nil, WithDialerForAddr(customDialer))
+ require.NoError(t, err)
+
+ conn, err := tb.Dial(context.Background(), ln.Multiaddr(), peerA)
+ require.Error(t, err)
+ require.ErrorContains(t, err, customErr.Error())
+ require.Nil(t, conn)
+ })
+ }
+ })
+}
diff --git a/p2p/transport/tcpreuse/connwithscope.go b/p2p/transport/tcpreuse/connwithscope.go
new file mode 100644
index 0000000000..23354b81cd
--- /dev/null
+++ b/p2p/transport/tcpreuse/connwithscope.go
@@ -0,0 +1,27 @@
+package tcpreuse
+
+import (
+ "fmt"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/p2p/transport/tcpreuse/internal/sampledconn"
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
+type connWithScope struct {
+ sampledconn.ManetTCPConnInterface
+ ConnScope network.ConnManagementScope
+}
+
+func (c *connWithScope) Close() error {
+ defer c.ConnScope.Done()
+ return c.ManetTCPConnInterface.Close()
+}
+
+func manetConnWithScope(c manet.Conn, scope network.ConnManagementScope) (*connWithScope, error) {
+ if tcpconn, ok := c.(sampledconn.ManetTCPConnInterface); ok {
+ return &connWithScope{tcpconn, scope}, nil
+ }
+
+ return nil, fmt.Errorf("manet.Conn is not a TCP Conn")
+}
diff --git a/p2p/transport/tcpreuse/demultiplex.go b/p2p/transport/tcpreuse/demultiplex.go
new file mode 100644
index 0000000000..f9175ecfdb
--- /dev/null
+++ b/p2p/transport/tcpreuse/demultiplex.go
@@ -0,0 +1,100 @@
+package tcpreuse
+
+import (
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/libp2p/go-libp2p/p2p/transport/tcpreuse/internal/sampledconn"
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
+// identifyConnTimeout bounds how long we wait to read the first 3 bytes of a
+// new connection. It matches the default TCP connect timeout in the TCP transport.
+//
+// A var so we can change it in tests.
+var identifyConnTimeout = 5 * time.Second
+
+type DemultiplexedConnType int
+
+const (
+ DemultiplexedConnType_Unknown DemultiplexedConnType = iota
+ DemultiplexedConnType_MultistreamSelect
+ DemultiplexedConnType_HTTP
+ DemultiplexedConnType_TLS
+)
+
+func (t DemultiplexedConnType) String() string {
+ switch t {
+ case DemultiplexedConnType_MultistreamSelect:
+ return "MultistreamSelect"
+ case DemultiplexedConnType_HTTP:
+ return "HTTP"
+ case DemultiplexedConnType_TLS:
+ return "TLS"
+ default:
+ return fmt.Sprintf("Unknown(%d)", int(t))
+ }
+}
+
+func (t DemultiplexedConnType) IsKnown() bool {
+ return t >= 1 || t <= 3
+}
+
+// identifyConnType attempts to identify the connection type by peeking at the
+// first few bytes.
+// Callers must not use the passed-in Conn after this function returns.
+// If an error is returned, the connection will be closed.
+func identifyConnType(c manet.Conn) (DemultiplexedConnType, manet.Conn, error) {
+ if err := c.SetReadDeadline(time.Now().Add(identifyConnTimeout)); err != nil {
+ closeErr := c.Close()
+ return 0, nil, errors.Join(err, closeErr)
+ }
+
+ s, peekedConn, err := sampledconn.PeekBytes(c)
+ if err != nil {
+ closeErr := c.Close()
+ return 0, nil, errors.Join(err, closeErr)
+ }
+
+ if err := peekedConn.SetReadDeadline(time.Time{}); err != nil {
+ closeErr := peekedConn.Close()
+ return 0, nil, errors.Join(err, closeErr)
+ }
+
+ if IsMultistreamSelect(s) {
+ return DemultiplexedConnType_MultistreamSelect, peekedConn, nil
+ }
+ if IsTLS(s) {
+ return DemultiplexedConnType_TLS, peekedConn, nil
+ }
+ if IsHTTP(s) {
+ return DemultiplexedConnType_HTTP, peekedConn, nil
+ }
+ return DemultiplexedConnType_Unknown, peekedConn, nil
+}
+
+// Matchers are implemented here instead of in the transports so we can easily fuzz them together.
+type Prefix = [3]byte
+
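+// A multistream-select connection starts with the varint-length-prefixed
+// protocol ID "/multistream/1.0.0\n": 0x13 (19) is the length byte, followed
+// by the first two characters "/m".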
+func IsMultistreamSelect(s Prefix) bool {
+ return string(s[:]) == "\x13/m"
+}
+
+func IsHTTP(s Prefix) bool {
+ switch string(s[:]) {
+ case "GET", "HEA", "POS", "PUT", "DEL", "CON", "OPT", "TRA", "PAT":
+ return true
+ default:
+ return false
+ }
+}
+
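+// A TLS connection starts with a handshake record: content type 0x16 followed
+// by a record-layer version of 0x0301, 0x0302, or 0x0303 (TLS 1.3 also uses
+// these legacy versions on the wire).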
+func IsTLS(s Prefix) bool {
+ switch string(s[:]) {
+ case "\x16\x03\x01", "\x16\x03\x02", "\x16\x03\x03":
+ return true
+ default:
+ return false
+ }
+}
diff --git a/p2p/transport/tcpreuse/demultiplex_test.go b/p2p/transport/tcpreuse/demultiplex_test.go
new file mode 100644
index 0000000000..e201f2ca75
--- /dev/null
+++ b/p2p/transport/tcpreuse/demultiplex_test.go
@@ -0,0 +1,50 @@
+package tcpreuse
+
+import "testing"
+
+func FuzzClash(f *testing.F) {
+ // make untyped literals type correctly
+ add := func(a, b, c byte) { f.Add(a, b, c) }
+
+ // multistream-select
+ add('\x13', '/', 'm')
+ // http
+ add('G', 'E', 'T')
+ add('H', 'E', 'A')
+ add('P', 'O', 'S')
+ add('P', 'U', 'T')
+ add('D', 'E', 'L')
+ add('C', 'O', 'N')
+ add('O', 'P', 'T')
+ add('T', 'R', 'A')
+ add('P', 'A', 'T')
+ // tls
+ add('\x16', '\x03', '\x01')
+ add('\x16', '\x03', '\x02')
+ add('\x16', '\x03', '\x03')
+ add('\x16', '\x03', '\x04')
+
+ f.Fuzz(func(t *testing.T, a, b, c byte) {
+ s := Prefix{a, b, c}
+ var total uint
+
+ ms := IsMultistreamSelect(s)
+ if ms {
+ total++
+ }
+
+ http := IsHTTP(s)
+ if http {
+ total++
+ }
+
+ tls := IsTLS(s)
+ if tls {
+ total++
+ }
+
+ if total > 1 {
+ t.Errorf("clash on: %q; ms: %v; http: %v; tls: %v", s, ms, http, tls)
+ }
+ })
+}
diff --git a/p2p/transport/tcpreuse/dialer.go b/p2p/transport/tcpreuse/dialer.go
new file mode 100644
index 0000000000..d6ea1fc6a0
--- /dev/null
+++ b/p2p/transport/tcpreuse/dialer.go
@@ -0,0 +1,17 @@
+package tcpreuse
+
+import (
+ "context"
+
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
+// DialContext dials raddr, using the reuseport transport when enabled and a
+// plain manet.Dialer otherwise.
+func (t *ConnMgr) DialContext(ctx context.Context, raddr ma.Multiaddr) (manet.Conn, error) {
+ if t.useReuseport() {
+ return t.reuse.DialContext(ctx, raddr)
+ }
+ var d manet.Dialer
+ return d.DialContext(ctx, raddr)
+}
diff --git a/p2p/transport/tcpreuse/internal/sampledconn/sampledconn.go b/p2p/transport/tcpreuse/internal/sampledconn/sampledconn.go
new file mode 100644
index 0000000000..ff1f8caf44
--- /dev/null
+++ b/p2p/transport/tcpreuse/internal/sampledconn/sampledconn.go
@@ -0,0 +1,79 @@
+package sampledconn
+
+import (
+ "errors"
+ "io"
+ "net"
+ "syscall"
+ "time"
+
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
+const peekSize = 3
+
+type PeekedBytes = [peekSize]byte
+
+var ErrNotTCPConn = errors.New("passed conn is not a TCPConn")
+
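+// PeekBytes reads the first peekSize bytes from conn and returns them along
+// with a wrapped connection that replays those bytes on subsequent reads.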
+func PeekBytes(conn manet.Conn) (PeekedBytes, manet.Conn, error) {
+ if c, ok := conn.(ManetTCPConnInterface); ok {
+ return newWrappedSampledConn(c)
+ }
+
+ return PeekedBytes{}, nil, ErrNotTCPConn
+}
+
+type wrappedSampledConn struct {
+ ManetTCPConnInterface
+ peekedBytes PeekedBytes
+ bytesPeeked uint8
+}
+
+// tcpConnInterface is the interface for TCPConn's functions.
+// NOTE: `SyscallConn() (syscall.RawConn, error)` is here to make using this as
+// a TCP Conn easier, but it's a potential footgun: you could skip the peeked
+// bytes if using the fallback
+type tcpConnInterface interface {
+ net.Conn
+ syscall.Conn
+
+ CloseRead() error
+ CloseWrite() error
+
+ SetLinger(sec int) error
+ SetKeepAlive(keepalive bool) error
+ SetKeepAlivePeriod(d time.Duration) error
+ SetNoDelay(noDelay bool) error
+ MultipathTCP() (bool, error)
+
+ io.ReaderFrom
+ io.WriterTo
+}
+
+type ManetTCPConnInterface interface {
+ manet.Conn
+ tcpConnInterface
+}
+
+func newWrappedSampledConn(conn ManetTCPConnInterface) (PeekedBytes, *wrappedSampledConn, error) {
+ s := &wrappedSampledConn{ManetTCPConnInterface: conn}
+ n, err := io.ReadFull(conn, s.peekedBytes[:])
+ if err != nil {
+ if n == 0 && err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ return s.peekedBytes, nil, err
+ }
+ return s.peekedBytes, s, nil
+}
+
+func (sc *wrappedSampledConn) Read(b []byte) (int, error) {
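+ // Serve the peeked bytes first, then fall through to the underlying conn.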
+ if int(sc.bytesPeeked) != len(sc.peekedBytes) {
+ red := copy(b, sc.peekedBytes[sc.bytesPeeked:])
+ sc.bytesPeeked += uint8(red)
+ return red, nil
+ }
+
+ return sc.ManetTCPConnInterface.Read(b)
+}
diff --git a/p2p/transport/tcpreuse/internal/sampledconn/sampledconn_test.go b/p2p/transport/tcpreuse/internal/sampledconn/sampledconn_test.go
new file mode 100644
index 0000000000..6c4e989b16
--- /dev/null
+++ b/p2p/transport/tcpreuse/internal/sampledconn/sampledconn_test.go
@@ -0,0 +1,178 @@
+package sampledconn
+
+import (
+ "io"
+ "syscall"
+ "testing"
+ "time"
+
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestSampledConn(t *testing.T) {
+ testCases := []string{
+ "platform",
+ "fallback",
+ }
+
+ // Start a TCP server
+ listener, err := manet.Listen(ma.StringCast("/ip4/127.0.0.1/tcp/0"))
+ assert.NoError(t, err)
+ defer listener.Close()
+
+ serverAddr := listener.Multiaddr()
+
+ // Server goroutine
+ go func() {
+ for i := 0; i < len(testCases); i++ {
+ conn, err := listener.Accept()
+ assert.NoError(t, err)
+ defer conn.Close()
+
+ // Write some data to the connection
+ _, err = conn.Write([]byte("hello"))
+ assert.NoError(t, err)
+ }
+ }()
+
+ // Give the server a moment to start
+ time.Sleep(100 * time.Millisecond)
+
+ for _, tc := range testCases {
+ t.Run(tc, func(t *testing.T) {
+ // Create a TCP client
+ clientConn, err := manet.Dial(serverAddr)
+ assert.NoError(t, err)
+ defer clientConn.Close()
+
+ if tc == "platform" {
+ // Wrap the client connection in SampledConn
+ peeked, clientConn, err := PeekBytes(clientConn.(interface {
+ manet.Conn
+ syscall.Conn
+ }))
+ assert.NoError(t, err)
+ assert.Equal(t, "hel", string(peeked[:]))
+
+ buf := make([]byte, 5)
+ _, err = io.ReadFull(clientConn, buf)
+ assert.NoError(t, err)
+ assert.Equal(t, "hello", string(buf))
+ } else {
+ // Wrap the client connection in SampledConn
+ sample, sampledConn, err := newWrappedSampledConn(clientConn.(ManetTCPConnInterface))
+ assert.NoError(t, err)
+ assert.Equal(t, "hel", string(sample[:]))
+
+ buf := make([]byte, 5)
+ _, err = io.ReadFull(sampledConn, buf)
+ assert.NoError(t, err)
+ assert.Equal(t, "hello", string(buf))
+
+ }
+ })
+ }
+}
+
+func spawnServerAndClientConn(t *testing.T) (serverConn manet.Conn, clientConn manet.Conn) {
+ serverConnChan := make(chan manet.Conn, 1)
+
+ listener, err := manet.Listen(ma.StringCast("/ip4/127.0.0.1/tcp/0"))
+ assert.NoError(t, err)
+ defer listener.Close()
+
+ serverAddr := listener.Multiaddr()
+
+ // Server goroutine
+ go func() {
+ conn, err := listener.Accept()
+ assert.NoError(t, err)
+ serverConnChan <- conn
+ }()
+
+ // Give the server a moment to start
+ time.Sleep(100 * time.Millisecond)
+
+ // Create a TCP client
+ clientConn, err = manet.Dial(serverAddr)
+ assert.NoError(t, err)
+
+ return <-serverConnChan, clientConn
+}
+
+func TestHandleNoBytes(t *testing.T) {
+ serverConn, clientConn := spawnServerAndClientConn(t)
+ defer clientConn.Close()
+
+ // Server goroutine
+ go func() {
+ serverConn.Close()
+ }()
+ _, _, err := PeekBytes(clientConn.(interface {
+ manet.Conn
+ syscall.Conn
+ }))
+ assert.ErrorIs(t, err, io.ErrUnexpectedEOF)
+}
+
+func TestHandle1ByteAndClose(t *testing.T) {
+ serverConn, clientConn := spawnServerAndClientConn(t)
+ defer clientConn.Close()
+
+ // Server goroutine
+ go func() {
+ defer serverConn.Close()
+ _, err := serverConn.Write([]byte("h"))
+ assert.NoError(t, err)
+ }()
+ _, _, err := PeekBytes(clientConn.(interface {
+ manet.Conn
+ syscall.Conn
+ }))
+ assert.ErrorIs(t, err, io.ErrUnexpectedEOF)
+}
+
+func TestSlowBytes(t *testing.T) {
+ serverConn, clientConn := spawnServerAndClientConn(t)
+
+ interval := 100 * time.Millisecond
+
+ // Server goroutine
+ go func() {
+ defer serverConn.Close()
+
+ time.Sleep(interval)
+ _, err := serverConn.Write([]byte("h"))
+ assert.NoError(t, err)
+ time.Sleep(interval)
+ _, err = serverConn.Write([]byte("e"))
+ assert.NoError(t, err)
+ time.Sleep(interval)
+ _, err = serverConn.Write([]byte("l"))
+ assert.NoError(t, err)
+ time.Sleep(interval)
+ _, err = serverConn.Write([]byte("lo"))
+ assert.NoError(t, err)
+ }()
+
+ defer clientConn.Close()
+
+ err := clientConn.SetReadDeadline(time.Now().Add(interval * 10))
+ require.NoError(t, err)
+
+ peeked, clientConn, err := PeekBytes(clientConn.(interface {
+ manet.Conn
+ syscall.Conn
+ }))
+ assert.NoError(t, err)
+ assert.Equal(t, "hel", string(peeked[:]))
+
+ buf := make([]byte, 5)
+ _, err = io.ReadFull(clientConn, buf)
+ assert.NoError(t, err)
+ assert.Equal(t, "hello", string(buf))
+}
diff --git a/p2p/transport/tcpreuse/listener.go b/p2p/transport/tcpreuse/listener.go
new file mode 100644
index 0000000000..eacd9aeacf
--- /dev/null
+++ b/p2p/transport/tcpreuse/listener.go
@@ -0,0 +1,309 @@
+package tcpreuse
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net"
+ "sync"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/transport"
+ logging "github.com/libp2p/go-libp2p/gologshim"
+ "github.com/libp2p/go-libp2p/p2p/net/reuseport"
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
+const acceptQueueSize = 64 // It is fine to read 3 bytes from 64 connections in parallel.
+
+// How long we wait for a connection to be accepted before dropping it.
+const acceptTimeout = 30 * time.Second
+
+var log = logging.Logger("tcp-demultiplex")
+
+// ConnMgr enables you to share the same listen address between TCP and WebSocket transports.
+type ConnMgr struct {
+ enableReuseport bool
+ reuse reuseport.Transport
+ upgrader transport.Upgrader
+
+ mx sync.Mutex
+ listeners map[string]*multiplexedListener
+}
+
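+// NewConnMgr creates a ConnMgr. A minimal usage sketch, mirroring the tests
+// in this change (upg is a transport.Upgrader):
+//
+//	cm := tcpreuse.NewConnMgr(false, upg)
+//	tcpTr, _ := tcp.NewTCPTransport(upg, nil, cm)
+//
+// A WebSocket transport constructed with the same cm would then share cm's
+// TCP listeners via DemultiplexedListen.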
+func NewConnMgr(enableReuseport bool, upgrader transport.Upgrader) *ConnMgr {
+ return &ConnMgr{
+ enableReuseport: enableReuseport,
+ reuse: reuseport.Transport{},
+ upgrader: upgrader,
+ listeners: make(map[string]*multiplexedListener),
+ }
+}
+
+func (t *ConnMgr) gatedMaListen(listenAddr ma.Multiaddr) (transport.GatedMaListener, error) {
+ var mal manet.Listener
+ var err error
+ if t.useReuseport() {
+ mal, err = t.reuse.Listen(listenAddr)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ mal, err = manet.Listen(listenAddr)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return t.upgrader.GateMaListener(mal), nil
+}
+
+func (t *ConnMgr) useReuseport() bool {
+ return t.enableReuseport && ReuseportIsAvailable()
+}
+
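+// getTCPAddr strips any components after the TCP component, e.g.
+// /ip4/127.0.0.1/tcp/1234/ws becomes /ip4/127.0.0.1/tcp/1234.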
+func getTCPAddr(listenAddr ma.Multiaddr) (ma.Multiaddr, error) {
+ haveTCP := false
+ addr, _ := ma.SplitFunc(listenAddr, func(c ma.Component) bool {
+ if haveTCP {
+ return true
+ }
+ if c.Protocol().Code == ma.P_TCP {
+ haveTCP = true
+ }
+ return false
+ })
+ if !haveTCP {
+ return nil, fmt.Errorf("invalid listen addr %s, need tcp address", listenAddr)
+ }
+ return addr, nil
+}
+
+// DemultiplexedListen returns a listener for laddr listening for `connType` connections. The connections
+// accepted from returned listeners need to be upgraded with a `transport.Upgrader`.
+// NOTE: All listeners for port 0 share the same underlying socket, so they are all assigned the same concrete port.
+func (t *ConnMgr) DemultiplexedListen(laddr ma.Multiaddr, connType DemultiplexedConnType) (transport.GatedMaListener, error) {
+ if !connType.IsKnown() {
+ return nil, fmt.Errorf("unknown connection type: %s", connType)
+ }
+ laddr, err := getTCPAddr(laddr)
+ if err != nil {
+ return nil, err
+ }
+
+ t.mx.Lock()
+ defer t.mx.Unlock()
+ ml, ok := t.listeners[laddr.String()]
+ if ok {
+ dl, err := ml.DemultiplexedListen(connType)
+ if err != nil {
+ return nil, err
+ }
+ return dl, nil
+ }
+
+ gmal, err := t.gatedMaListen(laddr)
+ if err != nil {
+ return nil, err
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+ cancelFunc := func() error {
+ cancel()
+ t.mx.Lock()
+ defer t.mx.Unlock()
+ delete(t.listeners, laddr.String())
+ delete(t.listeners, gmal.Multiaddr().String())
+ return gmal.Close()
+ }
+ ml = &multiplexedListener{
+ GatedMaListener: gmal,
+ listeners: make(map[DemultiplexedConnType]*demultiplexedListener),
+ ctx: ctx,
+ closeFn: cancelFunc,
+ }
+ t.listeners[laddr.String()] = ml
+ t.listeners[gmal.Multiaddr().String()] = ml
+
+ dl, err := ml.DemultiplexedListen(connType)
+ if err != nil {
+ cerr := ml.Close()
+ return nil, errors.Join(err, cerr)
+ }
+
+ ml.wg.Add(1)
+ go ml.run()
+
+ return dl, nil
+}
+
+var _ transport.GatedMaListener = &demultiplexedListener{}
+
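+// multiplexedListener accepts connections from a single underlying TCP
+// listener, identifies each connection's type by peeking at its first bytes,
+// and hands it to the demultiplexedListener registered for that type.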
+type multiplexedListener struct {
+ transport.GatedMaListener
+ listeners map[DemultiplexedConnType]*demultiplexedListener
+ mx sync.RWMutex
+
+ ctx context.Context
+ closeFn func() error
+ wg sync.WaitGroup
+}
+
+var ErrListenerExists = errors.New("listener already exists for this conn type on this address")
+
+func (m *multiplexedListener) DemultiplexedListen(connType DemultiplexedConnType) (transport.GatedMaListener, error) {
+ if !connType.IsKnown() {
+ return nil, fmt.Errorf("unknown connection type: %s", connType)
+ }
+
+ m.mx.Lock()
+ defer m.mx.Unlock()
+ if _, ok := m.listeners[connType]; ok {
+ return nil, ErrListenerExists
+ }
+
+ ctx, cancel := context.WithCancel(m.ctx)
+ l := &demultiplexedListener{
+ buffer: make(chan *connWithScope),
+ inner: m.GatedMaListener,
+ ctx: ctx,
+ cancelFunc: cancel,
+ closeFn: func() error { m.removeDemultiplexedListener(connType); return nil },
+ }
+
+ m.listeners[connType] = l
+
+ return l, nil
+}
+
+func (m *multiplexedListener) run() error {
+ defer m.Close()
+ defer m.wg.Done()
+ acceptQueue := make(chan struct{}, acceptQueueSize)
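+ // acceptQueue acts as a semaphore bounding how many accepted connections
+ // are having their type identified concurrently.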
+ for {
+ c, connScope, err := m.GatedMaListener.Accept()
+ if err != nil {
+ return err
+ }
+ ctx, cancelCtx := context.WithTimeout(m.ctx, acceptTimeout)
+ select {
+ case acceptQueue <- struct{}{}:
+ case <-ctx.Done():
+ cancelCtx()
+ connScope.Done()
+ c.Close()
+ log.Debug("accept queue full, dropping connection", "remote_addr", c.RemoteMultiaddr())
+ continue
+ case <-m.ctx.Done():
+ cancelCtx()
+ connScope.Done()
+ c.Close()
+ log.Debug("listener closed; dropping connection", "remote_addr", c.RemoteMultiaddr())
+ continue
+ }
+
+ m.wg.Add(1)
+ go func() {
+ defer func() { <-acceptQueue }()
+ defer m.wg.Done()
+ defer cancelCtx()
+ t, c, err := identifyConnType(c)
+ if err != nil {
+ // conn closed by identifyConnType
+ connScope.Done()
+ log.Debug("error demultiplexing connection", "error", err)
+ return
+ }
+
+ connWithScope, err := manetConnWithScope(c, connScope)
+ if err != nil {
+ connScope.Done()
+ closeErr := c.Close()
+ err = errors.Join(err, closeErr)
+ log.Debug("error wrapping connection with scope", "error", err)
+ return
+ }
+
+ m.mx.RLock()
+ demux, ok := m.listeners[t]
+ m.mx.RUnlock()
+ if !ok {
+ closeErr := connWithScope.Close()
+ if closeErr != nil {
+ log.Debug("no registered listener for demultiplex connection. Error closing the connection", "type", t, "close_error", closeErr)
+ } else {
+ log.Debug("no registered listener for demultiplex connection", "type", t)
+ }
+ return
+ }
+
+ select {
+ case demux.buffer <- connWithScope:
+ case <-ctx.Done():
+ log.Debug("accept timeout; dropping connection", "remote", connWithScope.RemoteMultiaddr())
+ connWithScope.Close()
+ }
+ }()
+ }
+}
+
+func (m *multiplexedListener) Close() error {
+ m.mx.Lock()
+ for _, l := range m.listeners {
+ l.cancelFunc()
+ }
+ err := m.closeListener()
+ m.mx.Unlock()
+ m.wg.Wait()
+ return err
+}
+
+func (m *multiplexedListener) closeListener() error {
+ lerr := m.GatedMaListener.Close()
+ cerr := m.closeFn()
+ return errors.Join(lerr, cerr)
+}
+
+func (m *multiplexedListener) removeDemultiplexedListener(c DemultiplexedConnType) {
+ m.mx.Lock()
+ defer m.mx.Unlock()
+
+ delete(m.listeners, c)
+ if len(m.listeners) == 0 {
+ m.closeListener()
+ m.mx.Unlock()
+ m.wg.Wait()
+ m.mx.Lock()
+ }
+}
+
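+// demultiplexedListener receives connections of a single identified type
+// from the parent multiplexedListener through the buffer channel.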
+type demultiplexedListener struct {
+ buffer chan *connWithScope
+ inner transport.GatedMaListener
+ ctx context.Context
+ cancelFunc context.CancelFunc
+ closeFn func() error
+}
+
+func (m *demultiplexedListener) Accept() (manet.Conn, network.ConnManagementScope, error) {
+ select {
+ case c := <-m.buffer:
+ return c.ManetTCPConnInterface, c.ConnScope, nil
+ case <-m.ctx.Done():
+ return nil, nil, transport.ErrListenerClosed
+ }
+}
+
+func (m *demultiplexedListener) Close() error {
+ m.cancelFunc()
+ return m.closeFn()
+}
+
+func (m *demultiplexedListener) Multiaddr() ma.Multiaddr {
+ return m.inner.Multiaddr()
+}
+
+func (m *demultiplexedListener) Addr() net.Addr {
+ return m.inner.Addr()
+}
diff --git a/p2p/transport/tcpreuse/listener_test.go b/p2p/transport/tcpreuse/listener_test.go
new file mode 100644
index 0000000000..0f91d4992d
--- /dev/null
+++ b/p2p/transport/tcpreuse/listener_test.go
@@ -0,0 +1,497 @@
+package tcpreuse
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/tls"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "fmt"
+ "math/big"
+ "net"
+ "net/http"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/gorilla/websocket"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/transport"
+ tptu "github.com/libp2p/go-libp2p/p2p/net/upgrader"
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+ "github.com/multiformats/go-multistream"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func selfSignedTLSConfig(t *testing.T) *tls.Config {
+ t.Helper()
+ priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ require.NoError(t, err)
+
+ certTemplate := x509.Certificate{
+ SerialNumber: &big.Int{},
+ Subject: pkix.Name{
+ Organization: []string{"Test"},
+ },
+ KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
+ ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
+ BasicConstraintsValid: true,
+ }
+
+ derBytes, err := x509.CreateCertificate(rand.Reader, &certTemplate, &certTemplate, &priv.PublicKey, priv)
+ require.NoError(t, err)
+
+ cert := tls.Certificate{
+ Certificate: [][]byte{derBytes},
+ PrivateKey: priv,
+ }
+
+ tlsConfig := &tls.Config{
+ Certificates: []tls.Certificate{cert},
+ }
+ return tlsConfig
+}
+
+type maListener struct {
+ transport.GatedMaListener
+}
+
+var _ manet.Listener = &maListener{}
+
+func (ml *maListener) Accept() (manet.Conn, error) {
+ c, _, err := ml.GatedMaListener.Accept()
+ return c, err
+}
+
+type wsHandler struct{ conns chan *websocket.Conn }
+
+func (wh wsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ u := websocket.Upgrader{}
+ c, _ := u.Upgrade(w, r, http.Header{})
+ wh.conns <- c
+}
+
+func upgrader(t *testing.T) transport.Upgrader {
+ t.Helper()
+ upd, err := tptu.New(nil, nil, nil, &network.NullResourceManager{}, nil)
+ require.NoError(t, err)
+ return upd
+}
+
+func TestListenerSingle(t *testing.T) {
+ listenAddr := ma.StringCast("/ip4/0.0.0.0/tcp/0")
+ const N = 64
+ for _, enableReuseport := range []bool{true, false} {
+ t.Run(fmt.Sprintf("multistream-reuseport:%v", enableReuseport), func(t *testing.T) {
+ cm := NewConnMgr(enableReuseport, upgrader(t))
+ l, err := cm.DemultiplexedListen(listenAddr, DemultiplexedConnType_MultistreamSelect)
+ require.NoError(t, err)
+ go func() {
+ d := net.Dialer{}
+ for i := 0; i < N; i++ {
+ go func() {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ conn, err := d.DialContext(ctx, l.Addr().Network(), l.Addr().String())
+ if err != nil {
+ t.Error("failed to dial", err, i)
+ return
+ }
+ lconn := multistream.NewMSSelect(conn, "a")
+ buf := make([]byte, 10)
+ _, err = lconn.Write([]byte("hello-multistream"))
+ if err != nil {
+ t.Error(err)
+ }
+ _, err = lconn.Read(buf)
+ if err == nil {
+ t.Error("expected EOF got nil")
+ }
+ }()
+ }
+ }()
+
+ var wg sync.WaitGroup
+ for i := 0; i < N; i++ {
+ c, _, err := l.Accept()
+ require.NoError(t, err)
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ cc := multistream.NewMSSelect(c, "a")
+ defer cc.Close()
+ buf := make([]byte, 30)
+ n, err := cc.Read(buf)
+ if !assert.NoError(t, err) {
+ return
+ }
+ if !assert.Equal(t, "hello-multistream", string(buf[:n])) {
+ return
+ }
+ }()
+ }
+ wg.Wait()
+ })
+
+ t.Run(fmt.Sprintf("WebSocket-reuseport:%v", enableReuseport), func(t *testing.T) {
+ cm := NewConnMgr(enableReuseport, upgrader(t))
+ l, err := cm.DemultiplexedListen(listenAddr, DemultiplexedConnType_HTTP)
+ require.NoError(t, err)
+ wh := wsHandler{conns: make(chan *websocket.Conn, acceptQueueSize)}
+ go func() {
+ http.Serve(manet.NetListener(&maListener{GatedMaListener: l}), wh)
+ }()
+ go func() {
+ d := websocket.Dialer{}
+ for i := 0; i < N; i++ {
+ go func() {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ conn, _, err := d.DialContext(ctx, fmt.Sprintf("ws://%s", l.Addr().String()), http.Header{})
+ if err != nil {
+ t.Error("failed to dial", err, i)
+ return
+ }
+ err = conn.WriteMessage(websocket.TextMessage, []byte("hello"))
+ if err != nil {
+ t.Error(err)
+ }
+ _, _, err = conn.ReadMessage()
+ if err == nil {
+ t.Error("expected EOF got nil")
+ }
+ }()
+ }
+ }()
+ var wg sync.WaitGroup
+ for i := 0; i < N; i++ {
+ c := <-wh.conns
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ defer c.Close()
+ msgType, buf, err := c.ReadMessage()
+ if !assert.NoError(t, err) {
+ return
+ }
+ if !assert.Equal(t, msgType, websocket.TextMessage) {
+ return
+ }
+ if !assert.Equal(t, "hello", string(buf)) {
+ return
+ }
+ }()
+ }
+ wg.Wait()
+ })
+
+ t.Run(fmt.Sprintf("WebSocketTLS-reuseport:%v", enableReuseport), func(t *testing.T) {
+ cm := NewConnMgr(enableReuseport, upgrader(t))
+ l, err := cm.DemultiplexedListen(listenAddr, DemultiplexedConnType_TLS)
+ require.NoError(t, err)
+ defer l.Close()
+ wh := wsHandler{conns: make(chan *websocket.Conn, acceptQueueSize)}
+ go func() {
+ s := http.Server{Handler: wh, TLSConfig: selfSignedTLSConfig(t)}
+ s.ServeTLS(manet.NetListener(&maListener{GatedMaListener: l}), "", "")
+ }()
+ go func() {
+ d := websocket.Dialer{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}
+ for i := 0; i < N; i++ {
+ go func() {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ conn, _, err := d.DialContext(ctx, fmt.Sprintf("wss://%s", l.Addr().String()), http.Header{})
+ if err != nil {
+ t.Error("failed to dial", err, i)
+ return
+ }
+ err = conn.WriteMessage(websocket.TextMessage, []byte("hello"))
+ if err != nil {
+ t.Error(err)
+ }
+ _, _, err = conn.ReadMessage()
+ if err == nil {
+ t.Error("expected EOF got nil")
+ }
+ }()
+ }
+ }()
+ var wg sync.WaitGroup
+ for i := 0; i < N; i++ {
+ c := <-wh.conns
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ defer c.Close()
+ msgType, buf, err := c.ReadMessage()
+ if !assert.NoError(t, err) {
+ return
+ }
+ if !assert.Equal(t, msgType, websocket.TextMessage) {
+ return
+ }
+ if !assert.Equal(t, "hello", string(buf)) {
+ return
+ }
+ }()
+ }
+ wg.Wait()
+ })
+ }
+}
+
+func TestListenerMultiplexed(t *testing.T) {
+ listenAddr := ma.StringCast("/ip4/0.0.0.0/tcp/0")
+ const N = 20
+ for _, enableReuseport := range []bool{true, false} {
+ cm := NewConnMgr(enableReuseport, upgrader(t))
+ msl, err := cm.DemultiplexedListen(listenAddr, DemultiplexedConnType_MultistreamSelect)
+ require.NoError(t, err)
+ defer msl.Close()
+
+ wsl, err := cm.DemultiplexedListen(listenAddr, DemultiplexedConnType_HTTP)
+ require.NoError(t, err)
+ defer wsl.Close()
+ require.Equal(t, wsl.Multiaddr(), msl.Multiaddr())
+ wh := wsHandler{conns: make(chan *websocket.Conn, acceptQueueSize)}
+ go func() {
+ http.Serve(manet.NetListener(&maListener{GatedMaListener: wsl}), wh)
+ }()
+
+ wssl, err := cm.DemultiplexedListen(listenAddr, DemultiplexedConnType_TLS)
+ require.NoError(t, err)
+ defer wssl.Close()
+ require.Equal(t, wssl.Multiaddr(), wsl.Multiaddr())
+ whs := wsHandler{conns: make(chan *websocket.Conn, acceptQueueSize)}
+ go func() {
+ s := http.Server{Handler: whs, TLSConfig: selfSignedTLSConfig(t)}
+ s.ServeTLS(manet.NetListener(&maListener{GatedMaListener: wssl}), "", "")
+ }()
+
+ // multistream connections
+ go func() {
+ d := net.Dialer{}
+ for i := 0; i < N; i++ {
+ go func() {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ conn, err := d.DialContext(ctx, msl.Addr().Network(), msl.Addr().String())
+ if err != nil {
+ t.Error("failed to dial", err, i)
+ return
+ }
+ lconn := multistream.NewMSSelect(conn, "a")
+ buf := make([]byte, 10)
+ _, err = lconn.Write([]byte("multistream"))
+ if err != nil {
+ t.Error(err)
+ }
+ _, err = lconn.Read(buf)
+ if err == nil {
+ t.Error("expected EOF got nil")
+ }
+ }()
+ }
+ }()
+
+ // ws connections
+ go func() {
+ d := websocket.Dialer{}
+ for i := 0; i < N; i++ {
+ go func() {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ conn, _, err := d.DialContext(ctx, fmt.Sprintf("ws://%s", msl.Addr().String()), http.Header{})
+ if err != nil {
+ t.Error("failed to dial", err, i)
+ return
+ }
+ err = conn.WriteMessage(websocket.TextMessage, []byte("websocket"))
+ if err != nil {
+ t.Error(err)
+ }
+ _, _, err = conn.ReadMessage()
+ if err == nil {
+ t.Error("expected EOF got nil")
+ }
+ }()
+ }
+ }()
+
+ // wss connections
+ go func() {
+ d := websocket.Dialer{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}
+ for i := 0; i < N; i++ {
+ go func() {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ conn, _, err := d.DialContext(ctx, fmt.Sprintf("wss://%s", msl.Addr().String()), http.Header{})
+ if err != nil {
+ t.Error("failed to dial", err, i)
+ return
+ }
+ err = conn.WriteMessage(websocket.TextMessage, []byte("websocket-tls"))
+ if err != nil {
+ t.Error(err)
+ }
+ _, _, err = conn.ReadMessage()
+ if err == nil {
+ t.Error("expected EOF got nil")
+ }
+ }()
+ }
+ }()
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for i := 0; i < N; i++ {
+ c, _, err := msl.Accept()
+ if !assert.NoError(t, err) {
+ return
+ }
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ cc := multistream.NewMSSelect(c, "a")
+ defer cc.Close()
+ buf := make([]byte, 20)
+ n, err := cc.Read(buf)
+ if !assert.NoError(t, err) {
+ return
+ }
+ if !assert.Equal(t, "multistream", string(buf[:n])) {
+ return
+ }
+ }()
+ }
+ }()
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for i := 0; i < N; i++ {
+ c := <-wh.conns
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ defer c.Close()
+ msgType, buf, err := c.ReadMessage()
+ if !assert.NoError(t, err) {
+ return
+ }
+ if !assert.Equal(t, msgType, websocket.TextMessage) {
+ return
+ }
+ if !assert.Equal(t, "websocket", string(buf)) {
+ return
+ }
+ }()
+ }
+ }()
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for i := 0; i < N; i++ {
+ c := <-whs.conns
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ defer c.Close()
+ msgType, buf, err := c.ReadMessage()
+ if !assert.NoError(t, err) {
+ return
+ }
+ if !assert.Equal(t, msgType, websocket.TextMessage) {
+ return
+ }
+ if !assert.Equal(t, "websocket-tls", string(buf)) {
+ return
+ }
+ }()
+ }
+ }()
+ wg.Wait()
+ }
+}
+
+func TestListenerClose(t *testing.T) {
+ testClose := func(listenAddr ma.Multiaddr) {
+ // listen on port 0
+ cm := NewConnMgr(false, upgrader(t))
+ ml, err := cm.DemultiplexedListen(listenAddr, DemultiplexedConnType_MultistreamSelect)
+ require.NoError(t, err)
+ wl, err := cm.DemultiplexedListen(listenAddr, DemultiplexedConnType_HTTP)
+ require.NoError(t, err)
+ require.Equal(t, wl.Multiaddr(), ml.Multiaddr())
+ wl.Close()
+
+ wl, err = cm.DemultiplexedListen(listenAddr, DemultiplexedConnType_HTTP)
+ require.NoError(t, err)
+ require.Equal(t, wl.Multiaddr(), ml.Multiaddr())
+
+ ml.Close()
+
+ mll, err := cm.DemultiplexedListen(listenAddr, DemultiplexedConnType_MultistreamSelect)
+ require.NoError(t, err)
+ require.Equal(t, wl.Multiaddr(), mll.Multiaddr())
+
+ mll.Close()
+ wl.Close()
+
+ ml, err = cm.DemultiplexedListen(listenAddr, DemultiplexedConnType_MultistreamSelect)
+ require.NoError(t, err)
+
+ // Now listen on the specific port previously used
+ listenAddr = ml.Multiaddr()
+ wl, err = cm.DemultiplexedListen(listenAddr, DemultiplexedConnType_HTTP)
+ require.NoError(t, err)
+ require.Equal(t, wl.Multiaddr(), ml.Multiaddr())
+ wl.Close()
+
+ wl, err = cm.DemultiplexedListen(listenAddr, DemultiplexedConnType_HTTP)
+ require.NoError(t, err)
+ require.Equal(t, wl.Multiaddr(), ml.Multiaddr())
+
+ ml.Close()
+ wl.Close()
+ }
+ listenAddrs := []ma.Multiaddr{ma.StringCast("/ip4/0.0.0.0/tcp/0"), ma.StringCast("/ip6/::/tcp/0")}
+ for _, listenAddr := range listenAddrs {
+ testClose(listenAddr)
+ }
+}
+
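+// setDeferReset overrides *ptr with val for the duration of the test and
+// restores the original value during test cleanup.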
+func setDeferReset[T any](t testing.TB, ptr *T, val T) {
+ t.Helper()
+ orig := *ptr
+ *ptr = val
+ t.Cleanup(func() { *ptr = orig })
+}
+
+// TestHitTimeout asserts that we don't panic in case we fail to peek at the connection.
+func TestHitTimeout(t *testing.T) {
+ setDeferReset(t, &identifyConnTimeout, 100*time.Millisecond)
+ // listen on port 0
+ cm := NewConnMgr(false, upgrader(t))
+
+ listenAddr := ma.StringCast("/ip4/127.0.0.1/tcp/0")
+ ml, err := cm.DemultiplexedListen(listenAddr, DemultiplexedConnType_MultistreamSelect)
+ require.NoError(t, err)
+ defer ml.Close()
+
+ tcpConn, err := net.Dial(ml.Addr().Network(), ml.Addr().String())
+ require.NoError(t, err)
+
+ // Stall tcp conn for over the timeout.
+ time.Sleep(identifyConnTimeout + 100*time.Millisecond)
+
+ tcpConn.Close()
+}
diff --git a/p2p/transport/tcpreuse/reuseport.go b/p2p/transport/tcpreuse/reuseport.go
new file mode 100644
index 0000000000..d11f814de6
--- /dev/null
+++ b/p2p/transport/tcpreuse/reuseport.go
@@ -0,0 +1,35 @@
+package tcpreuse
+
+import (
+ "os"
+ "strings"
+
+ "github.com/libp2p/go-reuseport"
+)
+
+// envReuseport is the env variable name used to turn off reuse port.
+// It defaults to true.
+const envReuseport = "LIBP2P_TCP_REUSEPORT"
+
+// EnvReuseportVal stores the value of envReuseport. It defaults to true.
+var EnvReuseportVal = true
+
+func init() {
+ v := strings.ToLower(os.Getenv(envReuseport))
+ if v == "false" || v == "f" || v == "0" {
+ EnvReuseportVal = false
+ log.Info("REUSEPORT disabled", "LIBP2P_TCP_REUSEPORT", v)
+ }
+}
+
+// ReuseportIsAvailable returns whether reuseport is available to be used. This
+// is here because we want to be able to turn reuseport on and off selectively.
+// For now we use an ENV variable, as this handles our pressing need:
+//
+// LIBP2P_TCP_REUSEPORT=false ipfs daemon
+//
+// If this becomes a sought after feature, we could add this to the config.
+// In the end, reuseport is a stop-gap.
+func ReuseportIsAvailable() bool {
+ return EnvReuseportVal && reuseport.Available()
+}
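+
+// Callers are expected to gate reuseport-specific dial and listen behavior on
+// this helper rather than reading the environment variable directly.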
diff --git a/p2p/transport/testsuite/stream_suite.go b/p2p/transport/testsuite/stream_suite.go
new file mode 100644
index 0000000000..b139976b91
--- /dev/null
+++ b/p2p/transport/testsuite/stream_suite.go
@@ -0,0 +1,454 @@
+package ttransport
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "runtime"
+ "strconv"
+ "sync"
+ "testing"
+ "time"
+
+ crand "crypto/rand"
+ mrand "math/rand"
+
+ "github.com/libp2p/go-libp2p-testing/race"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/transport"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+var randomness []byte
+
+var StressTestTimeout = 1 * time.Minute
+
+func init() {
+ // read 1MB of randomness
+ randomness = make([]byte, 1<<20)
+ if _, err := crand.Read(randomness); err != nil {
+ panic(err)
+ }
+
+ if timeout := os.Getenv("TEST_STRESS_TIMEOUT_MS"); timeout != "" {
+ if v, err := strconv.ParseInt(timeout, 10, 32); err == nil {
+ StressTestTimeout = time.Duration(v) * time.Millisecond
+ }
+ }
+}
+
+type Options struct {
+ ConnNum int
+ StreamNum int
+ MsgNum int
+ MsgMin int
+ MsgMax int
+}
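+
+// Note: SubtestStress currently writes fixed 2 KiB (1<<11) messages; MsgMin and
+// MsgMax are carried in Options but are not consulted when sizing messages.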
+
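+// fullClose closes the write side of the stream, drains the read side to
+// verify the peer half-closes cleanly (expecting zero further bytes), and then
+// closes the stream.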
+func fullClose(t *testing.T, s network.MuxedStream) {
+ if err := s.CloseWrite(); err != nil {
+ t.Error(err)
+ s.Reset()
+ return
+ }
+ b, err := io.ReadAll(s)
+ if err != nil {
+ t.Error(err)
+ }
+ if len(b) != 0 {
+ t.Error("expected to be done reading")
+ }
+ if err := s.Close(); err != nil {
+ t.Error(err)
+ }
+}
+
+func randBuf(size int) []byte {
+ n := len(randomness) - size
+ if size < 1 || n < 1 {
+  panic(fmt.Errorf("requested buffer size out of range (%d): must be between 1 and %d", size, len(randomness)))
+ }
+
+ start := mrand.Intn(n)
+ return randomness[start : start+size]
+}
+
+func echoStream(t *testing.T, s network.MuxedStream) {
+ // echo everything
+ if _, err := io.Copy(s, s); err != nil {
+ t.Error(err)
+ }
+}
+
+func echo(t *testing.T, c transport.CapableConn) {
+ var wg sync.WaitGroup
+ defer wg.Wait()
+ for {
+ str, err := c.AcceptStream()
+ if err != nil {
+ break
+ }
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ defer str.Close()
+ echoStream(t, str)
+ }()
+ }
+}
+
+func serve(t *testing.T, l transport.Listener) {
+ var wg sync.WaitGroup
+ defer wg.Wait()
+
+ for {
+ c, err := l.Accept()
+ if err != nil {
+ return
+ }
+ defer c.Close()
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ echo(t, c)
+ }()
+ }
+}
+
+func SubtestStress(t *testing.T, ta, tb transport.Transport, maddr ma.Multiaddr, peerA peer.ID, opt Options) {
+ msgsize := 1 << 11
+
+ rateLimitN := 5000 // max of 5k funcs, because -race has 8k max.
+ rateLimitChan := make(chan struct{}, rateLimitN)
+ for i := 0; i < rateLimitN; i++ {
+ rateLimitChan <- struct{}{}
+ }
+
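+ // rateLimitChan acts as a counting semaphore: rateLimit takes a token before
+ // running f and returns it when done, capping concurrency at rateLimitN.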
+ rateLimit := func(f func()) {
+ <-rateLimitChan
+ f()
+ rateLimitChan <- struct{}{}
+ }
+
+ writeStream := func(s network.MuxedStream, bufs chan<- []byte) {
+ for i := 0; i < opt.MsgNum; i++ {
+ buf := randBuf(msgsize)
+ bufs <- buf
+ if _, err := s.Write(buf); err != nil {
+ t.Errorf("s.Write(buf): %s", err)
+ return
+ }
+ }
+ }
+
+ readStream := func(s network.MuxedStream, bufs <-chan []byte) {
+ buf2 := make([]byte, msgsize)
+ i := 0
+ for buf1 := range bufs {
+ i++
+
+ if _, err := io.ReadFull(s, buf2); err != nil {
+ t.Errorf("io.ReadFull(s, buf2): %s", err)
+ return
+ }
+ if !bytes.Equal(buf1, buf2) {
+ t.Errorf("buffers not equal (%x != %x)", buf1[:3], buf2[:3])
+ return
+ }
+ }
+ }
+
+ openStreamAndRW := func(c network.MuxedConn) {
+ s, err := c.OpenStream(context.Background())
+ if err != nil {
+ t.Errorf("failed to create NewStream: %s", err)
+ return
+ }
+
+ bufs := make(chan []byte, opt.MsgNum)
+ go func() {
+ writeStream(s, bufs)
+ close(bufs)
+ }()
+
+ readStream(s, bufs)
+ fullClose(t, s)
+ }
+
+ openConnAndRW := func() {
+ var wg sync.WaitGroup
+ defer wg.Wait()
+
+ l, err := ta.Listen(maddr)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ defer l.Close()
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ serve(t, l)
+ }()
+
+ c, err := tb.Dial(context.Background(), l.Multiaddr(), peerA)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ defer c.Close()
+
+ // serve the outgoing conn, because some muxers assume
+ // that we _always_ call serve. (this is an error?)
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ echo(t, c)
+ }()
+
+ var openWg sync.WaitGroup
+ for i := 0; i < opt.StreamNum; i++ {
+ openWg.Add(1)
+ go rateLimit(func() {
+ defer openWg.Done()
+ openStreamAndRW(c)
+ })
+ }
+ openWg.Wait()
+ }
+
+ var wg sync.WaitGroup
+ defer wg.Wait()
+ for i := 0; i < opt.ConnNum; i++ {
+ wg.Add(1)
+ go rateLimit(func() {
+ defer wg.Done()
+ openConnAndRW()
+ })
+ }
+}
+
+func SubtestStreamOpenStress(t *testing.T, ta, tb transport.Transport, maddr ma.Multiaddr, peerA peer.ID) {
+ l, err := ta.Listen(maddr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer l.Close()
+
+ count := 10000
+ workers := 5
+
+ if race.WithRace() {
+ // the race detector can only deal with 8128 simultaneous goroutines, so let's make sure we don't go overboard.
+ count = 1000
+ }
+
+ var (
+ connA, connB transport.CapableConn
+ )
+
+ accepted := make(chan error, 1)
+ go func() {
+ var err error
+ connA, err = l.Accept()
+ accepted <- err
+ }()
+ connB, err = tb.Dial(context.Background(), l.Multiaddr(), peerA)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = <-accepted
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ defer func() {
+ if connA != nil {
+ connA.Close()
+ }
+ if connB != nil {
+ connB.Close()
+ }
+ }()
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for j := 0; j < workers; j++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for i := 0; i < count; i++ {
+ s, err := connA.OpenStream(context.Background())
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ fullClose(t, s)
+ }()
+ }
+ }()
+ }
+ }()
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for i := 0; i < count*workers; i++ {
+ str, err := connB.AcceptStream()
+ if err != nil {
+ break
+ }
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ fullClose(t, str)
+ }()
+ }
+ }()
+
+ timeout := time.After(StressTestTimeout)
+ done := make(chan struct{})
+
+ go func() {
+ wg.Wait()
+ close(done)
+ }()
+
+ select {
+ case <-timeout:
+ t.Fatal("timed out receiving streams")
+ case <-done:
+ }
+}
+
+func SubtestStreamReset(t *testing.T, ta, tb transport.Transport, maddr ma.Multiaddr, peerA peer.ID) {
+ var wg sync.WaitGroup
+ defer wg.Wait()
+
+ l, err := ta.Listen(maddr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer l.Close()
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+
+ muxa, err := l.Accept()
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ defer muxa.Close()
+
+ s, err := muxa.OpenStream(context.Background())
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ defer s.Close()
+
+ // Some transports won't open the stream until we write. That's
+ // fine.
+ _, _ = s.Write([]byte("foo"))
+
+ time.Sleep(time.Millisecond * 50)
+
+ _, err = s.Write([]byte("bar"))
+ if err == nil {
+ t.Error("should have failed to write")
+ }
+
+ }()
+
+ muxb, err := tb.Dial(context.Background(), l.Multiaddr(), peerA)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer muxb.Close()
+
+ str, err := muxb.AcceptStream()
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ str.Reset()
+}
+
+func SubtestStress1Conn1Stream1Msg(t *testing.T, ta, tb transport.Transport, maddr ma.Multiaddr, peerA peer.ID) {
+ SubtestStress(t, ta, tb, maddr, peerA, Options{
+ ConnNum: 1,
+ StreamNum: 1,
+ MsgNum: 1,
+ MsgMax: 100,
+ MsgMin: 100,
+ })
+}
+
+func SubtestStress1Conn1Stream100Msg(t *testing.T, ta, tb transport.Transport, maddr ma.Multiaddr, peerA peer.ID) {
+ SubtestStress(t, ta, tb, maddr, peerA, Options{
+ ConnNum: 1,
+ StreamNum: 1,
+ MsgNum: 100,
+ MsgMax: 100,
+ MsgMin: 100,
+ })
+}
+
+func SubtestStress1Conn100Stream100Msg(t *testing.T, ta, tb transport.Transport, maddr ma.Multiaddr, peerA peer.ID) {
+ SubtestStress(t, ta, tb, maddr, peerA, Options{
+ ConnNum: 1,
+ StreamNum: 100,
+ MsgNum: 100,
+ MsgMax: 100,
+ MsgMin: 100,
+ })
+}
+
+func SubtestStressManyConn10Stream50Msg(t *testing.T, ta, tb transport.Transport, maddr ma.Multiaddr, peerA peer.ID) {
+ connNum := 5
+ if runtime.GOOS == "linux" {
+ // Linux can handle a higher number of conns here than other platforms in CI.
+ // See https://github.com/libp2p/go-libp2p/issues/1498.
+ connNum = 50
+ }
+ SubtestStress(t, ta, tb, maddr, peerA, Options{
+ ConnNum: connNum,
+ StreamNum: 10,
+ MsgNum: 50,
+ MsgMax: 100,
+ MsgMin: 100,
+ })
+}
+
+func SubtestStress1Conn1000Stream10Msg(t *testing.T, ta, tb transport.Transport, maddr ma.Multiaddr, peerA peer.ID) {
+ SubtestStress(t, ta, tb, maddr, peerA, Options{
+ ConnNum: 1,
+ StreamNum: 1000,
+ MsgNum: 10,
+ MsgMax: 100,
+ MsgMin: 100,
+ })
+}
+
+func SubtestStress1Conn100Stream100Msg10MB(t *testing.T, ta, tb transport.Transport, maddr ma.Multiaddr, peerA peer.ID) {
+ SubtestStress(t, ta, tb, maddr, peerA, Options{
+ ConnNum: 1,
+ StreamNum: 100,
+ MsgNum: 100,
+ MsgMax: 10000,
+ MsgMin: 1000,
+ })
+}
diff --git a/p2p/transport/testsuite/transport_suite.go b/p2p/transport/testsuite/transport_suite.go
new file mode 100644
index 0000000000..a24b2c6a72
--- /dev/null
+++ b/p2p/transport/testsuite/transport_suite.go
@@ -0,0 +1,305 @@
+package ttransport
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "sync"
+ "testing"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/transport"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+var testData = []byte("this is some test data")
+
+func SubtestProtocols(t *testing.T, ta, tb transport.Transport, maddr ma.Multiaddr, _ peer.ID) {
+ rawIPAddr, _ := ma.NewMultiaddr("/ip4/1.2.3.4")
+ if ta.CanDial(rawIPAddr) || tb.CanDial(rawIPAddr) {
+ t.Error("nothing should be able to dial raw IP")
+ }
+
+ tprotos := make(map[int]bool)
+ for _, p := range ta.Protocols() {
+ tprotos[p] = true
+ }
+
+ if !ta.Proxy() {
+ protos := maddr.Protocols()
+ proto := protos[len(protos)-1]
+ if !tprotos[proto.Code] {
+ t.Errorf("transport should have reported that it supports protocol '%s' (%d)", proto.Name, proto.Code)
+ }
+ } else {
+ found := false
+ for _, proto := range maddr.Protocols() {
+ if tprotos[proto.Code] {
+ found = true
+ break
+ }
+ }
+ if !found {
+ t.Errorf("didn't find any matching proxy protocols in maddr: %s", maddr)
+ }
+ }
+}
+
+func SubtestBasic(t *testing.T, ta, tb transport.Transport, maddr ma.Multiaddr, peerA peer.ID) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ list, err := ta.Listen(maddr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer list.Close()
+
+ var (
+ connA, connB transport.CapableConn
+ done = make(chan struct{})
+ )
+ defer func() {
+ <-done
+ if connA != nil {
+ connA.Close()
+ }
+ if connB != nil {
+ connB.Close()
+ }
+ }()
+
+ go func() {
+ defer close(done)
+ var err error
+ connB, err = list.Accept()
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ s, err := connB.AcceptStream()
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ buf, err := io.ReadAll(s)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ if !bytes.Equal(testData, buf) {
+ t.Errorf("expected %s, got %s", testData, buf)
+ }
+
+ n, err := s.Write(testData)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ if n != len(testData) {
+  t.Errorf("failed to write enough data (b->a): wrote %d of %d bytes", n, len(testData))
+ return
+ }
+
+ err = s.Close()
+ if err != nil {
+ t.Error(err)
+ }
+ }()
+
+ if !tb.CanDial(list.Multiaddr()) {
+ t.Error("CanDial should have returned true")
+ }
+
+ connA, err = tb.Dial(ctx, list.Multiaddr(), peerA)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ s, err := connA.OpenStream(context.Background())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ n, err := s.Write(testData)
+ if err != nil {
+ t.Fatal(err)
+ return
+ }
+
+ if n != len(testData) {
+ t.Fatalf("failed to write enough data (a->b)")
+ return
+ }
+
+ if err = s.CloseWrite(); err != nil {
+ t.Fatal(err)
+ return
+ }
+
+ buf, err := io.ReadAll(s)
+ if err != nil {
+ t.Fatal(err)
+ return
+ }
+ if !bytes.Equal(testData, buf) {
+ t.Errorf("expected %s, got %s", testData, buf)
+ }
+
+ if err = s.Close(); err != nil {
+ t.Fatal(err)
+ return
+ }
+}
+
+func SubtestPingPong(t *testing.T, ta, tb transport.Transport, maddr ma.Multiaddr, peerA peer.ID) {
+ streams := 100
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ list, err := ta.Listen(maddr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer list.Close()
+
+ var (
+ connA, connB transport.CapableConn
+ )
+ defer func() {
+ if connA != nil {
+ connA.Close()
+ }
+ if connB != nil {
+ connB.Close()
+ }
+ }()
+
+ var wg sync.WaitGroup
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ var err error
+ connA, err = list.Accept()
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ var sWg sync.WaitGroup
+ for i := 0; i < streams; i++ {
+ s, err := connA.AcceptStream()
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ sWg.Add(1)
+ go func() {
+ defer sWg.Done()
+
+ data, err := io.ReadAll(s)
+ if err != nil {
+ s.Reset()
+ t.Error(err)
+ return
+ }
+ if !bytes.HasPrefix(data, testData) {
+ t.Errorf("expected %q to have prefix %q", string(data), string(testData))
+ }
+
+ n, err := s.Write(data)
+ if err != nil {
+ s.Reset()
+ t.Error(err)
+ return
+ }
+
+ if n != len(data) {
+ s.Reset()
+  t.Errorf("failed to write enough data: wrote %d of %d bytes", n, len(data))
+ return
+ }
+ s.Close()
+ }()
+ }
+ sWg.Wait()
+ }()
+
+ if !tb.CanDial(list.Multiaddr()) {
+ t.Error("CanDial should have returned true")
+ }
+
+ connB, err = tb.Dial(ctx, list.Multiaddr(), peerA)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for i := 0; i < streams; i++ {
+ s, err := connB.OpenStream(context.Background())
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+
+ wg.Add(1)
+ go func(i int) {
+ defer wg.Done()
+ data := []byte(fmt.Sprintf("%s - %d", testData, i))
+ n, err := s.Write(data)
+ if err != nil {
+ s.Reset()
+ t.Error(err)
+ return
+ }
+
+ if n != len(data) {
+ s.Reset()
+ t.Error("failed to write enough data (a->b)")
+ return
+ }
+ if err = s.CloseWrite(); err != nil {
+ t.Error(err)
+ return
+ }
+
+ ret, err := io.ReadAll(s)
+ if err != nil {
+ s.Reset()
+ t.Error(err)
+ return
+ }
+ if !bytes.Equal(data, ret) {
+ t.Errorf("expected %q, got %q", string(data), string(ret))
+ }
+
+ if err = s.Close(); err != nil {
+ t.Error(err)
+ return
+ }
+ }(i)
+ }
+ wg.Wait()
+}
+
+func SubtestCancel(t *testing.T, ta, tb transport.Transport, maddr ma.Multiaddr, peerA peer.ID) {
+ list, err := ta.Listen(maddr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer list.Close()
+
+ ctx, cancel := context.WithCancel(context.Background())
+ cancel()
+ c, err := tb.Dial(ctx, list.Multiaddr(), peerA)
+ if err == nil {
+ c.Close()
+ t.Fatal("dial should have failed")
+ }
+}
diff --git a/p2p/transport/testsuite/utils_suite.go b/p2p/transport/testsuite/utils_suite.go
new file mode 100644
index 0000000000..8b002f8900
--- /dev/null
+++ b/p2p/transport/testsuite/utils_suite.go
@@ -0,0 +1,53 @@
+package ttransport
+
+import (
+ "reflect"
+ "runtime"
+ "testing"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/transport"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+type TransportSubTestFn func(t *testing.T, ta, tb transport.Transport, maddr ma.Multiaddr, peerA peer.ID)
+
+var Subtests = []TransportSubTestFn{
+ SubtestProtocols,
+ SubtestBasic,
+ SubtestCancel,
+ SubtestPingPong,
+
+ // Stolen from the stream muxer test suite.
+ SubtestStress1Conn1Stream1Msg,
+ SubtestStress1Conn1Stream100Msg,
+ SubtestStress1Conn100Stream100Msg,
+ SubtestStressManyConn10Stream50Msg,
+ SubtestStress1Conn1000Stream10Msg,
+ SubtestStress1Conn100Stream100Msg10MB,
+ SubtestStreamOpenStress,
+ SubtestStreamReset,
+}
+
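+// getFunctionName returns the fully qualified name of the function backing i.
+// SubtestTransportWithFs uses it to derive subtest names.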
+func getFunctionName(i interface{}) string {
+ return runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name()
+}
+
+func SubtestTransport(t *testing.T, ta, tb transport.Transport, addr string, peerA peer.ID) {
+ t.Helper()
+ SubtestTransportWithFs(t, ta, tb, addr, peerA, Subtests)
+}
+
+func SubtestTransportWithFs(t *testing.T, ta, tb transport.Transport, addr string, peerA peer.ID, tests []TransportSubTestFn) {
+ maddr, err := ma.NewMultiaddr(addr)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for _, f := range tests {
+ t.Run(getFunctionName(f), func(t *testing.T) {
+ f(t, ta, tb, maddr, peerA)
+ })
+ }
+}
diff --git a/p2p/transport/webrtc/connection.go b/p2p/transport/webrtc/connection.go
new file mode 100644
index 0000000000..d75c309c51
--- /dev/null
+++ b/p2p/transport/webrtc/connection.go
@@ -0,0 +1,283 @@
+package libp2pwebrtc
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "math"
+ "net"
+ "sync"
+ "sync/atomic"
+
+ ic "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ tpt "github.com/libp2p/go-libp2p/core/transport"
+
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/pion/datachannel"
+ "github.com/pion/sctp"
+ "github.com/pion/webrtc/v4"
+)
+
+var _ tpt.CapableConn = &connection{}
+
+const maxAcceptQueueLen = 256
+
+type errConnectionTimeout struct{}
+
+var _ net.Error = &errConnectionTimeout{}
+
+func (errConnectionTimeout) Error() string { return "connection timeout" }
+func (errConnectionTimeout) Timeout() bool { return true }
+func (errConnectionTimeout) Temporary() bool { return false }
+
+var errConnClosed = errors.New("connection closed")
+
+type dataChannel struct {
+ stream datachannel.ReadWriteCloser
+ channel *webrtc.DataChannel
+}
+
+type connection struct {
+ pc *webrtc.PeerConnection
+ transport *WebRTCTransport
+ scope network.ConnManagementScope
+
+ closeOnce sync.Once
+ closeErr error
+
+ localPeer peer.ID
+ localMultiaddr ma.Multiaddr
+
+ remotePeer peer.ID
+ remoteKey ic.PubKey
+ remoteMultiaddr ma.Multiaddr
+
+ m sync.Mutex
+ streams map[uint16]*stream
+ nextStreamID atomic.Int32
+
+ acceptQueue chan dataChannel
+
+ ctx context.Context
+ cancel context.CancelFunc
+}
+
+func newConnection(
+ direction network.Direction,
+ pc *webrtc.PeerConnection,
+ transport *WebRTCTransport,
+ scope network.ConnManagementScope,
+
+ localPeer peer.ID,
+ localMultiaddr ma.Multiaddr,
+
+ remotePeer peer.ID,
+ remoteKey ic.PubKey,
+ remoteMultiaddr ma.Multiaddr,
+ incomingDataChannels chan dataChannel,
+ peerConnectionClosedCh chan struct{},
+) (*connection, error) {
+ ctx, cancel := context.WithCancel(context.Background())
+ c := &connection{
+ pc: pc,
+ transport: transport,
+ scope: scope,
+
+ localPeer: localPeer,
+ localMultiaddr: localMultiaddr,
+
+ remotePeer: remotePeer,
+ remoteKey: remoteKey,
+ remoteMultiaddr: remoteMultiaddr,
+ ctx: ctx,
+ cancel: cancel,
+ streams: make(map[uint16]*stream),
+
+ acceptQueue: incomingDataChannels,
+ }
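+ // Stream IDs are assigned with opposite parities on the two sides so that
+ // both peers can open streams without coordinating: the inbound side uses odd
+ // IDs and the outbound side even IDs, each advancing by 2 per stream.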
+ switch direction {
+ case network.DirInbound:
+ c.nextStreamID.Store(1)
+ case network.DirOutbound:
+ // stream ID 0 is used for the Noise handshake stream
+ c.nextStreamID.Store(2)
+ }
+
+ pc.OnConnectionStateChange(c.onConnectionStateChange)
+ pc.SCTP().OnClose(func(err error) {
+ if err != nil {
+  c.closeWithError(fmt.Errorf("%w: %w", errConnClosed, err))
+  return
+ }
+ c.closeWithError(errConnClosed)
+ })
+ select {
+ case <-peerConnectionClosedCh:
+ c.Close()
+ return nil, errConnClosed
+ default:
+ }
+ return c, nil
+}
+
+// ConnState implements transport.CapableConn
+func (c *connection) ConnState() network.ConnectionState {
+ return network.ConnectionState{Transport: "webrtc-direct"}
+}
+
+// Close closes the underlying peerconnection.
+func (c *connection) Close() error {
+ c.closeWithError(errConnClosed)
+ return nil
+}
+
+// CloseWithError closes the connection. The error code is ignored, as there is
+// no way to signal it to the remote peer when closing the underlying peerconnection.
+func (c *connection) CloseWithError(_ network.ConnErrorCode) error {
+ return c.Close()
+}
+
+// closeWithError is used to Close the connection when the underlying DTLS connection fails
+func (c *connection) closeWithError(err error) {
+ c.closeOnce.Do(func() {
+ c.closeErr = err
+ // cancel must be called after closeErr is set. This ensures interested goroutines waiting on
+ // ctx.Done can read closeErr without holding the conn lock.
+ c.cancel()
+ // closing peerconnection will close the datachannels associated with the streams
+ c.pc.Close()
+
+ c.m.Lock()
+ streams := c.streams
+ c.streams = nil
+ c.m.Unlock()
+ for _, s := range streams {
+ s.closeForShutdown(err)
+ }
+ c.scope.Done()
+ })
+}
+
+func (c *connection) IsClosed() bool {
+ return c.ctx.Err() != nil
+}
+
+func (c *connection) OpenStream(ctx context.Context) (network.MuxedStream, error) {
+ if c.IsClosed() {
+ return nil, c.closeErr
+ }
+
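+ // nextStreamID.Add returns the post-increment value, so subtracting 2 yields
+ // the ID reserved for this stream while preserving the counter's parity.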
+ id := c.nextStreamID.Add(2) - 2
+ if id > math.MaxUint16 {
+ return nil, errors.New("exhausted stream ID space")
+ }
+ streamID := uint16(id)
+ dc, err := c.pc.CreateDataChannel("", &webrtc.DataChannelInit{ID: &streamID})
+ if err != nil {
+ return nil, err
+ }
+ rwc, err := c.detachChannel(ctx, dc)
+ if err != nil {
+ // There's a race between webrtc.SCTP.OnClose callback and the underlying
+ // association closing. It's nicer to close the connection here.
+ if errors.Is(err, sctp.ErrStreamClosed) {
+ c.closeWithError(errConnClosed)
+ return nil, c.closeErr
+ }
+ dc.Close()
+ return nil, fmt.Errorf("detach channel failed for stream(%d): %w", streamID, err)
+ }
+ str := newStream(dc, rwc, maxSendMessageSize, func() { c.removeStream(streamID) })
+ if err := c.addStream(str); err != nil {
+ str.Reset()
+ return nil, fmt.Errorf("failed to add stream(%d) to connection: %w", streamID, err)
+ }
+ return str, nil
+}
+
+func (c *connection) AcceptStream() (network.MuxedStream, error) {
+ select {
+ case <-c.ctx.Done():
+ return nil, c.closeErr
+ case dc := <-c.acceptQueue:
+ str := newStream(dc.channel, dc.stream, maxSendMessageSize, func() { c.removeStream(*dc.channel.ID()) })
+ if err := c.addStream(str); err != nil {
+ str.Reset()
+ return nil, err
+ }
+ return str, nil
+ }
+}
+
+func (c *connection) LocalPeer() peer.ID { return c.localPeer }
+func (c *connection) RemotePeer() peer.ID { return c.remotePeer }
+func (c *connection) RemotePublicKey() ic.PubKey { return c.remoteKey }
+func (c *connection) LocalMultiaddr() ma.Multiaddr { return c.localMultiaddr }
+func (c *connection) RemoteMultiaddr() ma.Multiaddr { return c.remoteMultiaddr }
+func (c *connection) Scope() network.ConnScope { return c.scope }
+func (c *connection) Transport() tpt.Transport { return c.transport }
+
+func (c *connection) addStream(str *stream) error {
+ c.m.Lock()
+ defer c.m.Unlock()
+ if c.streams == nil {
+ return c.closeErr
+ }
+ if _, ok := c.streams[str.id]; ok {
+ return errors.New("stream ID already exists")
+ }
+ c.streams[str.id] = str
+ return nil
+}
+
+func (c *connection) removeStream(id uint16) {
+ c.m.Lock()
+ defer c.m.Unlock()
+ delete(c.streams, id)
+}
+
+func (c *connection) onConnectionStateChange(state webrtc.PeerConnectionState) {
+ if state == webrtc.PeerConnectionStateFailed || state == webrtc.PeerConnectionStateClosed {
+ c.closeWithError(errConnectionTimeout{})
+ }
+}
+
+// detachChannel detaches an outgoing channel, honoring both the context
+// passed to `OpenStream` and the closure of the underlying peerconnection.
+//
+// The underlying SCTP stream for a datachannel implements a net.Conn interface.
+// However, the datachannel creates a goroutine which continuously reads from
+// the SCTP stream and surfaces the data using an OnMessage callback.
+//
+// The actual abstractions are as follows: webrtc.DataChannel
+// wraps pion.DataChannel, which wraps sctp.Stream.
+//
+// The reading goroutine, the Detach method, and the OnMessage callback all
+// live at the webrtc.DataChannel level.
+// Detach provides us abstracted access to the underlying pion.DataChannel,
+// which allows us to issue Read calls to the datachannel.
+// This was desired because it was not feasible to introduce backpressure
+// with the OnMessage callbacks. The tradeoff is a change in the semantics of
+// the OnOpen callback, and having to force close Read locally.
+func (c *connection) detachChannel(ctx context.Context, dc *webrtc.DataChannel) (datachannel.ReadWriteCloser, error) {
+ done := make(chan struct{})
+
+ var rwc datachannel.ReadWriteCloser
+ var err error
+ // OnOpen will return immediately for detached datachannels
+ // refer: https://github.com/pion/webrtc/blob/7ab3174640b3ce15abebc2516a2ca3939b5f105f/datachannel.go#L278-L282
+ dc.OnOpen(func() {
+ rwc, err = dc.Detach()
+ // this is safe since the function should return instantly if the peerconnection is closed
+ close(done)
+ })
+ select {
+ case <-c.ctx.Done():
+ return nil, c.closeErr
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ case <-done:
+ return rwc, err
+ }
+}
diff --git a/p2p/transport/webrtc/fingerprint.go b/p2p/transport/webrtc/fingerprint.go
new file mode 100644
index 0000000000..0b1fe488b4
--- /dev/null
+++ b/p2p/transport/webrtc/fingerprint.go
@@ -0,0 +1,53 @@
+package libp2pwebrtc
+
+import (
+ "crypto"
+ "crypto/x509"
+ "errors"
+
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/multiformats/go-multibase"
+ mh "github.com/multiformats/go-multihash"
+ "github.com/pion/webrtc/v4"
+)
+
+// parseFingerprint is forked from pion to avoid a bytes-to-string allocation,
+// and to skip the hex interspersing step entirely when we do not need it.
+
+var errHashUnavailable = errors.New("fingerprint: hash algorithm is not linked into the binary")
+
+// parseFingerprint creates a fingerprint for a certificate using the specified hash algorithm
+func parseFingerprint(cert *x509.Certificate, algo crypto.Hash) ([]byte, error) {
+ if !algo.Available() {
+ return nil, errHashUnavailable
+ }
+ h := algo.New()
+ // Hash.Writer is specified to be never returning an error.
+ // https://golang.org/pkg/hash/#Hash
+ h.Write(cert.Raw)
+ return h.Sum(nil), nil
+}
+
+func decodeRemoteFingerprint(maddr ma.Multiaddr) (*mh.DecodedMultihash, error) {
+ remoteFingerprintMultibase, err := maddr.ValueForProtocol(ma.P_CERTHASH)
+ if err != nil {
+ return nil, err
+ }
+ _, data, err := multibase.Decode(remoteFingerprintMultibase)
+ if err != nil {
+ return nil, err
+ }
+ return mh.Decode(data)
+}
+
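+// encodeDTLSFingerprint converts pion's colon-interspersed hex fingerprint into
+// the multibase(base64url)-encoded sha2-256 multihash used in /certhash
+// multiaddr components.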
+func encodeDTLSFingerprint(fp webrtc.DTLSFingerprint) (string, error) {
+ digest, err := decodeInterspersedHexFromASCIIString(fp.Value)
+ if err != nil {
+ return "", err
+ }
+ encoded, err := mh.Encode(digest, mh.SHA2_256)
+ if err != nil {
+ return "", err
+ }
+ return multibase.Encode(multibase.Base64url, encoded)
+}
diff --git a/p2p/transport/webrtc/hex.go b/p2p/transport/webrtc/hex.go
new file mode 100644
index 0000000000..482036540a
--- /dev/null
+++ b/p2p/transport/webrtc/hex.go
@@ -0,0 +1,70 @@
+package libp2pwebrtc
+
+// The code in this file is adapted from the Go standard library's hex package.
+// As found in https://cs.opensource.google/go/go/+/refs/tags/go1.20.2:src/encoding/hex/hex.go
+//
+// We adapted the original code so that the interspersed colons can be handled
+// in the same pass as the hex encoding/decoding, rather than in two passes.
+
+import (
+ "encoding/hex"
+ "errors"
+)
+
+// encodeInterspersedHex encodes a byte slice into a string of hex characters,
+// separating each encoded byte with a colon (':').
+//
+// Example: { 0x01, 0x02, 0x03 } -> "01:02:03"
+func encodeInterspersedHex(src []byte) string {
+ if len(src) == 0 {
+ return ""
+ }
+ s := hex.EncodeToString(src)
+ n := len(s)
+ // Determine number of colons
+ colons := n / 2
+ if n%2 == 0 {
+ colons--
+ }
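+ // n is always even here (hex encoding doubles the input length), so this is
+ // n/2 - 1 separators between the n/2 two-character byte groups.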
+ buffer := make([]byte, n+colons)
+
+ for i, j := 0, 0; i < n; i, j = i+2, j+3 {
+ copy(buffer[j:j+2], s[i:i+2])
+ if j+3 < len(buffer) {
+ buffer[j+2] = ':'
+ }
+ }
+ return string(buffer)
+}
+
+var errUnexpectedIntersperseHexChar = errors.New("unexpected character in interspersed hex string")
+
+// decodeInterspersedHexFromASCIIString decodes an ASCII string of hex characters into a byte slice,
+// where the hex characters are expected to be separated by a colon (':').
+//
+// NOTE that this function returns an error in case the input string contains non-ASCII characters.
+//
+// Example: "01:02:03" -> { 0x01, 0x02, 0x03 }
+func decodeInterspersedHexFromASCIIString(s string) ([]byte, error) {
+ n := len(s)
+ buffer := make([]byte, n/3*2+n%3)
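+ // Every "ab:" group of three input characters contributes two hex digits; a
+ // trailing "ab" group of two contributes both, hence n/3*2 + n%3 bytes.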
+ j := 0
+ for i := 0; i < n; i++ {
+ if i%3 == 2 {
+ if s[i] != ':' {
+ return nil, errUnexpectedIntersperseHexChar
+ }
+ } else {
+ if s[i] == ':' {
+ return nil, errUnexpectedIntersperseHexChar
+ }
+ buffer[j] = s[i]
+ j++
+ }
+ }
+ dst := make([]byte, hex.DecodedLen(len(buffer)))
+ if _, err := hex.Decode(dst, buffer); err != nil {
+ return nil, err
+ }
+ return dst, nil
+}
diff --git a/p2p/transport/webrtc/hex_test.go b/p2p/transport/webrtc/hex_test.go
new file mode 100644
index 0000000000..c8a7147498
--- /dev/null
+++ b/p2p/transport/webrtc/hex_test.go
@@ -0,0 +1,132 @@
+package libp2pwebrtc
+
+import (
+ "encoding/hex"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestEncodeInterspersedHex(t *testing.T) {
+ b, err := hex.DecodeString("ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad")
+ require.NoError(t, err)
+ require.Equal(t, "ba:78:16:bf:8f:01:cf:ea:41:41:40:de:5d:ae:22:23:b0:03:61:a3:96:17:7a:9c:b4:10:ff:61:f2:00:15:ad", encodeInterspersedHex(b))
+}
+
+func BenchmarkEncodeInterspersedHex(b *testing.B) {
+ data, err := hex.DecodeString("ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad")
+ require.NoError(b, err)
+
+ for i := 0; i < b.N; i++ {
+ encodeInterspersedHex(data)
+ }
+}
+
+func TestDecodeInterspersedHexStringLowerCase(t *testing.T) {
+ b, err := decodeInterspersedHexFromASCIIString("ba:78:16:bf:8f:01:cf:ea:41:41:40:de:5d:ae:22:23:b0:03:61:a3:96:17:7a:9c:b4:10:ff:61:f2:00:15:ad")
+ require.NoError(t, err)
+ require.Equal(t, "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad", hex.EncodeToString(b))
+}
+
+func TestDecodeInterspersedHexStringMixedCase(t *testing.T) {
+ b, err := decodeInterspersedHexFromASCIIString("Ba:78:16:BF:8F:01:cf:ea:41:41:40:De:5d:ae:22:23:b0:03:61:a3:96:17:7a:9c:b4:10:FF:61:f2:00:15:ad")
+ require.NoError(t, err)
+ require.Equal(t, "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad", hex.EncodeToString(b))
+}
+
+func TestDecodeInterspersedHexStringOneByte(t *testing.T) {
+ b, err := decodeInterspersedHexFromASCIIString("ba")
+ require.NoError(t, err)
+ require.Equal(t, "ba", hex.EncodeToString(b))
+}
+
+func TestDecodeInterspersedHexBytesLowerCase(t *testing.T) {
+ b, err := decodeInterspersedHexFromASCIIString("ba:78:16:bf:8f:01:cf:ea:41:41:40:de:5d:ae:22:23:b0:03:61:a3:96:17:7a:9c:b4:10:ff:61:f2:00:15:ad")
+ require.NoError(t, err)
+ require.Equal(t, "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad", hex.EncodeToString(b))
+}
+
+func BenchmarkDecode(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ _, err := decodeInterspersedHexFromASCIIString("ba:78:16:bf:8f:01:cf:ea:41:41:40:de:5d:ae:22:23:b0:03:61:a3:96:17:7a:9c:b4:10:ff:61:f2:00:15:ad")
+ require.NoError(b, err)
+ }
+}
+
+func TestDecodeInterspersedHexBytesMixedCase(t *testing.T) {
+ b, err := decodeInterspersedHexFromASCIIString("Ba:78:16:BF:8F:01:cf:ea:41:41:40:De:5d:ae:22:23:b0:03:61:a3:96:17:7a:9c:b4:10:FF:61:f2:00:15:ad")
+ require.NoError(t, err)
+ require.Equal(t, "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad", hex.EncodeToString(b))
+}
+
+func TestDecodeInterspersedHexBytesOneByte(t *testing.T) {
+ b, err := decodeInterspersedHexFromASCIIString("ba")
+ require.NoError(t, err)
+ require.Equal(t, "ba", hex.EncodeToString(b))
+}
+
+func TestEncodeInterspersedHexNilSlice(t *testing.T) {
+ require.Equal(t, "", encodeInterspersedHex(nil))
+ require.Equal(t, "", encodeInterspersedHex([]byte{}))
+}
+
+func TestDecodeInterspersedHexEmpty(t *testing.T) {
+ b, err := decodeInterspersedHexFromASCIIString("")
+ require.NoError(t, err)
+ require.Equal(t, []byte{}, b)
+}
+
+func TestDecodeInterspersedHexFromASCIIStringEmpty(t *testing.T) {
+ b, err := decodeInterspersedHexFromASCIIString("")
+ require.NoError(t, err)
+ require.Equal(t, []byte{}, b)
+}
+
+func TestDecodeInterspersedHexInvalid(t *testing.T) {
+ for _, v := range []string{"0", "0000", "000"} {
+ _, err := decodeInterspersedHexFromASCIIString(v)
+ require.Error(t, err)
+ }
+}
+
+func TestDecodeInterspersedHexValid(t *testing.T) {
+ b, err := decodeInterspersedHexFromASCIIString("00")
+ require.NoError(t, err)
+ require.Equal(t, []byte{0}, b)
+}
+
+func TestDecodeInterspersedHexFromASCIIStringInvalid(t *testing.T) {
+ for _, v := range []string{"0", "0000", "000"} {
+ _, err := decodeInterspersedHexFromASCIIString(v)
+ require.Error(t, err)
+ }
+}
+
+func TestDecodeInterspersedHexFromASCIIStringValid(t *testing.T) {
+ b, err := decodeInterspersedHexFromASCIIString("00")
+ require.NoError(t, err)
+ require.Equal(t, []byte{0}, b)
+}
+
+func FuzzInterspersedHex(f *testing.F) {
+ f.Fuzz(func(t *testing.T, b []byte) {
+ decoded, err := decodeInterspersedHexFromASCIIString(string(b))
+ if err != nil {
+ return
+ }
+ encoded := encodeInterspersedHex(decoded)
+ require.Equal(t, strings.ToLower(string(b)), encoded)
+ })
+}
+
+func FuzzInterspersedHexASCII(f *testing.F) {
+ f.Fuzz(func(t *testing.T, s string) {
+ decoded, err := decodeInterspersedHexFromASCIIString(s)
+ if err != nil {
+ return
+ }
+ encoded := encodeInterspersedHex(decoded)
+ require.Equal(t, strings.ToLower(s), encoded)
+ })
+}
diff --git a/p2p/transport/webrtc/listener.go b/p2p/transport/webrtc/listener.go
new file mode 100644
index 0000000000..4616f0cfb6
--- /dev/null
+++ b/p2p/transport/webrtc/listener.go
@@ -0,0 +1,352 @@
+package libp2pwebrtc
+
+import (
+ "context"
+ "crypto"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "net"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ tpt "github.com/libp2p/go-libp2p/core/transport"
+ "github.com/libp2p/go-libp2p/p2p/transport/webrtc/udpmux"
+
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+ "github.com/multiformats/go-multibase"
+ "github.com/multiformats/go-multihash"
+ "github.com/pion/webrtc/v4"
+)
+
+type connMultiaddrs struct {
+ local, remote ma.Multiaddr
+}
+
+var _ network.ConnMultiaddrs = &connMultiaddrs{}
+
+func (c *connMultiaddrs) LocalMultiaddr() ma.Multiaddr { return c.local }
+func (c *connMultiaddrs) RemoteMultiaddr() ma.Multiaddr { return c.remote }
+
+const (
+ candidateSetupTimeout = 10 * time.Second
+ // This is higher than for other transports (64) as there's no way to detect a peer that has gone away
+ // after sending the initial connection request message (a STUN Binding request). Such peers take up a
+ // goroutine until the connection times out. As the number of parallel handshakes is still guarded by the
+ // manager, this higher number is okay.
+ DefaultMaxInFlightConnections = 128
+)
+
+type listener struct {
+ transport *WebRTCTransport
+
+ mux *udpmux.UDPMux
+
+ config webrtc.Configuration
+ localFingerprint webrtc.DTLSFingerprint
+ localFingerprintMultibase string
+
+ localAddr net.Addr
+ localMultiaddr ma.Multiaddr
+
+ // incoming connections queued for Accept (the channel is intentionally unbuffered)
+ acceptQueue chan tpt.CapableConn
+
+ // used to control the lifecycle of the listener
+ ctx context.Context
+ cancel context.CancelFunc
+ wg sync.WaitGroup
+}
+
+var _ tpt.Listener = &listener{}
+
+func newListener(transport *WebRTCTransport, laddr ma.Multiaddr, socket net.PacketConn, config webrtc.Configuration) (*listener, error) {
+ localFingerprints, err := config.Certificates[0].GetFingerprints()
+ if err != nil {
+ return nil, err
+ }
+
+ localMh, err := hex.DecodeString(strings.ReplaceAll(localFingerprints[0].Value, ":", ""))
+ if err != nil {
+ return nil, err
+ }
+ localMhBuf, err := multihash.Encode(localMh, multihash.SHA2_256)
+ if err != nil {
+ return nil, err
+ }
+ localFpMultibase, err := multibase.Encode(multibase.Base64url, localMhBuf)
+ if err != nil {
+ return nil, err
+ }
+
+ l := &listener{
+ transport: transport,
+ config: config,
+ localFingerprint: localFingerprints[0],
+ localFingerprintMultibase: localFpMultibase,
+ localMultiaddr: laddr,
+ localAddr: socket.LocalAddr(),
+ acceptQueue: make(chan tpt.CapableConn),
+ }
+
+ l.ctx, l.cancel = context.WithCancel(context.Background())
+ l.mux = udpmux.NewUDPMux(socket)
+ l.mux.Start()
+
+ l.wg.Add(1)
+ go func() {
+ defer l.wg.Done()
+ l.listen()
+ }()
+
+ return l, nil
+}
+
+func (l *listener) listen() {
+ // Accepting a connection requires instantiating a peerconnection and a noise connection
+ // which is expensive. We therefore limit the number of in-flight connection requests. A
+ // connection is considered to be in flight from the instant it is handled until it is
+ // dequeued by a call to Accept, or errors out in some way.
+ inFlightSemaphore := make(chan struct{}, l.transport.maxInFlightConnections)
+ for {
+ select {
+ case inFlightSemaphore <- struct{}{}:
+ case <-l.ctx.Done():
+ return
+ }
+
+ candidate, err := l.mux.Accept(l.ctx)
+ if err != nil {
+ if l.ctx.Err() == nil {
+ log.Debug("accepting candidate failed", "error", err)
+ }
+ return
+ }
+
+ go func() {
+ defer func() { <-inFlightSemaphore }()
+
+ ctx, cancel := context.WithTimeout(l.ctx, candidateSetupTimeout)
+ defer cancel()
+
+ conn, err := l.handleCandidate(ctx, candidate)
+ if err != nil {
+ l.mux.RemoveConnByUfrag(candidate.Ufrag)
+ log.Debug("could not accept connection", "ufrag", candidate.Ufrag, "error", err)
+ return
+ }
+
+ select {
+ case <-l.ctx.Done():
+ log.Debug("dropping connection, listener closed")
+ conn.Close()
+ case l.acceptQueue <- conn:
+ // acceptQueue is an unbuffered channel, so this blocks until the connection is accepted.
+ }
+ }()
+ }
+}
+
+func (l *listener) handleCandidate(ctx context.Context, candidate udpmux.Candidate) (tpt.CapableConn, error) {
+ remoteMultiaddr, err := manet.FromNetAddr(candidate.Addr)
+ if err != nil {
+ return nil, err
+ }
+ if l.transport.gater != nil {
+ localAddr, _ := ma.SplitFunc(l.localMultiaddr, func(c ma.Component) bool { return c.Protocol().Code == ma.P_CERTHASH })
+ if !l.transport.gater.InterceptAccept(&connMultiaddrs{local: localAddr, remote: remoteMultiaddr}) {
+ // The connection attempt is rejected before we can send the client an error.
+ // This means that the connection attempt will time out.
+ return nil, errors.New("connection gated")
+ }
+ }
+ scope, err := l.transport.rcmgr.OpenConnection(network.DirInbound, false, remoteMultiaddr)
+ if err != nil {
+ return nil, err
+ }
+ conn, err := l.setupConnection(ctx, scope, remoteMultiaddr, candidate)
+ if err != nil {
+ scope.Done()
+ return nil, err
+ }
+ if l.transport.gater != nil && !l.transport.gater.InterceptSecured(network.DirInbound, conn.RemotePeer(), conn) {
+ conn.Close()
+ return nil, errors.New("connection gated")
+ }
+ return conn, nil
+}
+
+func (l *listener) setupConnection(
+ ctx context.Context, scope network.ConnManagementScope,
+ remoteMultiaddr ma.Multiaddr, candidate udpmux.Candidate,
+) (tConn tpt.CapableConn, err error) {
+ var w webRTCConnection
+ defer func() {
+ if err != nil {
+ if w.PeerConnection != nil {
+ _ = w.PeerConnection.Close()
+ }
+ if tConn != nil {
+ _ = tConn.Close()
+ }
+ }
+ }()
+
+ settingEngine := webrtc.SettingEngine{LoggerFactory: pionLoggerFactory}
+ settingEngine.SetAnsweringDTLSRole(webrtc.DTLSRoleServer)
+ settingEngine.SetICECredentials(candidate.Ufrag, candidate.Ufrag)
+ settingEngine.SetLite(true)
+ settingEngine.SetICEUDPMux(l.mux)
+ settingEngine.SetIncludeLoopbackCandidate(true)
+ settingEngine.DisableCertificateFingerprintVerification(true)
+ settingEngine.SetICETimeouts(
+ l.transport.peerConnectionTimeouts.Disconnect,
+ l.transport.peerConnectionTimeouts.Failed,
+ l.transport.peerConnectionTimeouts.Keepalive,
+ )
+ // This is higher than the path MTU due to a bug in the sctp chunking logic.
+ // Remove this after https://github.com/pion/sctp/pull/301 is included
+ // in a release.
+ settingEngine.SetReceiveMTU(udpmux.ReceiveBufSize)
+ settingEngine.DetachDataChannels()
+ settingEngine.SetSCTPMaxReceiveBufferSize(sctpReceiveBufferSize)
+ if err := scope.ReserveMemory(sctpReceiveBufferSize, network.ReservationPriorityMedium); err != nil {
+ return nil, err
+ }
+
+ w, err = newWebRTCConnection(settingEngine, l.config)
+ if err != nil {
+ return nil, fmt.Errorf("instantiating peer connection failed: %w", err)
+ }
+
+ errC := addOnConnectionStateChangeCallback(w.PeerConnection)
+ // Infer the client SDP from the incoming STUN message by setting the ice-ufrag.
+ if err := w.PeerConnection.SetRemoteDescription(webrtc.SessionDescription{
+ SDP: createClientSDP(candidate.Addr, candidate.Ufrag),
+ Type: webrtc.SDPTypeOffer,
+ }); err != nil {
+ return nil, err
+ }
+ answer, err := w.PeerConnection.CreateAnswer(nil)
+ if err != nil {
+ return nil, err
+ }
+ if err := w.PeerConnection.SetLocalDescription(answer); err != nil {
+ return nil, err
+ }
+
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ case err := <-errC:
+ if err != nil {
+ return nil, fmt.Errorf("peer connection failed for ufrag: %s", candidate.Ufrag)
+ }
+ }
+
+ // Run the noise handshake.
+ rwc, err := detachHandshakeDataChannel(ctx, w.HandshakeDataChannel)
+ if err != nil {
+ return nil, err
+ }
+ handshakeChannel := newStream(w.HandshakeDataChannel, rwc, maxSendMessageSize, nil)
+ // We do not yet know the dialer's peer ID, so we accept any inbound peer here.
+ remotePubKey, err := l.transport.noiseHandshake(ctx, w.PeerConnection, handshakeChannel, "", crypto.SHA256, true)
+ if err != nil {
+ return nil, err
+ }
+ remotePeer, err := peer.IDFromPublicKey(remotePubKey)
+ if err != nil {
+ return nil, err
+ }
+ // earliest point where we know the remote's peerID
+ if err := scope.SetPeer(remotePeer); err != nil {
+ return nil, err
+ }
+
+ localMultiaddrWithoutCerthash, _ := ma.SplitFunc(l.localMultiaddr, func(c ma.Component) bool { return c.Protocol().Code == ma.P_CERTHASH })
+ conn, err := newConnection(
+ network.DirInbound,
+ w.PeerConnection,
+ l.transport,
+ scope,
+ l.transport.localPeerId,
+ localMultiaddrWithoutCerthash,
+ remotePeer,
+ remotePubKey,
+ remoteMultiaddr,
+ w.IncomingDataChannels,
+ w.PeerConnectionClosedCh,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ return conn, nil
+}
+
+func (l *listener) Accept() (tpt.CapableConn, error) {
+ select {
+ case <-l.ctx.Done():
+ return nil, tpt.ErrListenerClosed
+ case conn := <-l.acceptQueue:
+ return conn, nil
+ }
+}
+
+func (l *listener) Close() error {
+ l.cancel()
+ l.mux.Close()
+ l.wg.Wait()
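+ // Drain connections that were queued for Accept but never retrieved.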
+loop:
+ for {
+ select {
+ case conn := <-l.acceptQueue:
+ conn.Close()
+ default:
+ break loop
+ }
+ }
+ return nil
+}
+
+func (l *listener) Addr() net.Addr {
+ return l.localAddr
+}
+
+func (l *listener) Multiaddr() ma.Multiaddr {
+ return l.localMultiaddr
+}
+
+// addOnConnectionStateChangeCallback adds the OnConnectionStateChange to the PeerConnection.
+// The channel returned here:
+// * is closed when the state changes to Connected
+// * receives an error when the state changes to Failed or Closed or Disconnected
+func addOnConnectionStateChangeCallback(pc *webrtc.PeerConnection) <-chan error {
+ errC := make(chan error, 1)
+ var once sync.Once
+ pc.OnConnectionStateChange(func(_ webrtc.PeerConnectionState) {
+ switch pc.ConnectionState() {
+ case webrtc.PeerConnectionStateConnected:
+ once.Do(func() { close(errC) })
+ // PeerConnectionStateFailed happens when we fail to negotiate the connection.
+ // PeerConnectionStateDisconnected happens when we disconnect immediately after connecting.
+ // PeerConnectionStateClosed happens when we close the peer connection locally, not when remote closes. We don't need
+ // to error in this case, but it's a no-op, so it doesn't hurt.
+ case webrtc.PeerConnectionStateFailed, webrtc.PeerConnectionStateClosed, webrtc.PeerConnectionStateDisconnected:
+ once.Do(func() {
+ errC <- errors.New("peerconnection failed")
+ close(errC)
+ })
+ }
+ })
+ return errC
+}
diff --git a/p2p/transport/webrtc/logger.go b/p2p/transport/webrtc/logger.go
new file mode 100644
index 0000000000..e2d1aabb82
--- /dev/null
+++ b/p2p/transport/webrtc/logger.go
@@ -0,0 +1,90 @@
+package libp2pwebrtc
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+
+ logging "github.com/libp2p/go-libp2p/gologshim"
+ pionLogging "github.com/pion/logging"
+)
+
+var log = logging.Logger("webrtc-transport")
+
+// pionLog is the logger provided to pion for internal logging
+var pionLog = logging.Logger("webrtc-transport-pion")
+
+// pionLogger wraps a *slog.Logger to provide the LeveledLogger interface
+// expected by pion.
+// Pion's logs are noisy and use non-standard levels, so Trace logs are
+// downgraded to Debug.
+type pionLogger struct {
+ *slog.Logger
+}
+
+var pLog = pionLogger{pionLog}
+
+var _ pionLogging.LeveledLogger = pLog
+
+func (l pionLogger) Debug(s string) {
+ l.Logger.Debug(s)
+}
+
+func (l pionLogger) Debugf(s string, args ...interface{}) {
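+ // Check Enabled first to avoid the fmt.Sprintf allocation when the debug
+ // level is disabled. The same pattern is used for the other levels below.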
+ if l.Logger.Enabled(context.Background(), slog.LevelDebug) {
+ l.Logger.Debug(fmt.Sprintf(s, args...))
+ }
+}
+
+func (l pionLogger) Error(s string) {
+ l.Logger.Error(s)
+}
+
+func (l pionLogger) Errorf(s string, args ...interface{}) {
+ if l.Logger.Enabled(context.Background(), slog.LevelError) {
+ l.Logger.Error(fmt.Sprintf(s, args...))
+ }
+}
+
+func (l pionLogger) Info(s string) {
+ l.Logger.Info(s)
+}
+
+func (l pionLogger) Infof(s string, args ...interface{}) {
+ if l.Logger.Enabled(context.Background(), slog.LevelInfo) {
+ l.Logger.Info(fmt.Sprintf(s, args...))
+ }
+}
+
+func (l pionLogger) Warn(s string) {
+ l.Logger.Warn(s)
+}
+
+func (l pionLogger) Warnf(s string, args ...interface{}) {
+ if l.Logger.Enabled(context.Background(), slog.LevelWarn) {
+ l.Logger.Warn(fmt.Sprintf(s, args...))
+ }
+}
+
+func (l pionLogger) Trace(s string) {
+ l.Logger.Debug(s)
+}
+
+func (l pionLogger) Tracef(s string, args ...interface{}) {
+ if l.Logger.Enabled(context.Background(), slog.LevelDebug) {
+ l.Logger.Debug(fmt.Sprintf(s, args...))
+ }
+}
+
+// loggerFactory returns pLog for all new logger instances
+type loggerFactory struct{}
+
+// NewLogger returns pLog for all new logger instances. Internally pion creates lots of
+// separate logging objects unnecessarily. To avoid the allocations we use a single log
+// object for all of pion logging.
+func (loggerFactory) NewLogger(_ string) pionLogging.LeveledLogger {
+ return pLog
+}
+
+var _ pionLogging.LoggerFactory = loggerFactory{}
+
+var pionLoggerFactory = loggerFactory{}
diff --git a/p2p/transport/webrtc/pb/message.pb.go b/p2p/transport/webrtc/pb/message.pb.go
new file mode 100644
index 0000000000..8918897d3e
--- /dev/null
+++ b/p2p/transport/webrtc/pb/message.pb.go
@@ -0,0 +1,219 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.6
+// protoc v5.29.2
+// source: p2p/transport/webrtc/pb/message.proto
+
+package pb
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+ unsafe "unsafe"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type Message_Flag int32
+
+const (
+ // The sender will no longer send messages on the stream.
+ Message_FIN Message_Flag = 0
+ // The sender will no longer read messages on the stream. Incoming data is
+ // being discarded on receipt.
+ Message_STOP_SENDING Message_Flag = 1
+ // The sender abruptly terminates the sending part of the stream. The
+ // receiver can discard any data that it already received on that stream.
+ Message_RESET Message_Flag = 2
+ // Sending the FIN_ACK flag acknowledges the previous receipt of a message
+ // with the FIN flag set. Receiving a FIN_ACK flag gives the recipient
+ // confidence that the remote has received all sent messages.
+ Message_FIN_ACK Message_Flag = 3
+)
+
+// Enum value maps for Message_Flag.
+var (
+ Message_Flag_name = map[int32]string{
+ 0: "FIN",
+ 1: "STOP_SENDING",
+ 2: "RESET",
+ 3: "FIN_ACK",
+ }
+ Message_Flag_value = map[string]int32{
+ "FIN": 0,
+ "STOP_SENDING": 1,
+ "RESET": 2,
+ "FIN_ACK": 3,
+ }
+)
+
+func (x Message_Flag) Enum() *Message_Flag {
+ p := new(Message_Flag)
+ *p = x
+ return p
+}
+
+func (x Message_Flag) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Message_Flag) Descriptor() protoreflect.EnumDescriptor {
+ return file_p2p_transport_webrtc_pb_message_proto_enumTypes[0].Descriptor()
+}
+
+func (Message_Flag) Type() protoreflect.EnumType {
+ return &file_p2p_transport_webrtc_pb_message_proto_enumTypes[0]
+}
+
+func (x Message_Flag) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *Message_Flag) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+ if err != nil {
+ return err
+ }
+ *x = Message_Flag(num)
+ return nil
+}
+
+// Deprecated: Use Message_Flag.Descriptor instead.
+func (Message_Flag) EnumDescriptor() ([]byte, []int) {
+ return file_p2p_transport_webrtc_pb_message_proto_rawDescGZIP(), []int{0, 0}
+}
+
+type Message struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Flag *Message_Flag `protobuf:"varint,1,opt,name=flag,enum=Message_Flag" json:"flag,omitempty"`
+ Message []byte `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"`
+ ErrorCode *uint32 `protobuf:"varint,3,opt,name=errorCode" json:"errorCode,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Message) Reset() {
+ *x = Message{}
+ mi := &file_p2p_transport_webrtc_pb_message_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Message) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Message) ProtoMessage() {}
+
+func (x *Message) ProtoReflect() protoreflect.Message {
+ mi := &file_p2p_transport_webrtc_pb_message_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Message.ProtoReflect.Descriptor instead.
+func (*Message) Descriptor() ([]byte, []int) {
+ return file_p2p_transport_webrtc_pb_message_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Message) GetFlag() Message_Flag {
+ if x != nil && x.Flag != nil {
+ return *x.Flag
+ }
+ return Message_FIN
+}
+
+func (x *Message) GetMessage() []byte {
+ if x != nil {
+ return x.Message
+ }
+ return nil
+}
+
+func (x *Message) GetErrorCode() uint32 {
+ if x != nil && x.ErrorCode != nil {
+ return *x.ErrorCode
+ }
+ return 0
+}
+
+var File_p2p_transport_webrtc_pb_message_proto protoreflect.FileDescriptor
+
+const file_p2p_transport_webrtc_pb_message_proto_rawDesc = "" +
+ "\n" +
+ "%p2p/transport/webrtc/pb/message.proto\"\x9f\x01\n" +
+ "\aMessage\x12!\n" +
+ "\x04flag\x18\x01 \x01(\x0e2\r.Message.FlagR\x04flag\x12\x18\n" +
+ "\amessage\x18\x02 \x01(\fR\amessage\x12\x1c\n" +
+ "\terrorCode\x18\x03 \x01(\rR\terrorCode\"9\n" +
+ "\x04Flag\x12\a\n" +
+ "\x03FIN\x10\x00\x12\x10\n" +
+ "\fSTOP_SENDING\x10\x01\x12\t\n" +
+ "\x05RESET\x10\x02\x12\v\n" +
+ "\aFIN_ACK\x10\x03B5Z3github.com/libp2p/go-libp2p/p2p/transport/webrtc/pb"
+
+var (
+ file_p2p_transport_webrtc_pb_message_proto_rawDescOnce sync.Once
+ file_p2p_transport_webrtc_pb_message_proto_rawDescData []byte
+)
+
+func file_p2p_transport_webrtc_pb_message_proto_rawDescGZIP() []byte {
+ file_p2p_transport_webrtc_pb_message_proto_rawDescOnce.Do(func() {
+ file_p2p_transport_webrtc_pb_message_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_p2p_transport_webrtc_pb_message_proto_rawDesc), len(file_p2p_transport_webrtc_pb_message_proto_rawDesc)))
+ })
+ return file_p2p_transport_webrtc_pb_message_proto_rawDescData
+}
+
+var file_p2p_transport_webrtc_pb_message_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_p2p_transport_webrtc_pb_message_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_p2p_transport_webrtc_pb_message_proto_goTypes = []any{
+ (Message_Flag)(0), // 0: Message.Flag
+ (*Message)(nil), // 1: Message
+}
+var file_p2p_transport_webrtc_pb_message_proto_depIdxs = []int32{
+ 0, // 0: Message.flag:type_name -> Message.Flag
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_p2p_transport_webrtc_pb_message_proto_init() }
+func file_p2p_transport_webrtc_pb_message_proto_init() {
+ if File_p2p_transport_webrtc_pb_message_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_p2p_transport_webrtc_pb_message_proto_rawDesc), len(file_p2p_transport_webrtc_pb_message_proto_rawDesc)),
+ NumEnums: 1,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_p2p_transport_webrtc_pb_message_proto_goTypes,
+ DependencyIndexes: file_p2p_transport_webrtc_pb_message_proto_depIdxs,
+ EnumInfos: file_p2p_transport_webrtc_pb_message_proto_enumTypes,
+ MessageInfos: file_p2p_transport_webrtc_pb_message_proto_msgTypes,
+ }.Build()
+ File_p2p_transport_webrtc_pb_message_proto = out.File
+ file_p2p_transport_webrtc_pb_message_proto_goTypes = nil
+ file_p2p_transport_webrtc_pb_message_proto_depIdxs = nil
+}
diff --git a/p2p/transport/webrtc/pb/message.proto b/p2p/transport/webrtc/pb/message.proto
new file mode 100644
index 0000000000..2401f7c4d2
--- /dev/null
+++ b/p2p/transport/webrtc/pb/message.proto
@@ -0,0 +1,26 @@
+syntax = "proto2";
+
+option go_package = "github.com/libp2p/go-libp2p/p2p/transport/webrtc/pb";
+
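+// Message is the frame exchanged on each data channel. Frames are
+// varint-delimited on the wire by go-msgio's pbio reader/writer (see
+// stream.go).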
+message Message {
+ enum Flag {
+ // The sender will no longer send messages on the stream.
+ FIN = 0;
+ // The sender will no longer read messages on the stream. Incoming data is
+ // being discarded on receipt.
+ STOP_SENDING = 1;
+ // The sender abruptly terminates the sending part of the stream. The
+ // receiver can discard any data that it already received on that stream.
+ RESET = 2;
+ // Sending the FIN_ACK flag acknowledges the previous receipt of a message
+ // with the FIN flag set. Receiving a FIN_ACK flag gives the recipient
+ // confidence that the remote has received all sent messages.
+ FIN_ACK = 3;
+ }
+
+ optional Flag flag = 1;
+
+ optional bytes message = 2;
+
+ optional uint32 errorCode = 3;
+}
diff --git a/p2p/transport/webrtc/sdp.go b/p2p/transport/webrtc/sdp.go
new file mode 100644
index 0000000000..878b668a18
--- /dev/null
+++ b/p2p/transport/webrtc/sdp.go
@@ -0,0 +1,143 @@
+package libp2pwebrtc
+
+import (
+ "crypto"
+ "fmt"
+ "net"
+ "strings"
+
+ "github.com/multiformats/go-multihash"
+)
+
+// clientSDP describes an SDP format string which can be used
+// to infer a client's SDP offer from the incoming STUN message.
+// The fingerprint used to render a client SDP is arbitrary, since
+// fingerprint verification is disabled in favour of a Noise
+// handshake. The max message size is fixed to 16384 bytes.
+const clientSDP = `v=0
+o=- 0 0 IN %[1]s %[2]s
+s=-
+c=IN %[1]s %[2]s
+t=0 0
+
+m=application %[3]d UDP/DTLS/SCTP webrtc-datachannel
+a=mid:0
+a=ice-options:ice2
+a=ice-ufrag:%[4]s
+a=ice-pwd:%[4]s
+a=fingerprint:sha-256 ba:78:16:bf:8f:01:cf:ea:41:41:40:de:5d:ae:22:23:b0:03:61:a3:96:17:7a:9c:b4:10:ff:61:f2:00:15:ad
+a=setup:actpass
+a=sctp-port:5000
+a=max-message-size:16384
+`
+
+func createClientSDP(addr *net.UDPAddr, ufrag string) string {
+ ipVersion := "IP4"
+ if addr.IP.To4() == nil {
+ ipVersion = "IP6"
+ }
+ return fmt.Sprintf(
+ clientSDP,
+ ipVersion,
+ addr.IP,
+ addr.Port,
+ ufrag,
+ )
+}
+
+// serverSDP defines an SDP format string used by a dialer
+// to infer the SDP answer of a server based on the provided
+// multiaddr, and the locally set ICE credentials. The max
+// message size is fixed to 16384 bytes.
+const serverSDP = `v=0
+o=- 0 0 IN %[1]s %[2]s
+s=-
+t=0 0
+a=ice-lite
+m=application %[3]d UDP/DTLS/SCTP webrtc-datachannel
+c=IN %[1]s %[2]s
+a=mid:0
+a=ice-options:ice2
+a=ice-ufrag:%[4]s
+a=ice-pwd:%[4]s
+a=fingerprint:%[5]s
+
+a=setup:passive
+a=sctp-port:5000
+a=max-message-size:16384
+a=candidate:1 1 UDP 1 %[2]s %[3]d typ host
+a=end-of-candidates
+`
+
+func createServerSDP(addr *net.UDPAddr, ufrag string, fingerprint multihash.DecodedMultihash) (string, error) {
+ ipVersion := "IP4"
+ if addr.IP.To4() == nil {
+ ipVersion = "IP6"
+ }
+
+ sdpString, err := getSupportedSDPString(fingerprint.Code)
+ if err != nil {
+ return "", err
+ }
+
+ var builder strings.Builder
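+ // Grow sizing: encodeInterspersedHex renders each digest byte as two hex
+ // characters plus a ':' separator (3n-1 characters for n bytes), and the
+ // longest supported algorithm string ("sha3-224"/"sha3-384") is 8
+ // characters plus one space.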
+ builder.Grow(len(fingerprint.Digest)*3 + 8)
+ builder.WriteString(sdpString)
+ builder.WriteByte(' ')
+ builder.WriteString(encodeInterspersedHex(fingerprint.Digest))
+ fp := builder.String()
+
+ return fmt.Sprintf(
+ serverSDP,
+ ipVersion,
+ addr.IP,
+ addr.Port,
+ ufrag,
+ fp,
+ ), nil
+}
+
+// getSupportedSDPHash converts a multihash code to the
+// corresponding crypto.Hash for supported algorithms. If a
+// crypto.Hash cannot be found, it returns `(0, false)`.
+func getSupportedSDPHash(code uint64) (crypto.Hash, bool) {
+ switch code {
+ case multihash.MD5:
+ return crypto.MD5, true
+ case multihash.SHA1:
+ return crypto.SHA1, true
+ case multihash.SHA3_224:
+ return crypto.SHA3_224, true
+ case multihash.SHA2_256:
+ return crypto.SHA256, true
+ case multihash.SHA3_384:
+ return crypto.SHA3_384, true
+ case multihash.SHA2_512:
+ return crypto.SHA512, true
+ default:
+ return 0, false
+ }
+}
+
+// getSupportedSDPString converts a multihash code to the
+// string format recognised by pion for fingerprint
+// algorithms.
+func getSupportedSDPString(code uint64) (string, error) {
+ // values based on (crypto.Hash).String()
+ switch code {
+ case multihash.MD5:
+ return "md5", nil
+ case multihash.SHA1:
+ return "sha-1", nil
+ case multihash.SHA3_224:
+ return "sha3-224", nil
+ case multihash.SHA2_256:
+ return "sha-256", nil
+ case multihash.SHA3_384:
+ return "sha3-384", nil
+ case multihash.SHA2_512:
+ return "sha-512", nil
+ default:
+ return "", fmt.Errorf("unsupported hash code (%d)", code)
+ }
+}
diff --git a/p2p/transport/webrtc/sdp_test.go b/p2p/transport/webrtc/sdp_test.go
new file mode 100644
index 0000000000..2e7ac5b3e8
--- /dev/null
+++ b/p2p/transport/webrtc/sdp_test.go
@@ -0,0 +1,101 @@
+package libp2pwebrtc
+
+import (
+ "encoding/hex"
+ "net"
+ "testing"
+
+ "github.com/multiformats/go-multihash"
+ "github.com/stretchr/testify/require"
+)
+
+const expectedServerSDP = `v=0
+o=- 0 0 IN IP4 0.0.0.0
+s=-
+t=0 0
+a=ice-lite
+m=application 37826 UDP/DTLS/SCTP webrtc-datachannel
+c=IN IP4 0.0.0.0
+a=mid:0
+a=ice-options:ice2
+a=ice-ufrag:d2c0fc07-8bb3-42ae-bae2-a6fce8a0b581
+a=ice-pwd:d2c0fc07-8bb3-42ae-bae2-a6fce8a0b581
+a=fingerprint:sha-256 ba:78:16:bf:8f:01:cf:ea:41:41:40:de:5d:ae:22:23:b0:03:61:a3:96:17:7a:9c:b4:10:ff:61:f2:00:15:ad
+
+a=setup:passive
+a=sctp-port:5000
+a=max-message-size:16384
+a=candidate:1 1 UDP 1 0.0.0.0 37826 typ host
+a=end-of-candidates
+`
+
+func TestRenderServerSDP(t *testing.T) {
+ encoded, err := hex.DecodeString("ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad")
+ require.NoError(t, err)
+
+ testMultihash := multihash.DecodedMultihash{
+ Code: multihash.SHA2_256,
+ Name: multihash.Codes[multihash.SHA2_256],
+ Digest: encoded,
+ Length: len(encoded),
+ }
+ addr := &net.UDPAddr{IP: net.IPv4(0, 0, 0, 0), Port: 37826}
+ ufrag := "d2c0fc07-8bb3-42ae-bae2-a6fce8a0b581"
+ fingerprint := testMultihash
+
+ sdp, err := createServerSDP(addr, ufrag, fingerprint)
+ require.NoError(t, err)
+ require.Equal(t, expectedServerSDP, sdp)
+}
+
+const expectedClientSDP = `v=0
+o=- 0 0 IN IP4 0.0.0.0
+s=-
+c=IN IP4 0.0.0.0
+t=0 0
+
+m=application 37826 UDP/DTLS/SCTP webrtc-datachannel
+a=mid:0
+a=ice-options:ice2
+a=ice-ufrag:d2c0fc07-8bb3-42ae-bae2-a6fce8a0b581
+a=ice-pwd:d2c0fc07-8bb3-42ae-bae2-a6fce8a0b581
+a=fingerprint:sha-256 ba:78:16:bf:8f:01:cf:ea:41:41:40:de:5d:ae:22:23:b0:03:61:a3:96:17:7a:9c:b4:10:ff:61:f2:00:15:ad
+a=setup:actpass
+a=sctp-port:5000
+a=max-message-size:16384
+`
+
+func TestRenderClientSDP(t *testing.T) {
+ addr := &net.UDPAddr{IP: net.IPv4(0, 0, 0, 0), Port: 37826}
+ ufrag := "d2c0fc07-8bb3-42ae-bae2-a6fce8a0b581"
+ sdp := createClientSDP(addr, ufrag)
+ require.Equal(t, expectedClientSDP, sdp)
+}
+
+func BenchmarkRenderClientSDP(b *testing.B) {
+ addr := &net.UDPAddr{IP: net.IPv4(0, 0, 0, 0), Port: 37826}
+ ufrag := "d2c0fc07-8bb3-42ae-bae2-a6fce8a0b581"
+
+ for i := 0; i < b.N; i++ {
+ createClientSDP(addr, ufrag)
+ }
+}
+
+func BenchmarkRenderServerSDP(b *testing.B) {
+ encoded, _ := hex.DecodeString("ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad")
+
+ testMultihash := multihash.DecodedMultihash{
+ Code: multihash.SHA2_256,
+ Name: multihash.Codes[multihash.SHA2_256],
+ Digest: encoded,
+ Length: len(encoded),
+ }
+ addr := &net.UDPAddr{IP: net.IPv4(0, 0, 0, 0), Port: 37826}
+ ufrag := "d2c0fc07-8bb3-42ae-bae2-a6fce8a0b581"
+ fingerprint := testMultihash
+
+ for i := 0; i < b.N; i++ {
+ createServerSDP(addr, ufrag, fingerprint)
+ }
+}
diff --git a/p2p/transport/webrtc/stream.go b/p2p/transport/webrtc/stream.go
new file mode 100644
index 0000000000..ee8993c115
--- /dev/null
+++ b/p2p/transport/webrtc/stream.go
@@ -0,0 +1,293 @@
+package libp2pwebrtc
+
+import (
+ "errors"
+ "os"
+ "sync"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/p2p/transport/webrtc/pb"
+ "github.com/libp2p/go-msgio/pbio"
+
+ "github.com/pion/datachannel"
+ "github.com/pion/webrtc/v4"
+)
+
+const (
+ // maxSendMessageSize is the maximum size of the Protobuf message we send.
+ // NOTE: Change `varintOverhead` if you change this.
+ maxSendMessageSize = 16384
+ // Proto overhead assumption is 5 bytes
+ protoOverhead = 5
+ // Varint overhead is assumed to be 2 bytes. This is safe since
+ // 1. This is only used when writing a message, and
+ // 2. We only send messages in chunks of `maxSendMessageSize - varintOverhead`,
+ // which includes the data and the protobuf header. Since `maxSendMessageSize`
+ // is less than or equal to 2^14, the varint will not be more than
+ // 2 bytes in length.
+ varintOverhead = 2
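+ //
+ // Concretely: a varint encodes 7 bits per byte, so two bytes cover lengths
+ // up to 2^14-1 = 16383, and every frame we write is at most
+ // maxSendMessageSize - varintOverhead = 16382 bytes.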
+
+ // maxTotalControlMessagesSize is the maximum total size of all control messages we will
+ // write on this stream.
+ // 4 control messages of size 10 bytes + 10 bytes buffer. This number doesn't need to be
+ // exact. In the worst case, we enqueue these many bytes more in the webrtc peer connection
+ // send queue.
+ maxTotalControlMessagesSize = 50
+
+ // maxFINACKWait is the maximum amount of time a stream will wait to read
+ // FIN_ACK before closing the data channel
+ maxFINACKWait = 10 * time.Second
+
+ // maxReceiveMessageSize is the maximum message size of the Protobuf message we receive.
+ maxReceiveMessageSize = 256<<10 + 1<<10 // 256 kB plus 1 kB of headroom
+)
+
+type receiveState uint8
+
+const (
+ receiveStateReceiving receiveState = iota
+ receiveStateDataRead // received and read the FIN
+ receiveStateReset // either by calling CloseRead locally, or by receiving a RESET from the remote
+)
+
+type sendState uint8
+
+const (
+ sendStateSending sendState = iota
+ sendStateDataSent // write half closed, FIN sent
+ sendStateDataReceived // FIN_ACK received for our FIN
+ sendStateReset // write half reset, locally or by the remote
+)
+
+// stream wraps a pion detached data channel in a network.MuxedStream.
+type stream struct {
+ mx sync.Mutex
+
+ // readerMx ensures that only a single goroutine reads from the reader. Read is not
+ // thread-safe, but we may need to read control messages from a different goroutine.
+ readerMx sync.Mutex
+ reader pbio.Reader
+ readError error
+
+ // nextMessage buffers at most a single message. We need it because a
+ // reader might consume a message only partially, so we buffer the
+ // remainder until it is (eventually) read.
+ nextMessage *pb.Message
+ receiveState receiveState
+
+ writer pbio.Writer // concurrent writes prevented by mx
+ writeStateChanged chan struct{}
+ sendState sendState
+ writeDeadline time.Time
+ writeError error
+ maxSendMessageSize int
+
+ controlMessageReaderOnce sync.Once
+ // controlMessageReaderEndTime is the end time for reading FIN_ACK from the control
+ // message reader. We cannot rely on SetReadDeadline to do this since that is prone to
+ // a race condition where a previous deadline timer fires after the latest call to
+ // SetReadDeadline.
+ // See: https://github.com/pion/sctp/pull/290
+ controlMessageReaderEndTime time.Time
+
+ onDoneOnce sync.Once
+ onDone func()
+ id uint16 // for logging purposes
+ dataChannel *datachannel.DataChannel
+ closeForShutdownErr error
+}
+
+var _ network.MuxedStream = &stream{}
+
+func newStream(
+ channel *webrtc.DataChannel,
+ rwc datachannel.ReadWriteCloser,
+ maxSendMessageSize int,
+ onDone func(),
+) *stream {
+ s := &stream{
+ reader: pbio.NewDelimitedReader(rwc, maxReceiveMessageSize),
+ writer: pbio.NewDelimitedWriter(rwc),
+ writeStateChanged: make(chan struct{}, 1),
+ id: *channel.ID(),
+ dataChannel: rwc.(*datachannel.DataChannel),
+ onDone: onDone,
+ maxSendMessageSize: maxSendMessageSize,
+ }
+ s.dataChannel.SetBufferedAmountLowThreshold(uint64(s.sendBufferLowThreshold()))
+ s.dataChannel.OnBufferedAmountLow(func() {
+ s.notifyWriteStateChanged()
+ })
+ return s
+}
+
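+// Close closes the stream gracefully: it closes the write half by sending a
+// FIN, closes the read half by sending a STOP_SENDING, and then waits up to
+// maxFINACKWait for the remote's FIN_ACK before closing the data channel.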
+func (s *stream) Close() error {
+ s.mx.Lock()
+ isClosed := s.closeForShutdownErr != nil
+ s.mx.Unlock()
+ if isClosed {
+ return nil
+ }
+ defer s.cleanup()
+ closeWriteErr := s.CloseWrite()
+ closeReadErr := s.CloseRead()
+ if closeWriteErr != nil || closeReadErr != nil {
+ s.Reset()
+ return errors.Join(closeWriteErr, closeReadErr)
+ }
+
+ s.mx.Lock()
+ if s.controlMessageReaderEndTime.IsZero() {
+ s.controlMessageReaderEndTime = time.Now().Add(maxFINACKWait)
+ s.setDataChannelReadDeadline(time.Now().Add(-1 * time.Hour))
+ }
+ s.mx.Unlock()
+ return nil
+}
+
+func (s *stream) Reset() error {
+ return s.ResetWithError(0)
+}
+
+func (s *stream) ResetWithError(errCode network.StreamErrorCode) error {
+ s.mx.Lock()
+ isClosed := s.closeForShutdownErr != nil
+ s.mx.Unlock()
+ if isClosed {
+ return nil
+ }
+
+ defer s.cleanup()
+ cancelWriteErr := s.cancelWrite(errCode)
+ closeReadErr := s.closeRead(errCode, false)
+ s.setDataChannelReadDeadline(time.Now().Add(-1 * time.Hour))
+ return errors.Join(closeReadErr, cancelWriteErr)
+}
+
+func (s *stream) closeForShutdown(closeErr error) {
+ defer s.cleanup()
+
+ s.mx.Lock()
+ defer s.mx.Unlock()
+
+ s.closeForShutdownErr = closeErr
+ s.notifyWriteStateChanged()
+}
+
+func (s *stream) SetDeadline(t time.Time) error {
+ _ = s.SetReadDeadline(t)
+ return s.SetWriteDeadline(t)
+}
+
+// processIncomingFlag processes the flag (FIN/RESET/etc.) on msg.
+// It needs to be called while the mutex is locked.
+func (s *stream) processIncomingFlag(msg *pb.Message) {
+ if msg.Flag == nil {
+ return
+ }
+
+ switch msg.GetFlag() {
+ case pb.Message_STOP_SENDING:
+ // We must process STOP_SENDING even after sending a FIN (sendStateDataSent): the
+ // remote peer may not send a FIN_ACK once it has sent a STOP_SENDING.
+ if s.sendState == sendStateSending || s.sendState == sendStateDataSent {
+ s.sendState = sendStateReset
+ s.writeError = &network.StreamError{Remote: true, ErrorCode: network.StreamErrorCode(msg.GetErrorCode())}
+ }
+ s.notifyWriteStateChanged()
+ case pb.Message_FIN_ACK:
+ s.sendState = sendStateDataReceived
+ s.notifyWriteStateChanged()
+ case pb.Message_FIN:
+ if s.receiveState == receiveStateReceiving {
+ s.receiveState = receiveStateDataRead
+ }
+ if err := s.writer.WriteMsg(&pb.Message{Flag: pb.Message_FIN_ACK.Enum()}); err != nil {
+ log.Debug("failed to send FIN_ACK", "error", err)
+ // The remote has finished writing all the data. It'll stop waiting for the
+ // FIN_ACK eventually, or will be notified when we close the datachannel.
+ }
+ s.spawnControlMessageReader()
+ case pb.Message_RESET:
+ if s.receiveState == receiveStateReceiving {
+ s.receiveState = receiveStateReset
+ s.readError = &network.StreamError{Remote: true, ErrorCode: network.StreamErrorCode(msg.GetErrorCode())}
+ }
+ if s.sendState == sendStateSending || s.sendState == sendStateDataSent {
+ s.sendState = sendStateReset
+ s.writeError = &network.StreamError{Remote: true, ErrorCode: network.StreamErrorCode(msg.GetErrorCode())}
+ }
+ s.spawnControlMessageReader()
+ }
+}
+
+// spawnControlMessageReader is used for processing control messages after the reader is closed.
+func (s *stream) spawnControlMessageReader() {
+ s.controlMessageReaderOnce.Do(func() {
+ // Spawn a goroutine to ensure that we're not holding any locks
+ go func() {
+ // cleanup the sctp deadline timer goroutine
+ defer s.setDataChannelReadDeadline(time.Time{})
+
+ defer s.dataChannel.Close()
+
+ // Unblock any Read call waiting on reader.ReadMsg
+ s.setDataChannelReadDeadline(time.Now().Add(-1 * time.Hour))
+
+ s.readerMx.Lock()
+ // We have the lock: any readers blocked on reader.ReadMsg have exited.
+ s.mx.Lock()
+ defer s.mx.Unlock()
+ // From this point onwards only this goroutine will do reader.ReadMsg.
+ // We just wanted to ensure any existing readers have exited.
+ // Read calls from this point onwards will exit immediately on checking
+ // s.receiveState.
+ s.readerMx.Unlock()
+
+ if s.nextMessage != nil {
+ s.processIncomingFlag(s.nextMessage)
+ s.nextMessage = nil
+ }
+ var msg pb.Message
+ for {
+ // Connection closed. No need to cleanup the data channel.
+ if s.closeForShutdownErr != nil {
+ return
+ }
+ // Write half of the stream completed.
+ if s.sendState == sendStateDataReceived || s.sendState == sendStateReset {
+ return
+ }
+ // FIN_ACK wait deadline exceeded.
+ if !s.controlMessageReaderEndTime.IsZero() && time.Now().After(s.controlMessageReaderEndTime) {
+ return
+ }
+
+ s.setDataChannelReadDeadline(s.controlMessageReaderEndTime)
+ s.mx.Unlock()
+ err := s.reader.ReadMsg(&msg)
+ s.mx.Lock()
+ if err != nil {
+ // We have to manually manage deadline exceeded errors since pion/sctp can
+ // return deadline exceeded error for cancelled deadlines
+ // see: https://github.com/pion/sctp/pull/290/files
+ if errors.Is(err, os.ErrDeadlineExceeded) {
+ continue
+ }
+ return
+ }
+ s.processIncomingFlag(&msg)
+ }
+ }()
+ })
+}
+
+func (s *stream) cleanup() {
+ s.onDoneOnce.Do(func() {
+ if s.onDone != nil {
+ s.onDone()
+ }
+ })
+}
diff --git a/p2p/transport/webrtc/stream_read.go b/p2p/transport/webrtc/stream_read.go
new file mode 100644
index 0000000000..003d5f563e
--- /dev/null
+++ b/p2p/transport/webrtc/stream_read.go
@@ -0,0 +1,120 @@
+package libp2pwebrtc
+
+import (
+ "io"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/p2p/transport/webrtc/pb"
+)
+
+func (s *stream) Read(b []byte) (int, error) {
+ s.readerMx.Lock()
+ defer s.readerMx.Unlock()
+
+ s.mx.Lock()
+ defer s.mx.Unlock()
+
+ if s.closeForShutdownErr != nil {
+ return 0, s.closeForShutdownErr
+ }
+ switch s.receiveState {
+ case receiveStateDataRead:
+ return 0, io.EOF
+ case receiveStateReset:
+ return 0, s.readError
+ }
+
+ if len(b) == 0 {
+ return 0, nil
+ }
+
+ var read int
+ for {
+ if s.nextMessage == nil {
+ // load the next message
+ s.mx.Unlock()
+ var msg pb.Message
+ err := s.reader.ReadMsg(&msg)
+ s.mx.Lock()
+ if err != nil {
+ // connection was closed
+ if s.closeForShutdownErr != nil {
+ return 0, s.closeForShutdownErr
+ }
+ if err == io.EOF {
+ // if the channel was properly closed, return EOF
+ if s.receiveState == receiveStateDataRead {
+ return 0, io.EOF
+ }
+ // This case occurs when the remote closes the datachannel without writing a FIN
+ // message. Some implementations discard the buffered data on closing the
+ // datachannel. For these implementations a stream reset will be observed as an
+ // abrupt closing of the datachannel.
+ s.receiveState = receiveStateReset
+ s.readError = &network.StreamError{Remote: true}
+ return 0, s.readError
+ }
+ if s.receiveState == receiveStateReset {
+ return 0, s.readError
+ }
+ if s.receiveState == receiveStateDataRead {
+ return 0, io.EOF
+ }
+ return 0, err
+ }
+ s.nextMessage = &msg
+ }
+
+ if len(s.nextMessage.Message) > 0 {
+ n := copy(b, s.nextMessage.Message)
+ read += n
+ s.nextMessage.Message = s.nextMessage.Message[n:]
+ return read, nil
+ }
+
+ // process flags on the message after reading all the data
+ s.processIncomingFlag(s.nextMessage)
+ s.nextMessage = nil
+ if s.closeForShutdownErr != nil {
+ return read, s.closeForShutdownErr
+ }
+ switch s.receiveState {
+ case receiveStateDataRead:
+ return read, io.EOF
+ case receiveStateReset:
+ return read, s.readError
+ }
+ }
+}
+
+func (s *stream) SetReadDeadline(t time.Time) error {
+ s.mx.Lock()
+ defer s.mx.Unlock()
+ if s.receiveState == receiveStateReceiving {
+ s.setDataChannelReadDeadline(t)
+ }
+ return nil
+}
+
+func (s *stream) setDataChannelReadDeadline(t time.Time) error {
+ return s.dataChannel.SetReadDeadline(t)
+}
+
+func (s *stream) CloseRead() error {
+ return s.closeRead(0, false)
+}
+
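+// closeRead closes the read half of the stream. It sends a STOP_SENDING
+// message carrying errCode to the remote, marks the receive state as reset,
+// and hands further reads over to the control message reader.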
+func (s *stream) closeRead(errCode network.StreamErrorCode, remote bool) error {
+ s.mx.Lock()
+ defer s.mx.Unlock()
+ var err error
+ if s.receiveState == receiveStateReceiving && s.closeForShutdownErr == nil {
+ code := uint32(errCode)
+ err = s.writer.WriteMsg(&pb.Message{Flag: pb.Message_STOP_SENDING.Enum(), ErrorCode: &code})
+ s.receiveState = receiveStateReset
+ s.readError = &network.StreamError{Remote: remote, ErrorCode: errCode}
+ }
+ s.spawnControlMessageReader()
+ return err
+}
diff --git a/p2p/transport/webrtc/stream_test.go b/p2p/transport/webrtc/stream_test.go
new file mode 100644
index 0000000000..461ed27ff8
--- /dev/null
+++ b/p2p/transport/webrtc/stream_test.go
@@ -0,0 +1,575 @@
+package libp2pwebrtc
+
+import (
+ "crypto/rand"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/p2p/transport/webrtc/pb"
+ "github.com/libp2p/go-msgio/pbio"
+ "google.golang.org/protobuf/proto"
+
+ "github.com/libp2p/go-libp2p/core/network"
+
+ "github.com/pion/datachannel"
+ "github.com/pion/sctp"
+ "github.com/pion/webrtc/v4"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+type detachedChan struct {
+ rwc datachannel.ReadWriteCloser
+ dc *webrtc.DataChannel
+}
+
+func getDetachedDataChannels(t *testing.T) (detachedChan, detachedChan) {
+ s := webrtc.SettingEngine{}
+ s.SetIncludeLoopbackCandidate(true)
+ s.DetachDataChannels()
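+ // Detaching is required to call (*webrtc.DataChannel).Detach and obtain the
+ // raw datachannel.ReadWriteCloser that newStream operates on.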
+ api := webrtc.NewAPI(webrtc.WithSettingEngine(s))
+
+ offerPC, err := api.NewPeerConnection(webrtc.Configuration{})
+ require.NoError(t, err)
+ t.Cleanup(func() { offerPC.Close() })
+ offerRWCChan := make(chan detachedChan, 1)
+ offerDC, err := offerPC.CreateDataChannel("data", nil)
+ require.NoError(t, err)
+ offerDC.OnOpen(func() {
+ rwc, err := offerDC.Detach()
+ require.NoError(t, err)
+ offerRWCChan <- detachedChan{rwc: rwc, dc: offerDC}
+ })
+
+ answerPC, err := api.NewPeerConnection(webrtc.Configuration{})
+ require.NoError(t, err)
+
+ answerChan := make(chan detachedChan, 1)
+ answerPC.OnDataChannel(func(dc *webrtc.DataChannel) {
+ dc.OnOpen(func() {
+ rwc, err := dc.Detach()
+ require.NoError(t, err)
+ answerChan <- detachedChan{rwc: rwc, dc: dc}
+ })
+ })
+ t.Cleanup(func() { answerPC.Close() })
+
+ // Set ICE Candidate handlers. As soon as a PeerConnection has gathered a candidate send it to the other peer
+ answerPC.OnICECandidate(func(candidate *webrtc.ICECandidate) {
+ if candidate != nil {
+ require.NoError(t, offerPC.AddICECandidate(candidate.ToJSON()))
+ }
+ })
+ offerPC.OnICECandidate(func(candidate *webrtc.ICECandidate) {
+ if candidate != nil {
+ require.NoError(t, answerPC.AddICECandidate(candidate.ToJSON()))
+ }
+ })
+
+ // Set the handler for Peer connection state
+ // This will notify you when the peer has connected/disconnected
+ offerPC.OnConnectionStateChange(func(s webrtc.PeerConnectionState) {
+ if s == webrtc.PeerConnectionStateFailed {
+ t.Log("peer connection failed on offerer")
+ }
+ })
+
+ // Set the handler for Peer connection state
+ // This will notify you when the peer has connected/disconnected
+ answerPC.OnConnectionStateChange(func(s webrtc.PeerConnectionState) {
+ if s == webrtc.PeerConnectionStateFailed {
+ t.Log("peer connection failed on answerer")
+ }
+ })
+
+ // Now, create an offer
+ offer, err := offerPC.CreateOffer(nil)
+ require.NoError(t, err)
+ require.NoError(t, answerPC.SetRemoteDescription(offer))
+ require.NoError(t, offerPC.SetLocalDescription(offer))
+
+ answer, err := answerPC.CreateAnswer(nil)
+ require.NoError(t, err)
+ require.NoError(t, offerPC.SetRemoteDescription(answer))
+ require.NoError(t, answerPC.SetLocalDescription(answer))
+
+ return <-answerChan, <-offerRWCChan
+}
+
+// assertDataChannelOpen checks if the datachannel is open.
+// It sends empty messages on the data channel to check if the channel is still open.
+// The control message reader goroutine depends on exclusive access to datachannel.Read
+// so we have to depend on Write to determine whether the channel has been closed.
+func assertDataChannelOpen(t *testing.T, dc *datachannel.DataChannel) {
+ t.Helper()
+ emptyMsg := &pb.Message{}
+ msg, err := proto.Marshal(emptyMsg)
+ if err != nil {
+ t.Fatal("unexpected mashalling error", err)
+ }
+ for i := 0; i < 3; i++ {
+ _, err := dc.Write(msg)
+ if err != nil {
+ t.Fatal("unexpected write err: ", err)
+ }
+ time.Sleep(50 * time.Millisecond)
+ }
+}
+
+// assertDataChannelClosed checks if the datachannel is closed.
+// It sends empty messages on the data channel to check if the channel has been closed.
+// The control message reader goroutine depends on exclusive access to datachannel.Read
+// so we have to depend on Write to determine whether the channel has been closed.
+func assertDataChannelClosed(t *testing.T, dc *datachannel.DataChannel) {
+ t.Helper()
+ emptyMsg := &pb.Message{}
+ msg, err := proto.Marshal(emptyMsg)
+ if err != nil {
+ t.Fatal("unexpected mashalling error", err)
+ }
+ for i := 0; i < 5; i++ {
+ _, err := dc.Write(msg)
+ if err != nil {
+ if errors.Is(err, sctp.ErrStreamClosed) {
+ return
+ } else {
+ t.Fatal("unexpected write err: ", err)
+ }
+ }
+ time.Sleep(50 * time.Millisecond)
+ }
+}
+
+func TestStreamSimpleReadWriteClose(t *testing.T) {
+ client, server := getDetachedDataChannels(t)
+
+ var clientDone, serverDone atomic.Bool
+ clientStr := newStream(client.dc, client.rwc, maxSendMessageSize, func() { clientDone.Store(true) })
+ serverStr := newStream(server.dc, server.rwc, maxSendMessageSize, func() { serverDone.Store(true) })
+
+ // send a foobar from the client
+ n, err := clientStr.Write([]byte("foobar"))
+ require.NoError(t, err)
+ require.Equal(t, 6, n)
+ require.NoError(t, clientStr.CloseWrite())
+ // writing after closing should error
+ _, err = clientStr.Write([]byte("foobar"))
+ require.Error(t, err)
+ require.False(t, clientDone.Load())
+
+ // now read all the data on the server side
+ b, err := io.ReadAll(serverStr)
+ require.NoError(t, err)
+ require.Equal(t, []byte("foobar"), b)
+ // reading again should give another io.EOF
+ n, err = serverStr.Read(make([]byte, 10))
+ require.Zero(t, n)
+ require.ErrorIs(t, err, io.EOF)
+ require.False(t, serverDone.Load())
+
+ // send something back
+ _, err = serverStr.Write([]byte("lorem ipsum"))
+ require.NoError(t, err)
+ require.NoError(t, serverStr.CloseWrite())
+
+ // and read it at the client
+ require.False(t, clientDone.Load())
+ b, err = io.ReadAll(clientStr)
+ require.NoError(t, err)
+ require.Equal(t, []byte("lorem ipsum"), b)
+
+ // stream is only cleaned up on calling Close or Reset
+ clientStr.Close()
+ serverStr.Close()
+ require.Eventually(t, func() bool { return clientDone.Load() }, 5*time.Second, 100*time.Millisecond)
+ // Need to call Close for cleanup. Otherwise the FIN_ACK is never read
+ require.NoError(t, serverStr.Close())
+ require.Eventually(t, func() bool { return serverDone.Load() }, 5*time.Second, 100*time.Millisecond)
+}
+
+func TestStreamPartialReads(t *testing.T) {
+ client, server := getDetachedDataChannels(t)
+
+ clientStr := newStream(client.dc, client.rwc, maxSendMessageSize, func() {})
+ serverStr := newStream(server.dc, server.rwc, maxSendMessageSize, func() {})
+
+ _, err := serverStr.Write([]byte("foobar"))
+ require.NoError(t, err)
+ require.NoError(t, serverStr.CloseWrite())
+
+ n, err := clientStr.Read([]byte{}) // empty read
+ require.NoError(t, err)
+ require.Zero(t, n)
+ b := make([]byte, 3)
+ n, err = clientStr.Read(b)
+ require.Equal(t, 3, n)
+ require.NoError(t, err)
+ require.Equal(t, []byte("foo"), b)
+ b, err = io.ReadAll(clientStr)
+ require.NoError(t, err)
+ require.Equal(t, []byte("bar"), b)
+}
+
+func TestStreamSkipEmptyFrames(t *testing.T) {
+ client, server := getDetachedDataChannels(t)
+
+ clientStr := newStream(client.dc, client.rwc, maxSendMessageSize, func() {})
+ serverStr := newStream(server.dc, server.rwc, maxSendMessageSize, func() {})
+
+ for i := 0; i < 10; i++ {
+ require.NoError(t, serverStr.writer.WriteMsg(&pb.Message{}))
+ }
+ require.NoError(t, serverStr.writer.WriteMsg(&pb.Message{Message: []byte("foo")}))
+ for i := 0; i < 10; i++ {
+ require.NoError(t, serverStr.writer.WriteMsg(&pb.Message{}))
+ }
+ require.NoError(t, serverStr.writer.WriteMsg(&pb.Message{Message: []byte("bar")}))
+ for i := 0; i < 10; i++ {
+ require.NoError(t, serverStr.writer.WriteMsg(&pb.Message{}))
+ }
+ require.NoError(t, serverStr.writer.WriteMsg(&pb.Message{Flag: pb.Message_FIN.Enum()}))
+
+ var read []byte
+ var count int
+ for i := 0; i < 100; i++ {
+ b := make([]byte, 10)
+ count++
+ n, err := clientStr.Read(b)
+ read = append(read, b[:n]...)
+ if err == io.EOF {
+ break
+ }
+ require.NoError(t, err)
+ }
+ require.LessOrEqual(t, count, 3, "should've taken a maximum of 3 reads")
+ require.Equal(t, []byte("foobar"), read)
+}
+
+func TestStreamReadReturnsOnClose(t *testing.T) {
+ client, _ := getDetachedDataChannels(t)
+
+ clientStr := newStream(client.dc, client.rwc, maxSendMessageSize, func() {})
+ errChan := make(chan error, 1)
+ go func() {
+ _, err := clientStr.Read([]byte{0})
+ errChan <- err
+ }()
+ time.Sleep(100 * time.Millisecond) // give the Read call some time to hit the loop
+ require.NoError(t, clientStr.Close())
+ select {
+ case err := <-errChan:
+ require.ErrorIs(t, err, network.ErrReset)
+ case <-time.After(500 * time.Millisecond):
+ t.Fatal("timeout")
+ }
+
+ _, err := clientStr.Read([]byte{0})
+ require.ErrorIs(t, err, network.ErrReset)
+}
+
+func TestStreamResets(t *testing.T) {
+ client, server := getDetachedDataChannels(t)
+
+ var clientDone, serverDone atomic.Bool
+ clientStr := newStream(client.dc, client.rwc, maxSendMessageSize, func() { clientDone.Store(true) })
+ serverStr := newStream(server.dc, server.rwc, maxSendMessageSize, func() { serverDone.Store(true) })
+
+ // send a foobar from the client
+ _, err := clientStr.Write([]byte("foobar"))
+ require.NoError(t, err)
+ _, err = serverStr.Write([]byte("lorem ipsum"))
+ require.NoError(t, err)
+ require.NoError(t, clientStr.Reset()) // resetting resets both directions
+ require.True(t, clientDone.Load())
+ // attempting to write more data should result in a reset error
+ _, err = clientStr.Write([]byte("foobar"))
+ require.ErrorIs(t, err, network.ErrReset)
+ // read what the server sent
+ b, err := io.ReadAll(clientStr)
+ require.Empty(t, b)
+ require.ErrorIs(t, err, network.ErrReset)
+
+ // read the data on the server side
+ require.False(t, serverDone.Load())
+ b, err = io.ReadAll(serverStr)
+ require.Equal(t, []byte("foobar"), b)
+ require.ErrorIs(t, err, network.ErrReset)
+ require.Eventually(t, func() bool {
+ _, err := serverStr.Write([]byte("foobar"))
+ return errors.Is(err, network.ErrReset)
+ }, time.Second, 50*time.Millisecond)
+ serverStr.Close()
+ require.Eventually(t, func() bool {
+ return serverDone.Load()
+ }, time.Second, 50*time.Millisecond)
+}
+
+func TestStreamReadDeadlineAsync(t *testing.T) {
+ client, server := getDetachedDataChannels(t)
+
+ clientStr := newStream(client.dc, client.rwc, maxSendMessageSize, func() {})
+ serverStr := newStream(server.dc, server.rwc, maxSendMessageSize, func() {})
+
+ timeout := 100 * time.Millisecond
+ if os.Getenv("CI") != "" {
+ timeout *= 5
+ }
+ start := time.Now()
+ clientStr.SetReadDeadline(start.Add(timeout))
+ _, err := clientStr.Read([]byte{0})
+ require.ErrorIs(t, err, os.ErrDeadlineExceeded)
+ took := time.Since(start)
+ require.GreaterOrEqual(t, took, timeout)
+ require.LessOrEqual(t, took, timeout*3/2)
+ // repeated calls should return immediately
+ start = time.Now()
+ _, err = clientStr.Read([]byte{0})
+ require.ErrorIs(t, err, os.ErrDeadlineExceeded)
+ require.LessOrEqual(t, time.Since(start), timeout/3)
+ // clear the deadline
+ clientStr.SetReadDeadline(time.Time{})
+ _, err = serverStr.Write([]byte("foobar"))
+ require.NoError(t, err)
+ _, err = clientStr.Read([]byte{0})
+ require.NoError(t, err)
+ require.LessOrEqual(t, time.Since(start), timeout/3)
+}
+
+func TestStreamWriteDeadlineAsync(t *testing.T) {
+ client, server := getDetachedDataChannels(t)
+
+ clientStr := newStream(client.dc, client.rwc, maxSendMessageSize, func() {})
+ serverStr := newStream(server.dc, server.rwc, maxSendMessageSize, func() {})
+ _ = serverStr
+
+ b := make([]byte, 1024)
+ rand.Read(b)
+ start := time.Now()
+ timeout := 100 * time.Millisecond
+ if os.Getenv("CI") != "" {
+ timeout *= 5
+ }
+ clientStr.SetWriteDeadline(start.Add(timeout))
+ var hitDeadline bool
+ for i := 0; i < 2000; i++ {
+ if _, err := clientStr.Write(b); err != nil {
+ t.Logf("wrote %d kB", i)
+ require.ErrorIs(t, err, os.ErrDeadlineExceeded)
+ hitDeadline = true
+ break
+ }
+ }
+ require.True(t, hitDeadline)
+ took := time.Since(start)
+ require.GreaterOrEqual(t, took, timeout)
+ require.LessOrEqual(t, took, timeout*3/2)
+}
+
+func TestStreamReadAfterClose(t *testing.T) {
+ client, server := getDetachedDataChannels(t)
+
+ clientStr := newStream(client.dc, client.rwc, maxSendMessageSize, func() {})
+ serverStr := newStream(server.dc, server.rwc, maxSendMessageSize, func() {})
+
+ serverStr.Close()
+ b := make([]byte, 1)
+ _, err := clientStr.Read(b)
+ require.Equal(t, io.EOF, err)
+ _, err = clientStr.Read(nil)
+ require.Equal(t, io.EOF, err)
+
+ client, server = getDetachedDataChannels(t)
+
+ clientStr = newStream(client.dc, client.rwc, maxSendMessageSize, func() {})
+ serverStr = newStream(server.dc, server.rwc, maxSendMessageSize, func() {})
+
+ serverStr.Reset()
+ b = make([]byte, 1)
+ _, err = clientStr.Read(b)
+ require.ErrorIs(t, err, network.ErrReset)
+ _, err = clientStr.Read(nil)
+ require.ErrorIs(t, err, network.ErrReset)
+}
+
+func TestStreamCloseAfterFINACK(t *testing.T) {
+ client, server := getDetachedDataChannels(t)
+
+ done := make(chan bool, 1)
+ clientStr := newStream(client.dc, client.rwc, maxSendMessageSize, func() { done <- true })
+ serverStr := newStream(server.dc, server.rwc, maxSendMessageSize, func() {})
+
+ go func() {
+ err := clientStr.Close()
+ assert.NoError(t, err)
+ }()
+
+ select {
+ case <-done:
+ case <-time.After(200 * time.Millisecond):
+ t.Fatalf("Close should signal OnDone immediately")
+ }
+
+ // Reading FIN_ACK on server should trigger data channel close on the client
+ b := make([]byte, 1)
+ _, err := serverStr.Read(b)
+ require.Error(t, err)
+ require.ErrorIs(t, err, io.EOF)
+ assertDataChannelClosed(t, client.rwc.(*datachannel.DataChannel))
+}
+
+// TestStreamFinAckAfterStopSending tests that FIN_ACK is sent even after the write half
+// of the stream is closed.
+func TestStreamFinAckAfterStopSending(t *testing.T) {
+ client, server := getDetachedDataChannels(t)
+
+ done := make(chan bool, 1)
+ clientStr := newStream(client.dc, client.rwc, maxSendMessageSize, func() { done <- true })
+ serverStr := newStream(server.dc, server.rwc, maxSendMessageSize, func() {})
+
+ go func() {
+ clientStr.CloseRead()
+ clientStr.Write([]byte("hello world"))
+ done <- true
+ err := clientStr.Close()
+ assert.NoError(t, err)
+ }()
+ <-done
+
+ select {
+ case <-done:
+ case <-time.After(500 * time.Millisecond):
+ t.Errorf("Close should signal onDone immediately")
+ }
+
+ // serverStr has write half closed and read half open
+ // serverStr should still send FIN_ACK
+ b := make([]byte, 24)
+ _, err := serverStr.Read(b)
+ require.NoError(t, err)
+ serverStr.Close() // Sends stop_sending, fin
+ assertDataChannelClosed(t, server.rwc.(*datachannel.DataChannel))
+ assertDataChannelClosed(t, client.rwc.(*datachannel.DataChannel))
+}
+
+func TestStreamConcurrentClose(t *testing.T) {
+ client, server := getDetachedDataChannels(t)
+
+ start := make(chan bool, 2)
+ done := make(chan bool, 2)
+ clientStr := newStream(client.dc, client.rwc, maxSendMessageSize, func() { done <- true })
+ serverStr := newStream(server.dc, server.rwc, maxSendMessageSize, func() { done <- true })
+
+ go func() {
+ start <- true
+ clientStr.Close()
+ }()
+ go func() {
+ start <- true
+ serverStr.Close()
+ }()
+ <-start
+ <-start
+
+ select {
+ case <-done:
+ case <-time.After(2 * time.Second):
+ t.Fatalf("concurrent close should succeed quickly")
+ }
+ select {
+ case <-done:
+ case <-time.After(2 * time.Second):
+ t.Fatalf("concurrent close should succeed quickly")
+ }
+
+ // Wait for FIN_ACK AND datachannel close
+ assertDataChannelClosed(t, client.rwc.(*datachannel.DataChannel))
+ assertDataChannelClosed(t, server.rwc.(*datachannel.DataChannel))
+}
+
+func TestStreamResetAfterClose(t *testing.T) {
+ client, server := getDetachedDataChannels(t)
+
+ done := make(chan bool, 2)
+ clientStr := newStream(client.dc, client.rwc, maxSendMessageSize, func() { done <- true })
+ clientStr.Close()
+
+ select {
+ case <-done:
+ case <-time.After(500 * time.Millisecond):
+ t.Fatalf("Close should run cleanup immediately")
+ }
+ // The server data channel should still be open
+ assertDataChannelOpen(t, server.rwc.(*datachannel.DataChannel))
+ clientStr.Reset()
+ // Reset closes the datachannels
+ assertDataChannelClosed(t, server.rwc.(*datachannel.DataChannel))
+ assertDataChannelClosed(t, client.rwc.(*datachannel.DataChannel))
+ select {
+ case <-done:
+ t.Fatalf("onDone should not be called twice")
+ case <-time.After(50 * time.Millisecond):
+ }
+}
+
+func TestStreamDataChannelCloseOnFINACK(t *testing.T) {
+ client, server := getDetachedDataChannels(t)
+
+ done := make(chan bool, 1)
+ clientStr := newStream(client.dc, client.rwc, maxSendMessageSize, func() { done <- true })
+
+ clientStr.Close()
+
+ select {
+ case <-time.After(500 * time.Millisecond):
+ t.Fatalf("Close should run cleanup immediately")
+ case <-done:
+ }
+
+ // sending FIN_ACK closes the datachannel
+ serverWriter := pbio.NewDelimitedWriter(server.rwc)
+ err := serverWriter.WriteMsg(&pb.Message{Flag: pb.Message_FIN_ACK.Enum()})
+ require.NoError(t, err)
+
+ assertDataChannelClosed(t, server.rwc.(*datachannel.DataChannel))
+ assertDataChannelClosed(t, client.rwc.(*datachannel.DataChannel))
+}
+
+func TestStreamChunking(t *testing.T) {
+ for _, msgSize := range []int{16 << 10, 32 << 10, 64 << 10, 128 << 10, 256 << 10} {
+ t.Run(fmt.Sprintf("msgSize=%d", msgSize), func(t *testing.T) {
+ client, server := getDetachedDataChannels(t)
+ defer client.dc.Close()
+ defer server.dc.Close()
+
+ clientStr := newStream(client.dc, client.rwc, msgSize, nil)
+ // server should read large messages even if it can only send 16 kB messages.
+ serverStr := newStream(server.dc, server.rwc, 16<<10, nil)
+
+ N := msgSize + 1000
+ input := make([]byte, N)
+ _, err := rand.Read(input)
+ require.NoError(t, err)
+ go func() {
+ n, err := clientStr.Write(input)
+ require.NoError(t, err)
+ require.Equal(t, n, len(input))
+ }()
+
+ data := make([]byte, N)
+ n, err := serverStr.Read(data)
+ require.NoError(t, err)
+ require.LessOrEqual(t, n, msgSize)
+ // shouldn't be much less than msgSize
+ require.GreaterOrEqual(t, n, msgSize-100)
+ _, err = serverStr.Read(data[n:])
+ require.NoError(t, err)
+ require.Equal(t, input, data)
+ })
+ }
+}
diff --git a/p2p/transport/webrtc/stream_write.go b/p2p/transport/webrtc/stream_write.go
new file mode 100644
index 0000000000..4900f0fcdc
--- /dev/null
+++ b/p2p/transport/webrtc/stream_write.go
@@ -0,0 +1,172 @@
+package libp2pwebrtc
+
+import (
+ "errors"
+ "os"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/p2p/transport/webrtc/pb"
+)
+
+var errWriteAfterClose = errors.New("write after close")
+
+// If we have less space than minMessageSize, we don't put a new message on the data channel.
+// Instead, we wait until more space opens up.
+const minMessageSize = 1 << 10
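+
+// Writers block in Write until availableSendSpace is at least minMessageSize;
+// the OnBufferedAmountLow callback registered in newStream wakes them up via
+// writeStateChanged.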
+
+func (s *stream) Write(b []byte) (int, error) {
+ s.mx.Lock()
+ defer s.mx.Unlock()
+
+ if s.closeForShutdownErr != nil {
+ return 0, s.closeForShutdownErr
+ }
+ switch s.sendState {
+ case sendStateReset:
+ return 0, s.writeError
+ case sendStateDataSent, sendStateDataReceived:
+ return 0, errWriteAfterClose
+ }
+
+ if !s.writeDeadline.IsZero() && time.Now().After(s.writeDeadline) {
+ return 0, os.ErrDeadlineExceeded
+ }
+
+ var writeDeadlineTimer *time.Timer
+ defer func() {
+ if writeDeadlineTimer != nil {
+ writeDeadlineTimer.Stop()
+ }
+ }()
+
+ var n int
+ var msg pb.Message
+ for len(b) > 0 {
+ if s.closeForShutdownErr != nil {
+ return n, s.closeForShutdownErr
+ }
+ switch s.sendState {
+ case sendStateReset:
+ return n, s.writeError
+ case sendStateDataSent, sendStateDataReceived:
+ return n, errWriteAfterClose
+ }
+
+ writeDeadline := s.writeDeadline
+ // deadline deleted, stop and remove the timer
+ if writeDeadline.IsZero() && writeDeadlineTimer != nil {
+ writeDeadlineTimer.Stop()
+ writeDeadlineTimer = nil
+ }
+ var writeDeadlineChan <-chan time.Time
+ if !writeDeadline.IsZero() {
+ if writeDeadlineTimer == nil {
+ writeDeadlineTimer = time.NewTimer(time.Until(writeDeadline))
+ } else {
+ if !writeDeadlineTimer.Stop() {
+ <-writeDeadlineTimer.C
+ }
+ writeDeadlineTimer.Reset(time.Until(writeDeadline))
+ }
+ writeDeadlineChan = writeDeadlineTimer.C
+ }
+
+ availableSpace := s.availableSendSpace()
+ if availableSpace < minMessageSize {
+ s.mx.Unlock()
+ select {
+ case <-writeDeadlineChan:
+ s.mx.Lock()
+ return n, os.ErrDeadlineExceeded
+ case <-s.writeStateChanged:
+ }
+ s.mx.Lock()
+ continue
+ }
+ end := s.maxSendMessageSize
+ if end > availableSpace {
+ end = availableSpace
+ }
+ end -= protoOverhead + varintOverhead
+ if end > len(b) {
+ end = len(b)
+ }
+ msg = pb.Message{Message: b[:end]}
+ if err := s.writer.WriteMsg(&msg); err != nil {
+ return n, err
+ }
+ n += end
+ b = b[end:]
+ }
+ return n, nil
+}
+
+func (s *stream) SetWriteDeadline(t time.Time) error {
+ s.mx.Lock()
+ defer s.mx.Unlock()
+ s.writeDeadline = t
+ s.notifyWriteStateChanged()
+ return nil
+}
+
+// sendBufferSize is the maximum amount of data we enqueue on the underlying data channel
+// for writes. The underlying SCTP layer has an unbounded buffer for writes, so the amount
+// enqueued per stream is limited to avoid a single stream monopolizing the entire connection.
+func (s *stream) sendBufferSize() int {
+ return 2 * s.maxSendMessageSize
+}
+
+// sendBufferLowThreshold is the threshold below which we write more data on the underlying
+// data channel. We want a notification as soon as we can write one full-sized message.
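+// With the default 16384-byte maxSendMessageSize this is 32768 - 16384 = 16384 bytes.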
+func (s *stream) sendBufferLowThreshold() int {
+ return s.sendBufferSize() - s.maxSendMessageSize
+}
+
+func (s *stream) availableSendSpace() int {
+ buffered := int(s.dataChannel.BufferedAmount())
+ availableSpace := s.sendBufferSize() - buffered
+ if availableSpace+maxTotalControlMessagesSize < 0 { // this should never happen, but better check
+ log.Error("data channel buffered more data than the maximum amount", "max", s.sendBufferSize(), "buffered", buffered)
+ }
+ return availableSpace
+}
+
+func (s *stream) cancelWrite(errCode network.StreamErrorCode) error {
+ s.mx.Lock()
+ defer s.mx.Unlock()
+
+ // There's no need to reset the write half if the write half has been closed
+ // successfully or has been reset previously
+ if s.sendState == sendStateDataReceived || s.sendState == sendStateReset {
+ return nil
+ }
+ s.sendState = sendStateReset
+ s.writeError = &network.StreamError{Remote: false, ErrorCode: errCode}
+ // Remove reference to this stream from data channel
+ s.dataChannel.OnBufferedAmountLow(nil)
+ s.notifyWriteStateChanged()
+ code := uint32(errCode)
+ return s.writer.WriteMsg(&pb.Message{Flag: pb.Message_RESET.Enum(), ErrorCode: &code})
+}
+
+func (s *stream) CloseWrite() error {
+ s.mx.Lock()
+ defer s.mx.Unlock()
+
+ if s.sendState != sendStateSending {
+ return nil
+ }
+ s.sendState = sendStateDataSent
+ // Remove reference to this stream from data channel
+ s.dataChannel.OnBufferedAmountLow(nil)
+ s.notifyWriteStateChanged()
+ return s.writer.WriteMsg(&pb.Message{Flag: pb.Message_FIN.Enum()})
+}
+
+func (s *stream) notifyWriteStateChanged() {
+ select {
+ case s.writeStateChanged <- struct{}{}:
+ default:
+ }
+}
diff --git a/p2p/transport/webrtc/transport.go b/p2p/transport/webrtc/transport.go
new file mode 100644
index 0000000000..b0365ff0f9
--- /dev/null
+++ b/p2p/transport/webrtc/transport.go
@@ -0,0 +1,674 @@
+// Package libp2pwebrtc implements the WebRTC transport for go-libp2p,
+// as described in https://github.com/libp2p/specs/tree/master/webrtc.
+package libp2pwebrtc
+
+import (
+ "context"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/x509"
+ "errors"
+ "fmt"
+ "net"
+ "os"
+ "time"
+
+ mrand "math/rand/v2"
+
+ "google.golang.org/protobuf/proto"
+
+ "github.com/libp2p/go-libp2p/core/connmgr"
+ ic "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/pnet"
+ "github.com/libp2p/go-libp2p/core/sec"
+ tpt "github.com/libp2p/go-libp2p/core/transport"
+ "github.com/libp2p/go-libp2p/p2p/security/noise"
+ libp2pquic "github.com/libp2p/go-libp2p/p2p/transport/quic"
+ "github.com/libp2p/go-libp2p/p2p/transport/webrtc/pb"
+ "github.com/libp2p/go-msgio"
+
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+ "github.com/multiformats/go-multihash"
+
+ "github.com/pion/datachannel"
+ "github.com/pion/webrtc/v4"
+)
+
+var webrtcComponent *ma.Component
+
+func init() {
+ var err error
+ webrtcComponent, err = ma.NewComponent(ma.ProtocolWithCode(ma.P_WEBRTC_DIRECT).Name, "")
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err.Error())
+ os.Exit(1)
+ }
+}
+
+const (
+ // handshakeChannelNegotiated is used to specify that the
+ // handshake data channel does not need negotiation via DCEP.
+ // A constant is used since the `DataChannelInit` struct takes
+ // references instead of values.
+ handshakeChannelNegotiated = true
+ // handshakeChannelID is the agreed ID for the handshake data
+ // channel. A constant is used since the `DataChannelInit` struct takes
+ // references instead of values. We specify the type explicitly since this
+ // value is only ever passed by reference.
+ handshakeChannelID = uint16(0)
+)
+
+// timeout values for the peerconnection
+// https://github.com/pion/webrtc/blob/v3.1.50/settingengine.go#L102-L109
+const (
+ DefaultDisconnectedTimeout = 20 * time.Second
+ DefaultFailedTimeout = 30 * time.Second
+ DefaultKeepaliveTimeout = 15 * time.Second
+
+ // sctpReceiveBufferSize is the size of the buffer for incoming messages.
+ //
+ // This is enough space for enqueuing 10 full sized messages.
+ // Besides throughput, this only matters if an application is using multiple dependent
+ // streams, say streams 1 & 2. It reads from stream 1 only after receiving message from
+ // stream 2. A buffer of 10 messages should serve all such situations.
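+ //
+ // With maxReceiveMessageSize at 257 kB, this comes to roughly 2.5 MB per
+ // connection; dial reserves this amount from the resource manager.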
+ sctpReceiveBufferSize = 10 * maxReceiveMessageSize
+)
+
+type WebRTCTransport struct {
+ webrtcConfig webrtc.Configuration
+ rcmgr network.ResourceManager
+ gater connmgr.ConnectionGater
+ privKey ic.PrivKey
+ noiseTpt *noise.Transport
+ localPeerId peer.ID
+
+ listenUDP func(network string, laddr *net.UDPAddr) (net.PacketConn, error)
+
+ // timeouts
+ peerConnectionTimeouts iceTimeouts
+
+ // in-flight connections
+ maxInFlightConnections uint32
+}
+
+var _ tpt.Transport = &WebRTCTransport{}
+
+type Option func(*WebRTCTransport) error
+
+type iceTimeouts struct {
+ Disconnect time.Duration
+ Failed time.Duration
+ Keepalive time.Duration
+}
+
+type ListenUDPFn func(network string, laddr *net.UDPAddr) (net.PacketConn, error)
+
+func New(privKey ic.PrivKey, psk pnet.PSK, gater connmgr.ConnectionGater, rcmgr network.ResourceManager, listenUDP ListenUDPFn, opts ...Option) (*WebRTCTransport, error) {
+ if psk != nil {
+ log.Error("WebRTC doesn't support private networks yet.")
+ return nil, fmt.Errorf("WebRTC doesn't support private networks yet")
+ }
+ if rcmgr == nil {
+ rcmgr = &network.NullResourceManager{}
+ }
+ localPeerID, err := peer.IDFromPrivateKey(privKey)
+ if err != nil {
+ return nil, fmt.Errorf("get local peer ID: %w", err)
+ }
+ // We use elliptic P-256 since it is widely supported by browsers.
+ //
+ // Implementation note: testing with the browser, it seems like Chromium
+ // only supports ECDSA P-256 or RSA key signatures in the WebRTC TLS
+ // certificate. We tried using P-224 and P-384, which caused the DTLS
+ // handshake to fail with an Illegal Parameter error.
+ //
+ // The algorithm used to generate a certificate for an RTCPeerConnection
+ // must adhere to the WebCrypto API, which publishes a list of suggested
+ // algorithms. From our observation, RSA and ECDSA P-256 are supported on
+ // almost all browsers, while Ed25519 is not on that list.
+ pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ if err != nil {
+ return nil, fmt.Errorf("generate key for cert: %w", err)
+ }
+ cert, err := webrtc.GenerateCertificate(pk)
+ if err != nil {
+ return nil, fmt.Errorf("generate certificate: %w", err)
+ }
+ config := webrtc.Configuration{
+ Certificates: []webrtc.Certificate{*cert},
+ }
+ noiseTpt, err := noise.New(noise.ID, privKey, nil)
+ if err != nil {
+ return nil, fmt.Errorf("unable to create noise transport: %w", err)
+ }
+ transport := &WebRTCTransport{
+ rcmgr: rcmgr,
+ gater: gater,
+ webrtcConfig: config,
+ privKey: privKey,
+ noiseTpt: noiseTpt,
+ localPeerId: localPeerID,
+
+ listenUDP: listenUDP,
+ peerConnectionTimeouts: iceTimeouts{
+ Disconnect: DefaultDisconnectedTimeout,
+ Failed: DefaultFailedTimeout,
+ Keepalive: DefaultKeepaliveTimeout,
+ },
+
+ maxInFlightConnections: DefaultMaxInFlightConnections,
+ }
+ for _, opt := range opts {
+ if err := opt(transport); err != nil {
+ return nil, err
+ }
+ }
+ return transport, nil
+}
+
+func (t *WebRTCTransport) ListenOrder() int {
+ return libp2pquic.ListenOrder + 1 // We want to listen after QUIC listens so we can possibly reuse the same port.
+}
+
+func (t *WebRTCTransport) Protocols() []int {
+ return []int{ma.P_WEBRTC_DIRECT}
+}
+
+func (t *WebRTCTransport) Proxy() bool {
+ return false
+}
+
+func (t *WebRTCTransport) CanDial(addr ma.Multiaddr) bool {
+ isValid, n := IsWebRTCDirectMultiaddr(addr)
+ return isValid && n > 0
+}
+
+// Listen returns a listener for addr.
+//
+// The IP, Port combination for addr must be exclusive to this listener as a WebRTC listener cannot
+// be multiplexed on the same port as other UDP based transports like QUIC and WebTransport.
+// See https://github.com/libp2p/go-libp2p/issues/2446 for details.
+func (t *WebRTCTransport) Listen(addr ma.Multiaddr) (tpt.Listener, error) {
+ addr, wrtcComponent := ma.SplitLast(addr)
+ isWebrtc := wrtcComponent.Equal(webrtcComponent)
+ if !isWebrtc {
+ return nil, fmt.Errorf("must listen on webrtc multiaddr")
+ }
+ nw, host, err := manet.DialArgs(addr)
+ if err != nil {
+ return nil, fmt.Errorf("listener could not fetch dialargs: %w", err)
+ }
+ udpAddr, err := net.ResolveUDPAddr(nw, host)
+ if err != nil {
+ return nil, fmt.Errorf("listener could not resolve udp address: %w", err)
+ }
+
+ socket, err := t.listenUDP(nw, udpAddr)
+ if err != nil {
+ return nil, fmt.Errorf("listen on udp: %w", err)
+ }
+
+ listener, err := t.listenSocket(socket)
+ if err != nil {
+ socket.Close()
+ return nil, err
+ }
+ return listener, nil
+}
+
+func (t *WebRTCTransport) listenSocket(socket net.PacketConn) (tpt.Listener, error) {
+ listenerMultiaddr, err := manet.FromNetAddr(socket.LocalAddr())
+ if err != nil {
+ return nil, err
+ }
+
+ listenerFingerprint, err := t.getCertificateFingerprint()
+ if err != nil {
+ return nil, err
+ }
+
+ encodedLocalFingerprint, err := encodeDTLSFingerprint(listenerFingerprint)
+ if err != nil {
+ return nil, err
+ }
+
+ certComp, err := ma.NewComponent(ma.ProtocolWithCode(ma.P_CERTHASH).Name, encodedLocalFingerprint)
+ if err != nil {
+ return nil, err
+ }
+ listenerMultiaddr = listenerMultiaddr.AppendComponent(webrtcComponent, certComp)
+
+ return newListener(
+ t,
+ listenerMultiaddr,
+ socket,
+ t.webrtcConfig,
+ )
+}
+
+func (t *WebRTCTransport) Dial(ctx context.Context, remoteMultiaddr ma.Multiaddr, p peer.ID) (tpt.CapableConn, error) {
+ scope, err := t.rcmgr.OpenConnection(network.DirOutbound, false, remoteMultiaddr)
+ if err != nil {
+ return nil, err
+ }
+ if err := scope.SetPeer(p); err != nil {
+ scope.Done()
+ return nil, err
+ }
+ conn, err := t.dial(ctx, scope, remoteMultiaddr, p)
+ if err != nil {
+ scope.Done()
+ return nil, err
+ }
+ return conn, nil
+}
+
+func (t *WebRTCTransport) dial(ctx context.Context, scope network.ConnManagementScope, remoteMultiaddr ma.Multiaddr, p peer.ID) (tConn tpt.CapableConn, err error) {
+ var w webRTCConnection
+ defer func() {
+ if err != nil {
+ if w.PeerConnection != nil {
+ _ = w.PeerConnection.Close()
+ }
+ if tConn != nil {
+ _ = tConn.Close()
+ tConn = nil
+ }
+ }
+ }()
+
+ remoteMultihash, err := decodeRemoteFingerprint(remoteMultiaddr)
+ if err != nil {
+ return nil, fmt.Errorf("decode fingerprint: %w", err)
+ }
+ remoteHashFunction, ok := getSupportedSDPHash(remoteMultihash.Code)
+ if !ok {
+ return nil, fmt.Errorf("unsupported hash function: %w", nil)
+ }
+
+ rnw, rhost, err := manet.DialArgs(remoteMultiaddr)
+ if err != nil {
+ return nil, fmt.Errorf("generate dial args: %w", err)
+ }
+
+ raddr, err := net.ResolveUDPAddr(rnw, rhost)
+ if err != nil {
+ return nil, fmt.Errorf("resolve udp address: %w", err)
+ }
+
+ // Instead of encoding the local fingerprint we
+ // generate a random string as the connection ufrag.
+ // The only requirement here is that the ufrag and password
+ // must be equal, which allows the server to determine
+ // the password from the STUN message.
+ ufrag := genUfrag()
+
+ settingEngine := webrtc.SettingEngine{
+ LoggerFactory: pionLoggerFactory,
+ }
+ settingEngine.SetICECredentials(ufrag, ufrag)
+ settingEngine.DetachDataChannels()
+ // use the first best address candidate
+ settingEngine.SetPrflxAcceptanceMinWait(0)
+ settingEngine.SetICETimeouts(
+ t.peerConnectionTimeouts.Disconnect,
+ t.peerConnectionTimeouts.Failed,
+ t.peerConnectionTimeouts.Keepalive,
+ )
+ // By default, webrtc will not collect candidates on the loopback address,
+ // as the ICE specification disallows it. However, implementations do not
+ // strictly follow this; Chrome, for example, gathers TCP loopback candidates.
+ // Without this setting, pion running on a system where only the loopback
+ // interface is up would not connect to anything.
+ settingEngine.SetIncludeLoopbackCandidate(true)
+ settingEngine.SetSCTPMaxReceiveBufferSize(sctpReceiveBufferSize)
+ if err := scope.ReserveMemory(sctpReceiveBufferSize, network.ReservationPriorityMedium); err != nil {
+ return nil, err
+ }
+
+ w, err = newWebRTCConnection(settingEngine, t.webrtcConfig)
+ if err != nil {
+ return nil, fmt.Errorf("instantiating peer connection failed: %w", err)
+ }
+
+ errC := addOnConnectionStateChangeCallback(w.PeerConnection)
+
+ // do offer-answer exchange
+ offer, err := w.PeerConnection.CreateOffer(nil)
+ if err != nil {
+ return nil, fmt.Errorf("create offer: %w", err)
+ }
+
+ err = w.PeerConnection.SetLocalDescription(offer)
+ if err != nil {
+ return nil, fmt.Errorf("set local description: %w", err)
+ }
+
+ answerSDPString, err := createServerSDP(raddr, ufrag, *remoteMultihash)
+ if err != nil {
+ return nil, fmt.Errorf("render server SDP: %w", err)
+ }
+
+ answer := webrtc.SessionDescription{SDP: answerSDPString, Type: webrtc.SDPTypeAnswer}
+ err = w.PeerConnection.SetRemoteDescription(answer)
+ if err != nil {
+ return nil, fmt.Errorf("set remote description: %w", err)
+ }
+
+ // await peerconnection opening
+ select {
+ case err := <-errC:
+ if err != nil {
+ return nil, err
+ }
+ case <-ctx.Done():
+ return nil, errors.New("peerconnection opening timed out")
+ }
+
+ // We are connected, run the noise handshake
+ detached, err := detachHandshakeDataChannel(ctx, w.HandshakeDataChannel)
+ if err != nil {
+ return nil, err
+ }
+ channel := newStream(w.HandshakeDataChannel, detached, maxSendMessageSize, nil)
+
+ remotePubKey, err := t.noiseHandshake(ctx, w.PeerConnection, channel, p, remoteHashFunction, false)
+ if err != nil {
+ return nil, err
+ }
+
+ // Setup local and remote address for the connection
+ cp, err := w.HandshakeDataChannel.Transport().Transport().ICETransport().GetSelectedCandidatePair()
+ if cp == nil {
+ return nil, errors.New("ice connection did not have selected candidate pair: nil result")
+ }
+ if err != nil {
+ return nil, fmt.Errorf("ice connection did not have selected candidate pair: error: %w", err)
+ }
+ // the local address of the selected candidate pair should be the local address for the connection
+ localAddr, err := manet.FromNetAddr(&net.UDPAddr{IP: net.ParseIP(cp.Local.Address), Port: int(cp.Local.Port)})
+ if err != nil {
+ return nil, err
+ }
+ remoteMultiaddrWithoutCerthash, _ := ma.SplitFunc(remoteMultiaddr, func(c ma.Component) bool { return c.Protocol().Code == ma.P_CERTHASH })
+
+ conn, err := newConnection(
+ network.DirOutbound,
+ w.PeerConnection,
+ t,
+ scope,
+ t.localPeerId,
+ localAddr,
+ p,
+ remotePubKey,
+ remoteMultiaddrWithoutCerthash,
+ w.IncomingDataChannels,
+ w.PeerConnectionClosedCh,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ if t.gater != nil && !t.gater.InterceptSecured(network.DirOutbound, p, conn) {
+ return nil, fmt.Errorf("secured connection gated")
+ }
+ return conn, nil
+}
+
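+// genUfrag returns a random ICE ufrag of the form "libp2p+webrtc+v1/" followed
+// by 32 random alphanumeric characters. The same value doubles as the ICE
+// password (see the SetICECredentials call in dial).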
+func genUfrag() string {
+ const (
+ uFragAlphabet = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
+ uFragPrefix = "libp2p+webrtc+v1/"
+ uFragIdLength = 32
+ uFragLength = len(uFragPrefix) + uFragIdLength
+ )
+
+ seed := [32]byte{}
+ rand.Read(seed[:])
+ r := mrand.New(mrand.NewChaCha8(seed))
+ b := make([]byte, uFragLength)
+ for i := 0; i < len(uFragPrefix); i++ {
+ b[i] = uFragPrefix[i]
+ }
+ for i := len(uFragPrefix); i < uFragLength; i++ {
+ b[i] = uFragAlphabet[r.IntN(len(uFragAlphabet))]
+ }
+ return string(b)
+}
+
+func (t *WebRTCTransport) getCertificateFingerprint() (webrtc.DTLSFingerprint, error) {
+ fps, err := t.webrtcConfig.Certificates[0].GetFingerprints()
+ if err != nil {
+ return webrtc.DTLSFingerprint{}, err
+ }
+ return fps[0], nil
+}
+
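+// generateNoisePrologue computes the Noise prologue for this connection: the
+// ASCII prefix "libp2p-webrtc-noise:" followed by the multihash-encoded
+// SHA-256 fingerprints of both DTLS certificates, the dialer's first. Both
+// sides thus derive the same prologue, tying the Noise handshake to this
+// DTLS session.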
+func (t *WebRTCTransport) generateNoisePrologue(pc *webrtc.PeerConnection, hash crypto.Hash, inbound bool) ([]byte, error) {
+ raw := pc.SCTP().Transport().GetRemoteCertificate()
+ cert, err := x509.ParseCertificate(raw)
+ if err != nil {
+ return nil, err
+ }
+
+ // NOTE: if needed, we could fork the cert code as well to avoid the
+ // extra allocations caused by the unneeded hex interspersing.
+ localFp, err := t.getCertificateFingerprint()
+ if err != nil {
+ return nil, err
+ }
+
+ remoteFpBytes, err := parseFingerprint(cert, hash)
+ if err != nil {
+ return nil, err
+ }
+
+ localFpBytes, err := decodeInterspersedHexFromASCIIString(localFp.Value)
+ if err != nil {
+ return nil, err
+ }
+
+ localEncoded, err := multihash.Encode(localFpBytes, multihash.SHA2_256)
+ if err != nil {
+ log.Debug("could not encode multihash for local fingerprint")
+ return nil, err
+ }
+ remoteEncoded, err := multihash.Encode(remoteFpBytes, multihash.SHA2_256)
+ if err != nil {
+ log.Debug("could not encode multihash for remote fingerprint")
+ return nil, err
+ }
+
+ result := []byte("libp2p-webrtc-noise:")
+ if inbound {
+ result = append(result, remoteEncoded...)
+ result = append(result, localEncoded...)
+ } else {
+ result = append(result, localEncoded...)
+ result = append(result, remoteEncoded...)
+ }
+ return result, nil
+}
+
+func (t *WebRTCTransport) noiseHandshake(ctx context.Context, pc *webrtc.PeerConnection, s *stream, peer peer.ID, hash crypto.Hash, inbound bool) (ic.PubKey, error) {
+ prologue, err := t.generateNoisePrologue(pc, hash, inbound)
+ if err != nil {
+ return nil, fmt.Errorf("generate prologue: %w", err)
+ }
+ opts := make([]noise.SessionOption, 0, 2)
+ opts = append(opts, noise.Prologue(prologue))
+ if peer == "" {
+ opts = append(opts, noise.DisablePeerIDCheck())
+ }
+ sessionTransport, err := t.noiseTpt.WithSessionOptions(opts...)
+ if err != nil {
+ return nil, fmt.Errorf("failed to instantiate Noise transport: %w", err)
+ }
+ var secureConn sec.SecureConn
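+ // Note: the Noise roles are intentionally reversed relative to the
+ // connection direction: the listener runs the initiator (SecureOutbound)
+ // side of the handshake and the dialer the responder (SecureInbound) side.
+ // The error messages below refer to the connection direction.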
+ if inbound {
+ secureConn, err = sessionTransport.SecureOutbound(ctx, netConnWrapper{s}, peer)
+ if err != nil {
+ return nil, fmt.Errorf("failed to secure inbound connection: %w", err)
+ }
+ } else {
+ secureConn, err = sessionTransport.SecureInbound(ctx, netConnWrapper{s}, peer)
+ if err != nil {
+ return nil, fmt.Errorf("failed to secure outbound connection: %w", err)
+ }
+ }
+ return secureConn.RemotePublicKey(), nil
+}
+
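+// AddCertHashes appends a /certhash component for this transport's DTLS
+// certificate to addr. It reports false if the certificate fingerprint could
+// not be computed or encoded.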
+func (t *WebRTCTransport) AddCertHashes(addr ma.Multiaddr) (ma.Multiaddr, bool) {
+ listenerFingerprint, err := t.getCertificateFingerprint()
+ if err != nil {
+ return nil, false
+ }
+
+ encodedLocalFingerprint, err := encodeDTLSFingerprint(listenerFingerprint)
+ if err != nil {
+ return nil, false
+ }
+
+ certComp, err := ma.NewComponent(ma.ProtocolWithCode(ma.P_CERTHASH).Name, encodedLocalFingerprint)
+ if err != nil {
+ return nil, false
+ }
+ return addr.Encapsulate(certComp), true
+}
+
+type netConnWrapper struct {
+ *stream
+}
+
+func (netConnWrapper) LocalAddr() net.Addr { return nil }
+func (netConnWrapper) RemoteAddr() net.Addr { return nil }
+func (w netConnWrapper) Close() error {
+ // A Close while the security handshake is still running is an error, so we
+ // Reset the stream in that case rather than closing it gracefully.
+ w.stream.Reset()
+ return nil
+}
+
+// detachHandshakeDataChannel detaches the handshake data channel once it has
+// opened, returning its datachannel.ReadWriteCloser, or an error if ctx
+// expires first.
+func detachHandshakeDataChannel(ctx context.Context, dc *webrtc.DataChannel) (datachannel.ReadWriteCloser, error) {
+ done := make(chan struct{})
+ var rwc datachannel.ReadWriteCloser
+ var err error
+ dc.OnOpen(func() {
+ defer close(done)
+ rwc, err = dc.Detach()
+ })
+ // This is safe: for detached datachannels, the peerconnection runs the
+ // onOpen callback immediately if the SCTP transport is already connected.
+ select {
+ case <-done:
+ return rwc, err
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ }
+}
+
+// webRTCConnection holds the webrtc.PeerConnection with the handshake channel and the queue for
+// incoming data channels created by the peer.
+//
+// When creating a webrtc.PeerConnection, it is important to set the OnDataChannel handler
+// upfront, before connecting with the peer. If the handler is set up after connecting with
+// the peer, there's a small window of time where datachannels created by the peer may never
+// surface to us, causing a memory leak.
+type webRTCConnection struct {
+ PeerConnection *webrtc.PeerConnection
+ HandshakeDataChannel *webrtc.DataChannel
+ IncomingDataChannels chan dataChannel
+ PeerConnectionClosedCh chan struct{}
+}
+
+func newWebRTCConnection(settings webrtc.SettingEngine, config webrtc.Configuration) (webRTCConnection, error) {
+ api := webrtc.NewAPI(webrtc.WithSettingEngine(settings))
+ pc, err := api.NewPeerConnection(config)
+ if err != nil {
+ return webRTCConnection{}, fmt.Errorf("failed to create peer connection: %w", err)
+ }
+
+ negotiated, id := handshakeChannelNegotiated, handshakeChannelID
+ handshakeDataChannel, err := pc.CreateDataChannel("", &webrtc.DataChannelInit{
+ Negotiated: &negotiated,
+ ID: &id,
+ })
+ if err != nil {
+ pc.Close()
+ return webRTCConnection{}, fmt.Errorf("failed to create handshake channel: %w", err)
+ }
+
+ incomingDataChannels := make(chan dataChannel, maxAcceptQueueLen)
+ pc.OnDataChannel(func(dc *webrtc.DataChannel) {
+ dc.OnOpen(func() {
+ rwc, err := dc.Detach()
+ if err != nil {
+ log.Warn("could not detach datachannel", "id", *dc.ID())
+ return
+ }
+ select {
+ case incomingDataChannels <- dataChannel{rwc, dc}:
+ default:
+ log.Warn("connection busy, rejecting stream")
+ b, _ := proto.Marshal(&pb.Message{Flag: pb.Message_RESET.Enum()})
+ w := msgio.NewWriter(rwc)
+ w.WriteMsg(b)
+ rwc.Close()
+ }
+ })
+ })
+
+ connectionClosedCh := make(chan struct{}, 1)
+ pc.SCTP().OnClose(func(_ error) {
+ // We only need one message. Use a non-blocking send, as pion might invoke the callback more than once.
+ select {
+ case connectionClosedCh <- struct{}{}:
+ default:
+ }
+ })
+ return webRTCConnection{
+ PeerConnection: pc,
+ HandshakeDataChannel: handshakeDataChannel,
+ IncomingDataChannels: incomingDataChannels,
+ PeerConnectionClosedCh: connectionClosedCh,
+ }, nil
+}
+
+// IsWebRTCDirectMultiaddr reports whether addr is a /webrtc-direct multiaddr,
+// along with the number of /certhash components in addr.
+func IsWebRTCDirectMultiaddr(addr ma.Multiaddr) (bool, int) {
+ var foundUDP, foundWebRTC bool
+ certHashCount := 0
+ ma.ForEach(addr, func(c ma.Component) bool {
+ if !foundUDP {
+ if c.Protocol().Code == ma.P_UDP {
+ foundUDP = true
+ }
+ return true
+ }
+ if !foundWebRTC && foundUDP {
+ // protocol after udp must be webrtc-direct
+ if c.Protocol().Code != ma.P_WEBRTC_DIRECT {
+ return false
+ }
+ foundWebRTC = true
+ return true
+ }
+ if foundWebRTC {
+ if c.Protocol().Code == ma.P_CERTHASH {
+ certHashCount++
+ } else {
+ return false
+ }
+ }
+ return true
+ })
+ return foundUDP && foundWebRTC, certHashCount
+}
diff --git a/p2p/transport/webrtc/transport_test.go b/p2p/transport/webrtc/transport_test.go
new file mode 100644
index 0000000000..4f38dc5933
--- /dev/null
+++ b/p2p/transport/webrtc/transport_test.go
@@ -0,0 +1,1049 @@
+package libp2pwebrtc
+
+import (
+ "context"
+ "crypto/rand"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "os"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ tpt "github.com/libp2p/go-libp2p/core/transport"
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+ "github.com/multiformats/go-multibase"
+ "github.com/multiformats/go-multihash"
+ quicproxy "github.com/quic-go/quic-go/integrationtests/tools/proxy"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "golang.org/x/crypto/sha3"
+)
+
+var netListenUDP ListenUDPFn = func(network string, laddr *net.UDPAddr) (net.PacketConn, error) {
+ return net.ListenUDP(network, laddr)
+}
+
+func getTransport(t *testing.T, opts ...Option) (*WebRTCTransport, peer.ID) {
+ t.Helper()
+ privKey, _, err := crypto.GenerateKeyPair(crypto.Ed25519, -1)
+ require.NoError(t, err)
+ rcmgr := &network.NullResourceManager{}
+ transport, err := New(privKey, nil, nil, rcmgr, netListenUDP, opts...)
+ require.NoError(t, err)
+ peerID, err := peer.IDFromPrivateKey(privKey)
+ require.NoError(t, err)
+ t.Cleanup(func() { rcmgr.Close() })
+ return transport, peerID
+}
+
+func TestNullRcmgrTransport(t *testing.T) {
+ privKey, _, err := crypto.GenerateKeyPair(crypto.Ed25519, -1)
+ require.NoError(t, err)
+ transport, err := New(privKey, nil, nil, nil, netListenUDP)
+ require.NoError(t, err)
+
+ listenTransport, pid := getTransport(t)
+ ln, err := listenTransport.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/webrtc-direct"))
+ require.NoError(t, err)
+ go func() {
+ c, err := ln.Accept()
+ if !assert.NoError(t, err) {
+ return
+ }
+ t.Cleanup(func() { c.Close() })
+ }()
+ c, err := transport.Dial(context.Background(), ln.Multiaddr(), pid)
+ require.NoError(t, err)
+ c.Close()
+}
+
+func TestIsWebRTCDirectMultiaddr(t *testing.T) {
+ invalid := []string{
+ "/ip4/1.2.3.4/tcp/10/",
+ "/ip6/1::3/udp/100/quic-v1/",
+ "/ip4/1.2.3.4/udp/1/quic-v1/webrtc-direct",
+ }
+
+ valid := []struct {
+ addr string
+ count int
+ }{
+ {
+ addr: "/ip4/1.2.3.4/udp/1234/webrtc-direct",
+ count: 0,
+ },
+ {
+ addr: "/dns/test.test/udp/1234/webrtc-direct",
+ count: 0,
+ },
+ {
+ addr: "/ip4/1.2.3.4/udp/1234/webrtc-direct/certhash/uEiAsGPzpiPGQzSlVHRXrUCT5EkTV7YFrV4VZ3hpEKTd_zg",
+ count: 1,
+ },
+ {
+ addr: "/ip6/0:0:0:0:0:0:0:1/udp/1234/webrtc-direct/certhash/uEiAsGPzpiPGQzSlVHRXrUCT5EkTV7YFrV4VZ3hpEKTd_zg",
+ count: 1,
+ },
+ {
+ addr: "/dns/test.test/udp/1234/webrtc-direct/certhash/uEiAsGPzpiPGQzSlVHRXrUCT5EkTV7YFrV4VZ3hpEKTd_zg",
+ count: 1,
+ },
+ {
+ addr: "/dns/test.test/udp/1234/webrtc-direct/certhash/uEiAsGPzpiPGQzSlVHRXrUCT5EkTV7YFrV4VZ3hpEKTd_zg/certhash/uEiAsGPzpiPGQzSlVHRXrUCT5EkTV7ZGrV4VZ3hpEKTd_zg",
+ count: 2,
+ },
+ }
+
+ for _, addr := range invalid {
+ a := ma.StringCast(addr)
+ isValid, n := IsWebRTCDirectMultiaddr(a)
+ require.Equal(t, 0, n)
+ require.False(t, isValid)
+ }
+
+ for _, tc := range valid {
+ a := ma.StringCast(tc.addr)
+ isValid, n := IsWebRTCDirectMultiaddr(a)
+ require.Equal(t, tc.count, n)
+ require.True(t, isValid)
+ }
+}
+
+func TestTransportWebRTC_CanDial(t *testing.T) {
+ tr, _ := getTransport(t)
+ invalid := []string{
+ "/ip4/1.2.3.4/udp/1234/webrtc-direct",
+ "/dns/test.test/udp/1234/webrtc-direct",
+ }
+
+ valid := []string{
+ "/ip4/1.2.3.4/udp/1234/webrtc-direct/certhash/uEiAsGPzpiPGQzSlVHRXrUCT5EkTV7YFrV4VZ3hpEKTd_zg",
+ "/ip6/0:0:0:0:0:0:0:1/udp/1234/webrtc-direct/certhash/uEiAsGPzpiPGQzSlVHRXrUCT5EkTV7YFrV4VZ3hpEKTd_zg",
+ "/ip6/::1/udp/1234/webrtc-direct/certhash/uEiAsGPzpiPGQzSlVHRXrUCT5EkTV7YFrV4VZ3hpEKTd_zg",
+ "/dns/test.test/udp/1234/webrtc-direct/certhash/uEiAsGPzpiPGQzSlVHRXrUCT5EkTV7YFrV4VZ3hpEKTd_zg",
+ }
+
+ for _, addr := range invalid {
+ a := ma.StringCast(addr)
+ require.False(t, tr.CanDial(a))
+ }
+
+ for _, addr := range valid {
+ a := ma.StringCast(addr)
+ require.True(t, tr.CanDial(a), addr)
+ }
+}
+
+func TestTransportAddCertHasher(t *testing.T) {
+ tr, _ := getTransport(t)
+ addrs := []string{
+ "/ip4/1.2.3.4/udp/1/webrtc-direct",
+ "/ip6/1::3/udp/2/webrtc-direct",
+ }
+ for _, a := range addrs {
+ addr, added := tr.AddCertHashes(ma.StringCast(a))
+ require.True(t, added)
+ _, err := addr.ValueForProtocol(ma.P_CERTHASH)
+ require.NoError(t, err)
+ require.True(t, strings.HasPrefix(addr.String(), a))
+ }
+}
+
+func TestTransportWebRTC_ListenFailsOnNonWebRTCMultiaddr(t *testing.T) {
+ tr, _ := getTransport(t)
+ testAddrs := []string{
+ "/ip4/0.0.0.0/udp/0",
+ "/ip4/0.0.0.0/tcp/0/wss",
+ }
+ for _, addr := range testAddrs {
+ listenMultiaddr, err := ma.NewMultiaddr(addr)
+ require.NoError(t, err)
+ listener, err := tr.Listen(listenMultiaddr)
+ require.Error(t, err)
+ require.Nil(t, listener)
+ }
+}
+
+// Use assert (not require) inside goroutines; see https://github.com/stretchr/testify/issues/772#issuecomment-945166599
+func TestTransportWebRTC_DialFailsOnUnsupportedHashFunction(t *testing.T) {
+ tr, _ := getTransport(t)
+ hash := sha3.New512()
+ certhash := func() string {
+ _, err := hash.Write([]byte("test-data"))
+ require.NoError(t, err)
+ mh, err := multihash.Encode(hash.Sum([]byte{}), multihash.SHA3_512)
+ require.NoError(t, err)
+ certhash, err := multibase.Encode(multibase.Base58BTC, mh)
+ require.NoError(t, err)
+ return certhash
+ }()
+ testaddr, err := ma.NewMultiaddr("/ip4/1.2.3.4/udp/1234/webrtc-direct/certhash/" + certhash)
+ require.NoError(t, err)
+ _, err = tr.Dial(context.Background(), testaddr, "")
+ require.ErrorContains(t, err, "unsupported hash function")
+}
+
+func TestTransportWebRTC_CanListenSingle(t *testing.T) {
+ tr, listeningPeer := getTransport(t)
+ tr1, connectingPeer := getTransport(t)
+ listenMultiaddr := ma.StringCast("/ip4/127.0.0.1/udp/0/webrtc-direct")
+
+ listener, err := tr.Listen(listenMultiaddr)
+ require.NoError(t, err)
+ defer listener.Close()
+
+ done := make(chan struct{})
+ go func() {
+ _, err := tr1.Dial(context.Background(), listener.Multiaddr(), listeningPeer)
+ assert.NoError(t, err)
+ close(done)
+ }()
+
+ conn, err := listener.Accept()
+ require.NoError(t, err)
+ require.NotNil(t, conn)
+
+ require.Equal(t, connectingPeer, conn.RemotePeer())
+ select {
+ case <-done:
+ case <-time.After(10 * time.Second):
+ t.FailNow()
+ }
+}
+
+// WithListenerMaxInFlightConnections sets the maximum number of connections that are in-flight, i.e.
+// being negotiated or waiting to be accepted.
+func WithListenerMaxInFlightConnections(m uint32) Option {
+ return func(t *WebRTCTransport) error {
+ if m == 0 {
+ t.maxInFlightConnections = DefaultMaxInFlightConnections
+ } else {
+ t.maxInFlightConnections = m
+ }
+ return nil
+ }
+}
+
+func TestTransportWebRTC_CanListenMultiple(t *testing.T) {
+ count := 3
+ tr, listeningPeer := getTransport(t, WithListenerMaxInFlightConnections(uint32(count)))
+
+ listenMultiaddr := ma.StringCast("/ip4/127.0.0.1/udp/0/webrtc-direct")
+ listener, err := tr.Listen(listenMultiaddr)
+ require.NoError(t, err)
+ defer listener.Close()
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ var wg sync.WaitGroup
+ go func() {
+ for i := 0; i < count; i++ {
+ conn, err := listener.Accept()
+ assert.NoError(t, err)
+ assert.NotNil(t, conn)
+ defer conn.Close()
+ }
+ wg.Wait()
+ cancel()
+ }()
+
+ for i := 0; i < count; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ ctr, _ := getTransport(t)
+ conn, err := ctr.Dial(ctx, listener.Multiaddr(), listeningPeer)
+ select {
+ case <-ctx.Done():
+ default:
+ assert.NoError(t, err)
+ assert.NotNil(t, conn)
+ t.Cleanup(func() { conn.Close() })
+ }
+ }()
+ }
+
+ select {
+ case <-ctx.Done():
+ case <-time.After(30 * time.Second):
+ t.Fatalf("timed out")
+ }
+}
+
+func TestTransportWebRTC_CanCreateSuccessiveConnections(t *testing.T) {
+ tr, listeningPeer := getTransport(t)
+ listenMultiaddr := ma.StringCast("/ip4/127.0.0.1/udp/0/webrtc-direct")
+ listener, err := tr.Listen(listenMultiaddr)
+ require.NoError(t, err)
+ defer listener.Close()
+
+ count := 2
+
+ var wg sync.WaitGroup
+ wg.Add(count)
+ go func() {
+ for i := 0; i < count; i++ {
+ ctr, _ := getTransport(t)
+ conn, err := ctr.Dial(context.Background(), listener.Multiaddr(), listeningPeer)
+ require.NoError(t, err)
+ require.Equal(t, conn.RemotePeer(), listeningPeer)
+ t.Cleanup(func() { conn.Close() })
+ wg.Done()
+ }
+ }()
+
+ for i := 0; i < count; i++ {
+ conn, err := listener.Accept()
+ require.NoError(t, err)
+ defer conn.Close()
+ }
+ wg.Wait()
+}
+
+func TestTransportWebRTC_ListenerCanCreateStreams(t *testing.T) {
+ tr, listeningPeer := getTransport(t)
+ tr1, connectingPeer := getTransport(t)
+ listenMultiaddr := ma.StringCast("/ip4/127.0.0.1/udp/0/webrtc-direct")
+ listener, err := tr.Listen(listenMultiaddr)
+ require.NoError(t, err)
+ defer listener.Close()
+
+ streamChan := make(chan network.MuxedStream)
+ go func() {
+ conn, err := tr1.Dial(context.Background(), listener.Multiaddr(), listeningPeer)
+ require.NoError(t, err)
+ t.Cleanup(func() { conn.Close() })
+ t.Logf("connection opened by dialer")
+
+ stream, err := conn.AcceptStream()
+ require.NoError(t, err)
+ t.Logf("dialer accepted stream")
+ streamChan <- stream
+ }()
+
+ conn, err := listener.Accept()
+ require.NoError(t, err)
+ defer conn.Close()
+ require.Equal(t, connectingPeer, conn.RemotePeer())
+ t.Logf("listener accepted connection")
+
+ stream, err := conn.OpenStream(context.Background())
+ require.NoError(t, err)
+ t.Logf("listener opened stream")
+ _, err = stream.Write([]byte("test"))
+ require.NoError(t, err)
+
+ var str network.MuxedStream
+ select {
+ case str = <-streamChan:
+ case <-time.After(3 * time.Second):
+ t.Fatal("stream opening timed out")
+ }
+ buf := make([]byte, 100)
+ stream.SetReadDeadline(time.Now().Add(3 * time.Second))
+ n, err := str.Read(buf)
+ require.NoError(t, err)
+ require.Equal(t, "test", string(buf[:n]))
+}
+
+func TestTransportWebRTC_DialerCanCreateStreams(t *testing.T) {
+ tr, listeningPeer := getTransport(t)
+ listenMultiaddr := ma.StringCast("/ip4/127.0.0.1/udp/0/webrtc-direct")
+ listener, err := tr.Listen(listenMultiaddr)
+ require.NoError(t, err)
+ defer listener.Close()
+
+ tr1, connectingPeer := getTransport(t)
+ done := make(chan struct{})
+
+ go func() {
+ lconn, err := listener.Accept()
+ require.NoError(t, err)
+ require.Equal(t, connectingPeer, lconn.RemotePeer())
+ defer lconn.Close()
+
+ stream, err := lconn.AcceptStream()
+ require.NoError(t, err)
+ buf := make([]byte, 100)
+ n, err := stream.Read(buf)
+ require.NoError(t, err)
+ require.Equal(t, "test", string(buf[:n]))
+
+ close(done)
+ }()
+
+ go func() {
+ conn, err := tr1.Dial(context.Background(), listener.Multiaddr(), listeningPeer)
+ require.NoError(t, err)
+ defer conn.Close()
+ t.Logf("dialer opened connection")
+ stream, err := conn.OpenStream(context.Background())
+ require.NoError(t, err)
+ t.Logf("dialer opened stream")
+ _, err = stream.Write([]byte("test"))
+ require.NoError(t, err)
+ <-done
+ }()
+
+ select {
+ case <-done:
+ case <-time.After(10 * time.Second):
+ t.Fatal("timed out")
+ }
+}
+
+func TestTransportWebRTC_DialerCanCreateStreamsMultiple(t *testing.T) {
+ tr, listeningPeer := getTransport(t)
+ listenMultiaddr := ma.StringCast("/ip4/127.0.0.1/udp/0/webrtc-direct")
+ listener, err := tr.Listen(listenMultiaddr)
+ require.NoError(t, err)
+ defer listener.Close()
+
+ tr1, connectingPeer := getTransport(t)
+ readerDone := make(chan struct{})
+
+ const (
+ numListeners = 10
+ numStreams = 100
+ numWriters = 10
+ size = 20 << 10
+ )
+
+ go func() {
+ lconn, err := listener.Accept()
+ require.NoError(t, err)
+ require.Equal(t, connectingPeer, lconn.RemotePeer())
+ defer lconn.Close()
+ var wg sync.WaitGroup
+ var doneStreams atomic.Int32
+ for i := 0; i < numListeners; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for {
+ var nn int32
+ if nn = doneStreams.Add(1); nn > int32(numStreams) {
+ return
+ }
+ s, err := lconn.AcceptStream()
+ require.NoError(t, err)
+ n, err := io.Copy(s, s)
+ require.Equal(t, n, int64(size))
+ require.NoError(t, err)
+ s.Close()
+ }
+ }()
+ }
+ wg.Wait()
+ readerDone <- struct{}{}
+ }()
+
+ conn, err := tr1.Dial(context.Background(), listener.Multiaddr(), listeningPeer)
+ require.NoError(t, err)
+ defer conn.Close()
+
+ var writerWG sync.WaitGroup
+ var cnt atomic.Int32
+ var streamsStarted atomic.Int32
+ for i := 0; i < numWriters; i++ {
+ writerWG.Add(1)
+ go func() {
+ defer writerWG.Done()
+ buf := make([]byte, size)
+ for {
+ var nn int32
+ if nn = streamsStarted.Add(1); nn > int32(numStreams) {
+ return
+ }
+ rand.Read(buf)
+
+ s, err := conn.OpenStream(context.Background())
+ require.NoError(t, err)
+ n, err := s.Write(buf)
+ require.Equal(t, n, size)
+ require.NoError(t, err)
+ s.CloseWrite()
+ resp := make([]byte, size+10)
+ n, err = io.ReadFull(s, resp)
+ require.ErrorIs(t, err, io.ErrUnexpectedEOF)
+ require.Equal(t, n, size)
+ if string(buf) != string(resp[:size]) {
+ t.Errorf("bytes not equal: %d %d", len(buf), len(resp))
+ }
+ s.Close()
+ t.Log("completed stream: ", cnt.Add(1), s.(*stream).id)
+ }
+ }()
+ }
+ writerWG.Wait()
+ select {
+ case <-readerDone:
+ case <-time.After(100 * time.Second):
+ t.Fatal("timed out")
+ }
+}
+
+func TestTransportWebRTC_Deadline(t *testing.T) {
+ tr, listeningPeer := getTransport(t)
+ listenMultiaddr := ma.StringCast("/ip4/127.0.0.1/udp/0/webrtc-direct")
+ listener, err := tr.Listen(listenMultiaddr)
+ require.NoError(t, err)
+ defer listener.Close()
+ tr1, connectingPeer := getTransport(t)
+
+ t.Run("SetReadDeadline", func(t *testing.T) {
+ go func() {
+ lconn, err := listener.Accept()
+ require.NoError(t, err)
+ t.Cleanup(func() { lconn.Close() })
+ require.Equal(t, connectingPeer, lconn.RemotePeer())
+ _, err = lconn.AcceptStream()
+ require.NoError(t, err)
+ }()
+
+ conn, err := tr1.Dial(context.Background(), listener.Multiaddr(), listeningPeer)
+ require.NoError(t, err)
+ defer conn.Close()
+ stream, err := conn.OpenStream(context.Background())
+ require.NoError(t, err)
+
+ // deadline set to the past
+ stream.SetReadDeadline(time.Now().Add(-200 * time.Millisecond))
+ _, err = stream.Read([]byte{0, 0})
+ require.ErrorIs(t, err, os.ErrDeadlineExceeded)
+
+ // future deadline exceeded
+ stream.SetReadDeadline(time.Now().Add(100 * time.Millisecond))
+ _, err = stream.Read([]byte{0, 0})
+ require.ErrorIs(t, err, os.ErrDeadlineExceeded)
+ })
+
+ t.Run("SetWriteDeadline", func(t *testing.T) {
+ go func() {
+ lconn, err := listener.Accept()
+ require.NoError(t, err)
+ t.Cleanup(func() { lconn.Close() })
+ require.Equal(t, connectingPeer, lconn.RemotePeer())
+ _, err = lconn.AcceptStream()
+ require.NoError(t, err)
+ }()
+
+ conn, err := tr1.Dial(context.Background(), listener.Multiaddr(), listeningPeer)
+ require.NoError(t, err)
+ defer conn.Close()
+ stream, err := conn.OpenStream(context.Background())
+ require.NoError(t, err)
+
+ stream.SetWriteDeadline(time.Now().Add(100 * time.Millisecond))
+ largeBuffer := make([]byte, 20*1024*1024)
+ _, err = stream.Write(largeBuffer)
+ require.ErrorIs(t, err, os.ErrDeadlineExceeded)
+
+ stream.SetWriteDeadline(time.Now().Add(-200 * time.Millisecond))
+ smallBuffer := make([]byte, 1024)
+ _, err = stream.Write(smallBuffer)
+ require.ErrorIs(t, err, os.ErrDeadlineExceeded)
+ })
+}
+
+func TestTransportWebRTC_StreamWriteBufferContention(t *testing.T) {
+ tr, listeningPeer := getTransport(t)
+ listenMultiaddr := ma.StringCast("/ip4/127.0.0.1/udp/0/webrtc-direct")
+ listener, err := tr.Listen(listenMultiaddr)
+ require.NoError(t, err)
+ defer listener.Close()
+
+ tr1, connectingPeer := getTransport(t)
+
+ var wg sync.WaitGroup
+ wg.Add(3)
+ go func() {
+ defer wg.Done()
+ lconn, err := listener.Accept()
+ require.NoError(t, err)
+ t.Cleanup(func() { lconn.Close() })
+ require.Equal(t, connectingPeer, lconn.RemotePeer())
+ for i := 0; i < 2; i++ {
+ go func() {
+ defer wg.Done()
+ _, err := lconn.AcceptStream()
+ require.NoError(t, err)
+ }()
+ }
+ }()
+
+ conn, err := tr1.Dial(context.Background(), listener.Multiaddr(), listeningPeer)
+ require.NoError(t, err)
+ defer conn.Close()
+
+ errC := make(chan error)
+ // writers
+ for i := 0; i < 2; i++ {
+ go func() {
+ stream, err := conn.OpenStream(context.Background())
+ require.NoError(t, err)
+
+ stream.SetWriteDeadline(time.Now().Add(200 * time.Millisecond))
+ largeBuffer := make([]byte, 2*1024*1024)
+ _, err = stream.Write(largeBuffer)
+ errC <- err
+ }()
+ }
+
+ require.ErrorIs(t, <-errC, os.ErrDeadlineExceeded)
+ require.ErrorIs(t, <-errC, os.ErrDeadlineExceeded)
+ wg.Wait()
+}
+
+func TestTransportWebRTC_RemoteReadsAfterClose(t *testing.T) {
+ tr, listeningPeer := getTransport(t)
+ listenMultiaddr := ma.StringCast("/ip4/127.0.0.1/udp/0/webrtc-direct")
+ listener, err := tr.Listen(listenMultiaddr)
+ require.NoError(t, err)
+ defer listener.Close()
+
+ tr1, _ := getTransport(t)
+
+ done := make(chan error)
+ go func() {
+ lconn, err := listener.Accept()
+ if err != nil {
+ done <- err
+ return
+ }
+ t.Cleanup(func() { lconn.Close() })
+
+ stream, err := lconn.AcceptStream()
+ if err != nil {
+ done <- err
+ return
+ }
+ _, err = stream.Write([]byte{1, 2, 3, 4})
+ if err != nil {
+ done <- err
+ return
+ }
+ err = stream.Close()
+ if err != nil {
+ done <- err
+ return
+ }
+ close(done)
+ }()
+
+ conn, err := tr1.Dial(context.Background(), listener.Multiaddr(), listeningPeer)
+ require.NoError(t, err)
+ defer conn.Close()
+ // create a stream
+ stream, err := conn.OpenStream(context.Background())
+
+ require.NoError(t, err)
+ // require write and close to complete
+ require.NoError(t, <-done)
+ stream.SetReadDeadline(time.Now().Add(5 * time.Second))
+
+ buf := make([]byte, 10)
+ n, err := stream.Read(buf)
+ require.NoError(t, err)
+ require.Equal(t, 4, n)
+}
+
+func TestTransportWebRTC_RemoteReadsAfterClose2(t *testing.T) {
+ tr, listeningPeer := getTransport(t)
+ listenMultiaddr := ma.StringCast("/ip4/127.0.0.1/udp/0/webrtc-direct")
+ listener, err := tr.Listen(listenMultiaddr)
+ require.NoError(t, err)
+ defer listener.Close()
+
+ tr1, _ := getTransport(t)
+
+ awaitStreamClosure := make(chan struct{})
+ readBytesResult := make(chan int)
+ done := make(chan error)
+ go func() {
+ lconn, err := listener.Accept()
+ if err != nil {
+ done <- err
+ return
+ }
+ defer lconn.Close()
+ stream, err := lconn.AcceptStream()
+ if err != nil {
+ done <- err
+ return
+ }
+
+ <-awaitStreamClosure
+ buf := make([]byte, 16)
+ n, err := stream.Read(buf)
+ if err != nil {
+ done <- err
+ return
+ }
+ readBytesResult <- n
+ close(done)
+ }()
+
+ conn, err := tr1.Dial(context.Background(), listener.Multiaddr(), listeningPeer)
+ require.NoError(t, err)
+ defer conn.Close()
+ // create a stream
+ stream, err := conn.OpenStream(context.Background())
+ require.NoError(t, err)
+ _, err = stream.Write([]byte{1, 2, 3, 4})
+ require.NoError(t, err)
+ err = stream.Close()
+ require.NoError(t, err)
+ // signal stream closure
+ close(awaitStreamClosure)
+ require.Equal(t, 4, <-readBytesResult)
+}
+
+func TestTransportWebRTC_Close(t *testing.T) {
+ tr, listeningPeer := getTransport(t)
+ listenMultiaddr := ma.StringCast("/ip4/127.0.0.1/udp/0/webrtc-direct")
+ listener, err := tr.Listen(listenMultiaddr)
+ require.NoError(t, err)
+ defer listener.Close()
+
+ tr1, connectingPeer := getTransport(t)
+
+ t.Run("RemoteClosesStream", func(t *testing.T) {
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ lconn, err := listener.Accept()
+ require.NoError(t, err)
+ t.Cleanup(func() { lconn.Close() })
+ require.Equal(t, connectingPeer, lconn.RemotePeer())
+ stream, err := lconn.AcceptStream()
+ require.NoError(t, err)
+ time.Sleep(100 * time.Millisecond)
+ _ = stream.Close()
+ }()
+
+ buf := make([]byte, 2)
+
+ conn, err := tr1.Dial(context.Background(), listener.Multiaddr(), listeningPeer)
+ require.NoError(t, err)
+ defer conn.Close()
+ stream, err := conn.OpenStream(context.Background())
+ require.NoError(t, err)
+
+ err = stream.SetReadDeadline(time.Now().Add(2 * time.Second))
+ require.NoError(t, err)
+ _, err = stream.Read(buf)
+ require.ErrorIs(t, err, io.EOF)
+
+ wg.Wait()
+ })
+}
+
+func TestTransportWebRTC_PeerConnectionDTLSFailed(t *testing.T) {
+ tr, listeningPeer := getTransport(t)
+ listenMultiaddr := ma.StringCast("/ip4/127.0.0.1/udp/0/webrtc-direct")
+ ln, err := tr.Listen(listenMultiaddr)
+ require.NoError(t, err)
+ defer ln.Close()
+
+ encoded, err := hex.DecodeString("ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad")
+ require.NoError(t, err)
+ encodedCerthash, err := multihash.Encode(encoded, multihash.SHA2_256)
+ require.NoError(t, err)
+ badEncodedCerthash, err := multibase.Encode(multibase.Base58BTC, encodedCerthash)
+ require.NoError(t, err)
+ badCerthash, err := ma.NewMultiaddr(fmt.Sprintf("/certhash/%s", badEncodedCerthash))
+ require.NoError(t, err)
+ badMultiaddr, _ := ma.SplitFunc(ln.Multiaddr(), func(c ma.Component) bool { return c.Protocol().Code == ma.P_CERTHASH })
+ badMultiaddr = badMultiaddr.Encapsulate(badCerthash)
+
+ tr1, _ := getTransport(t)
+ conn, err := tr1.Dial(context.Background(), badMultiaddr, listeningPeer)
+ require.Error(t, err)
+ require.ErrorContains(t, err, "failed")
+ require.Nil(t, conn)
+}
+
+func newUDPConnLocalhost(t testing.TB) *net.UDPConn {
+ t.Helper()
+ conn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0})
+ require.NoError(t, err)
+ t.Cleanup(func() { conn.Close() })
+ return conn
+}
+
+func TestConnectionTimeoutOnListener(t *testing.T) {
+ tr, listeningPeer := getTransport(t)
+ tr.peerConnectionTimeouts.Disconnect = 100 * time.Millisecond
+ tr.peerConnectionTimeouts.Failed = 150 * time.Millisecond
+ tr.peerConnectionTimeouts.Keepalive = 50 * time.Millisecond
+
+ listenMultiaddr := ma.StringCast("/ip4/127.0.0.1/udp/0/webrtc-direct")
+ ln, err := tr.Listen(listenMultiaddr)
+ require.NoError(t, err)
+ defer ln.Close()
+
+ var drop atomic.Bool
+ proxy := quicproxy.Proxy{
+ Conn: newUDPConnLocalhost(t),
+ ServerAddr: ln.Addr().(*net.UDPAddr),
+ DropPacket: func(_ quicproxy.Direction, _, _ net.Addr, _ []byte) bool { return drop.Load() },
+ }
+ require.NoError(t, proxy.Start())
+ defer proxy.Close()
+
+ tr1, connectingPeer := getTransport(t)
+ go func() {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ addr, err := manet.FromNetAddr(proxy.LocalAddr())
+ require.NoError(t, err)
+ _, webrtcComponent := ma.SplitFunc(ln.Multiaddr(), func(c ma.Component) bool { return c.Protocol().Code == ma.P_WEBRTC_DIRECT })
+ addr = addr.Encapsulate(webrtcComponent)
+ conn, err := tr1.Dial(ctx, addr, listeningPeer)
+ require.NoError(t, err)
+ t.Cleanup(func() { conn.Close() })
+ str, err := conn.OpenStream(ctx)
+ require.NoError(t, err)
+ str.Write([]byte("foobar"))
+ }()
+
+ conn, err := ln.Accept()
+ require.NoError(t, err)
+ require.Equal(t, connectingPeer, conn.RemotePeer())
+ defer conn.Close()
+
+ str, err := conn.AcceptStream()
+ require.NoError(t, err)
+ _, err = str.Write([]byte("test"))
+ require.NoError(t, err)
+ // start dropping all packets
+ drop.Store(true)
+ start := time.Now()
+ for {
+ if _, err := str.Write([]byte("test")); err != nil {
+ if os.IsTimeout(err) {
+ break
+ }
+ // If we write while the connection times out, sctp returns a
+ // "stream closed" error. This occurs concurrently with the
+ // connection-timeout callback, so sleep briefly and test once
+ // more that we surface the correct error.
+ if strings.Contains(err.Error(), "stream closed") {
+ time.Sleep(50 * time.Millisecond)
+ _, err = str.Write([]byte("test"))
+ require.True(t, os.IsTimeout(err), "invalid error type: %v", err)
+ } else {
+ t.Fatal("invalid error type", err)
+ }
+ break
+ }
+
+ if time.Since(start) > 5*time.Second {
+ t.Fatal("timeout")
+ }
+ // make sure to not write too often, we don't want to fill the flow control window
+ time.Sleep(20 * time.Millisecond)
+ }
+ // make sure that accepting a stream also returns an error...
+ _, err = conn.AcceptStream()
+ require.True(t, os.IsTimeout(err))
+ // ... as well as opening a new stream
+ _, err = conn.OpenStream(context.Background())
+ require.True(t, os.IsTimeout(err))
+}
+
+func TestMaxInFlightRequests(t *testing.T) {
+ const count = 3
+ tr, listeningPeer := getTransport(t,
+ WithListenerMaxInFlightConnections(count),
+ )
+ ln, err := tr.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/webrtc-direct"))
+ require.NoError(t, err)
+ defer ln.Close()
+
+ var wg sync.WaitGroup
+ var success, fails atomic.Int32
+ for i := 0; i < count+1; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ dialer, _ := getTransport(t)
+ ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+ defer cancel()
+ if conn, err := dialer.Dial(ctx, ln.Multiaddr(), listeningPeer); err == nil {
+ success.Add(1)
+ t.Cleanup(func() { conn.Close() })
+ } else {
+ t.Log("failed to dial:", err)
+ fails.Add(1)
+ }
+ }()
+ }
+ wg.Wait()
+ require.Equal(t, count, int(success.Load()), "expected exactly 3 dial successes")
+ require.Equal(t, 1, int(fails.Load()), "expected exactly 1 dial failure")
+}
+
+func TestGenUfrag(t *testing.T) {
+ for i := 0; i < 10; i++ {
+ s := genUfrag()
+ require.True(t, strings.HasPrefix(s, "libp2p+webrtc+v1/"))
+ }
+}
+
+func TestManyConnections(t *testing.T) {
+ var listeners []tpt.Listener
+ var listenerPeerIDs []peer.ID
+
+ const numListeners = 5
+ const dialersPerListener = 5
+ const connsPerDialer = 10
+ errCh := make(chan error, 10*numListeners*dialersPerListener*connsPerDialer)
+ successCh := make(chan struct{}, 10*numListeners*dialersPerListener*connsPerDialer)
+
+ for i := 0; i < numListeners; i++ {
+ tr, lp := getTransport(t)
+ listenerPeerIDs = append(listenerPeerIDs, lp)
+ ln, err := tr.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/webrtc-direct"))
+ require.NoError(t, err)
+ defer ln.Close()
+ listeners = append(listeners, ln)
+ }
+
+ runListenConn := func(conn tpt.CapableConn) {
+ defer conn.Close()
+ s, err := conn.AcceptStream()
+ if err != nil {
+ t.Errorf("accept stream failed for listener: %s", err)
+ errCh <- err
+ return
+ }
+ var b [4]byte
+ if _, err := s.Read(b[:]); err != nil {
+ t.Errorf("read stream failed for listener: %s", err)
+ errCh <- err
+ return
+ }
+ s.Write(b[:])
+ _, err = s.Read(b[:]) // peer will close the connection after read
+ if !assert.Error(t, err) {
+ err = errors.New("invalid read: expected conn to close")
+ errCh <- err
+ return
+ }
+ successCh <- struct{}{}
+ }
+
+ runDialConn := func(conn tpt.CapableConn) {
+ defer conn.Close()
+
+ s, err := conn.OpenStream(context.Background())
+ if err != nil {
+ t.Errorf("accept stream failed for listener: %s", err)
+ errCh <- err
+ return
+ }
+ var b [4]byte
+ if _, err := s.Write(b[:]); err != nil {
+ t.Errorf("write stream failed for dialer: %s", err)
+ errCh <- err
+ return
+ }
+ if _, err := s.Read(b[:]); err != nil {
+ t.Errorf("read stream failed for dialer: %s", err)
+ errCh <- err
+ return
+ }
+ s.Close()
+ }
+
+ runListener := func(ln tpt.Listener) {
+ for i := 0; i < dialersPerListener*connsPerDialer; i++ {
+ conn, err := ln.Accept()
+ if err != nil {
+ t.Errorf("listener failed to accept conneciton: %s", err)
+ return
+ }
+ go runListenConn(conn)
+ }
+ }
+
+ runDialer := func(ln tpt.Listener, lp peer.ID) {
+ tp, _ := getTransport(t)
+ for i := 0; i < connsPerDialer; i++ {
+ // We want to test for deadlocks, set a high timeout
+ ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
+ conn, err := tp.Dial(ctx, ln.Multiaddr(), lp)
+ if err != nil {
+ t.Errorf("dial failed: %s", err)
+ errCh <- err
+ cancel()
+ return
+ }
+ runDialConn(conn)
+ cancel()
+ }
+ }
+
+ for i := 0; i < numListeners; i++ {
+ go runListener(listeners[i])
+ }
+ for i := 0; i < numListeners; i++ {
+ for j := 0; j < dialersPerListener; j++ {
+ go runDialer(listeners[i], listenerPeerIDs[i])
+ }
+ }
+
+ for i := 0; i < numListeners*dialersPerListener*connsPerDialer; i++ {
+ select {
+ case <-successCh:
+ t.Log("completed conn: ", i)
+ case err := <-errCh:
+ t.Fatalf("failed: %s", err)
+ case <-time.After(300 * time.Second):
+ t.Fatalf("timed out")
+ }
+ }
+}
+
+func TestConnectionClosedWhenRemoteCloses(t *testing.T) {
+ listenT, p := getTransport(t)
+ listener, err := listenT.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/webrtc-direct"))
+ require.NoError(t, err)
+ defer listener.Close()
+
+ accepted := make(chan struct{})
+ dialer, _ := getTransport(t)
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ c, err := listener.Accept()
+ close(accepted)
+ if !assert.NoError(t, err) {
+ return
+ }
+ assert.Eventually(t, func() bool {
+ return c.IsClosed()
+ }, 5*time.Second, 50*time.Millisecond)
+ }()
+
+ c, err := dialer.Dial(context.Background(), listener.Multiaddr(), p)
+ require.NoError(t, err)
+ <-accepted
+ c.Close()
+ wg.Wait()
+}
diff --git a/p2p/transport/webrtc/udpmux/mux.go b/p2p/transport/webrtc/udpmux/mux.go
new file mode 100644
index 0000000000..12b7eea60d
--- /dev/null
+++ b/p2p/transport/webrtc/udpmux/mux.go
@@ -0,0 +1,302 @@
+// Package udpmux contains the logic for multiplexing multiple WebRTC (ICE)
+// connections over a single UDP socket.
+package udpmux
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "strings"
+ "sync"
+
+ pool "github.com/libp2p/go-buffer-pool"
+ logging "github.com/libp2p/go-libp2p/gologshim"
+ "github.com/pion/ice/v4"
+ "github.com/pion/stun"
+)
+
+var log = logging.Logger("webrtc-udpmux")
+
+// ReceiveBufSize is the size of the buffer used to receive packets from the PacketConn.
+// It is fine for this number to be higher than the actual path MTU as this value is not
+// used to decide the packet size on the write path.
+const ReceiveBufSize = 1500
+
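+// Candidate is a (ufrag, remote address) pair for a newly observed incoming
+// connection attempt, surfaced to the listener via Accept.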
+type Candidate struct {
+ Ufrag string
+ Addr *net.UDPAddr
+}
+
+// UDPMux multiplexes multiple ICE connections over a single net.PacketConn,
+// generally a UDP socket.
+//
+// The connections are indexed by (ufrag, IP address family) and by remote
+// address from which the connection has received valid STUN/RTC packets.
+//
+// When a new packet is received on the underlying net.PacketConn, we
+// first check the address map to see if there is a connection associated with the
+// remote address:
+// If found, we pass the packet to that connection.
+// Otherwise, we check to see if the packet is a STUN packet.
+// If it is, we read the ufrag from the STUN packet and use it to check if there
+// is a connection associated with the (ufrag, IP address family) pair.
+// If found we add the association to the address map.
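+//
+// A minimal usage sketch (illustrative only: handleCandidate is a hypothetical
+// callback, and pion itself drives GetConn through the ice.UDPMux interface):
+//
+//	mux := udpmux.NewUDPMux(socket)
+//	mux.Start()
+//	defer mux.Close()
+//	for {
+//		candidate, err := mux.Accept(ctx)
+//		if err != nil {
+//			return
+//		}
+//		go handleCandidate(candidate.Ufrag, candidate.Addr)
+//	}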
+type UDPMux struct {
+ socket net.PacketConn
+
+ queue chan Candidate
+
+ mx sync.Mutex
+ // ufragMap allows us to multiplex incoming STUN packets based on ufrag
+ ufragMap map[ufragConnKey]*muxedConnection
+ // addrMap allows us to correctly direct incoming packets after the connection
+ // is established, when the ufrag is no longer present on all packets
+ addrMap map[string]*muxedConnection
+ // ufragAddrMap allows cleaning up all addresses from the addrMap once the connection is closed
+ // During the ICE connectivity checks, the same ufrag might be used on multiple addresses.
+ ufragAddrMap map[ufragConnKey][]net.Addr
+
+ // the context controls the lifecycle of the mux
+ wg sync.WaitGroup
+ ctx context.Context
+ cancel context.CancelFunc
+}
+
+var _ ice.UDPMux = &UDPMux{}
+
+func NewUDPMux(socket net.PacketConn) *UDPMux {
+ ctx, cancel := context.WithCancel(context.Background())
+ mux := &UDPMux{
+ ctx: ctx,
+ cancel: cancel,
+ socket: socket,
+ ufragMap: make(map[ufragConnKey]*muxedConnection),
+ addrMap: make(map[string]*muxedConnection),
+ ufragAddrMap: make(map[ufragConnKey][]net.Addr),
+ queue: make(chan Candidate, 32),
+ }
+
+ return mux
+}
+
+func (mux *UDPMux) Start() {
+ mux.wg.Add(1)
+ go func() {
+ defer mux.wg.Done()
+ mux.readLoop()
+ }()
+}
+
+// GetListenAddresses implements ice.UDPMux
+func (mux *UDPMux) GetListenAddresses() []net.Addr {
+ return []net.Addr{mux.socket.LocalAddr()}
+}
+
+// GetConn implements ice.UDPMux
+// It creates a net.PacketConn for a given ufrag if an existing one cannot be found.
+// We differentiate IPv4 and IPv6 addresses, since a remote can be reachable at multiple
+// UDP addresses of the same IP address family (e.g. server-reflexive and peer-reflexive addresses).
+func (mux *UDPMux) GetConn(ufrag string, addr net.Addr) (net.PacketConn, error) {
+ a, ok := addr.(*net.UDPAddr)
+ if !ok {
+ return nil, fmt.Errorf("unexpected address type: %T", addr)
+ }
+ select {
+ case <-mux.ctx.Done():
+ return nil, io.ErrClosedPipe
+ default:
+ isIPv6 := ok && a.IP.To4() == nil
+ _, conn := mux.getOrCreateConn(ufrag, isIPv6, mux, addr)
+ return conn, nil
+ }
+}
+
+// Close implements ice.UDPMux
+func (mux *UDPMux) Close() error {
+ select {
+ case <-mux.ctx.Done():
+ return nil
+ default:
+ }
+ mux.cancel()
+ mux.socket.Close()
+ mux.wg.Wait()
+ return nil
+}
+
+// writeTo writes a packet to the underlying net.PacketConn
+func (mux *UDPMux) writeTo(buf []byte, addr net.Addr) (int, error) {
+ return mux.socket.WriteTo(buf, addr)
+}
+
+func (mux *UDPMux) readLoop() {
+ for {
+ select {
+ case <-mux.ctx.Done():
+ return
+ default:
+ }
+
+ buf := pool.Get(ReceiveBufSize)
+
+ n, addr, err := mux.socket.ReadFrom(buf)
+ if err != nil {
+ if strings.Contains(err.Error(), "use of closed network connection") || errors.Is(err, context.Canceled) {
+ log.Debug("readLoop exiting: socket closed", "local_addr", mux.socket.LocalAddr())
+ } else {
+ log.Error("error reading from socket", "local_addr", mux.socket.LocalAddr(), "error", err)
+ }
+ pool.Put(buf)
+ return
+ }
+ buf = buf[:n]
+
+ if processed := mux.processPacket(buf, addr); !processed {
+ pool.Put(buf)
+ }
+ }
+}
+
+func (mux *UDPMux) processPacket(buf []byte, addr net.Addr) (processed bool) {
+ udpAddr, ok := addr.(*net.UDPAddr)
+ if !ok {
+ log.Error("received a non-UDP address", "addr", addr)
+ return false
+ }
+ isIPv6 := udpAddr.IP.To4() == nil
+
+ // Connections are indexed by remote address. We first
+ // check if the remote address has a connection associated
+ // with it. If yes, we push the received packet to the connection
+ mux.mx.Lock()
+ conn, ok := mux.addrMap[addr.String()]
+ mux.mx.Unlock()
+ if ok {
+ if err := conn.Push(buf, addr); err != nil {
+ log.Debug("could not push packet", "error", err)
+ return false
+ }
+ return true
+ }
+
+ if !stun.IsMessage(buf) {
+ log.Debug("incoming message is not a STUN message")
+ return false
+ }
+
+ msg := &stun.Message{Raw: buf}
+ if err := msg.Decode(); err != nil {
+ log.Debug("failed to decode STUN message", "error", err)
+ return false
+ }
+ if msg.Type != stun.BindingRequest {
+ log.Debug("incoming message should be a STUN binding request", "got_type", msg.Type)
+ return false
+ }
+
+ ufrag, err := ufragFromSTUNMessage(msg)
+ if err != nil {
+ log.Debug("could not find STUN username", "error", err)
+ return false
+ }
+
+ connCreated, conn := mux.getOrCreateConn(ufrag, isIPv6, udpAddr)
+ if connCreated {
+ select {
+ case mux.queue <- Candidate{Addr: udpAddr, Ufrag: ufrag}:
+ default:
+ log.Debug("queue full, dropping incoming candidate", "ufrag", ufrag, "addr", udpAddr)
+ conn.Close()
+ return false
+ }
+ }
+
+ if err := conn.Push(buf, addr); err != nil {
+ log.Debug("could not push packet", "error", err)
+ return false
+ }
+ return true
+}
+
+func (mux *UDPMux) Accept(ctx context.Context) (Candidate, error) {
+ select {
+ case c := <-mux.queue:
+ return c, nil
+ case <-ctx.Done():
+ return Candidate{}, ctx.Err()
+ case <-mux.ctx.Done():
+ return Candidate{}, mux.ctx.Err()
+ }
+}
+
+type ufragConnKey struct {
+ ufrag string
+ isIPv6 bool
+}
+
+// ufragFromSTUNMessage returns the local ufrag
+// from the STUN username attribute. The local ufrag is the ufrag of the
+// peer which initiated the connectivity check, e.g. in a connectivity
+// check from A to B, the username attribute will be B_ufrag:A_ufrag,
+// with the local ufrag value being A_ufrag. In case of ice-lite, the
+// local ufrag value will always be the remote peer's ufrag, since ICE-lite
+// implementations do not generate connectivity checks. In our specific
+// case, since the local and remote ufrags are equal, we can return
+// either value.
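+//
+// For example, given a username attribute of "B_ufrag:A_ufrag", the returned
+// value is "A_ufrag".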
+func ufragFromSTUNMessage(msg *stun.Message) (string, error) {
+ attr, err := msg.Get(stun.AttrUsername)
+ if err != nil {
+ return "", err
+ }
+ index := bytes.Index(attr, []byte{':'})
+ if index == -1 {
+ return "", fmt.Errorf("invalid STUN username attribute")
+ }
+ return string(attr[index+1:]), nil
+}
+
+// RemoveConnByUfrag removes the connection associated with the ufrag and all the
+// addresses associated with that connection. This method is called by pion when
+// a peerconnection is closed.
+func (mux *UDPMux) RemoveConnByUfrag(ufrag string) {
+ if ufrag == "" {
+ return
+ }
+
+ mux.mx.Lock()
+ defer mux.mx.Unlock()
+
+ for _, isIPv6 := range [...]bool{true, false} {
+ key := ufragConnKey{ufrag: ufrag, isIPv6: isIPv6}
+ if conn, ok := mux.ufragMap[key]; ok {
+ delete(mux.ufragMap, key)
+ for _, addr := range mux.ufragAddrMap[key] {
+ delete(mux.addrMap, addr.String())
+ }
+ delete(mux.ufragAddrMap, key)
+ conn.close()
+ }
+ }
+}
+
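+// getOrCreateConn returns the muxedConnection for the (ufrag, address family)
+// pair, creating one if none exists, and records addr in the address maps so
+// that future packets from addr are routed to this connection. The returned
+// boolean reports whether a new connection was created.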
+func (mux *UDPMux) getOrCreateConn(ufrag string, isIPv6 bool, addr net.Addr) (created bool, _ *muxedConnection) {
+ key := ufragConnKey{ufrag: ufrag, isIPv6: isIPv6}
+
+ mux.mx.Lock()
+ defer mux.mx.Unlock()
+
+ if conn, ok := mux.ufragMap[key]; ok {
+ mux.addrMap[addr.String()] = conn
+ mux.ufragAddrMap[key] = append(mux.ufragAddrMap[key], addr)
+ return false, conn
+ }
+
+ conn := newMuxedConnection(mux, ufrag)
+ mux.ufragMap[key] = conn
+ mux.addrMap[addr.String()] = conn
+ mux.ufragAddrMap[key] = append(mux.ufragAddrMap[key], addr)
+ return true, conn
+}
diff --git a/p2p/transport/webrtc/udpmux/mux_test.go b/p2p/transport/webrtc/udpmux/mux_test.go
new file mode 100644
index 0000000000..b75f3e8302
--- /dev/null
+++ b/p2p/transport/webrtc/udpmux/mux_test.go
@@ -0,0 +1,274 @@
+package udpmux
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "testing"
+ "time"
+
+ "github.com/pion/stun"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func getSTUNBindingRequest(ufrag string) *stun.Message {
+ msg := stun.New()
+ msg.SetType(stun.BindingRequest)
+ uattr := stun.RawAttribute{
+ Type: stun.AttrUsername,
+ Value: []byte(fmt.Sprintf("%s:%s", ufrag, ufrag)), // This is the format we expect in our connections
+ }
+ uattr.AddTo(msg)
+ msg.Encode()
+ return msg
+}
+
+func setupMapping(t *testing.T, ufrag string, from net.PacketConn, m *UDPMux) {
+ t.Helper()
+ msg := getSTUNBindingRequest(ufrag)
+ _, err := from.WriteTo(msg.Raw, m.GetListenAddresses()[0])
+ require.NoError(t, err)
+}
+
+func newPacketConn(t *testing.T) net.PacketConn {
+ t.Helper()
+ udpPort0 := &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 0}
+ c, err := net.ListenUDP("udp", udpPort0)
+ require.NoError(t, err)
+ t.Cleanup(func() { c.Close() })
+ return c
+}
+
+func TestAccept(t *testing.T) {
+ c := newPacketConn(t)
+ defer c.Close()
+ m := NewUDPMux(c)
+ m.Start()
+ defer m.Close()
+
+ ufrags := []string{"a", "b", "c", "d"}
+ conns := make([]net.PacketConn, len(ufrags))
+ for i, ufrag := range ufrags {
+ conns[i] = newPacketConn(t)
+ setupMapping(t, ufrag, conns[i], m)
+ }
+ for i, ufrag := range ufrags {
+ c, err := m.Accept(context.Background())
+ require.NoError(t, err)
+ require.Equal(t, c.Ufrag, ufrag)
+ require.Equal(t, c.Addr, conns[i].LocalAddr())
+ }
+
+ for i, ufrag := range ufrags {
+ // should not be accepted
+ setupMapping(t, ufrag, conns[i], m)
+ ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
+ defer cancel()
+ _, err := m.Accept(ctx)
+ require.Error(t, err)
+
+ // should not be accepted
+ cc := newPacketConn(t)
+ setupMapping(t, ufrag, cc, m)
+ ctx, cancel = context.WithTimeout(context.Background(), 100*time.Millisecond)
+ defer cancel()
+ _, err = m.Accept(ctx)
+ require.Error(t, err)
+ }
+}
+
+func TestGetConn(t *testing.T) {
+ c := newPacketConn(t)
+ m := NewUDPMux(c)
+ m.Start()
+ defer m.Close()
+
+ ufrags := []string{"a", "b", "c", "d"}
+ conns := make([]net.PacketConn, len(ufrags))
+ for i, ufrag := range ufrags {
+ conns[i] = newPacketConn(t)
+ setupMapping(t, ufrag, conns[i], m)
+ }
+ for i, ufrag := range ufrags {
+ c, err := m.Accept(context.Background())
+ require.NoError(t, err)
+ require.Equal(t, c.Ufrag, ufrag)
+ require.Equal(t, c.Addr, conns[i].LocalAddr())
+ }
+
+ for i, ufrag := range ufrags {
+ c, err := m.GetConn(ufrag, conns[i].LocalAddr())
+ require.NoError(t, err)
+ msg := make([]byte, 100)
+ _, _, err = c.ReadFrom(msg)
+ require.NoError(t, err)
+ }
+
+ for i, ufrag := range ufrags {
+ cc := newPacketConn(t)
+ // map cc to ufrag and consume the STUN binding request pushed to the muxed connection
+ setupMapping(t, ufrag, cc, m)
+ mc, err := m.GetConn(ufrag, cc.LocalAddr())
+ require.NoError(t, err)
+ msg := make([]byte, 100)
+ _, _, err = mc.ReadFrom(msg)
+ require.NoError(t, err)
+
+ // Write from new connection should provide the new address on ReadFrom
+ _, err = cc.WriteTo([]byte("test1"), c.LocalAddr())
+ require.NoError(t, err)
+ n, addr, err := mc.ReadFrom(msg)
+ require.NoError(t, err)
+ require.Equal(t, addr, cc.LocalAddr())
+ require.Equal(t, "test1", string(msg[:n]))
+
+ // Write from original connection should provide the original address
+ _, err = conns[i].WriteTo([]byte("test2"), c.LocalAddr())
+ require.NoError(t, err)
+ n, addr, err = mc.ReadFrom(msg)
+ require.NoError(t, err)
+ require.Equal(t, addr, conns[i].LocalAddr())
+ require.Equal(t, "test2", string(msg[:n]))
+ }
+}
+
+func TestRemoveConnByUfrag(t *testing.T) {
+ c := newPacketConn(t)
+ m := NewUDPMux(c)
+ m.Start()
+ defer m.Close()
+
+ // Map a single ufrag to multiple addresses
+ ufrag := "a"
+ count := 10
+ conns := make([]net.PacketConn, count)
+ for i := 0; i < 10; i++ {
+ conns[i] = newPacketConn(t)
+ setupMapping(t, ufrag, conns[i], m)
+ }
+ mc, err := m.GetConn(ufrag, conns[0].LocalAddr())
+ require.NoError(t, err)
+ for i := 0; i < 10; i++ {
+ mc1, err := m.GetConn(ufrag, conns[i].LocalAddr())
+ require.NoError(t, err)
+ if mc1 != mc {
+ t.Fatalf("expected the two muxed connections to be same")
+ }
+ }
+
+ // Now remove the ufrag
+ m.RemoveConnByUfrag(ufrag)
+
+ // All connections should now be associated with b
+ ufrag = "b"
+ for i := 0; i < 10; i++ {
+ setupMapping(t, ufrag, conns[i], m)
+ }
+ mc, err = m.GetConn(ufrag, conns[0].LocalAddr())
+ require.NoError(t, err)
+ for i := 0; i < 10; i++ {
+ mc1, err := m.GetConn(ufrag, conns[i].LocalAddr())
+ require.NoError(t, err)
+ if mc1 != mc {
+ t.Fatalf("expected the two muxed connections to be same")
+ }
+ }
+
+ // Should be different even if the address is the same
+ mc1, err := m.GetConn("a", conns[0].LocalAddr())
+ require.NoError(t, err)
+ if mc1 == mc {
+ t.Fatalf("expected the two connections to be different")
+ }
+}
+
+func TestMuxedConnection(t *testing.T) {
+ c := newPacketConn(t)
+ m := NewUDPMux(c)
+ m.Start()
+ defer m.Close()
+
+ msgCount := 3
+ connCount := 3
+
+ ufrags := []string{"a", "b", "c"}
+ addrUfragMap := make(map[string]string)
+ ufragConnsMap := make(map[string][]net.PacketConn)
+ for _, ufrag := range ufrags {
+ for i := 0; i < connCount; i++ {
+ cc := newPacketConn(t)
+ addrUfragMap[cc.LocalAddr().String()] = ufrag
+ ufragConnsMap[ufrag] = append(ufragConnsMap[ufrag], cc)
+ }
+ }
+
+ done := make(chan bool, len(ufrags))
+ for _, ufrag := range ufrags {
+ go func(ufrag string) {
+ for _, cc := range ufragConnsMap[ufrag] {
+ setupMapping(t, ufrag, cc, m)
+ for j := 0; j < msgCount; j++ {
+ cc.WriteTo([]byte(ufrag), c.LocalAddr())
+ }
+ }
+ done <- true
+ }(ufrag)
+ }
+ for i := 0; i < len(ufrags); i++ {
+ <-done
+ }
+
+ for _, ufrag := range ufrags {
+ mc, err := m.GetConn(ufrag, c.LocalAddr()) // the address is irrelevant
+ require.NoError(t, err)
+ msgs := 0
+ stunRequests := 0
+ msg := make([]byte, 1500)
+ addrPacketCount := make(map[string]int)
+ for i := 0; i < connCount; i++ {
+ for j := 0; j < msgCount+1; j++ {
+ n, addr1, err := mc.ReadFrom(msg)
+ require.NoError(t, err)
+ require.Equal(t, addrUfragMap[addr1.String()], ufrag)
+ addrPacketCount[addr1.String()]++
+ if stun.IsMessage(msg[:n]) {
+ stunRequests++
+ } else {
+ msgs++
+ }
+ }
+ }
+ for addr, v := range addrPacketCount {
+ require.Equal(t, v, msgCount+1) // msgCount msgs + 1 STUN binding request
+ delete(addrUfragMap, addr)
+ }
+ require.Len(t, addrPacketCount, connCount)
+ }
+ require.Empty(t, addrUfragMap)
+}
+
+func TestRemovingUfragClosesConn(t *testing.T) {
+ c := newPacketConn(t)
+ m := NewUDPMux(c)
+ m.Start()
+ defer m.Close()
+ remoteAddr := &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 1234}
+ conn, err := m.GetConn("a", remoteAddr)
+ require.NoError(t, err)
+ defer conn.Close()
+
+ connClosed := make(chan bool)
+ go func() {
+ _, _, err := conn.ReadFrom(make([]byte, 100))
+ assert.ErrorIs(t, err, context.Canceled)
+ close(connClosed)
+ }()
+ require.NoError(t, err)
+ m.RemoveConnByUfrag("a")
+ select {
+ case <-connClosed:
+ case <-time.After(1 * time.Second):
+ t.Fatalf("expected the connection to be closed")
+ }
+}
diff --git a/p2p/transport/webrtc/udpmux/muxed_connection.go b/p2p/transport/webrtc/udpmux/muxed_connection.go
new file mode 100644
index 0000000000..4d560b68b3
--- /dev/null
+++ b/p2p/transport/webrtc/udpmux/muxed_connection.go
@@ -0,0 +1,119 @@
+package udpmux
+
+import (
+ "context"
+ "errors"
+ "net"
+ "time"
+
+ pool "github.com/libp2p/go-buffer-pool"
+)
+
+type packet struct {
+ buf []byte
+ addr net.Addr
+}
+
+const queueLen = 128
+
+// muxedConnection provides a net.PacketConn abstraction
+// over packetQueue and adds the ability to store addresses
+// from which this connection (indexed by ufrag) received
+// data.
+type muxedConnection struct {
+ ctx context.Context
+ cancel context.CancelFunc
+ queue chan packet
+ mux *UDPMux
+ ufrag string
+}
+
+var _ net.PacketConn = &muxedConnection{}
+
+func newMuxedConnection(mux *UDPMux, ufrag string) *muxedConnection {
+ ctx, cancel := context.WithCancel(mux.ctx)
+ return &muxedConnection{
+ ctx: ctx,
+ cancel: cancel,
+ queue: make(chan packet, queueLen),
+ mux: mux,
+ ufrag: ufrag,
+ }
+}
+
+func (c *muxedConnection) Push(buf []byte, addr net.Addr) error {
+ if c.ctx.Err() != nil {
+ return errors.New("closed")
+ }
+ select {
+ case c.queue <- packet{buf: buf, addr: addr}:
+ return nil
+ default:
+ return errors.New("queue full")
+ }
+}
+
+func (c *muxedConnection) ReadFrom(buf []byte) (int, net.Addr, error) {
+ select {
+ case p := <-c.queue:
+ n := copy(buf, p.buf) // this may discard the tail of the packet if buf is too short
+ if n < len(p.buf) {
+ log.Debug("short read", "had", len(p.buf), "read", n)
+ }
+ pool.Put(p.buf)
+ return n, p.addr, nil
+ case <-c.ctx.Done():
+ return 0, nil, c.ctx.Err()
+ }
+}
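+
+// Illustrative sketch of the producer/consumer flow through Push and
+// ReadFrom (a sketch only: `mux`, `raddr` and `incomingPacket` stand in for
+// the mux's read-loop state and are not part of this package):
+//
+//	mc := newMuxedConnection(mux, "ufrag")
+//	buf := pool.Get(1500)           // the read loop owns buf until Push succeeds
+//	n := copy(buf, incomingPacket)  // fill it with the received datagram
+//	_ = mc.Push(buf[:n], raddr)     // hand buf over to the connection's queue
+//	out := make([]byte, 1500)
+//	m, from, _ := mc.ReadFrom(out)  // copy the packet out; buf returns to the pool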
+
+func (c *muxedConnection) WriteTo(p []byte, addr net.Addr) (n int, err error) {
+ return c.mux.writeTo(p, addr)
+}
+
+func (c *muxedConnection) Close() error {
+ if c.ctx.Err() != nil {
+ return nil
+ }
+ // mux calls close to actually close the connection
+ //
+ // Removing the connection from the mux or closing the connection
+ // must trigger the other.
+ // Doing this here ensures we don't need to call both RemoveConnByUfrag
+ // and close on all code paths.
+ c.mux.RemoveConnByUfrag(c.ufrag)
+ return nil
+}
+
+// closes the connection. Must only be called by the mux.
+func (c *muxedConnection) close() {
+ c.cancel()
+ // drain the packet queue
+ for {
+ select {
+ case p := <-c.queue:
+ pool.Put(p.buf)
+ default:
+ return
+ }
+ }
+}
+
+func (c *muxedConnection) LocalAddr() net.Addr { return c.mux.socket.LocalAddr() }
+
+func (*muxedConnection) SetDeadline(_ time.Time) error {
+ // no deadline is desired here
+ return nil
+}
+
+func (*muxedConnection) SetReadDeadline(_ time.Time) error {
+ // no read deadline is desired here
+ return nil
+}
+
+func (*muxedConnection) SetWriteDeadline(_ time.Time) error {
+ // no write deadline is desired here
+ return nil
+}
diff --git a/p2p/transport/websocket/LICENSE-APACHE b/p2p/transport/websocket/LICENSE-APACHE
new file mode 100644
index 0000000000..14478a3b60
--- /dev/null
+++ b/p2p/transport/websocket/LICENSE-APACHE
@@ -0,0 +1,5 @@
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
diff --git a/p2p/transport/websocket/LICENSE-MIT b/p2p/transport/websocket/LICENSE-MIT
new file mode 100644
index 0000000000..72dc60d84b
--- /dev/null
+++ b/p2p/transport/websocket/LICENSE-MIT
@@ -0,0 +1,19 @@
+The MIT License (MIT)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/p2p/transport/websocket/addrs.go b/p2p/transport/websocket/addrs.go
new file mode 100644
index 0000000000..6fbd852636
--- /dev/null
+++ b/p2p/transport/websocket/addrs.go
@@ -0,0 +1,175 @@
+package websocket
+
+import (
+ "fmt"
+ "net"
+ "net/url"
+ "strconv"
+
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
+// Addr is an implementation of net.Addr for WebSocket.
+type Addr struct {
+ *url.URL
+}
+
+var _ net.Addr = (*Addr)(nil)
+
+// Network returns the network type for a WebSocket, "websocket".
+func (addr *Addr) Network() string {
+ return "websocket"
+}
+
+// NewAddr creates an Addr with `ws` scheme (insecure).
+//
+// Deprecated: Use NewAddrWithScheme instead.
+func NewAddr(host string) *Addr {
+ // Older versions of the transport only supported insecure connections (i.e.
+ // WS instead of WSS). Assume that is the case here.
+ return NewAddrWithScheme(host, false)
+}
+
+// NewAddrWithScheme creates a new Addr using the given host string. isSecure
+// should be true for WSS connections and false for WS.
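+// For example (illustrative), NewAddrWithScheme("127.0.0.1:5555", true)
+// yields an Addr whose String() is "wss://127.0.0.1:5555".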
+func NewAddrWithScheme(host string, isSecure bool) *Addr {
+ scheme := "ws"
+ if isSecure {
+ scheme = "wss"
+ }
+ return &Addr{
+ URL: &url.URL{
+ Scheme: scheme,
+ Host: host,
+ },
+ }
+}
+
+func ConvertWebsocketMultiaddrToNetAddr(maddr ma.Multiaddr) (net.Addr, error) {
+ url, err := parseMultiaddr(maddr)
+ if err != nil {
+ return nil, err
+ }
+ return &Addr{URL: url}, nil
+}
+
+func ParseWebsocketNetAddr(a net.Addr) (ma.Multiaddr, error) {
+ wsa, ok := a.(*Addr)
+ if !ok {
+ return nil, fmt.Errorf("not a websocket address")
+ }
+
+ var (
+ tcpma ma.Multiaddr
+ err error
+ port int
+ host = wsa.Hostname()
+ )
+
+ // Get the port
+ if portStr := wsa.Port(); portStr != "" {
+ port, err = strconv.Atoi(portStr)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse port '%q': %s", portStr, err)
+ }
+ } else {
+ return nil, fmt.Errorf("invalid port in url: '%q'", wsa.URL)
+ }
+
+ // NOTE: Ignoring IPv6 zones...
+ // Detect if host is IP address or DNS
+ if ip := net.ParseIP(host); ip != nil {
+ // Assume IP address
+ tcpma, err = manet.FromNetAddr(&net.TCPAddr{
+ IP: ip,
+ Port: port,
+ })
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ // Assume DNS name
+ tcpma, err = ma.NewMultiaddr(fmt.Sprintf("/dns/%s/tcp/%d", host, port))
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ wsma, err := ma.NewMultiaddr("/" + wsa.Scheme)
+ if err != nil {
+ return nil, err
+ }
+
+ return tcpma.Encapsulate(wsma), nil
+}
+
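+// parseMultiaddr converts a websocket multiaddr into the URL to dial. For
+// example (see TestMultiaddrParsing), /ip4/127.0.0.1/tcp/5555/ws becomes
+// ws://127.0.0.1:5555, and /dns4/example.com/tcp/443/tls/ws becomes
+// wss://example.com:443.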
+func parseMultiaddr(maddr ma.Multiaddr) (*url.URL, error) {
+ parsed, err := parseWebsocketMultiaddr(maddr)
+ if err != nil {
+ return nil, err
+ }
+
+ scheme := "ws"
+ if parsed.isWSS {
+ scheme = "wss"
+ }
+
+ network, host, err := manet.DialArgs(parsed.restMultiaddr)
+ if err != nil {
+ return nil, err
+ }
+ switch network {
+ case "tcp", "tcp4", "tcp6":
+ default:
+ return nil, fmt.Errorf("unsupported websocket network %s", network)
+ }
+ return &url.URL{
+ Scheme: scheme,
+ Host: host,
+ }, nil
+}
+
+type parsedWebsocketMultiaddr struct {
+ isWSS bool
+ // sni is the SNI value for the TLS handshake, and for setting HTTP Host header
+ sni *ma.Component
+ // the rest of the multiaddr before the /tls/sni/example.com/ws or /ws or /wss
+ restMultiaddr ma.Multiaddr
+}
+
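+// parseWebsocketMultiaddr splits a websocket multiaddr into its transport
+// part and the websocket-specific parts. Illustrative examples (a sketch,
+// not an exhaustive list):
+//
+//	/ip4/1.2.3.4/tcp/1/ws                     -> isWSS=false, rest=/ip4/1.2.3.4/tcp/1
+//	/ip4/1.2.3.4/tcp/1/wss                    -> canonicalized to .../tls/ws, isWSS=true
+//	/ip4/1.2.3.4/tcp/1/tls/sni/example.com/ws -> isWSS=true, sni=example.com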
+func parseWebsocketMultiaddr(a ma.Multiaddr) (parsedWebsocketMultiaddr, error) {
+ out := parsedWebsocketMultiaddr{}
+ // First check if we have a WSS component. If so we'll canonicalize it into a /tls/ws
+ withoutWss := a.Decapsulate(wssComponent.Multiaddr())
+ if !withoutWss.Equal(a) {
+ a = withoutWss.Encapsulate(tlsWsAddr)
+ }
+
+ // Remove the ws component
+ withoutWs := a.Decapsulate(wsComponent.Multiaddr())
+ if withoutWs.Equal(a) {
+ return out, fmt.Errorf("not a websocket multiaddr")
+ }
+
+ rest := withoutWs
+ // If this is not a wss then withoutWs is the rest of the multiaddr
+ out.restMultiaddr = withoutWs
+ for {
+ var head *ma.Component
+ rest, head = ma.SplitLast(rest)
+ if head == nil || len(rest) == 0 {
+ break
+ }
+
+ if head.Protocol().Code == ma.P_SNI {
+ out.sni = head
+ } else if head.Protocol().Code == ma.P_TLS {
+ out.isWSS = true
+ out.restMultiaddr = rest
+ break
+ }
+ }
+
+ return out, nil
+}
diff --git a/p2p/transport/websocket/addrs_test.go b/p2p/transport/websocket/addrs_test.go
new file mode 100644
index 0000000000..1a73c28762
--- /dev/null
+++ b/p2p/transport/websocket/addrs_test.go
@@ -0,0 +1,67 @@
+package websocket
+
+import (
+ "net/url"
+ "testing"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+func TestMultiaddrParsing(t *testing.T) {
+ addr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/5555/ws")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ wsaddr, err := parseMultiaddr(addr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if wsaddr.String() != "ws://127.0.0.1:5555" {
+ t.Fatalf("expected ws://127.0.0.1:5555, got %s", wsaddr)
+ }
+}
+
+type httpAddr struct {
+ *url.URL
+}
+
+func (addr *httpAddr) Network() string {
+ return "http"
+}
+
+func TestParseWebsocketNetAddr(t *testing.T) {
+ notWs := &httpAddr{&url.URL{Host: "http://127.0.0.1:1234"}}
+ _, err := ParseWebsocketNetAddr(notWs)
+ if err.Error() != "not a websocket address" {
+ t.Fatalf("expect \"not a websocket address\", got \"%s\"", err)
+ }
+
+ wsAddr := NewAddrWithScheme("127.0.0.1:5555", false)
+ parsed, err := ParseWebsocketNetAddr(wsAddr)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if parsed.String() != "/ip4/127.0.0.1/tcp/5555/ws" {
+ t.Fatalf("expected \"/ip4/127.0.0.1/tcp/5555/ws\", got \"%s\"", parsed.String())
+ }
+}
+
+func TestConvertWebsocketMultiaddrToNetAddr(t *testing.T) {
+ addr, err := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/5555/ws")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ wsaddr, err := ConvertWebsocketMultiaddrToNetAddr(addr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if wsaddr.String() != "ws://127.0.0.1:5555" {
+ t.Fatalf("expected ws://127.0.0.1:5555, got %s", wsaddr)
+ }
+ if wsaddr.Network() != "websocket" {
+ t.Fatalf("expected network: \"websocket\", got \"%s\"", wsaddr.Network())
+ }
+}
diff --git a/p2p/transport/websocket/conn.go b/p2p/transport/websocket/conn.go
new file mode 100644
index 0000000000..c1ee82ef9b
--- /dev/null
+++ b/p2p/transport/websocket/conn.go
@@ -0,0 +1,184 @@
+package websocket
+
+import (
+ "errors"
+ "io"
+ "net"
+ "sync"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+
+ ws "github.com/gorilla/websocket"
+)
+
+// GracefulCloseTimeout is the time to wait trying to gracefully close a
+// connection before simply cutting it.
+var GracefulCloseTimeout = 100 * time.Millisecond
+
+// Conn implements net.Conn interface for gorilla/websocket.
+type Conn struct {
+ *ws.Conn
+ Scope network.ConnManagementScope
+ secure bool
+ DefaultMessageType int
+ reader io.Reader
+ closeOnceVal func() error
+ laddr ma.Multiaddr
+ raddr ma.Multiaddr
+
+ readLock, writeLock sync.Mutex
+}
+
+var _ net.Conn = (*Conn)(nil)
+var _ manet.Conn = (*Conn)(nil)
+
+// newConn creates a Conn given a regular gorilla/websocket Conn.
+func newConn(raw *ws.Conn, secure bool, scope network.ConnManagementScope) *Conn {
+ lna := NewAddrWithScheme(raw.LocalAddr().String(), secure)
+ laddr, err := manet.FromNetAddr(lna)
+ if err != nil {
+ log.Error("BUG: invalid localaddr on websocket conn", "local_addr", raw.LocalAddr())
+ return nil
+ }
+
+ rna := NewAddrWithScheme(raw.RemoteAddr().String(), secure)
+ raddr, err := manet.FromNetAddr(rna)
+ if err != nil {
+ log.Error("BUG: invalid remoteaddr on websocket conn", "remote_addr", raw.RemoteAddr())
+ return nil
+ }
+
+ c := &Conn{
+ Conn: raw,
+ Scope: scope,
+ secure: secure,
+ DefaultMessageType: ws.BinaryMessage,
+ laddr: laddr,
+ raddr: raddr,
+ }
+ c.closeOnceVal = sync.OnceValue(c.closeOnceFn)
+ return c
+}
+
+// LocalMultiaddr implements manet.Conn.
+func (c *Conn) LocalMultiaddr() ma.Multiaddr {
+ return c.laddr
+}
+
+// RemoteMultiaddr implements manet.Conn.
+func (c *Conn) RemoteMultiaddr() ma.Multiaddr {
+ return c.raddr
+}
+
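+// Read reads from the current websocket message and transparently advances
+// to the next message once the current one is exhausted, so a sequence of
+// messages reads like a single byte stream.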
+func (c *Conn) Read(b []byte) (int, error) {
+ c.readLock.Lock()
+ defer c.readLock.Unlock()
+
+ if c.reader == nil {
+ if err := c.prepNextReader(); err != nil {
+ return 0, err
+ }
+ }
+
+ for {
+ n, err := c.reader.Read(b)
+ switch err {
+ case io.EOF:
+ c.reader = nil
+
+ if n > 0 {
+ return n, nil
+ }
+
+ if err := c.prepNextReader(); err != nil {
+ return 0, err
+ }
+
+ // explicitly looping
+ default:
+ return n, err
+ }
+ }
+}
+
+func (c *Conn) prepNextReader() error {
+ t, r, err := c.Conn.NextReader()
+ if err != nil {
+ if wserr, ok := err.(*ws.CloseError); ok {
+ if wserr.Code == 1000 || wserr.Code == 1005 {
+ return io.EOF
+ }
+ }
+ return err
+ }
+
+ if t == ws.CloseMessage {
+ return io.EOF
+ }
+
+ c.reader = r
+ return nil
+}
+
+func (c *Conn) Write(b []byte) (n int, err error) {
+ c.writeLock.Lock()
+ defer c.writeLock.Unlock()
+
+ if err := c.Conn.WriteMessage(c.DefaultMessageType, b); err != nil {
+ return 0, err
+ }
+
+ return len(b), nil
+}
+
+// Close closes the connection.
+// Subsequent and concurrent calls return the same error value.
+// This method is thread-safe.
+func (c *Conn) Close() error {
+ return c.closeOnceVal()
+}
+
+func (c *Conn) closeOnceFn() error {
+ err1 := c.Conn.WriteControl(
+ ws.CloseMessage,
+ ws.FormatCloseMessage(ws.CloseNormalClosure, "closed"),
+ time.Now().Add(GracefulCloseTimeout),
+ )
+ err2 := c.Conn.Close()
+ return errors.Join(err1, err2)
+}
+
+func (c *Conn) LocalAddr() net.Addr {
+ return NewAddrWithScheme(c.Conn.LocalAddr().String(), c.secure)
+}
+
+func (c *Conn) RemoteAddr() net.Addr {
+ return NewAddrWithScheme(c.Conn.RemoteAddr().String(), c.secure)
+}
+
+func (c *Conn) SetDeadline(t time.Time) error {
+ if err := c.SetReadDeadline(t); err != nil {
+ return err
+ }
+
+ return c.SetWriteDeadline(t)
+}
+
+func (c *Conn) SetReadDeadline(t time.Time) error {
+ // Don't lock when setting the read deadline. That would prevent us from
+ // interrupting an in-progress read.
+ return c.Conn.SetReadDeadline(t)
+}
+
+func (c *Conn) SetWriteDeadline(t time.Time) error {
+ // Unlike the read deadline, we need to lock when setting the write
+ // deadline.
+
+ c.writeLock.Lock()
+ defer c.writeLock.Unlock()
+
+ return c.Conn.SetWriteDeadline(t)
+}
diff --git a/p2p/transport/websocket/listener.go b/p2p/transport/websocket/listener.go
new file mode 100644
index 0000000000..7de1f42cf4
--- /dev/null
+++ b/p2p/transport/websocket/listener.go
@@ -0,0 +1,312 @@
+package websocket
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "log/slog"
+ "net"
+ "net/http"
+ "net/url"
+ "sync"
+ "time"
+
+ ws "github.com/gorilla/websocket"
+ logging "github.com/libp2p/go-libp2p/gologshim"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/transport"
+ "github.com/libp2p/go-libp2p/p2p/transport/tcpreuse"
+
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
+var log = logging.Logger("websocket-transport")
+
+type listener struct {
+ netListener *httpNetListener
+ server http.Server
+ wsUpgrader ws.Upgrader
+ // http.Server.TLSConfig gets set whether this is a WS or a WSS listener,
+ // so we can't rely on checking server.TLSConfig to tell the two apart.
+ isWss bool
+
+ laddr ma.Multiaddr
+
+ incoming chan *Conn
+
+ closeOnce sync.Once
+ closeErr error
+ closed chan struct{}
+ wsurl *url.URL
+}
+
+var _ transport.GatedMaListener = &listener{}
+
+func (pwma *parsedWebsocketMultiaddr) toMultiaddr() ma.Multiaddr {
+ if !pwma.isWSS {
+ return pwma.restMultiaddr.AppendComponent(wsComponent)
+ }
+
+ if pwma.sni == nil {
+ return pwma.restMultiaddr.AppendComponent(tlsComponent, wsComponent)
+ }
+
+ return pwma.restMultiaddr.AppendComponent(tlsComponent, pwma.sni, wsComponent)
+}
+
+// newListener creates a new listener from a raw net.Listener.
+// tlsConf may be nil (for unencrypted websockets).
+func newListener(a ma.Multiaddr, tlsConf *tls.Config, sharedTcp *tcpreuse.ConnMgr, upgrader transport.Upgrader, handshakeTimeout time.Duration) (*listener, error) {
+ parsed, err := parseWebsocketMultiaddr(a)
+ if err != nil {
+ return nil, err
+ }
+
+ if parsed.isWSS && tlsConf == nil {
+ return nil, fmt.Errorf("cannot listen on wss address %s without a tls.Config", a)
+ }
+
+ var gmal transport.GatedMaListener
+ if sharedTcp == nil {
+ mal, err := manet.Listen(parsed.restMultiaddr)
+ if err != nil {
+ return nil, err
+ }
+ gmal = upgrader.GateMaListener(mal)
+ } else {
+ var connType tcpreuse.DemultiplexedConnType
+ if parsed.isWSS {
+ connType = tcpreuse.DemultiplexedConnType_TLS
+ } else {
+ connType = tcpreuse.DemultiplexedConnType_HTTP
+ }
+ gmal, err = sharedTcp.DemultiplexedListen(parsed.restMultiaddr, connType)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // laddr has the correct port in case we listened on port 0
+ laddr := gmal.Multiaddr()
+
+ // Don't resolve dns addresses.
+ // We want to be able to announce domain names, so the peer can validate the TLS certificate.
+ first, _ := ma.SplitFirst(a)
+ if c := first.Protocol().Code; c == ma.P_DNS || c == ma.P_DNS4 || c == ma.P_DNS6 || c == ma.P_DNSADDR {
+ _, last := ma.SplitFirst(laddr)
+ laddr = first.Encapsulate(last)
+ }
+ parsed.restMultiaddr = laddr
+
+ listenAddr := parsed.toMultiaddr()
+ wsurl, err := parseMultiaddr(listenAddr)
+ if err != nil {
+ gmal.Close()
+ return nil, fmt.Errorf("failed to parse multiaddr to URL: %v: %w", listenAddr, err)
+ }
+ ln := &listener{
+ netListener: &httpNetListener{
+ GatedMaListener: gmal,
+ handshakeTimeout: handshakeTimeout,
+ },
+ laddr: parsed.toMultiaddr(),
+ incoming: make(chan *Conn),
+ closed: make(chan struct{}),
+ isWss: parsed.isWSS,
+ wsurl: wsurl,
+ wsUpgrader: ws.Upgrader{
+ // Allow requests from *all* origins.
+ CheckOrigin: func(_ *http.Request) bool {
+ return true
+ },
+ HandshakeTimeout: handshakeTimeout,
+ },
+ }
+ ln.server = http.Server{
+ Handler: ln,
+ ErrorLog: slog.NewLogLogger(log.Handler(), slog.LevelError),
+ ConnContext: ln.ConnContext,
+ TLSConfig: tlsConf,
+ }
+ return ln, nil
+}
+
+func (l *listener) serve() {
+ defer close(l.closed)
+ if !l.isWss {
+ l.server.Serve(l.netListener)
+ } else {
+ l.server.ServeTLS(l.netListener, "", "")
+ }
+}
+
+type connKey struct{}
+
+func (l *listener) ConnContext(ctx context.Context, c net.Conn) context.Context {
+ // prefer `*tls.Conn` over `(interface{NetConn() net.Conn})` in case `manet.Conn` is extended
+ // to support a `NetConn() net.Conn` method.
+ if tc, ok := c.(*tls.Conn); ok {
+ c = tc.NetConn()
+ }
+ if nc, ok := c.(*negotiatingConn); ok {
+ return context.WithValue(ctx, connKey{}, nc)
+ }
+ log.Error("BUG: expected net.Conn of type *websocket.negotiatingConn", "got_type", fmt.Sprintf("%T", c))
+ // might as well close the connection as there's no way to proceed now.
+ c.Close()
+ return ctx
+}
+
+func (l *listener) extractConnFromContext(ctx context.Context) (*negotiatingConn, error) {
+ c := ctx.Value(connKey{})
+ if c == nil {
+ return nil, fmt.Errorf("expected *websocket.negotiatingConn in context: got nil")
+ }
+ nc, ok := c.(*negotiatingConn)
+ if !ok {
+ return nil, fmt.Errorf("expected *websocket.negotiatingConn in context: got %T", c)
+ }
+ return nc, nil
+}
+
+func (l *listener) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ c, err := l.wsUpgrader.Upgrade(w, r, nil)
+ if err != nil {
+ // The upgrader writes a response for us.
+ return
+ }
+ nc, err := l.extractConnFromContext(r.Context())
+ if err != nil {
+ c.Close()
+ w.WriteHeader(500)
+ log.Error("BUG: failed to extract conn from context", "remote_addr", r.RemoteAddr, "err", err)
+ return
+ }
+
+ cs, err := nc.Unwrap()
+ if err != nil {
+ c.Close()
+ w.WriteHeader(500)
+ log.Debug("connection timed out", "remote_addr", r.RemoteAddr)
+ return
+ }
+
+ conn := newConn(c, l.isWss, cs.Scope)
+ if conn == nil {
+ c.Close()
+ w.WriteHeader(500)
+ return
+ }
+
+ select {
+ case l.incoming <- conn:
+ case <-l.closed:
+ conn.Close()
+ }
+ // The connection has been hijacked, it's safe to return.
+}
+
+func (l *listener) Accept() (manet.Conn, network.ConnManagementScope, error) {
+ select {
+ case c, ok := <-l.incoming:
+ if !ok {
+ return nil, nil, transport.ErrListenerClosed
+ }
+ return c, c.Scope, nil
+ case <-l.closed:
+ return nil, nil, transport.ErrListenerClosed
+ }
+}
+
+func (l *listener) Addr() net.Addr {
+ return &Addr{URL: l.wsurl}
+}
+
+func (l *listener) Close() error {
+ l.closeOnce.Do(func() {
+ err1 := l.netListener.Close()
+ err2 := l.server.Close()
+ <-l.closed
+ l.closeErr = errors.Join(err1, err2)
+ })
+ return l.closeErr
+}
+
+func (l *listener) Multiaddr() ma.Multiaddr {
+ return l.laddr
+}
+
+// httpNetListener is a net.Listener that adapts a transport.GatedMaListener to a net.Listener.
+// It wraps the manet.Conn, and the Scope from the underlying gated listener in a connWithScope.
+type httpNetListener struct {
+ transport.GatedMaListener
+ handshakeTimeout time.Duration
+}
+
+var _ net.Listener = &httpNetListener{}
+
+func (l *httpNetListener) Accept() (net.Conn, error) {
+ conn, scope, err := l.GatedMaListener.Accept()
+ if err != nil {
+ if scope != nil {
+ log.Error("BUG: scope non-nil when err is non nil", "error", err)
+ scope.Done()
+ }
+ return nil, err
+ }
+ connWithScope := connWithScope{
+ Conn: conn,
+ Scope: scope,
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), l.handshakeTimeout)
+ return &negotiatingConn{
+ connWithScope: connWithScope,
+ ctx: ctx,
+ cancelCtx: cancel,
+ stopClose: context.AfterFunc(ctx, func() {
+ connWithScope.Close()
+ log.Debug("handshake timeout for conn", "remote_addr", conn.RemoteAddr())
+ }),
+ }, nil
+}
+
+type connWithScope struct {
+ net.Conn
+ Scope network.ConnManagementScope
+}
+
+func (c connWithScope) Close() error {
+ c.Scope.Done()
+ return c.Conn.Close()
+}
+
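+// negotiatingConn wraps an accepted connection and arranges for it to be
+// closed automatically if the TLS/websocket handshake does not finish within
+// the listener's handshake timeout. A successful ServeHTTP calls Unwrap,
+// which cancels the pending close and hands back the underlying
+// connWithScope.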
+type negotiatingConn struct {
+ connWithScope
+ ctx context.Context
+ cancelCtx context.CancelFunc
+ stopClose func() bool
+}
+
+// Close closes the negotiating conn and the underlying connWithScope
+// This will be called in case the tls handshake or websocket upgrade fails.
+func (c *negotiatingConn) Close() error {
+ defer c.cancelCtx()
+ if c.stopClose != nil {
+ c.stopClose()
+ }
+ return c.connWithScope.Close()
+}
+
+func (c *negotiatingConn) Unwrap() (connWithScope, error) {
+ defer c.cancelCtx()
+ if c.stopClose != nil {
+ if !c.stopClose() {
+ return connWithScope{}, errors.New("timed out")
+ }
+ c.stopClose = nil
+ }
+ return c.connWithScope, nil
+}
diff --git a/p2p/transport/websocket/websocket.go b/p2p/transport/websocket/websocket.go
new file mode 100644
index 0000000000..3a6badac5d
--- /dev/null
+++ b/p2p/transport/websocket/websocket.go
@@ -0,0 +1,290 @@
+// Package websocket implements a websocket based transport for go-libp2p.
+package websocket
+
+import (
+ "context"
+ "crypto/tls"
+ "net"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/transport"
+ "github.com/libp2p/go-libp2p/p2p/transport/tcpreuse"
+
+ ma "github.com/multiformats/go-multiaddr"
+ mafmt "github.com/multiformats/go-multiaddr-fmt"
+ manet "github.com/multiformats/go-multiaddr/net"
+
+ ws "github.com/gorilla/websocket"
+)
+
+// WsFmt is multiaddr formatter for WsProtocol
+var WsFmt = mafmt.And(mafmt.TCP, mafmt.Base(ma.P_WS))
+
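+// dialMatcher matches the dialable websocket multiaddr forms, e.g. (see
+// TestCanDial) /ip4/127.0.0.1/tcp/5555/ws, .../tcp/5555/wss,
+// .../tcp/5555/tls/ws and .../tcp/5555/tls/sni/example.com/ws.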
+var dialMatcher = mafmt.And(
+ mafmt.Or(mafmt.IP, mafmt.DNS),
+ mafmt.Base(ma.P_TCP),
+ mafmt.Or(
+ mafmt.Base(ma.P_WS),
+ mafmt.And(
+ mafmt.Or(
+ mafmt.And(
+ mafmt.Base(ma.P_TLS),
+ mafmt.Base(ma.P_SNI)),
+ mafmt.Base(ma.P_TLS),
+ ),
+ mafmt.Base(ma.P_WS)),
+ mafmt.Base(ma.P_WSS)))
+
+var (
+ wssComponent, _ = ma.NewComponent("wss", "")
+ tlsComponent, _ = ma.NewComponent("tls", "")
+ wsComponent, _ = ma.NewComponent("ws", "")
+ tlsWsAddr = ma.Multiaddr{*tlsComponent, *wsComponent}
+)
+
+func init() {
+ manet.RegisterFromNetAddr(ParseWebsocketNetAddr, "websocket")
+ manet.RegisterToNetAddr(ConvertWebsocketMultiaddrToNetAddr, "ws")
+ manet.RegisterToNetAddr(ConvertWebsocketMultiaddrToNetAddr, "wss")
+}
+
+type Option func(*WebsocketTransport) error
+
+// WithTLSClientConfig sets a TLS client configuration on the WebSocket Dialer. Only
+// relevant for non-browser usages.
+//
+// Some useful use cases include setting InsecureSkipVerify to `true`, or
+// setting user-defined trusted CA certificates.
+func WithTLSClientConfig(c *tls.Config) Option {
+ return func(t *WebsocketTransport) error {
+ t.tlsClientConf = c
+ return nil
+ }
+}
+
+// WithTLSConfig sets a TLS configuration for the WebSocket listener.
+func WithTLSConfig(conf *tls.Config) Option {
+ return func(t *WebsocketTransport) error {
+ t.tlsConf = conf
+ return nil
+ }
+}
+
+var defaultHandshakeTimeout = 15 * time.Second
+
+// WithHandshakeTimeout sets a timeout for the websocket upgrade.
+func WithHandshakeTimeout(timeout time.Duration) Option {
+ return func(t *WebsocketTransport) error {
+ t.handshakeTimeout = timeout
+ return nil
+ }
+}
+
+// WebsocketTransport is the actual go-libp2p transport
+type WebsocketTransport struct {
+ upgrader transport.Upgrader
+ rcmgr network.ResourceManager
+ tlsClientConf *tls.Config
+ tlsConf *tls.Config
+ sharedTcp *tcpreuse.ConnMgr
+ handshakeTimeout time.Duration
+}
+
+var _ transport.Transport = (*WebsocketTransport)(nil)
+
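+// New constructs a websocket transport. A minimal usage sketch (assuming an
+// upgrader `u` built with p2p/net/upgrader; the TLS configs are illustrative
+// placeholders, not recommendations):
+//
+//	tpt, err := New(u, nil, nil,
+//		WithTLSConfig(serverTLSConf),
+//		WithTLSClientConfig(&tls.Config{InsecureSkipVerify: true}),
+//		WithHandshakeTimeout(30*time.Second),
+//	)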
+func New(u transport.Upgrader, rcmgr network.ResourceManager, sharedTCP *tcpreuse.ConnMgr, opts ...Option) (*WebsocketTransport, error) {
+ if rcmgr == nil {
+ rcmgr = &network.NullResourceManager{}
+ }
+ t := &WebsocketTransport{
+ upgrader: u,
+ rcmgr: rcmgr,
+ tlsClientConf: &tls.Config{},
+ sharedTcp: sharedTCP,
+ handshakeTimeout: defaultHandshakeTimeout,
+ }
+ for _, opt := range opts {
+ if err := opt(t); err != nil {
+ return nil, err
+ }
+ }
+ return t, nil
+}
+
+func (t *WebsocketTransport) CanDial(a ma.Multiaddr) bool {
+ return dialMatcher.Matches(a)
+}
+
+func (t *WebsocketTransport) Protocols() []int {
+ return []int{ma.P_WS, ma.P_WSS}
+}
+
+func (t *WebsocketTransport) Proxy() bool {
+ return false
+}
+
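+// Resolve canonicalizes /wss multiaddrs and, when no /sni component is
+// present, fills one in from a DNS component, e.g. (see TestResolveMultiaddr)
+// /dns4/example.com/tcp/1234/wss resolves to
+// /dns4/example.com/tcp/1234/tls/sni/example.com/ws.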
+func (t *WebsocketTransport) Resolve(_ context.Context, maddr ma.Multiaddr) ([]ma.Multiaddr, error) {
+ parsed, err := parseWebsocketMultiaddr(maddr)
+ if err != nil {
+ return nil, err
+ }
+
+ if !parsed.isWSS {
+ // No /tls/ws component, this isn't a secure websocket multiaddr. We can just return it here
+ return []ma.Multiaddr{maddr}, nil
+ }
+
+ if parsed.sni == nil {
+ var err error
+ // We don't have an sni component, we'll use dns
+ loop:
+ for _, c := range parsed.restMultiaddr {
+ switch c.Protocol().Code {
+ case ma.P_DNS, ma.P_DNS4, ma.P_DNS6:
+ // err should not happen here, since it would mean we failed to turn a valid DNS hostname into an sni component.
+ parsed.sni, err = ma.NewComponent("sni", c.Value())
+ break loop
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if parsed.sni == nil {
+ // we didn't find anything to set the sni with. So we just return the given multiaddr
+ return []ma.Multiaddr{maddr}, nil
+ }
+
+ return []ma.Multiaddr{parsed.toMultiaddr()}, nil
+}
+
+// Dial will dial the given multiaddr and expect the given peer. If an
+// HTTPS_PROXY env is set, it will use that for the dial out.
+func (t *WebsocketTransport) Dial(ctx context.Context, raddr ma.Multiaddr, p peer.ID) (transport.CapableConn, error) {
+ connScope, err := t.rcmgr.OpenConnection(network.DirOutbound, true, raddr)
+ if err != nil {
+ return nil, err
+ }
+ c, err := t.dialWithScope(ctx, raddr, p, connScope)
+ if err != nil {
+ connScope.Done()
+ return nil, err
+ }
+ return c, nil
+}
+
+func (t *WebsocketTransport) dialWithScope(ctx context.Context, raddr ma.Multiaddr, p peer.ID, connScope network.ConnManagementScope) (transport.CapableConn, error) {
+ macon, err := t.maDial(ctx, raddr, connScope)
+ if err != nil {
+ return nil, err
+ }
+ conn, err := t.upgrader.Upgrade(ctx, t, macon, network.DirOutbound, p, connScope)
+ if err != nil {
+ return nil, err
+ }
+ return &capableConn{CapableConn: conn}, nil
+}
+
+func (t *WebsocketTransport) maDial(ctx context.Context, raddr ma.Multiaddr, scope network.ConnManagementScope) (manet.Conn, error) {
+ wsurl, err := parseMultiaddr(raddr)
+ if err != nil {
+ return nil, err
+ }
+ isWss := wsurl.Scheme == "wss"
+ dialer := ws.Dialer{
+ HandshakeTimeout: t.handshakeTimeout,
+ // Inherit the default proxy behavior
+ Proxy: ws.DefaultDialer.Proxy,
+ }
+ if isWss {
+ sni := ""
+ sni, err = raddr.ValueForProtocol(ma.P_SNI)
+ if err != nil {
+ sni = ""
+ }
+
+ if sni != "" {
+ copytlsClientConf := t.tlsClientConf.Clone()
+ copytlsClientConf.ServerName = sni
+ dialer.TLSClientConfig = copytlsClientConf
+ ipPortAddr := wsurl.Host
+ // We set the `.Host` to the sni field so that the host header gets properly set.
+ wsurl.Host = sni + ":" + wsurl.Port()
+ // Setting the NetDial because we already have the resolved IP address, so we can avoid another resolution.
+ dialer.NetDial = func(network, address string) (net.Conn, error) {
+ var tcpAddr *net.TCPAddr
+ var err error
+ if address == wsurl.Host {
+ tcpAddr, err = net.ResolveTCPAddr(network, ipPortAddr) // Use our already resolved IP address
+ } else {
+ tcpAddr, err = net.ResolveTCPAddr(network, address)
+ }
+ if err != nil {
+ return nil, err
+ }
+ return net.DialTCP("tcp", nil, tcpAddr)
+ }
+ } else {
+ dialer.TLSClientConfig = t.tlsClientConf
+ }
+ }
+
+ wscon, _, err := dialer.DialContext(ctx, wsurl.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+
+ mnc, err := manet.WrapNetConn(newConn(wscon, isWss, scope))
+ if err != nil {
+ wscon.Close()
+ return nil, err
+ }
+ return mnc, nil
+}
+
+func (t *WebsocketTransport) gatedMaListen(a ma.Multiaddr) (transport.GatedMaListener, error) {
+ var tlsConf *tls.Config
+ if t.tlsConf != nil {
+ tlsConf = t.tlsConf.Clone()
+ }
+ l, err := newListener(a, tlsConf, t.sharedTcp, t.upgrader, t.handshakeTimeout)
+ if err != nil {
+ return nil, err
+ }
+ go l.serve()
+ return l, nil
+}
+
+func (t *WebsocketTransport) Listen(a ma.Multiaddr) (transport.Listener, error) {
+ gmal, err := t.gatedMaListen(a)
+ if err != nil {
+ return nil, err
+ }
+ return &transportListener{Listener: t.upgrader.UpgradeGatedMaListener(t, gmal)}, nil
+}
+
+// transportListener wraps a transport.Listener to provide connections with a `ConnState() network.ConnectionState` method.
+type transportListener struct {
+ transport.Listener
+}
+
+type capableConn struct {
+ transport.CapableConn
+}
+
+func (c *capableConn) ConnState() network.ConnectionState {
+ cs := c.CapableConn.ConnState()
+ cs.Transport = "websocket"
+ return cs
+}
+
+func (l *transportListener) Accept() (transport.CapableConn, error) {
+ conn, err := l.Listener.Accept()
+ if err != nil {
+ return nil, err
+ }
+ return &capableConn{CapableConn: conn}, nil
+}
diff --git a/p2p/transport/websocket/websocket_test.go b/p2p/transport/websocket/websocket_test.go
new file mode 100644
index 0000000000..fb60744385
--- /dev/null
+++ b/p2p/transport/websocket/websocket_test.go
@@ -0,0 +1,742 @@
+package websocket
+
+import (
+ "bytes"
+ "context"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/tls"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "errors"
+ "fmt"
+ "io"
+ "math/big"
+ "net"
+ "net/http"
+ "net/url"
+ "strings"
+ "testing"
+ "time"
+
+ gws "github.com/gorilla/websocket"
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/sec"
+ "github.com/libp2p/go-libp2p/core/sec/insecure"
+ "github.com/libp2p/go-libp2p/core/test"
+ "github.com/libp2p/go-libp2p/core/transport"
+ "github.com/libp2p/go-libp2p/p2p/muxer/yamux"
+ tptu "github.com/libp2p/go-libp2p/p2p/net/upgrader"
+ "github.com/libp2p/go-libp2p/p2p/security/noise"
+ ttransport "github.com/libp2p/go-libp2p/p2p/transport/testsuite"
+
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func newUpgrader(t *testing.T) (peer.ID, transport.Upgrader) {
+ t.Helper()
+ id, m := newInsecureMuxer(t)
+ u, err := tptu.New(m, []tptu.StreamMuxer{{ID: "/yamux", Muxer: yamux.DefaultTransport}}, nil, nil, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return id, u
+}
+
+func newSecureUpgrader(t *testing.T) (peer.ID, transport.Upgrader) {
+ t.Helper()
+ id, m := newSecureMuxer(t)
+ u, err := tptu.New(m, []tptu.StreamMuxer{{ID: "/yamux", Muxer: yamux.DefaultTransport}}, nil, nil, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return id, u
+}
+
+func newInsecureMuxer(t *testing.T) (peer.ID, []sec.SecureTransport) {
+ t.Helper()
+ priv, _, err := test.RandTestKeyPair(crypto.Ed25519, 256)
+ require.NoError(t, err)
+ id, err := peer.IDFromPrivateKey(priv)
+ require.NoError(t, err)
+ return id, []sec.SecureTransport{insecure.NewWithIdentity(insecure.ID, id, priv)}
+}
+
+func newSecureMuxer(t *testing.T) (peer.ID, []sec.SecureTransport) {
+ t.Helper()
+ priv, _, err := test.RandTestKeyPair(crypto.Ed25519, 256)
+ if err != nil {
+ t.Fatal(err)
+ }
+ id, err := peer.IDFromPrivateKey(priv)
+ if err != nil {
+ t.Fatal(err)
+ }
+ noiseTpt, err := noise.New(noise.ID, priv, nil)
+ require.NoError(t, err)
+ return id, []sec.SecureTransport{noiseTpt}
+}
+
+func lastComponent(t *testing.T, a ma.Multiaddr) *ma.Component {
+ t.Helper()
+ _, wscomponent := ma.SplitLast(a)
+ require.NotNil(t, wscomponent)
+ if wscomponent.Equal(wsComponent) {
+ return wsComponent
+ }
+ if wscomponent.Equal(wssComponent) {
+ return wssComponent
+ }
+ t.Fatal("expected a ws or wss component")
+ return nil
+}
+
+func generateTLSConfig(t *testing.T) *tls.Config {
+ t.Helper()
+ priv, err := rsa.GenerateKey(rand.Reader, 2048)
+ require.NoError(t, err)
+ tmpl := &x509.Certificate{
+ SerialNumber: big.NewInt(1),
+ Subject: pkix.Name{},
+ SignatureAlgorithm: x509.SHA256WithRSA,
+ NotBefore: time.Now(),
+ NotAfter: time.Now().Add(time.Hour), // valid for an hour
+ BasicConstraintsValid: true,
+ }
+ certDER, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, priv.Public(), priv)
+ require.NoError(t, err)
+ return &tls.Config{
+ Certificates: []tls.Certificate{{
+ PrivateKey: priv,
+ Certificate: [][]byte{certDER},
+ }},
+ }
+}
+
+func TestCanDial(t *testing.T) {
+ d := &WebsocketTransport{}
+ if !d.CanDial(ma.StringCast("/ip4/127.0.0.1/tcp/5555/ws")) {
+ t.Fatal("expected to match websocket maddr, but did not")
+ }
+ if !d.CanDial(ma.StringCast("/ip4/127.0.0.1/tcp/5555/wss")) {
+ t.Fatal("expected to match secure websocket maddr, but did not")
+ }
+ if d.CanDial(ma.StringCast("/ip4/127.0.0.1/tcp/5555")) {
+ t.Fatal("expected to not match tcp maddr, but did")
+ }
+ if !d.CanDial(ma.StringCast("/ip4/127.0.0.1/tcp/5555/tls/ws")) {
+ t.Fatal("expected to match secure websocket maddr, but did not")
+ }
+ if !d.CanDial(ma.StringCast("/ip4/127.0.0.1/tcp/5555/tls/sni/example.com/ws")) {
+ t.Fatal("expected to match secure websocket maddr with sni, but did not")
+ }
+ if !d.CanDial(ma.StringCast("/dns4/example.com/tcp/5555/tls/sni/example.com/ws")) {
+ t.Fatal("expected to match secure websocket maddr with sni, but did not")
+ }
+ if !d.CanDial(ma.StringCast("/dnsaddr/example.com/tcp/5555/tls/sni/example.com/ws")) {
+ t.Fatal("expected to match secure websocket maddr with sni, but did not")
+ }
+}
+
+// testWSSServer starts a WSS server that verifies the client's SNI and returns the listener multiaddr, the server peer ID, and a channel carrying any server-side error.
+func testWSSServer(t *testing.T, listenAddr ma.Multiaddr) (ma.Multiaddr, peer.ID, chan error) {
+ errChan := make(chan error, 1)
+
+ ip := net.ParseIP("::")
+ tlsConf := getTLSConf(t, ip, time.Now(), time.Now().Add(time.Hour))
+ tlsConf.GetConfigForClient = func(chi *tls.ClientHelloInfo) (*tls.Config, error) {
+ if chi.ServerName != "example.com" {
+ errChan <- fmt.Errorf("didn't get the expected sni")
+ }
+ return tlsConf, nil
+ }
+
+ id, u := newSecureUpgrader(t)
+ tpt, err := New(u, &network.NullResourceManager{}, nil, WithTLSConfig(tlsConf))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ l, err := tpt.Listen(listenAddr)
+ require.NoError(t, err)
+ t.Cleanup(func() {
+ l.Close()
+ })
+ go func() {
+ conn, err := l.Accept()
+ if err != nil {
+ errChan <- fmt.Errorf("error in accepting conn: %w", err)
+ return
+ }
+ defer conn.Close()
+
+ strm, err := conn.AcceptStream()
+ if err != nil {
+ errChan <- fmt.Errorf("error in accepting stream: %w", err)
+ return
+ }
+ defer strm.Close()
+ close(errChan)
+ }()
+
+ return l.Multiaddr(), id, errChan
+}
+
+func getTLSConf(t *testing.T, ip net.IP, start, end time.Time) *tls.Config {
+ t.Helper()
+ certTempl := &x509.Certificate{
+ SerialNumber: big.NewInt(1234),
+ Subject: pkix.Name{Organization: []string{"websocket"}},
+ NotBefore: start,
+ NotAfter: end,
+ IsCA: true,
+ ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},
+ KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
+ BasicConstraintsValid: true,
+ IPAddresses: []net.IP{ip},
+ }
+ priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ require.NoError(t, err)
+ caBytes, err := x509.CreateCertificate(rand.Reader, certTempl, certTempl, &priv.PublicKey, priv)
+ require.NoError(t, err)
+ cert, err := x509.ParseCertificate(caBytes)
+ require.NoError(t, err)
+ return &tls.Config{
+ Certificates: []tls.Certificate{{
+ Certificate: [][]byte{cert.Raw},
+ PrivateKey: priv,
+ Leaf: cert,
+ }},
+ }
+}
+
+func TestHostHeaderWss(t *testing.T) {
+ server := &http.Server{}
+ l, err := net.Listen("tcp", ":0")
+ require.NoError(t, err)
+ defer server.Close()
+
+ errChan := make(chan error, 1)
+ go func() {
+ server.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ defer close(errChan)
+ if !strings.Contains(r.Host, "example.com") {
+ errChan <- errors.New("Didn't see host header")
+ }
+ w.WriteHeader(http.StatusNotFound)
+ })
+ server.TLSConfig = getTLSConf(t, net.ParseIP("127.0.0.1"), time.Now(), time.Now().Add(time.Hour))
+ server.ServeTLS(l, "", "")
+ }()
+
+ _, port, err := net.SplitHostPort(l.Addr().String())
+ require.NoError(t, err)
+ serverMA := ma.StringCast("/ip4/127.0.0.1/tcp/" + port + "/tls/sni/example.com/ws")
+
+ tlsConfig := &tls.Config{InsecureSkipVerify: true} // Our test server doesn't have a cert signed by a CA
+ _, u := newSecureUpgrader(t)
+ tpt, err := New(u, &network.NullResourceManager{}, nil, WithTLSClientConfig(tlsConfig))
+ require.NoError(t, err)
+
+ masToDial, err := tpt.Resolve(context.Background(), serverMA)
+ require.NoError(t, err)
+
+ _, err = tpt.Dial(context.Background(), masToDial[0], test.RandPeerIDFatal(t))
+ require.Error(t, err)
+
+ err = <-errChan
+ require.NoError(t, err)
+}
+
+func TestDialWss(t *testing.T) {
+ serverMA, rid, errChan := testWSSServer(t, ma.StringCast("/ip4/127.0.0.1/tcp/0/tls/sni/example.com/ws"))
+ require.Contains(t, serverMA.String(), "tls")
+
+ tlsConfig := &tls.Config{InsecureSkipVerify: true} // Our test server doesn't have a cert signed by a CA
+ _, u := newSecureUpgrader(t)
+ tpt, err := New(u, &network.NullResourceManager{}, nil, WithTLSClientConfig(tlsConfig))
+ require.NoError(t, err)
+
+ masToDial, err := tpt.Resolve(context.Background(), serverMA)
+ require.NoError(t, err)
+
+ conn, err := tpt.Dial(context.Background(), masToDial[0], rid)
+ require.NoError(t, err)
+ defer conn.Close()
+
+ stream, err := conn.OpenStream(context.Background())
+ require.NoError(t, err)
+ defer stream.Close()
+
+ err = <-errChan
+ require.NoError(t, err)
+}
+
+func TestDialWssNoClientCert(t *testing.T) {
+ serverMA, rid, _ := testWSSServer(t, ma.StringCast("/ip4/127.0.0.1/tcp/0/tls/sni/example.com/ws"))
+ require.Contains(t, serverMA.String(), "tls")
+
+ _, u := newSecureUpgrader(t)
+ tpt, err := New(u, &network.NullResourceManager{}, nil)
+ require.NoError(t, err)
+
+ masToDial, err := tpt.Resolve(context.Background(), serverMA)
+ require.NoError(t, err)
+
+ _, err = tpt.Dial(context.Background(), masToDial[0], rid)
+ require.Error(t, err)
+
+ // The server doesn't have a signed certificate
+ require.Contains(t, err.Error(), "x509")
+}
+
+func TestWebsocketTransport(t *testing.T) {
+ t.Run("/ws", func(t *testing.T) {
+ peerA, ua := newUpgrader(t)
+ ta, err := New(ua, nil, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ peerB, ub := newUpgrader(t)
+ tb, err := New(ub, nil, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ttransport.SubtestTransport(t, ta, tb, "/ip4/127.0.0.1/tcp/0/ws", peerA)
+ ttransport.SubtestTransport(t, tb, ta, "/ip4/127.0.0.1/tcp/0/ws", peerB)
+
+ })
+ t.Run("/wss", func(t *testing.T) {
+ peerA, ua := newUpgrader(t)
+ tca := generateTLSConfig(t)
+ ta, err := New(ua, nil, nil, WithTLSConfig(tca), WithTLSClientConfig(&tls.Config{InsecureSkipVerify: true}))
+ if err != nil {
+ t.Fatal(err)
+ }
+ peerB, ub := newUpgrader(t)
+ tcb := generateTLSConfig(t)
+ tb, err := New(ub, nil, nil, WithTLSConfig(tcb), WithTLSClientConfig(&tls.Config{InsecureSkipVerify: true}))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ttransport.SubtestTransport(t, ta, tb, "/ip4/127.0.0.1/tcp/0/wss", peerA)
+ ttransport.SubtestTransport(t, tb, ta, "/ip4/127.0.0.1/tcp/0/wss", peerB)
+ })
+}
+
+func isWSS(addr ma.Multiaddr) bool {
+ if _, err := addr.ValueForProtocol(ma.P_WSS); err == nil {
+ return true
+ }
+ if _, err := addr.ValueForProtocol(ma.P_WS); err == nil {
+ return false
+ }
+ panic("not a WebSocket address")
+}
+
+func connectAndExchangeData(t *testing.T, laddr ma.Multiaddr, secure bool) {
+ var opts []Option
+ var tlsConf *tls.Config
+ if secure {
+ tlsConf = generateTLSConfig(t)
+ opts = append(opts, WithTLSConfig(tlsConf))
+ }
+ server, u := newUpgrader(t)
+ tpt, err := New(u, &network.NullResourceManager{}, nil, opts...)
+ require.NoError(t, err)
+ l, err := tpt.Listen(laddr)
+ require.NoError(t, err)
+ if secure {
+ require.Contains(t, l.Multiaddr().String(), "tls")
+ } else {
+ require.Equal(t, lastComponent(t, l.Multiaddr()).String(), wsComponent.String())
+ }
+ defer l.Close()
+
+ msg := []byte("HELLO WORLD")
+
+ go func() {
+ var opts []Option
+ if secure {
+ opts = append(opts, WithTLSClientConfig(&tls.Config{InsecureSkipVerify: true}))
+ }
+ _, u := newUpgrader(t)
+ tpt, err := New(u, &network.NullResourceManager{}, nil, opts...)
+ require.NoError(t, err)
+ c, err := tpt.Dial(context.Background(), l.Multiaddr(), server)
+ require.NoError(t, err)
+ require.Equal(t, secure, isWSS(c.LocalMultiaddr()))
+ require.Equal(t, secure, isWSS(c.RemoteMultiaddr()))
+ str, err := c.OpenStream(context.Background())
+ require.NoError(t, err)
+ defer str.Close()
+ _, err = str.Write(msg)
+ require.NoError(t, err)
+ }()
+
+ c, err := l.Accept()
+ require.NoError(t, err)
+ defer c.Close()
+ require.Equal(t, secure, isWSS(c.LocalMultiaddr()))
+ require.Equal(t, secure, isWSS(c.RemoteMultiaddr()))
+ str, err := c.AcceptStream()
+ require.NoError(t, err)
+ defer str.Close()
+
+ out, err := io.ReadAll(str)
+ require.NoError(t, err)
+ require.Equal(t, out, msg, "got wrong message")
+}
+
+func TestWebsocketConnection(t *testing.T) {
+ t.Run("unencrypted", func(t *testing.T) {
+ connectAndExchangeData(t, ma.StringCast("/ip4/127.0.0.1/tcp/0/ws"), false)
+ })
+ t.Run("encrypted", func(t *testing.T) {
+ connectAndExchangeData(t, ma.StringCast("/ip4/127.0.0.1/tcp/0/wss"), true)
+ })
+}
+
+func TestWebsocketListenSecureFailWithoutTLSConfig(t *testing.T) {
+ _, u := newUpgrader(t)
+ tpt, err := New(u, &network.NullResourceManager{}, nil)
+ require.NoError(t, err)
+ addr := ma.StringCast("/ip4/127.0.0.1/tcp/0/wss")
+ _, err = tpt.Listen(addr)
+ require.EqualError(t, err, fmt.Sprintf("cannot listen on wss address %s without a tls.Config", addr))
+}
+
+func TestWebsocketListenSecureAndInsecure(t *testing.T) {
+ serverID, serverUpgrader := newUpgrader(t)
+ server, err := New(serverUpgrader, &network.NullResourceManager{}, nil, WithTLSConfig(generateTLSConfig(t)))
+ require.NoError(t, err)
+
+ lnInsecure, err := server.Listen(ma.StringCast("/ip4/127.0.0.1/tcp/0/ws"))
+ require.NoError(t, err)
+ lnSecure, err := server.Listen(ma.StringCast("/ip4/127.0.0.1/tcp/0/wss"))
+ require.NoError(t, err)
+
+ t.Run("insecure", func(t *testing.T) {
+ _, clientUpgrader := newUpgrader(t)
+ client, err := New(clientUpgrader, &network.NullResourceManager{}, nil, WithTLSClientConfig(&tls.Config{InsecureSkipVerify: true}))
+ require.NoError(t, err)
+
+ // dialing the insecure address should succeed
+ conn, err := client.Dial(context.Background(), lnInsecure.Multiaddr(), serverID)
+ require.NoError(t, err)
+ defer conn.Close()
+ require.Equal(t, lastComponent(t, conn.RemoteMultiaddr()).String(), wsComponent.String())
+ require.Equal(t, lastComponent(t, conn.LocalMultiaddr()).String(), wsComponent.String())
+
+ // dialing the secure address should also succeed, since the client skips certificate verification
+ _, err = client.Dial(context.Background(), lnSecure.Multiaddr(), serverID)
+ require.NoError(t, err)
+ })
+
+ t.Run("secure", func(t *testing.T) {
+ _, clientUpgrader := newUpgrader(t)
+ client, err := New(clientUpgrader, &network.NullResourceManager{}, nil, WithTLSClientConfig(&tls.Config{InsecureSkipVerify: true}))
+ require.NoError(t, err)
+
+ // dialing the secure address should succeed
+ conn, err := client.Dial(context.Background(), lnSecure.Multiaddr(), serverID)
+ require.NoError(t, err)
+ defer conn.Close()
+ require.Equal(t, lastComponent(t, conn.RemoteMultiaddr()).String(), wssComponent.String())
+ require.Equal(t, lastComponent(t, conn.LocalMultiaddr()).String(), wssComponent.String())
+
+ // dialing the insecure address should also succeed
+ _, err = client.Dial(context.Background(), lnInsecure.Multiaddr(), serverID)
+ require.NoError(t, err)
+ })
+}
+
+func TestConcurrentClose(t *testing.T) {
+ _, u := newUpgrader(t)
+ tpt, err := New(u, &network.NullResourceManager{}, nil)
+ require.NoError(t, err)
+ l, err := tpt.gatedMaListen(ma.StringCast("/ip4/127.0.0.1/tcp/0/ws"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer l.Close()
+
+ msg := []byte("HELLO WORLD")
+
+ go func() {
+ for i := 0; i < 100; i++ {
+ c, err := tpt.maDial(context.Background(), l.Multiaddr(), &network.NullScope{})
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ go func() {
+ _, _ = c.Write(msg)
+ }()
+ go func() {
+ _ = c.Close()
+ }()
+ }
+ }()
+
+ for i := 0; i < 100; i++ {
+ c, _, err := l.Accept()
+ if err != nil {
+ t.Fatal(err)
+ }
+ c.Close()
+ }
+}
+
+func TestWriteZero(t *testing.T) {
+ _, u := newUpgrader(t)
+ tpt, err := New(u, &network.NullResourceManager{}, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ l, err := tpt.gatedMaListen(ma.StringCast("/ip4/127.0.0.1/tcp/0/ws"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer l.Close()
+
+ msg := []byte(nil)
+
+ go func() {
+ c, err := tpt.maDial(context.Background(), l.Multiaddr(), &network.NullScope{})
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ defer c.Close()
+
+ for i := 0; i < 100; i++ {
+ n, err := c.Write(msg)
+ if n != 0 {
+ t.Errorf("expected to write 0 bytes, wrote %d", n)
+ }
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ }
+ }()
+
+ c, _, err := l.Accept()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer c.Close()
+ buf := make([]byte, 100)
+ n, err := c.Read(buf)
+ if n != 0 {
+ t.Errorf("read %d bytes, expected 0", n)
+ }
+ if err != io.EOF {
+ t.Errorf("expected EOF, got err: %s", err)
+ }
+}
+
+func TestResolveMultiaddr(t *testing.T) {
+ // map[unresolved]resolved
+ testCases := map[string]string{
+ "/dns/example.com/tcp/1234/wss": "/dns/example.com/tcp/1234/tls/sni/example.com/ws",
+ "/dns4/example.com/tcp/1234/wss": "/dns4/example.com/tcp/1234/tls/sni/example.com/ws",
+ "/dns6/example.com/tcp/1234/wss": "/dns6/example.com/tcp/1234/tls/sni/example.com/ws",
+ "/dnsaddr/example.com/tcp/1234/wss": "/dnsaddr/example.com/tcp/1234/wss",
+ "/dns4/example.com/tcp/1234/tls/ws": "/dns4/example.com/tcp/1234/tls/sni/example.com/ws",
+ "/dns6/example.com/tcp/1234/tls/ws": "/dns6/example.com/tcp/1234/tls/sni/example.com/ws",
+ "/dnsaddr/example.com/tcp/1234/tls/ws": "/dnsaddr/example.com/tcp/1234/tls/ws",
+ }
+
+ for unresolved, expectedMA := range testCases {
+ t.Run(unresolved, func(t *testing.T) {
+ m1 := ma.StringCast(unresolved)
+ wsTpt := WebsocketTransport{}
+ ctx := context.Background()
+
+ addrs, err := wsTpt.Resolve(ctx, m1)
+ require.NoError(t, err)
+ require.Len(t, addrs, 1)
+
+ require.Equal(t, expectedMA, addrs[0].String())
+ })
+ }
+}
+
+func TestSocksProxy(t *testing.T) {
+ testCases := []string{
+ "/ip4/1.2.3.4/tcp/1/ws", // No TLS
+ "/ip4/1.2.3.4/tcp/1/tls/ws", // TLS no SNI
+ "/ip4/1.2.3.4/tcp/1/tls/sni/example.com/ws", // TLS with an SNI
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc, func(t *testing.T) {
+ proxyServer, err := net.Listen("tcp", "127.0.0.1:0")
+ require.NoError(t, err)
+ proxyServerErr := make(chan error, 1)
+
+ go func() {
+ defer proxyServer.Close()
+ c, err := proxyServer.Accept()
+ if err != nil {
+ proxyServerErr <- err
+ return
+ }
+ defer c.Close()
+
+ req := [32]byte{}
+ _, err = io.ReadFull(c, req[:3])
+ if err != nil {
+ proxyServerErr <- err
+ return
+ }
+
+ // Handshake a SOCKS5 client: https://www.rfc-editor.org/rfc/rfc1928.html#section-3
+ if !bytes.Equal([]byte{0x05, 0x01, 0x00}, req[:3]) {
+ t.Log("expected SOCKS5 connect request")
+ proxyServerErr <- err
+ return
+ }
+ _, err = c.Write([]byte{0x05, 0x00})
+ if err != nil {
+ proxyServerErr <- err
+ return
+ }
+
+ proxyServerErr <- nil
+ }()
+
+ orig := gws.DefaultDialer.Proxy
+ defer func() { gws.DefaultDialer.Proxy = orig }()
+
+ proxyUrl, err := url.Parse("socks5://" + proxyServer.Addr().String())
+ require.NoError(t, err)
+ gws.DefaultDialer.Proxy = http.ProxyURL(proxyUrl)
+
+ tlsConfig := &tls.Config{InsecureSkipVerify: true} // Our test server doesn't have a cert signed by a CA
+ _, u := newSecureUpgrader(t)
+ tpt, err := New(u, &network.NullResourceManager{}, nil, WithTLSClientConfig(tlsConfig))
+ require.NoError(t, err)
+
+ // This can be any wss address. We aren't actually going to dial it.
+ maToDial := ma.StringCast(tc)
+ _, err = tpt.Dial(context.Background(), maToDial, "")
+ require.ErrorContains(t, err, "failed to read connect reply from SOCKS5 proxy", "This should error as we don't have a real socks server")
+
+ select {
+ case <-time.After(1 * time.Second):
+ t.Fatal("timed out waiting for the proxy server")
+ case err := <-proxyServerErr:
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+ })
+ }
+}
+
+func TestListenerAddr(t *testing.T) {
+ _, upgrader := newUpgrader(t)
+ transport, err := New(upgrader, &network.NullResourceManager{}, nil, WithTLSConfig(generateTLSConfig(t)))
+ require.NoError(t, err)
+ l1, err := transport.Listen(ma.StringCast("/ip4/127.0.0.1/tcp/0/ws"))
+ require.NoError(t, err)
+ defer l1.Close()
+ require.Regexp(t, `^ws://127\.0\.0\.1:[\d]+$`, l1.Addr().String())
+ l2, err := transport.Listen(ma.StringCast("/ip4/127.0.0.1/tcp/0/wss"))
+ require.NoError(t, err)
+ defer l2.Close()
+ require.Regexp(t, `^wss://127\.0\.0\.1:[\d]+$`, l2.Addr().String())
+}
+
+func TestHandshakeTimeout(t *testing.T) {
+ handshakeTimeout := 200 * time.Millisecond
+ _, upgrader := newUpgrader(t)
+ tlsconf := generateTLSConfig(t)
+ transport, err := New(upgrader, &network.NullResourceManager{}, nil, WithHandshakeTimeout(handshakeTimeout), WithTLSConfig(tlsconf))
+ require.NoError(t, err)
+
+ fastWSDialer := gws.Dialer{
+ HandshakeTimeout: 10 * handshakeTimeout,
+ TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
+ NetDial: func(_, addr string) (net.Conn, error) {
+ tcpConn, err := net.Dial("tcp", addr)
+ if !assert.NoError(t, err) {
+ return nil, err
+ }
+ return tcpConn, nil
+ },
+ }
+
+ slowWSDialer := gws.Dialer{
+ HandshakeTimeout: 10 * handshakeTimeout,
+ NetDial: func(_, addr string) (net.Conn, error) {
+ tcpConn, err := net.Dial("tcp", addr)
+ if !assert.NoError(t, err) {
+ return nil, err
+ }
+ // wait to simulate a slow handshake
+ time.Sleep(2 * handshakeTimeout)
+ return tcpConn, nil
+ },
+ TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
+ }
+ t.Run("ws", func(t *testing.T) {
+ // test the gatedMaListener as we're interested in the websocket handshake timeout and not the upgrader steps.
+ wsListener, err := transport.gatedMaListen(ma.StringCast("/ip4/127.0.0.1/tcp/0/ws"))
+ require.NoError(t, err)
+ defer wsListener.Close()
+
+ ctx, cancel := context.WithTimeout(context.Background(), 10*handshakeTimeout)
+ defer cancel()
+ conn, resp, err := fastWSDialer.DialContext(ctx, wsListener.Addr().String(), nil)
+ if !assert.NoError(t, err) {
+ return
+ }
+ conn.Close()
+ resp.Body.Close()
+
+ ctx, cancel = context.WithTimeout(context.Background(), 10*handshakeTimeout)
+ defer cancel()
+ conn, resp, err = slowWSDialer.DialContext(ctx, wsListener.Addr().String(), nil)
+ if err == nil {
+ conn.Close()
+ resp.Body.Close()
+ t.Fatal("should error as the handshake will time out")
+ }
+ })
+
+ t.Run("wss", func(t *testing.T) {
+ // test the gatedMaListener as we're interested in the websocket handshake timeout and not the upgrader steps.
+ wsListener, err := transport.gatedMaListen(ma.StringCast("/ip4/127.0.0.1/tcp/0/wss"))
+ require.NoError(t, err)
+ defer wsListener.Close()
+
+ // Test that the normal dial works fine
+ ctx, cancel := context.WithTimeout(context.Background(), 10*handshakeTimeout)
+ defer cancel()
+ wsConn, resp, err := fastWSDialer.DialContext(ctx, wsListener.Addr().String(), nil)
+ require.NoError(t, err)
+ wsConn.Close()
+ resp.Body.Close()
+
+ ctx, cancel = context.WithTimeout(context.Background(), 10*handshakeTimeout)
+ defer cancel()
+ wsConn, resp, err = slowWSDialer.DialContext(ctx, wsListener.Addr().String(), nil)
+ if err == nil {
+ wsConn.Close()
+ resp.Body.Close()
+ t.Fatal("websocket handshake should have timed out")
+ }
+ })
+}
diff --git a/p2p/transport/webtransport/cert_manager.go b/p2p/transport/webtransport/cert_manager.go
new file mode 100644
index 0000000000..ede638fa85
--- /dev/null
+++ b/p2p/transport/webtransport/cert_manager.go
@@ -0,0 +1,215 @@
+package libp2pwebtransport
+
+import (
+ "context"
+ "crypto/sha256"
+ "crypto/tls"
+ "encoding/binary"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/benbjohnson/clock"
+ ic "github.com/libp2p/go-libp2p/core/crypto"
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/multiformats/go-multihash"
+)
+
+// Allow for a bit of clock skew.
+// When we generate a certificate, the NotBefore time is set to clockSkewAllowance before the current time.
+// Similarly, we stop using a certificate one clockSkewAllowance before its expiry time.
+const clockSkewAllowance = time.Hour
+const validityMinusTwoSkew = certValidity - (2 * clockSkewAllowance)
+
+type certConfig struct {
+ tlsConf *tls.Config
+ sha256 [32]byte // cached from the tlsConf
+}
+
+func (c *certConfig) Start() time.Time { return c.tlsConf.Certificates[0].Leaf.NotBefore }
+func (c *certConfig) End() time.Time { return c.tlsConf.Certificates[0].Leaf.NotAfter }
+
+func newCertConfig(key ic.PrivKey, start, end time.Time) (*certConfig, error) {
+ conf, err := getTLSConf(key, start, end)
+ if err != nil {
+ return nil, err
+ }
+ return &certConfig{
+ tlsConf: conf,
+ sha256: sha256.Sum256(conf.Certificates[0].Leaf.Raw),
+ }, nil
+}
+
+// Certificate renewal logic:
+// 1. On startup, we generate one cert that is valid from now (-1h, to allow for clock skew), and another
+// cert that is valid from the expiry date of the first certificate (again, with allowance for clock skew).
+// 2. Once we reach 1h before expiry of the first certificate, we switch over to the second certificate.
+// At the same time, we stop advertising the certhash of the first cert and generate the next cert.
+type certManager struct {
+ clock clock.Clock
+ ctx context.Context
+ ctxCancel context.CancelFunc
+ refCount sync.WaitGroup
+
+ mx sync.RWMutex
+ lastConfig *certConfig // initially nil
+ currentConfig *certConfig
+ nextConfig *certConfig // the config we will switch to when currentConfig expires
+ addrComp ma.Multiaddr
+
+ serializedCertHashes [][]byte
+}
+
+func newCertManager(hostKey ic.PrivKey, clock clock.Clock) (*certManager, error) {
+ m := &certManager{clock: clock}
+ m.ctx, m.ctxCancel = context.WithCancel(context.Background())
+ if err := m.init(hostKey); err != nil {
+ return nil, err
+ }
+
+ m.background(hostKey)
+ return m, nil
+}
+
+// getCurrentBucketStartTime returns the canonical start time of the given time as
+// bucketed by ranges of certValidity since unix epoch (plus an offset). This
+// lets you get the same time ranges across reboots without having to persist
+// state.
+// ```
+// ... v--- epoch + offset
+// ... |--------| |--------| ...
+// ... |--------| |--------| ...
+// ```
+func getCurrentBucketStartTime(now time.Time, offset time.Duration) time.Time {
+ currentBucket := (now.UnixMilli() - offset.Milliseconds()) / validityMinusTwoSkew.Milliseconds()
+ return time.UnixMilli(offset.Milliseconds() + currentBucket*validityMinusTwoSkew.Milliseconds())
+}
+
+func (m *certManager) init(hostKey ic.PrivKey) error {
+ start := m.clock.Now()
+ pubkeyBytes, err := hostKey.GetPublic().Raw()
+ if err != nil {
+ return err
+ }
+
+ // We want to add a random offset to each start time so that not all certs
+ // rotate at the same time across the network. The offset represents moving
+ // the bucket start time some `offset` earlier.
+ offset := (time.Duration(binary.LittleEndian.Uint16(pubkeyBytes)) * time.Minute) % certValidity
+
+ // We want the certificate to have been valid for at least one clockSkewAllowance.
+ start = start.Add(-clockSkewAllowance)
+ startTime := getCurrentBucketStartTime(start, offset)
+ m.nextConfig, err = newCertConfig(hostKey, startTime, startTime.Add(certValidity))
+ if err != nil {
+ return err
+ }
+ return m.rollConfig(hostKey)
+}
+
+func (m *certManager) rollConfig(hostKey ic.PrivKey) error {
+ // We stop using the current certificate clockSkewAllowance before its expiry time.
+ // At this point, the next certificate needs to be valid for one clockSkewAllowance.
+ nextStart := m.nextConfig.End().Add(-2 * clockSkewAllowance)
+ c, err := newCertConfig(hostKey, nextStart, nextStart.Add(certValidity))
+ if err != nil {
+ return err
+ }
+ m.lastConfig = m.currentConfig
+ m.currentConfig = m.nextConfig
+ m.nextConfig = c
+ if err := m.cacheSerializedCertHashes(); err != nil {
+ return err
+ }
+ return m.cacheAddrComponent()
+}
+
+func (m *certManager) background(hostKey ic.PrivKey) {
+ d := m.currentConfig.End().Add(-clockSkewAllowance).Sub(m.clock.Now())
+ log.Debug("setting timer", "duration", d.String())
+ t := m.clock.Timer(d)
+ m.refCount.Add(1)
+
+ go func() {
+ defer m.refCount.Done()
+ defer t.Stop()
+
+ for {
+ select {
+ case <-m.ctx.Done():
+ return
+ case <-t.C:
+ now := m.clock.Now()
+ m.mx.Lock()
+ if err := m.rollConfig(hostKey); err != nil {
+ log.Error("rolling config failed", "error", err)
+ }
+ d := m.currentConfig.End().Add(-clockSkewAllowance).Sub(now)
+ log.Debug("rolling certificates", "next", d.String())
+ t.Reset(d)
+ m.mx.Unlock()
+ }
+ }
+ }()
+}
+
+func (m *certManager) GetConfig() *tls.Config {
+ m.mx.RLock()
+ defer m.mx.RUnlock()
+ return m.currentConfig.tlsConf
+}
+
+func (m *certManager) AddrComponent() ma.Multiaddr {
+ m.mx.RLock()
+ defer m.mx.RUnlock()
+ return m.addrComp
+}
+
+func (m *certManager) SerializedCertHashes() [][]byte {
+ return m.serializedCertHashes
+}
+
+func (m *certManager) cacheSerializedCertHashes() error {
+ hashes := make([][32]byte, 0, 3)
+ if m.lastConfig != nil {
+ hashes = append(hashes, m.lastConfig.sha256)
+ }
+ hashes = append(hashes, m.currentConfig.sha256)
+ if m.nextConfig != nil {
+ hashes = append(hashes, m.nextConfig.sha256)
+ }
+
+ m.serializedCertHashes = m.serializedCertHashes[:0]
+ for _, certHash := range hashes {
+ h, err := multihash.Encode(certHash[:], multihash.SHA2_256)
+ if err != nil {
+ return fmt.Errorf("failed to encode certificate hash: %w", err)
+ }
+ m.serializedCertHashes = append(m.serializedCertHashes, h)
+ }
+ return nil
+}
+
+func (m *certManager) cacheAddrComponent() error {
+ var addr ma.Multiaddr
+ c, err := addrComponentForCert(m.currentConfig.sha256[:])
+ if err != nil {
+ return err
+ }
+ addr = addr.AppendComponent(c)
+ if m.nextConfig != nil {
+ comp, err := addrComponentForCert(m.nextConfig.sha256[:])
+ if err != nil {
+ return err
+ }
+ addr = addr.AppendComponent(comp)
+ }
+ m.addrComp = addr
+ return nil
+}
+
+func (m *certManager) Close() error {
+ m.ctxCancel()
+ m.refCount.Wait()
+ return nil
+}
diff --git a/p2p/transport/webtransport/cert_manager_test.go b/p2p/transport/webtransport/cert_manager_test.go
new file mode 100644
index 0000000000..942d47174e
--- /dev/null
+++ b/p2p/transport/webtransport/cert_manager_test.go
@@ -0,0 +1,177 @@
+package libp2pwebtransport
+
+import (
+ "crypto/sha256"
+ "crypto/tls"
+ "fmt"
+ "testing"
+ "testing/quick"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/test"
+
+ "github.com/benbjohnson/clock"
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/multiformats/go-multibase"
+ "github.com/multiformats/go-multihash"
+ "github.com/stretchr/testify/require"
+)
+
+func certificateHashFromTLSConfig(c *tls.Config) [32]byte {
+ return sha256.Sum256(c.Certificates[0].Certificate[0])
+}
+
+func splitMultiaddr(addr ma.Multiaddr) []ma.Component {
+ var components []ma.Component
+ ma.ForEach(addr, func(c ma.Component) bool {
+ components = append(components, c)
+ return true
+ })
+ return components
+}
+
+func certHashFromComponent(t *testing.T, comp ma.Component) []byte {
+ t.Helper()
+ _, data, err := multibase.Decode(comp.Value())
+ require.NoError(t, err)
+ mh, err := multihash.Decode(data)
+ require.NoError(t, err)
+ require.Equal(t, uint64(multihash.SHA2_256), mh.Code)
+ return mh.Digest
+}
+
+func TestInitialCert(t *testing.T) {
+ cl := clock.NewMock()
+ cl.Add(1234567 * time.Hour)
+ priv, _, err := test.RandTestKeyPair(crypto.Ed25519, 256)
+ require.NoError(t, err)
+ m, err := newCertManager(priv, cl)
+ require.NoError(t, err)
+ defer m.Close()
+
+ conf := m.GetConfig()
+ require.Len(t, conf.Certificates, 1)
+ cert := conf.Certificates[0]
+ require.GreaterOrEqual(t, cl.Now().Add(-clockSkewAllowance), cert.Leaf.NotBefore)
+ require.Equal(t, cert.Leaf.NotBefore.Add(certValidity), cert.Leaf.NotAfter)
+ addr := m.AddrComponent()
+ components := splitMultiaddr(addr)
+ require.Len(t, components, 2)
+ require.Equal(t, ma.P_CERTHASH, components[0].Protocol().Code)
+ hash := certificateHashFromTLSConfig(conf)
+ require.Equal(t, hash[:], certHashFromComponent(t, components[0]))
+ require.Equal(t, ma.P_CERTHASH, components[1].Protocol().Code)
+}
+
+func TestCertRenewal(t *testing.T) {
+ cl := clock.NewMock()
+ // Add a year to avoid edge cases around the epoch
+ cl.Add(time.Hour * 24 * 365)
+ priv, _, err := test.SeededTestKeyPair(crypto.Ed25519, 256, 0)
+ require.NoError(t, err)
+ m, err := newCertManager(priv, cl)
+ require.NoError(t, err)
+ defer m.Close()
+
+ firstConf := m.GetConfig()
+ first := splitMultiaddr(m.AddrComponent())
+ require.Len(t, first, 2)
+ require.NotEqual(t, first[0].Value(), first[1].Value(), "the hashes should differ")
+ // wait for a new certificate to be generated
+ cl.Set(m.currentConfig.End().Add(-(clockSkewAllowance + time.Second)))
+ require.Never(t, func() bool {
+ for i, c := range splitMultiaddr(m.AddrComponent()) {
+ if c.Value() != first[i].Value() {
+ return true
+ }
+ }
+ return false
+ }, 100*time.Millisecond, 10*time.Millisecond)
+ cl.Add(2 * time.Second)
+ require.Eventually(t, func() bool { return m.GetConfig() != firstConf }, 200*time.Millisecond, 10*time.Millisecond)
+ secondConf := m.GetConfig()
+
+ second := splitMultiaddr(m.AddrComponent())
+ require.Len(t, second, 2)
+ for _, c := range second {
+ require.Equal(t, ma.P_CERTHASH, c.Protocol().Code)
+ }
+ // check that the 2nd certificate from the beginning was rolled over to be the 1st certificate
+ require.Equal(t, first[1].Value(), second[0].Value())
+ require.NotEqual(t, first[0].Value(), second[1].Value())
+
+ cl.Add(certValidity - 2*clockSkewAllowance + time.Second)
+ require.Eventually(t, func() bool { return m.GetConfig() != secondConf }, 200*time.Millisecond, 10*time.Millisecond)
+ third := splitMultiaddr(m.AddrComponent())
+ require.Len(t, third, 2)
+ for _, c := range third {
+ require.Equal(t, ma.P_CERTHASH, c.Protocol().Code)
+ }
+ // check that the 2nd certificate from the beginning was rolled over to be the 1st certificate
+ require.Equal(t, second[1].Value(), third[0].Value())
+}
+
+func TestDeterministicCertsAcrossReboots(t *testing.T) {
+ // Run this test 100 times to make sure it's deterministic
+ runs := 100
+ for i := 0; i < runs; i++ {
+ t.Run(fmt.Sprintf("Run=%d", i), func(t *testing.T) {
+ cl := clock.NewMock()
+ priv, _, err := test.SeededTestKeyPair(crypto.Ed25519, 256, 0)
+ require.NoError(t, err)
+ m, err := newCertManager(priv, cl)
+ require.NoError(t, err)
+ defer m.Close()
+
+ conf := m.GetConfig()
+ require.Len(t, conf.Certificates, 1)
+ oldCerts := m.serializedCertHashes
+
+ m.Close()
+
+ cl.Add(time.Hour)
+ // reboot
+ m, err = newCertManager(priv, cl)
+ require.NoError(t, err)
+ defer m.Close()
+
+ newCerts := m.serializedCertHashes
+
+ require.Equal(t, oldCerts, newCerts)
+ })
+ }
+}
+
+func TestDeterministicTimeBuckets(t *testing.T) {
+ cl := clock.NewMock()
+ cl.Add(time.Hour * 24 * 365)
+ startA := getCurrentBucketStartTime(cl.Now(), 0)
+ startB := getCurrentBucketStartTime(cl.Now().Add(time.Hour*24), 0)
+ require.Equal(t, startA, startB)
+
+ // 15 Days later
+ startC := getCurrentBucketStartTime(cl.Now().Add(time.Hour*24*15), 0)
+ require.NotEqual(t, startC, startB)
+}
+
+func TestGetCurrentBucketStartTimeIsWithinBounds(t *testing.T) {
+ require.NoError(t, quick.Check(func(timeSinceUnixEpoch time.Duration, offset time.Duration) bool {
+ if offset < 0 {
+ offset = -offset
+ }
+ if timeSinceUnixEpoch < 0 {
+ timeSinceUnixEpoch = -timeSinceUnixEpoch
+ }
+
+ offset = offset % certValidity
+ // Bound this to 100 years
+ timeSinceUnixEpoch = timeSinceUnixEpoch % (time.Hour * 24 * 365 * 100)
+ // Start a bit further in the future to avoid edge cases around epoch
+ timeSinceUnixEpoch += time.Hour * 24 * 365
+ start := time.UnixMilli(timeSinceUnixEpoch.Milliseconds())
+
+ bucketStart := getCurrentBucketStartTime(start.Add(-clockSkewAllowance), offset)
+ // !After already covers "before or equal", so this asserts
+ // bucketStart <= start - clockSkewAllowance.
+ return !bucketStart.After(start.Add(-clockSkewAllowance))
+ }, nil))
+}
diff --git a/p2p/transport/webtransport/conn.go b/p2p/transport/webtransport/conn.go
new file mode 100644
index 0000000000..44b4d2fb8f
--- /dev/null
+++ b/p2p/transport/webtransport/conn.go
@@ -0,0 +1,91 @@
+package libp2pwebtransport
+
+import (
+ "context"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ tpt "github.com/libp2p/go-libp2p/core/transport"
+
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/quic-go/quic-go"
+ "github.com/quic-go/webtransport-go"
+)
+
+type connSecurityMultiaddrs struct {
+ network.ConnSecurity
+ network.ConnMultiaddrs
+}
+
+type connMultiaddrs struct {
+ local, remote ma.Multiaddr
+}
+
+var _ network.ConnMultiaddrs = &connMultiaddrs{}
+
+func (c *connMultiaddrs) LocalMultiaddr() ma.Multiaddr { return c.local }
+func (c *connMultiaddrs) RemoteMultiaddr() ma.Multiaddr { return c.remote }
+
+type conn struct {
+ *connSecurityMultiaddrs
+
+ transport *transport
+ session *webtransport.Session
+
+ scope network.ConnManagementScope
+ qconn *quic.Conn
+}
+
+var _ tpt.CapableConn = &conn{}
+
+func newConn(tr *transport, sess *webtransport.Session, sconn *connSecurityMultiaddrs, scope network.ConnManagementScope, qconn *quic.Conn) *conn {
+ return &conn{
+ connSecurityMultiaddrs: sconn,
+ transport: tr,
+ session: sess,
+ scope: scope,
+ qconn: qconn,
+ }
+}
+
+func (c *conn) OpenStream(ctx context.Context) (network.MuxedStream, error) {
+ str, err := c.session.OpenStreamSync(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return &stream{str}, nil
+}
+
+func (c *conn) AcceptStream() (network.MuxedStream, error) {
+ str, err := c.session.AcceptStream(context.Background())
+ if err != nil {
+ return nil, err
+ }
+ return &stream{str}, nil
+}
+
+func (c *conn) allowWindowIncrease(size uint64) bool {
+ return c.scope.ReserveMemory(int(size), network.ReservationPriorityMedium) == nil
+}
+
+// Close closes the connection.
+// It must be called even if the peer closed the connection in order for
+// garbage collection to properly work in this package.
+func (c *conn) Close() error {
+ defer c.scope.Done()
+ c.transport.removeConn(c.qconn)
+ err := c.session.CloseWithError(0, "")
+ _ = c.qconn.CloseWithError(1, "")
+ return err
+}
+
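+// CloseWithError closes the connection, ignoring the supplied error code:
+// WebTransport gives us no reliable way to convey application error codes on
+// close (see the note on stream.ResetWithError in this package).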
+func (c *conn) CloseWithError(_ network.ConnErrorCode) error {
+ return c.Close()
+}
+
+func (c *conn) IsClosed() bool { return c.session.Context().Err() != nil }
+func (c *conn) Scope() network.ConnScope { return c.scope }
+func (c *conn) Transport() tpt.Transport { return c.transport }
+
+func (c *conn) ConnState() network.ConnectionState {
+ return network.ConnectionState{Transport: "webtransport"}
+}
diff --git a/p2p/transport/webtransport/crypto.go b/p2p/transport/webtransport/crypto.go
new file mode 100644
index 0000000000..90504ead01
--- /dev/null
+++ b/p2p/transport/webtransport/crypto.go
@@ -0,0 +1,164 @@
+package libp2pwebtransport
+
+import (
+ "bytes"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/sha256"
+ "crypto/tls"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math/big"
+ "time"
+
+ "golang.org/x/crypto/hkdf"
+
+ ic "github.com/libp2p/go-libp2p/core/crypto"
+
+ "github.com/multiformats/go-multihash"
+ "github.com/quic-go/quic-go/http3"
+)
+
+// Note: the misspelling in this string is preserved on purpose: it feeds the
+// HKDF info parameter, so changing it would change every deterministically
+// derived certificate (and thus every advertised certhash).
+const deterministicCertInfo = "determinisitic cert"
+
+func getTLSConf(key ic.PrivKey, start, end time.Time) (*tls.Config, error) {
+ cert, priv, err := generateCert(key, start, end)
+ if err != nil {
+ return nil, err
+ }
+ return &tls.Config{
+ Certificates: []tls.Certificate{{
+ Certificate: [][]byte{cert.Raw},
+ PrivateKey: priv,
+ Leaf: cert,
+ }},
+ NextProtos: []string{http3.NextProtoH3},
+ }, nil
+}
+
+// generateCert generates certs deterministically based on the `key` and start
+// time passed in. Uses `golang.org/x/crypto/hkdf`.
+func generateCert(key ic.PrivKey, start, end time.Time) (*x509.Certificate, *ecdsa.PrivateKey, error) {
+ keyBytes, err := key.Raw()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ startTimeSalt := make([]byte, 8)
+ binary.LittleEndian.PutUint64(startTimeSalt, uint64(start.UnixNano()))
+ deterministicHKDFReader := newDeterministicReader(keyBytes, startTimeSalt, deterministicCertInfo)
+
+ b := make([]byte, 8)
+ if _, err := deterministicHKDFReader.Read(b); err != nil {
+ return nil, nil, err
+ }
+ serial := int64(binary.BigEndian.Uint64(b))
+ if serial < 0 {
+ serial = -serial
+ }
+ certTempl := &x509.Certificate{
+ SerialNumber: big.NewInt(serial),
+ Subject: pkix.Name{},
+ NotBefore: start,
+ NotAfter: end,
+ IsCA: true,
+ ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},
+ KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
+ BasicConstraintsValid: true,
+ }
+
+ caPrivateKey, err := ecdsa.GenerateKey(elliptic.P256(), deterministicHKDFReader)
+ if err != nil {
+ return nil, nil, err
+ }
+ caBytes, err := x509.CreateCertificate(deterministicHKDFReader, certTempl, certTempl, caPrivateKey.Public(), caPrivateKey)
+ if err != nil {
+ return nil, nil, err
+ }
+ ca, err := x509.ParseCertificate(caBytes)
+ if err != nil {
+ return nil, nil, err
+ }
+ return ca, caPrivateKey, nil
+}
+
+type ErrCertHashMismatch struct {
+ Expected []byte
+ Actual [][]byte
+}
+
+func (e ErrCertHashMismatch) Error() string {
+ return fmt.Sprintf("cert hash not found: %x (expected: %#x)", e.Expected, e.Actual)
+}
+
+func verifyRawCerts(rawCerts [][]byte, certHashes []multihash.DecodedMultihash) error {
+ if len(rawCerts) < 1 {
+ return errors.New("no cert")
+ }
+ leaf := rawCerts[len(rawCerts)-1]
+ // The W3C WebTransport specification currently only allows SHA-256 certificates for serverCertificateHashes.
+ hash := sha256.Sum256(leaf)
+ var verified bool
+ for _, h := range certHashes {
+ if h.Code == multihash.SHA2_256 && bytes.Equal(h.Digest, hash[:]) {
+ verified = true
+ break
+ }
+ }
+ if !verified {
+ digests := make([][]byte, 0, len(certHashes))
+ for _, h := range certHashes {
+ digests = append(digests, h.Digest)
+ }
+ return ErrCertHashMismatch{Expected: hash[:], Actual: digests}
+ }
+
+ cert, err := x509.ParseCertificate(leaf)
+ if err != nil {
+ return err
+ }
+ // TODO: is this the best (and complete?) way to identify RSA certificates?
+ switch cert.SignatureAlgorithm {
+ case x509.SHA1WithRSA, x509.SHA256WithRSA, x509.SHA384WithRSA, x509.SHA512WithRSA, x509.MD2WithRSA, x509.MD5WithRSA:
+ return errors.New("cert uses RSA")
+ }
+ if l := cert.NotAfter.Sub(cert.NotBefore); l > 14*24*time.Hour {
+ return fmt.Errorf("cert must not be valid for longer than 14 days (NotBefore: %s, NotAfter: %s, Length: %s)", cert.NotBefore, cert.NotAfter, l)
+ }
+ now := time.Now()
+ if now.Before(cert.NotBefore) || now.After(cert.NotAfter) {
+ return fmt.Errorf("cert not valid (NotBefore: %s, NotAfter: %s)", cert.NotBefore, cert.NotAfter)
+ }
+ return nil
+}
+
+// deterministicReader is a hack. It counteracts the Go standard library's
+// attempt at making ECDSA signatures non-deterministic: Go adds non-determinism
+// by randomly dropping a single byte from the reader stream. This reader
+// counteracts that by detecting single-byte reads and serving them from a
+// separate reader instead.
+type deterministicReader struct {
+ reader io.Reader
+ singleByteReader io.Reader
+}
+
+func newDeterministicReader(seed []byte, salt []byte, info string) io.Reader {
+ reader := hkdf.New(sha256.New, seed, salt, []byte(info))
+ singleByteReader := hkdf.New(sha256.New, seed, salt, []byte(info+" single byte"))
+
+ return &deterministicReader{
+ reader: reader,
+ singleByteReader: singleByteReader,
+ }
+}
+
+func (r *deterministicReader) Read(p []byte) (n int, err error) {
+ if len(p) == 1 {
+ return r.singleByteReader.Read(p)
+ }
+ return r.reader.Read(p)
+}
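+
+// A minimal sketch of why the split matters. Go's key generation may issue a
+// single 1-byte read (with 50% probability) to defeat deterministic outputs;
+// serving that read from an independent HKDF stream leaves the main stream's
+// offset unchanged either way. (The 50% jitter is a Go-internal detail and may
+// change between releases.)
+//
+//	r := newDeterministicReader(seed, salt, "example info")
+//	var one [1]byte
+//	r.Read(one[:]) // may or may not happen; served from the side stream
+//	var buf [32]byte
+//	r.Read(buf[:]) // yields the same bytes whether or not the 1-byte read occurred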
diff --git a/p2p/transport/webtransport/crypto_test.go b/p2p/transport/webtransport/crypto_test.go
new file mode 100644
index 0000000000..ba439c28af
--- /dev/null
+++ b/p2p/transport/webtransport/crypto_test.go
@@ -0,0 +1,196 @@
+package libp2pwebtransport
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha256"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "fmt"
+ "io"
+ "math/big"
+ mrand "math/rand"
+ "testing"
+ "time"
+
+ ic "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/multiformats/go-multihash"
+ "github.com/stretchr/testify/require"
+)
+
+func sha256Multihash(t *testing.T, b []byte) multihash.DecodedMultihash {
+ t.Helper()
+ hash := sha256.Sum256(b)
+ h, err := multihash.Encode(hash[:], multihash.SHA2_256)
+ require.NoError(t, err)
+ dh, err := multihash.Decode(h)
+ require.NoError(t, err)
+ return *dh
+}
+
+func generateCertWithKey(t *testing.T, key crypto.PrivateKey, start, end time.Time) *x509.Certificate {
+ t.Helper()
+ serial := int64(mrand.Uint64())
+ if serial < 0 {
+ serial = -serial
+ }
+ certTempl := &x509.Certificate{
+ SerialNumber: big.NewInt(serial),
+ Subject: pkix.Name{},
+ NotBefore: start,
+ NotAfter: end,
+ IsCA: true,
+ ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},
+ KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
+ BasicConstraintsValid: true,
+ }
+ caBytes, err := x509.CreateCertificate(rand.Reader, certTempl, certTempl, key.(interface{ Public() crypto.PublicKey }).Public(), key)
+ require.NoError(t, err)
+ ca, err := x509.ParseCertificate(caBytes)
+ require.NoError(t, err)
+ return ca
+}
+
+func TestCertificateVerification(t *testing.T) {
+ now := time.Now()
+ ecdsaKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ require.NoError(t, err)
+ rsaKey, err := rsa.GenerateKey(rand.Reader, 1024)
+ require.NoError(t, err)
+
+ t.Run("accepting a valid cert", func(t *testing.T) {
+ validCert := generateCertWithKey(t, ecdsaKey, now, now.Add(14*24*time.Hour))
+ require.NoError(t, verifyRawCerts([][]byte{validCert.Raw}, []multihash.DecodedMultihash{sha256Multihash(t, validCert.Raw)}))
+ })
+
+ for _, tc := range [...]struct {
+ name string
+ cert *x509.Certificate
+ errStr string
+ }{
+ {
+ name: "validitity period too long",
+ cert: generateCertWithKey(t, ecdsaKey, now, now.Add(15*24*time.Hour)),
+ errStr: "cert must not be valid for longer than 14 days",
+ },
+ {
+ name: "uses RSA key",
+ cert: generateCertWithKey(t, rsaKey, now, now.Add(14*24*time.Hour)),
+ errStr: "RSA",
+ },
+ {
+ name: "expired certificate",
+ cert: generateCertWithKey(t, ecdsaKey, now.Add(-14*24*time.Hour), now),
+ errStr: "cert not valid",
+ },
+ {
+ name: "not yet valid",
+ cert: generateCertWithKey(t, ecdsaKey, now.Add(time.Hour), now.Add(time.Hour+14*24*time.Hour)),
+ errStr: "cert not valid",
+ },
+ } {
+ tc := tc
+ t.Run(fmt.Sprintf("rejecting invalid certificates: %s", tc.name), func(t *testing.T) {
+ err := verifyRawCerts([][]byte{tc.cert.Raw}, []multihash.DecodedMultihash{sha256Multihash(t, tc.cert.Raw)})
+ require.Error(t, err)
+ require.Contains(t, err.Error(), tc.errStr)
+ })
+ }
+
+ for _, tc := range [...]struct {
+ name string
+ certs [][]byte
+ hashes []multihash.DecodedMultihash
+ errStr string
+ }{
+ {
+ name: "no certificates",
+ hashes: []multihash.DecodedMultihash{sha256Multihash(t, []byte("foobar"))},
+ errStr: "no cert",
+ },
+ {
+ name: "certificate not parseable",
+ certs: [][]byte{[]byte("foobar")},
+ hashes: []multihash.DecodedMultihash{sha256Multihash(t, []byte("foobar"))},
+ errStr: "x509: malformed certificate",
+ },
+ {
+ name: "hash mismatch",
+ certs: [][]byte{generateCertWithKey(t, ecdsaKey, now, now.Add(15*24*time.Hour)).Raw},
+ hashes: []multihash.DecodedMultihash{sha256Multihash(t, []byte("foobar"))},
+ errStr: "cert hash not found",
+ },
+ } {
+ tc := tc
+ t.Run(fmt.Sprintf("rejecting invalid certificates: %s", tc.name), func(t *testing.T) {
+ err := verifyRawCerts(tc.certs, tc.hashes)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), tc.errStr)
+ })
+ }
+}
+
+func TestDeterministicCertHashes(t *testing.T) {
+ // Run this test 1000 times since we want to make sure the signatures are deterministic
+ runs := 1000
+ for i := 0; i < runs; i++ {
+ zeroSeed := [32]byte{}
+ priv, _, err := ic.GenerateEd25519Key(bytes.NewReader(zeroSeed[:]))
+ require.NoError(t, err)
+ cert, certPriv, err := generateCert(priv, time.Time{}, time.Time{}.Add(time.Hour*24*14))
+ require.NoError(t, err)
+
+ keyBytes, err := x509.MarshalECPrivateKey(certPriv)
+ require.NoError(t, err)
+
+ cert2, certPriv2, err := generateCert(priv, time.Time{}, time.Time{}.Add(time.Hour*24*14))
+ require.NoError(t, err)
+
+ require.Equal(t, cert2.Signature, cert.Signature)
+ require.Equal(t, cert2.Raw, cert.Raw)
+ keyBytes2, err := x509.MarshalECPrivateKey(certPriv2)
+ require.NoError(t, err)
+ require.Equal(t, keyBytes, keyBytes2)
+ }
+}
+
+// TestDeterministicSig tests that our hack around making ECDSA signatures
+// deterministic works. If this fails, this means we need to try another
+// strategy to make deterministic signatures or try something else entirely.
+// See deterministicReader for more context.
+func TestDeterministicSig(t *testing.T) {
+ // Run this test 1000 times since we want to make sure the signatures are deterministic
+ runs := 1000
+ for i := 0; i < runs; i++ {
+ zeroSeed := [32]byte{}
+ deterministicHKDFReader := newDeterministicReader(zeroSeed[:], nil, deterministicCertInfo)
+ b := [1024]byte{}
+ io.ReadFull(deterministicHKDFReader, b[:])
+ caPrivateKey, err := ecdsa.GenerateKey(elliptic.P256(), deterministicHKDFReader)
+ require.NoError(t, err)
+
+ sig, err := caPrivateKey.Sign(deterministicHKDFReader, b[:], crypto.SHA256)
+ require.NoError(t, err)
+
+ deterministicHKDFReader = newDeterministicReader(zeroSeed[:], nil, deterministicCertInfo)
+ b2 := [1024]byte{}
+ io.ReadFull(deterministicHKDFReader, b2[:])
+ caPrivateKey2, err := ecdsa.GenerateKey(elliptic.P256(), deterministicHKDFReader)
+ require.NoError(t, err)
+
+ sig2, err := caPrivateKey2.Sign(deterministicHKDFReader, b2[:], crypto.SHA256)
+ require.NoError(t, err)
+
+ keyBytes, err := x509.MarshalECPrivateKey(caPrivateKey)
+ require.NoError(t, err)
+ keyBytes2, err := x509.MarshalECPrivateKey(caPrivateKey2)
+ require.NoError(t, err)
+
+ require.Equal(t, sig, sig2)
+ require.Equal(t, keyBytes, keyBytes2)
+ }
+}
diff --git a/p2p/transport/webtransport/listener.go b/p2p/transport/webtransport/listener.go
new file mode 100644
index 0000000000..7cd647f72b
--- /dev/null
+++ b/p2p/transport/webtransport/listener.go
@@ -0,0 +1,330 @@
+package libp2pwebtransport
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net"
+ "net/http"
+ "sync"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ tpt "github.com/libp2p/go-libp2p/core/transport"
+ "github.com/libp2p/go-libp2p/p2p/security/noise"
+ "github.com/libp2p/go-libp2p/p2p/security/noise/pb"
+ "github.com/libp2p/go-libp2p/p2p/transport/quicreuse"
+
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/quic-go/quic-go"
+ "github.com/quic-go/quic-go/http3"
+ "github.com/quic-go/webtransport-go"
+)
+
+const queueLen = 16
+const handshakeTimeout = 10 * time.Second
+
+type connKey struct{}
+
+type listener struct {
+ transport *transport
+ isStaticTLSConf bool
+ reuseListener quicreuse.Listener
+
+ server webtransport.Server
+
+ ctx context.Context
+ ctxCancel context.CancelFunc
+
+ serverClosed chan struct{} // is closed when server.Serve returns
+
+ addr net.Addr
+ multiaddr ma.Multiaddr
+
+ queue chan tpt.CapableConn
+
+ mx sync.Mutex
+ pendingConns map[*quic.Conn]*negotiatingConn
+}
+
+var _ tpt.Listener = &listener{}
+
+func newListener(reuseListener quicreuse.Listener, t *transport, isStaticTLSConf bool) (tpt.Listener, error) {
+ localMultiaddr, err := toWebtransportMultiaddr(reuseListener.Addr())
+ if err != nil {
+ return nil, err
+ }
+
+ ln := &listener{
+ reuseListener: reuseListener,
+ transport: t,
+ isStaticTLSConf: isStaticTLSConf,
+ queue: make(chan tpt.CapableConn, queueLen),
+ serverClosed: make(chan struct{}),
+ addr: reuseListener.Addr(),
+ multiaddr: localMultiaddr,
+ server: webtransport.Server{
+ H3: http3.Server{
+ ConnContext: func(ctx context.Context, c *quic.Conn) context.Context {
+ return context.WithValue(ctx, connKey{}, c)
+ },
+ },
+ CheckOrigin: func(_ *http.Request) bool { return true },
+ },
+ pendingConns: make(map[*quic.Conn]*negotiatingConn),
+ }
+ ln.ctx, ln.ctxCancel = context.WithCancel(context.Background())
+ mux := http.NewServeMux()
+ mux.HandleFunc(webtransportHTTPEndpoint, ln.httpHandler)
+ ln.server.H3.Handler = mux
+ go func() {
+ defer close(ln.serverClosed)
+ for {
+ conn, err := ln.reuseListener.Accept(context.Background())
+ if err != nil {
+ log.Debug("serving failed", "addr", ln.Addr(), "error", err)
+ return
+ }
+ err = ln.startHandshake(conn)
+ if err != nil {
+ log.Debug("failed to start handshake", "error", err)
+ continue
+ }
+ go ln.server.ServeQUICConn(conn)
+ }
+ }()
+ return ln, nil
+}
+
+func (l *listener) startHandshake(conn *quic.Conn) error {
+ ctx, cancel := context.WithTimeout(l.ctx, handshakeTimeout)
+ stopHandshakeTimeout := context.AfterFunc(ctx, func() {
+ log.Debug("failed to handshake on conn", "remote_addr", conn.RemoteAddr())
+ conn.CloseWithError(1, "")
+ l.mx.Lock()
+ delete(l.pendingConns, conn)
+ l.mx.Unlock()
+ })
+ l.mx.Lock()
+ defer l.mx.Unlock()
+ // don't add to map if the context is already cancelled
+ if ctx.Err() != nil {
+ cancel()
+ return ctx.Err()
+ }
+ l.pendingConns[conn] = &negotiatingConn{
+ Conn: conn,
+ ctx: ctx,
+ cancel: cancel,
+ stopHandshakeTimeout: stopHandshakeTimeout,
+ }
+ return nil
+}
+
+// negotiatingConn is a wrapper around a *quic.Conn that lets us wrap it in
+// our own context for the duration of the upgrade process, i.e. upgrading the
+// QUIC connection to an HTTP/3 connection and then to a WebTransport session.
+type negotiatingConn struct {
+ *quic.Conn
+ ctx context.Context
+ cancel context.CancelFunc
+ // stopHandshakeTimeout stops the handshake-timeout callback from firing.
+ // It returns true if the callback had not already been triggered.
+ stopHandshakeTimeout func() bool
+ err error
+}
+
+func (c *negotiatingConn) StopHandshakeTimeout() error {
+ defer c.cancel()
+ if c.stopHandshakeTimeout != nil {
+ // cancel the handshake timeout function
+ if !c.stopHandshakeTimeout() {
+ c.err = errTimeout
+ }
+ c.stopHandshakeTimeout = nil
+ }
+ if c.err != nil {
+ return c.err
+ }
+ return nil
+}
+
+var errTimeout = errors.New("timeout")
+
+func (l *listener) httpHandler(w http.ResponseWriter, r *http.Request) {
+ typ, ok := r.URL.Query()["type"]
+ if !ok || len(typ) != 1 || typ[0] != "noise" {
+ w.WriteHeader(http.StatusBadRequest)
+ return
+ }
+ remoteMultiaddr, err := stringToWebtransportMultiaddr(r.RemoteAddr)
+ if err != nil {
+ // This should never happen.
+ log.Error("converting remote address failed", "remote", r.RemoteAddr, "error", err)
+ w.WriteHeader(http.StatusBadRequest)
+ return
+ }
+ if l.transport.gater != nil && !l.transport.gater.InterceptAccept(&connMultiaddrs{local: l.multiaddr, remote: remoteMultiaddr}) {
+ w.WriteHeader(http.StatusForbidden)
+ return
+ }
+ connScope, err := network.UnwrapConnManagementScope(r.Context())
+ if err != nil {
+ connScope = nil
+ // Don't error here. Instead, open our own scope below when quicreuse
+ // didn't provide one. This is better than failing, so that users who
+ // don't wire the resource manager through the quicreuse.ConnContext
+ // option still work correctly.
+ }
+ if connScope == nil {
+ connScope, err = l.transport.rcmgr.OpenConnection(network.DirInbound, false, remoteMultiaddr)
+ if err != nil {
+ log.Debug("resource manager blocked incoming connection", "addr", r.RemoteAddr, "error", err)
+ w.WriteHeader(http.StatusServiceUnavailable)
+ return
+ }
+ }
+ err = l.httpHandlerWithConnScope(w, r, connScope)
+ if err != nil {
+ connScope.Done()
+ }
+}
+
+func (l *listener) httpHandlerWithConnScope(w http.ResponseWriter, r *http.Request, connScope network.ConnManagementScope) error {
+ sess, err := l.server.Upgrade(w, r)
+ if err != nil {
+ log.Debug("upgrade failed", "error", err)
+ // TODO: think about the status code to use here
+ w.WriteHeader(500)
+ return err
+ }
+ ctx, cancel := context.WithTimeout(l.ctx, handshakeTimeout)
+ sconn, err := l.handshake(ctx, sess)
+ if err != nil {
+ cancel()
+ log.Debug("handshake failed", "error", err)
+ sess.CloseWithError(1, "")
+ return err
+ }
+ cancel()
+
+ if l.transport.gater != nil && !l.transport.gater.InterceptSecured(network.DirInbound, sconn.RemotePeer(), sconn) {
+ // TODO: can we close with a specific error here?
+ sess.CloseWithError(errorCodeConnectionGating, "")
+ return errors.New("gater blocked connection")
+ }
+
+ if err := connScope.SetPeer(sconn.RemotePeer()); err != nil {
+ log.Debug("resource manager blocked incoming connection for peer", "peer", sconn.RemotePeer(), "addr", r.RemoteAddr, "error", err)
+ sess.CloseWithError(1, "")
+ return err
+ }
+
+ connVal := r.Context().Value(connKey{})
+ if connVal == nil {
+ log.Error("missing conn from context")
+ sess.CloseWithError(1, "")
+ return errors.New("invalid context")
+ }
+ qconn := connVal.(*quic.Conn)
+
+ l.mx.Lock()
+ nconn, ok := l.pendingConns[qconn]
+ delete(l.pendingConns, qconn)
+ l.mx.Unlock()
+ if !ok {
+ log.Debug("handshake timed out", "remote_addr", r.RemoteAddr)
+ sess.CloseWithError(1, "")
+ return errTimeout
+ }
+ if err := nconn.StopHandshakeTimeout(); err != nil {
+ log.Debug("handshake timed out", "remote_addr", r.RemoteAddr)
+ sess.CloseWithError(1, "")
+ return err
+ }
+
+ conn := newConn(l.transport, sess, sconn, connScope, qconn)
+ l.transport.addConn(qconn, conn)
+ select {
+ case l.queue <- conn:
+ default:
+ log.Debug("accept queue full, dropping incoming connection", "peer", sconn.RemotePeer(), "addr", r.RemoteAddr, "error", err)
+ conn.Close()
+ return errors.New("accept queue full")
+ }
+
+ return nil
+}
+
+func (l *listener) Accept() (tpt.CapableConn, error) {
+ select {
+ case <-l.ctx.Done():
+ return nil, tpt.ErrListenerClosed
+ case c := <-l.queue:
+ return c, nil
+ }
+}
+
+func (l *listener) handshake(ctx context.Context, sess *webtransport.Session) (*connSecurityMultiaddrs, error) {
+ local, err := toWebtransportMultiaddr(sess.LocalAddr())
+ if err != nil {
+ return nil, fmt.Errorf("error determiniting local addr: %w", err)
+ }
+ remote, err := toWebtransportMultiaddr(sess.RemoteAddr())
+ if err != nil {
+ return nil, fmt.Errorf("error determiniting remote addr: %w", err)
+ }
+
+ str, err := sess.AcceptStream(ctx)
+ if err != nil {
+ return nil, err
+ }
+ var earlyData [][]byte
+ if !l.isStaticTLSConf {
+ earlyData = l.transport.certManager.SerializedCertHashes()
+ }
+
+ n, err := l.transport.noise.WithSessionOptions(noise.EarlyData(
+ nil,
+ newEarlyDataSender(&pb.NoiseExtensions{WebtransportCerthashes: earlyData}),
+ ))
+ if err != nil {
+ return nil, fmt.Errorf("failed to initialize Noise session: %w", err)
+ }
+ c, err := n.SecureInbound(ctx, webtransportStream{Stream: str, wsess: sess}, "")
+ if err != nil {
+ return nil, err
+ }
+
+ return &connSecurityMultiaddrs{
+ ConnSecurity: c,
+ ConnMultiaddrs: &connMultiaddrs{local: local, remote: remote},
+ }, nil
+}
+
+func (l *listener) Addr() net.Addr {
+ return l.addr
+}
+
+func (l *listener) Multiaddr() ma.Multiaddr {
+ if l.transport.certManager == nil {
+ return l.multiaddr
+ }
+ return l.multiaddr.Encapsulate(l.transport.certManager.AddrComponent())
+}
+
+func (l *listener) Close() error {
+ l.ctxCancel()
+ l.reuseListener.Close()
+ err := l.server.Close()
+ <-l.serverClosed
+loop:
+ for {
+ select {
+ case conn := <-l.queue:
+ conn.Close()
+ default:
+ break loop
+ }
+ }
+ return err
+}
diff --git a/p2p/transport/webtransport/mock_connection_gater_test.go b/p2p/transport/webtransport/mock_connection_gater_test.go
new file mode 100644
index 0000000000..d0a4747dbe
--- /dev/null
+++ b/p2p/transport/webtransport/mock_connection_gater_test.go
@@ -0,0 +1,115 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: github.com/libp2p/go-libp2p/core/connmgr (interfaces: ConnectionGater)
+//
+// Generated by this command:
+//
+// mockgen -package libp2pwebtransport_test -destination mock_connection_gater_test.go github.com/libp2p/go-libp2p/core/connmgr ConnectionGater
+//
+
+// Package libp2pwebtransport_test is a generated GoMock package.
+package libp2pwebtransport_test
+
+import (
+ reflect "reflect"
+
+ control "github.com/libp2p/go-libp2p/core/control"
+ network "github.com/libp2p/go-libp2p/core/network"
+ peer "github.com/libp2p/go-libp2p/core/peer"
+ multiaddr "github.com/multiformats/go-multiaddr"
+ gomock "go.uber.org/mock/gomock"
+)
+
+// MockConnectionGater is a mock of ConnectionGater interface.
+type MockConnectionGater struct {
+ ctrl *gomock.Controller
+ recorder *MockConnectionGaterMockRecorder
+ isgomock struct{}
+}
+
+// MockConnectionGaterMockRecorder is the mock recorder for MockConnectionGater.
+type MockConnectionGaterMockRecorder struct {
+ mock *MockConnectionGater
+}
+
+// NewMockConnectionGater creates a new mock instance.
+func NewMockConnectionGater(ctrl *gomock.Controller) *MockConnectionGater {
+ mock := &MockConnectionGater{ctrl: ctrl}
+ mock.recorder = &MockConnectionGaterMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockConnectionGater) EXPECT() *MockConnectionGaterMockRecorder {
+ return m.recorder
+}
+
+// InterceptAccept mocks base method.
+func (m *MockConnectionGater) InterceptAccept(arg0 network.ConnMultiaddrs) bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "InterceptAccept", arg0)
+ ret0, _ := ret[0].(bool)
+ return ret0
+}
+
+// InterceptAccept indicates an expected call of InterceptAccept.
+func (mr *MockConnectionGaterMockRecorder) InterceptAccept(arg0 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InterceptAccept", reflect.TypeOf((*MockConnectionGater)(nil).InterceptAccept), arg0)
+}
+
+// InterceptAddrDial mocks base method.
+func (m *MockConnectionGater) InterceptAddrDial(arg0 peer.ID, arg1 multiaddr.Multiaddr) bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "InterceptAddrDial", arg0, arg1)
+ ret0, _ := ret[0].(bool)
+ return ret0
+}
+
+// InterceptAddrDial indicates an expected call of InterceptAddrDial.
+func (mr *MockConnectionGaterMockRecorder) InterceptAddrDial(arg0, arg1 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InterceptAddrDial", reflect.TypeOf((*MockConnectionGater)(nil).InterceptAddrDial), arg0, arg1)
+}
+
+// InterceptPeerDial mocks base method.
+func (m *MockConnectionGater) InterceptPeerDial(p peer.ID) bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "InterceptPeerDial", p)
+ ret0, _ := ret[0].(bool)
+ return ret0
+}
+
+// InterceptPeerDial indicates an expected call of InterceptPeerDial.
+func (mr *MockConnectionGaterMockRecorder) InterceptPeerDial(p any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InterceptPeerDial", reflect.TypeOf((*MockConnectionGater)(nil).InterceptPeerDial), p)
+}
+
+// InterceptSecured mocks base method.
+func (m *MockConnectionGater) InterceptSecured(arg0 network.Direction, arg1 peer.ID, arg2 network.ConnMultiaddrs) bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "InterceptSecured", arg0, arg1, arg2)
+ ret0, _ := ret[0].(bool)
+ return ret0
+}
+
+// InterceptSecured indicates an expected call of InterceptSecured.
+func (mr *MockConnectionGaterMockRecorder) InterceptSecured(arg0, arg1, arg2 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InterceptSecured", reflect.TypeOf((*MockConnectionGater)(nil).InterceptSecured), arg0, arg1, arg2)
+}
+
+// InterceptUpgraded mocks base method.
+func (m *MockConnectionGater) InterceptUpgraded(arg0 network.Conn) (bool, control.DisconnectReason) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "InterceptUpgraded", arg0)
+ ret0, _ := ret[0].(bool)
+ ret1, _ := ret[1].(control.DisconnectReason)
+ return ret0, ret1
+}
+
+// InterceptUpgraded indicates an expected call of InterceptUpgraded.
+func (mr *MockConnectionGaterMockRecorder) InterceptUpgraded(arg0 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InterceptUpgraded", reflect.TypeOf((*MockConnectionGater)(nil).InterceptUpgraded), arg0)
+}
diff --git a/p2p/transport/webtransport/multiaddr.go b/p2p/transport/webtransport/multiaddr.go
new file mode 100644
index 0000000000..6b7b37f487
--- /dev/null
+++ b/p2p/transport/webtransport/multiaddr.go
@@ -0,0 +1,113 @@
+package libp2pwebtransport
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "strconv"
+
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+ "github.com/multiformats/go-multibase"
+ "github.com/multiformats/go-multihash"
+)
+
+var webtransportMA = ma.StringCast("/quic-v1/webtransport")
+
+func toWebtransportMultiaddr(na net.Addr) (ma.Multiaddr, error) {
+ addr, err := manet.FromNetAddr(na)
+ if err != nil {
+ return nil, err
+ }
+ if _, err := addr.ValueForProtocol(ma.P_UDP); err != nil {
+ return nil, errors.New("not a UDP address")
+ }
+ return addr.Encapsulate(webtransportMA), nil
+}
+
+func stringToWebtransportMultiaddr(str string) (ma.Multiaddr, error) {
+ host, portStr, err := net.SplitHostPort(str)
+ if err != nil {
+ return nil, err
+ }
+ port, err := strconv.ParseInt(portStr, 10, 32)
+ if err != nil {
+ return nil, err
+ }
+ ip := net.ParseIP(host)
+ if ip == nil {
+ return nil, errors.New("failed to parse IP")
+ }
+ return toWebtransportMultiaddr(&net.UDPAddr{IP: ip, Port: int(port)})
+}
+
+func extractCertHashes(addr ma.Multiaddr) ([]multihash.DecodedMultihash, error) {
+ certHashesStr := make([]string, 0, 2)
+ ma.ForEach(addr, func(c ma.Component) bool {
+ if c.Protocol().Code == ma.P_CERTHASH {
+ certHashesStr = append(certHashesStr, c.Value())
+ }
+ return true
+ })
+ certHashes := make([]multihash.DecodedMultihash, 0, len(certHashesStr))
+ for _, s := range certHashesStr {
+ _, ch, err := multibase.Decode(s)
+ if err != nil {
+ return nil, fmt.Errorf("failed to multibase-decode certificate hash: %w", err)
+ }
+ dh, err := multihash.Decode(ch)
+ if err != nil {
+ return nil, fmt.Errorf("failed to multihash-decode certificate hash: %w", err)
+ }
+ certHashes = append(certHashes, *dh)
+ }
+ return certHashes, nil
+}
+
+func addrComponentForCert(hash []byte) (*ma.Component, error) {
+ mh, err := multihash.Encode(hash, multihash.SHA2_256)
+ if err != nil {
+ return nil, err
+ }
+ certStr, err := multibase.Encode(multibase.Base58BTC, mh)
+ if err != nil {
+ return nil, err
+ }
+ return ma.NewComponent(ma.ProtocolWithCode(ma.P_CERTHASH).Name, certStr)
+}
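+
+// For illustration (a sketch; certDER stands in for any DER-encoded certificate):
+//
+//	hash := sha256.Sum256(certDER)
+//	comp, err := addrComponentForCert(hash[:])
+//	// comp renders as "/certhash/z..." (base58btc multibase of the multihash);
+//	// certManager appends such components to the listen multiaddr.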
+
+// IsWebtransportMultiaddr returns true if the given multiaddr is a well-formed
+// WebTransport multiaddr, along with the number of certhashes found.
+func IsWebtransportMultiaddr(multiaddr ma.Multiaddr) (bool, int) {
+ const (
+ init = iota
+ foundUDP
+ foundQuicV1
+ foundWebTransport
+ )
+ state := init
+ certhashCount := 0
+
+ ma.ForEach(multiaddr, func(c ma.Component) bool {
+ switch c.Protocol().Code {
+ case ma.P_UDP:
+ if state == init {
+ state = foundUDP
+ }
+ case ma.P_QUIC_V1:
+ if state == foundUDP {
+ state = foundQuicV1
+ }
+ case ma.P_WEBTRANSPORT:
+ if state == foundQuicV1 {
+ state = foundWebTransport
+ }
+ case ma.P_CERTHASH:
+ if state == foundWebTransport {
+ certhashCount++
+ }
+ }
+ return true
+ })
+ return state == foundWebTransport, certhashCount
+}
diff --git a/p2p/transport/webtransport/multiaddr_test.go b/p2p/transport/webtransport/multiaddr_test.go
new file mode 100644
index 0000000000..3f0a3ec0bf
--- /dev/null
+++ b/p2p/transport/webtransport/multiaddr_test.go
@@ -0,0 +1,131 @@
+package libp2pwebtransport
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "testing"
+
+ ma "github.com/multiformats/go-multiaddr"
+ "github.com/multiformats/go-multibase"
+ "github.com/multiformats/go-multihash"
+ "github.com/stretchr/testify/require"
+)
+
+func TestWebtransportMultiaddr(t *testing.T) {
+ t.Run("valid", func(t *testing.T) {
+ addr, err := toWebtransportMultiaddr(&net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 1337})
+ require.NoError(t, err)
+ require.Equal(t, "/ip4/127.0.0.1/udp/1337/quic-v1/webtransport", addr.String())
+ })
+
+ t.Run("invalid", func(t *testing.T) {
+ _, err := toWebtransportMultiaddr(&net.TCPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 1337})
+ require.EqualError(t, err, "not a UDP address")
+ })
+}
+
+func TestWebtransportMultiaddrFromString(t *testing.T) {
+ t.Run("valid", func(t *testing.T) {
+ addr, err := stringToWebtransportMultiaddr("1.2.3.4:60042")
+ require.NoError(t, err)
+ require.Equal(t, "/ip4/1.2.3.4/udp/60042/quic-v1/webtransport", addr.String())
+ })
+
+ t.Run("invalid", func(t *testing.T) {
+ for _, addr := range [...]string{
+ "1.2.3.4", // missing port
+ "1.2.3.4:123456", // invalid port
+ ":1234", // missing IP
+ "foobar",
+ } {
+ _, err := stringToWebtransportMultiaddr(addr)
+ require.Error(t, err)
+ }
+ })
+}
+
+func encodeCertHash(t *testing.T, b []byte, mh uint64, mb multibase.Encoding) string {
+ t.Helper()
+ h, err := multihash.Encode(b, mh)
+ require.NoError(t, err)
+ str, err := multibase.Encode(mb, h)
+ require.NoError(t, err)
+ return str
+}
+
+func TestExtractCertHashes(t *testing.T) {
+ fooHash := encodeCertHash(t, []byte("foo"), multihash.SHA2_256, multibase.Base58BTC)
+ barHash := encodeCertHash(t, []byte("bar"), multihash.BLAKE2B_MAX, multibase.Base32)
+
+ // valid cases
+ for _, tc := range [...]struct {
+ addr string
+ hashes []string
+ }{
+ {addr: "/ip4/127.0.0.1/udp/1234/quic-v1/webtransport"},
+ {addr: fmt.Sprintf("/ip4/127.0.0.1/udp/1234/quic-v1/webtransport/certhash/%s", fooHash), hashes: []string{"foo"}},
+ {addr: fmt.Sprintf("/ip4/127.0.0.1/udp/1234/quic-v1/webtransport/certhash/%s/certhash/%s", fooHash, barHash), hashes: []string{"foo", "bar"}},
+ } {
+ ch, err := extractCertHashes(ma.StringCast(tc.addr))
+ require.NoError(t, err)
+ require.Len(t, ch, len(tc.hashes))
+ for i, h := range tc.hashes {
+ require.Equal(t, h, string(ch[i].Digest))
+ }
+ }
+}
+
+func TestWebtransportResolve(t *testing.T) {
+ testCases := []string{
+ "/dns4/example.com/udp/1337/quic-v1/webtransport",
+ "/dnsaddr/example.com/udp/1337/quic-v1/webtransport",
+ "/ip4/127.0.0.1/udp/1337/quic-v1/sni/example.com/webtransport",
+ }
+
+ tpt := &transport{}
+ ctx := context.Background()
+
+ for _, tc := range testCases {
+ t.Run(tc, func(t *testing.T) {
+ outMa, err := tpt.Resolve(ctx, ma.StringCast(tc))
+ require.NoError(t, err)
+ sni, err := outMa[0].ValueForProtocol(ma.P_SNI)
+ require.NoError(t, err)
+ require.Equal(t, "example.com", sni)
+ })
+ }
+
+ t.Run("No sni", func(t *testing.T) {
+ outMa, err := tpt.Resolve(ctx, ma.StringCast("/ip4/127.0.0.1/udp/1337/quic-v1/webtransport"))
+ require.NoError(t, err)
+ _, err = outMa[0].ValueForProtocol(ma.P_SNI)
+ require.Error(t, err)
+ })
+}
+
+func TestIsWebtransportMultiaddr(t *testing.T) {
+ fooHash := encodeCertHash(t, []byte("foo"), multihash.SHA2_256, multibase.Base58BTC)
+ barHash := encodeCertHash(t, []byte("bar"), multihash.SHA2_256, multibase.Base58BTC)
+
+ testCases := []struct {
+ addr string
+ want bool
+ certhashCount int
+ }{
+ {addr: "/ip4/1.2.3.4/udp/60042/quic-v1/webtransport", want: true},
+ {addr: "/ip4/1.2.3.4/udp/60042/quic-v1/webtransport/certhash/" + fooHash, want: true, certhashCount: 1},
+ {addr: "/ip4/1.2.3.4/udp/60042/quic-v1/webtransport/certhash/" + fooHash + "/certhash/" + barHash, want: true, certhashCount: 2},
+ {addr: "/dns4/example.com/udp/60042/quic-v1/webtransport/certhash/" + fooHash, want: true, certhashCount: 1},
+ {addr: "/dns4/example.com/tcp/60042/quic-v1/webtransport/certhash/" + fooHash, want: false},
+ {addr: "/dns4/example.com/udp/60042/webrtc/certhash/" + fooHash, want: false},
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.addr, func(t *testing.T) {
+ got, n := IsWebtransportMultiaddr(ma.StringCast(tc.addr))
+ require.Equal(t, tc.want, got)
+ require.Equal(t, tc.certhashCount, n)
+ })
+ }
+}
diff --git a/p2p/transport/webtransport/noise_early_data.go b/p2p/transport/webtransport/noise_early_data.go
new file mode 100644
index 0000000000..6ca8d9ddb7
--- /dev/null
+++ b/p2p/transport/webtransport/noise_early_data.go
@@ -0,0 +1,36 @@
+package libp2pwebtransport
+
+import (
+ "context"
+ "net"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/p2p/security/noise"
+ "github.com/libp2p/go-libp2p/p2p/security/noise/pb"
+)
+
+type earlyDataHandler struct {
+ earlyData *pb.NoiseExtensions
+ receive func(extensions *pb.NoiseExtensions) error
+}
+
+var _ noise.EarlyDataHandler = &earlyDataHandler{}
+
+func newEarlyDataSender(earlyData *pb.NoiseExtensions) noise.EarlyDataHandler {
+ return &earlyDataHandler{earlyData: earlyData}
+}
+
+func newEarlyDataReceiver(receive func(*pb.NoiseExtensions) error) noise.EarlyDataHandler {
+ return &earlyDataHandler{receive: receive}
+}
+
+func (e *earlyDataHandler) Send(context.Context, net.Conn, peer.ID) *pb.NoiseExtensions {
+ return e.earlyData
+}
+
+func (e *earlyDataHandler) Received(_ context.Context, _ net.Conn, ext *pb.NoiseExtensions) error {
+ if e.receive == nil {
+ return nil
+ }
+ return e.receive(ext)
+}
diff --git a/p2p/transport/webtransport/stream.go b/p2p/transport/webtransport/stream.go
new file mode 100644
index 0000000000..3b157c9f08
--- /dev/null
+++ b/p2p/transport/webtransport/stream.go
@@ -0,0 +1,96 @@
+package libp2pwebtransport
+
+import (
+ "errors"
+ "net"
+
+ "github.com/libp2p/go-libp2p/core/network"
+
+ "github.com/quic-go/webtransport-go"
+)
+
+const (
+ reset webtransport.StreamErrorCode = 0
+)
+
+type webtransportStream struct {
+ *webtransport.Stream
+ wsess *webtransport.Session
+}
+
+var _ net.Conn = webtransportStream{}
+
+func (s webtransportStream) LocalAddr() net.Addr {
+ return s.wsess.LocalAddr()
+}
+
+func (s webtransportStream) RemoteAddr() net.Addr {
+ return s.wsess.RemoteAddr()
+}
+
+type stream struct {
+ *webtransport.Stream
+}
+
+var _ network.MuxedStream = stream{}
+
+func (s stream) Read(b []byte) (n int, err error) {
+ n, err = s.Stream.Read(b)
+ if err != nil {
+ var streamErr *webtransport.StreamError
+ if errors.As(err, &streamErr) {
+ err = &network.StreamError{
+ ErrorCode: 0,
+ Remote: streamErr.Remote,
+ TransportError: err,
+ }
+ }
+ }
+ return n, err
+}
+
+func (s stream) Write(b []byte) (n int, err error) {
+ n, err = s.Stream.Write(b)
+ if err != nil {
+ var streamErr *webtransport.StreamError
+ if errors.As(err, &streamErr) {
+ err = &network.StreamError{
+ ErrorCode: 0,
+ Remote: streamErr.Remote,
+ TransportError: err,
+ }
+ }
+ }
+ return n, err
+}
+
+func (s stream) Reset() error {
+ s.Stream.CancelRead(reset)
+ s.Stream.CancelWrite(reset)
+ return nil
+}
+
+// ResetWithError resets the stream, ignoring the error code. Error codes
+// aren't specified for WebTransport, as the current implementation of
+// WebTransport in browsers
+// (https://www.ietf.org/archive/id/draft-kinnear-webtransport-http2-02.html)
+// only supports 1-byte error codes. For more details, see
+// https://github.com/libp2p/specs/blob/4eca305185c7aef219e936bef76c48b1ab0a8b43/error-codes/README.md?plain=1#L84
+func (s stream) ResetWithError(_ network.StreamErrorCode) error {
+ s.Stream.CancelRead(reset)
+ s.Stream.CancelWrite(reset)
+ return nil
+}
+
+func (s stream) Close() error {
+ s.Stream.CancelRead(reset)
+ return s.Stream.Close()
+}
+
+func (s stream) CloseRead() error {
+ s.Stream.CancelRead(reset)
+ return nil
+}
+
+func (s stream) CloseWrite() error {
+ return s.Stream.Close()
+}
diff --git a/p2p/transport/webtransport/transport.go b/p2p/transport/webtransport/transport.go
new file mode 100644
index 0000000000..c18ace1606
--- /dev/null
+++ b/p2p/transport/webtransport/transport.go
@@ -0,0 +1,441 @@
+package libp2pwebtransport
+
+import (
+ "bytes"
+ "context"
+ "crypto/tls"
+ "crypto/x509"
+ "errors"
+ "fmt"
+ "io"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/connmgr"
+ ic "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/pnet"
+ tpt "github.com/libp2p/go-libp2p/core/transport"
+ "github.com/libp2p/go-libp2p/p2p/security/noise"
+ "github.com/libp2p/go-libp2p/p2p/security/noise/pb"
+ "github.com/libp2p/go-libp2p/p2p/transport/quicreuse"
+
+ "github.com/benbjohnson/clock"
+ logging "github.com/libp2p/go-libp2p/gologshim"
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+ "github.com/multiformats/go-multihash"
+ "github.com/quic-go/quic-go"
+ "github.com/quic-go/quic-go/http3"
+ "github.com/quic-go/webtransport-go"
+)
+
+var log = logging.Logger("webtransport")
+
+const webtransportHTTPEndpoint = "/.well-known/libp2p-webtransport"
+
+const errorCodeConnectionGating = 0x47415445 // GATE in ASCII
+
+const certValidity = 14 * 24 * time.Hour
+
+type Option func(*transport) error
+
+func WithClock(cl clock.Clock) Option {
+ return func(t *transport) error {
+ t.clock = cl
+ return nil
+ }
+}
+
+// WithTLSClientConfig sets a custom tls.Config used for dialing.
+// This option is most useful for setting a custom tls.Config.RootCAs certificate pool.
+// When dialing a multiaddr that contains a /certhash component, this library will set InsecureSkipVerify and
+// overwrite the VerifyPeerCertificate callback.
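+//
+// A minimal sketch (key, connMgr and rcmgr are caller-provided):
+//
+//	pool := x509.NewCertPool()
+//	// ... add trusted roots to pool ...
+//	tr, err := New(key, nil, connMgr, nil, rcmgr,
+//		WithTLSClientConfig(&tls.Config{RootCAs: pool}))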
+func WithTLSClientConfig(c *tls.Config) Option {
+ return func(t *transport) error {
+ t.tlsClientConf = c
+ return nil
+ }
+}
+
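+// WithHandshakeTimeout sets the timeout used for the WebTransport handshake,
+// overriding the package default.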
+func WithHandshakeTimeout(d time.Duration) Option {
+ return func(t *transport) error {
+ t.handshakeTimeout = d
+ return nil
+ }
+}
+
+type transport struct {
+ privKey ic.PrivKey
+ pid peer.ID
+ clock clock.Clock
+
+ connManager *quicreuse.ConnManager
+ rcmgr network.ResourceManager
+ gater connmgr.ConnectionGater
+
+ listenOnce sync.Once
+ listenOnceErr error
+ certManager *certManager
+ hasCertManager atomic.Bool // set to true once the certManager is initialized
+ staticTLSConf *tls.Config
+ tlsClientConf *tls.Config
+
+ noise *noise.Transport
+
+ connMx sync.Mutex
+ conns map[*quic.Conn]*conn // quic connection -> *conn
+ handshakeTimeout time.Duration
+}
+
+var _ tpt.Transport = &transport{}
+var _ tpt.Resolver = &transport{}
+var _ io.Closer = &transport{}
+
+func New(key ic.PrivKey, psk pnet.PSK, connManager *quicreuse.ConnManager, gater connmgr.ConnectionGater, rcmgr network.ResourceManager, opts ...Option) (tpt.Transport, error) {
+ if len(psk) > 0 {
+ log.Error("WebTransport doesn't support private networks yet.")
+ return nil, errors.New("WebTransport doesn't support private networks yet")
+ }
+ if rcmgr == nil {
+ rcmgr = &network.NullResourceManager{}
+ }
+ id, err := peer.IDFromPrivateKey(key)
+ if err != nil {
+ return nil, err
+ }
+ t := &transport{
+ pid: id,
+ privKey: key,
+ rcmgr: rcmgr,
+ gater: gater,
+ clock: clock.New(),
+ connManager: connManager,
+ conns: map[*quic.Conn]*conn{},
+ handshakeTimeout: handshakeTimeout,
+ }
+ for _, opt := range opts {
+ if err := opt(t); err != nil {
+ return nil, err
+ }
+ }
+ n, err := noise.New(noise.ID, key, nil)
+ if err != nil {
+ return nil, err
+ }
+ t.noise = n
+ return t, nil
+}
+
+func (t *transport) Dial(ctx context.Context, raddr ma.Multiaddr, p peer.ID) (tpt.CapableConn, error) {
+ scope, err := t.rcmgr.OpenConnection(network.DirOutbound, false, raddr)
+ if err != nil {
+ log.Debug("resource manager blocked outgoing connection", "peer", p, "addr", raddr, "error", err)
+ return nil, err
+ }
+
+ c, err := t.dialWithScope(ctx, raddr, p, scope)
+ if err != nil {
+ scope.Done()
+ return nil, err
+ }
+
+ return c, nil
+}
+
+func (t *transport) dialWithScope(ctx context.Context, raddr ma.Multiaddr, p peer.ID, scope network.ConnManagementScope) (tpt.CapableConn, error) {
+ _, addr, err := manet.DialArgs(raddr)
+ if err != nil {
+ return nil, err
+ }
+ url := fmt.Sprintf("https://%s%s?type=noise", addr, webtransportHTTPEndpoint)
+ certHashes, err := extractCertHashes(raddr)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(certHashes) == 0 {
+ return nil, errors.New("can't dial webtransport without certhashes")
+ }
+
+ sni, _ := extractSNI(raddr)
+
+ if err := scope.SetPeer(p); err != nil {
+ log.Debug("resource manager blocked outgoing connection for peer", "peer", p, "addr", raddr, "error", err)
+ return nil, err
+ }
+
+ maddr, _ := ma.SplitFunc(raddr, func(c ma.Component) bool { return c.Protocol().Code == ma.P_WEBTRANSPORT })
+ sess, qconn, err := t.dial(ctx, maddr, url, sni, certHashes)
+ if err != nil {
+ return nil, err
+ }
+ sconn, err := t.upgrade(ctx, sess, p, certHashes)
+ if err != nil {
+ sess.CloseWithError(1, "")
+ qconn.CloseWithError(1, "")
+ return nil, err
+ }
+ if t.gater != nil && !t.gater.InterceptSecured(network.DirOutbound, p, sconn) {
+ sess.CloseWithError(errorCodeConnectionGating, "")
+ qconn.CloseWithError(errorCodeConnectionGating, "")
+ return nil, fmt.Errorf("secured connection gated")
+ }
+ conn := newConn(t, sess, sconn, scope, qconn)
+ t.addConn(qconn, conn)
+ return conn, nil
+}
+
+func (t *transport) dial(ctx context.Context, addr ma.Multiaddr, url, sni string, certHashes []multihash.DecodedMultihash) (*webtransport.Session, *quic.Conn, error) {
+ var tlsConf *tls.Config
+ if t.tlsClientConf != nil {
+ tlsConf = t.tlsClientConf.Clone()
+ } else {
+ tlsConf = &tls.Config{}
+ }
+ tlsConf.NextProtos = append(tlsConf.NextProtos, http3.NextProtoH3)
+
+ if sni != "" {
+ tlsConf.ServerName = sni
+ }
+
+ if len(certHashes) > 0 {
+ // This is not insecure. We verify the certificate ourselves.
+ // See https://www.w3.org/TR/webtransport/#certificate-hashes.
+ tlsConf.InsecureSkipVerify = true
+ tlsConf.VerifyPeerCertificate = func(rawCerts [][]byte, _ [][]*x509.Certificate) error {
+ return verifyRawCerts(rawCerts, certHashes)
+ }
+ }
+ ctx = quicreuse.WithAssociation(ctx, t)
+ conn, err := t.connManager.DialQUIC(ctx, addr, tlsConf, t.allowWindowIncrease)
+ if err != nil {
+ return nil, nil, err
+ }
+ dialer := webtransport.Dialer{
+ DialAddr: func(_ context.Context, _ string, _ *tls.Config, _ *quic.Config) (*quic.Conn, error) {
+ return conn, nil
+ },
+ QUICConfig: t.connManager.ClientConfig().Clone(),
+ }
+ rsp, sess, err := dialer.Dial(ctx, url, nil)
+ if err != nil {
+ conn.CloseWithError(1, "")
+ return nil, nil, err
+ }
+ if rsp.StatusCode < 200 || rsp.StatusCode > 299 {
+ conn.CloseWithError(1, "")
+ return nil, nil, fmt.Errorf("invalid response status code: %d", rsp.StatusCode)
+ }
+ return sess, conn, err
+}
+
+func (t *transport) upgrade(ctx context.Context, sess *webtransport.Session, p peer.ID, certHashes []multihash.DecodedMultihash) (*connSecurityMultiaddrs, error) {
+ local, err := toWebtransportMultiaddr(sess.LocalAddr())
+ if err != nil {
+ return nil, fmt.Errorf("error determining local addr: %w", err)
+ }
+ remote, err := toWebtransportMultiaddr(sess.RemoteAddr())
+ if err != nil {
+ return nil, fmt.Errorf("error determining remote addr: %w", err)
+ }
+
+ str, err := sess.OpenStreamSync(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer str.Close()
+
+ // Now run a Noise handshake (using early data) and get all the certificate hashes from the server.
+ // We will verify that the certhashes we used to dial is a subset of the certhashes we received from the server.
+ var verified bool
+ n, err := t.noise.WithSessionOptions(noise.EarlyData(newEarlyDataReceiver(func(b *pb.NoiseExtensions) error {
+ decodedCertHashes, err := decodeCertHashesFromProtobuf(b.WebtransportCerthashes)
+ if err != nil {
+ return err
+ }
+ for _, sent := range certHashes {
+ var found bool
+ for _, rcvd := range decodedCertHashes {
+ if sent.Code == rcvd.Code && bytes.Equal(sent.Digest, rcvd.Digest) {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return fmt.Errorf("missing cert hash: %v", sent)
+ }
+ }
+ verified = true
+ return nil
+ }), nil))
+ if err != nil {
+ return nil, fmt.Errorf("failed to create Noise transport: %w", err)
+ }
+ c, err := n.SecureOutbound(ctx, webtransportStream{Stream: str, wsess: sess}, p)
+ if err != nil {
+ return nil, err
+ }
+ defer c.Close()
+ // The Noise handshake _should_ guarantee that our verification callback is called.
+ // Double-check just in case.
+ if !verified {
+ return nil, errors.New("didn't verify")
+ }
+ return &connSecurityMultiaddrs{
+ ConnSecurity: c,
+ ConnMultiaddrs: &connMultiaddrs{local: local, remote: remote},
+ }, nil
+}
+
+func decodeCertHashesFromProtobuf(b [][]byte) ([]multihash.DecodedMultihash, error) {
+ hashes := make([]multihash.DecodedMultihash, 0, len(b))
+ for _, h := range b {
+ dh, err := multihash.Decode(h)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode hash: %w", err)
+ }
+ hashes = append(hashes, *dh)
+ }
+ return hashes, nil
+}
+
+func (t *transport) CanDial(addr ma.Multiaddr) bool {
+ ok, _ := IsWebtransportMultiaddr(addr)
+ return ok
+}
+
+func (t *transport) Listen(laddr ma.Multiaddr) (tpt.Listener, error) {
+ isWebTransport, certhashCount := IsWebtransportMultiaddr(laddr)
+ if !isWebTransport {
+ return nil, fmt.Errorf("cannot listen on non-WebTransport addr: %s", laddr)
+ }
+ if certhashCount > 0 {
+		return nil, fmt.Errorf("cannot listen on a WebTransport addr that includes a certhash: %s", laddr)
+ }
+ if t.staticTLSConf == nil {
+ t.listenOnce.Do(func() {
+ t.certManager, t.listenOnceErr = newCertManager(t.privKey, t.clock)
+ t.hasCertManager.Store(true)
+ })
+ if t.listenOnceErr != nil {
+ return nil, t.listenOnceErr
+ }
+ } else {
+ return nil, errors.New("static TLS config not supported on WebTransport")
+ }
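+	// staticTLSConf is always nil here (the else branch above returns), and
+	// Clone on a nil *tls.Config returns nil, so we fall through to the
+	// cert-manager-backed config below.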
+ tlsConf := t.staticTLSConf.Clone()
+ if tlsConf == nil {
+ tlsConf = &tls.Config{GetConfigForClient: func(*tls.ClientHelloInfo) (*tls.Config, error) {
+ return t.certManager.GetConfig(), nil
+ }}
+ }
+ tlsConf.NextProtos = append(tlsConf.NextProtos, http3.NextProtoH3)
+
+ ln, err := t.connManager.ListenQUICAndAssociate(t, laddr, tlsConf, t.allowWindowIncrease)
+ if err != nil {
+ return nil, err
+ }
+ return newListener(ln, t, t.staticTLSConf != nil)
+}
+
+func (t *transport) Protocols() []int {
+ return []int{ma.P_WEBTRANSPORT}
+}
+
+func (t *transport) Proxy() bool {
+ return false
+}
+
+func (t *transport) Close() error {
+ t.listenOnce.Do(func() {})
+ if t.certManager != nil {
+ return t.certManager.Close()
+ }
+ return nil
+}
+
+func (t *transport) allowWindowIncrease(conn *quic.Conn, size uint64) bool {
+ t.connMx.Lock()
+ defer t.connMx.Unlock()
+
+ c, ok := t.conns[conn]
+ if !ok {
+ return false
+ }
+ return c.allowWindowIncrease(size)
+}
+
+func (t *transport) addConn(conn *quic.Conn, c *conn) {
+ t.connMx.Lock()
+ t.conns[conn] = c
+ t.connMx.Unlock()
+}
+
+func (t *transport) removeConn(conn *quic.Conn) {
+ t.connMx.Lock()
+ delete(t.conns, conn)
+ t.connMx.Unlock()
+}
+
+// extractSNI returns what the SNI should be for the given maddr. If there is an
+// SNI component in the multiaddr, then it will be returned and
+// foundSniComponent will be true. If there's no SNI component, but there is a
+// DNS-like component, then that will be returned for the sni and
+// foundSniComponent will be false (since we didn't find an actual sni component).
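+// For example (illustrative): "/dns4/example.com/udp/443/quic-v1/webtransport"
+// yields ("example.com", false), while "/ip4/1.2.3.4/udp/443/quic-v1/sni/example.com/webtransport"
+// yields ("example.com", true).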
+func extractSNI(maddr ma.Multiaddr) (sni string, foundSniComponent bool) {
+ ma.ForEach(maddr, func(c ma.Component) bool {
+ switch c.Protocol().Code {
+ case ma.P_SNI:
+ sni = c.Value()
+ foundSniComponent = true
+ return false
+ case ma.P_DNS, ma.P_DNS4, ma.P_DNS6, ma.P_DNSADDR:
+ sni = c.Value()
+ // Keep going in case we find an `sni` component
+ return true
+ }
+ return true
+ })
+ return sni, foundSniComponent
+}
+
+// Resolve implements transport.Resolver
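+// If the multiaddr has a DNS-like component but no /sni component, an /sni
+// component is inserted directly after /quic-v1, e.g. (illustrative):
+//
+//	/dns4/example.com/udp/443/quic-v1/webtransport
+//	-> /dns4/example.com/udp/443/quic-v1/sni/example.com/webtransport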
+func (t *transport) Resolve(_ context.Context, maddr ma.Multiaddr) ([]ma.Multiaddr, error) {
+ sni, foundSniComponent := extractSNI(maddr)
+
+ if foundSniComponent || sni == "" {
+		// The multiaddr already has an SNI component, so we keep using it,
+		// or there's nothing DNS-like to derive an SNI from.
+ return []ma.Multiaddr{maddr}, nil
+ }
+
+ beforeQuicMA, afterIncludingQuicMA := ma.SplitFunc(maddr, func(c ma.Component) bool {
+ return c.Protocol().Code == ma.P_QUIC_V1
+ })
+ if len(afterIncludingQuicMA) == 0 {
+ return nil, fmt.Errorf("no quic component found in %s", maddr)
+ }
+ quicComponent, afterQuicMA := ma.SplitFirst(afterIncludingQuicMA)
+ if quicComponent == nil {
+ // Should not happen since we split on P_QUIC_V1 already
+ return nil, fmt.Errorf("no quic component found in %s", maddr)
+ }
+ sniComponent, err := ma.NewComponent(ma.ProtocolWithCode(ma.P_SNI).Name, sni)
+ if err != nil {
+ return nil, err
+ }
+ result := beforeQuicMA.AppendComponent(quicComponent, sniComponent)
+ result = append(result, afterQuicMA...)
+ return []ma.Multiaddr{result}, nil
+}
+
+// AddCertHashes adds the current certificate hashes to a multiaddress.
+// If called before Listen, it's a no-op.
+func (t *transport) AddCertHashes(m ma.Multiaddr) (ma.Multiaddr, bool) {
+ if !t.hasCertManager.Load() {
+ return m, false
+ }
+ return m.Encapsulate(t.certManager.AddrComponent()), true
+}
diff --git a/p2p/transport/webtransport/transport_test.go b/p2p/transport/webtransport/transport_test.go
new file mode 100644
index 0000000000..266e01d18f
--- /dev/null
+++ b/p2p/transport/webtransport/transport_test.go
@@ -0,0 +1,871 @@
+package libp2pwebtransport_test
+
+import (
+ "bytes"
+ "context"
+ "crypto/rand"
+ "crypto/sha256"
+ "crypto/tls"
+ "crypto/x509"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "os"
+ "runtime"
+ "sync/atomic"
+ "testing"
+ "testing/quick"
+ "time"
+
+ ic "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/network"
+ mocknetwork "github.com/libp2p/go-libp2p/core/network/mocks"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/test"
+ tpt "github.com/libp2p/go-libp2p/core/transport"
+ "github.com/libp2p/go-libp2p/p2p/transport/quicreuse"
+ libp2pwebtransport "github.com/libp2p/go-libp2p/p2p/transport/webtransport"
+
+ "github.com/benbjohnson/clock"
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+ "github.com/multiformats/go-multibase"
+ "github.com/multiformats/go-multihash"
+ "github.com/quic-go/quic-go"
+ "github.com/quic-go/quic-go/http3"
+ quicproxy "github.com/quic-go/quic-go/integrationtests/tools/proxy"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/mock/gomock"
+)
+
+const clockSkewAllowance = time.Hour
+const certValidity = 14 * 24 * time.Hour
+
+func newIdentity(t *testing.T) (peer.ID, ic.PrivKey) {
+ key, _, err := ic.GenerateEd25519Key(rand.Reader)
+ require.NoError(t, err)
+ id, err := peer.IDFromPrivateKey(key)
+ require.NoError(t, err)
+ return id, key
+}
+
+func randomMultihash(t *testing.T) string {
+ t.Helper()
+ b := make([]byte, 16)
+ rand.Read(b)
+ h, err := multihash.Encode(b, multihash.KECCAK_224)
+ require.NoError(t, err)
+ s, err := multibase.Encode(multibase.Base32hex, h)
+ require.NoError(t, err)
+ return s
+}
+
+func extractCertHashes(addr ma.Multiaddr) []string {
+ var certHashesStr []string
+ ma.ForEach(addr, func(c ma.Component) bool {
+ if c.Protocol().Code == ma.P_CERTHASH {
+ certHashesStr = append(certHashesStr, c.Value())
+ }
+ return true
+ })
+ return certHashesStr
+}
+
+func stripCertHashes(addr ma.Multiaddr) ma.Multiaddr {
+ for {
+ _, err := addr.ValueForProtocol(ma.P_CERTHASH)
+ if err != nil {
+ return addr
+ }
+ addr, _ = ma.SplitLast(addr)
+ }
+}
+
+// getCerthashComponent creates a /certhash multiaddr component from the SHA-256 of b
+func getCerthashComponent(t *testing.T, b []byte) *ma.Component {
+ t.Helper()
+ h := sha256.Sum256(b)
+ mh, err := multihash.Encode(h[:], multihash.SHA2_256)
+ require.NoError(t, err)
+ certStr, err := multibase.Encode(multibase.Base58BTC, mh)
+ require.NoError(t, err)
+ ha, err := ma.NewComponent(ma.ProtocolWithCode(ma.P_CERTHASH).Name, certStr)
+ require.NoError(t, err)
+ return ha
+}
+
+func newConnManager(t *testing.T, opts ...quicreuse.Option) *quicreuse.ConnManager {
+ t.Helper()
+ cm, err := quicreuse.NewConnManager(quic.StatelessResetKey{}, quic.TokenGeneratorKey{}, opts...)
+ require.NoError(t, err)
+ t.Cleanup(func() { cm.Close() })
+ return cm
+}
+
+func TestTransport(t *testing.T) {
+ serverID, serverKey := newIdentity(t)
+ tr, err := libp2pwebtransport.New(serverKey, nil, newConnManager(t), nil, nil)
+ require.NoError(t, err)
+ defer tr.(io.Closer).Close()
+ ln, err := tr.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport"))
+ require.NoError(t, err)
+ defer ln.Close()
+
+ addrChan := make(chan ma.Multiaddr)
+ go func() {
+ _, clientKey := newIdentity(t)
+ tr2, err := libp2pwebtransport.New(clientKey, nil, newConnManager(t), nil, nil)
+ require.NoError(t, err)
+ defer tr2.(io.Closer).Close()
+
+ conn, err := tr2.Dial(context.Background(), ln.Multiaddr(), serverID)
+ require.NoError(t, err)
+ str, err := conn.OpenStream(context.Background())
+ require.NoError(t, err)
+ _, err = str.Write([]byte("foobar"))
+ require.NoError(t, err)
+ require.NoError(t, str.Close())
+
+ // check RemoteMultiaddr
+ _, addr, err := manet.DialArgs(ln.Multiaddr())
+ require.NoError(t, err)
+ _, port, err := net.SplitHostPort(addr)
+ require.NoError(t, err)
+ require.Equal(t, fmt.Sprintf("/ip4/127.0.0.1/udp/%s/quic-v1/webtransport", port), conn.RemoteMultiaddr().String())
+ addrChan <- conn.RemoteMultiaddr()
+ }()
+
+ conn, err := ln.Accept()
+ require.NoError(t, err)
+ require.False(t, conn.IsClosed())
+ str, err := conn.AcceptStream()
+ require.NoError(t, err)
+ data, err := io.ReadAll(str)
+ require.NoError(t, err)
+ require.Equal(t, "foobar", string(data))
+ require.Equal(t, (<-addrChan).String(), conn.LocalMultiaddr().String())
+ require.NoError(t, conn.Close())
+ require.True(t, conn.IsClosed())
+}
+
+func TestHashVerification(t *testing.T) {
+ serverID, serverKey := newIdentity(t)
+ tr, err := libp2pwebtransport.New(serverKey, nil, newConnManager(t), nil, &network.NullResourceManager{})
+ require.NoError(t, err)
+ defer tr.(io.Closer).Close()
+ ln, err := tr.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport"))
+ require.NoError(t, err)
+ done := make(chan struct{})
+ go func() {
+ defer close(done)
+ _, err := ln.Accept()
+ require.Error(t, err)
+ }()
+
+ _, clientKey := newIdentity(t)
+ tr2, err := libp2pwebtransport.New(clientKey, nil, newConnManager(t), nil, &network.NullResourceManager{})
+ require.NoError(t, err)
+ defer tr2.(io.Closer).Close()
+
+ foobarHash := getCerthashComponent(t, []byte("foobar"))
+
+ t.Run("fails using only a wrong hash", func(t *testing.T) {
+ // replace the certificate hash in the multiaddr with a fake hash
+ addr := stripCertHashes(ln.Multiaddr()).Encapsulate(foobarHash)
+ _, err := tr2.Dial(context.Background(), addr, serverID)
+ require.Error(t, err)
+ var trErr *quic.TransportError
+ require.ErrorAs(t, err, &trErr)
+ require.Equal(t, quic.TransportErrorCode(0x12a), trErr.ErrorCode)
+ var errMismatchHash libp2pwebtransport.ErrCertHashMismatch
+ require.ErrorAs(t, err, &errMismatchHash)
+
+ e := sha256.Sum256([]byte("foobar"))
+ require.EqualValues(t, e[:], errMismatchHash.Actual[0])
+ })
+
+ t.Run("fails when adding a wrong hash", func(t *testing.T) {
+ _, err := tr2.Dial(context.Background(), ln.Multiaddr().Encapsulate(foobarHash), serverID)
+ require.Error(t, err)
+ })
+
+ require.NoError(t, ln.Close())
+ <-done
+}
+
+func TestCanDial(t *testing.T) {
+ valid := []ma.Multiaddr{
+ ma.StringCast("/ip4/127.0.0.1/udp/1234/quic-v1/webtransport/certhash/" + randomMultihash(t)),
+ ma.StringCast("/ip6/b16b:8255:efc6:9cd5:1a54:ee86:2d7a:c2e6/udp/1234/quic-v1/webtransport/certhash/" + randomMultihash(t)),
+ ma.StringCast(fmt.Sprintf("/ip4/127.0.0.1/udp/1234/quic-v1/webtransport/certhash/%s/certhash/%s/certhash/%s", randomMultihash(t), randomMultihash(t), randomMultihash(t))),
+ ma.StringCast("/ip4/127.0.0.1/udp/1234/quic-v1/webtransport"), // no certificate hash
+ }
+
+ invalid := []ma.Multiaddr{
+ ma.StringCast("/ip4/127.0.0.1/udp/1234"), // missing webtransport
+ ma.StringCast("/ip4/127.0.0.1/udp/1234/webtransport"), // missing quic
+ ma.StringCast("/ip4/127.0.0.1/tcp/1234/webtransport"), // WebTransport over TCP? Is this a joke?
+ }
+
+ _, key := newIdentity(t)
+ tr, err := libp2pwebtransport.New(key, nil, newConnManager(t), nil, &network.NullResourceManager{})
+ require.NoError(t, err)
+ defer tr.(io.Closer).Close()
+
+ for _, addr := range valid {
+ require.Truef(t, tr.CanDial(addr), "expected to be able to dial %s", addr)
+ }
+ for _, addr := range invalid {
+ require.Falsef(t, tr.CanDial(addr), "expected to not be able to dial %s", addr)
+ }
+}
+
+func TestListenAddrValidity(t *testing.T) {
+ valid := []ma.Multiaddr{
+ ma.StringCast("/ip6/::/udp/0/quic-v1/webtransport/"),
+ }
+
+ invalid := []ma.Multiaddr{
+ ma.StringCast("/ip4/127.0.0.1/udp/0"), // missing webtransport
+ ma.StringCast("/ip4/127.0.0.1/udp/0/webtransport"), // missing quic
+ ma.StringCast("/ip4/127.0.0.1/tcp/0/webtransport"), // WebTransport over TCP? Is this a joke?
+ ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport/certhash/" + randomMultihash(t)), // We can't listen on a specific certhash
+ }
+
+ _, key := newIdentity(t)
+ tr, err := libp2pwebtransport.New(key, nil, newConnManager(t), nil, &network.NullResourceManager{})
+ require.NoError(t, err)
+ defer tr.(io.Closer).Close()
+
+ for _, addr := range valid {
+ ln, err := tr.Listen(addr)
+ require.NoErrorf(t, err, "expected to be able to listen on %s", addr)
+ ln.Close()
+ }
+ for _, addr := range invalid {
+ _, err := tr.Listen(addr)
+ require.Errorf(t, err, "expected to not be able to listen on %s", addr)
+ }
+}
+
+func TestListenerAddrs(t *testing.T) {
+ _, key := newIdentity(t)
+ tr, err := libp2pwebtransport.New(key, nil, newConnManager(t), nil, &network.NullResourceManager{})
+ require.NoError(t, err)
+ defer tr.(io.Closer).Close()
+
+ ln1, err := tr.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport"))
+ require.NoError(t, err)
+ ln2, err := tr.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport"))
+ require.NoError(t, err)
+ hashes1 := extractCertHashes(ln1.Multiaddr())
+ require.Len(t, hashes1, 2)
+ hashes2 := extractCertHashes(ln2.Multiaddr())
+ require.Equal(t, hashes1, hashes2)
+}
+
+func TestResourceManagerDialing(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+ rcmgr := mocknetwork.NewMockResourceManager(ctrl)
+
+ addr := ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport")
+ p := peer.ID("foobar")
+
+ _, key := newIdentity(t)
+ tr, err := libp2pwebtransport.New(key, nil, newConnManager(t), nil, rcmgr)
+ require.NoError(t, err)
+ defer tr.(io.Closer).Close()
+ l, err := tr.Listen(addr)
+ require.NoError(t, err)
+
+ addr = l.Multiaddr()
+
+ scope := mocknetwork.NewMockConnManagementScope(ctrl)
+ rcmgr.EXPECT().OpenConnection(network.DirOutbound, false, addr).Return(scope, nil)
+ scope.EXPECT().SetPeer(p).Return(errors.New("denied"))
+ scope.EXPECT().Done()
+
+ _, err = tr.Dial(context.Background(), addr, p)
+ require.EqualError(t, err, "denied")
+}
+
+func TestResourceManagerListening(t *testing.T) {
+ clientID, key := newIdentity(t)
+ cl, err := libp2pwebtransport.New(key, nil, newConnManager(t), nil, &network.NullResourceManager{})
+ require.NoError(t, err)
+ defer cl.(io.Closer).Close()
+
+ t.Run("blocking the connection", func(t *testing.T) {
+ serverID, key := newIdentity(t)
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+ rcmgr := mocknetwork.NewMockResourceManager(ctrl)
+ tr, err := libp2pwebtransport.New(key, nil, newConnManager(t), nil, rcmgr)
+ require.NoError(t, err)
+ ln, err := tr.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport"))
+ require.NoError(t, err)
+ defer ln.Close()
+
+ rcmgr.EXPECT().OpenConnection(network.DirInbound, false, gomock.Any()).DoAndReturn(func(_ network.Direction, _ bool, addr ma.Multiaddr) (network.ConnManagementScope, error) {
+ _, err := addr.ValueForProtocol(ma.P_WEBTRANSPORT)
+ require.NoError(t, err, "expected a WebTransport multiaddr")
+ _, addrStr, err := manet.DialArgs(addr)
+ require.NoError(t, err)
+ host, _, err := net.SplitHostPort(addrStr)
+ require.NoError(t, err)
+ require.Equal(t, "127.0.0.1", host)
+ return nil, errors.New("denied")
+ })
+
+ _, err = cl.Dial(context.Background(), ln.Multiaddr(), serverID)
+ require.EqualError(t, err, "received status 503")
+ })
+
+ t.Run("blocking the peer", func(t *testing.T) {
+ serverID, key := newIdentity(t)
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+ rcmgr := mocknetwork.NewMockResourceManager(ctrl)
+ tr, err := libp2pwebtransport.New(key, nil, newConnManager(t), nil, rcmgr)
+ require.NoError(t, err)
+ ln, err := tr.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport"))
+ require.NoError(t, err)
+ defer ln.Close()
+
+ serverDone := make(chan struct{})
+ scope := mocknetwork.NewMockConnManagementScope(ctrl)
+ rcmgr.EXPECT().OpenConnection(network.DirInbound, false, gomock.Any()).Return(scope, nil)
+ scope.EXPECT().SetPeer(clientID).Return(errors.New("denied"))
+ scope.EXPECT().Done().Do(func() { close(serverDone) })
+
+ // The handshake will complete, but the server will immediately close the connection.
+ conn, err := cl.Dial(context.Background(), ln.Multiaddr(), serverID)
+ require.NoError(t, err)
+ defer conn.Close()
+ clientDone := make(chan struct{})
+ go func() {
+ defer close(clientDone)
+ _, err = conn.AcceptStream()
+ require.Error(t, err)
+ }()
+ select {
+ case <-clientDone:
+ case <-time.After(5 * time.Second):
+ t.Fatal("timeout")
+ }
+ select {
+ case <-serverDone:
+ case <-time.After(5 * time.Second):
+ t.Fatal("timeout")
+ }
+ })
+}
+
+// TODO: unify somehow. We do the same in libp2pquic.
+//go:generate sh -c "go run go.uber.org/mock/mockgen -package libp2pwebtransport_test -destination mock_connection_gater_test.go github.com/libp2p/go-libp2p/core/connmgr ConnectionGater && go run golang.org/x/tools/cmd/goimports -w mock_connection_gater_test.go"
+
+func TestConnectionGaterDialing(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+ connGater := NewMockConnectionGater(ctrl)
+
+ serverID, serverKey := newIdentity(t)
+ tr, err := libp2pwebtransport.New(serverKey, nil, newConnManager(t), nil, &network.NullResourceManager{})
+ require.NoError(t, err)
+ defer tr.(io.Closer).Close()
+ ln, err := tr.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport"))
+ require.NoError(t, err)
+ defer ln.Close()
+
+ connGater.EXPECT().InterceptSecured(network.DirOutbound, serverID, gomock.Any()).Do(func(_ network.Direction, _ peer.ID, addrs network.ConnMultiaddrs) {
+ require.Equal(t, stripCertHashes(ln.Multiaddr()).String(), addrs.RemoteMultiaddr().String())
+ })
+ _, key := newIdentity(t)
+ cl, err := libp2pwebtransport.New(key, nil, newConnManager(t), connGater, &network.NullResourceManager{})
+ require.NoError(t, err)
+ defer cl.(io.Closer).Close()
+ _, err = cl.Dial(context.Background(), ln.Multiaddr(), serverID)
+ require.EqualError(t, err, "secured connection gated")
+}
+
+func TestConnectionGaterInterceptAccept(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+ connGater := NewMockConnectionGater(ctrl)
+
+ serverID, serverKey := newIdentity(t)
+ tr, err := libp2pwebtransport.New(serverKey, nil, newConnManager(t), connGater, &network.NullResourceManager{})
+ require.NoError(t, err)
+ defer tr.(io.Closer).Close()
+ ln, err := tr.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport"))
+ require.NoError(t, err)
+ defer ln.Close()
+
+ connGater.EXPECT().InterceptAccept(gomock.Any()).Do(func(addrs network.ConnMultiaddrs) {
+ require.Equal(t, stripCertHashes(ln.Multiaddr()).String(), addrs.LocalMultiaddr().String())
+ require.NotEqual(t, stripCertHashes(ln.Multiaddr()).String(), addrs.RemoteMultiaddr().String())
+ })
+
+ _, key := newIdentity(t)
+ cl, err := libp2pwebtransport.New(key, nil, newConnManager(t), nil, &network.NullResourceManager{})
+ require.NoError(t, err)
+ defer cl.(io.Closer).Close()
+ _, err = cl.Dial(context.Background(), ln.Multiaddr(), serverID)
+ require.EqualError(t, err, "received status 403")
+}
+
+func TestConnectionGaterInterceptSecured(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ defer ctrl.Finish()
+ connGater := NewMockConnectionGater(ctrl)
+
+ serverID, serverKey := newIdentity(t)
+ tr, err := libp2pwebtransport.New(serverKey, nil, newConnManager(t), connGater, &network.NullResourceManager{})
+ require.NoError(t, err)
+ defer tr.(io.Closer).Close()
+ ln, err := tr.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport"))
+ require.NoError(t, err)
+ defer ln.Close()
+
+ clientID, key := newIdentity(t)
+ cl, err := libp2pwebtransport.New(key, nil, newConnManager(t), nil, &network.NullResourceManager{})
+ require.NoError(t, err)
+ defer cl.(io.Closer).Close()
+
+ connGater.EXPECT().InterceptAccept(gomock.Any()).Return(true)
+ connGater.EXPECT().InterceptSecured(network.DirInbound, clientID, gomock.Any()).Do(func(_ network.Direction, _ peer.ID, addrs network.ConnMultiaddrs) {
+ require.Equal(t, stripCertHashes(ln.Multiaddr()).String(), addrs.LocalMultiaddr().String())
+ require.NotEqual(t, stripCertHashes(ln.Multiaddr()).String(), addrs.RemoteMultiaddr().String())
+ })
+ // The handshake will complete, but the server will immediately close the connection.
+ conn, err := cl.Dial(context.Background(), ln.Multiaddr(), serverID)
+ require.NoError(t, err)
+ defer conn.Close()
+ done := make(chan struct{})
+ go func() {
+ defer close(done)
+ _, err = conn.AcceptStream()
+ require.Error(t, err)
+ }()
+ select {
+ case <-done:
+ case <-time.After(5 * time.Second):
+ t.Fatal("timeout")
+ }
+}
+
+func TestAcceptQueueFilledUp(t *testing.T) {
+ serverID, serverKey := newIdentity(t)
+ tr, err := libp2pwebtransport.New(serverKey, nil, newConnManager(t), nil, &network.NullResourceManager{})
+ require.NoError(t, err)
+ defer tr.(io.Closer).Close()
+ ln, err := tr.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport"))
+ require.NoError(t, err)
+ defer ln.Close()
+
+ newConn := func() (tpt.CapableConn, error) {
+ t.Helper()
+ _, key := newIdentity(t)
+ cl, err := libp2pwebtransport.New(key, nil, newConnManager(t), nil, &network.NullResourceManager{})
+ require.NoError(t, err)
+ defer cl.(io.Closer).Close()
+ return cl.Dial(context.Background(), ln.Multiaddr(), serverID)
+ }
+
+	// Dial one more connection than the accept queue (capacity 16) can hold.
+	const num = 16 + 1
+ errChan := make(chan error, num)
+ for i := 0; i < num; i++ {
+ go func() {
+ conn, err := newConn()
+ if err != nil {
+ errChan <- err
+ return
+ }
+ _, err = conn.AcceptStream()
+ errChan <- err
+ }()
+ }
+
+ // Since the handshakes complete asynchronously, we won't know _which_ one is rejected,
+ // so the only thing we can test for is that exactly one connection attempt is rejected.
+ select {
+ case <-errChan:
+ case <-time.After(time.Second):
+ t.Fatal("expected one connection to be rejected")
+ }
+ select {
+ case <-errChan:
+ t.Fatal("only expected one connection to be rejected")
+ case <-time.After(100 * time.Millisecond):
+ }
+
+ // test shutdown
+ require.NoError(t, ln.Close())
+ var count int
+ timer := time.NewTimer(time.Second)
+ defer timer.Stop()
+ for i := 0; i < 16; i++ {
+ select {
+ case <-errChan:
+ count++
+ if count == 16 {
+ return
+ }
+ case <-timer.C:
+ t.Fatal("shutdown failed")
+ }
+ }
+}
+
+type reportingRcmgr struct {
+ network.NullResourceManager
+ report chan<- int
+}
+
+func (m *reportingRcmgr) OpenConnection(_ network.Direction, _ bool, _ ma.Multiaddr) (network.ConnManagementScope, error) {
+ return &reportingScope{report: m.report}, nil
+}
+
+type reportingScope struct {
+ network.NullScope
+ report chan<- int
+}
+
+func (s *reportingScope) ReserveMemory(size int, _ uint8) error {
+ s.report <- size
+ return nil
+}
+
+func newUDPConnLocalhost(t testing.TB) *net.UDPConn {
+ t.Helper()
+ conn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0})
+ require.NoError(t, err)
+ t.Cleanup(func() { conn.Close() })
+ return conn
+}
+
+func TestFlowControlWindowIncrease(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("this test is flaky on Windows")
+ }
+
+ rtt := 10 * time.Millisecond
+ timeout := 5 * time.Second
+
+ if os.Getenv("CI") != "" {
+ rtt = 40 * time.Millisecond
+ timeout = 15 * time.Second
+ }
+
+ serverID, serverKey := newIdentity(t)
+ serverWindowIncreases := make(chan int, 100)
+ serverRcmgr := &reportingRcmgr{report: serverWindowIncreases}
+ tr, err := libp2pwebtransport.New(serverKey, nil, newConnManager(t), nil, serverRcmgr)
+ require.NoError(t, err)
+ defer tr.(io.Closer).Close()
+ ln, err := tr.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport"))
+ require.NoError(t, err)
+ defer ln.Close()
+
+ go func() {
+ conn, err := ln.Accept()
+ require.NoError(t, err)
+ str, err := conn.AcceptStream()
+ require.NoError(t, err)
+ _, err = io.CopyBuffer(str, str, make([]byte, 2<<10))
+ require.NoError(t, err)
+ str.CloseWrite()
+ }()
+
+ proxy := quicproxy.Proxy{
+ Conn: newUDPConnLocalhost(t),
+ ServerAddr: ln.Addr().(*net.UDPAddr),
+ DelayPacket: func(quicproxy.Direction, net.Addr, net.Addr, []byte) time.Duration { return rtt / 2 },
+ }
+ require.NoError(t, proxy.Start())
+ defer proxy.Close()
+
+ _, clientKey := newIdentity(t)
+ clientWindowIncreases := make(chan int, 100)
+ clientRcmgr := &reportingRcmgr{report: clientWindowIncreases}
+ tr2, err := libp2pwebtransport.New(clientKey, nil, newConnManager(t), nil, clientRcmgr)
+ require.NoError(t, err)
+ defer tr2.(io.Closer).Close()
+
+ var addr ma.Multiaddr
+ for _, comp := range ln.Multiaddr() {
+ if _, err := comp.ValueForProtocol(ma.P_UDP); err == nil {
+ addr = addr.Encapsulate(ma.StringCast(fmt.Sprintf("/udp/%d", proxy.LocalAddr().(*net.UDPAddr).Port)))
+ continue
+ }
+ addr = append(addr, comp)
+ }
+
+ conn, err := tr2.Dial(context.Background(), addr, serverID)
+ require.NoError(t, err)
+ str, err := conn.OpenStream(context.Background())
+ require.NoError(t, err)
+ var increasesDone atomic.Bool
+ go func() {
+ for {
+ _, err := str.Write(bytes.Repeat([]byte{0x42}, 1<<10))
+ require.NoError(t, err)
+ if increasesDone.Load() {
+ str.CloseWrite()
+ return
+ }
+ }
+ }()
+ done := make(chan struct{})
+ go func() {
+ defer close(done)
+ _, err := io.ReadAll(str)
+ require.NoError(t, err)
+ }()
+
+ var numServerIncreases, numClientIncreases int
+ timer := time.NewTimer(timeout)
+ defer timer.Stop()
+ for {
+ select {
+ case <-serverWindowIncreases:
+ numServerIncreases++
+ case <-clientWindowIncreases:
+ numClientIncreases++
+ case <-timer.C:
+ t.Fatalf("didn't receive enough window increases (client: %d, server: %d)", numClientIncreases, numServerIncreases)
+ }
+ if numClientIncreases >= 1 && numServerIncreases >= 1 {
+ increasesDone.Store(true)
+ break
+ }
+ }
+
+ select {
+ case <-done:
+ case <-time.After(timeout):
+ t.Fatal("timeout")
+ }
+}
+
+var errTimeout = errors.New("timeout")
+
+func serverSendsBackValidCert(t *testing.T, timeSinceUnixEpoch time.Duration, keySeed int64, randomClientSkew time.Duration) error {
+ if timeSinceUnixEpoch < 0 {
+ timeSinceUnixEpoch = -timeSinceUnixEpoch
+ }
+
+ // Bound this to 100 years
+ timeSinceUnixEpoch = timeSinceUnixEpoch % (time.Hour * 24 * 365 * 100)
+ // Start a bit further in the future to avoid edge cases around epoch
+ timeSinceUnixEpoch += time.Hour * 24 * 365
+ start := time.UnixMilli(timeSinceUnixEpoch.Milliseconds())
+
+ randomClientSkew = randomClientSkew % clockSkewAllowance
+
+ cl := clock.NewMock()
+ cl.Set(start)
+
+ priv, _, err := test.SeededTestKeyPair(ic.Ed25519, 256, keySeed)
+ require.NoError(t, err)
+ tr, err := libp2pwebtransport.New(priv, nil, newConnManager(t), nil, &network.NullResourceManager{}, libp2pwebtransport.WithClock(cl))
+ require.NoError(t, err)
+ l, err := tr.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport"))
+ require.NoError(t, err)
+ defer l.Close()
+
+ conn, err := quic.DialAddr(context.Background(), l.Addr().String(), &tls.Config{
+ NextProtos: []string{http3.NextProtoH3},
+ InsecureSkipVerify: true,
+ VerifyPeerCertificate: func(rawCerts [][]byte, _ [][]*x509.Certificate) error {
+ for _, c := range rawCerts {
+ cert, err := x509.ParseCertificate(c)
+ if err != nil {
+ return err
+ }
+
+ for _, clientSkew := range []time.Duration{randomClientSkew, -clockSkewAllowance, clockSkewAllowance} {
+ clientTime := cl.Now().Add(clientSkew)
+ if clientTime.After(cert.NotAfter) || clientTime.Before(cert.NotBefore) {
+ return fmt.Errorf("Times are not valid: server_now=%v client_now=%v certstart=%v certend=%v", cl.Now().UTC(), clientTime.UTC(), cert.NotBefore.UTC(), cert.NotAfter.UTC())
+ }
+ }
+
+ }
+ return nil
+ },
+ }, &quic.Config{MaxIdleTimeout: time.Second})
+
+ if err != nil {
+ if _, ok := err.(*quic.IdleTimeoutError); ok {
+ return errTimeout
+ }
+ return err
+ }
+ defer conn.CloseWithError(0, "")
+
+ return nil
+}
+
+func TestServerSendsBackValidCert(t *testing.T) {
+ var maxTimeoutErrors = 10
+ require.NoError(t, quick.Check(func(timeSinceUnixEpoch time.Duration, keySeed int64, randomClientSkew time.Duration) bool {
+ err := serverSendsBackValidCert(t, timeSinceUnixEpoch, keySeed, randomClientSkew)
+ if err == errTimeout {
+ maxTimeoutErrors -= 1
+ if maxTimeoutErrors <= 0 {
+ fmt.Println("Too many timeout errors")
+ return false
+ }
+ // Sporadic timeout errors on macOS
+ return true
+ } else if err != nil {
+ fmt.Println("Err:", err)
+ return false
+ }
+
+ return true
+ }, nil))
+}
+
+func TestServerRotatesCertCorrectly(t *testing.T) {
+ require.NoError(t, quick.Check(func(timeSinceUnixEpoch time.Duration, keySeed int64) bool {
+ if timeSinceUnixEpoch < 0 {
+ timeSinceUnixEpoch = -timeSinceUnixEpoch
+ }
+
+ // Bound this to 100 years
+ timeSinceUnixEpoch = timeSinceUnixEpoch % (time.Hour * 24 * 365 * 100)
+ // Start a bit further in the future to avoid edge cases around epoch
+ timeSinceUnixEpoch += time.Hour * 24 * 365
+ start := time.UnixMilli(timeSinceUnixEpoch.Milliseconds())
+
+ cl := clock.NewMock()
+ cl.Set(start)
+
+ priv, _, err := test.SeededTestKeyPair(ic.Ed25519, 256, keySeed)
+ if err != nil {
+ return false
+ }
+ tr, err := libp2pwebtransport.New(priv, nil, newConnManager(t), nil, &network.NullResourceManager{}, libp2pwebtransport.WithClock(cl))
+ if err != nil {
+ return false
+ }
+
+ l, err := tr.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport"))
+ if err != nil {
+ return false
+ }
+ certhashes := extractCertHashes(l.Multiaddr())
+ l.Close()
+
+ // These two certificates together are valid for at most certValidity - (4*clockSkewAllowance)
+ cl.Add(certValidity - (4 * clockSkewAllowance) - time.Second)
+ tr, err = libp2pwebtransport.New(priv, nil, newConnManager(t), nil, &network.NullResourceManager{}, libp2pwebtransport.WithClock(cl))
+ if err != nil {
+ return false
+ }
+
+ l, err = tr.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport"))
+ if err != nil {
+ return false
+ }
+ defer l.Close()
+
+ var found bool
+ ma.ForEach(l.Multiaddr(), func(c ma.Component) bool {
+ if c.Protocol().Code == ma.P_CERTHASH {
+ for _, prevCerthash := range certhashes {
+ if c.Value() == prevCerthash {
+ found = true
+ return false
+ }
+ }
+ }
+ return true
+ })
+
+ return found
+
+ }, nil))
+}
+
+func TestServerRotatesCertCorrectlyAfterSteps(t *testing.T) {
+ cl := clock.NewMock()
+ // Move one year ahead to avoid edge cases around epoch
+ cl.Add(time.Hour * 24 * 365)
+
+ priv, _, err := test.RandTestKeyPair(ic.Ed25519, 256)
+ require.NoError(t, err)
+ tr, err := libp2pwebtransport.New(priv, nil, newConnManager(t), nil, &network.NullResourceManager{}, libp2pwebtransport.WithClock(cl))
+ require.NoError(t, err)
+
+ l, err := tr.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport"))
+ require.NoError(t, err)
+
+ certhashes := extractCertHashes(l.Multiaddr())
+ l.Close()
+
+ // Traverse various time boundaries and make sure we always keep a common certhash.
+ // e.g. certhash/A/certhash/B ... -> ... certhash/B/certhash/C ... -> ... certhash/C/certhash/D
+ for i := 0; i < 200; i++ {
+ cl.Add(24 * time.Hour)
+ tr, err := libp2pwebtransport.New(priv, nil, newConnManager(t), nil, &network.NullResourceManager{}, libp2pwebtransport.WithClock(cl))
+ require.NoError(t, err)
+ l, err := tr.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport"))
+ require.NoError(t, err)
+
+ var found bool
+ ma.ForEach(l.Multiaddr(), func(c ma.Component) bool {
+ if c.Protocol().Code == ma.P_CERTHASH {
+ for _, prevCerthash := range certhashes {
+ if prevCerthash == c.Value() {
+ found = true
+ return false
+ }
+ }
+ }
+ return true
+ })
+ certhashes = extractCertHashes(l.Multiaddr())
+ l.Close()
+
+		require.True(t, found, "failed after day %d", i)
+ }
+}
+
+func TestH3ConnClosed(t *testing.T) {
+ _, serverKey := newIdentity(t)
+ tr, err := libp2pwebtransport.New(serverKey, nil, newConnManager(t), nil, nil, libp2pwebtransport.WithHandshakeTimeout(1*time.Second))
+ require.NoError(t, err)
+ defer tr.(io.Closer).Close()
+ ln, err := tr.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1/webtransport"))
+ require.NoError(t, err)
+ defer ln.Close()
+
+ p, err := net.ListenPacket("udp", "127.0.0.1:0")
+ require.NoError(t, err)
+ conn, err := quic.Dial(context.Background(), p, ln.Addr(), &tls.Config{
+ InsecureSkipVerify: true,
+ NextProtos: []string{http3.NextProtoH3},
+ }, nil)
+ require.NoError(t, err)
+ rt := &http3.Transport{}
+ rt.NewClientConn(conn)
+ require.Eventually(t, func() bool {
+ c := http.Client{
+ Transport: rt,
+ Timeout: 1 * time.Second,
+ }
+ resp, err := c.Get(fmt.Sprintf("https://%s", ln.Addr().String()))
+ if err != nil {
+ return true
+ }
+ resp.Body.Close()
+ return false
+ }, 10*time.Second, 1*time.Second)
+}
diff --git a/package.json b/package.json
deleted file mode 100644
index f1b93a680f..0000000000
--- a/package.json
+++ /dev/null
@@ -1,305 +0,0 @@
-{
- "author": "whyrusleeping",
- "bugs": {
- "url": "https://github.com/libp2p/go-libp2p"
- },
- "gx": {
- "dvcsimport": "github.com/libp2p/go-libp2p",
- "goversion": "1.5.2"
- },
- "gxDependencies": [
- {
- "hash": "QmcrrEpx3VMUbrbgVroH3YiYyUS5c4YAykzyPJWKspUYLa",
- "name": "go-semver",
- "version": "0.0.0"
- },
- {
- "hash": "QmWSvDKkcno2UyDg13rUBwWfhRsdj7uR3daAq57VoG5QeN",
- "name": "mdns",
- "version": "0.1.1"
- },
- {
- "hash": "QmWBug6eBS7AxRdCDVuSY5CnSit7cS2XnPFYJWqWDumhCG",
- "name": "go-msgio",
- "version": "0.0.3"
- },
- {
- "hash": "QmNiJuT8Ja3hMVpBHXv3Q6dwmperaQ6JjLtpMQgMCD7xvx",
- "name": "go-ipfs-util",
- "version": "1.2.7"
- },
- {
- "hash": "QmUusaX99BZoELh7dmPgirqRQ1FAmMnmnBn3oiqDFGBUSc",
- "name": "go-keyspace",
- "version": "1.0.0"
- },
- {
- "hash": "QmTnsezaB1wWNRHeHnYrm8K4d5i9wtyj3GsqjC3Rt5b5v5",
- "name": "go-multistream",
- "version": "0.3.6"
- },
- {
- "hash": "Qmf7HqcW7LtCi1W8y2bdx2eJpze74jkbKqpByxgXikdbLF",
- "name": "go-detect-race",
- "version": "1.0.1"
- },
- {
- "hash": "QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP",
- "name": "goprocess",
- "version": "1.0.0"
- },
- {
- "hash": "QmTG23dvpBCBjqQwyDxV8CQT6jmS4PSftNr1VqHhE3MLy7",
- "name": "go-log",
- "version": "1.4.1"
- },
- {
- "hash": "QmRK2LxanhK2gZq6k6R7vk5ZoYZk8ULSSTB7FzDsMUX6CB",
- "name": "go-multiaddr-net",
- "version": "1.5.7"
- },
- {
- "hash": "QmZyZDi491cCNTLfAhwcaDii2Kg4pwKRkhqQzURGDvY6ua",
- "name": "go-multihash",
- "version": "1.0.7"
- },
- {
- "hash": "QmSMZwvs3n4GBikZ7hKzT17c3bk65FmyZo2JqtJ16swqCv",
- "name": "multiaddr-filter",
- "version": "1.0.2"
- },
- {
- "hash": "QmaPHkZLbQQbvcyavn8q1GFHg6o6yeceyHFSJ3Pjf3p3TQ",
- "name": "go-crypto",
- "version": "0.0.0"
- },
- {
- "hash": "QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV",
- "name": "gogo-protobuf",
- "version": "0.0.0"
- },
- {
- "hash": "QmWWQ2Txc2c6tqjsBpzg5Ar652cHPGNsQQp2SejkNmkUMb",
- "name": "go-multiaddr",
- "version": "1.2.6"
- },
- {
- "hash": "QmeYJHEk8UjVVZ4XCRTZe6dFQrb8pGWD81LYCgeLp8CvMB",
- "name": "go-metrics",
- "version": "0.0.0"
- },
- {
- "hash": "QmYvsG72GsfLgUeSojXArjnU6L4Wmwk7wuAxtNLuyXcc1T",
- "name": "randbo",
- "version": "0.0.0"
- },
- {
- "hash": "QmeQW4ayVqi7Jjay1SrP2wYydsH9KwSrzQBnqyC25gPFnG",
- "name": "go-notifier",
- "version": "1.0.0"
- },
- {
- "hash": "QmWHgLqrghM9zw77nF6gdvT9ExQ2RB9pLxkd8sDHZf1rWb",
- "name": "go-temp-err-catcher",
- "version": "0.0.0"
- },
- {
- "hash": "QmVXXxPsnDY16szK4gPy1oz4qKd8HHshemX1miZR2frtJo",
- "name": "go-peerstream",
- "version": "2.1.5"
- },
- {
- "author": "whyrusleeping",
- "hash": "QmTy17Jm1foTnvUS9JXRhLbRQ3XuC64jPTjUfpB4mHz2QM",
- "name": "mafmt",
- "version": "1.2.5"
- },
- {
- "author": "whyrusleeping",
- "hash": "QmTd4Jgb4nbJq5uR55KJgGLyHWmM3dovS21D1HcwRneSLu",
- "name": "gorocheck",
- "version": "0.0.0"
- },
- {
- "author": "whyrusleeping",
- "hash": "QmPDZJxtWGfcwLPazJxD4h3v3aDs43V7UNAVs3Jz1Wo7o4",
- "name": "go-libp2p-loggables",
- "version": "1.1.14"
- },
- {
- "author": "whyrusleeping",
- "hash": "QmP47neqyP4NR9CKbjVogZ8U9Gybxfcfsa8HtPSPSxwiA8",
- "name": "go-libp2p-secio",
- "version": "1.2.7"
- },
- {
- "author": "whyrusleeping",
- "hash": "QmdeiKhUy1TVGBaKxt7y1QmBDLBdisSrLJ1x58Eoj4PXUh",
- "name": "go-libp2p-peerstore",
- "version": "1.4.17"
- },
- {
- "author": "whyrusleeping",
- "hash": "QmPUHzTLPZFYqv8WqcBTuMFYTgeom4uHHEaxzk7bd5GYZB",
- "name": "go-libp2p-transport",
- "version": "2.2.14"
- },
- {
- "author": "whyrusleeping",
- "hash": "QmdxKHpkZCTV3C7xdE1iJdPfFm5LVvMPvirdFmKu1TimzY",
- "name": "go-tcp-transport",
- "version": "1.2.9"
- },
- {
- "author": "whyrusleeping",
- "hash": "Qmf2UAmRwDG4TvnkQpHZWPAzw7rpCYVhxmRXmYxXr5LD1g",
- "name": "go-maddr-filter",
- "version": "1.1.6"
- },
- {
- "author": "whyrusleeping",
- "hash": "QmZNkThpqfVXs9GNbexPrfBbXSLNYeKrE7jwFM2oqHbyqN",
- "name": "go-libp2p-protocol",
- "version": "1.0.0"
- },
- {
- "author": "whyrusleeping",
- "hash": "QmTGSre9j1otFgsr1opCUQDXTPSM6BTZnMWwPeA5nYJM7w",
- "name": "go-addr-util",
- "version": "1.2.7"
- },
- {
- "author": "whyrusleeping",
- "hash": "QmUJzxQQ2kzwQubsMqBTr1NGDpLfh7pGA2E1oaJULcKDPq",
- "name": "go-testutil",
- "version": "1.2.1"
- },
- {
- "author": "whyrusleeping",
- "hash": "QmaHsbK8b39AzQWEwDsysCutdJXyfa3k9oFh1cr6dfMhHT",
- "name": "go-libp2p-conn",
- "version": "1.7.6"
- },
- {
- "author": "whyrusleeping",
- "hash": "QmXoz9o2PT3tEzf7hicegwex5UgVP54n3k82K7jrWFyN86",
- "name": "go-libp2p-net",
- "version": "2.0.7"
- },
- {
- "author": "whyrusleeping",
- "hash": "QmVvu4bS5QLfS19ePkp5Wgzn2ZUma5oXTT9BgDFyQLxUZF",
- "name": "go-libp2p-metrics",
- "version": "2.0.6"
- },
- {
- "author": "whyrusleeping",
- "hash": "QmYDNqBAMWVMHKndYR35Sd8PfEVWBiDmpHYkuRJTunJDeJ",
- "name": "go-libp2p-interface-conn",
- "version": "0.4.13"
- },
- {
- "author": "whyrusleeping",
- "hash": "QmfZTdmunzKzAGJrSvXXQbQ5kLLUiEMX5vdwux7iXkdk7D",
- "name": "go-libp2p-host",
- "version": "2.1.7"
- },
- {
- "author": "whyrusleeping",
- "hash": "QmRqfgh56f8CrqpwH7D2s6t8zQRsvPoftT3sp5Y6SUhNA3",
- "name": "go-libp2p-swarm",
- "version": "2.1.7"
- },
- {
- "author": "whyrusleeping",
- "hash": "QmXtFH52dAPCq5i4iYjr1g8xVFVJD3fwKWWyNHjVB4sHRp",
- "name": "go-libp2p-nat",
- "version": "0.0.8"
- },
- {
- "author": "whyrusleeping",
- "hash": "Qmb6BsZf6Y3kxffXMNTubGPF1w1bkHtpvhfYbmnwP3NQyw",
- "name": "go-libp2p-netutil",
- "version": "0.3.11"
- },
- {
- "author": "whyrusleeping",
- "hash": "Qmc64U41EEB4nPG7wxjEqFwKJajS2f8kk5q2TvUrQf78Xu",
- "name": "go-libp2p-blankhost",
- "version": "0.2.7"
- },
- {
- "author": "whyrusleeping",
- "hash": "Qme1knMqwt1hKZbc1BmQFmnm9f36nyQGwXxPGVpVJ9rMK5",
- "name": "go-libp2p-crypto",
- "version": "1.6.2"
- },
- {
- "author": "whyrusleeping",
- "hash": "QmNWCEvi7bPRcvqAV8AKLGVNoQdArWi7NJayka2SM4XtRe",
- "name": "go-smux-yamux",
- "version": "2.0.1"
- },
- {
- "author": "whyrusleeping",
- "hash": "QmQvJiADDe7JR4m968MwXobTCCzUqQkP87aRHe29MEBGHV",
- "name": "go-logging",
- "version": "0.0.0"
- },
- {
- "author": "whyrusleeping",
- "hash": "QmVniQJkdzLZaZwzwMdd3dJTvWiJ1DQEkreVy6hs6h7Vk5",
- "name": "go-smux-multistream",
- "version": "2.0.0"
- },
- {
- "author": "whyrusleeping",
- "hash": "QmcJukH2sAFjY3HdBKq35WDzWoL3UUu2gt9wdfqZTUyM74",
- "name": "go-libp2p-peer",
- "version": "2.3.2"
- },
- {
- "author": "vyzo",
- "hash": "QmZRbCo2gw7ghw5m7L77a8FvvQTVr62J4hmy8ozpdq7dHF",
- "name": "go-libp2p-circuit",
- "version": "2.0.12"
- },
- {
- "author": "lgierth",
- "hash": "QmQMRYmPn77CKRFf4YFjX3M5e6uw6DFAgsQffCX6mwZ4mA",
- "name": "go-multiaddr-dns",
- "version": "0.2.2"
- },
- {
- "author": "why",
- "hash": "QmfQNieWBPwmnUjXWPZbjJPzhNwFFabTb5RQ79dyVWGujQ",
- "name": "go-libp2p-interface-connmgr",
- "version": "0.0.8"
- },
- {
- "author": "whyrusleeping",
- "hash": "QmenmFuirGzv8S1R3DyvbZ6tFmQapkGeDCebgYzni1Ntn3",
- "name": "go-smux-multiplex",
- "version": "3.0.6"
- },
- {
- "author": "multiformats",
- "hash": "QmRDePEiL4Yupq5EkcK3L3ko3iMgYaqUdLu7xc1kqs7dnV",
- "name": "go-multicodec",
- "version": "0.1.5"
- },
- {
- "author": "satori",
- "hash": "QmcBWojPoNh4qm7zvv4qiepvCnnc7ALS9qcp7TNwwxT1gT",
- "name": "go.uuid",
- "version": "1.1.0"
- }
- ],
- "gxVersion": "0.4.0",
- "language": "go",
- "license": "MIT",
- "name": "go-libp2p",
- "releaseCmd": "git commit -a -m \"gx publish $VERSION\"",
- "version": "5.0.17"
-}
-
diff --git a/proto_test.go b/proto_test.go
new file mode 100644
index 0000000000..4c81749b55
--- /dev/null
+++ b/proto_test.go
@@ -0,0 +1,38 @@
+package libp2p_test
+
+import (
+ "testing"
+
+ // Import all protobuf packages to ensure their `init` functions run.
+ // This may not be strictly necessary if they are imported in the `libp2p` package, but
+ // we do it here in case the imports in non-test files change.
+ _ "github.com/libp2p/go-libp2p/core/crypto/pb"
+ _ "github.com/libp2p/go-libp2p/core/peer/pb"
+ _ "github.com/libp2p/go-libp2p/core/record/pb"
+ _ "github.com/libp2p/go-libp2p/core/sec/insecure/pb"
+ _ "github.com/libp2p/go-libp2p/p2p/host/autonat/pb"
+ _ "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoreds/pb"
+ _ "github.com/libp2p/go-libp2p/p2p/protocol/autonatv2/pb"
+ _ "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb"
+ _ "github.com/libp2p/go-libp2p/p2p/protocol/holepunch/pb"
+ _ "github.com/libp2p/go-libp2p/p2p/protocol/identify/pb"
+ _ "github.com/libp2p/go-libp2p/p2p/security/noise/pb"
+ _ "github.com/libp2p/go-libp2p/p2p/transport/webrtc/pb"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+//go:generate scripts/gen-proto.sh .
+
+func TestProtoImportsAndPathsAreConsistent(t *testing.T) {
+ protoregistry.GlobalFiles.RangeFiles(func(fd protoreflect.FileDescriptor) bool {
+ imports := fd.Imports()
+ for i := 0; i < imports.Len(); i++ {
+ path := imports.Get(i).Path()
+ if _, err := protoregistry.GlobalFiles.FindFileByPath(path); err != nil {
+ t.Fatalf("find dependency %s: %v", path, err)
+ }
+ }
+ return true
+ })
+}
diff --git a/scripts/.gitignore b/scripts/.gitignore
new file mode 100644
index 0000000000..7371c9e452
--- /dev/null
+++ b/scripts/.gitignore
@@ -0,0 +1,2 @@
+protobuf-bin/
+protoc-gen-go
diff --git a/scripts/download-protoc.sh b/scripts/download-protoc.sh
new file mode 100755
index 0000000000..6870c22c9f
--- /dev/null
+++ b/scripts/download-protoc.sh
@@ -0,0 +1,78 @@
+#!/usr/bin/env bash
+set -eou pipefail
+
+# Specify the protobuf release version
+PROTOBUF_VERSION="29.2"
+
+# Define SHA-256 hashes for each supported platform
+# Update these hashes by running ./print-protoc-hashes.sh
+declare -A SHA256_HASHES=(
+ ["linux-aarch64"]="0019dfc4b32d63c1392aa264aed2253c1e0c2fb09216f8e2cc269bbfb8bb49b5"
+ ["linux-x86_64"]="52e9e7ece55c7e30e7e8bbd254b4b21b408a5309bca826763c7124b696a132e9"
+ ["darwin-aarch64"]="0e153a38d6da19594c980e7f7cd3ea0ddd52c9da1068c03c0d8533369fbfeb20"
+)
+
+# Determine the platform
+OS="$(uname -s | tr '[:upper:]' '[:lower:]')"
+ARCH="$(uname -m)"
+[[ "${ARCH}" == "arm64" ]] && ARCH="aarch64"
+
+PLATFORM="${OS}-${ARCH}"
+
+# Set the download URL based on the platform
+case "${PLATFORM}" in
+linux-x86_64)
+ PROTOC_ZIP="protoc-${PROTOBUF_VERSION}-linux-x86_64.zip"
+ ;;
+linux-aarch64)
+ PROTOC_ZIP="protoc-${PROTOBUF_VERSION}-linux-aarch64.zip"
+ ;;
+darwin-aarch64)
+ PROTOC_ZIP="protoc-${PROTOBUF_VERSION}-osx-aarch_64.zip"
+ ;;
+*)
+ echo "Unsupported platform: ${PLATFORM}" >&2
+ exit 1
+ ;;
+esac
+
+# Download the specified version of protobuf
+DOWNLOAD_URL="https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_ZIP}"
+echo "Downloading from: ${DOWNLOAD_URL}" >&2
+curl -LO "${DOWNLOAD_URL}"
+
+# Verify checksum
+EXPECTED_SHA256="${SHA256_HASHES[${PLATFORM}]}"
+if command -v shasum >/dev/null 2>&1; then
+ ACTUAL_SHA256=$(shasum -a 256 "${PROTOC_ZIP}" | cut -d' ' -f1)
+else
+ ACTUAL_SHA256=$(sha256sum "${PROTOC_ZIP}" | cut -d' ' -f1)
+fi
+
+if [[ "${ACTUAL_SHA256}" != "${EXPECTED_SHA256}" ]]; then
+ echo "Checksum verification failed!" >&2
+ echo "Expected: ${EXPECTED_SHA256}" >&2
+ echo "Got: ${ACTUAL_SHA256}" >&2
+ rm "${PROTOC_ZIP}"
+ exit 1
+fi
+
+echo "Checksum verified successfully" >&2
+
+# Create a directory for extraction
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+INSTALL_DIR="$SCRIPT_DIR/protobuf-bin/protoc-${PROTOBUF_VERSION}"
+mkdir -p "${INSTALL_DIR}"
+
+# Unzip the downloaded file
+unzip -q -o "${PROTOC_ZIP}" -d "${INSTALL_DIR}"
+
+# Clean up the zip file
+rm "${PROTOC_ZIP}"
+
+# Path to the installed protoc binaries
+PROTOC_BIN="${INSTALL_DIR}/bin"
+echo "Installed protoc ${PROTOBUF_VERSION} to ${INSTALL_DIR}" >&2
+
+# Print the protoc bin path to stdout so callers can prepend it to PATH
+printf '%s' "${PROTOC_BIN}"
diff --git a/scripts/gen-proto.sh b/scripts/gen-proto.sh
new file mode 100755
index 0000000000..dac2cff2f9
--- /dev/null
+++ b/scripts/gen-proto.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+set -eou pipefail
+
+root=$1
+
+proto_array=(
+ core/crypto/pb/crypto.proto
+ core/record/pb/envelope.proto
+ core/peer/pb/peer_record.proto
+ core/sec/insecure/pb/plaintext.proto
+ p2p/host/autonat/pb/autonat.proto
+ p2p/security/noise/pb/payload.proto
+ p2p/transport/webrtc/pb/message.proto
+ p2p/protocol/identify/pb/identify.proto
+ p2p/protocol/circuitv2/pb/circuit.proto
+ p2p/protocol/circuitv2/pb/voucher.proto
+ p2p/protocol/autonatv2/pb/autonatv2.proto
+ p2p/protocol/holepunch/pb/holepunch.proto
+ p2p/host/peerstore/pstoreds/pb/pstore.proto
+)
+
+proto_paths=""
+for path in "${proto_array[@]}"; do
+ proto_paths+="$path "
+done
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+PROTOC_BIN_PATH="$("${SCRIPT_DIR}/download-protoc.sh")"
+export PATH="$PROTOC_BIN_PATH:$PATH"
+
+echo "protoc --version: $(protoc --version)"
+(cd "${SCRIPT_DIR}" && go build -o protoc-gen-go google.golang.org/protobuf/cmd/protoc-gen-go)
+
+echo "protoc-gen-go --version: $("${SCRIPT_DIR}/protoc-gen-go" --version)"
+protoc --plugin="${SCRIPT_DIR}/protoc-gen-go" --proto_path=$root --go_out=$root --go_opt=paths=source_relative $proto_paths
diff --git a/scripts/mkreleaselog b/scripts/mkreleaselog
new file mode 100755
index 0000000000..cc63eb399a
--- /dev/null
+++ b/scripts/mkreleaselog
@@ -0,0 +1,256 @@
+#!/bin/zsh
+#set -x
+set -euo pipefail
+export GO111MODULE=on
+export GOPATH="$(go env GOPATH)"
+
+alias jq="jq --unbuffered"
+
+AUTHORS=(
+ # orgs
+ ipfs
+ ipld
+ libp2p
+ multiformats
+ filecoin-project
+ ipfs-shipyard
+
+ # Authors of personal repos used by go-libp2p that should be mentioned in the
+ # release notes.
+ whyrusleeping
+ Kubuxu
+ jbenet
+ Stebalien
+ marten-seemann
+ hsanjuan
+ lucas-clemente
+ warpfork
+)
+
+[[ -n "${REPO_FILTER+x}" ]] || REPO_FILTER="github.com/(${$(printf "|%s" "${AUTHORS[@]}"):1})"
+
+[[ -n "${IGNORED_FILES+x}" ]] || IGNORED_FILES='^\(\.gx\|package\.json\|\.travis\.yml\|go\.mod\|go\.sum\|\.github\|\.circleci\|\.gen\.go\)$'
+
+NL=$'\n'
+
+ROOT_DIR="$(git rev-parse --show-toplevel)"
+
+msg() {
+ echo "$*" >&2
+}
+
+statlog() {
+ local module="$1"
+ local rpath="$GOPATH/src/$(strip_version "$module")"
+ local start="${2:-}"
+ local end="${3:-HEAD}"
+ local mailmap_file="$rpath/.mailmap"
+ if ! [[ -e "$mailmap_file" ]]; then
+ mailmap_file="$ROOT_DIR/.mailmap"
+ fi
+
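+  # git log prints three lines per commit (hash, mailmapped author name, email),
+  # then a blank line, then a --shortstat line such as:
+  #   3 files changed, 10 insertions(+), 2 deletions(-)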
+ git -C "$rpath" -c mailmap.file="$mailmap_file" log --use-mailmap --shortstat --no-merges --pretty="tformat:%H%n%aN%n%aE" "$start..$end" | while
+ read hash
+ read name
+ read email
+ read _ # empty line
+ read changes
+ do
+ if [[ $name == "web3-bot" ]]; then
+ continue
+ fi
+ changed=0
+ insertions=0
+ deletions=0
+ while read count event; do
+ if [[ "$event" =~ ^file ]]; then
+ changed=$count
+ elif [[ "$event" =~ ^insertion ]]; then
+ insertions=$count
+ elif [[ "$event" =~ ^deletion ]]; then
+ deletions=$count
+ else
+ echo "unknown event $event" >&2
+ exit 1
+ fi
+ done<<<"${changes//,/$NL}"
+
+ jq -n \
+ --arg "hash" "$hash" \
+ --arg "name" "$name" \
+ --arg "email" "$email" \
+ --argjson "changed" "$changed" \
+ --argjson "insertions" "$insertions" \
+ --argjson "deletions" "$deletions" \
+ '{Commit: $hash, Author: $name, Email: $email, Files: $changed, Insertions: $insertions, Deletions: $deletions}'
+ done
+}
+
+# Returns a stream of deps changed between $1 and $2.
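+# Each input file holds a JSON array of modules (as produced by mod_deps);
+# output objects look like (illustrative):
+#   {"Path": "...", "Old": {"Version": "v0.1.0", ...}, "New": {"Version": "v0.2.0", ...}}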
+dep_changes() {
+ {
+ <"$1"
+ <"$2"
+ } | jq -s 'JOIN(INDEX(.[0][]; .Path); .[1][]; .Path; {Path: .[0].Path, Old: (.[1] | del(.Path)), New: (.[0] | del(.Path))}) | select(.New.Version != .Old.Version)'
+}
+
+# resolve_commits resolves a git ref for each version.
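+# e.g. (illustrative): the pseudo-version v0.5.1-0.20190101000000-abcdef123456
+# resolves to the commit ref "abcdef123456", while a plain tag like v0.5.1
+# resolves to itself.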
+resolve_commits() {
+    jq '. + {Ref: (.Version|capture("^((?<ref1>.*)\\+incompatible|v.*-(0\\.)?[0-9]{14}-(?<ref2>[a-f0-9]{12})|(?<ref3>v.*))$") | .ref1 // .ref2 // .ref3)}'
+}
+
+pr_link() {
+ local repo="$1"
+ local prnum="$2"
+ local ghname="${repo##github.com/}"
+ printf -- "[%s#%s](https://%s/pull/%s)" "$ghname" "$prnum" "$repo" "$prnum"
+}
+
+# Generate a release log for a range of commits in a single repo.
+release_log() {
+ setopt local_options BASH_REMATCH
+
+ local module="$1"
+ local start="$2"
+ local end="${3:-HEAD}"
+ local repo="$(strip_version "$1")"
+ local dir="$GOPATH/src/$repo"
+
+ local commit pr
+ git -C "$dir" log \
+ --format='tformat:%H %s' \
+ --first-parent \
+ "$start..$end" |
+ while read commit subject; do
+ # Skip gx-only PRs.
+        if git -C "$dir" rev-parse "$commit^" >/dev/null 2>&1 &&
+ ! git -C "$dir" diff-tree --no-commit-id --name-only "$commit^" "$commit" | grep -v "${IGNORED_FILES}" >/dev/null; then
+ continue
+ fi
+
+ if [[ "$subject" =~ "^sync: update CI config files" ]]; then
+ continue
+ fi
+
+ if [[ "$subject" =~ '^Merge pull request #([0-9]+) from' ]]; then
+ local prnum="${BASH_REMATCH[2]}"
+ local desc="$(git -C "$dir" show --summary --format='tformat:%b' "$commit" | head -1)"
+ if [[ "$desc" =~ "^sync: update CI config files" ]]; then
+ continue
+ fi
+ printf -- "- %s (%s)\n" "$desc" "$(pr_link "$repo" "$prnum")"
+ elif [[ "$subject" =~ '\(#([0-9]+)\)$' ]]; then
+ local prnum="${BASH_REMATCH[2]}"
+ printf -- "- %s (%s)\n" "$subject" "$(pr_link "$repo" "$prnum")"
+ else
+ printf -- "- %s\n" "$subject"
+ fi
+ done
+}
+
+indent() {
+ sed -e 's/^/ /'
+}
+
+mod_deps() {
+ go list -mod=mod -json -m all | jq 'select(.Version != null)'
+}
+
+ensure() {
+ local repo="$(strip_version "$1")"
+ local commit="$2"
+ local rpath="$GOPATH/src/$repo"
+ if [[ ! -d "$rpath" ]]; then
+ msg "Cloning $repo..."
+        git clone "https://$repo" "$rpath" >&2
+ fi
+
+ if ! git -C "$rpath" rev-parse --verify "$commit" >/dev/null; then
+ msg "Fetching $repo..."
+ git -C "$rpath" fetch --all >&2
+ fi
+
+ git -C "$rpath" rev-parse --verify "$commit" >/dev/null || return 1
+}
+
+statsummary() {
+ jq -s 'group_by(.Author)[] | {Author: .[0].Author, Commits: (. | length), Insertions: (map(.Insertions) | add), Deletions: (map(.Deletions) | add), Files: (map(.Files) | add)}' |
+ jq '. + {Lines: (.Deletions + .Insertions)}'
+}
+
+strip_version() {
+ local repo="$1"
+ if [[ "$repo" =~ '.*/v[0-9]+$' ]]; then
+ repo="$(dirname "$repo")"
+ fi
+ echo "$repo"
+}
+
+recursive_release_log() {
+    local start="${1:-$(git tag -l | sort -V | grep -v -- '-rc' | grep 'v' | tail -n1)}"
+ local end="${2:-$(git rev-parse HEAD)}"
+ local repo_root="$(git rev-parse --show-toplevel)"
+ local module="$(go list -m)"
+ local dir="$(go list -m -f '{{.Dir}}')"
+
+    if ! [[ "${GOPATH}/src/$(strip_version "$module")" -ef "${dir}" ]]; then
+ echo "This script requires the target module and all dependencies to live in a GOPATH."
+ return 1
+ fi
+
+ (
+ local result=0
+ local workspace="$(mktemp -d)"
+ trap "$(printf 'rm -rf "%q"' "$workspace")" INT TERM EXIT
+ cd "$workspace"
+
+ echo "Computing old deps..." >&2
+ git -C "$repo_root" show "$start:go.mod" >go.mod
+ mod_deps | resolve_commits | jq -s > old_deps.json
+
+ echo "Computing new deps..." >&2
+ git -C "$repo_root" show "$end:go.mod" >go.mod
+ mod_deps | resolve_commits | jq -s > new_deps.json
+
+ rm -f go.mod go.sum
+
+ printf -- "Generating Changelog for %s %s..%s\n" "$module" "$start" "$end" >&2
+
+ printf -- "- %s:\n" "$module"
+ release_log "$module" "$start" "$end" | indent
+
+
+ statlog "$module" "$start" "$end" > statlog.json
+
+ dep_changes old_deps.json new_deps.json |
+ jq --arg filter "$REPO_FILTER" 'select(.Path | match($filter))' |
+ # Compute changelogs
+ jq -r '"\(.Path) \(.New.Version) \(.New.Ref) \(.Old.Version) \(.Old.Ref // "")"' |
+ while read module new new_ref old old_ref; do
+ if ! ensure "$module" "$new_ref"; then
+ result=1
+ local changelog="failed to fetch repo"
+ else
+ statlog "$module" "$old_ref" "$new_ref" >> statlog.json
+ local changelog="$(release_log "$module" "$old_ref" "$new_ref")"
+ fi
+ if [[ -n "$changelog" ]]; then
+ printf -- "- %s (%s -> %s):\n" "$module" "$old" "$new"
+ echo "$changelog" | indent
+ fi
+ done
+
+ echo
+ echo "Contributors"
+ echo
+
+    echo "| Contributor | Commits | Lines ± | Files Changed |"
+ echo "|-------------|---------|---------|---------------|"
+    statsummary <statlog.json |
+        jq -s 'sort_by(.Lines) | reverse | .[]' |
+        jq -r '"| \(.Author) | \(.Commits) | +\(.Insertions)/-\(.Deletions) | \(.Files) |"'
+    return "$result"
+    )
+}
+
+recursive_release_log "$@"
diff --git a/scripts/print-protoc-hashes.sh b/scripts/print-protoc-hashes.sh
new file mode 100755
--- /dev/null
+++ b/scripts/print-protoc-hashes.sh
+  if command -v shasum >/dev/null 2>&1; then
+ sha256_hash=$(shasum -a 256 "${protoc_zip}" | cut -d' ' -f1)
+ else
+ sha256_hash=$(sha256sum "${protoc_zip}" | cut -d' ' -f1)
+ fi
+
+ # Store the hash in the array
+ HASHES["${platform}"]="${sha256_hash}"
+
+ # Clean up the zip file
+ rm "${protoc_zip}"
+}
+
+# Iterate over the platforms and calculate the hashes
+for platform in "${PLATFORMS[@]}"; do
+ calculate_hash "${platform}"
+done
+
+# Print all the hashes together at the end
+echo "Expected SHA-256 hashes for protobuf ${PROTOBUF_VERSION}:"
+for platform in "${!HASHES[@]}"; do
+ echo "[\"${platform}\"]=\"${HASHES[${platform}]}\""
+done
diff --git a/scripts/test_analysis/cmd/gotest2sql/main.go b/scripts/test_analysis/cmd/gotest2sql/main.go
new file mode 100644
index 0000000000..f8c22590d3
--- /dev/null
+++ b/scripts/test_analysis/cmd/gotest2sql/main.go
@@ -0,0 +1,100 @@
+// gotest2sql inserts the output of go test -json ./... into a sqlite database
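+//
+// Usage (illustrative):
+//
+//	go test -json ./... | gotest2sql -output results.db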
+package main
+
+import (
+ "bufio"
+ "database/sql"
+ "encoding/json"
+ "flag"
+ "fmt"
+ "log"
+ "os"
+ "time"
+
+ _ "modernc.org/sqlite"
+)
+
+type TestEvent struct {
+ Time time.Time // encodes as an RFC3339-format string
+ Action string
+ Package string
+ Test string
+ Elapsed float64 // seconds
+ Output string
+}
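+
+// A typical input line from `go test -json` looks like (illustrative):
+//	{"Time":"2024-01-01T00:00:00Z","Action":"pass","Package":"example.com/m","Test":"TestFoo","Elapsed":0.01}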
+
+func main() {
+ outputPath := flag.String("output", "", "output db file")
+ verbose := flag.Bool("v", false, "Print test output to stdout")
+ flag.Parse()
+
+ if *outputPath == "" {
+ log.Fatal("-output path is required")
+ }
+
+ db, err := sql.Open("sqlite", *outputPath)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Create a table to store test results.
+ _, err = db.Exec(`
+ CREATE TABLE IF NOT EXISTS test_results (
+ Time TEXT,
+ Action TEXT,
+ Package TEXT,
+ Test TEXT,
+ Elapsed REAL,
+ Output TEXT,
+ BatchInsertTime TEXT
+ )`)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ tx, err := db.Begin()
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Prepare the insert statement once
+ insertTime := time.Now().Format(time.RFC3339Nano)
+ stmt, err := tx.Prepare(`
+ INSERT INTO test_results (Time, Action, Package, Test, Elapsed, Output, BatchInsertTime)
+ VALUES (?, ?, ?, ?, ?, ?, ?)`)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer stmt.Close() // Ensure the statement is closed after use
+
+ s := bufio.NewScanner(os.Stdin)
+ for s.Scan() {
+ line := s.Bytes()
+ var ev TestEvent
+ err = json.Unmarshal(line, &ev)
+ if err != nil {
+ log.Fatal(err)
+ }
+ if *verbose && ev.Action == "output" {
+ fmt.Print(ev.Output)
+ }
+
+ _, err = stmt.Exec(
+ ev.Time.Format(time.RFC3339Nano),
+ ev.Action,
+ ev.Package,
+ ev.Test,
+ ev.Elapsed,
+ ev.Output,
+ insertTime,
+ )
+ if err != nil {
+ log.Fatal(err)
+ }
+ }
+ if err := s.Err(); err != nil {
+ log.Fatal(err)
+ }
+
+ // Commit the transaction
+ if err := tx.Commit(); err != nil {
+ log.Fatal(err)
+ }
+}
diff --git a/scripts/test_analysis/go.mod b/scripts/test_analysis/go.mod
new file mode 100644
index 0000000000..269a3153a4
--- /dev/null
+++ b/scripts/test_analysis/go.mod
@@ -0,0 +1,18 @@
+module github.com/libp2p/go-libp2p/scripts/test_analysis
+
+go 1.24
+
+require modernc.org/sqlite v1.36.0
+
+require (
+ github.com/dustin/go-humanize v1.0.1 // indirect
+ github.com/google/uuid v1.6.0 // indirect
+ github.com/mattn/go-isatty v0.0.20 // indirect
+ github.com/ncruces/go-strftime v0.1.9 // indirect
+ github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
+ golang.org/x/exp v0.0.0-20230315142452-642cacee5cc0 // indirect
+ golang.org/x/sys v0.30.0 // indirect
+ modernc.org/libc v1.61.13 // indirect
+ modernc.org/mathutil v1.7.1 // indirect
+ modernc.org/memory v1.8.2 // indirect
+)
diff --git a/scripts/test_analysis/go.sum b/scripts/test_analysis/go.sum
new file mode 100644
index 0000000000..e4f5e5a1f2
--- /dev/null
+++ b/scripts/test_analysis/go.sum
@@ -0,0 +1,47 @@
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
+github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd h1:gbpYu9NMq8jhDVbvlGkMFWCjLFlqqEZjEmObmhUy6Vo=
+github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4=
+github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
+github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
+github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
+golang.org/x/exp v0.0.0-20230315142452-642cacee5cc0 h1:pVgRXcIictcr+lBQIFeiwuwtDIs4eL21OuM9nyAADmo=
+golang.org/x/exp v0.0.0-20230315142452-642cacee5cc0/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
+golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8=
+golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
+golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg=
+golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI=
+modernc.org/cc/v4 v4.24.4 h1:TFkx1s6dCkQpd6dKurBNmpo+G8Zl4Sq/ztJ+2+DEsh0=
+modernc.org/cc/v4 v4.24.4/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
+modernc.org/ccgo/v4 v4.23.16 h1:Z2N+kk38b7SfySC1ZkpGLN2vthNJP1+ZzGZIlH7uBxo=
+modernc.org/ccgo/v4 v4.23.16/go.mod h1:nNma8goMTY7aQZQNTyN9AIoJfxav4nvTnvKThAeMDdo=
+modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE=
+modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ=
+modernc.org/gc/v2 v2.6.3 h1:aJVhcqAte49LF+mGveZ5KPlsp4tdGdAOT4sipJXADjw=
+modernc.org/gc/v2 v2.6.3/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
+modernc.org/libc v1.61.13 h1:3LRd6ZO1ezsFiX1y+bHd1ipyEHIJKvuprv0sLTBwLW8=
+modernc.org/libc v1.61.13/go.mod h1:8F/uJWL/3nNil0Lgt1Dpz+GgkApWh04N3el3hxJcA6E=
+modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
+modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
+modernc.org/memory v1.8.2 h1:cL9L4bcoAObu4NkxOlKWBWtNHIsnnACGF/TbqQ6sbcI=
+modernc.org/memory v1.8.2/go.mod h1:ZbjSvMO5NQ1A2i3bWeDiVMxIorXwdClKE/0SZ+BMotU=
+modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=
+modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
+modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
+modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
+modernc.org/sqlite v1.36.0 h1:EQXNRn4nIS+gfsKeUTymHIz1waxuv5BzU7558dHSfH8=
+modernc.org/sqlite v1.36.0/go.mod h1:7MPwH7Z6bREicF9ZVUR78P1IKuxfZ8mRIDHD0iD+8TU=
+modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
+modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
+modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
+modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
diff --git a/scripts/test_analysis/main.go b/scripts/test_analysis/main.go
new file mode 100644
index 0000000000..a96e559864
--- /dev/null
+++ b/scripts/test_analysis/main.go
@@ -0,0 +1,311 @@
+package main
+
+import (
+ "context"
+ "database/sql"
+ "errors"
+ "fmt"
+ "log"
+ "os"
+ "os/exec"
+ "regexp"
+ "strings"
+
+ _ "modernc.org/sqlite"
+)
+
+const dbPath = "./test_results.db"
+const retryCount = 4 // For a total of 5 runs
+
+var coverRegex = regexp.MustCompile(`-cover`)
+
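+// main either runs the test suite, passing any arguments through to
+// `go test`, or, when invoked with the "summarize" subcommand, prints a
+// markdown summary of the recorded results.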
+func main() {
+ var t tester
+ if len(os.Args) >= 2 {
+ if os.Args[1] == "summarize" {
+ md, err := t.summarize()
+ if err != nil {
+ log.Fatal(err)
+ }
+ fmt.Print(md)
+ return
+ }
+ }
+
+ passThruFlags := os.Args[1:]
+ err := t.runTests(passThruFlags)
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+
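+// tester runs `go test` in Dir and inspects the results recorded in the
+// sqlite database at Dir+dbPath.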
+type tester struct {
+ Dir string
+}
+
+func (t *tester) runTests(passThruFlags []string) error {
+ err := t.goTestAll(passThruFlags)
+ if err == nil {
+ // No failed tests, nothing to do
+ return nil
+ }
+ log.Printf("Not all tests passed: %v", err)
+
+ timedOutPackages, err := t.findTimedoutTests(context.Background())
+ if err != nil {
+ return err
+ }
+ if len(timedOutPackages) > 0 {
+ // Fail immediately if we find any timeouts. We'd have to run all tests
+ // in the package, and this could take a long time.
+ log.Printf("Found %d timed out packages. Failing", len(timedOutPackages))
+ return errors.New("one or more tests timed out")
+ }
+
+ failedTests, err := t.findFailedTests(context.Background())
+ if err != nil {
+ return err
+ }
+
+ log.Printf("Found %d failed tests. Retrying them %d times", len(failedTests), retryCount)
+ hasOneNonFlakyFailure := false
+ loggedFlaky := map[string]struct{}{}
+
+ for _, ft := range failedTests {
+ isFlaky := false
+ for i := 0; i < retryCount; i++ {
+ log.Printf("Retrying %s.%s", ft.Package, ft.Test)
+ if err := t.goTestPkgTest(ft.Package, ft.Test, filterOutFlags(passThruFlags, coverRegex)); err != nil {
+ log.Printf("Failed to run %s.%s: %v", ft.Package, ft.Test, err)
+ } else {
+ isFlaky = true
+ flakyName := ft.Package + "." + ft.Test
+ if _, ok := loggedFlaky[flakyName]; !ok {
+ loggedFlaky[flakyName] = struct{}{}
+ log.Printf("Test %s.%s is flaky.", ft.Package, ft.Test)
+ }
+ }
+ }
+ if !isFlaky {
+ hasOneNonFlakyFailure = true
+ }
+ }
+
+ // A test consistently failed, so we should exit with a non-zero exit code.
+ if hasOneNonFlakyFailure {
+ return errors.New("one or more tests consistently failed")
+ }
+ return nil
+}
+
+func (t *tester) goTestAll(extraFlags []string) error {
+ flags := []string{"./..."}
+ flags = append(flags, extraFlags...)
+ return t.goTest(flags)
+}
+
+func (t *tester) goTestPkgTest(pkg, testname string, extraFlags []string) error {
+ flags := []string{
+ pkg, "-run", "^" + testname + "$", "-count", "1",
+ }
+ flags = append(flags, extraFlags...)
+ return t.goTest(flags)
+}
+
+func (t *tester) goTest(extraFlags []string) error {
+ flags := []string{
+ "test", "-json",
+ }
+ flags = append(flags, extraFlags...)
+ cmd := exec.Command("go", flags...)
+ cmd.Dir = t.Dir
+ cmd.Stderr = os.Stderr
+
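+ // Pipe the JSON event stream from `go test` into gotest2sql so results
+ // are recorded in the database while test output still reaches stdout.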
+ gotest2sql := exec.Command("gotest2sql", "-v", "-output", dbPath)
+ gotest2sql.Dir = t.Dir
+ stdout, err := cmd.StdoutPipe()
+ if err != nil {
+ return err
+ }
+ gotest2sql.Stdin = stdout
+ gotest2sql.Stdout = os.Stdout
+ gotest2sql.Stderr = os.Stderr
+ err = gotest2sql.Start()
+ if err != nil {
+ return err
+ }
+
+ err = cmd.Run()
+ return errors.Join(err, gotest2sql.Wait())
+}
+
+type failedTest struct {
+ Package string
+ Test string
+}
+
+type timedOutPackage struct {
+ Package string
+ Outputs string
+}
+
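+// findFailedTests returns the distinct (Package, Test) pairs that recorded a
+// 'fail' action in the results database.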
+func (t *tester) findFailedTests(ctx context.Context) ([]failedTest, error) {
+ db, err := sql.Open("sqlite", t.Dir+dbPath)
+ if err != nil {
+ return nil, err
+ }
+ defer db.Close()
+
+ rows, err := db.QueryContext(ctx, "SELECT DISTINCT Package, Test FROM test_results where Action='fail' and Test != ''")
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+ var out []failedTest
+ for rows.Next() {
+ var pkg, test string
+ if err := rows.Scan(&pkg, &test); err != nil {
+ return nil, err
+ }
+ out = append(out, failedTest{pkg, test})
+ }
+ return out, rows.Err()
+}
+
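+// findTimedoutTests returns packages that failed after more than 300 seconds
+// and whose combined output contains "timed out", the marker printed when a
+// test binary hits its timeout.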
+func (t *tester) findTimedoutTests(ctx context.Context) ([]timedOutPackage, error) {
+ db, err := sql.Open("sqlite", t.Dir+dbPath)
+ if err != nil {
+ return nil, err
+ }
+ defer db.Close()
+
+ rows, err := db.QueryContext(ctx, `WITH failed_packages AS (
+ SELECT
+ Package
+ FROM
+ test_results
+ WHERE
+ Action = 'fail'
+ AND Elapsed > 300
+)
+SELECT
+ test_results.Package, GROUP_CONCAT(Output, '') AS Outputs
+FROM
+ test_results
+INNER JOIN
+ failed_packages
+ON
+ test_results.Package = failed_packages.Package
+GROUP BY
+ test_results.Package
+HAVING
+ Outputs LIKE '%timed out%'
+ORDER BY Time;`)
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+ var out []timedOutPackage
+ for rows.Next() {
+ var pkg, outputs string
+ if err := rows.Scan(&pkg, &outputs); err != nil {
+ return nil, err
+ }
+ out = append(out, timedOutPackage{pkg, outputs})
+ }
+ return out, rows.Err()
+}
+
+func filterOutFlags(flags []string, exclude *regexp.Regexp) []string {
+ out := make([]string, 0, len(flags))
+ for _, f := range flags {
+ if !exclude.MatchString(f) {
+ out = append(out, f)
+ }
+ }
+ return out
+}
+
+// summarize returns a markdown string of the test results.
+func (t *tester) summarize() (string, error) {
+ ctx := context.Background()
+ var out strings.Builder
+
+ testFailures, err := t.findFailedTests(ctx)
+ if err != nil {
+ return "", err
+ }
+ timeouts, err := t.findTimedoutTests(ctx)
+ if err != nil {
+ return "", err
+ }
+
+ testFailureCount := len(testFailures) + len(timeouts)
+
+ plural := "s"
+ if testFailureCount == 1 {
+ plural = ""
+ }
+ out.WriteString(fmt.Sprintf("## %d Test Failure%s\n\n", testFailureCount, plural))
+
+ if len(timeouts) > 0 {
+ out.WriteString("### Timed Out Tests\n\n")
+ for _, timeout := range timeouts {
+ _, err = out.WriteString(fmt.Sprintf(`
+%s
+
+%s
+
+ `, timeout.Package, timeout.Outputs))
+ if err != nil {
+ return "", err
+ }
+ }
+ out.WriteString("\n")
+ }
+
+ if len(testFailures) > 0 {
+ out.WriteString("### Failed Tests\n\n")
+
+ db, err := sql.Open("sqlite", t.Dir+dbPath)
+ if err != nil {
+ return "", err
+ }
+ defer db.Close()
+
+ rows, err := db.QueryContext(ctx, `SELECT
+ tr_output.Package,
+ tr_output.Test,
+ GROUP_CONCAT(tr_output.Output, '') AS Outputs
+FROM
+ test_results tr_fail
+JOIN
+ test_results tr_output
+ON
+ tr_fail.Test = tr_output.Test
+ AND tr_fail.BatchInsertTime = tr_output.BatchInsertTime
+ AND tr_fail.Package = tr_output.Package
+WHERE
+ tr_fail.Action = 'fail'
+ AND tr_output.Test != ''
+GROUP BY
+ tr_output.BatchInsertTime,
+ tr_output.Package,
+ tr_output.Test
+ORDER BY
+ MIN(tr_output.Time);`)
+ if err != nil {
+ return "", err
+ }
+ defer rows.Close()
+ for rows.Next() {
+ var pkg, test, outputs string
+ if err := rows.Scan(&pkg, &test, &outputs); err != nil {
+ return "", err
+ }
+ _, err = out.WriteString(fmt.Sprintf(`
+%s.%s
+
+%s
+
+ `, pkg, test, outputs))
+ if err != nil {
+ return "", err
+ }
+ }
+ }
+ return out.String(), nil
+}
diff --git a/scripts/test_analysis/main_test.go b/scripts/test_analysis/main_test.go
new file mode 100644
index 0000000000..b8a5dae1be
--- /dev/null
+++ b/scripts/test_analysis/main_test.go
@@ -0,0 +1,56 @@
+package main
+
+import (
+ "os"
+ "testing"
+)
+
+func TestFailsOnConsistentFailure(t *testing.T) {
+ tmpDir := t.TempDir() + "/"
+ os.WriteFile(tmpDir+"/main.go", []byte(`package main
+func main() {}`), 0644)
+ // Add a test that fails consistently.
+ os.WriteFile(tmpDir+"/main_test.go", []byte(`package main
+
+import (
+ "testing"
+)
+func TestConsistentFailure(t *testing.T) {
+ t.Fatal("consistent failure")
+}`), 0644)
+ os.WriteFile(tmpDir+"/go.mod", []byte(`module example.com/test`), 0644)
+
+ tstr := tester{Dir: tmpDir}
+ err := tstr.runTests(nil)
+ if err == nil {
+ t.Fatal("Should have failed with a consistent failure")
+ }
+}
+
+func TestPassesOnFlakyFailure(t *testing.T) {
+ tmpDir := t.TempDir() + "/"
+ os.WriteFile(tmpDir+"/main.go", []byte(`package main
+func main() {
+}`), 0644)
+ // Add a test that fails the first time.
+ os.WriteFile(tmpDir+"/main_test.go", []byte(`package main
+import (
+ "os"
+ "testing"
+)
+func TestFlakyFailure(t *testing.T) {
+ _, err := os.Stat("foo")
+ if err != nil {
+ os.WriteFile("foo", []byte("hello"), 0644)
+ t.Fatal("flaky failure")
+ }
+}`), 0644)
+ os.WriteFile(tmpDir+"/go.mod", []byte(`module example.com/test`), 0644)
+
+ // Run the test.
+ tstr := tester{Dir: tmpDir}
+ err := tstr.runTests(nil)
+ if err != nil {
+ t.Fatal("Should have passed with a flaky test")
+ }
+}
diff --git a/test-plans/.gitignore b/test-plans/.gitignore
new file mode 100644
index 0000000000..281feaa30c
--- /dev/null
+++ b/test-plans/.gitignore
@@ -0,0 +1,2 @@
+ping-image.tar
+ping-image.json
diff --git a/test-plans/PingDockerfile b/test-plans/PingDockerfile
new file mode 100644
index 0000000000..f5d872f98b
--- /dev/null
+++ b/test-plans/PingDockerfile
@@ -0,0 +1,17 @@
+# syntax=docker/dockerfile:1
+# This is run from the parent directory to copy the whole go-libp2p codebase
+
+FROM golang:1.24-alpine AS builder
+
+WORKDIR /app/
+
+COPY ./ .
+WORKDIR /app/test-plans
+RUN go mod download
+RUN go build -o /testplan ./cmd/ping
+
+FROM alpine
+WORKDIR /app
+
+COPY --from=builder /testplan /testplan
+ENTRYPOINT [ "/testplan"]
diff --git a/test-plans/README.md b/test-plans/README.md
new file mode 100644
index 0000000000..0606f85279
--- /dev/null
+++ b/test-plans/README.md
@@ -0,0 +1,37 @@
+# test-plans test implementation
+
+This folder defines the implementation for the test-plans interop tests.
+
+# Running this test locally
+
+You can run this test locally if you have a local Redis instance and another
+peer that this test can dial or listen for. For example, to test that we can
+dial/listen to ourselves, do the following:
+
+1. Start redis (needed by the tests): `docker run --rm -it -p 6379:6379
+ redis/redis-stack`.
+2. In one terminal run the dialer: `redis_addr=localhost:6379 ip="0.0.0.0"
+ transport=quic-v1 security=quic muxer=quic is_dialer="true" go run
+ ./cmd/ping`
+3. In another terminal, run the listener: `redis_addr=localhost:6379
+ ip="0.0.0.0" transport=quic-v1 security=quic muxer=quic is_dialer="false" go
+ run ./cmd/ping`
+
+To test interop with other versions, do something similar, but replace one of
+these nodes with the other version's interop test.
+
+# Running all interop tests locally with Compose
+
+To run this test against all released libp2p versions you'll need to have the
+[libp2p/test-plans](https://github.com/libp2p/test-plans) repository checked
+out. Then do the following (from the root directory of this repository):
+
+1. Build the image: `docker build -t go-libp2p-head -f test-plans/PingDockerfile .`.
+2. Build the images for all released versions in `libp2p/test-plans`: `(cd /libp2p/test-plans/transport-interop/ && make)`.
+3. Run the test:
+```bash
+GO_LIBP2P="$PWD"; (cd /libp2p/test-plans/transport-interop/ && npm run test -- --extra-version=$GO_LIBP2P/test-plans/ping-version.json --name-filter="go-libp2p-head")
+```
diff --git a/test-plans/cmd/ping/main.go b/test-plans/cmd/ping/main.go
new file mode 100644
index 0000000000..c836a72e72
--- /dev/null
+++ b/test-plans/cmd/ping/main.go
@@ -0,0 +1,241 @@
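+// Command ping implements this repository's side of the test-plans transport
+// interop test: configuration comes from environment variables, the
+// listener's multiaddr is exchanged through Redis, and the dialer reports the
+// handshake and ping round-trip times as JSON.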
+package main
+
+import (
+ "context"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/tls"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/json"
+ "fmt"
+ "log"
+ "math/big"
+ "os"
+ "strconv"
+ "time"
+
+ libp2pwebrtc "github.com/libp2p/go-libp2p/p2p/transport/webrtc"
+
+ "github.com/go-redis/redis/v8"
+ "github.com/libp2p/go-libp2p"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/p2p/muxer/yamux"
+ "github.com/libp2p/go-libp2p/p2p/protocol/ping"
+ "github.com/libp2p/go-libp2p/p2p/security/noise"
+ libp2ptls "github.com/libp2p/go-libp2p/p2p/security/tls"
+ libp2pquic "github.com/libp2p/go-libp2p/p2p/transport/quic"
+ "github.com/libp2p/go-libp2p/p2p/transport/tcp"
+ "github.com/libp2p/go-libp2p/p2p/transport/websocket"
+ libp2pwebtransport "github.com/libp2p/go-libp2p/p2p/transport/webtransport"
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+)
+
+func main() {
+ var (
+ transport = os.Getenv("transport")
+ muxer = os.Getenv("muxer")
+ secureChannel = os.Getenv("security")
+ isDialerStr = os.Getenv("is_dialer")
+ ip = os.Getenv("ip")
+ redisAddr = os.Getenv("redis_addr")
+ testTimeoutStr = os.Getenv("test_timeout_seconds")
+ )
+
+ testTimeout := 3 * time.Minute
+ if testTimeoutStr != "" {
+ secs, err := strconv.ParseInt(testTimeoutStr, 10, 32)
+ if err == nil {
+ testTimeout = time.Duration(secs) * time.Second
+ }
+ }
+
+ if ip == "" {
+ ip = "0.0.0.0"
+ }
+
+ if redisAddr == "" {
+ redisAddr = "redis:6379"
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
+ defer cancel()
+
+ // Get peer information via redis
+ rClient := redis.NewClient(&redis.Options{
+ DialTimeout: testTimeout,
+ Addr: redisAddr,
+ Password: "",
+ DB: 0,
+ })
+ defer rClient.Close()
+
+ for {
+ if ctx.Err() != nil {
+ log.Fatal("timeout waiting for redis")
+ }
+
+ // Wait for redis to be ready
+ _, err := rClient.Ping(ctx).Result()
+ if err == nil {
+ break
+ }
+ time.Sleep(100 * time.Millisecond)
+ }
+
+ isDialer := isDialerStr == "true"
+
+ options := []libp2p.Option{}
+
+ var listenAddr string
+ switch transport {
+ case "ws":
+ options = append(options, libp2p.Transport(websocket.New))
+ listenAddr = fmt.Sprintf("/ip4/%s/tcp/0/ws", ip)
+ case "wss":
+ options = append(options, libp2p.Transport(websocket.New, websocket.WithTLSConfig(generateTLSConfig()), websocket.WithTLSClientConfig(&tls.Config{InsecureSkipVerify: true})))
+ listenAddr = fmt.Sprintf("/ip4/%s/tcp/0/wss", ip)
+ case "tcp":
+ options = append(options, libp2p.Transport(tcp.NewTCPTransport))
+ listenAddr = fmt.Sprintf("/ip4/%s/tcp/0", ip)
+ case "quic-v1":
+ options = append(options, libp2p.Transport(libp2pquic.NewTransport))
+ listenAddr = fmt.Sprintf("/ip4/%s/udp/0/quic-v1", ip)
+ case "webtransport":
+ options = append(options, libp2p.Transport(libp2pwebtransport.New))
+ listenAddr = fmt.Sprintf("/ip4/%s/udp/0/quic-v1/webtransport", ip)
+ case "webrtc-direct":
+ options = append(options, libp2p.Transport(libp2pwebrtc.New))
+ listenAddr = fmt.Sprintf("/ip4/%s/udp/0/webrtc-direct", ip)
+ default:
+ log.Fatalf("Unsupported transport: %s", transport)
+ }
+ options = append(options, libp2p.ListenAddrStrings(listenAddr))
+
+ // Skipped for certain transports
+ var skipMuxer bool
+ var skipSecureChannel bool
+ switch transport {
+ case "quic-v1", "webtransport", "webrtc-direct":
+ skipMuxer = true
+ skipSecureChannel = true
+ }
+
+ if !skipSecureChannel {
+ switch secureChannel {
+ case "tls":
+ options = append(options, libp2p.Security(libp2ptls.ID, libp2ptls.New))
+ case "noise":
+ options = append(options, libp2p.Security(noise.ID, noise.New))
+ default:
+ log.Fatalf("Unsupported secure channel: %s", secureChannel)
+ }
+ }
+
+ if !skipMuxer {
+ switch muxer {
+ case "yamux":
+ options = append(options, libp2p.Muxer("/yamux/1.0.0", yamux.DefaultTransport))
+ default:
+ log.Fatalf("Unsupported muxer: %s", muxer)
+ }
+ }
+
+ host, err := libp2p.New(options...)
+ if err != nil {
+ log.Fatalf("failed to instantiate libp2p instance: %s", err)
+ }
+ defer host.Close()
+
+ log.Println("My multiaddr is: ", host.Addrs())
+
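+ // Rendezvous via Redis: the listener pushes its multiaddr onto the
+ // "listenerAddr" list and the dialer blocks on BLPop until it appears.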
+ if isDialer {
+ val, err := rClient.BLPop(ctx, testTimeout, "listenerAddr").Result()
+ if err != nil {
+ log.Fatal("Failed to wait for listener to be ready")
+ }
+ otherMa := ma.StringCast(val[1])
+ log.Println("Other peer multiaddr is: ", otherMa)
+ otherMa, p2pComponent := ma.SplitLast(otherMa)
+ otherPeerId, err := peer.Decode(p2pComponent.Value())
+ if err != nil {
+ log.Fatal("Failed to get peer id from multiaddr")
+ }
+
+ handshakeStartInstant := time.Now()
+ err = host.Connect(ctx, peer.AddrInfo{
+ ID: otherPeerId,
+ Addrs: []ma.Multiaddr{otherMa},
+ })
+ if err != nil {
+ log.Fatal("Failed to connect to other peer")
+ }
+
+ ping := ping.NewPingService(host)
+
+ res := <-ping.Ping(ctx, otherPeerId)
+ if res.Error != nil {
+ log.Fatal(res.Error)
+ }
+ handshakePlusOneRTT := time.Since(handshakeStartInstant)
+
+ testResult := struct {
+ HandshakePlusOneRTTMillis float32 `json:"handshakePlusOneRTTMillis"`
+ PingRTTMilllis float32 `json:"pingRTTMilllis"`
+ }{
+ HandshakePlusOneRTTMillis: float32(handshakePlusOneRTT.Microseconds()) / 1000,
+ PingRTTMilllis: float32(res.RTT.Microseconds()) / 1000,
+ }
+
+ testResultJSON, err := json.Marshal(testResult)
+ if err != nil {
+ log.Fatalf("Failed to marshal test result: %v", err)
+ }
+ fmt.Println(string(testResultJSON))
+ } else {
+ var listenAddr ma.Multiaddr
+ for _, addr := range host.Addrs() {
+ if !manet.IsIPLoopback(addr) {
+ listenAddr = addr
+ break
+ }
+ }
+ if listenAddr == nil {
+ log.Fatal("failed to find a non-loopback address to listen on")
+ }
+ _, err := rClient.RPush(ctx, "listenerAddr", listenAddr.Encapsulate(ma.StringCast("/p2p/"+host.ID().String())).String()).Result()
+ if err != nil {
+ log.Fatal("Failed to send listener address")
+ }
+ time.Sleep(testTimeout)
+ os.Exit(1)
+ }
+}
+
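+// generateTLSConfig builds a throwaway self-signed RSA certificate for the
+// "wss" transport. The dialer sets InsecureSkipVerify, so the certificate
+// only needs to exist, not to chain to a trusted root.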
+func generateTLSConfig() *tls.Config {
+ priv, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ log.Fatal(err)
+ }
+ tmpl := &x509.Certificate{
+ SerialNumber: big.NewInt(1),
+ Subject: pkix.Name{},
+ SignatureAlgorithm: x509.SHA256WithRSA,
+ NotBefore: time.Now(),
+ NotAfter: time.Now().Add(time.Hour), // valid for an hour
+ BasicConstraintsValid: true,
+ }
+ certDER, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, priv.Public(), priv)
+ if err != nil {
+ log.Fatal(err)
+ }
+ return &tls.Config{
+ Certificates: []tls.Certificate{{
+ PrivateKey: priv,
+ Certificate: [][]byte{certDER},
+ }},
+ }
+}
diff --git a/test-plans/go.mod b/test-plans/go.mod
new file mode 100644
index 0000000000..682850ba2f
--- /dev/null
+++ b/test-plans/go.mod
@@ -0,0 +1,101 @@
+module github.com/libp2p/go-libp2p/test-plans/m/v2
+
+go 1.24.6
+
+require (
+ github.com/go-redis/redis/v8 v8.11.5
+ github.com/libp2p/go-libp2p v0.0.0
+ github.com/multiformats/go-multiaddr v0.16.0
+)
+
+require (
+ github.com/benbjohnson/clock v1.3.5 // indirect
+ github.com/beorn7/perks v1.0.1 // indirect
+ github.com/cespare/xxhash/v2 v2.3.0 // indirect
+ github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
+ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
+ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
+ github.com/flynn/noise v1.1.0 // indirect
+ github.com/francoispqt/gojay v1.2.13 // indirect
+ github.com/google/gopacket v1.1.19 // indirect
+ github.com/google/uuid v1.6.0 // indirect
+ github.com/gorilla/websocket v1.5.3 // indirect
+ github.com/huin/goupnp v1.3.0 // indirect
+ github.com/ipfs/go-cid v0.5.0 // indirect
+ github.com/jackpal/go-nat-pmp v1.0.2 // indirect
+ github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
+ github.com/klauspost/compress v1.18.0 // indirect
+ github.com/klauspost/cpuid/v2 v2.2.10 // indirect
+ github.com/koron/go-ssdp v0.0.6 // indirect
+ github.com/libp2p/go-buffer-pool v0.1.0 // indirect
+ github.com/libp2p/go-flow-metrics v0.2.0 // indirect
+ github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
+ github.com/libp2p/go-msgio v0.3.0 // indirect
+ github.com/libp2p/go-netroute v0.2.2 // indirect
+ github.com/libp2p/go-reuseport v0.4.0 // indirect
+ github.com/libp2p/go-yamux/v5 v5.0.1 // indirect
+ github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
+ github.com/miekg/dns v1.1.66 // indirect
+ github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
+ github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
+ github.com/minio/sha256-simd v1.0.1 // indirect
+ github.com/mr-tron/base58 v1.2.0 // indirect
+ github.com/multiformats/go-base32 v0.1.0 // indirect
+ github.com/multiformats/go-base36 v0.2.0 // indirect
+ github.com/multiformats/go-multiaddr-dns v0.4.1 // indirect
+ github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
+ github.com/multiformats/go-multibase v0.2.0 // indirect
+ github.com/multiformats/go-multicodec v0.9.1 // indirect
+ github.com/multiformats/go-multihash v0.2.3 // indirect
+ github.com/multiformats/go-multistream v0.6.1 // indirect
+ github.com/multiformats/go-varint v0.0.7 // indirect
+ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+ github.com/onsi/gomega v1.36.3 // indirect
+ github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
+ github.com/pion/datachannel v1.5.10 // indirect
+ github.com/pion/dtls/v2 v2.2.12 // indirect
+ github.com/pion/dtls/v3 v3.0.6 // indirect
+ github.com/pion/ice/v4 v4.0.10 // indirect
+ github.com/pion/interceptor v0.1.40 // indirect
+ github.com/pion/logging v0.2.3 // indirect
+ github.com/pion/mdns/v2 v2.0.7 // indirect
+ github.com/pion/randutil v0.1.0 // indirect
+ github.com/pion/rtcp v1.2.15 // indirect
+ github.com/pion/rtp v1.8.19 // indirect
+ github.com/pion/sctp v1.8.39 // indirect
+ github.com/pion/sdp/v3 v3.0.13 // indirect
+ github.com/pion/srtp/v3 v3.0.6 // indirect
+ github.com/pion/stun v0.6.1 // indirect
+ github.com/pion/stun/v3 v3.0.0 // indirect
+ github.com/pion/transport/v2 v2.2.10 // indirect
+ github.com/pion/transport/v3 v3.0.7 // indirect
+ github.com/pion/turn/v4 v4.0.2 // indirect
+ github.com/pion/webrtc/v4 v4.1.2 // indirect
+ github.com/prometheus/client_golang v1.22.0 // indirect
+ github.com/prometheus/client_model v0.6.2 // indirect
+ github.com/prometheus/common v0.64.0 // indirect
+ github.com/prometheus/procfs v0.16.1 // indirect
+ github.com/quic-go/qpack v0.5.1 // indirect
+ github.com/quic-go/quic-go v0.54.0 // indirect
+ github.com/quic-go/webtransport-go v0.9.0 // indirect
+ github.com/spaolacci/murmur3 v1.1.0 // indirect
+ github.com/wlynxg/anet v0.0.5 // indirect
+ go.uber.org/dig v1.19.0 // indirect
+ go.uber.org/fx v1.24.0 // indirect
+ go.uber.org/mock v0.5.2 // indirect
+ go.uber.org/multierr v1.11.0 // indirect
+ go.uber.org/zap v1.27.0 // indirect
+ golang.org/x/crypto v0.39.0 // indirect
+ golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 // indirect
+ golang.org/x/mod v0.25.0 // indirect
+ golang.org/x/net v0.41.0 // indirect
+ golang.org/x/sync v0.15.0 // indirect
+ golang.org/x/sys v0.33.0 // indirect
+ golang.org/x/text v0.26.0 // indirect
+ golang.org/x/time v0.12.0 // indirect
+ golang.org/x/tools v0.34.0 // indirect
+ google.golang.org/protobuf v1.36.6 // indirect
+ lukechampine.com/blake3 v1.4.1 // indirect
+)
+
+replace github.com/libp2p/go-libp2p => ../
diff --git a/test-plans/go.sum b/test-plans/go.sum
new file mode 100644
index 0000000000..c4930e5eba
--- /dev/null
+++ b/test-plans/go.sum
@@ -0,0 +1,450 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo=
+dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
+dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
+dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
+dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
+git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
+github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
+github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
+github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU=
+github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U=
+github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8=
+github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
+github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
+github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
+github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
+github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
+github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
+github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
+github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
+github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
+github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
+github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
+github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
+github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
+github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
+github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
+github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg=
+github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk=
+github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
+github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
+github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk=
+github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk=
+github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
+github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
+github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
+github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
+github.com/koron/go-ssdp v0.0.6 h1:Jb0h04599eq/CY7rB5YEqPS83HmRfHP2azkxMN2rFtU=
+github.com/koron/go-ssdp v0.0.6/go.mod h1:0R9LfRJGek1zWTjN3JUNlm5INCDYGpRDfAptnct63fI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
+github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
+github.com/libp2p/go-flow-metrics v0.2.0 h1:EIZzjmeOE6c8Dav0sNv35vhZxATIXWZg6j/C08XmmDw=
+github.com/libp2p/go-flow-metrics v0.2.0/go.mod h1:st3qqfu8+pMfh+9Mzqb2GTiwrAGjIPszEjZmtksN8Jc=
+github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94=
+github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
+github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA=
+github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg=
+github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0=
+github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM=
+github.com/libp2p/go-netroute v0.2.2 h1:Dejd8cQ47Qx2kRABg6lPwknU7+nBnFRpko45/fFPuZ8=
+github.com/libp2p/go-netroute v0.2.2/go.mod h1:Rntq6jUAH0l9Gg17w5bFGhcC9a+vk4KNXs6s7IljKYE=
+github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s=
+github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU=
+github.com/libp2p/go-yamux/v5 v5.0.1 h1:f0WoX/bEF2E8SbE4c/k1Mo+/9z0O4oC/hWEA+nfYRSg=
+github.com/libp2p/go-yamux/v5 v5.0.1/go.mod h1:en+3cdX51U0ZslwRdRLrvQsdayFt3TSUKvBGErzpWbU=
+github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
+github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/marcopolo/simnet v0.0.1 h1:rSMslhPz6q9IvJeFWDoMGxMIrlsbXau3NkuIXHGJxfg=
+github.com/marcopolo/simnet v0.0.1/go.mod h1:WDaQkgLAjqDUEBAOXz22+1j6wXKfGlC5sD5XWt3ddOs=
+github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk=
+github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
+github.com/miekg/dns v1.1.66 h1:FeZXOS3VCVsKnEAd+wBkjMC3D2K+ww66Cq3VnCINuJE=
+github.com/miekg/dns v1.1.66/go.mod h1:jGFzBsSNbJw6z1HYut1RKBKHA9PBdxeHrZG8J+gC2WE=
+github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
+github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms=
+github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc=
+github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU=
+github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc=
+github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s=
+github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
+github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
+github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
+github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
+github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
+github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
+github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
+github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
+github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
+github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
+github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo=
+github.com/multiformats/go-multiaddr v0.16.0 h1:oGWEVKioVQcdIOBlYM8BH1rZDWOGJSqr9/BKl6zQ4qc=
+github.com/multiformats/go-multiaddr v0.16.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0=
+github.com/multiformats/go-multiaddr-dns v0.4.1 h1:whi/uCLbDS3mSEUMb1MsoT4uzUeZB0N32yzufqS0i5M=
+github.com/multiformats/go-multiaddr-dns v0.4.1/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc=
+github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=
+github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo=
+github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
+github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
+github.com/multiformats/go-multicodec v0.9.1 h1:x/Fuxr7ZuR4jJV4Os5g444F7xC4XmyUaT/FWtE+9Zjo=
+github.com/multiformats/go-multicodec v0.9.1/go.mod h1:LLWNMtyV5ithSBUo3vFIMaeDy+h3EbkMTek1m+Fybbo=
+github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
+github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
+github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
+github.com/multiformats/go-multistream v0.6.1 h1:4aoX5v6T+yWmc2raBHsTvzmFhOI8WVOer28DeBBEYdQ=
+github.com/multiformats/go-multistream v0.6.1/go.mod h1:ksQf6kqHAb6zIsyw7Zm+gAuVo57Qbq84E27YlYqavqw=
+github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
+github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
+github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
+github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
+github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
+github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
+github.com/onsi/gomega v1.36.3 h1:hID7cr8t3Wp26+cYnfcjR6HpJ00fdogN6dqZ1t6IylU=
+github.com/onsi/gomega v1.36.3/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0=
+github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
+github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
+github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
+github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o=
+github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oLo8Rs4Py/M=
+github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s=
+github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk=
+github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE=
+github.com/pion/dtls/v3 v3.0.6 h1:7Hkd8WhAJNbRgq9RgdNh1aaWlZlGpYTzdqjy9x9sK2E=
+github.com/pion/dtls/v3 v3.0.6/go.mod h1:iJxNQ3Uhn1NZWOMWlLxEEHAN5yX7GyPvvKw04v9bzYU=
+github.com/pion/ice/v4 v4.0.10 h1:P59w1iauC/wPk9PdY8Vjl4fOFL5B+USq1+xbDcN6gT4=
+github.com/pion/ice/v4 v4.0.10/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw=
+github.com/pion/interceptor v0.1.40 h1:e0BjnPcGpr2CFQgKhrQisBU7V3GXK6wrfYrGYaU6Jq4=
+github.com/pion/interceptor v0.1.40/go.mod h1:Z6kqH7M/FYirg3frjGJ21VLSRJGBXB/KqaTIrdqnOic=
+github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms=
+github.com/pion/logging v0.2.3 h1:gHuf0zpoh1GW67Nr6Gj4cv5Z9ZscU7g/EaoC/Ke/igI=
+github.com/pion/logging v0.2.3/go.mod h1:z8YfknkquMe1csOrxK5kc+5/ZPAzMxbKLX5aXpbpC90=
+github.com/pion/mdns/v2 v2.0.7 h1:c9kM8ewCgjslaAmicYMFQIde2H9/lrZpjBkN8VwoVtM=
+github.com/pion/mdns/v2 v2.0.7/go.mod h1:vAdSYNAT0Jy3Ru0zl2YiW3Rm/fJCwIeM0nToenfOJKA=
+github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA=
+github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8=
+github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo=
+github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0=
+github.com/pion/rtp v1.8.19 h1:jhdO/3XhL/aKm/wARFVmvTfq0lC/CvN1xwYKmduly3c=
+github.com/pion/rtp v1.8.19/go.mod h1:bAu2UFKScgzyFqvUKmbvzSdPr+NGbZtv6UB2hesqXBk=
+github.com/pion/sctp v1.8.39 h1:PJma40vRHa3UTO3C4MyeJDQ+KIobVYRZQZ0Nt7SjQnE=
+github.com/pion/sctp v1.8.39/go.mod h1:cNiLdchXra8fHQwmIoqw0MbLLMs+f7uQ+dGMG2gWebE=
+github.com/pion/sdp/v3 v3.0.13 h1:uN3SS2b+QDZnWXgdr69SM8KB4EbcnPnPf2Laxhty/l4=
+github.com/pion/sdp/v3 v3.0.13/go.mod h1:88GMahN5xnScv1hIMTqLdu/cOcUkj6a9ytbncwMCq2E=
+github.com/pion/srtp/v3 v3.0.6 h1:E2gyj1f5X10sB/qILUGIkL4C2CqK269Xq167PbGCc/4=
+github.com/pion/srtp/v3 v3.0.6/go.mod h1:BxvziG3v/armJHAaJ87euvkhHqWe9I7iiOy50K2QkhY=
+github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4=
+github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8=
+github.com/pion/stun/v3 v3.0.0 h1:4h1gwhWLWuZWOJIJR9s2ferRO+W3zA/b6ijOI6mKzUw=
+github.com/pion/stun/v3 v3.0.0/go.mod h1:HvCN8txt8mwi4FBvS3EmDghW6aQJ24T+y+1TKjB5jyU=
+github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g=
+github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0=
+github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q=
+github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E=
+github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0=
+github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo=
+github.com/pion/turn/v4 v4.0.2 h1:ZqgQ3+MjP32ug30xAbD6Mn+/K4Sxi3SdNOTFf+7mpps=
+github.com/pion/turn/v4 v4.0.2/go.mod h1:pMMKP/ieNAG/fN5cZiN4SDuyKsXtNTr0ccN7IToA1zs=
+github.com/pion/webrtc/v4 v4.1.2 h1:mpuUo/EJ1zMNKGE79fAdYNFZBX790KE7kQQpLMjjR54=
+github.com/pion/webrtc/v4 v4.1.2/go.mod h1:xsCXiNAmMEjIdFxAYU0MbB3RwRieJsegSB2JZsGN+8U=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
+github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
+github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4=
+github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
+github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
+github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
+github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
+github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
+github.com/quic-go/quic-go v0.54.0 h1:6s1YB9QotYI6Ospeiguknbp2Znb/jZYjZLRXn9kMQBg=
+github.com/quic-go/quic-go v0.54.0/go.mod h1:e68ZEaCdyviluZmy44P6Iey98v/Wfz6HCjQEm+l8zTY=
+github.com/quic-go/webtransport-go v0.9.0 h1:jgys+7/wm6JarGDrW+lD/r9BGqBAmqY/ssklE09bA70=
+github.com/quic-go/webtransport-go v0.9.0/go.mod h1:4FUYIiUc75XSsF6HShcLeXXYZJ9AGwo/xh3L8M/P1ao=
+github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
+github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
+github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
+github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
+github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
+github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0=
+github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
+github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
+github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw=
+github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI=
+github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU=
+github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag=
+github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg=
+github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw=
+github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y=
+github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
+github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q=
+github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ=
+github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I=
+github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0=
+github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ=
+github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk=
+github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
+github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
+github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
+github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
+github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
+github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
+github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
+github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
+github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
+github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU=
+github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
+go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4=
+go.uber.org/dig v1.19.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
+go.uber.org/fx v1.24.0 h1:wE8mruvpg2kiiL1Vqd0CC+tr0/24XIB10Iwp2lLWzkg=
+go.uber.org/fx v1.24.0/go.mod h1:AmDeGyS+ZARGKM4tlH4FY2Jr63VjbEDJHtqXTGP5hbo=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko=
+go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
+go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
+golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
+golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
+golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
+golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
+golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
+golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 h1:bsqhLWFR6G6xiQcb+JoGqdKdRU6WzPWmK8E0jxTjzo4=
+golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8=
+golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w=
+golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
+golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
+golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
+golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
+golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
+golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
+golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
+golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
+golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
+golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
+golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo=
+golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
+google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
+google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
+google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
+google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
+google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
+honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg=
+lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo=
+sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
+sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
diff --git a/test-plans/ping-version.json b/test-plans/ping-version.json
new file mode 100644
index 0000000000..705934f2b7
--- /dev/null
+++ b/test-plans/ping-version.json
@@ -0,0 +1,19 @@
+{
+ "id": "go-libp2p-head",
+ "containerImageID": "go-libp2p-head",
+ "transports": [
+ "tcp",
+ "ws",
+ "wss",
+ "quic-v1",
+ "webtransport",
+ "webrtc-direct"
+ ],
+ "secureChannels": [
+ "tls",
+ "noise"
+ ],
+ "muxers": [
+ "yamux"
+ ]
+}
diff --git a/tools.go b/tools.go
new file mode 100644
index 0000000000..7a650d9c19
--- /dev/null
+++ b/tools.go
@@ -0,0 +1,9 @@
+//go:build tools
+
+package libp2p
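+
+// This file follows the common "tools.go" pattern: it records build-time tool
+// dependencies (mockgen, goimports, protoc-gen-go) so that `go mod tidy` keeps
+// them in go.mod, while the `tools` build tag keeps them out of normal builds.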
+
+import (
+ _ "go.uber.org/mock/mockgen"
+ _ "golang.org/x/tools/cmd/goimports"
+ _ "google.golang.org/protobuf/cmd/protoc-gen-go"
+)
diff --git a/version.json b/version.json
new file mode 100644
index 0000000000..85e3091d23
--- /dev/null
+++ b/version.json
@@ -0,0 +1,3 @@
+{
+ "version": "v0.43.0"
+}
diff --git a/x/rate/limiter.go b/x/rate/limiter.go
new file mode 100644
index 0000000000..42522e11e9
--- /dev/null
+++ b/x/rate/limiter.go
@@ -0,0 +1,322 @@
+// Package rate provides rate limiting functionality at a global, network, and subnet level.
+package rate
+
+import (
+ "container/heap"
+ "net/netip"
+ "slices"
+ "sync"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/network"
+ manet "github.com/multiformats/go-multiaddr/net"
+ "golang.org/x/time/rate"
+)
+
+// Limit is the configuration for a token bucket rate limiter.
+// The bucket has a capacity of Burst, and is refilled at a rate of RPS tokens per second.
+// Initially, buckets are completely full, i.e. the number of tokens in the bucket is equal to `Burst`.
+// In any given time interval of T seconds, the maximum number of events allowed is `T*RPS + Burst`.
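+// For example, RPS = 10 and Burst = 20 allow at most 10*10 + 20 = 120 events in
+// any 10 second window.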
+type Limit struct {
+ // RPS is the rate of requests per second in steady state.
+ RPS float64
+ // Burst is the maximum number of requests allowed beyond the steady-state RPS.
+ Burst int
+}
+
+// PrefixLimit is a rate limit configuration that applies to a specific network prefix.
+type PrefixLimit struct {
+ Prefix netip.Prefix
+ Limit
+}
+
+// SubnetLimit is a rate limit configuration that applies to a specific subnet.
+type SubnetLimit struct {
+ PrefixLength int
+ Limit
+}
+
+// Limiter rate limits new streams for a service. It allows setting NetworkPrefix-specific,
+// global, and subnet-specific limits. Use 0 for no rate limiting.
+// The limiter maintains internal state that is cleaned up periodically as requests are checked.
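+//
+// A minimal usage sketch (the protocol ID and handler here are hypothetical):
+//
+//	limiter := &rate.Limiter{GlobalLimit: rate.Limit{RPS: 100, Burst: 200}}
+//	host.SetStreamHandler(myProtoID, limiter.Limit(myHandler))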
+type Limiter struct {
+ // NetworkPrefixLimits are limits for streams with peer IPs belonging to specific subnets.
+ // It can be used to increase the limit for trusted networks and decrease the limit for specific networks.
+ NetworkPrefixLimits []PrefixLimit
+ // GlobalLimit is the limit for all streams where the peer IP doesn't fall within any
+ // of the `NetworkPrefixLimits`.
+ GlobalLimit Limit
+ // SubnetRateLimiter is a rate limiter for subnets.
+ SubnetRateLimiter SubnetLimiter
+
+ initOnce sync.Once
+ globalBucket *rate.Limiter
+ networkPrefixBuckets []*rate.Limiter // the ith element rate limits the ith entry in NetworkPrefixLimits
+}
+
+func (r *Limiter) init() {
+ r.initOnce.Do(func() {
+ if r.GlobalLimit.RPS == 0 {
+ r.globalBucket = rate.NewLimiter(rate.Inf, 0)
+ } else {
+ r.globalBucket = rate.NewLimiter(rate.Limit(r.GlobalLimit.RPS), r.GlobalLimit.Burst)
+ }
+ // clone the slice in case it's shared with other limiters
+ r.NetworkPrefixLimits = slices.Clone(r.NetworkPrefixLimits)
+ // sort such that the widest prefix (smallest bit count) is last.
+ slices.SortFunc(r.NetworkPrefixLimits, func(a, b PrefixLimit) int { return b.Prefix.Bits() - a.Prefix.Bits() })
+ r.networkPrefixBuckets = make([]*rate.Limiter, 0, len(r.NetworkPrefixLimits))
+ for _, limit := range r.NetworkPrefixLimits {
+ if limit.RPS == 0 {
+ r.networkPrefixBuckets = append(r.networkPrefixBuckets, rate.NewLimiter(rate.Inf, 0))
+ } else {
+ r.networkPrefixBuckets = append(r.networkPrefixBuckets, rate.NewLimiter(rate.Limit(limit.RPS), limit.Burst))
+ }
+ }
+ })
+}
+
+// Limit rate limits a StreamHandler function.
+func (r *Limiter) Limit(f func(s network.Stream)) func(s network.Stream) {
+ r.init()
+ return func(s network.Stream) {
+ addr := s.Conn().RemoteMultiaddr()
+ ip, err := manet.ToIP(addr)
+ if err != nil {
+ ip = nil
+ }
+ ipAddr, ok := netip.AddrFromSlice(ip)
+ if !ok {
+ ipAddr = netip.Addr{}
+ }
+ if !r.Allow(ipAddr) {
+ _ = s.ResetWithError(network.StreamRateLimited)
+ return
+ }
+ f(s)
+ }
+}
+
+// Allow returns true if requests for `ipAddr` are within the specified rate limits.
+func (r *Limiter) Allow(ipAddr netip.Addr) bool {
+ r.init()
+ // Check buckets from the most specific to the least.
+ //
+ // This ensures that a single peer cannot take up all the tokens in the global
+ // rate limiting bucket. We *MUST* follow this order because the rate limiter
+ // implementation doesn't have a `ReturnToken` method. If we checked the global
+ // bucket before the specific bucket, and the specific bucket rejected the
+ // request, there's no way to return the token to the global bucket. So all
+ // rejected requests from the specific bucket would take up tokens from the global bucket.
+
+ // Prefixes have been sorted from most to least specific, so rejected requests for more
+ // specific prefixes don't take up tokens from the less specific prefixes.
+ isWithinNetworkPrefix := false
+ for i, limit := range r.NetworkPrefixLimits {
+ if limit.Prefix.Contains(ipAddr) {
+ if !r.networkPrefixBuckets[i].Allow() {
+ return false
+ }
+ isWithinNetworkPrefix = true
+ }
+ }
+ if isWithinNetworkPrefix {
+ return true
+ }
+
+ if !r.SubnetRateLimiter.Allow(ipAddr, time.Now()) {
+ return false
+ }
+ return r.globalBucket.Allow()
+}
+
+// SubnetLimiter rate limits requests per IP subnet.
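+// For example, an IPv4 SubnetLimit with PrefixLength 24 makes all addresses that
+// share a /24 (e.g. everything in 192.0.2.0/24) draw from a single token bucket.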
+type SubnetLimiter struct {
+ // IPv4SubnetLimits are the per subnet limits for streams with IPv4 Peers.
+ IPv4SubnetLimits []SubnetLimit
+ // IPv6SubnetLimits are the per subnet limits for streams with IPv6 Peers.
+ IPv6SubnetLimits []SubnetLimit
+ // GracePeriod is the time to wait before removing a bucket that has refilled to capacity.
+ // Keeping a bucket around helps prevent allocations.
+ GracePeriod time.Duration
+
+ initOnce sync.Once
+ mx sync.Mutex
+ ipv4Heaps []*bucketHeap
+ ipv6Heaps []*bucketHeap
+}
+
+func (s *SubnetLimiter) init() {
+ s.initOnce.Do(func() {
+ // sort such that the smaller prefix length, i.e. the largest subnet, comes last
+ slices.SortFunc(s.IPv4SubnetLimits, func(a, b SubnetLimit) int { return b.PrefixLength - a.PrefixLength })
+ slices.SortFunc(s.IPv6SubnetLimits, func(a, b SubnetLimit) int { return b.PrefixLength - a.PrefixLength })
+
+ s.ipv4Heaps = make([]*bucketHeap, len(s.IPv4SubnetLimits))
+ for i := range s.IPv4SubnetLimits {
+ s.ipv4Heaps[i] = &bucketHeap{
+ prefixBucket: make([]prefixBucketWithExpiry, 0),
+ prefixToIndex: make(map[netip.Prefix]int),
+ }
+ heap.Init(s.ipv4Heaps[i])
+ }
+
+ s.ipv6Heaps = make([]*bucketHeap, len(s.IPv6SubnetLimits))
+ for i := range s.IPv6SubnetLimits {
+ s.ipv6Heaps[i] = &bucketHeap{
+ prefixBucket: make([]prefixBucketWithExpiry, 0),
+ prefixToIndex: make(map[netip.Prefix]int),
+ }
+ heap.Init(s.ipv6Heaps[i])
+ }
+ })
+}
+
+// Allow returns true if requests for `ipAddr` are within the specified rate limits.
+func (s *SubnetLimiter) Allow(ipAddr netip.Addr, now time.Time) bool {
+ s.init()
+ s.mx.Lock()
+ defer s.mx.Unlock()
+
+ s.cleanUp(now)
+
+ var subNetLimits []SubnetLimit
+ var heaps []*bucketHeap
+ if ipAddr.Is4() {
+ subNetLimits = s.IPv4SubnetLimits
+ heaps = s.ipv4Heaps
+ } else {
+ subNetLimits = s.IPv6SubnetLimits
+ heaps = s.ipv6Heaps
+ }
+
+ for i, limit := range subNetLimits {
+ prefix, err := ipAddr.Prefix(limit.PrefixLength)
+ if err != nil {
+ return false // we have a valid ipAddr, so this shouldn't happen
+ }
+
+ bucket := heaps[i].Get(prefix)
+ if bucket == (prefixBucketWithExpiry{}) {
+ bucket = prefixBucketWithExpiry{
+ Prefix: prefix,
+ tokenBucket: tokenBucket{rate.NewLimiter(rate.Limit(limit.RPS), limit.Burst)},
+ Expiry: now,
+ }
+ }
+
+ if !bucket.Allow() {
+ // The bucket is empty; its expiry would have been set correctly the last time
+ // it allowed a request.
+ return false
+ }
+ bucket.Expiry = bucket.FullAt(now).Add(s.GracePeriod)
+ heaps[i].Upsert(bucket)
+ }
+ return true
+}
+
+// cleanUp removes limiters that have expired by `now`.
+func (s *SubnetLimiter) cleanUp(now time.Time) {
+ for _, h := range s.ipv4Heaps {
+ h.Expire(now)
+ }
+ for _, h := range s.ipv6Heaps {
+ h.Expire(now)
+ }
+}
+
+// tokenBucket is a *rate.Limiter with a `FullAt` method.
+type tokenBucket struct {
+ *rate.Limiter
+}
+
+// FullAt returns the instant at which the bucket will be full.
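+// For example, a bucket with Burst 10, a refill rate of 1 token per second, and
+// 0 tokens at `now` will be full 10 seconds after `now`.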
+func (b *tokenBucket) FullAt(now time.Time) time.Time {
+ tokensNeeded := float64(b.Burst()) - b.TokensAt(now)
+ refillRate := float64(b.Limit())
+ eta := time.Duration((tokensNeeded / refillRate) * float64(time.Second))
+ return now.Add(eta)
+}
+
+// prefixBucketWithExpiry is a token bucket with a prefix and Expiry. The expiry is
+// when the bucket will be full of tokens.
+type prefixBucketWithExpiry struct {
+ tokenBucket
+ Prefix netip.Prefix
+ Expiry time.Time
+}
+
+// bucketHeap is a heap of buckets ordered by their Expiry. At expiry, the bucket
+// is removed from the heap as a full bucket is indistinguishable from a new bucket.
+type bucketHeap struct {
+ prefixBucket []prefixBucketWithExpiry
+ prefixToIndex map[netip.Prefix]int
+}
+
+var _ heap.Interface = (*bucketHeap)(nil)
+
+// Upsert replaces the bucket with prefix `b.Prefix` with the provided bucket, `b`, or
+// inserts `b` if no bucket with prefix `b.Prefix` exists.
+func (h *bucketHeap) Upsert(b prefixBucketWithExpiry) {
+ if i, ok := h.prefixToIndex[b.Prefix]; ok {
+ h.prefixBucket[i] = b
+ heap.Fix(h, i)
+ return
+ }
+ heap.Push(h, b)
+}
+
+// Get returns the bucket for a prefix, or a zero-value bucket if none exists.
+func (h *bucketHeap) Get(prefix netip.Prefix) prefixBucketWithExpiry {
+ if i, ok := h.prefixToIndex[prefix]; ok {
+ return h.prefixBucket[i]
+ }
+ return prefixBucketWithExpiry{}
+}
+
+// Expire removes elements whose expiry is at or before `expiry`.
+func (h *bucketHeap) Expire(expiry time.Time) {
+ for h.Len() > 0 {
+ oldest := h.prefixBucket[0]
+ if oldest.Expiry.After(expiry) {
+ break
+ }
+ heap.Pop(h)
+ }
+}
+
+// Methods for the heap interface
+
+// Len returns the length of the heap
+func (h *bucketHeap) Len() int {
+ return len(h.prefixBucket)
+}
+
+// Less compares two elements in the heap
+func (h *bucketHeap) Less(i, j int) bool {
+ return h.prefixBucket[i].Expiry.Before(h.prefixBucket[j].Expiry)
+}
+
+// Swap swaps two elements in the heap
+func (h *bucketHeap) Swap(i, j int) {
+ h.prefixBucket[i], h.prefixBucket[j] = h.prefixBucket[j], h.prefixBucket[i]
+ h.prefixToIndex[h.prefixBucket[i].Prefix] = i
+ h.prefixToIndex[h.prefixBucket[j].Prefix] = j
+}
+
+// Push adds a new element to the heap
+func (h *bucketHeap) Push(x any) {
+ item := x.(prefixBucketWithExpiry)
+ h.prefixBucket = append(h.prefixBucket, item)
+ h.prefixToIndex[item.Prefix] = len(h.prefixBucket) - 1
+}
+
+// Pop removes and returns the top element from the heap
+func (h *bucketHeap) Pop() any {
+ n := len(h.prefixBucket)
+ item := h.prefixBucket[n-1]
+ h.prefixBucket = h.prefixBucket[0 : n-1]
+ delete(h.prefixToIndex, item.Prefix)
+ return item
+}
diff --git a/x/rate/limiter_test.go b/x/rate/limiter_test.go
new file mode 100644
index 0000000000..b3c730283f
--- /dev/null
+++ b/x/rate/limiter_test.go
@@ -0,0 +1,256 @@
+package rate
+
+import (
+ "fmt"
+ "net/netip"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+ "golang.org/x/time/rate"
+)
+
+const rateLimitErrorTolerance = 0.05
+
+func getSleepDurationAndRequestCount(rps float64) (time.Duration, int) {
+ sleepDuration := 100 * time.Millisecond
+ requestCount := int(sleepDuration.Seconds() * float64(rps))
+ if requestCount < 1 {
+ // Add 1ms to ensure we do get 1 request. If the rate is low enough that
+ // 100ms won't see a single request, the extra 1ms won't cause an error here.
+ sleepDuration = time.Duration((1/rps)*float64(time.Second)) + 1*time.Millisecond
+ requestCount = 1
+ }
+ return sleepDuration, requestCount
+}
+
+func assertLimiter(t *testing.T, rl *Limiter, ipAddr netip.Addr, allowed, errorMargin int) {
+ t.Helper()
+ for i := 0; i < allowed; i++ {
+ require.True(t, rl.Allow(ipAddr))
+ }
+ for i := 0; i < errorMargin; i++ {
+ rl.Allow(ipAddr)
+ }
+ require.False(t, rl.Allow(ipAddr))
+}
+
+func TestLimiterGlobal(t *testing.T) {
+ addr := netip.MustParseAddr("127.0.0.1")
+ limits := []Limit{
+ {RPS: 0.0, Burst: 1},
+ {RPS: 0.8, Burst: 1},
+ {RPS: 10, Burst: 20},
+ {RPS: 100, Burst: 200},
+ {RPS: 1000, Burst: 2000},
+ }
+ for _, limit := range limits {
+ t.Run(fmt.Sprintf("limit %0.1f", limit.RPS), func(t *testing.T) {
+ rl := &Limiter{
+ GlobalLimit: limit,
+ }
+ if limit.RPS == 0 {
+ // RPS of 0 implies no rate limiting; any large request count would do here
+ for i := 0; i < 1000; i++ {
+ require.True(t, rl.Allow(addr))
+ }
+ return
+ }
+ assertLimiter(t, rl, addr, limit.Burst, int(limit.RPS*rateLimitErrorTolerance))
+ sleepDuration, requestCount := getSleepDurationAndRequestCount(limit.RPS)
+ time.Sleep(sleepDuration)
+ assertLimiter(t, rl, addr, requestCount, int(float64(requestCount)*rateLimitErrorTolerance))
+ })
+ }
+}
+
+func TestLimiterNetworkPrefix(t *testing.T) {
+ local := netip.MustParseAddr("127.0.0.1")
+ public := netip.MustParseAddr("1.1.1.1")
+ rl := &Limiter{
+ NetworkPrefixLimits: []PrefixLimit{
+ {Prefix: netip.MustParsePrefix("127.0.0.0/24"), Limit: Limit{}},
+ },
+ GlobalLimit: Limit{RPS: 10, Burst: 10},
+ }
+ // addresses within the prefix are allowed even over the global limit
+ for range rl.GlobalLimit.Burst + 100 {
+ require.True(t, rl.Allow(local))
+ }
+ // rate limit public ips
+ assertLimiter(t, rl, public, rl.GlobalLimit.Burst, int(rl.GlobalLimit.RPS*rateLimitErrorTolerance))
+
+ // public ip rejected
+ require.False(t, rl.Allow(public))
+ // local ip accepted
+ for range 100 {
+ require.True(t, rl.Allow(local))
+ }
+}
+
+func TestLimiterNetworkPrefixWidth(t *testing.T) {
+ a1 := netip.MustParseAddr("1.1.1.1")
+ a2 := netip.MustParseAddr("1.1.0.1")
+
+ wideLimit := 20
+ narrowLimit := 10
+ rl := &Limiter{
+ NetworkPrefixLimits: []PrefixLimit{
+ {Prefix: netip.MustParsePrefix("1.1.0.0/16"), Limit: Limit{RPS: 0.01, Burst: wideLimit}},
+ {Prefix: netip.MustParsePrefix("1.1.1.0/24"), Limit: Limit{RPS: 0.01, Burst: narrowLimit}},
+ },
+ }
+ for range 2 * wideLimit {
+ rl.Allow(a1)
+ }
+ // a1 rejected
+ require.False(t, rl.Allow(a1))
+ // a2 accepted
+ for range wideLimit - narrowLimit {
+ require.True(t, rl.Allow(a2))
+ }
+}
+
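+// subnetAddrs returns a generator that cycles through the addresses in `prefix`,
+// wrapping back to the start once it leaves the prefix.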
+func subnetAddrs(prefix netip.Prefix) func() netip.Addr {
+ next := prefix.Addr()
+ return func() netip.Addr {
+ addr := next
+ next = addr.Next()
+ if !prefix.Contains(addr) {
+ next = prefix.Addr()
+ addr = next
+ }
+ return addr
+ }
+}
+
+func TestSubnetLimiter(t *testing.T) {
+ assertOutput := func(outcome bool, rl *SubnetLimiter, subnetAddrs func() netip.Addr, n int) {
+ t.Helper()
+ for range n {
+ require.Equal(t, outcome, rl.Allow(subnetAddrs(), time.Now()), "%d", n)
+ }
+ }
+
+ t.Run("Simple", func(*testing.T) {
+ // Keep the refill rate low
+ v4Small := SubnetLimit{PrefixLength: 24, Limit: Limit{RPS: 0.0001, Burst: 10}}
+ v4Large := SubnetLimit{PrefixLength: 16, Limit: Limit{RPS: 0.0001, Burst: 19}}
+
+ v6Small := SubnetLimit{PrefixLength: 64, Limit: Limit{RPS: 0.0001, Burst: 10}}
+ v6Large := SubnetLimit{PrefixLength: 48, Limit: Limit{RPS: 0.0001, Burst: 17}}
+ rl := &SubnetLimiter{
+ IPv4SubnetLimits: []SubnetLimit{v4Large, v4Small},
+ IPv6SubnetLimits: []SubnetLimit{v6Large, v6Small},
+ }
+
+ v4SubnetAddr1 := subnetAddrs(netip.MustParsePrefix("192.168.1.1/24"))
+ v4SubnetAddr2 := subnetAddrs(netip.MustParsePrefix("192.168.2.1/24"))
+ v6SubnetAddr1 := subnetAddrs(netip.MustParsePrefix("2001:0:0:1::/64"))
+ v6SubnetAddr2 := subnetAddrs(netip.MustParsePrefix("2001:0:0:2::/64"))
+
+ assertOutput(true, rl, v4SubnetAddr1, v4Small.Burst)
+ assertOutput(false, rl, v4SubnetAddr1, v4Large.Burst)
+
+ assertOutput(true, rl, v4SubnetAddr2, v4Large.Burst-v4Small.Burst)
+ assertOutput(false, rl, v4SubnetAddr2, v4Large.Burst)
+
+ assertOutput(true, rl, v6SubnetAddr1, v6Small.Burst)
+ assertOutput(false, rl, v6SubnetAddr1, v6Large.Burst)
+
+ assertOutput(true, rl, v6SubnetAddr2, v6Large.Burst-v6Small.Burst)
+ assertOutput(false, rl, v6SubnetAddr2, v6Large.Burst)
+ })
+
+ t.Run("Complex", func(*testing.T) {
+ limits := []SubnetLimit{
+ {PrefixLength: 32, Limit: Limit{RPS: 0.01, Burst: 10}},
+ {PrefixLength: 24, Limit: Limit{RPS: 0.01, Burst: 20}},
+ {PrefixLength: 16, Limit: Limit{RPS: 0.01, Burst: 30}},
+ {PrefixLength: 8, Limit: Limit{RPS: 0.01, Burst: 40}},
+ }
+ rl := &SubnetLimiter{
+ IPv4SubnetLimits: limits,
+ }
+
+ snAddrs := []func() netip.Addr{
+ subnetAddrs(netip.MustParsePrefix("192.168.1.1/32")),
+ subnetAddrs(netip.MustParsePrefix("192.168.1.2/24")),
+ subnetAddrs(netip.MustParsePrefix("192.168.2.1/16")),
+ subnetAddrs(netip.MustParsePrefix("192.0.1.1/8")),
+ }
+ for i, addrsFunc := range snAddrs {
+ prev := 0
+ if i > 0 {
+ prev = limits[i-1].Burst
+ }
+ assertOutput(true, rl, addrsFunc, limits[i].Burst-prev)
+ assertOutput(false, rl, addrsFunc, limits[i].Burst)
+ }
+ })
+
+ t.Run("Zero", func(t *testing.T) {
+ sl := SubnetLimiter{}
+ for range 10000 {
+ require.True(t, sl.Allow(netip.IPv6Loopback(), time.Now()))
+ }
+ })
+}
+
+func TestSubnetLimiterCleanup(t *testing.T) {
+ tc := []struct {
+ Limit
+ TTL time.Duration
+ }{
+ {Limit: Limit{RPS: 1, Burst: 10}, TTL: 10 * time.Second},
+ {Limit: Limit{RPS: 0.1, Burst: 2}, TTL: 20 * time.Second},
+ {Limit: Limit{RPS: 1, Burst: 100}, TTL: 100 * time.Second},
+ {Limit: Limit{RPS: 3, Burst: 6}, TTL: 2 * time.Second},
+ }
+ for i, tt := range tc {
+ t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+ ip1, ip2 := netip.IPv6Loopback(), netip.MustParseAddr("2001::")
+ sl := SubnetLimiter{IPv6SubnetLimits: []SubnetLimit{{PrefixLength: 64, Limit: tt.Limit}}}
+ now := time.Now()
+ // Empty the ip1 bucket
+ for range tt.Burst {
+ require.True(t, sl.Allow(ip1, now))
+ }
+ for range tt.Burst / 2 {
+ require.True(t, sl.Allow(ip2, now))
+ }
+ epsilon := 100 * time.Millisecond
+ // just before ip1 expiry
+ now = now.Add(tt.TTL).Add(-epsilon)
+ sl.cleanUp(now) // ip2 will be removed
+ require.Equal(t, 1, sl.ipv6Heaps[0].Len())
+ // just after ip1 expiry
+ now = now.Add(2 * epsilon)
+ require.True(t, sl.Allow(ip2, now)) // remove the ip1 bucket
+ require.Equal(t, 1, sl.ipv6Heaps[0].Len()) // ip2 added in the previous call
+ })
+ }
+}
+
+func TestTokenBucketFullAfter(t *testing.T) {
+ tc := []struct {
+ *rate.Limiter
+ FullAfter time.Duration
+ }{
+ {Limiter: rate.NewLimiter(1, 10), FullAfter: 10 * time.Second},
+ {Limiter: rate.NewLimiter(0.01, 10), FullAfter: 1000 * time.Second},
+ {Limiter: rate.NewLimiter(0.01, 1), FullAfter: 100 * time.Second},
+ }
+ for i, tt := range tc {
+ t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+ b := tokenBucket{tt.Limiter}
+ now := time.Now()
+ for range b.Burst() {
+ tt.Allow()
+ }
+ epsilon := 10 * time.Millisecond
+ require.GreaterOrEqual(t, tt.FullAfter+epsilon, b.FullAt(now).Sub(now))
+ require.LessOrEqual(t, tt.FullAfter-epsilon, b.FullAt(now).Sub(now))
+ })
+ }
+}
diff --git a/x/simlibp2p/libp2p.go b/x/simlibp2p/libp2p.go
new file mode 100644
index 0000000000..47e745bb3d
--- /dev/null
+++ b/x/simlibp2p/libp2p.go
@@ -0,0 +1,258 @@
+package simconnlibp2p
+
+import (
+ "crypto/rand"
+ "fmt"
+ "net"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p"
+ "github.com/libp2p/go-libp2p/config"
+ "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/host"
+ "github.com/libp2p/go-libp2p/core/network"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+ blankhost "github.com/libp2p/go-libp2p/p2p/host/blank"
+ "github.com/libp2p/go-libp2p/p2p/host/eventbus"
+ "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem"
+ "github.com/libp2p/go-libp2p/p2p/net/connmgr"
+ "github.com/libp2p/go-libp2p/p2p/net/swarm"
+ "github.com/libp2p/go-libp2p/p2p/protocol/identify"
+ libp2pquic "github.com/libp2p/go-libp2p/p2p/transport/quic"
+ "github.com/libp2p/go-libp2p/p2p/transport/quicreuse"
+ "github.com/marcopolo/simnet"
+ "github.com/multiformats/go-multiaddr"
+ "github.com/quic-go/quic-go"
+ "github.com/stretchr/testify/require"
+ "go.uber.org/fx"
+)
+
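+// MustNewHost constructs a libp2p host with the given options, failing the test
+// immediately if construction errors.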
+func MustNewHost(t *testing.T, opts ...libp2p.Option) host.Host {
+ t.Helper()
+ h, err := libp2p.New(opts...)
+ require.NoError(t, err)
+ return h
+}
+
+type MockSourceIPSelector struct {
+ ip atomic.Pointer[net.IP]
+}
+
+func (m *MockSourceIPSelector) PreferredSourceIPForDestination(_ *net.UDPAddr) (net.IP, error) {
+ return *m.ip.Load(), nil
+}
+
+const OneMbps = 1_000_000
+
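+// QUICSimnet returns a libp2p option that sends all QUIC traffic over the given
+// simulated network, applying the provided link settings to each endpoint.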
+func QUICSimnet(simnet *simnet.Simnet, linkSettings simnet.NodeBiDiLinkSettings, quicReuseOpts ...quicreuse.Option) libp2p.Option {
+ m := &MockSourceIPSelector{}
+ quicReuseOpts = append(quicReuseOpts,
+ quicreuse.OverrideSourceIPSelector(func() (quicreuse.SourceIPSelector, error) {
+ return m, nil
+ }),
+ quicreuse.OverrideListenUDP(func(_ string, address *net.UDPAddr) (net.PacketConn, error) {
+ m.ip.Store(&address.IP)
+ c := simnet.NewEndpoint(address, linkSettings)
+ return c, nil
+ }))
+ return libp2p.QUICReuse(
+ func(l fx.Lifecycle, statelessResetKey quic.StatelessResetKey, tokenKey quic.TokenGeneratorKey, opts ...quicreuse.Option) (*quicreuse.ConnManager, error) {
+ cm, err := quicreuse.NewConnManager(statelessResetKey, tokenKey, opts...)
+ if err != nil {
+ return nil, err
+ }
+ l.Append(fx.StopHook(func() error {
+ // When we pass in our own conn manager, we need to close it manually (??)
+ // TODO: this seems like a bug
+ return cm.Close()
+ }))
+ return cm, nil
+ }, quicReuseOpts...)
+}
+
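+// wrappedHost bundles a blank host with the resources it owns, so that Close
+// tears all of them down together.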
+type wrappedHost struct {
+ blankhost.BlankHost
+ ps peerstore.Peerstore
+ quicCM *quicreuse.ConnManager
+ idService identify.IDService
+ connMgr *connmgr.BasicConnMgr
+}
+
+func (h *wrappedHost) Close() error {
+ h.BlankHost.Close()
+ h.ps.Close()
+ h.quicCM.Close()
+ h.idService.Close()
+ h.connMgr.Close()
+ return nil
+}
+
+type BlankHostOpts struct {
+ ConnMgr *connmgr.BasicConnMgr
+ listenMultiaddr multiaddr.Multiaddr
+ simnet *simnet.Simnet
+ linkSettings simnet.NodeBiDiLinkSettings
+ quicReuseOpts []quicreuse.Option
+}
+
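+// newBlankHost assembles a host by hand: a fresh Ed25519 identity, an in-memory
+// peerstore, a swarm with a single simulated QUIC transport, a connection
+// manager, and an identify service on top of a blank host.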
+func newBlankHost(opts BlankHostOpts) (*wrappedHost, error) {
+ priv, _, err := crypto.GenerateEd25519Key(rand.Reader)
+ if err != nil {
+ return nil, err
+ }
+ id, err := peer.IDFromPrivateKey(priv)
+ if err != nil {
+ return nil, err
+ }
+ ps, err := pstoremem.NewPeerstore()
+ if err != nil {
+ return nil, err
+ }
+ ps.AddPrivKey(id, priv)
+
+ eb := eventbus.NewBus()
+
+ swarm, err := swarm.NewSwarm(id, ps, eb)
+ if err != nil {
+ return nil, err
+ }
+
+ statelessResetKey, err := config.PrivKeyToStatelessResetKey(priv)
+ if err != nil {
+ return nil, err
+ }
+ tokenGeneratorKey, err := config.PrivKeyToTokenGeneratorKey(priv)
+ if err != nil {
+ return nil, err
+ }
+ m := &MockSourceIPSelector{}
+ quicReuseOpts := append(opts.quicReuseOpts,
+ quicreuse.OverrideSourceIPSelector(func() (quicreuse.SourceIPSelector, error) {
+ return m, nil
+ }),
+ quicreuse.OverrideListenUDP(func(_ string, address *net.UDPAddr) (net.PacketConn, error) {
+ m.ip.Store(&address.IP)
+ c := opts.simnet.NewEndpoint(address, opts.linkSettings)
+ return c, nil
+ }),
+ )
+
+ quicCM, err := quicreuse.NewConnManager(statelessResetKey, tokenGeneratorKey, quicReuseOpts...)
+ if err != nil {
+ return nil, err
+ }
+ quicTr, err := libp2pquic.NewTransport(priv, quicCM, nil, nil, &network.NullResourceManager{})
+ if err != nil {
+ return nil, err
+ }
+
+ err = swarm.AddTransport(quicTr)
+ if err != nil {
+ return nil, err
+ }
+ err = swarm.Listen(opts.listenMultiaddr)
+ if err != nil {
+ return nil, err
+ }
+
+ var cm *connmgr.BasicConnMgr
+ if opts.ConnMgr == nil {
+ cm, err = connmgr.NewConnManager(100, 200, connmgr.WithGracePeriod(time.Second*10))
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ cm = opts.ConnMgr
+ }
+
+ host := blankhost.NewBlankHost(swarm, blankhost.WithEventBus(eb), blankhost.WithConnectionManager(cm))
+
+ idService, err := identify.NewIDService(host)
+ if err != nil {
+ return nil, err
+ }
+ idService.Start()
+
+ return &wrappedHost{
+ BlankHost: *host,
+ ps: ps,
+ quicCM: quicCM,
+ idService: idService,
+ connMgr: cm,
+ }, nil
+}
+
+type NodeLinkSettingsAndCount struct {
+ LinkSettings simnet.NodeBiDiLinkSettings
+ Count int
+}
+
+type HostAndIdx struct {
+ Host host.Host
+ Idx int
+}
+
+type SimpleLibp2pNetworkMeta struct {
+ Nodes []host.Host
+ AddrToNode map[string]HostAndIdx
+}
+
+type NetworkSettings struct {
+ UseBlankHost bool
+ QUICReuseOptsForHostIdx func(idx int) []quicreuse.Option
+ BlankHostOptsForHostIdx func(idx int) BlankHostOpts
+}
+
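+// SimpleLibp2pNetwork builds a simulated network containing Count hosts per
+// link-settings entry, each listening on its own public IPv4 address on UDP
+// port 8000 over QUIC-v1.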
+func SimpleLibp2pNetwork(linkSettings []NodeLinkSettingsAndCount, networkSettings NetworkSettings) (*simnet.Simnet, *SimpleLibp2pNetworkMeta, error) {
+ nw := &simnet.Simnet{}
+ meta := &SimpleLibp2pNetworkMeta{
+ AddrToNode: make(map[string]HostAndIdx),
+ }
+
+ for _, l := range linkSettings {
+ for i := 0; i < l.Count; i++ {
+ idx := len(meta.Nodes)
+ ip := simnet.IntToPublicIPv4(idx)
+ addr := fmt.Sprintf("/ip4/%s/udp/8000/quic-v1", ip)
+ var h host.Host
+ var err error
+ var quicReuseOpts []quicreuse.Option
+ if networkSettings.QUICReuseOptsForHostIdx != nil {
+ quicReuseOpts = networkSettings.QUICReuseOptsForHostIdx(idx)
+ }
+ if networkSettings.UseBlankHost {
+ var opts BlankHostOpts
+ if networkSettings.BlankHostOptsForHostIdx != nil {
+ opts = networkSettings.BlankHostOptsForHostIdx(idx)
+ }
+
+ h, err = newBlankHost(BlankHostOpts{
+ listenMultiaddr: multiaddr.StringCast(addr),
+ simnet: nw,
+ linkSettings: l.LinkSettings,
+ quicReuseOpts: quicReuseOpts,
+ ConnMgr: opts.ConnMgr,
+ })
+ } else {
+ h, err = libp2p.New(
+ libp2p.ListenAddrStrings(addr),
+ QUICSimnet(nw, l.LinkSettings, quicReuseOpts...),
+ // TODO: identify's address discovery currently stalls
+ // synctest
+ libp2p.DisableIdentifyAddressDiscovery(),
+ libp2p.ResourceManager(&network.NullResourceManager{}),
+ )
+ }
+ if err != nil {
+ return nil, nil, err
+ }
+ meta.Nodes = append(meta.Nodes, h)
+ meta.AddrToNode[addr] = HostAndIdx{Host: h, Idx: idx}
+ }
+ }
+
+ return nw, meta, nil
+}
diff --git a/x/simlibp2p/synctest_test.go b/x/simlibp2p/synctest_test.go
new file mode 100644
index 0000000000..691c517dc8
--- /dev/null
+++ b/x/simlibp2p/synctest_test.go
@@ -0,0 +1,133 @@
+//go:build goexperiment.synctest
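+// This file uses testing/synctest and is only built when the Go toolchain is
+// invoked with GOEXPERIMENT=synctest.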
+
+package simconnlibp2p_test
+
+import (
+ "context"
+ "math/rand"
+ "testing"
+ "time"
+
+ "testing/synctest"
+
+ "github.com/libp2p/go-libp2p"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/p2p/protocol/ping"
+ simlibp2p "github.com/libp2p/go-libp2p/x/simlibp2p"
+ "github.com/marcopolo/simnet"
+ "github.com/stretchr/testify/require"
+)
+
+func TestSimpleLibp2pNetwork_synctest(t *testing.T) {
+ synctest.Run(func() {
+ latency := 10 * time.Millisecond
+ network, meta, err := simlibp2p.SimpleLibp2pNetwork([]simlibp2p.NodeLinkSettingsAndCount{
+ {LinkSettings: simnet.NodeBiDiLinkSettings{
+ Downlink: simnet.LinkSettings{BitsPerSecond: 20 * simlibp2p.OneMbps, Latency: latency / 2}, // Divide by two since this is latency for each direction
+ Uplink: simnet.LinkSettings{BitsPerSecond: 20 * simlibp2p.OneMbps, Latency: latency / 2},
+ }, Count: 100},
+ }, simlibp2p.NetworkSettings{})
+ require.NoError(t, err)
+ network.Start()
+ defer network.Close()
+
+ defer func() {
+ for _, node := range meta.Nodes {
+ node.Close()
+ }
+ }()
+
+ // Test random nodes can ping each other
+ const numQueries = 100
+ for range numQueries {
+ i := rand.Intn(len(meta.Nodes))
+ j := rand.Intn(len(meta.Nodes))
+ for i == j {
+ j = rand.Intn(len(meta.Nodes))
+ }
+ h1 := meta.Nodes[i]
+ h2 := meta.Nodes[j]
+ t.Logf("connecting %s <-> %s", h1.ID(), h2.ID())
+ err := h1.Connect(context.Background(), peer.AddrInfo{
+ ID: h2.ID(),
+ Addrs: h2.Addrs(),
+ })
+ require.NoError(t, err)
+ pingA := ping.NewPingService(h1)
+ ping.NewPingService(h2)
+ time.Sleep(1 * time.Second)
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
+ defer cancel()
+ t.Logf("pinging %s <-> %s", h1.ID(), h2.ID())
+ res := pingA.Ping(ctx, meta.Nodes[j].ID())
+ result := <-res
+ t.Logf("pinged %s <-> %s", h1.ID(), h2.ID())
+ require.NoError(t, result.Error)
+ t.Logf("ping: (%d) <-> (%d): %v", i, j, result.RTT)
+ expectedLatency := 20 * time.Millisecond // RTT is the sum of the latency of the two links
+ percentDiff := float64(result.RTT-expectedLatency) / float64(expectedLatency)
+ if percentDiff > 0.20 {
+ t.Fatalf("latency is wrong: %v. percent off: %v", result.RTT, percentDiff)
+ }
+ }
+ })
+}
+
+func TestSimpleSimNetPing_synctest(t *testing.T) {
+ synctest.Run(func() {
+ router := &simnet.Simnet{}
+
+ const bandwidth = 10 * simlibp2p.OneMbps
+ const latency = 10 * time.Millisecond
+ linkSettings := simnet.NodeBiDiLinkSettings{
+ Downlink: simnet.LinkSettings{
+ BitsPerSecond: bandwidth,
+ Latency: latency / 2,
+ },
+ Uplink: simnet.LinkSettings{
+ BitsPerSecond: bandwidth,
+ Latency: latency / 2,
+ },
+ }
+
+ hostA := simlibp2p.MustNewHost(t,
+ libp2p.ListenAddrStrings("/ip4/1.0.0.1/udp/8000/quic-v1"),
+ libp2p.DisableIdentifyAddressDiscovery(),
+ simlibp2p.QUICSimnet(router, linkSettings),
+ )
+ hostB := simlibp2p.MustNewHost(t,
+ libp2p.ListenAddrStrings("/ip4/1.0.0.2/udp/8000/quic-v1"),
+ libp2p.DisableIdentifyAddressDiscovery(),
+ simlibp2p.QUICSimnet(router, linkSettings),
+ )
+
+ err := router.Start()
+ require.NoError(t, err)
+ defer router.Close()
+
+ defer hostA.Close()
+ defer hostB.Close()
+
+ err = hostA.Connect(context.Background(), peer.AddrInfo{
+ ID: hostB.ID(),
+ Addrs: hostB.Addrs(),
+ })
+ require.NoError(t, err)
+
+ pingA := ping.NewPingService(hostA)
+
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
+ defer cancel()
+
+ res := pingA.Ping(ctx, hostB.ID())
+ result := <-res
+ require.NoError(t, result.Error)
+ t.Logf("pingA -> pingB: %v", result.RTT)
+
+ expectedLatency := latency * 2 // RTT is the sum of the latency of the two links
+ percentDiff := float64(result.RTT-expectedLatency) / float64(expectedLatency)
+ if percentDiff > 0.20 {
+ t.Fatalf("latency is wrong: %v. percent off: %v", result.RTT, percentDiff)
+ }
+ })
+}