diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 51cf5d7629..8d78047ccd 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -4,6 +4,8 @@ FROM ghcr.io/linkerd/dev:${DEV_VERSION} RUN scurl https://run.linkerd.io/install-edge | sh && \ mkdir -p "$HOME/bin" && ln -s "$HOME/.linkerd2/bin/linkerd" "$HOME/bin/linkerd" +ENV RUSTFLAGS="--cfg tokio_unstable" + # XXX(ver) This doesn't currently work, because it puts # /usr/local/cargo/registry into a weird state with regard to permissions. #RUN rustup toolchain install --profile=minimal nightly diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 5498141711..a2725d816f 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -3,7 +3,7 @@ "build": { "dockerfile": "Dockerfile", "args": { - "DEV_VERSION": "v43", + "DEV_VERSION": "v45", "http_proxy": "${localEnv:http_proxy}", "https_proxy": "${localEnv:https_proxy}" } @@ -42,7 +42,8 @@ "overrideCommand": false, "remoteUser": "code", "containerEnv": { - "CXX": "clang++-14", + "CXX": "clang++-19", + "RUSTFLAGS": "--cfg tokio_unstable" }, "mounts": [ { diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 48a3bc7591..8116acb766 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -11,12 +11,6 @@ updates: allow: - dependency-type: "all" ignore: - # These dependencies will be updated via higher-level aggregator dependencies like `clap`, - # `futures`, `prost`, `tracing`, and `trust-dns-resolver`: - - dependency-name: "futures-*" - - dependency-name: "prost-derive" - - dependency-name: "tracing-*" - - dependency-name: "trust-dns-proto" # These dependencies are for platforms that we don't support: - dependency-name: "hermit-abi" - dependency-name: "redox_*" @@ -24,6 +18,35 @@ updates: - dependency-name: "wasm-bindgen" - dependency-name: "web-sys" - dependency-name: "windows*" + groups: + boring: + patterns: + - "tokio-boring" + - "boring*" + futures: + 
patterns: + - "futures*" + grpc: + patterns: + - "prost*" + - "tonic*" + hickory: + patterns: + - "hickory*" + icu4x: + patterns: + - "icu_*" + opentelemetry: + patterns: + - "opentelemetry*" + rustls: + patterns: + - "tokio-rustls" + - "rustls*" + - "ring" + tracing: + patterns: + - "tracing*" - package-ecosystem: cargo directory: /linkerd/addr/fuzz diff --git a/.github/workflows/beta.yml b/.github/workflows/beta.yml index 3370eaeffb..3ca547cee5 100644 --- a/.github/workflows/beta.yml +++ b/.github/workflows/beta.yml @@ -15,20 +15,20 @@ env: CARGO_INCREMENTAL: 0 CARGO_NET_RETRY: 10 RUSTUP_MAX_RETRIES: 10 - RUSTFLAGS: "-D warnings" + RUSTFLAGS: "-D warnings --cfg tokio_unstable" permissions: contents: read jobs: build: - runs-on: ubuntu-latest - container: ghcr.io/linkerd/dev:v43-rust + runs-on: ubuntu-24.04 + container: ghcr.io/linkerd/dev:v45-rust timeout-minutes: 20 continue-on-error: true steps: - run: rustup toolchain install --profile=minimal beta - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 - run: git config --global --add safe.directory "$PWD" # actions/runner#2033 - run: just toolchain=beta fetch - run: just toolchain=beta build diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index eb88228528..447330f86a 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -15,17 +15,17 @@ concurrency: env: CARGO_INCREMENTAL: 0 CARGO_NET_RETRY: 10 - RUSTFLAGS: "-D warnings -A deprecated -C debuginfo=2" + RUSTFLAGS: "-D warnings -A deprecated --cfg tokio_unstable -C debuginfo=2" RUSTUP_MAX_RETRIES: 10 jobs: meta: timeout-minutes: 5 - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 - id: changed - uses: tj-actions/changed-files@0874344d6ebbaa00a27da73276ae7162fadcaf69 + uses: 
tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c with: files: | .codecov.yml @@ -40,19 +40,19 @@ jobs: codecov: needs: meta if: (github.event_name == 'push' && github.ref == 'refs/heads/main') || needs.meta.outputs.any_changed == 'true' - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 timeout-minutes: 30 container: - image: docker://ghcr.io/linkerd/dev:v43-rust + image: docker://ghcr.io/linkerd/dev:v45-rust options: --security-opt seccomp=unconfined # 🤷 env: - CXX: "/usr/bin/clang++-14" + CXX: "/usr/bin/clang++-19" steps: - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b - - uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + - uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 - run: cargo tarpaulin --locked --workspace --exclude=linkerd2-proxy --exclude=linkerd-transport-header --exclude=opencensus-proto --exclude=spire-proto --no-run - run: cargo tarpaulin --locked --workspace --exclude=linkerd2-proxy --exclude=linkerd-transport-header --exclude=opencensus-proto --exclude=spire-proto --skip-clean --ignore-tests --no-fail-fast --out=Xml # Some tests are especially flakey in coverage tests. That's fine. We # only really care to measure how much of our codebase is covered. 
continue-on-error: true - - uses: codecov/codecov-action@84508663e988701840491b86de86b666e8a86bed + - uses: codecov/codecov-action@0565863a31f2c772f9f0395002a31e3f06189574 diff --git a/.github/workflows/fuzzers.yml b/.github/workflows/fuzzers.yml index 290237230c..a7d8dae08c 100644 --- a/.github/workflows/fuzzers.yml +++ b/.github/workflows/fuzzers.yml @@ -17,7 +17,7 @@ env: CARGO_INCREMENTAL: 0 CARGO_NET_RETRY: 10 RUST_BACKTRACE: short - RUSTFLAGS: "-D warnings -A deprecated -C debuginfo=0" + RUSTFLAGS: "-D warnings -A deprecated --cfg tokio_unstable -C debuginfo=0" RUSTUP_MAX_RETRIES: 10 permissions: @@ -26,13 +26,13 @@ permissions: jobs: list-changed: timeout-minutes: 3 - runs-on: ubuntu-latest - container: docker://rust:1.76.0 + runs-on: ubuntu-24.04 + container: docker://rust:1.83.0 steps: - run: apt update && apt install -y jo - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 - run: git config --global --add safe.directory "$PWD" # actions/runner#2033 - - uses: tj-actions/changed-files@0874344d6ebbaa00a27da73276ae7162fadcaf69 + - uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c id: changed-files - name: list changed crates id: list-changed @@ -47,15 +47,15 @@ jobs: build: needs: [list-changed] timeout-minutes: 40 - runs-on: ubuntu-latest - container: docker://rust:1.76.0 + runs-on: ubuntu-24.04 + container: docker://rust:1.83.0 strategy: matrix: dir: ${{ fromJson(needs.list-changed.outputs.dirs) }} steps: - run: rustup toolchain add nightly - run: cargo install cargo-fuzz - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 - run: git config --global --add safe.directory "$PWD" # actions/runner#2033 - working-directory: ${{matrix.dir}} run: cargo +nightly fetch diff --git a/.github/workflows/markdown.yml b/.github/workflows/markdown.yml index 45f8fc8c88..fdfbefa803 
100644 --- a/.github/workflows/markdown.yml +++ b/.github/workflows/markdown.yml @@ -12,9 +12,9 @@ on: jobs: markdownlint: timeout-minutes: 5 - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b - - uses: DavidAnson/markdownlint-cli2-action@b4c9feab76d8025d1e83c653fa3990936df0e6c8 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + - uses: DavidAnson/markdownlint-cli2-action@05f32210e84442804257b2a6f20b273450ec8265 with: globs: "**/*.md" diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index 6d44a04ebe..301170189b 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -14,7 +14,7 @@ on: env: CARGO_INCREMENTAL: 0 CARGO_NET_RETRY: 10 - RUSTFLAGS: "-D warnings -A opaque_hidden_inferred_bound -C debuginfo=0" + RUSTFLAGS: "-D warnings -A opaque_hidden_inferred_bound --cfg tokio_unstable -C debuginfo=0" RUSTUP_MAX_RETRIES: 10 permissions: @@ -22,13 +22,13 @@ permissions: jobs: build: - runs-on: ubuntu-latest - container: ghcr.io/linkerd/dev:v43-rust + runs-on: ubuntu-24.04 + container: ghcr.io/linkerd/dev:v45-rust timeout-minutes: 20 continue-on-error: true steps: - run: rustup toolchain install --profile=minimal nightly - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 - run: git config --global --add safe.directory "$PWD" # actions/runner#2033 - run: just toolchain=nightly fetch - run: just toolchain=nightly profile=release build diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml index 333365b070..63f3412e51 100644 --- a/.github/workflows/pr.yml +++ b/.github/workflows/pr.yml @@ -5,7 +5,7 @@ env: CARGO_INCREMENTAL: 0 CARGO_NET_RETRY: 10 RUSTUP_MAX_RETRIES: 10 - RUSTFLAGS: "-D warnings -D deprecated -C debuginfo=0" + RUSTFLAGS: "-D warnings -D deprecated --cfg tokio_unstable -C debuginfo=0" concurrency: group: ${{ github.workflow 
}}-${{ github.head_ref }} @@ -14,24 +14,24 @@ concurrency: jobs: meta: timeout-minutes: 5 - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 - id: build - uses: tj-actions/changed-files@0874344d6ebbaa00a27da73276ae7162fadcaf69 + uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c with: files: | .github/workflows/pr.yml justfile Dockerfile - id: actions - uses: tj-actions/changed-files@0874344d6ebbaa00a27da73276ae7162fadcaf69 + uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c with: files: | .github/workflows/** .devcontainer/* - id: cargo - uses: tj-actions/changed-files@0874344d6ebbaa00a27da73276ae7162fadcaf69 + uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c with: files_ignore: "Cargo.toml" files: | @@ -40,7 +40,7 @@ jobs: if: steps.cargo.outputs.any_changed == 'true' run: ./.github/list-crates.sh ${{ steps.cargo.outputs.all_changed_files }} - id: rust - uses: tj-actions/changed-files@0874344d6ebbaa00a27da73276ae7162fadcaf69 + uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c with: files: | **/*.rs @@ -57,7 +57,7 @@ jobs: info: timeout-minutes: 3 needs: meta - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - name: Info run: | @@ -74,30 +74,27 @@ jobs: actions: needs: meta if: needs.meta.outputs.actions_changed == 'true' - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - - uses: linkerd/dev/actions/setup-tools@v43 - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b + - uses: linkerd/dev/actions/setup-tools@v45 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 - run: just action-lint - run: just action-dev-check rust: needs: meta if: needs.meta.outputs.cargo_changed == 'true' || needs.meta.outputs.rust_changed == 'true' - runs-on: ubuntu-latest - container: ghcr.io/linkerd/dev:v43-rust + 
runs-on: ubuntu-24.04 + container: ghcr.io/linkerd/dev:v45-rust permissions: contents: read timeout-minutes: 20 steps: - run: git config --global --add safe.directory "$PWD" # actions/runner#2033 - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b - - uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + - uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 - run: just fetch - - name: Run cargo deny check bans licenses sources - uses: EmbarkStudios/cargo-deny-action@3f4a782664881cf5725d0ffd23969fcce89fd868 - with: - command: check bans licenses sources + - run: cargo deny --all-features check bans licenses sources - run: just check-fmt - run: just clippy - run: just doc @@ -110,15 +107,15 @@ jobs: needs: meta if: needs.meta.outputs.cargo_changed == 'true' timeout-minutes: 20 - runs-on: ubuntu-latest - container: ghcr.io/linkerd/dev:v43-rust + runs-on: ubuntu-24.04 + container: ghcr.io/linkerd/dev:v45-rust strategy: matrix: crate: ${{ fromJson(needs.meta.outputs.cargo_crates) }} steps: - run: git config --global --add safe.directory "$PWD" # actions/runner#2033 - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b - - uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + - uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 - run: just fetch - run: just check-crate ${{ matrix.crate }} @@ -126,9 +123,11 @@ jobs: needs: meta if: needs.meta.outputs.cargo_changed == 'true' || needs.meta.outputs.rust_changed == 'true' timeout-minutes: 20 - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 + env: + WAIT_TIMEOUT: 2m steps: - - uses: linkerd/dev/actions/setup-tools@v43 + - uses: linkerd/dev/actions/setup-tools@v45 - name: scurl https://run.linkerd.io/install-edge | sh run: | scurl https://run.linkerd.io/install-edge | sh @@ -137,12 +136,12 @@ jobs: 
tag=$(linkerd version --client --short) echo "linkerd $tag" echo "LINKERD_TAG=$tag" >> "$GITHUB_ENV" - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 - run: just docker - - run: just-k3d create + - run: just k3d-create - run: just k3d-load-linkerd - run: just linkerd-install - - run: just linkerd-check-contol-plane-proxy + - run: just linkerd-check-control-plane-proxy env: TMPDIR: ${{ runner.temp }} @@ -150,7 +149,7 @@ jobs: timeout-minutes: 3 needs: [meta, actions, rust, rust-crates, linkerd-install] if: always() - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 permissions: contents: write @@ -169,7 +168,7 @@ jobs: if: contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') run: exit 1 - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 if: needs.meta.outputs.is_dependabot == 'true' && needs.meta.outputs.any_changed == 'true' - name: "Merge dependabot changes" if: needs.meta.outputs.is_dependabot == 'true' && needs.meta.outputs.any_changed == 'true' diff --git a/.github/workflows/release-weekly.yml b/.github/workflows/release-weekly.yml index 4bf63790ed..2154e19e73 100644 --- a/.github/workflows/release-weekly.yml +++ b/.github/workflows/release-weekly.yml @@ -13,7 +13,7 @@ concurrency: jobs: last-release: if: github.repository == 'linkerd/linkerd2-proxy' # Don't run this in forks. 
- runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 timeout-minutes: 5 env: GH_REPO: ${{ github.repository }} @@ -41,10 +41,10 @@ jobs: last-commit: needs: last-release - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 timeout-minutes: 5 steps: - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 - name: Check if the most recent commit is after the last release id: recency env: @@ -62,7 +62,7 @@ jobs: trigger-release: needs: [last-release, last-commit] if: needs.last-release.outputs.recent == 'false' && needs.last-commit.outputs.after-release == 'true' - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 timeout-minutes: 5 permissions: actions: write diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 526785e65e..47a6e51d17 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -48,17 +48,17 @@ on: env: CARGO_INCREMENTAL: 0 CARGO_NET_RETRY: 10 - RUSTFLAGS: "-D warnings -A deprecated" + RUSTFLAGS: "-D warnings -A deprecated --cfg tokio_unstable" RUSTUP_MAX_RETRIES: 10 concurrency: - group: ${{ github.workflow }}-${{ github.head_ref }} + group: ${{ github.workflow }}-${{ inputs.ref || github.head_ref }} cancel-in-progress: true jobs: meta: timeout-minutes: 5 - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - id: meta env: @@ -70,6 +70,7 @@ jobs: if [[ "$GITHUB_EVENT_NAME" == pull_request ]]; then echo version="0.0.0-test.${GITHUB_SHA:0:7}" echo archs='["amd64"]' + echo oses='["linux"]' exit 0 fi >> "$GITHUB_OUTPUT" if ! 
[[ "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+(-[0-9A-Za-z-]+)?(\+[0-9A-Za-z-]+)?$ ]]; then @@ -78,13 +79,14 @@ jobs: fi ( echo version="${VERSION#v}" echo archs='["amd64", "arm64", "arm"]' + echo oses='["linux", "windows"]' ) >> "$GITHUB_OUTPUT" - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 if: github.event_name == 'pull_request' - id: changed if: github.event_name == 'pull_request' - uses: tj-actions/changed-files@0874344d6ebbaa00a27da73276ae7162fadcaf69 + uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c with: files: | .github/workflows/release.yml @@ -93,6 +95,7 @@ jobs: outputs: archs: ${{ steps.meta.outputs.archs }} + oses: ${{ steps.meta.outputs.oses }} version: ${{ steps.meta.outputs.version }} package: ${{ github.event_name == 'workflow_dispatch' || steps.changed.outputs.any_changed == 'true' }} profile: ${{ inputs.profile || 'release' }} @@ -105,7 +108,7 @@ jobs: info: needs: meta - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 timeout-minutes: 3 steps: - name: Inputs @@ -126,38 +129,48 @@ jobs: strategy: matrix: arch: ${{ fromJson(needs.meta.outputs.archs) }} + os: ${{ fromJson(needs.meta.outputs.oses) }} libc: [gnu] # musl + exclude: + - os: windows + arch: arm64 + - os: windows + arch: arm # If we're not actually building on a release tag, don't short-circuit on # errors. This helps us know whether a failure is platform-specific. 
continue-on-error: ${{ needs.meta.outputs.publish != 'true' }} - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 timeout-minutes: 40 - container: docker://ghcr.io/linkerd/dev:v43-rust-musl + container: docker://ghcr.io/linkerd/dev:v45-rust-musl env: LINKERD2_PROXY_VENDOR: ${{ github.repository_owner }} LINKERD2_PROXY_VERSION: ${{ needs.meta.outputs.version }} steps: + # TODO: add to dev image + - name: Install MinGW + if: matrix.os == 'windows' + run: apt-get update && apt-get install mingw-w64 -y - name: Configure git run: git config --global --add safe.directory "$PWD" # actions/runner#2033 - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 with: ref: ${{ needs.meta.outputs.ref }} - - uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 + - uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 with: key: ${{ matrix.arch }} - run: just fetch - - run: just arch=${{ matrix.arch }} libc=${{ matrix.libc }} rustup - - run: just arch=${{ matrix.arch }} libc=${{ matrix.libc }} profile=${{ needs.meta.outputs.profile }} build - - run: just arch=${{ matrix.arch }} libc=${{ matrix.libc }} profile=${{ needs.meta.outputs.profile }} package - - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 + - run: just arch=${{ matrix.arch }} libc=${{ matrix.libc }} os=${{ matrix.os }} rustup + - run: just arch=${{ matrix.arch }} libc=${{ matrix.libc }} os=${{ matrix.os }} profile=${{ needs.meta.outputs.profile }} build + - run: just arch=${{ matrix.arch }} libc=${{ matrix.libc }} os=${{ matrix.os }} profile=${{ needs.meta.outputs.profile }} package + - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 with: - name: ${{ matrix.arch }}-artifacts + name: ${{ matrix.arch }}-${{ matrix.os }}-artifacts path: target/package/* publish: needs: [meta, package] - runs-on: ubuntu-24.04 timeout-minutes: 5 permissions: 
actions: write @@ -174,13 +187,13 @@ jobs: git config --global user.name "$GITHUB_USERNAME" git config --global user.email "$GITHUB_USERNAME"@users.noreply.github.com # Tag the release. - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 with: token: ${{ secrets.LINKERD2_PROXY_GITHUB_TOKEN || github.token }} ref: ${{ needs.meta.outputs.ref }} - run: git tag -a -m "$VERSION" "$TAG" # Fetch the artifacts. - - uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e + - uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e with: path: artifacts - run: du -h artifacts/**/* @@ -188,7 +201,7 @@ jobs: - if: needs.meta.outputs.publish == 'true' run: git push origin "$TAG" - if: needs.meta.outputs.publish == 'true' - uses: softprops/action-gh-release@9d7c94cfd0a1f3ed45544c887983e9fa900f0564 + uses: softprops/action-gh-release@c95fe1489396fe8a9eb87c0abf8aa5b2ef267fda with: name: ${{ env.VERSION }} tag_name: ${{ env.TAG }} @@ -212,7 +225,7 @@ jobs: needs: publish if: always() timeout-minutes: 3 - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - name: Results run: | diff --git a/.github/workflows/shellcheck.yml b/.github/workflows/shellcheck.yml index 4ea79dc457..845d67263b 100644 --- a/.github/workflows/shellcheck.yml +++ b/.github/workflows/shellcheck.yml @@ -13,8 +13,8 @@ on: jobs: sh-lint: timeout-minutes: 5 - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - - uses: linkerd/dev/actions/setup-tools@v43 - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b + - uses: linkerd/dev/actions/setup-tools@v45 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 - run: just sh-lint diff --git a/.github/workflows/toolchain.yml b/.github/workflows/toolchain.yml index 80ac5de191..7d99b92214 100644 --- a/.github/workflows/toolchain.yml +++ b/.github/workflows/toolchain.yml @@ -13,10 +13,10 @@ permissions: jobs: devcontainer: - 
runs-on: ubuntu-latest - container: ghcr.io/linkerd/dev:v43-rust + runs-on: ubuntu-24.04 + container: ghcr.io/linkerd/dev:v45-rust steps: - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 - run: git config --global --add safe.directory "$PWD" # actions/runner#2033 - run: | VERSION_REGEX='channel = "([0-9]+\.[0-9]+\.[0-9]+)"' @@ -35,10 +35,10 @@ jobs: workflows: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - - uses: linkerd/dev/actions/setup-tools@v43 - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b + - uses: linkerd/dev/actions/setup-tools@v45 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 - shell: bash run: | VERSION_REGEX='channel = "([0-9]+\.[0-9]+\.[0-9]+)"' diff --git a/.gitignore b/.gitignore index 567abde987..36de45fb1a 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,5 @@ +.cargo +**/.cargo target **/target **/corpus diff --git a/Cargo.lock b/Cargo.lock index a416da3aa3..de8bbcb4d2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,21 +1,21 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
-version = 3 +version = 4 [[package]] name = "addr2line" -version = "0.21.0" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" +checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" dependencies = [ "gimli", ] [[package]] -name = "adler" -version = "1.0.2" +name = "adler2" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" +checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" [[package]] name = "adler32" @@ -30,10 +30,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", - "getrandom", + "getrandom 0.2.15", "once_cell", "version_check", - "zerocopy", + "zerocopy 0.7.35", ] [[package]] @@ -45,26 +45,41 @@ dependencies = [ "memchr", ] +[[package]] +name = "aligned-vec" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc890384c8602f339876ded803c97ad529f3842aba97f6392b3dba0dd171769b" +dependencies = [ + "equator", +] + +[[package]] +name = "allocator-api2" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" + [[package]] name = "anyhow" -version = "1.0.82" +version = "1.0.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f538837af36e6f6a9be0faa67f9a314f8119e4e4b5867c6ab40ed60360142519" +checksum = "dcfed56ad506cb2c684a14971b8861fdc3baaaae314b9e5f9bb532cbe3ba7a4f" [[package]] name = "arbitrary" -version = "1.3.2" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d5a26814d8dcb93b0e5a0ff3c6d80a8843bafb21b39e8e18a6f05471870e110" +checksum = 
"dde20b3d026af13f561bdd0f15edf01fc734f0dafcedbaf42bba506a9517f223" dependencies = [ "derive_arbitrary", ] [[package]] name = "asn1-rs" -version = "0.6.1" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22ad1373757efa0f70ec53939aabc7152e1591cb485208052993070ac8d2429d" +checksum = "56624a96882bb8c26d61312ae18cb45868e5a9992ea73c58e45c3101e56a1e60" dependencies = [ "asn1-rs-derive", "asn1-rs-impl", @@ -72,15 +87,15 @@ dependencies = [ "nom", "num-traits", "rusticata-macros", - "thiserror", + "thiserror 2.0.12", "time", ] [[package]] name = "asn1-rs-derive" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7378575ff571966e99a744addeff0bff98b8ada0dedf1956d59e634db95eaac1" +checksum = "3109e49b1e4909e9db6515a30c633684d68cdeaa252f215214cb4fa1a5bfee2c" dependencies = [ "proc-macro2", "quote", @@ -99,11 +114,22 @@ dependencies = [ "syn", ] +[[package]] +name = "async-recursion" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "async-stream" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" dependencies = [ "async-stream-impl", "futures-core", @@ -112,9 +138,9 @@ dependencies = [ [[package]] name = "async-stream-impl" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", @@ -123,35 +149,40 @@ dependencies = [ [[package]] 
name = "async-trait" -version = "0.1.80" +version = "0.1.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" +checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" dependencies = [ "proc-macro2", "quote", "syn", ] +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + [[package]] name = "autocfg" -version = "1.2.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1fdabc7756949593fe60f30ec81974b613357de856987752631dea1e3394c80" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "axum" -version = "0.6.20" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" +checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f" dependencies = [ "async-trait", "axum-core", - "bitflags 1.3.2", "bytes", "futures-util", "http", "http-body", - "hyper", + "http-body-util", "itoa", "matchit", "memchr", @@ -161,41 +192,44 @@ dependencies = [ "rustversion", "serde", "sync_wrapper", - "tower", + "tower 0.5.2", "tower-layer", "tower-service", ] [[package]] name = "axum-core" -version = "0.3.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" +checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" dependencies = [ "async-trait", "bytes", "futures-util", "http", "http-body", + "http-body-util", "mime", + "pin-project-lite", "rustversion", + "sync_wrapper", "tower-layer", "tower-service", ] [[package]] name = "backtrace" -version = "0.3.71" +version = "0.3.74" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d" +checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" dependencies = [ "addr2line", - "cc", "cfg-if", "libc", "miniz_oxide", "object", "rustc-demangle", + "windows-targets 0.52.6", ] [[package]] @@ -204,12 +238,6 @@ version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" -[[package]] -name = "base64" -version = "0.21.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" - [[package]] name = "base64" version = "0.22.0" @@ -218,16 +246,14 @@ checksum = "9475866fec1451be56a3c2400fd081ff546538961565ccb5b7142cbd22bc7a51" [[package]] name = "bindgen" -version = "0.66.1" +version = "0.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2b84e06fc203107bfbad243f4aba2af864eb7db3b1cf46ea0a023b0b433d2a7" +checksum = "f49d8fed880d473ea71efb9bf597651e77201bdd4893efe54c9e5d65ae04ce6f" dependencies = [ "bitflags 2.4.2", "cexpr", "clang-sys", - "lazy_static", - "lazycell", - "peeking_take_while", + "itertools", "proc-macro2", "quote", "regex", @@ -259,44 +285,57 @@ dependencies = [ [[package]] name = "boring" -version = "3.1.0" +version = "4.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ae1aba472e42d3cf45ac6d0a6c8fc3ddf743871209e1b40229aed9fbdf48ece" +checksum = "ecd4d65a24a5e58e9b820723e496bfa920dd0afd31676646c81cfc3b6f34e039" dependencies = [ "bitflags 2.4.2", "boring-sys", "foreign-types", "libc", - "once_cell", + "openssl-macros", ] [[package]] name = "boring-sys" -version = "3.1.0" +version = "4.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ceced5be0047c7c48d77599535fd7f0a81c1b0f0a1e97e7eece24c45022bb481" +checksum 
= "0c9a2a6a85b9cdadd64a1856ac5632afe0816518e20aadd372f4e4172aa94e2a" dependencies = [ + "autocfg", "bindgen", "cmake", "fs_extra", "fslock", ] +[[package]] +name = "bumpalo" +version = "3.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + [[package]] name = "bytes" -version = "1.6.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" +checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" [[package]] name = "cc" -version = "1.0.95" +version = "1.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d32a725bc159af97c3e629873bb9f88fb8cf8a4867175f76dc987815ea07c83b" +checksum = "525046617d8376e3db1deffb079e91cef90a89fc3ca5c185bbf8c9ecdd15cd5c" dependencies = [ "jobserver", "libc", - "once_cell", + "shlex", ] [[package]] @@ -316,9 +355,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "clang-sys" -version = "1.7.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67523a3b4be3ce1989d607a828d036249522dd9c1c8de7f4dd2dae43a37369d1" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" dependencies = [ "glob", "libc", @@ -327,40 +366,70 @@ dependencies = [ [[package]] name = "cmake" -version = "0.1.50" +version = "0.1.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31c789563b815f77f4250caee12365734369f942439b7defd71e18a48197130" +checksum = "e7caa3f9de89ddbe2c607f4101924c5abec803763ae9534e4f4d7d8f84aa81f0" dependencies = [ "cc", ] [[package]] name = 
"cpp_demangle" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e8227005286ec39567949b33df9896bcadfa6051bccca2488129f108ca23119" +checksum = "96e58d342ad113c2b878f16d5d034c03be492ae460cdbc02b7f0f2284d310c7d" dependencies = [ "cfg-if", ] [[package]] name = "cpufeatures" -version = "0.2.12" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" +checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" dependencies = [ "libc", ] [[package]] name = "crc32fast" -version = "1.4.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa" +checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" dependencies = [ "cfg-if", ] +[[package]] +name = "critical-section" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "790eea4361631c5e7d22598ecd5723ff611904e3344ce8720784c93e3d83d40b" + +[[package]] +name = "crossbeam-channel" +version = "0.5.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" + [[package]] name = "crypto-common" version = "0.1.6" @@ -373,9 +442,9 @@ dependencies = [ [[package]] name = "data-encoding" -version = 
"2.6.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" +checksum = "575f75dfd25738df5b91b8e43e14d44bda14637a58fae779fd2b064f8bf3e010" [[package]] name = "debugid" @@ -398,9 +467,9 @@ dependencies = [ [[package]] name = "der-parser" -version = "9.0.0" +version = "10.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cd0a5c643689626bec213c4d8bd4d96acc8ffdb4ad4bb6bc16abf27d5f4b553" +checksum = "07da5016415d5a3c4dd39b11ed26f915f52fc4e0dc197d87908bc916e51bc1a6" dependencies = [ "asn1-rs", "displaydoc", @@ -412,18 +481,18 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.11" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +checksum = "28cfac68e08048ae1883171632c2aef3ebc555621ae56fbccce1cbf22dd7f058" dependencies = [ "powerfmt", ] [[package]] name = "derive_arbitrary" -version = "1.3.2" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" +checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2", "quote", @@ -442,9 +511,9 @@ dependencies = [ [[package]] name = "displaydoc" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", @@ -453,25 +522,26 @@ dependencies = [ [[package]] name = "drain" -version = "0.1.2" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d105028bd2b5dfcb33318fd79a445001ead36004dd8dffef1bdd7e493d8bc1e" +checksum = 
"599214ee8a1d13a3a422a016834d5cf71ff984a38ea463f30677e62348161b7f" dependencies = [ "tokio", - "tower", + "tower-layer", + "tower-service", ] [[package]] name = "dtoa" -version = "1.0.9" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcbb2bf8e87535c23f7a8a321e364ce21462d0ff10cb6407820e8e96dfff6653" +checksum = "d6add3b8cff394282be81f3fc1a0605db594ed69890078ca6e2cab1c408bcf04" [[package]] name = "either" -version = "1.11.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a47c1c47d2f5964e29c61246e81db715514cd532db6b5116a25ea3c03d6780a2" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" [[package]] name = "enum-as-inner" @@ -485,17 +555,37 @@ dependencies = [ "syn", ] +[[package]] +name = "equator" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4711b213838dfee0117e3be6ac926007d7f433d7bbe33595975d4190cb07e6fc" +dependencies = [ + "equator-macro", +] + +[[package]] +name = "equator-macro" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44f23cf4b44bfce11a86ace86f8a73ffdec849c9fd00a386a53d278bd9e81fb3" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "equivalent" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "errno" -version = "0.3.8" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" +checksum = "976dd42dc7e85965fe702eb8164f21f450704bdde31faefd6471dba214cb594e" dependencies = [ "libc", "windows-sys 0.52.0", @@ -503,9 +593,9 @@ dependencies = [ [[package]] name = "fastrand" 
-version = "2.1.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" [[package]] name = "findshlibs" @@ -527,9 +617,9 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flate2" -version = "1.0.29" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4556222738635b7a3417ae6130d8f52201e45a0c4d1a907f0826383adb5f85e7" +checksum = "7ced92e76e966ca2fd84c8f7aa01a4aea65b0eb6648d72f7c8f3e2764a67fece" dependencies = [ "crc32fast", "miniz_oxide", @@ -541,6 +631,12 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foldhash" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" + [[package]] name = "foreign-types" version = "0.5.0" @@ -595,9 +691,9 @@ dependencies = [ [[package]] name = "futures" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" dependencies = [ "futures-channel", "futures-core", @@ -610,9 +706,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", "futures-sink", @@ -620,15 +716,15 @@ dependencies = [ [[package]] name = "futures-core" 
-version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-executor" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" dependencies = [ "futures-core", "futures-task", @@ -637,31 +733,49 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" + +[[package]] +name = "futures-macro" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] [[package]] name = "futures-sink" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] name = "futures-task" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" + +[[package]] +name = "futures-timer" +version = "3.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" [[package]] name = "futures-util" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ "futures-channel", "futures-core", "futures-io", + "futures-macro", "futures-sink", "futures-task", "memchr", @@ -670,6 +784,19 @@ dependencies = [ "slab", ] +[[package]] +name = "generator" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc6bd114ceda131d3b1d665eba35788690ad37f5916457286b32ab6fd3c438dd" +dependencies = [ + "cfg-if", + "libc", + "log", + "rustversion", + "windows", +] + [[package]] name = "generic-array" version = "0.14.7" @@ -682,26 +809,57 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.14" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.11.0+wasi-snapshot-preview1", +] + +[[package]] +name = "getrandom" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c" +checksum = "43a49c392881ce6d5c3b8cb70f98717b7c07aabbdff06687b9030dbfbe2725f8" dependencies = [ "cfg-if", "libc", - "wasi", + "wasi 0.13.3+wasi-0.2.2", + "windows-targets 0.52.6", ] [[package]] name = "gimli" -version = "0.28.1" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" [[package]] name = "glob" -version = "0.3.1" +version = "0.3.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" + +[[package]] +name = "governor" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" +checksum = "3cbe789d04bf14543f03c4b60cd494148aa79438c8440ae7d81a7778147745c3" +dependencies = [ + "cfg-if", + "futures-sink", + "futures-timer", + "futures-util", + "hashbrown 0.15.2", + "nonzero_ext", + "parking_lot", + "portable-atomic", + "smallvec", + "spinning_top", + "web-time", +] [[package]] name = "gzip-header" @@ -714,17 +872,17 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.26" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" +checksum = "5017294ff4bb30944501348f6f8e42e6ad28f42c8bbef7a74029aff064a4e3c2" dependencies = [ + "atomic-waker", "bytes", "fnv", "futures-core", "futures-sink", - "futures-util", "http", - "indexmap 2.2.6", + "indexmap 2.9.0", "slab", "tokio", "tokio-util", @@ -739,9 +897,14 @@ checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] name = "hashbrown" -version = "0.14.3" +version = "0.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" +checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" +dependencies = [ + "allocator-api2", + "equivalent", + "foldhash", +] [[package]] name = "heck" @@ -751,9 +914,9 @@ checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "hermit-abi" -version = "0.3.6" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd5256b483761cd23699d0da46cc6fd2ee3be420bbe6d020ae4a091e70b7e9fd" +checksum = 
"d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" [[package]] name = "hex" @@ -763,22 +926,25 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hickory-proto" -version = "0.24.1" +version = "0.25.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07698b8420e2f0d6447a436ba999ec85d8fbf2a398bbd737b82cac4a2e96e512" +checksum = "6d844af74f7b799e41c78221be863bade11c430d46042c3b49ca8ae0c6d27287" dependencies = [ + "async-recursion", "async-trait", "cfg-if", + "critical-section", "data-encoding", "enum-as-inner", "futures-channel", "futures-io", "futures-util", - "idna 0.4.0", + "idna", "ipnet", "once_cell", - "rand", - "thiserror", + "rand 0.9.0", + "ring", + "thiserror 2.0.12", "tinyvec", "tokio", "tracing", @@ -787,41 +953,41 @@ dependencies = [ [[package]] name = "hickory-resolver" -version = "0.24.1" +version = "0.25.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28757f23aa75c98f254cf0405e6d8c25b831b32921b050a66692427679b1f243" +checksum = "a128410b38d6f931fcc6ca5c107a3b02cabd6c05967841269a4ad65d23c44331" dependencies = [ "cfg-if", "futures-util", "hickory-proto", "ipconfig", - "lru-cache", + "moka", "once_cell", "parking_lot", - "rand", + "rand 0.9.0", "resolv-conf", "smallvec", - "thiserror", + "thiserror 2.0.12", "tokio", "tracing", ] [[package]] name = "hostname" -version = "0.3.1" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" +checksum = "a56f203cd1c76362b69e3863fd987520ac36cf70a8c92627449b2f64a8cf7d65" dependencies = [ + "cfg-if", "libc", - "match_cfg", - "winapi", + "windows-link", ] [[package]] name = "http" -version = "0.2.11" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" +checksum = 
"f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" dependencies = [ "bytes", "fnv", @@ -830,20 +996,33 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.6" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" +dependencies = [ + "bytes", + "http", +] + +[[package]] +name = "http-body-util" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" +checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" dependencies = [ "bytes", + "futures-core", "http", + "http-body", "pin-project-lite", + "tokio", ] [[package]] name = "httparse" -version = "1.8.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" +checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" [[package]] name = "httpdate" @@ -851,21 +1030,14 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" -[[package]] -name = "humantime" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" - [[package]] name = "hyper" -version = "0.14.28" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" +checksum = "cc2b571658e38e0c01b1fdca3bbbe93c00d3d71693ff2770043f8c29bc7d6f80" dependencies = [ "bytes", "futures-channel", - "futures-core", "futures-util", "h2", "http", @@ -874,10 +1046,8 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2", + "smallvec", "tokio", - "tower-service", 
- "tracing", "want", ] @@ -887,172 +1057,374 @@ version = "0.1.0" dependencies = [ "futures", "http", + "http-body", "hyper", "pin-project", "tokio", "tokio-test", - "tower", + "tower 0.5.2", ] [[package]] name = "hyper-timeout" -version = "0.4.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" +checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" dependencies = [ "hyper", + "hyper-util", "pin-project-lite", "tokio", - "tokio-io-timeout", + "tower-service", ] [[package]] -name = "idna" -version = "0.4.0" +name = "hyper-util" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" +checksum = "497bbc33a26fdd4af9ed9c70d63f61cf56a938375fbb32df34db9b1cd6d643f2" dependencies = [ - "unicode-bidi", - "unicode-normalization", + "bytes", + "futures-channel", + "futures-util", + "http", + "http-body", + "hyper", + "libc", + "pin-project-lite", + "socket2", + "tokio", + "tower-service", + "tracing", ] [[package]] -name = "idna" -version = "0.5.0" +name = "icu_collections" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" dependencies = [ - "unicode-bidi", - "unicode-normalization", + "displaydoc", + "yoke", + "zerofrom", + "zerovec", ] [[package]] -name = "indexmap" -version = "1.9.3" +name = "icu_locid" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" dependencies = [ - "autocfg", - "hashbrown 0.12.3", + "displaydoc", + "litemap", + "tinystr", + 
"writeable", + "zerovec", ] [[package]] -name = "indexmap" -version = "2.2.6" +name = "icu_locid_transform" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" +checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" dependencies = [ - "equivalent", - "hashbrown 0.14.3", + "displaydoc", + "icu_locid", + "icu_locid_transform_data", + "icu_provider", + "tinystr", + "zerovec", ] [[package]] -name = "ipconfig" -version = "0.3.2" +name = "icu_locid_transform_data" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" +checksum = "7515e6d781098bf9f7205ab3fc7e9709d34554ae0b21ddbcb5febfa4bc7df11d" + +[[package]] +name = "icu_normalizer" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" dependencies = [ - "socket2", - "widestring", - "windows-sys 0.48.0", - "winreg", + "displaydoc", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "utf16_iter", + "utf8_iter", + "write16", + "zerovec", ] [[package]] -name = "ipnet" -version = "2.9.0" +name = "icu_normalizer_data" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" +checksum = "c5e8338228bdc8ab83303f16b797e177953730f601a96c25d10cb3ab0daa0cb7" [[package]] -name = "itertools" -version = "0.12.1" +name = "icu_properties" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" +checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" dependencies = [ - "either", + "displaydoc", + 
"icu_collections", + "icu_locid_transform", + "icu_properties_data", + "icu_provider", + "tinystr", + "zerovec", ] [[package]] -name = "itoa" -version = "1.0.11" +name = "icu_properties_data" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" +checksum = "85fb8799753b75aee8d2a21d7c14d9f38921b54b3dbda10f5a3c7a7b82dba5e2" [[package]] -name = "jemalloc-sys" -version = "0.5.4+5.3.0-patched" +name = "icu_provider" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac6c1946e1cea1788cbfde01c993b52a10e2da07f4bac608228d1bed20bfebf2" +checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" dependencies = [ - "cc", - "libc", + "displaydoc", + "icu_locid", + "icu_provider_macros", + "stable_deref_trait", + "tinystr", + "writeable", + "yoke", + "zerofrom", + "zerovec", ] [[package]] -name = "jemallocator" -version = "0.5.4" +name = "icu_provider_macros" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0de374a9f8e63150e6f5e8a60cc14c668226d7a347d8aee1a45766e3c4dd3bc" +checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ - "jemalloc-sys", - "libc", + "proc-macro2", + "quote", + "syn", ] [[package]] -name = "jobserver" -version = "0.1.31" +name = "idna" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2b099aaa34a9751c5bf0878add70444e1ed2dd73f347be99003d4577277de6e" +checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" dependencies = [ - "libc", + "idna_adapter", + "smallvec", + "utf8_iter", ] [[package]] -name = "lazy_static" -version = "1.4.0" +name = "idna_adapter" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +checksum = 
"daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" +dependencies = [ + "icu_normalizer", + "icu_properties", +] [[package]] -name = "lazycell" -version = "1.3.0" +name = "indexmap" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg", + "hashbrown 0.12.3", +] + +[[package]] +name = "indexmap" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" +dependencies = [ + "equivalent", + "hashbrown 0.15.2", +] + +[[package]] +name = "ipconfig" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" +dependencies = [ + "socket2", + "widestring", + "windows-sys 0.48.0", + "winreg", +] + +[[package]] +name = "ipnet" +version = "2.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" + +[[package]] +name = "itertools" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" + +[[package]] +name = "jemalloc-sys" +version = "0.5.4+5.3.0-patched" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac6c1946e1cea1788cbfde01c993b52a10e2da07f4bac608228d1bed20bfebf2" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "jemallocator" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"a0de374a9f8e63150e6f5e8a60cc14c668226d7a347d8aee1a45766e3c4dd3bc" +dependencies = [ + "jemalloc-sys", + "libc", +] + +[[package]] +name = "jiff" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f33145a5cbea837164362c7bd596106eb7c5198f97d1ba6f6ebb3223952e488" +dependencies = [ + "jiff-static", + "jiff-tzdb-platform", + "log", + "portable-atomic", + "portable-atomic-util", + "serde", + "windows-sys 0.52.0", +] + +[[package]] +name = "jiff-static" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43ce13c40ec6956157a3635d97a1ee2df323b263f09ea14165131289cb0f5c19" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "jiff-tzdb" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1283705eb0a21404d2bfd6eef2a7593d240bc42a0bdb39db0ad6fa2ec026524" + +[[package]] +name = "jiff-tzdb-platform" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "875a5a69ac2bab1a891711cf5eccbec1ce0341ea805560dcd90b7a2e925132e8" +dependencies = [ + "jiff-tzdb", +] + +[[package]] +name = "jobserver" +version = "0.1.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" +dependencies = [ + "libc", +] + +[[package]] +name = "js-sys" +version = "0.3.70" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" +dependencies = [ + "wasm-bindgen", +] + +[[package]] +name = "kubert-prometheus-process" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b89e2a641a3f74c2e7366eb050282ac4a6194b63dae5294084215c457237e47" +dependencies = [ + "libc", + "procfs", + "prometheus-client", + "tracing", +] + +[[package]] +name = "kubert-prometheus-tokio" +version = "0.2.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "639670482534c37eb44caf6f4b72cc5da2f2c06aed39d1fb0cba940569428212" +dependencies = [ + "prometheus-client", + "tokio", + "tokio-metrics", + "tracing", +] + +[[package]] +name = "lazy_static" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.153" +version = "0.2.171" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" +checksum = "c19937216e9d3aa9956d9bb8dfc0b0c8beb6058fc4f7a4dc4d850edf86a237d6" [[package]] name = "libfuzzer-sys" -version = "0.4.7" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a96cfd5557eb82f2b83fed4955246c988d331975a002961b07c81584d107e7f7" +checksum = "cf78f52d400cf2d84a3a973a78a592b4adc535739e0a5597a0da6f0c357adc75" dependencies = [ "arbitrary", "cc", - "once_cell", ] [[package]] name = "libloading" -version = "0.8.3" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" +checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" dependencies = [ "cfg-if", - "windows-targets 0.52.0", + "windows-targets 0.48.5", ] -[[package]] -name = "linked-hash-map" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" - [[package]] name = "linkerd-addr" version = "0.1.0" @@ -1060,7 +1432,7 @@ dependencies = [ "http", "ipnet", "linkerd-dns-name", - "thiserror", + "thiserror 2.0.12", ] [[package]] @@ -1068,6 +1440,7 @@ name = "linkerd-app" version = "0.1.0" dependencies = [ 
"futures", + "hyper-util", "linkerd-app-admin", "linkerd-app-core", "linkerd-app-gateway", @@ -1075,14 +1448,16 @@ dependencies = [ "linkerd-app-outbound", "linkerd-error", "linkerd-opencensus", + "linkerd-opentelemetry", "linkerd-tonic-stream", + "linkerd-workers", "rangemap", "regex", - "thiserror", + "thiserror 2.0.12", "tokio", "tokio-stream", "tonic", - "tower", + "tower 0.5.2", "tracing", ] @@ -1090,9 +1465,12 @@ dependencies = [ name = "linkerd-app-admin" version = "0.1.0" dependencies = [ + "bytes", "deflate", "futures", "http", + "http-body", + "http-body-util", "hyper", "linkerd-app-core", "linkerd-app-inbound", @@ -1100,9 +1478,9 @@ dependencies = [ "pprof", "serde", "serde_json", - "thiserror", + "thiserror 2.0.12", "tokio", - "tower", + "tower 0.5.2", "tracing", ] @@ -1115,11 +1493,12 @@ dependencies = [ "futures", "http", "http-body", + "http-body-util", "hyper", + "hyper-util", "ipnet", "linkerd-addr", "linkerd-conditional", - "linkerd-detect", "linkerd-dns", "linkerd-duplex", "linkerd-errno", @@ -1132,7 +1511,9 @@ dependencies = [ "linkerd-io", "linkerd-meshtls", "linkerd-metrics", + "linkerd-mock-http-body", "linkerd-opencensus", + "linkerd-opentelemetry", "linkerd-proxy-api-resolve", "linkerd-proxy-balance", "linkerd-proxy-client-policy", @@ -1152,7 +1533,6 @@ dependencies = [ "linkerd-stack", "linkerd-stack-metrics", "linkerd-stack-tracing", - "linkerd-system", "linkerd-tls", "linkerd-trace-context", "linkerd-tracing", @@ -1165,11 +1545,11 @@ dependencies = [ "regex", "semver", "serde_json", - "thiserror", + "thiserror 2.0.12", "tokio", "tokio-stream", "tonic", - "tower", + "tower 0.5.2", "tracing", ] @@ -1186,11 +1566,11 @@ dependencies = [ "linkerd-proxy-client-policy", "linkerd-proxy-server-policy", "once_cell", - "thiserror", + "thiserror 2.0.12", "tokio", "tokio-test", "tonic", - "tower", + "tower 0.5.2", "tower-test", "tracing", ] @@ -1203,11 +1583,14 @@ dependencies = [ "bytes", "futures", "http", + "http-body-util", "hyper", + 
"hyper-util", "libfuzzer-sys", "linkerd-app-core", "linkerd-app-test", "linkerd-http-access-log", + "linkerd-http-box", "linkerd-http-metrics", "linkerd-idle-cache", "linkerd-io", @@ -1222,11 +1605,11 @@ dependencies = [ "once_cell", "parking_lot", "rangemap", - "thiserror", + "thiserror 2.0.12", "tokio", "tokio-test", "tonic", - "tower", + "tower 0.5.2", "tracing", ] @@ -1240,7 +1623,9 @@ dependencies = [ "h2", "http", "http-body", + "http-body-util", "hyper", + "hyper-util", "ipnet", "linkerd-app", "linkerd-app-admin", @@ -1260,7 +1645,7 @@ dependencies = [ "tokio-rustls", "tokio-stream", "tonic", - "tower", + "tower 0.5.2", "tracing", "tracing-subscriber", ] @@ -1272,21 +1657,30 @@ dependencies = [ "ahash", "bytes", "futures", + "futures-util", "http", + "http-body", + "http-body-util", "hyper", + "hyper-util", "linkerd-app-core", "linkerd-app-test", "linkerd-distribute", + "linkerd-http-box", "linkerd-http-classify", + "linkerd-http-prom", "linkerd-http-retry", "linkerd-http-route", "linkerd-identity", "linkerd-io", "linkerd-meshtls", "linkerd-meshtls-rustls", + "linkerd-mock-http-body", + "linkerd-opaq-route", "linkerd-proxy-client-policy", "linkerd-retry", "linkerd-stack", + "linkerd-tls-route", "linkerd-tonic-stream", "linkerd-tonic-watch", "linkerd-tracing", @@ -1295,11 +1689,12 @@ dependencies = [ "parking_lot", "pin-project", "prometheus-client", - "thiserror", + "thiserror 2.0.12", "tokio", + "tokio-rustls", "tokio-test", "tonic", - "tower", + "tower 0.5.2", "tower-test", "tracing", ] @@ -1312,7 +1707,9 @@ dependencies = [ "h2", "http", "http-body", + "http-body-util", "hyper", + "hyper-util", "linkerd-app-core", "linkerd-http-route", "linkerd-identity", @@ -1321,12 +1718,12 @@ dependencies = [ "parking_lot", "pin-project", "regex", - "thiserror", + "thiserror 2.0.12", "tokio", "tokio-stream", "tokio-test", "tonic", - "tower", + "tower 0.5.2", "tracing", "tracing-subscriber", ] @@ -1335,30 +1732,14 @@ dependencies = [ name = "linkerd-conditional" version 
= "0.1.0" -[[package]] -name = "linkerd-detect" -version = "0.1.0" -dependencies = [ - "async-trait", - "bytes", - "linkerd-error", - "linkerd-io", - "linkerd-stack", - "thiserror", - "tokio", - "tower", - "tracing", -] - [[package]] name = "linkerd-distribute" version = "0.1.0" dependencies = [ "ahash", - "indexmap 2.2.6", "linkerd-stack", "parking_lot", - "rand", + "rand 0.9.0", "tokio", "tokio-test", "tower-test", @@ -1373,7 +1754,8 @@ dependencies = [ "hickory-resolver", "linkerd-dns-name", "linkerd-error", - "thiserror", + "prometheus-client", + "thiserror 2.0.12", "tokio", "tracing", ] @@ -1382,7 +1764,7 @@ dependencies = [ name = "linkerd-dns-name" version = "0.1.0" dependencies = [ - "thiserror", + "thiserror 2.0.12", "untrusted", ] @@ -1407,7 +1789,7 @@ name = "linkerd-error" version = "0.1.0" dependencies = [ "futures", - "thiserror", + "thiserror 2.0.12", ] [[package]] @@ -1427,8 +1809,8 @@ dependencies = [ "futures", "pin-project", "quickcheck", - "rand", - "thiserror", + "rand 0.9.0", + "thiserror 2.0.12", "tokio", ] @@ -1438,7 +1820,7 @@ version = "0.1.0" dependencies = [ "futures-core", "http", - "humantime", + "jiff", "linkerd-identity", "linkerd-proxy-transport", "linkerd-stack", @@ -1466,14 +1848,53 @@ dependencies = [ name = "linkerd-http-classify" version = "0.1.0" dependencies = [ + "futures", "http", + "http-body", + "linkerd-error", + "linkerd-http-box", + "linkerd-stack", + "linkerd-tracing", + "pin-project", + "tokio", + "tokio-test", + "tower-test", + "tracing", +] + +[[package]] +name = "linkerd-http-detect" +version = "0.1.0" +dependencies = [ + "bytes", + "httparse", "linkerd-error", + "linkerd-http-variant", + "linkerd-io", + "linkerd-stack", + "linkerd-tracing", + "prometheus-client", + "thiserror 2.0.12", + "tokio", + "tokio-test", + "tracing", ] [[package]] name = "linkerd-http-h2" version = "0.1.0" +[[package]] +name = "linkerd-http-insert" +version = "0.1.0" +dependencies = [ + "futures", + "http", + "linkerd-stack", + 
"pin-project", + "tower 0.5.2", +] + [[package]] name = "linkerd-http-metrics" version = "0.1.0" @@ -1490,10 +1911,50 @@ dependencies = [ "parking_lot", "pin-project", "tokio", - "tower", + "tower 0.5.2", + "tracing", +] + +[[package]] +name = "linkerd-http-override-authority" +version = "0.1.0" +dependencies = [ + "http", + "linkerd-stack", + "tower 0.5.2", "tracing", ] +[[package]] +name = "linkerd-http-prom" +version = "0.1.0" +dependencies = [ + "bytes", + "futures", + "http", + "http-body", + "linkerd-error", + "linkerd-http-box", + "linkerd-metrics", + "linkerd-stack", + "parking_lot", + "pin-project", + "prometheus-client", + "thiserror 2.0.12", + "tokio", +] + +[[package]] +name = "linkerd-http-retain" +version = "0.1.0" +dependencies = [ + "http", + "http-body", + "linkerd-stack", + "pin-project", + "tower 0.5.2", +] + [[package]] name = "linkerd-http-retry" version = "0.1.0" @@ -1502,13 +1963,20 @@ dependencies = [ "futures", "http", "http-body", + "http-body-util", "hyper", "linkerd-error", + "linkerd-exp-backoff", + "linkerd-http-box", + "linkerd-metrics", + "linkerd-mock-http-body", "linkerd-stack", "linkerd-tracing", "parking_lot", - "thiserror", + "pin-project", + "thiserror 2.0.12", "tokio", + "tower 0.5.2", "tracing", ] @@ -1519,47 +1987,96 @@ dependencies = [ "http", "linkerd2-proxy-api", "maplit", - "rand", + "rand 0.9.0", "regex", - "thiserror", + "thiserror 2.0.12", "tracing", "url", ] [[package]] -name = "linkerd-identity" +name = "linkerd-http-stream-timeouts" version = "0.1.0" dependencies = [ - "linkerd-dns-name", + "futures", + "http", + "http-body", "linkerd-error", - "linkerd-metrics", - "prometheus-client", - "thiserror", + "linkerd-stack", + "parking_lot", + "pin-project", + "thiserror 2.0.12", + "tokio", "tracing", - "url", ] [[package]] -name = "linkerd-idle-cache" +name = "linkerd-http-upgrade" version = "0.1.0" dependencies = [ + "bytes", + "drain", "futures", + "http", + "http-body", + "hyper", + "hyper-util", + "linkerd-duplex", 
"linkerd-error", + "linkerd-http-box", + "linkerd-http-variant", + "linkerd-io", "linkerd-stack", - "linkerd-tracing", - "parking_lot", + "pin-project", + "thiserror 2.0.12", "tokio", - "tower", + "tower 0.5.2", "tracing", + "try-lock", ] [[package]] -name = "linkerd-io" +name = "linkerd-http-variant" version = "0.1.0" dependencies = [ - "async-trait", - "bytes", - "futures", + "http", + "thiserror 2.0.12", +] + +[[package]] +name = "linkerd-identity" +version = "0.1.0" +dependencies = [ + "linkerd-dns-name", + "linkerd-error", + "linkerd-metrics", + "prometheus-client", + "thiserror 2.0.12", + "tracing", + "url", +] + +[[package]] +name = "linkerd-idle-cache" +version = "0.1.0" +dependencies = [ + "futures", + "linkerd-error", + "linkerd-stack", + "linkerd-tracing", + "parking_lot", + "tokio", + "tower 0.5.2", + "tracing", +] + +[[package]] +name = "linkerd-io" +version = "0.1.0" +dependencies = [ + "async-trait", + "bytes", + "futures", "linkerd-errno", "pin-project", "tokio", @@ -1627,7 +2144,7 @@ dependencies = [ "ring", "rustls-pemfile", "rustls-webpki", - "thiserror", + "thiserror 2.0.12", "tokio", "tokio-rustls", "tracing", @@ -1648,11 +2165,15 @@ dependencies = [ name = "linkerd-metrics" version = "0.1.0" dependencies = [ + "bytes", "deflate", "http", + "http-body", + "http-body-util", "hyper", + "kubert-prometheus-process", + "linkerd-http-box", "linkerd-stack", - "linkerd-system", "parking_lot", "prometheus-client", "quickcheck", @@ -1660,15 +2181,30 @@ dependencies = [ "tracing", ] +[[package]] +name = "linkerd-mock-http-body" +version = "0.1.0" +dependencies = [ + "bytes", + "http", + "http-body", + "linkerd-error", + "tokio", +] + +[[package]] +name = "linkerd-opaq-route" +version = "0.1.0" + [[package]] name = "linkerd-opencensus" version = "0.1.0" dependencies = [ "futures", - "http", "http-body", "linkerd-error", "linkerd-metrics", + "linkerd-trace-context", "opencensus-proto", "tokio", "tokio-stream", @@ -1676,6 +2212,23 @@ dependencies = [ 
"tracing", ] +[[package]] +name = "linkerd-opentelemetry" +version = "0.1.0" +dependencies = [ + "futures", + "http-body", + "linkerd-error", + "linkerd-metrics", + "linkerd-trace-context", + "opentelemetry", + "opentelemetry-proto", + "opentelemetry_sdk", + "tokio", + "tonic", + "tracing", +] + [[package]] name = "linkerd-pool" version = "0.1.0" @@ -1691,7 +2244,7 @@ dependencies = [ "linkerd-pool", "linkerd-stack", "parking_lot", - "thiserror", + "thiserror 2.0.12", "tokio", "tower-test", "tracing", @@ -1704,7 +2257,7 @@ dependencies = [ "ahash", "futures", "futures-util", - "indexmap 2.2.6", + "indexmap 2.9.0", "linkerd-error", "linkerd-metrics", "linkerd-pool", @@ -1713,10 +2266,10 @@ dependencies = [ "parking_lot", "prometheus-client", "quickcheck", - "rand", + "rand 0.9.0", "tokio", "tokio-test", - "tower", + "tower 0.5.2", "tower-test", "tracing", ] @@ -1738,9 +2291,9 @@ dependencies = [ "linkerd-tonic-stream", "linkerd2-proxy-api", "pin-project", - "prost", + "prost 0.13.5", "tonic", - "tower", + "tower 0.5.2", "tracing", ] @@ -1756,9 +2309,9 @@ dependencies = [ "linkerd-proxy-balance-queue", "linkerd-proxy-core", "linkerd-stack", - "rand", + "rand 0.9.0", "tokio", - "tower", + "tower 0.5.2", "tracing", ] @@ -1785,7 +2338,7 @@ dependencies = [ "parking_lot", "pin-project", "prometheus-client", - "thiserror", + "thiserror 2.0.12", "tokio", "tokio-stream", "tokio-test", @@ -1804,14 +2357,16 @@ dependencies = [ "linkerd-error", "linkerd-exp-backoff", "linkerd-http-route", + "linkerd-opaq-route", "linkerd-proxy-api-resolve", "linkerd-proxy-core", + "linkerd-tls-route", "linkerd2-proxy-api", "maplit", "once_cell", - "prost-types", + "prost-types 0.13.5", "quickcheck", - "thiserror", + "thiserror 2.0.12", "tonic", ] @@ -1821,7 +2376,7 @@ version = "0.1.0" dependencies = [ "futures", "linkerd-error", - "tower", + "tower 0.5.2", ] [[package]] @@ -1836,7 +2391,7 @@ dependencies = [ "linkerd-stack", "tokio", "tokio-stream", - "tower", + "tower 0.5.2", "tracing", ] @@ 
-1851,25 +2406,34 @@ dependencies = [ "h2", "http", "http-body", + "http-body-util", "httparse", "hyper", "hyper-balance", - "linkerd-detect", + "hyper-util", "linkerd-duplex", "linkerd-error", "linkerd-http-box", "linkerd-http-classify", + "linkerd-http-detect", "linkerd-http-h2", + "linkerd-http-insert", + "linkerd-http-override-authority", + "linkerd-http-retain", + "linkerd-http-stream-timeouts", + "linkerd-http-upgrade", + "linkerd-http-variant", "linkerd-io", "linkerd-proxy-balance", "linkerd-stack", "linkerd-tracing", + "parking_lot", "pin-project", - "rand", - "thiserror", + "rand 0.9.0", + "thiserror 2.0.12", "tokio", "tokio-test", - "tower", + "tower 0.5.2", "tower-test", "tracing", "try-lock", @@ -1889,7 +2453,7 @@ dependencies = [ "linkerd2-proxy-api", "parking_lot", "pin-project", - "thiserror", + "thiserror 2.0.12", "tokio", "tonic", "tracing", @@ -1903,8 +2467,8 @@ dependencies = [ "linkerd-error", "linkerd-proxy-core", "pin-project", - "thiserror", - "tower", + "thiserror 2.0.12", + "tower 0.5.2", "tracing", ] @@ -1912,14 +2476,17 @@ dependencies = [ name = "linkerd-proxy-server-policy" version = "0.1.0" dependencies = [ + "governor", "http", "ipnet", "linkerd-http-route", + "linkerd-identity", "linkerd2-proxy-api", "maplit", - "prost-types", + "prost-types 0.13.5", "quickcheck", - "thiserror", + "thiserror 2.0.12", + "tokio", ] [[package]] @@ -1936,11 +2503,11 @@ dependencies = [ "rcgen", "simple_asn1", "spiffe-proto", - "thiserror", + "thiserror 2.0.12", "tokio", "tokio-test", "tonic", - "tower", + "tower 0.5.2", "tracing", "x509-parser", ] @@ -1949,9 +2516,12 @@ dependencies = [ name = "linkerd-proxy-tap" version = "0.1.0" dependencies = [ + "bytes", "futures", "http", + "http-body", "hyper", + "hyper-util", "ipnet", "linkerd-conditional", "linkerd-error", @@ -1963,13 +2533,13 @@ dependencies = [ "linkerd2-proxy-api", "parking_lot", "pin-project", - "prost-types", + "prost-types 0.13.5", "quickcheck", - "rand", - "thiserror", + "rand 0.9.0", + 
"thiserror 2.0.12", "tokio", "tonic", - "tower", + "tower 0.5.2", "tracing", ] @@ -1983,9 +2553,9 @@ dependencies = [ "linkerd-proxy-balance", "linkerd-stack", "pin-project", - "rand", + "rand 0.9.0", "tokio", - "tower", + "tower 0.5.2", ] [[package]] @@ -1998,7 +2568,7 @@ dependencies = [ "linkerd-io", "linkerd-stack", "socket2", - "thiserror", + "thiserror 2.0.12", "tokio", "tokio-stream", "tracing", @@ -2016,7 +2586,7 @@ dependencies = [ "tokio", "tokio-stream", "tokio-test", - "tower", + "tower 0.5.2", "tower-test", "tracing", ] @@ -2028,7 +2598,7 @@ dependencies = [ "futures", "linkerd-error", "linkerd-stack", - "tower", + "tower 0.5.2", "tracing", ] @@ -2041,7 +2611,7 @@ dependencies = [ "linkerd-error", "linkerd-stack", "parking_lot", - "thiserror", + "thiserror 2.0.12", "tracing", ] @@ -2063,14 +2633,14 @@ dependencies = [ "linkerd-tonic-watch", "linkerd2-proxy-api", "once_cell", - "prost-types", + "prost-types 0.13.5", "quickcheck", "regex", - "thiserror", + "thiserror 2.0.12", "tokio", "tokio-stream", "tonic", - "tower", + "tower 0.5.2", "tracing", ] @@ -2091,11 +2661,11 @@ dependencies = [ "linkerd-tracing", "parking_lot", "pin-project", - "thiserror", + "thiserror 2.0.12", "tokio", "tokio-test", "tokio-util", - "tower", + "tower 0.5.2", "tower-test", "tracing", ] @@ -2108,7 +2678,7 @@ dependencies = [ "parking_lot", "tokio", "tokio-test", - "tower", + "tower 0.5.2", "tower-test", ] @@ -2119,16 +2689,7 @@ dependencies = [ "futures", "linkerd-error", "linkerd-stack", - "tower", - "tracing", -] - -[[package]] -name = "linkerd-system" -version = "0.1.0" -dependencies = [ - "libc", - "procfs", + "tower 0.5.2", "tracing", ] @@ -2147,13 +2708,26 @@ dependencies = [ "linkerd-stack", "linkerd-tracing", "pin-project", - "thiserror", + "thiserror 2.0.12", "tokio", - "tower", + "tower 0.5.2", "tracing", "untrusted", ] +[[package]] +name = "linkerd-tls-route" +version = "0.1.0" +dependencies = [ + "linkerd-dns", + "linkerd-tls", + "linkerd2-proxy-api", + "rand 
0.9.0", + "regex", + "thiserror 2.0.12", + "tracing", +] + [[package]] name = "linkerd-tls-test-util" version = "0.1.0" @@ -2200,9 +2774,9 @@ dependencies = [ "http", "linkerd-error", "linkerd-stack", - "rand", - "thiserror", - "tower", + "rand 0.8.5", + "thiserror 1.0.69", + "tower 0.5.2", "tracing", ] @@ -2232,8 +2806,8 @@ dependencies = [ "linkerd-error", "linkerd-io", "linkerd-stack", - "prost", - "prost-build", + "prost 0.13.5", + "prost-build 0.13.5", "tokio", "tokio-test", "tracing", @@ -2254,12 +2828,17 @@ dependencies = [ "tracing", ] +[[package]] +name = "linkerd-workers" +version = "0.1.0" + [[package]] name = "linkerd2-proxy" version = "0.1.0" dependencies = [ "futures", "jemallocator", + "kubert-prometheus-tokio", "linkerd-app", "linkerd-meshtls", "linkerd-metrics", @@ -2271,25 +2850,37 @@ dependencies = [ [[package]] name = "linkerd2-proxy-api" -version = "0.13.1" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65678e4c506a7e5fdf1a664c629a9b658afa70e254dffcd24df72e937b2c0159" +checksum = "f53a8c17b8e81a58651c4e83cef64a8a3d7fba4d0ed224f3ca7e5c40265ec135" dependencies = [ "h2", "http", "ipnet", - "prost", - "prost-types", + "prost 0.13.5", + "prost-types 0.13.5", "quickcheck", - "thiserror", + "thiserror 2.0.12", "tonic", ] [[package]] name = "linux-raw-sys" -version = "0.4.13" +version = "0.4.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" + +[[package]] +name = "linux-raw-sys" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6db9c683daf087dc577b7506e9695b3d556a9f3849903fa28186283afd6809e9" + +[[package]] +name = "litemap" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" +checksum = 
"23fb14cb19457329c82206317a5663005a4d404783dc74f4252769b0d5f42856" [[package]] name = "lock_api" @@ -2303,17 +2894,21 @@ dependencies = [ [[package]] name = "log" -version = "0.4.21" +version = "0.4.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" +checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" [[package]] -name = "lru-cache" -version = "0.1.2" +name = "loom" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e24f1ad8321ca0e8a1e0ac13f23cb668e6f5466c2c57319f6a5cf1cc8e3b1c" +checksum = "419e0dc8046cb947daa77eb95ae174acfbddb7673b4151f56d1eed8e93fbfaca" dependencies = [ - "linked-hash-map", + "cfg-if", + "generator", + "scoped-tls", + "tracing", + "tracing-subscriber", ] [[package]] @@ -2322,12 +2917,6 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" -[[package]] -name = "match_cfg" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" - [[package]] name = "matchers" version = "0.1.0" @@ -2345,15 +2934,15 @@ checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" [[package]] name = "memchr" -version = "2.7.2" +version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] name = "memmap2" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe751422e4a8caa417e13c3ea66452215d7d63e19e604f4980461212f3ae1322" +checksum = "fd3f7eed9d3848f8b98834af67102b720745c4ec028fcd0aa0239277e7de374f" dependencies = [ 
"libc", ] @@ -2372,22 +2961,41 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.2" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7" +checksum = "3be647b768db090acb35d5ec5db2b0e1f1de11133ca123b9eacf5137868f892a" dependencies = [ - "adler", + "adler2", ] [[package]] name = "mio" -version = "0.8.11" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" +checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" dependencies = [ "libc", - "wasi", - "windows-sys 0.48.0", + "wasi 0.11.0+wasi-snapshot-preview1", + "windows-sys 0.52.0", +] + +[[package]] +name = "moka" +version = "0.12.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9321642ca94a4282428e6ea4af8cc2ca4eac48ac7a6a4ea8f33f76d0ce70926" +dependencies = [ + "crossbeam-channel", + "crossbeam-epoch", + "crossbeam-utils", + "loom", + "parking_lot", + "portable-atomic", + "rustc_version", + "smallvec", + "tagptr", + "thiserror 1.0.69", + "uuid", ] [[package]] @@ -2417,6 +3025,12 @@ dependencies = [ "minimal-lexical", ] +[[package]] +name = "nonzero_ext" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38bf9645c8b145698bb0b18a4637dcacbc421ea49bef2317e4fd8065a387cf21" + [[package]] name = "nu-ansi-term" version = "0.46.0" @@ -2429,11 +3043,10 @@ dependencies = [ [[package]] name = "num-bigint" -version = "0.4.4" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" dependencies = [ - "autocfg", "num-integer", "num-traits", ] @@ -2455,9 
+3068,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", ] @@ -2474,39 +3087,94 @@ dependencies = [ [[package]] name = "object" -version = "0.32.2" +version = "0.36.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" +checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" dependencies = [ "memchr", ] [[package]] name = "oid-registry" -version = "0.7.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c958dd45046245b9c3c2547369bb634eb461670b2e7e0de552905801a648d1d" +checksum = "12f40cff3dde1b6087cc5d5f5d4d65712f34016a03ed60e9c08dcc392736b5b7" dependencies = [ "asn1-rs", ] [[package]] name = "once_cell" -version = "1.19.0" +version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" +dependencies = [ + "critical-section", + "portable-atomic", +] [[package]] name = "opencensus-proto" version = "0.1.0" dependencies = [ "bytes", - "prost", - "prost-types", + "prost 0.13.5", + "prost-types 0.13.5", + "tonic", + "tonic-build", +] + +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "opentelemetry" +version = "0.29.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9e87237e2775f74896f9ad219d26a2081751187eb7c9f5c58dde20a23b95d16c" +dependencies = [ + "futures-core", + "futures-sink", + "js-sys", + "pin-project-lite", + "thiserror 2.0.12", + "tracing", +] + +[[package]] +name = "opentelemetry-proto" +version = "0.1.0" +dependencies = [ + "opentelemetry", + "opentelemetry_sdk", + "prost 0.13.5", "tonic", "tonic-build", ] +[[package]] +name = "opentelemetry_sdk" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afdefb21d1d47394abc1ba6c57363ab141be19e27cc70d0e422b7f303e4d290b" +dependencies = [ + "futures-channel", + "futures-executor", + "futures-util", + "opentelemetry", + "percent-encoding", + "rand 0.9.0", + "thiserror 2.0.12", +] + [[package]] name = "overload" version = "0.1.1" @@ -2515,9 +3183,9 @@ checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" [[package]] name = "parking_lot" -version = "0.12.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e4af0ca4f6caed20e900d564c242b8e5d4903fdacf31d3daf527b66fe6f42fb" +checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" dependencies = [ "lock_api", "parking_lot_core", @@ -2525,28 +3193,22 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.9" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", "redox_syscall", "smallvec", - "windows-targets 0.48.5", + "windows-targets 0.52.6", ] -[[package]] -name = "peeking_take_while" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" - [[package]] name = "pem" -version = "3.0.4" +version = "3.0.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae" +checksum = "38af38e8470ac9dee3ce1bae1af9c1671fffc44ddfd8bd1d0a3445bf349a8ef3" dependencies = [ "base64 0.22.0", "serde", @@ -2560,28 +3222,28 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "petgraph" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" +checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", - "indexmap 2.2.6", + "indexmap 2.9.0", ] [[package]] name = "pin-project" -version = "1.1.5" +version = "1.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" +checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.5" +version = "1.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" +checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", @@ -2590,9 +3252,9 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.2.14" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" [[package]] name = "pin-utils" @@ -2600,6 +3262,21 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = 
"portable-atomic" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "350e9b48cbc6b0e028b0473b114454c6316e57336ee184ceab6e53f72c178b3e" + +[[package]] +name = "portable-atomic-util" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8a2f0d8d040d7848a709caf78912debcc3f33ee4b3cac47d73d1e1069e83507" +dependencies = [ + "portable-atomic", +] + [[package]] name = "powerfmt" version = "0.2.0" @@ -2608,10 +3285,11 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "pprof" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef5c97c51bd34c7e742402e216abdeb44d415fbe6ae41d56b114723e953711cb" +checksum = "ebbe2f8898beba44815fdc9e5a4ae9c929e21c5dc29b0c774a15555f7f58d6d0" dependencies = [ + "aligned-vec", "backtrace", "cfg-if", "findshlibs", @@ -2620,27 +3298,30 @@ dependencies = [ "nix", "once_cell", "parking_lot", - "prost", - "prost-build", - "prost-derive", + "prost 0.12.6", + "prost-build 0.12.6", + "prost-derive 0.12.6", "sha2", "smallvec", "symbolic-demangle", "tempfile", - "thiserror", + "thiserror 1.0.69", ] [[package]] name = "ppv-lite86" -version = "0.2.17" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +dependencies = [ + "zerocopy 0.7.35", +] [[package]] name = "prettyplease" -version = "0.2.19" +version = "0.2.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ac2cf0f2e4f42b49f5ffd07dae8d746508ef7526c13940e5f524012ae6c6550" +checksum = "664ec5419c51e34154eec046ebcba56312d5a2fc3b09a06da188e1ad21afadf6" dependencies = [ "proc-macro2", "syn", @@ -2648,31 +3329,30 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.81" 
+version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d1597b0c024618f09a9c3b8655b7e430397a36d23fdafec26d6965e9eec3eba" +checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" dependencies = [ "unicode-ident", ] [[package]] name = "procfs" -version = "0.16.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "731e0d9356b0c25f16f33b5be79b1c57b562f141ebfcdb0ad8ac2c13a24293b4" +checksum = "cc5b72d8145275d844d4b5f6d4e1eef00c8cd889edb6035c21675d1bb1f45c9f" dependencies = [ "bitflags 2.4.2", "hex", - "lazy_static", "procfs-core", - "rustix", + "rustix 0.38.44", ] [[package]] name = "procfs-core" -version = "0.16.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d3554923a69f4ce04c4a754260c338f505ce22642d3830e049a399fc2059a29" +checksum = "239df02d8349b06fc07398a3a1697b06418223b1c7725085e801e7c0fc6a12ec" dependencies = [ "bitflags 2.4.2", "hex", @@ -2680,9 +3360,9 @@ dependencies = [ [[package]] name = "prometheus-client" -version = "0.22.2" +version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1ca959da22a332509f2a73ae9e5f23f9dcfc31fd3a54d71f159495bd5909baa" +checksum = "cf41c1a7c32ed72abe5082fb19505b969095c12da9f5732a4bc9878757fd087c" dependencies = [ "dtoa", "itoa", @@ -2703,19 +3383,29 @@ dependencies = [ [[package]] name = "prost" -version = "0.12.4" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "deb1435c188b76130da55f17a466d252ff7b1418b2ad3e037d127b94e3411f29" +dependencies = [ + "bytes", + "prost-derive 0.12.6", +] + +[[package]] +name = "prost" +version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0f5d036824e4761737860779c906171497f6d55681139d8312388f8fe398922" +checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" dependencies = [ "bytes", 
- "prost-derive", + "prost-derive 0.13.5", ] [[package]] name = "prost-build" -version = "0.12.4" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80b776a1b2dc779f5ee0641f8ade0125bc1298dd41a9a0c16d8bd57b42d222b1" +checksum = "22505a5c94da8e3b7c2996394d1c933236c4d743e81a410bcca4e6989fc066a4" dependencies = [ "bytes", "heck", @@ -2725,8 +3415,28 @@ dependencies = [ "once_cell", "petgraph", "prettyplease", - "prost", - "prost-types", + "prost 0.12.6", + "prost-types 0.12.6", + "regex", + "syn", + "tempfile", +] + +[[package]] +name = "prost-build" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be769465445e8c1474e9c5dac2018218498557af32d9ed057325ec9a41ae81bf" +dependencies = [ + "heck", + "itertools", + "log", + "multimap", + "once_cell", + "petgraph", + "prettyplease", + "prost 0.13.5", + "prost-types 0.13.5", "regex", "syn", "tempfile", @@ -2734,9 +3444,22 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.12.4" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" +dependencies = [ + "anyhow", + "itertools", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "prost-derive" +version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19de2de2a00075bf566bee3bd4db014b11587e84184d3f7a791bc17f1a8e9e48" +checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" dependencies = [ "anyhow", "itertools", @@ -2747,18 +3470,21 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.12.4" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3235c33eb02c1f1e212abdbe34c78b264b038fb58ca612664343271e36e55ffe" +checksum = "9091c90b0a32608e984ff2fa4091273cbdd755d54935c51d520887f4a1dbd5b0" dependencies = [ - "prost", + "prost 0.12.6", ] 
[[package]] -name = "quick-error" -version = "1.2.3" +name = "prost-types" +version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" +checksum = "52c2c1bf36ddb1a1c396b3601a3cec27c2462e45f07c386894ec3ccf5332bd16" +dependencies = [ + "prost 0.13.5", +] [[package]] name = "quickcheck" @@ -2766,14 +3492,14 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" dependencies = [ - "rand", + "rand 0.8.5", ] [[package]] name = "quote" -version = "1.0.36" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" dependencies = [ "proc-macro2", ] @@ -2785,8 +3511,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha", - "rand_core", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] + +[[package]] +name = "rand" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" +dependencies = [ + "rand_chacha 0.9.0", + "rand_core 0.9.0", + "zerocopy 0.8.14", ] [[package]] @@ -2796,7 +3533,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core 0.9.0", ] 
[[package]] @@ -2805,7 +3552,17 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom", + "getrandom 0.2.15", +] + +[[package]] +name = "rand_core" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b08f3c9802962f7e1b25113931d94f43ed9725bebc59db9d0c3e9a23b67e15ff" +dependencies = [ + "getrandom 0.3.1", + "zerocopy 0.8.14", ] [[package]] @@ -2816,35 +3573,36 @@ checksum = "f60fcc7d6849342eff22c4350c8b9a989ee8ceabc4b481253e8946b9fe83d684" [[package]] name = "rcgen" -version = "0.12.1" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48406db8ac1f3cbc7dcdb56ec355343817958a356ff430259bb07baf7607e1e1" +checksum = "75e669e5202259b5314d1ea5397316ad400819437857b90861765f24c4cf80a2" dependencies = [ "pem", "ring", + "rustls-pki-types", "time", "yasna", ] [[package]] name = "redox_syscall" -version = "0.4.1" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +checksum = "469052894dcb553421e483e4209ee581a45100d31b4018de03e5a7ad86374a7e" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.4.2", ] [[package]] name = "regex" -version = "1.10.4" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.5", - "regex-syntax 0.8.2", + "regex-automata 0.4.8", + "regex-syntax 0.8.5", ] [[package]] @@ -2858,13 +3616,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.5" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5bb987efffd3c6d0d8f5f89510bb458559eab11e4f869acb20bf845e016259cd" +checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.2", + "regex-syntax 0.8.5", ] [[package]] @@ -2875,40 +3633,38 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.2" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "resolv-conf" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" +checksum = "48375394603e3dd4b2d64371f7148fd8c7baa2680e28741f2cb8d23b59e3d4c4" dependencies = [ "hostname", - "quick-error", ] [[package]] name = "ring" -version = "0.17.8" +version = "0.17.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", "cfg-if", - "getrandom", + "getrandom 0.2.15", "libc", - "spin", "untrusted", "windows-sys 0.52.0", ] [[package]] name = "rustc-demangle" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" [[package]] name = "rustc-hash" @@ -2917,7 +3673,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] -name = "rusticata-macros" +name = "rustc_version" +version = "0.4.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" +dependencies = [ + "semver", +] + +[[package]] +name = "rusticata-macros" version = "4.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "faf0c4a6ece9950b9abdb62b1cfcf2a68b3b67a10ba445b3bb85be2a293d0632" @@ -2927,96 +3692,115 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.34" +version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" +checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ "bitflags 2.4.2", "errno", "libc", - "linux-raw-sys", + "linux-raw-sys 0.4.15", + "windows-sys 0.52.0", +] + +[[package]] +name = "rustix" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7178faa4b75a30e269c71e61c353ce2748cf3d76f0c44c393f4e60abf49b825" +dependencies = [ + "bitflags 2.4.2", + "errno", + "libc", + "linux-raw-sys 0.9.2", "windows-sys 0.52.0", ] [[package]] name = "rustls" -version = "0.21.12" +version = "0.23.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" +checksum = "822ee9188ac4ec04a2f0531e55d035fb2de73f18b41a63c70c2712503b6fb13c" dependencies = [ "log", + "once_cell", "ring", + "rustls-pki-types", "rustls-webpki", - "sct", + "subtle", + "zeroize", ] [[package]] name = "rustls-pemfile" -version = "1.0.4" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" dependencies = [ - "base64 0.21.7", + "rustls-pki-types", ] +[[package]] +name = "rustls-pki-types" +version = "1.11.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "917ce264624a4b4db1c364dcc35bfca9ded014d0a958cd47ad3e960e988ea51c" + [[package]] name = "rustls-webpki" -version = "0.101.7" +version = "0.103.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" +checksum = "fef8b8769aaccf73098557a87cd1816b4f9c7c16811c9c77142aa695c16f2c03" dependencies = [ "ring", + "rustls-pki-types", "untrusted", ] [[package]] name = "rustversion" -version = "1.0.15" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80af6f9131f277a45a3fba6ce8e2258037bb0477a67e610d3c1fe046ab31de47" +checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2" [[package]] name = "ryu" -version = "1.0.17" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" [[package]] -name = "scopeguard" -version = "1.2.0" +name = "scoped-tls" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" +checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" [[package]] -name = "sct" -version = "0.7.1" +name = "scopeguard" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" -dependencies = [ - "ring", - "untrusted", -] +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "semver" -version = "1.0.22" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" +checksum = 
"56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" [[package]] name = "serde" -version = "1.0.198" +version = "1.0.219" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9846a40c979031340571da2545a4e5b7c4163bdae79b301d5f86d03979451fcc" +checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.198" +version = "1.0.219" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e88edab869b01783ba905e7d0153f9fc1a6505a96e4ad3018011eedb838566d9" +checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" dependencies = [ "proc-macro2", "quote", @@ -3025,11 +3809,12 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.116" +version = "1.0.140" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e17db7126d17feb94eb3fad46bf1a96b034e8aacbc2e775fe81505f8b0b2813" +checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" dependencies = [ "itoa", + "memchr", "ryu", "serde", ] @@ -3071,13 +3856,13 @@ dependencies = [ [[package]] name = "simple_asn1" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" +checksum = "297f631f50729c8c99b84667867963997ec0b50f32b2a7dbcab828ef0541e8bb" dependencies = [ "num-bigint", "num-traits", - "thiserror", + "thiserror 2.0.12", "time", ] @@ -3092,15 +3877,15 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.13.2" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" +checksum = "8917285742e9f3e1683f0a9c4e6b57960b7314d0b08d30d1ecd426713ee2eee9" [[package]] name = "socket2" -version = "0.5.6" +version = "0.5.9" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" +checksum = "4f5fd57c80058a56cf5c777ab8a126398ece8e442983605d280a44ce79d0edef" dependencies = [ "libc", "windows-sys 0.52.0", @@ -3111,17 +3896,20 @@ name = "spiffe-proto" version = "0.1.0" dependencies = [ "bytes", - "prost", - "prost-types", + "prost 0.13.5", + "prost-types 0.13.5", "tonic", "tonic-build", ] [[package]] -name = "spin" -version = "0.9.8" +name = "spinning_top" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +checksum = "d96d2d1d716fb500937168cc09353ffdc7a012be8475ac7308e1bdf0e3923300" +dependencies = [ + "lock_api", +] [[package]] name = "stable_deref_trait" @@ -3129,11 +3917,17 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + [[package]] name = "symbolic-common" -version = "12.8.0" +version = "12.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cccfffbc6bb3bb2d3a26cd2077f4d055f6808d266f9d4d158797a4c60510dfe" +checksum = "771bbb5c786d76c1d19bc7fb4142d617d94555677160972f50f33867d78c80cc" dependencies = [ "debugid", "memmap2", @@ -3143,9 +3937,9 @@ dependencies = [ [[package]] name = "symbolic-demangle" -version = "12.8.0" +version = "12.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76a99812da4020a67e76c4eb41f08c87364c14170495ff780f30dd519c221a68" +checksum = "9a32cd31355f957675578c709604796d589c79f8b220ecd1d2ac2ecd4912d94a" dependencies = [ "cpp_demangle", "rustc-demangle", @@ -3154,9 +3948,9 @@ dependencies = [ [[package]] 
name = "syn" -version = "2.0.60" +version = "2.0.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "909518bc7b1c9b779f1bbf07f2929d35af9f0f37e47c6e9ef7f9dddc1e1821f3" +checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0" dependencies = [ "proc-macro2", "quote", @@ -3165,9 +3959,9 @@ dependencies = [ [[package]] name = "sync_wrapper" -version = "0.1.2" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" [[package]] name = "synstructure" @@ -3180,15 +3974,22 @@ dependencies = [ "syn", ] +[[package]] +name = "tagptr" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" + [[package]] name = "tempfile" -version = "3.10.1" +version = "3.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" +checksum = "7437ac7763b9b123ccf33c338a5cc1bac6f69b45a136c19bdd8a65e3916435bf" dependencies = [ - "cfg-if", "fastrand", - "rustix", + "getrandom 0.3.1", + "once_cell", + "rustix 1.0.2", "windows-sys 0.52.0", ] @@ -3204,18 +4005,38 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.59" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0126ad08bff79f29fc3ae6a55cc72352056dfff61e3ff8bb7129476d44b23aa" +checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" dependencies = [ - "thiserror-impl", + "thiserror-impl 
2.0.12", ] [[package]] name = "thiserror-impl" -version = "1.0.59" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1cd413b5d558b4c5bf3680e324a6fa5014e7b7c067a51e69dbdf47eb7148b66" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" dependencies = [ "proc-macro2", "quote", @@ -3234,9 +4055,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.36" +version = "0.3.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" +checksum = "8a7619e19bc266e0f9c5e6686659d394bc57973859340060a69221e57dbc0c40" dependencies = [ "deranged", "itoa", @@ -3249,25 +4070,35 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" +checksum = "c9e9a38711f559d9e3ce1cdb06dd7c5b8ea546bc90052da6d06bb76da74bb07c" [[package]] name = "time-macros" -version = "0.2.18" +version = "0.2.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" +checksum = "3526739392ec93fd8b359c8e98514cb3e8e021beb4e5f597b00a0221f8ed8a49" dependencies = [ "num-conv", "time-core", ] +[[package]] +name = "tinystr" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +dependencies = [ + "displaydoc", + "zerovec", +] + [[package]] name = "tinyvec" -version = "1.6.0" +version = "1.9.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +checksum = "09b3661f17e86524eccd4371ab0429194e0d7c008abb45f7a7495b1719463c71" dependencies = [ "tinyvec_macros", ] @@ -3280,28 +4111,27 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.37.0" +version = "1.44.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787" +checksum = "e6b88822cbe49de4185e3a4cbf8321dd487cf5fe0c5c65695fef6346371e9c48" dependencies = [ "backtrace", "bytes", "libc", "mio", - "num_cpus", "parking_lot", "pin-project-lite", "signal-hook-registry", "socket2", "tokio-macros", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "tokio-boring" -version = "3.1.0" +version = "4.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "961385cffda2112d02c6e17169965ed604b9953078f2ec55740b8cf7b55c122e" +checksum = "1ae40ba38de371a81e1d16668bf63113ef62401670e2df96445e6e2d438686b6" dependencies = [ "boring", "boring-sys", @@ -3309,31 +4139,33 @@ dependencies = [ ] [[package]] -name = "tokio-io-timeout" -version = "1.2.0" +name = "tokio-macros" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" +checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ - "pin-project-lite", - "tokio", + "proc-macro2", + "quote", + "syn", ] [[package]] -name = "tokio-macros" -version = "2.2.0" +name = "tokio-metrics" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" +checksum = "cb2bb07a8451c4c6fa8b3497ad198510d8b8dffa5df5cfb97a64102a58b113c8" dependencies = [ - "proc-macro2", - 
"quote", - "syn", + "futures-util", + "pin-project-lite", + "tokio", + "tokio-stream", ] [[package]] name = "tokio-rustls" -version = "0.24.1" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" +checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" dependencies = [ "rustls", "tokio", @@ -3341,9 +4173,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.15" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" dependencies = [ "futures-core", "pin-project-lite", @@ -3366,40 +4198,42 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.10" +version = "0.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" +checksum = "6b9590b93e6fcc1739458317cccd391ad3955e2bde8913edf6f95f9e65a8f034" dependencies = [ "bytes", "futures-core", "futures-sink", "pin-project-lite", "tokio", - "tracing", ] [[package]] name = "tonic" -version = "0.10.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d560933a0de61cf715926b9cac824d4c883c2c43142f787595e48280c40a1d0e" +checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" dependencies = [ "async-stream", "async-trait", "axum", - "base64 0.21.7", + "base64 0.22.0", "bytes", "h2", "http", "http-body", + "http-body-util", "hyper", "hyper-timeout", + "hyper-util", "percent-encoding", "pin-project", - "prost", + "prost 0.13.5", + "socket2", "tokio", "tokio-stream", - "tower", + "tower 0.4.13", "tower-layer", "tower-service", "tracing", @@ -3407,13 +4241,14 @@ dependencies = [ [[package]] name = "tonic-build" -version = "0.10.2" 
+version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d021fc044c18582b9a2408cd0dd05b1596e3ecdb5c4df822bb0183545683889" +checksum = "9557ce109ea773b399c9b9e5dca39294110b74f1f342cb347a80d1fce8c26a11" dependencies = [ "prettyplease", "proc-macro2", - "prost-build", + "prost-build 0.13.5", + "prost-types 0.13.5", "quote", "syn", ] @@ -3436,7 +4271,7 @@ dependencies = [ "indexmap 1.9.3", "pin-project", "pin-project-lite", - "rand", + "rand 0.8.5", "slab", "tokio", "tokio-util", @@ -3445,17 +4280,35 @@ dependencies = [ "tracing", ] +[[package]] +name = "tower" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" +dependencies = [ + "futures-core", + "futures-util", + "indexmap 2.9.0", + "pin-project-lite", + "sync_wrapper", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "tower-layer" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" [[package]] name = "tower-service" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tower-test" @@ -3473,11 +4326,10 @@ dependencies = [ [[package]] name = "tracing" -version = "0.1.40" +version = "0.1.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" dependencies = [ - "log", "pin-project-lite", "tracing-attributes", 
"tracing-core", @@ -3485,9 +4337,9 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.27" +version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" +checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", @@ -3496,9 +4348,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.32" +version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" +checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" dependencies = [ "once_cell", "valuable", @@ -3517,9 +4369,9 @@ dependencies = [ [[package]] name = "tracing-serde" -version = "0.1.3" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1" +checksum = "704b1aeb7be0d0a84fc9828cae51dab5970fee5088f83d1dd7ee6f6246fc6ff1" dependencies = [ "serde", "tracing-core", @@ -3527,9 +4379,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.18" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" +checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" dependencies = [ "matchers", "nu-ansi-term", @@ -3555,30 +4407,15 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "typenum" -version = "1.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" - -[[package]] -name = "unicode-bidi" -version = "0.3.15" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" +checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" [[package]] name = "unicode-ident" -version = "1.0.12" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" - -[[package]] -name = "unicode-normalization" -version = "0.1.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" -dependencies = [ - "tinyvec", -] +checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" [[package]] name = "untrusted" @@ -3588,32 +4425,47 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.5.0" +version = "2.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" +checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" dependencies = [ "form_urlencoded", - "idna 0.5.0", + "idna", "percent-encoding", ] +[[package]] +name = "utf16_iter" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + [[package]] name = "uuid" -version = "1.8.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" +checksum = "458f7a779bf54acc9f347480ac654f68407d3aab21269a6e3c9f922acd9e2da9" +dependencies = [ + "getrandom 0.3.1", +] [[package]] name = "valuable" -version = "0.1.0" +version = "0.1.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" [[package]] name = "version_check" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" [[package]] name = "want" @@ -3631,10 +4483,84 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] -name = "widestring" +name = "wasi" +version = "0.13.3+wasi-0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26816d2e1a4a36a2940b96c5296ce403917633dff8f3440e9b236ed6f6bacad2" +dependencies = [ + "wit-bindgen-rt", +] + +[[package]] +name = "wasm-bindgen" +version = "0.2.93" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" +dependencies = [ + "cfg-if", + "once_cell", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.93" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" +dependencies = [ + "bumpalo", + "log", + "once_cell", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.93" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.93" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.93" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" + +[[package]] +name = "web-time" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "widestring" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd7cf3379ca1aac9eea11fba24fd7e315d621f8dfe35c8d7d2be8b793726e07d" [[package]] name = "winapi" @@ -3658,6 +4584,76 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd04d41d93c4992d421894c18c8b43496aa748dd4c081bac0dc93eb0489272b6" +dependencies = [ + "windows-core", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-core" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba6d44ec8c2591c134257ce647b7ea6b20335bf6379a27dac5f1641fcf59f99" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-result", + "windows-strings", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-implement" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + 
+[[package]] +name = "windows-interface" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "windows-link" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38" + +[[package]] +name = "windows-result" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-strings" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" +dependencies = [ + "windows-result", + "windows-targets 0.52.6", +] + [[package]] name = "windows-sys" version = "0.48.0" @@ -3673,7 +4669,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.0", + "windows-targets 0.52.6", ] [[package]] @@ -3693,17 +4689,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm 0.52.0", - "windows_aarch64_msvc 0.52.0", - "windows_i686_gnu 0.52.0", - "windows_i686_msvc 0.52.0", - "windows_x86_64_gnu 0.52.0", - "windows_x86_64_gnullvm 0.52.0", - "windows_x86_64_msvc 0.52.0", + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", 
+ "windows_i686_gnullvm", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", ] [[package]] @@ -3714,9 +4711,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_msvc" @@ -3726,9 +4723,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_i686_gnu" @@ -3738,9 +4735,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_msvc" @@ -3750,9 +4753,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_x86_64_gnu" @@ -3762,9 +4765,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnullvm" @@ -3774,9 +4777,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_msvc" @@ -3786,9 +4789,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.0" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winreg" @@ -3800,11 +4803,32 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "wit-bindgen-rt" +version = "0.33.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" +dependencies = [ + "bitflags 2.4.2", +] + +[[package]] +name = "write16" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" + +[[package]] +name = "writeable" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" + [[package]] name = "x509-parser" -version = "0.16.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcbc162f30700d6f3f82a24bf7cc62ffe7caea42c0b2cba8bf7f3ae50cf51f69" +checksum = "4569f339c0c402346d4a75a9e39cf8dad310e287eef1ff56d4c68e5067f53460" dependencies = [ "asn1-rs", "data-encoding", @@ -3813,7 +4837,7 @@ dependencies = [ "nom", "oid-registry", "rusticata-macros", - "thiserror", + "thiserror 2.0.12", "time", ] @@ -3826,20 +4850,114 @@ dependencies = [ "time", ] +[[package]] +name = "yoke" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" +dependencies = [ + "serde", + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] + +[[package]] +name = "zerocopy" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" +dependencies = [ + "byteorder", + "zerocopy-derive 0.7.35", +] + [[package]] name = "zerocopy" -version = "0.7.32" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" +checksum = "a367f292d93d4eab890745e75a778da40909cab4d6ff8173693812f79c4a2468" dependencies = [ - "zerocopy-derive", + "zerocopy-derive 0.8.14", ] [[package]] name = 
"zerocopy-derive" -version = "0.7.32" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3931cb58c62c13adec22e38686b559c86a30565e16ad6e8510a337cedc611e1" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "zerofrom" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] + +[[package]] +name = "zeroize" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" + +[[package]] +name = "zerovec" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" +checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index 424923246d..c0c45d3c00 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,7 +16,6 @@ members = [ "linkerd/app", 
"linkerd/conditional", "linkerd/distribute", - "linkerd/detect", "linkerd/dns/name", "linkerd/dns", "linkerd/duplex", @@ -27,10 +26,18 @@ members = [ "linkerd/http/access-log", "linkerd/http/box", "linkerd/http/classify", + "linkerd/http/detect", "linkerd/http/h2", + "linkerd/http/insert", "linkerd/http/metrics", + "linkerd/http/override-authority", + "linkerd/http/prom", + "linkerd/http/retain", "linkerd/http/retry", "linkerd/http/route", + "linkerd/http/stream-timeouts", + "linkerd/http/upgrade", + "linkerd/http/variant", "linkerd/identity", "linkerd/idle-cache", "linkerd/io", @@ -39,7 +46,10 @@ members = [ "linkerd/meshtls/rustls", "linkerd/meshtls/verifier", "linkerd/metrics", + "linkerd/mock/http-body", + "linkerd/opaq-route", "linkerd/opencensus", + "linkerd/opentelemetry", "linkerd/pool", "linkerd/pool/mock", "linkerd/pool/p2c", @@ -66,16 +76,18 @@ members = [ "linkerd/stack", "linkerd/stack/metrics", "linkerd/stack/tracing", - "linkerd/system", "linkerd/tonic-stream", "linkerd/tonic-watch", "linkerd/tls", + "linkerd/tls/route", "linkerd/tls/test-util", "linkerd/tracing", "linkerd/transport-header", "linkerd/transport-metrics", + "linkerd/workers", "linkerd2-proxy", "opencensus-proto", + "opentelemetry-proto", "spiffe-proto", "tools", ] @@ -83,3 +95,44 @@ members = [ [profile.release] debug = 1 lto = true + +[workspace.package] +version = "0.1.0" +authors = ["Linkerd Developers "] +license = "Apache-2.0" +edition = "2021" +publish = false + +[workspace.dependencies] +bytes = { version = "1" } +drain = { version = "0.2", default-features = false } +h2 = { version = "0.4" } +http = { version = "1" } +http-body = { version = "1" } +hyper = { version = "1", default-features = false } +prometheus-client = { version = "0.23" } +prost = { version = "0.13" } +prost-build = { version = "0.13", default-features = false } +prost-types = { version = "0.13" } +tokio-rustls = { version = "0.26", default-features = false, features = [ + "ring", + "logging", +] } +tonic = { 
version = "0.12", default-features = false } +tonic-build = { version = "0.12", default-features = false } +tower = { version = "0.5", default-features = false } +tower-service = { version = "0.3" } +tower-test = { version = "0.4" } + +[workspace.dependencies.http-body-util] +version = "0.1.3" +default-features = false +features = ["channel"] + +[workspace.dependencies.hyper-util] +version = "0.1" +default-features = false +features = ["tokio", "tracing"] + +[workspace.dependencies.linkerd2-proxy-api] +version = "0.16.0" diff --git a/Dockerfile b/Dockerfile index 809dda662e..714965f1c8 100644 --- a/Dockerfile +++ b/Dockerfile @@ -3,7 +3,7 @@ # This is intended **DEVELOPMENT ONLY**, i.e. so that proxy developers can # easily test the proxy in the context of the larger `linkerd2` project. -ARG RUST_IMAGE=ghcr.io/linkerd/dev:v43-rust +ARG RUST_IMAGE=ghcr.io/linkerd/dev:v45-rust # Use an arbitrary ~recent edge release image to get the proxy # identity-initializing and linkerd-await wrappers. @@ -32,7 +32,7 @@ RUN --mount=type=cache,id=cargo,target=/usr/local/cargo/registry \ # Build the proxy. 
FROM fetch as build ENV CARGO_INCREMENTAL=0 -ENV RUSTFLAGS="-D warnings -A deprecated" +ENV RUSTFLAGS="-D warnings -A deprecated --cfg tokio_unstable" ARG TARGETARCH="amd64" ARG PROFILE="release" ARG LINKERD2_PROXY_VERSION="" diff --git a/deny.toml b/deny.toml index c3d4c0e939..d28a744d67 100644 --- a/deny.toml +++ b/deny.toml @@ -1,3 +1,4 @@ +[graph] targets = [ { triple = "x86_64-unknown-linux-gnu" }, { triple = "aarch64-unknown-linux-gnu" }, @@ -7,44 +8,27 @@ targets = [ [advisories] db-path = "~/.cargo/advisory-db" db-urls = ["https://github.com/rustsec/advisory-db"] -vulnerability = "deny" -unmaintained = "warn" -yanked = "deny" -notice = "warn" ignore = [] [licenses] -unlicensed = "deny" allow = [ "Apache-2.0", "BSD-2-Clause", "BSD-3-Clause", "ISC", "MIT", + "Unicode-3.0", + "Zlib", ] -deny = [] -copyleft = "deny" -allow-osi-fsf-free = "neither" -default = "deny" # Ignore local workspace license values for unpublished crates. private = { ignore = true } confidence-threshold = 0.8 exceptions = [ - { allow = [ - "Zlib", - ], name = "adler32", version = "*" }, { allow = [ "ISC", "MIT", "OpenSSL", ], name = "ring", version = "*" }, - # The Unicode-DFS-2016 license is necessary for unicode-ident because they - # use data from the unicode tables to generate the tables which are - # included in the application. We do not distribute those data files so - # this is not a problem for us. See https://github.com/dtolnay/unicode-ident/pull/9/files - { allow = [ - "Unicode-DFS-2016", - ], name = "unicode-ident", version = "*" }, ] [[licenses.clarify]] @@ -66,39 +50,27 @@ deny = [ { name = "webpki" }, ] skip = [ - # The proc-macro ecosystem is in the middle of a migration from `syn` v1 to - # `syn` v2. Allow both versions to coexist peacefully for now. - # - # Since `syn` is used by proc-macros (executed at compile time), duplicate - # versions won't have an impact on the final binary size. 
- { name = "syn" }, - # `tonic` v0.6 depends on `bitflags` v1.x, while `boring-sys` depends on - # `bitflags` v2.x. Allow both versions to coexist peacefully for now. - { name = "bitflags", version = "1" }, # `linkerd-trace-context`, `rustls-pemfile` and `tonic` depend on `base64` # v0.13.1 while `rcgen` depends on v0.21.5 { name = "base64" }, - # https://github.com/hawkw/matchers/pull/4 - { name = "regex-automata", version = "0.1" }, - { name = "regex-syntax", version = "0.6" }, - # `trust-dns-proto`, depends on `idna` v0.2.3 while `url` depends on v0.5.0 - { name = "idna" }, - # Some dependencies still use indexmap v1. - { name = "indexmap", version = "1" }, - { name = "hashbrown", version = "0.12" }, + # tonic/axum depend on a newer `tower`, which we are still catching up to. + # see #3744. + { name = "tower", version = "0.5" }, ] skip-tree = [ - # right now we have a mix of versions of this crate in the ecosystem - # procfs uses 0.36.14, tempfile uses 0.37.4 - { name = "rustix" }, - # Hyper v0.14 depends on an older socket2 version. - { name = "socket2" }, + # thiserror v2 is still propagating through the ecosystem + { name = "thiserror", version = "1" }, + # rand v0.9 is still propagating through the ecosystem + { name = "rand", version = "0.8" }, + # rustix v1.0 is still propagating through the ecosystem + { name = "rustix", version = "0.38" }, + # `pprof` uses a number of old dependencies. for now, we skip its subtree.
+ { name = "pprof" }, ] [sources] unknown-registry = "deny" unknown-git = "deny" -allow-registry = ["https://github.com/rust-lang/crates.io-index"] - -[sources.allow-org] -github = ["linkerd"] +allow-registry = [ + "https://github.com/rust-lang/crates.io-index", +] diff --git a/hyper-balance/Cargo.toml b/hyper-balance/Cargo.toml index 54238fb283..02a887b1f3 100644 --- a/hyper-balance/Cargo.toml +++ b/hyper-balance/Cargo.toml @@ -1,17 +1,18 @@ [package] name = "hyper-balance" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [dependencies] futures = { version = "0.3", default-features = false } -http = "0.2" -hyper = "0.14" +http = { workspace = true } +http-body = { workspace = true } +hyper = { workspace = true } pin-project = "1" -tower = { version = "0.4", default-features = false, features = ["load"] } +tower = { workspace = true, default-features = false, features = ["load"] } tokio = { version = "1", features = ["macros"] } [dev-dependencies] diff --git a/hyper-balance/src/lib.rs b/hyper-balance/src/lib.rs index 4531212e32..dbfe8eb6cb 100644 --- a/hyper-balance/src/lib.rs +++ b/hyper-balance/src/lib.rs @@ -1,7 +1,7 @@ #![deny(rust_2018_idioms, clippy::disallowed_methods, clippy::disallowed_types)] #![forbid(unsafe_code)] -use hyper::body::HttpBody; +use http_body::Body; use pin_project::pin_project; use std::pin::Pin; use std::task::{Context, Poll}; @@ -38,7 +38,7 @@ pub struct PendingUntilEosBody { impl TrackCompletion> for PendingUntilFirstData where - B: HttpBody, + B: Body, { type Output = http::Response>; @@ -59,7 +59,7 @@ where impl TrackCompletion> for PendingUntilEos where - B: HttpBody, + B: Body, { type Output = http::Response>; @@ -80,7 +80,7 @@ where impl Default for PendingUntilFirstDataBody where - B: HttpBody + Default, + B: 
Body + Default, { fn default() -> Self { Self { @@ -90,9 +90,9 @@ where } } -impl HttpBody for PendingUntilFirstDataBody +impl Body for PendingUntilFirstDataBody where - B: HttpBody, + B: Body, T: Send + 'static, { type Data = B::Data; @@ -102,32 +102,20 @@ where self.body.is_end_stream() } - fn poll_data( + fn poll_frame( self: Pin<&mut Self>, cx: &mut Context<'_>, - ) -> Poll>> { + ) -> Poll, Self::Error>>> { let this = self.project(); - let ret = futures::ready!(this.body.poll_data(cx)); + let ret = futures::ready!(this.body.poll_frame(cx)); - // Once a data frame is received, the handle is dropped. On subsequent calls, this + // Once a frame is received, the handle is dropped. On subsequent calls, this // is a noop. drop(this.handle.take()); Poll::Ready(ret) } - fn poll_trailers( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll, Self::Error>> { - let this = self.project(); - // If this is being called, the handle definitely should have been dropped - // already. - drop(this.handle.take()); - - this.body.poll_trailers(cx) - } - #[inline] fn size_hint(&self) -> hyper::body::SizeHint { self.body.size_hint() @@ -138,7 +126,7 @@ where impl Default for PendingUntilEosBody where - B: HttpBody + Default, + B: Body + Default, { fn default() -> Self { Self { @@ -148,7 +136,7 @@ where } } -impl HttpBody for PendingUntilEosBody { +impl Body for PendingUntilEosBody { type Data = B::Data; type Error = B::Error; @@ -157,35 +145,21 @@ impl HttpBody for PendingUntilEosBody { self.body.is_end_stream() } - fn poll_data( + fn poll_frame( self: Pin<&mut Self>, cx: &mut Context<'_>, - ) -> Poll>> { + ) -> Poll, Self::Error>>> { let mut this = self.project(); let body = &mut this.body; tokio::pin!(body); - let ret = futures::ready!(body.poll_data(cx)); + let frame = futures::ready!(body.poll_frame(cx)); // If this was the last frame, then drop the handle immediately. 
if this.body.is_end_stream() { drop(this.handle.take()); } - Poll::Ready(ret) - } - - fn poll_trailers( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll, Self::Error>> { - let this = self.project(); - let ret = futures::ready!(this.body.poll_trailers(cx)); - - // Once trailers are received, the handle is dropped immediately (in case the body - // is retained longer for some reason). - drop(this.handle.take()); - - Poll::Ready(ret) + Poll::Ready(frame) } #[inline] @@ -198,7 +172,7 @@ impl HttpBody for PendingUntilEosBody { mod tests { use super::{PendingUntilEos, PendingUntilFirstData}; use futures::future::poll_fn; - use hyper::body::HttpBody; + use http_body::{Body, Frame}; use std::collections::VecDeque; use std::io::Cursor; use std::pin::Pin; @@ -225,11 +199,13 @@ mod tests { assert_ready!(task::spawn(poll_fn(|cx| { let body = &mut body; tokio::pin!(body); - body.poll_data(cx) + body.poll_frame(cx) })) .poll()) - .expect("data some") - .expect("data ok"); + .expect("frame is some") + .expect("frame is ok") + .into_data() + .expect("frame is data"); assert!(wk.upgrade().is_none()); } @@ -282,10 +258,10 @@ mod tests { let res = assert_ready!(task::spawn(poll_fn(|cx| { let body = &mut body; tokio::pin!(body); - body.poll_data(cx) + body.poll_frame(cx) })) .poll()); - assert!(res.expect("data is some").is_err()); + assert!(res.expect("frame is some").is_err()); assert!(wk.upgrade().is_none()); } @@ -308,21 +284,21 @@ mod tests { assert_ready!(task::spawn(poll_fn(|cx| { let body = &mut body; tokio::pin!(body); - body.poll_data(cx) + body.poll_frame(cx) })) .poll()) - .expect("data some") - .expect("data ok"); + .expect("frame is some") + .expect("frame is ok"); assert!(wk.upgrade().is_some()); assert_ready!(task::spawn(poll_fn(|cx| { let body = &mut body; tokio::pin!(body); - body.poll_data(cx) + body.poll_frame(cx) })) .poll()) - .expect("data some") - .expect("data ok"); + .expect("frame is some") + .expect("frame is ok"); 
assert!(wk.upgrade().is_none()); } @@ -355,40 +331,42 @@ mod tests { assert_ready!(task::spawn(poll_fn(|cx| { let body = &mut body; tokio::pin!(body); - body.poll_data(cx) + body.poll_frame(cx) })) .poll()) - .expect("data") - .expect("data ok"); + .expect("frame is some") + .expect("frame is ok"); assert!(wk.upgrade().is_some()); assert_ready!(task::spawn(poll_fn(|cx| { let body = &mut body; tokio::pin!(body); - body.poll_data(cx) + body.poll_frame(cx) })) .poll()) - .expect("data") - .expect("data ok"); + .expect("frame is some") + .expect("frame is ok"); assert!(wk.upgrade().is_some()); - let poll = assert_ready!(task::spawn(poll_fn(|cx| { + assert_ready!(task::spawn(poll_fn(|cx| { let body = &mut body; tokio::pin!(body); - body.poll_data(cx) + body.poll_frame(cx) })) - .poll()); - assert!(poll.is_none()); - assert!(wk.upgrade().is_some()); + .poll()) + .expect("frame is some") + .expect("frame is ok") + .into_trailers() + .expect("is trailers"); + assert!(wk.upgrade().is_none()); - assert_ready!(task::spawn(poll_fn(|cx| { + let poll = assert_ready!(task::spawn(poll_fn(|cx| { let body = &mut body; tokio::pin!(body); - body.poll_trailers(cx) + body.poll_frame(cx) })) - .poll()) - .expect("trailers ok") - .expect("trailers"); + .poll()); + assert!(poll.is_none()); assert!(wk.upgrade().is_none()); } @@ -411,7 +389,7 @@ mod tests { let poll = assert_ready!(task::spawn(poll_fn(|cx| { let body = &mut body; tokio::pin!(body); - body.poll_data(cx) + body.poll_frame(cx) })) .poll()); assert!(poll.expect("some").is_err()); @@ -429,7 +407,7 @@ mod tests { #[derive(Default)] struct TestBody(VecDeque<&'static str>, Option); - impl HttpBody for TestBody { + impl Body for TestBody { type Data = Cursor<&'static str>; type Error = &'static str; @@ -437,26 +415,27 @@ mod tests { self.0.is_empty() & self.1.is_none() } - fn poll_data( + fn poll_frame( mut self: Pin<&mut Self>, _: &mut Context<'_>, - ) -> Poll>> { - Poll::Ready(self.as_mut().0.pop_front().map(Cursor::new).map(Ok)) - 
} - - fn poll_trailers( mut self: Pin<&mut Self>, _: &mut Context<'_>, - ) -> Poll, Self::Error>> { + ) -> Poll, Self::Error>>> { let mut this = self.as_mut(); - assert!(this.0.is_empty()); - Poll::Ready(Ok(this.1.take())) + + // Return the next data frame from the sequence of chunks. + if let Some(chunk) = this.0.pop_front() { + let frame = Some(Ok(Frame::data(Cursor::new(chunk)))); + return Poll::Ready(frame); + } + + // Yield the trailers once all data frames have been yielded. + let trailers = this.1.take().map(Frame::::trailers).map(Ok); + Poll::Ready(trailers) } } #[derive(Default)] struct ErrBody(Option<&'static str>); - impl HttpBody for ErrBody { + impl Body for ErrBody { type Data = Cursor<&'static str>; type Error = &'static str; @@ -464,18 +443,13 @@ self.0.is_none() } - fn poll_data( + fn poll_frame( mut self: Pin<&mut Self>, _: &mut Context<'_>, - ) -> Poll>> { - Poll::Ready(Some(Err(self.as_mut().0.take().expect("err")))) - } + ) -> Poll, Self::Error>>> { + let err = self.as_mut().0.take().expect("err"); - fn poll_trailers( - mut self: Pin<&mut Self>, - _: &mut Context<'_>, - ) -> Poll, Self::Error>> { - Poll::Ready(Err(self.as_mut().0.take().expect("err"))) + Poll::Ready(Some(Err(err))) } } } diff --git a/justfile b/justfile index 2dc928d6ae..c42b65fd71 100644 --- a/justfile +++ b/justfile @@ -15,7 +15,7 @@ toolchain := "" features := "" -export LINKERD2_PROXY_VERSION := env_var_or_default("LINKERD2_PROXY_VERSION", "0.0.0-dev." + `git rev-parse --short HEAD`) +export LINKERD2_PROXY_VERSION := env_var_or_default("LINKERD2_PROXY_VERSION", "0.0.0-dev." + `git rev-parse --short HEAD`) export LINKERD2_PROXY_VENDOR := env_var_or_default("LINKERD2_PROXY_VENDOR", `whoami` + "@" + `hostname`) # The version name to use for packages. @@ -28,26 +28,30 @@ docker-image := docker-repo + ":" + docker-tag # The architecture name to use for packages. Either 'amd64', 'arm64', or 'arm'. arch := "amd64" +# The OS name to use for packages.
Either 'linux' or 'windows'. +os := "linux" libc := 'gnu' # If a `arch` is specified, then we change the default cargo `--target` # to support cross-compilation. Otherwise, we use `rustup` to find the default. -_target := if arch == 'amd64' { +_target := if os + '-' + arch == "linux-amd64" { "x86_64-unknown-linux-" + libc - } else if arch == "arm64" { + } else if os + '-' + arch == "linux-arm64" { "aarch64-unknown-linux-" + libc - } else if arch == "arm" { + } else if os + '-' + arch == "linux-arm" { "armv7-unknown-linux-" + libc + "eabihf" + } else if os + '-' + arch == "windows-amd64" { + "x86_64-pc-windows-" + libc } else { - error("unsupported arch=" + arch) + error("unsupported: os=" + os + " arch=" + arch + " libc=" + libc) } _cargo := 'just-cargo profile=' + profile + ' target=' + _target + ' toolchain=' + toolchain _target_dir := "target" / _target / profile -_target_bin := _target_dir / "linkerd2-proxy" -_package_name := "linkerd2-proxy-" + package_version + "-" + arch + if libc == 'musl' { '-static' } else { '' } +_target_bin := _target_dir / "linkerd2-proxy" + if os == 'windows' { '.exe' } else { '' } +_package_name := "linkerd2-proxy-" + package_version + "-" + os + "-" + arch + if libc == 'musl' { '-static' } else { '' } _package_dir := "target/package" / _package_name shasum := "shasum -a 256" @@ -57,7 +61,9 @@ _features := if features == "all" { "--no-default-features --features=" + features } else { "" } -export CXX := 'clang++-14' +wait-timeout := env_var_or_default("WAIT_TIMEOUT", "1m") + +export CXX := 'clang++-19' # # Recipes @@ -237,7 +243,7 @@ linkerd-tag := env_var_or_default('LINKERD_TAG', '') _controller-image := 'ghcr.io/linkerd/controller' _policy-image := 'ghcr.io/linkerd/policy-controller' _init-image := 'ghcr.io/linkerd/proxy-init' -_init-tag := 'v2.2.0' +_init-tag := 'v2.4.0' _kubectl := 'just-k3d kubectl' _linkerd := 'linkerd --context=k3d-$(just-k3d --evaluate K3D_CLUSTER_NAME)' @@ -252,6 +258,12 @@ _tag-set: _k3d-ready: @just-k3d 
ready +export K3D_CLUSTER_NAME := "l5d-proxy" +export K3D_CREATE_FLAGS := "--no-lb" +export K3S_DISABLE := "local-storage,traefik,servicelb,metrics-server@server:*" +k3d-create: && _k3d-ready + @just-k3d create + k3d-load-linkerd: _tag-set _k3d-ready for i in \ '{{ _controller-image }}:{{ linkerd-tag }}' \ @@ -268,11 +280,12 @@ k3d-load-linkerd: _tag-set _k3d-ready # Install crds on the test cluster. _linkerd-crds-install: _k3d-ready + {{ _kubectl }} apply -f https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.2.1/standard-install.yaml {{ _linkerd }} install --crds \ | {{ _kubectl }} apply -f - {{ _kubectl }} wait crd --for condition=established \ --selector='linkerd.io/control-plane-ns' \ - --timeout=1m + --timeout={{ wait-timeout }} # Install linkerd on the test cluster using test images. linkerd-install *args='': _tag-set k3d-load-linkerd _linkerd-crds-install && _linkerd-ready @@ -294,7 +307,7 @@ linkerd-uninstall: {{ _linkerd }} uninstall \ | {{ _kubectl }} delete -f - -linkerd-check-contol-plane-proxy: +linkerd-check-control-plane-proxy: #!/usr/bin/env bash set -euo pipefail check=$(mktemp --tmpdir check-XXXX.json) @@ -313,4 +326,14 @@ linkerd-check-contol-plane-proxy: _linkerd-ready: {{ _kubectl }} wait pod --for=condition=ready \ --namespace=linkerd --selector='linkerd.io/control-plane-component' \ - --timeout=1m + --timeout={{ wait-timeout }} + +# +# Dev Container +# + +devcontainer-up: + devcontainer.js up --workspace-folder=. 
+ +devcontainer-exec container-id *args: + devcontainer.js exec --container-id={{ container-id }} {{ args }} diff --git a/linkerd/addr/Cargo.toml b/linkerd/addr/Cargo.toml index 8cfd9faec9..61b9750f7f 100644 --- a/linkerd/addr/Cargo.toml +++ b/linkerd/addr/Cargo.toml @@ -1,13 +1,16 @@ [package] name = "linkerd-addr" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [dependencies] -http = "0.2" -ipnet = "2.7" +http = { workspace = true } +ipnet = "2.11" linkerd-dns-name = { path = "../dns/name" } -thiserror = "1" +thiserror = "2" + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ['cfg(fuzzing)'] } diff --git a/linkerd/addr/fuzz/Cargo.toml b/linkerd/addr/fuzz/Cargo.toml index 24bdeda919..11a20575a9 100644 --- a/linkerd/addr/fuzz/Cargo.toml +++ b/linkerd/addr/fuzz/Cargo.toml @@ -1,9 +1,10 @@ [package] name = "linkerd-addr-fuzz" -version = "0.0.0" -authors = ["Linkerd Developers "] -publish = false -edition = "2021" +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [package.metadata] cargo-fuzz = true diff --git a/linkerd/app/Cargo.toml b/linkerd/app/Cargo.toml index af443530a3..a9b1571c76 100644 --- a/linkerd/app/Cargo.toml +++ b/linkerd/app/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "linkerd-app" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } description = """ Configures and executes the proxy @@ -18,6 +18,7 @@ pprof = ["linkerd-app-admin/pprof"] [dependencies] futures = { version = "0.3", 
default-features = false } +hyper-util = { workspace = true } linkerd-app-admin = { path = "./admin" } linkerd-app-core = { path = "./core" } linkerd-app-gateway = { path = "./gateway" } @@ -25,12 +26,14 @@ linkerd-app-inbound = { path = "./inbound" } linkerd-app-outbound = { path = "./outbound" } linkerd-error = { path = "../error" } linkerd-opencensus = { path = "../opencensus" } +linkerd-opentelemetry = { path = "../opentelemetry" } linkerd-tonic-stream = { path = "../tonic-stream" } +linkerd-workers = { path = "../workers" } rangemap = "1" regex = "1" -thiserror = "1" +thiserror = "2" tokio = { version = "1", features = ["rt"] } tokio-stream = { version = "0.1", features = ["time", "sync"] } -tonic = { version = "0.10", default-features = false, features = ["prost"] } -tower = "0.4" +tonic = { workspace = true, default-features = false, features = ["prost"] } +tower = { workspace = true } tracing = "0.1" diff --git a/linkerd/app/admin/Cargo.toml b/linkerd/app/admin/Cargo.toml index cfac00cb03..6781600a17 100644 --- a/linkerd/app/admin/Cargo.toml +++ b/linkerd/app/admin/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "linkerd-app-admin" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } description = """ The linkerd proxy's admin server. 
""" @@ -15,14 +15,17 @@ pprof = ["deflate", "dep:pprof"] log-streaming = ["linkerd-tracing/stream"] [dependencies] +bytes = { workspace = true } deflate = { version = "1", optional = true, features = ["gzip"] } -http = "0.2" -hyper = { version = "0.14", features = ["http1", "http2"] } +http = { workspace = true } +http-body = { workspace = true } +http-body-util = { workspace = true } +hyper = { workspace = true, features = ["http1", "http2"] } futures = { version = "0.3", default-features = false } -pprof = { version = "0.13", optional = true, features = ["prost-codec"] } +pprof = { version = "0.14", optional = true, features = ["prost-codec"] } serde = "1" serde_json = "1" -thiserror = "1" +thiserror = "2" tokio = { version = "1", features = ["macros", "sync", "parking_lot"] } tracing = "0.1" @@ -31,7 +34,7 @@ linkerd-app-inbound = { path = "../inbound" } linkerd-tracing = { path = "../../tracing" } [dependencies.tower] -version = "0.4" +workspace = true default-features = false features = [ "buffer", diff --git a/linkerd/app/admin/src/server.rs b/linkerd/app/admin/src/server.rs index 6362fada4e..272a01932c 100644 --- a/linkerd/app/admin/src/server.rs +++ b/linkerd/app/admin/src/server.rs @@ -12,13 +12,9 @@ use futures::future::{self, TryFutureExt}; use http::StatusCode; -use hyper::{ - body::{Body, HttpBody}, - Request, Response, -}; use linkerd_app_core::{ metrics::{self as metrics, FmtMetrics}, - proxy::http::ClientHandle, + proxy::http::{Body, BoxBody, ClientHandle, Request, Response}, trace, Error, Result, }; use std::{ @@ -40,23 +36,26 @@ pub struct Admin { tracing: trace::Handle, ready: Readiness, shutdown_tx: mpsc::UnboundedSender<()>, + enable_shutdown: bool, #[cfg(feature = "pprof")] pprof: Option, } -pub type ResponseFuture = Pin>> + Send + 'static>>; +pub type ResponseFuture = Pin>> + Send + 'static>>; impl Admin { pub fn new( metrics: M, ready: Readiness, shutdown_tx: mpsc::UnboundedSender<()>, + enable_shutdown: bool, tracing: trace::Handle, ) -> 
Self { Self { metrics: metrics::Serve::new(metrics), ready, shutdown_tx, + enable_shutdown, tracing, #[cfg(feature = "pprof")] @@ -70,30 +69,30 @@ impl Admin { self } - fn ready_rsp(&self) -> Response { + fn ready_rsp(&self) -> Response { if self.ready.is_ready() { Response::builder() .status(StatusCode::OK) .header(http::header::CONTENT_TYPE, "text/plain") - .body("ready\n".into()) + .body(BoxBody::from_static("ready\n")) .expect("builder with known status code must not fail") } else { Response::builder() .status(StatusCode::SERVICE_UNAVAILABLE) - .body("not ready\n".into()) + .body(BoxBody::from_static("not ready\n")) .expect("builder with known status code must not fail") } } - fn live_rsp() -> Response { + fn live_rsp() -> Response { Response::builder() .status(StatusCode::OK) .header(http::header::CONTENT_TYPE, "text/plain") - .body("live\n".into()) + .body(BoxBody::from_static("live\n")) .expect("builder with known status code must not fail") } - fn env_rsp(req: Request) -> Response { + fn env_rsp(req: Request) -> Response { use std::{collections::HashMap, env, ffi::OsString}; if req.method() != http::Method::GET { @@ -139,56 +138,74 @@ impl Admin { json::json_rsp(&env) } - fn shutdown(&self) -> Response { + fn shutdown(&self) -> Response { + if !self.enable_shutdown { + return Response::builder() + .status(StatusCode::NOT_FOUND) + .header(http::header::CONTENT_TYPE, "text/plain") + .body(BoxBody::from_static("shutdown endpoint is not enabled\n")) + .expect("builder with known status code must not fail"); + } if self.shutdown_tx.send(()).is_ok() { Response::builder() .status(StatusCode::OK) .header(http::header::CONTENT_TYPE, "text/plain") - .body("shutdown\n".into()) + .body(BoxBody::from_static("shutdown\n")) .expect("builder with known status code must not fail") } else { Response::builder() .status(StatusCode::INTERNAL_SERVER_ERROR) .header(http::header::CONTENT_TYPE, "text/plain") - .body("shutdown listener dropped\n".into()) + 
.body(BoxBody::from_static("shutdown listener dropped\n")) .expect("builder with known status code must not fail") } } - fn internal_error_rsp(error: impl ToString) -> http::Response { + fn internal_error_rsp(error: impl ToString) -> http::Response { http::Response::builder() .status(http::StatusCode::INTERNAL_SERVER_ERROR) .header(http::header::CONTENT_TYPE, "text/plain") - .body(error.to_string().into()) + .body(BoxBody::new(error.to_string())) .expect("builder with known status code should not fail") } - fn not_found() -> Response { + fn not_found() -> Response { Response::builder() .status(http::StatusCode::NOT_FOUND) - .body(Body::empty()) + .body(BoxBody::empty()) .expect("builder with known status code must not fail") } - fn method_not_allowed() -> Response { + fn method_not_allowed() -> Response { Response::builder() .status(http::StatusCode::METHOD_NOT_ALLOWED) - .body(Body::empty()) + .body(BoxBody::empty()) .expect("builder with known status code must not fail") } - fn forbidden_not_localhost() -> Response { + fn forbidden_not_localhost() -> Response { Response::builder() .status(http::StatusCode::FORBIDDEN) .header(http::header::CONTENT_TYPE, "text/plain") - .body("Requests are only permitted from localhost.".into()) + .body(BoxBody::new::( + "Requests are only permitted from localhost.".into(), + )) .expect("builder with known status code must not fail") } fn client_is_localhost(req: &Request) -> bool { req.extensions() .get::() - .map(|a| a.addr.ip().is_loopback()) + .map(|a| match a.addr.ip() { + std::net::IpAddr::V4(v4) => v4.is_loopback(), + std::net::IpAddr::V6(v6) => { + if let Some(v4) = v6.to_ipv4_mapped() { + v4.is_loopback() + } else { + v6.is_loopback() + } + } + }) .unwrap_or(false) } } @@ -196,11 +213,11 @@ impl Admin { impl tower::Service> for Admin where M: FmtMetrics, - B: HttpBody + Send + 'static, + B: Body + Send + 'static, B::Error: Into, B::Data: Send, { - type Response = http::Response; + type Response = http::Response; type Error 
= Error; type Future = ResponseFuture; @@ -306,13 +323,13 @@ mod tests { let (_, t) = trace::Settings::default().build(); let (s, _) = mpsc::unbounded_channel(); - let admin = Admin::new((), r, s, t); + let admin = Admin::new((), r, s, true, t); macro_rules! call { () => {{ let r = Request::builder() .method(Method::GET) .uri("http://0.0.0.0/ready") - .body(Body::empty()) + .body(BoxBody::empty()) .unwrap(); let f = admin.clone().oneshot(r); timeout(TIMEOUT, f).await.expect("timeout").expect("call") diff --git a/linkerd/app/admin/src/server/json.rs b/linkerd/app/admin/src/server/json.rs index 8f3c3ed6e6..00be983b4a 100644 --- a/linkerd/app/admin/src/server/json.rs +++ b/linkerd/app/admin/src/server/json.rs @@ -1,14 +1,17 @@ static JSON_MIME: &str = "application/json"; pub(in crate::server) static JSON_HEADER_VAL: HeaderValue = HeaderValue::from_static(JSON_MIME); +use bytes::Bytes; use hyper::{ header::{self, HeaderValue}, - Body, StatusCode, + StatusCode, }; +use linkerd_app_core::proxy::http::BoxBody; + pub(crate) fn json_error_rsp( error: impl ToString, status: http::StatusCode, -) -> http::Response { +) -> http::Response { mk_rsp( status, &serde_json::json!({ @@ -18,11 +21,12 @@ pub(crate) fn json_error_rsp( ) } -pub(crate) fn json_rsp(val: &impl serde::Serialize) -> http::Response { +pub(crate) fn json_rsp(val: &impl serde::Serialize) -> http::Response { mk_rsp(StatusCode::OK, val) } -pub(crate) fn accepts_json(req: &http::Request) -> Result<(), http::Response> { +#[allow(clippy::result_large_err)] +pub(crate) fn accepts_json(req: &http::Request) -> Result<(), http::Response> { if let Some(accept) = req.headers().get(header::ACCEPT) { let accept = match std::str::from_utf8(accept.as_bytes()) { Ok(accept) => accept, @@ -41,7 +45,7 @@ pub(crate) fn accepts_json(req: &http::Request) -> Result<(), http::Respon tracing::warn!(?accept, "Accept header will not accept 'application/json'"); return Err(http::Response::builder() .status(StatusCode::NOT_ACCEPTABLE) - 
.body(JSON_MIME.into()) + .body(BoxBody::from_static(JSON_MIME)) .expect("builder with known status code must not fail")); } } @@ -49,18 +53,26 @@ pub(crate) fn accepts_json(req: &http::Request) -> Result<(), http::Respon Ok(()) } -fn mk_rsp(status: StatusCode, val: &impl serde::Serialize) -> http::Response { - match serde_json::to_vec(val) { - Ok(json) => http::Response::builder() +fn mk_rsp(status: StatusCode, val: &impl serde::Serialize) -> http::Response { + // Serialize the value into JSON, and then place the bytes in a boxed response body. + let json = serde_json::to_vec(val) + .map(Bytes::from) + .map(http_body_util::Full::new) + .map(BoxBody::new); + + match json { + Ok(body) => http::Response::builder() .status(status) .header(header::CONTENT_TYPE, JSON_HEADER_VAL.clone()) - .body(json.into()) + .body(body) .expect("builder with known status code must not fail"), Err(error) => { tracing::warn!(?error, "failed to serialize JSON value"); http::Response::builder() .status(StatusCode::INTERNAL_SERVER_ERROR) - .body(format!("failed to serialize JSON value: {error}").into()) + .body(BoxBody::new(format!( + "failed to serialize JSON value: {error}" + ))) .expect("builder with known status code must not fail") } } diff --git a/linkerd/app/admin/src/server/log/level.rs b/linkerd/app/admin/src/server/log/level.rs index 0c228d61ca..aabbcd8fd3 100644 --- a/linkerd/app/admin/src/server/log/level.rs +++ b/linkerd/app/admin/src/server/log/level.rs @@ -1,17 +1,18 @@ +use bytes::Buf; use http::{header, StatusCode}; -use hyper::{ - body::{Buf, HttpBody}, - Body, +use linkerd_app_core::{ + proxy::http::{Body, BoxBody}, + trace::level, + Error, }; -use linkerd_app_core::{trace::level, Error}; use std::io; pub async fn serve( level: level::Handle, req: http::Request, -) -> Result, Error> +) -> Result, Error> where - B: HttpBody, + B: Body, B::Error: Into, { Ok(match *req.method() { @@ -21,11 +22,15 @@ where } http::Method::PUT => { - let body = 
hyper::body::aggregate(req.into_body()) + use http_body_util::BodyExt; + let body = req + .into_body() + .collect() .await - .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; + .map_err(|e| io::Error::new(io::ErrorKind::Other, e))? + .aggregate(); match level.set_from(body.chunk()) { - Ok(_) => mk_rsp(StatusCode::NO_CONTENT, Body::empty()), + Ok(_) => mk_rsp(StatusCode::NO_CONTENT, BoxBody::empty()), Err(error) => { tracing::warn!(%error, "Setting log level failed"); mk_rsp(StatusCode::BAD_REQUEST, error) @@ -37,14 +42,19 @@ where .status(StatusCode::METHOD_NOT_ALLOWED) .header(header::ALLOW, "GET") .header(header::ALLOW, "PUT") - .body(Body::empty()) + .body(BoxBody::empty()) .expect("builder with known status code must not fail"), }) } -fn mk_rsp(status: StatusCode, body: impl Into) -> http::Response { +fn mk_rsp(status: StatusCode, body: B) -> http::Response +where + B: Body + Send + 'static, + B::Data: Send + 'static, + B::Error: Into, +{ http::Response::builder() .status(status) - .body(body.into()) + .body(BoxBody::new(body)) .expect("builder with known status code must not fail") } diff --git a/linkerd/app/admin/src/server/log/stream.rs b/linkerd/app/admin/src/server/log/stream.rs index 11b10a99c9..2042435075 100644 --- a/linkerd/app/admin/src/server/log/stream.rs +++ b/linkerd/app/admin/src/server/log/stream.rs @@ -1,10 +1,9 @@ use crate::server::json; +use bytes::{Buf, Bytes}; use futures::FutureExt; -use hyper::{ - body::{Buf, Bytes}, - header, Body, StatusCode, -}; +use hyper::{header, StatusCode}; use linkerd_app_core::{ + proxy::http::{Body, BoxBody}, trace::{self}, Error, }; @@ -27,9 +26,9 @@ macro_rules! 
recover { pub async fn serve( handle: trace::Handle, req: http::Request, -) -> Result, Error> +) -> Result, Error> where - B: hyper::body::HttpBody, + B: Body, B::Error: Into, { let handle = handle.into_stream(); @@ -52,10 +51,13 @@ where // If the request is a QUERY, use the request body method if method.as_str() == "QUERY" => { // TODO(eliza): validate that the request has a content-length... + use http_body_util::BodyExt; let body = recover!( - hyper::body::aggregate(req.into_body()) + req.into_body() + .collect() .await - .map_err(Into::into), + .map_err(Into::into) + .map(http_body_util::Collected::aggregate), "Reading log stream request body", StatusCode::BAD_REQUEST ); @@ -74,7 +76,7 @@ where .status(StatusCode::METHOD_NOT_ALLOWED) .header(header::ALLOW, "GET") .header(header::ALLOW, "QUERY") - .body(Body::empty()) + .body(BoxBody::empty()) .expect("builder with known status code must not fail")); } }; @@ -99,7 +101,7 @@ where // https://github.com/hawkw/thingbuf/issues/62 would allow us to avoid the // copy by passing the channel's pooled buffer directly to hyper, and // returning it to the channel to be reused when hyper is done with it. - let (mut tx, body) = Body::channel(); + let (mut tx, body) = http_body_util::channel::Channel::::new(1024); tokio::spawn( async move { // TODO(eliza): we could definitely implement some batching here. 
@@ -124,7 +126,7 @@ where }), ); - Ok(mk_rsp(StatusCode::OK, body)) + Ok(mk_rsp(StatusCode::OK, BoxBody::new(body))) } fn parse_filter(filter_str: &str) -> Result { @@ -133,10 +135,10 @@ fn parse_filter(filter_str: &str) -> Result { filter } -fn mk_rsp(status: StatusCode, body: impl Into) -> http::Response { +fn mk_rsp(status: StatusCode, body: B) -> http::Response { http::Response::builder() .status(status) .header(header::CONTENT_TYPE, json::JSON_HEADER_VAL.clone()) - .body(body.into()) + .body(body) .expect("builder with known status code must not fail") } diff --git a/linkerd/app/admin/src/stack.rs b/linkerd/app/admin/src/stack.rs index 910341f65b..a198f9e629 100644 --- a/linkerd/app/admin/src/stack.rs +++ b/linkerd/app/admin/src/stack.rs @@ -1,7 +1,7 @@ use linkerd_app_core::{ classify, config::ServerConfig, - detect, drain, errors, identity, + drain, errors, identity, metrics::{self, FmtMetrics}, proxy::http, serve, @@ -24,6 +24,7 @@ pub struct Config { pub metrics_retain_idle: Duration, #[cfg(feature = "pprof")] pub enable_profiling: bool, + pub enable_shutdown: bool, } pub struct Task { @@ -51,7 +52,7 @@ struct Tcp { #[derive(Clone, Debug)] struct Http { tcp: Tcp, - version: http::Version, + version: http::Variant, } #[derive(Clone, Debug)] @@ -100,7 +101,7 @@ impl Config { let (ready, latch) = crate::server::Readiness::new(); #[cfg_attr(not(feature = "pprof"), allow(unused_mut))] - let admin = crate::server::Admin::new(report, ready, shutdown, trace); + let admin = crate::server::Admin::new(report, ready, shutdown, self.enable_shutdown, trace); #[cfg(feature = "pprof")] let admin = admin.with_profiling(self.enable_profiling); @@ -121,6 +122,7 @@ impl Config { .push_on_service(http::BoxResponse::layer()) .arc_new_clone_http(); + let inbound::DetectMetrics(detect_metrics) = metrics.detect.clone(); let tcp = http .unlift_new() .push(http::NewServeHttp::layer({ @@ -135,11 +137,11 @@ impl Config { })) .push_filter( |(http, tcp): ( - Result, 
detect::DetectTimeoutError<_>>, + http::Detection, Tcp, )| { match http { - Ok(Some(version)) => Ok(Http { version, tcp }), + http::Detection::Http(version) => Ok(Http { version, tcp }), // If detection timed out, we can make an educated guess at the proper // behavior: // - If the connection was meshed, it was most likely transported over @@ -147,12 +149,12 @@ impl Config { // - If the connection was unmeshed, it was mostly likely HTTP/1. // - If we received some unexpected SNI, the client is mostly likely // confused/stale. - Err(_timeout) => { + http::Detection::ReadTimeout(_timeout) => { let version = match tcp.tls { - tls::ConditionalServerTls::None(_) => http::Version::Http1, + tls::ConditionalServerTls::None(_) => http::Variant::Http1, tls::ConditionalServerTls::Some(tls::ServerTls::Established { .. - }) => http::Version::H2, + }) => http::Variant::H2, tls::ConditionalServerTls::Some(tls::ServerTls::Passthru { sni, }) => { @@ -165,7 +167,7 @@ impl Config { } // If the connection failed HTTP detection, check if we detected TLS for // another target. This might indicate that the client is confused/stale. 
- Ok(None) => match tcp.tls { + http::Detection::NotHttp => match tcp.tls { tls::ConditionalServerTls::Some(tls::ServerTls::Passthru { sni }) => { Err(UnexpectedSni(sni, tcp.client).into()) } @@ -176,9 +178,12 @@ impl Config { ) .arc_new_tcp() .lift_new_with_target() - .push(detect::NewDetectService::layer(svc::stack::CloneParam::from( - detect::Config::::from_timeout(DETECT_TIMEOUT), - ))) + .push(http::NewDetect::layer(move |tcp: &Tcp| { + http::DetectParams { + read_timeout: DETECT_TIMEOUT, + metrics: detect_metrics.metrics(tcp.policy.server_label()) + } + })) .push(transport::metrics::NewServer::layer(metrics.proxy.transport)) .push_map_target(move |(tls, addrs): (tls::ConditionalServerTls, B::Addrs)| { Tcp { @@ -218,8 +223,8 @@ impl Param for Tcp { // === impl Http === -impl Param for Http { - fn param(&self) -> http::Version { +impl Param for Http { + fn param(&self) -> http::Variant { self.version } } diff --git a/linkerd/app/core/Cargo.toml b/linkerd/app/core/Cargo.toml index f18c234a7a..17fe5f2b43 100644 --- a/linkerd/app/core/Cargo.toml +++ b/linkerd/app/core/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "linkerd-app-core" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } description = """ Core infrastructure for the proxy application @@ -13,20 +13,22 @@ independently of the inbound and outbound proxy logic. 
""" [dependencies] -bytes = "1" -drain = { version = "0.1", features = ["retain"] } -http = "0.2" -http-body = "0.4" -hyper = { version = "0.14", features = ["http1", "http2"] } +bytes = { workspace = true } +drain = { workspace = true, features = ["retain"] } +http = { workspace = true } +http-body = { workspace = true } +http-body-util = { workspace = true } +hyper = { workspace = true, features = ["http1", "http2"] } +hyper-util = { workspace = true } futures = { version = "0.3", default-features = false } -ipnet = "2.7" -prometheus-client = "0.22" +ipnet = "2.11" +prometheus-client = { workspace = true } regex = "1" serde_json = "1" -thiserror = "1" +thiserror = "2" tokio = { version = "1", features = ["macros", "sync", "parking_lot"] } tokio-stream = { version = "0.1", features = ["time"] } -tonic = { version = "0.10", default-features = false, features = ["prost"] } +tonic = { workspace = true, default-features = false, features = ["prost"] } tracing = "0.1" parking_lot = "0.12" pin-project = "1" @@ -34,7 +36,6 @@ pin-project = "1" linkerd-addr = { path = "../../addr" } linkerd-conditional = { path = "../../conditional" } linkerd-dns = { path = "../../dns" } -linkerd-detect = { path = "../../detect" } linkerd-duplex = { path = "../../duplex" } linkerd-errno = { path = "../../errno" } linkerd-error = { path = "../../error" } @@ -47,6 +48,7 @@ linkerd-io = { path = "../../io" } linkerd-meshtls = { path = "../../meshtls", default-features = false } linkerd-metrics = { path = "../../metrics", features = ["process", "stack"] } linkerd-opencensus = { path = "../../opencensus" } +linkerd-opentelemetry = { path = "../../opentelemetry" } linkerd-proxy-api-resolve = { path = "../../proxy/api-resolve" } linkerd-proxy-balance = { path = "../../proxy/balance" } linkerd-proxy-core = { path = "../../proxy/core" } @@ -73,15 +75,13 @@ linkerd-tls = { path = "../../tls" } linkerd-trace-context = { path = "../../trace-context" } [dependencies.tower] -version = "0.4" +workspace 
= true default-features = false features = ["make", "spawn-ready", "timeout", "util", "limit"] -[target.'cfg(target_os = "linux")'.dependencies] -linkerd-system = { path = "../../system" } - [build-dependencies] semver = "1" [dev-dependencies] +linkerd-mock-http-body = { path = "../../mock/http-body" } quickcheck = { version = "1", default-features = false } diff --git a/linkerd/app/core/build.rs b/linkerd/app/core/build.rs index e1db4f96de..0aefc19d95 100644 --- a/linkerd/app/core/build.rs +++ b/linkerd/app/core/build.rs @@ -14,8 +14,8 @@ fn set_env(name: &str, cmd: &mut Command) { fn version() -> String { if let Ok(v) = std::env::var("LINKERD2_PROXY_VERSION") { if !v.is_empty() { - if semver::Version::parse(&v).is_err() { - panic!("LINKERD2_PROXY_VERSION must be semver"); + if let Err(err) = semver::Version::parse(&v) { + panic!("LINKERD2_PROXY_VERSION must be semver: version='{v}' error='{err}'"); } return v; } diff --git a/linkerd/app/core/src/config.rs b/linkerd/app/core/src/config.rs index a5d2f45ebc..d27467ba8f 100644 --- a/linkerd/app/core/src/config.rs +++ b/linkerd/app/core/src/config.rs @@ -1,8 +1,8 @@ pub use crate::exp_backoff::ExponentialBackoff; use crate::{ - proxy::http::{self, h1, h2}, - svc::{queue, CloneParam, ExtractParam, Param}, - transport::{DualListenAddr, Keepalive, ListenAddr}, + proxy::http::{h1, h2}, + svc::{queue, ExtractParam, Param}, + transport::{DualListenAddr, Keepalive, ListenAddr, UserTimeout}, }; use std::time::Duration; @@ -10,6 +10,7 @@ use std::time::Duration; pub struct ServerConfig { pub addr: DualListenAddr, pub keepalive: Keepalive, + pub user_timeout: UserTimeout, pub http2: h2::ServerParams, } @@ -18,6 +19,7 @@ pub struct ConnectConfig { pub backoff: ExponentialBackoff, pub timeout: Duration, pub keepalive: Keepalive, + pub user_timeout: UserTimeout, pub http1: h1::PoolSettings, pub http2: h2::ClientParams, } @@ -57,14 +59,6 @@ impl ExtractParam for QueueConfig { } } -// === impl ProxyConfig === - -impl ProxyConfig { - 
pub fn detect_http(&self) -> CloneParam> { - linkerd_detect::Config::from_timeout(self.detect_protocol_timeout).into() - } -} - // === impl ServerConfig === impl Param for ServerConfig { @@ -84,3 +78,9 @@ impl Param for ServerConfig { self.keepalive } } + +impl Param for ServerConfig { + fn param(&self) -> UserTimeout { + self.user_timeout + } +} diff --git a/linkerd/app/core/src/control.rs b/linkerd/app/core/src/control.rs index 856c0fedc2..4fcb1133b2 100644 --- a/linkerd/app/core/src/control.rs +++ b/linkerd/app/core/src/control.rs @@ -69,8 +69,10 @@ impl fmt::Display for ControlAddr { } } -pub type RspBody = - linkerd_http_metrics::requests::ResponseBody, classify::Eos>; +pub type RspBody = linkerd_http_metrics::requests::ResponseBody< + http::balance::Body, + classify::Eos, +>; #[derive(Clone, Debug, Default)] pub struct Metrics { @@ -112,7 +114,7 @@ impl Config { warn!(error, "Failed to resolve control-plane component"); if let Some(e) = crate::errors::cause_ref::(&*error) { if let Some(ttl) = e.negative_ttl() { - return Ok(Either::Left( + return Ok::<_, Error>(Either::Left( IntervalStream::new(time::interval(ttl)).map(|_| ()), )); } @@ -124,13 +126,16 @@ impl Config { } }; - let client = svc::stack(ConnectTcp::new(self.connect.keepalive)) - .push(tls::Client::layer(identity)) - .push_connect_timeout(self.connect.timeout) - .push_map_target(|(_version, target)| target) - .push(self::client::layer(self.connect.http2)) - .push_on_service(svc::MapErr::layer_boxed()) - .into_new_service(); + let client = svc::stack(ConnectTcp::new( + self.connect.keepalive, + self.connect.user_timeout, + )) + .push(tls::Client::layer(identity)) + .push_connect_timeout(self.connect.timeout) // Client + .push_map_target(|(_version, target)| target) + .push(self::client::layer::<_, _>(self.connect.http2)) + .push_on_service(svc::MapErr::layer_boxed()) + .into_new_service(); let endpoint = client // Ensure that connection is driven independently of the load diff --git 
a/linkerd/app/core/src/dns.rs b/linkerd/app/core/src/dns.rs index 7afc69e43d..90f59af0f1 100644 --- a/linkerd/app/core/src/dns.rs +++ b/linkerd/app/core/src/dns.rs @@ -1,30 +1,55 @@ -pub use linkerd_dns::*; -use std::path::PathBuf; +use self::metrics::Labels; +use linkerd_metrics::prom::{Counter, Family, Registry}; use std::time::Duration; +pub use linkerd_dns::*; + +mod metrics; + #[derive(Clone, Debug)] pub struct Config { pub min_ttl: Option, pub max_ttl: Option, - pub resolv_conf_path: PathBuf, } pub struct Dns { - pub resolver: Resolver, + resolver: Resolver, + resolutions: Family, +} + +// === impl Dns === + +impl Dns { + /// Returns a new [`Resolver`]. + pub fn resolver(&self, client: &'static str) -> Resolver { + let metrics = self.metrics(client); + + self.resolver.clone().with_metrics(metrics) + } } // === impl Config === impl Config { - pub fn build(self) -> Dns { + pub fn build(self, registry: &mut Registry) -> Dns { + let resolutions = Family::default(); + registry.register( + "resolutions", + "Counts the number of DNS records that have been resolved.", + resolutions.clone(), + ); + let resolver = Resolver::from_system_config_with(&self).expect("system DNS config must be valid"); - Dns { resolver } + Dns { + resolver, + resolutions, + } } } impl ConfigureResolver for Config { - /// Modify a `trust-dns-resolver::config::ResolverOpts` to reflect + /// Modify a `hickory-resolver::config::ResolverOpts` to reflect /// the configured minimum and maximum DNS TTL values. 
fn configure_resolver(&self, opts: &mut ResolverOpts) { opts.positive_min_ttl = self.min_ttl; diff --git a/linkerd/app/core/src/dns/metrics.rs b/linkerd/app/core/src/dns/metrics.rs new file mode 100644 index 0000000000..82de0d9054 --- /dev/null +++ b/linkerd/app/core/src/dns/metrics.rs @@ -0,0 +1,115 @@ +use super::{Dns, Metrics}; +use linkerd_metrics::prom::encoding::{ + EncodeLabel, EncodeLabelSet, EncodeLabelValue, LabelSetEncoder, LabelValueEncoder, +}; +use std::fmt::{Display, Write}; + +#[derive(Clone, Debug, Eq, Hash, PartialEq)] +pub(super) struct Labels { + client: &'static str, + record_type: RecordType, + result: Outcome, +} + +#[derive(Clone, Debug, Eq, Hash, PartialEq)] +enum RecordType { + A, + Srv, +} + +#[derive(Clone, Debug, Eq, Hash, PartialEq)] +enum Outcome { + Ok, + NotFound, +} + +// === impl Dns === + +impl Dns { + pub(super) fn metrics(&self, client: &'static str) -> Metrics { + let family = &self.resolutions; + + let a_records_resolved = (*family.get_or_create(&Labels { + client, + record_type: RecordType::A, + result: Outcome::Ok, + })) + .clone(); + let a_records_not_found = (*family.get_or_create(&Labels { + client, + record_type: RecordType::A, + result: Outcome::NotFound, + })) + .clone(); + let srv_records_resolved = (*family.get_or_create(&Labels { + client, + record_type: RecordType::Srv, + result: Outcome::Ok, + })) + .clone(); + let srv_records_not_found = (*family.get_or_create(&Labels { + client, + record_type: RecordType::Srv, + result: Outcome::NotFound, + })) + .clone(); + + Metrics { + a_records_resolved, + a_records_not_found, + srv_records_resolved, + srv_records_not_found, + } + } +} +// === impl Labels === + +impl EncodeLabelSet for Labels { + fn encode(&self, mut encoder: LabelSetEncoder<'_>) -> Result<(), std::fmt::Error> { + let Self { + client, + record_type, + result, + } = self; + + ("client", *client).encode(encoder.encode_label())?; + ("record_type", record_type).encode(encoder.encode_label())?; + ("result", 
result).encode(encoder.encode_label())?; + + Ok(()) + } +} + +// === impl Outcome === + +impl EncodeLabelValue for &Outcome { + fn encode(&self, encoder: &mut LabelValueEncoder<'_>) -> Result<(), std::fmt::Error> { + encoder.write_str(self.to_string().as_str()) + } +} + +impl Display for Outcome { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str(match self { + Self::Ok => "ok", + Self::NotFound => "not_found", + }) + } +} + +// === impl RecordType === + +impl EncodeLabelValue for &RecordType { + fn encode(&self, encoder: &mut LabelValueEncoder<'_>) -> Result<(), std::fmt::Error> { + encoder.write_str(self.to_string().as_str()) + } +} + +impl Display for RecordType { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str(match self { + Self::A => "A/AAAA", + Self::Srv => "SRV", + }) + } +} diff --git a/linkerd/app/core/src/errors.rs b/linkerd/app/core/src/errors.rs index 0baebc6cd5..eafa9cc898 100644 --- a/linkerd/app/core/src/errors.rs +++ b/linkerd/app/core/src/errors.rs @@ -1,3 +1,4 @@ +pub mod body; pub mod respond; pub use self::respond::{HttpRescue, NewRespond, NewRespondService, SyntheticHttpResponse}; @@ -6,6 +7,16 @@ pub use linkerd_proxy_http::h2::H2Error; pub use linkerd_stack::{FailFastError, LoadShedError}; pub use tonic::Code as Grpc; +/// Header names and values related to error responses. 
+pub mod header { + use http::header::{HeaderName, HeaderValue}; + pub const L5D_PROXY_CONNECTION: HeaderName = HeaderName::from_static("l5d-proxy-connection"); + pub const L5D_PROXY_ERROR: HeaderName = HeaderName::from_static("l5d-proxy-error"); + pub(super) const GRPC_CONTENT_TYPE: HeaderValue = HeaderValue::from_static("application/grpc"); + pub(super) const GRPC_MESSAGE: HeaderName = HeaderName::from_static("grpc-message"); + pub(super) const GRPC_STATUS: HeaderName = HeaderName::from_static("grpc-status"); +} + #[derive(Debug, thiserror::Error)] #[error("connect timed out after {0:?}")] pub struct ConnectTimeout(pub(crate) std::time::Duration); @@ -18,3 +29,27 @@ pub fn has_grpc_status(error: &crate::Error, code: tonic::Code) -> bool { .map(|s| s.code() == code) .unwrap_or(false) } + +// Copied from tonic, where it's private. +fn code_header(code: tonic::Code) -> http::HeaderValue { + use {http::HeaderValue, tonic::Code}; + match code { + Code::Ok => HeaderValue::from_static("0"), + Code::Cancelled => HeaderValue::from_static("1"), + Code::Unknown => HeaderValue::from_static("2"), + Code::InvalidArgument => HeaderValue::from_static("3"), + Code::DeadlineExceeded => HeaderValue::from_static("4"), + Code::NotFound => HeaderValue::from_static("5"), + Code::AlreadyExists => HeaderValue::from_static("6"), + Code::PermissionDenied => HeaderValue::from_static("7"), + Code::ResourceExhausted => HeaderValue::from_static("8"), + Code::FailedPrecondition => HeaderValue::from_static("9"), + Code::Aborted => HeaderValue::from_static("10"), + Code::OutOfRange => HeaderValue::from_static("11"), + Code::Unimplemented => HeaderValue::from_static("12"), + Code::Internal => HeaderValue::from_static("13"), + Code::Unavailable => HeaderValue::from_static("14"), + Code::DataLoss => HeaderValue::from_static("15"), + Code::Unauthenticated => HeaderValue::from_static("16"), + } +} diff --git a/linkerd/app/core/src/errors/body.rs b/linkerd/app/core/src/errors/body.rs new file mode 
100644 index 0000000000..a9d14677b4 --- /dev/null +++ b/linkerd/app/core/src/errors/body.rs @@ -0,0 +1,314 @@ +use super::{ + header::{GRPC_MESSAGE, GRPC_STATUS}, + respond::{HttpRescue, SyntheticHttpResponse}, +}; +use http::header::HeaderValue; +use http_body::Frame; +use linkerd_error::{Error, Result}; +use pin_project::pin_project; +use std::{ + pin::Pin, + task::{Context, Poll}, +}; +use tracing::{debug, warn}; + +/// Returns a "gRPC rescue" body. +/// +/// This returns a body that, should the inner `B`-typed body return an error when polling for +/// DATA frames, will "rescue" the stream and return a TRAILERS frame that describes the error. +#[pin_project(project = ResponseBodyProj)] +pub struct ResponseBody(#[pin] Inner); + +#[pin_project(project = InnerProj)] +enum Inner { + /// An inert body that delegates directly down to the underlying body `B`. + Passthru(#[pin] B), + /// A body that will be rescued if it yields an error. + GrpcRescue { + #[pin] + inner: B, + /// An error response [strategy][HttpRescue]. + rescue: R, + emit_headers: bool, + }, + /// The underlying body `B` yielded an error and was "rescued". + Rescued, +} + +// === impl ResponseBody === + +impl ResponseBody { + /// Returns a body in "passthru" mode. + pub fn passthru(inner: B) -> Self { + Self(Inner::Passthru(inner)) + } + + /// Returns a "gRPC rescue" body. 
+ pub fn grpc_rescue(inner: B, rescue: R, emit_headers: bool) -> Self { + Self(Inner::GrpcRescue { + inner, + rescue, + emit_headers, + }) + } +} + +impl Default for ResponseBody { + fn default() -> Self { + Self(Inner::Passthru(B::default())) + } +} + +impl linkerd_proxy_http::Body for ResponseBody +where + B: linkerd_proxy_http::Body, + R: HttpRescue, +{ + type Data = B::Data; + type Error = B::Error; + + fn poll_frame( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + let ResponseBodyProj(inner) = self.as_mut().project(); + match inner.project() { + InnerProj::Passthru(inner) => inner.poll_frame(cx), + InnerProj::GrpcRescue { + inner, + rescue, + emit_headers, + } => match inner.poll_frame(cx) { + Poll::Ready(Some(Err(error))) => { + // The inner body has yielded an error, which we will try to rescue. If so, + // yield synthetic trailers reporting the error. + let trailers = Self::rescue(error, rescue, *emit_headers)?; + self.set(Self(Inner::Rescued)); + Poll::Ready(Some(Ok(Frame::trailers(trailers)))) + } + poll => poll, + }, + InnerProj::Rescued => Poll::Ready(None), + } + } + + #[inline] + fn is_end_stream(&self) -> bool { + let Self(inner) = self; + match inner { + Inner::Passthru(inner) => inner.is_end_stream(), + Inner::GrpcRescue { inner, .. } => inner.is_end_stream(), + Inner::Rescued => true, + } + } + + #[inline] + fn size_hint(&self) -> http_body::SizeHint { + let Self(inner) = self; + match inner { + Inner::Passthru(inner) => inner.size_hint(), + Inner::GrpcRescue { inner, .. } => inner.size_hint(), + Inner::Rescued => http_body::SizeHint::with_exact(0), + } + } +} + +impl ResponseBody +where + B: http_body::Body, + R: HttpRescue, +{ + /// Maps an error yielded by the inner body to a collection of gRPC trailers. + /// + /// This function returns `Ok(trailers)` if the given [`HttpRescue`] strategy could identify + /// a cause for an error yielded by the inner `B`-typed body. 
+ fn rescue( + error: B::Error, + rescue: &R, + emit_headers: bool, + ) -> Result { + let SyntheticHttpResponse { + grpc_status, + message, + .. + } = rescue.rescue(error)?; + + debug!(grpc.status = ?grpc_status, "Synthesizing gRPC trailers"); + let mut t = http::HeaderMap::new(); + t.insert(GRPC_STATUS, super::code_header(grpc_status)); + if emit_headers { + // A gRPC message trailer is only included if instructed to emit additional headers. + t.insert( + GRPC_MESSAGE, + HeaderValue::from_str(&message).unwrap_or_else(|error| { + warn!(%error, "Failed to encode error header"); + HeaderValue::from_static("Unexpected error") + }), + ); + } + + Ok(t) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::errors::header::{GRPC_MESSAGE, GRPC_STATUS}; + use http::HeaderMap; + use linkerd_mock_http_body::MockBody; + + struct MockRescue; + impl HttpRescue for MockRescue { + /// Attempts to synthesize a response from the given error. + fn rescue(&self, _: E) -> Result { + let synthetic = SyntheticHttpResponse::internal_error("MockRescue::rescue"); + Ok(synthetic) + } + } + + #[tokio::test] + async fn rescue_body_recovers_from_error_without_grpc_message() { + let (_guard, _handle) = linkerd_tracing::test::trace_init(); + let trailers = { + let mut trls = HeaderMap::with_capacity(1); + let value = HeaderValue::from_static("caboose"); + trls.insert("trailer", value); + trls + }; + let rescue = { + let inner = MockBody::default() + .then_yield_data(Poll::Ready(Some(Ok("inter".into())))) + .then_yield_data(Poll::Ready(Some(Err("an error midstream".into())))) + .then_yield_data(Poll::Ready(Some(Ok("rupted".into())))) + .then_yield_trailer(Poll::Ready(Some(Ok(trailers)))); + let rescue = MockRescue; + let emit_headers = false; + ResponseBody::grpc_rescue(inner, rescue, emit_headers) + }; + let (data, Some(trailers)) = body_to_string(rescue).await else { + panic!("trailers should exist"); + }; + assert_eq!(data, "inter"); + assert_eq!( + trailers[GRPC_STATUS], + 
i32::from(tonic::Code::Internal).to_string() + ); + assert_eq!(trailers.get(GRPC_MESSAGE), None); + } + + #[tokio::test] + async fn rescue_body_recovers_from_error_emitting_message() { + let (_guard, _handle) = linkerd_tracing::test::trace_init(); + let trailers = { + let mut trls = HeaderMap::with_capacity(1); + let value = HeaderValue::from_static("caboose"); + trls.insert("trailer", value); + trls + }; + let rescue = { + let inner = MockBody::default() + .then_yield_data(Poll::Ready(Some(Ok("inter".into())))) + .then_yield_data(Poll::Ready(Some(Err("an error midstream".into())))) + .then_yield_data(Poll::Ready(Some(Ok("rupted".into())))) + .then_yield_trailer(Poll::Ready(Some(Ok(trailers)))); + let rescue = MockRescue; + let emit_headers = true; + ResponseBody::grpc_rescue(inner, rescue, emit_headers) + }; + let (data, Some(trailers)) = body_to_string(rescue).await else { + panic!("trailers should exist"); + }; + assert_eq!(data, "inter"); + assert_eq!( + trailers[GRPC_STATUS], + i32::from(tonic::Code::Internal).to_string() + ); + assert_eq!(trailers[GRPC_MESSAGE], "MockRescue::rescue"); + } + + #[tokio::test] + async fn rescue_body_works_for_empty() { + let (_guard, _handle) = linkerd_tracing::test::trace_init(); + let rescue = { + let inner = MockBody::default(); + let rescue = MockRescue; + let emit_headers = false; + ResponseBody::grpc_rescue(inner, rescue, emit_headers) + }; + let (data, trailers) = body_to_string(rescue).await; + assert_eq!(data, ""); + assert_eq!(trailers, None); + } + + #[tokio::test] + async fn rescue_body_works_for_body_with_data() { + let (_guard, _handle) = linkerd_tracing::test::trace_init(); + let rescue = { + let inner = MockBody::default().then_yield_data(Poll::Ready(Some(Ok("unary".into())))); + let rescue = MockRescue; + let emit_headers = false; + ResponseBody::grpc_rescue(inner, rescue, emit_headers) + }; + let (data, trailers) = body_to_string(rescue).await; + assert_eq!(data, "unary"); + assert_eq!(trailers, None); + } + + 
#[tokio::test] + async fn rescue_body_works_for_body_with_trailers() { + let (_guard, _handle) = linkerd_tracing::test::trace_init(); + let trailers = { + let mut trls = HeaderMap::with_capacity(1); + let value = HeaderValue::from_static("caboose"); + trls.insert("trailer", value); + trls + }; + let rescue = { + let inner = MockBody::default().then_yield_trailer(Poll::Ready(Some(Ok(trailers)))); + let rescue = MockRescue; + let emit_headers = false; + ResponseBody::grpc_rescue(inner, rescue, emit_headers) + }; + let (data, trailers) = body_to_string(rescue).await; + assert_eq!(data, ""); + assert_eq!(trailers.expect("has trailers")["trailer"], "caboose"); + } + + async fn body_to_string(mut body: B) -> (String, Option) + where + B: http_body::Body + Unpin, + B::Error: std::fmt::Debug, + { + use http_body_util::BodyExt; + + let mut data = String::new(); + let mut trailers = None; + + // Continue reading frames from the body until it is finished. + while let Some(frame) = body + .frame() + .await + .transpose() + .expect("reading a frame succeeds") + { + match frame.into_data().map(|mut buf| { + use bytes::Buf; + let bytes = buf.copy_to_bytes(buf.remaining()); + String::from_utf8(bytes.to_vec()).unwrap() + }) { + Ok(ref s) => data.push_str(s), + Err(frame) => { + let trls = frame + .into_trailers() + .map_err(drop) + .expect("test frame is either data or trailers"); + trailers = Some(trls); + } + } + } + + tracing::info!(?data, ?trailers, "finished reading body"); + (data, trailers) + } +} diff --git a/linkerd/app/core/src/errors/respond.rs b/linkerd/app/core/src/errors/respond.rs index 0b57fb36a6..2e8b12d1d1 100644 --- a/linkerd/app/core/src/errors/respond.rs +++ b/linkerd/app/core/src/errors/respond.rs @@ -1,21 +1,16 @@ +use super::{ + body::ResponseBody, + header::{GRPC_CONTENT_TYPE, GRPC_MESSAGE, GRPC_STATUS, L5D_PROXY_CONNECTION, L5D_PROXY_ERROR}, +}; use crate::svc; use http::header::{HeaderValue, LOCATION}; use linkerd_error::{Error, Result}; use 
linkerd_error_respond as respond; -use linkerd_proxy_http::orig_proto; -pub use linkerd_proxy_http::{ClientHandle, HasH2Reason}; +use linkerd_proxy_http::{orig_proto, ClientHandle}; use linkerd_stack::ExtractParam; -use pin_project::pin_project; -use std::{ - borrow::Cow, - pin::Pin, - task::{Context, Poll}, -}; +use std::borrow::Cow; use tracing::{debug, info_span, warn}; -pub const L5D_PROXY_CONNECTION: &str = "l5d-proxy-connection"; -pub const L5D_PROXY_ERROR: &str = "l5d-proxy-error"; - pub fn layer( params: P, ) -> impl svc::layer::Layer> + Clone { @@ -33,10 +28,10 @@ pub trait HttpRescue { #[derive(Clone, Debug)] pub struct SyntheticHttpResponse { - grpc_status: tonic::Code, + pub grpc_status: tonic::Code, http_status: http::StatusCode, close_connection: bool, - message: Cow<'static, str>, + pub message: Cow<'static, str>, location: Option, } @@ -62,22 +57,6 @@ pub struct Respond { emit_headers: bool, } -#[pin_project(project = ResponseBodyProj)] -pub enum ResponseBody { - Passthru(#[pin] B), - GrpcRescue { - #[pin] - inner: B, - trailers: Option, - rescue: R, - emit_headers: bool, - }, -} - -const GRPC_CONTENT_TYPE: &str = "application/grpc"; -const GRPC_STATUS: &str = "grpc-status"; -const GRPC_MESSAGE: &str = "grpc-message"; - // === impl HttpRescue === impl HttpRescue for F @@ -120,7 +99,17 @@ impl SyntheticHttpResponse { Self { close_connection: true, http_status: http::StatusCode::GATEWAY_TIMEOUT, - grpc_status: tonic::Code::Unavailable, + grpc_status: tonic::Code::DeadlineExceeded, + message: Cow::Owned(msg.to_string()), + location: None, + } + } + + pub fn gateway_timeout_nonfatal(msg: impl ToString) -> Self { + Self { + close_connection: false, + http_status: http::StatusCode::GATEWAY_TIMEOUT, + grpc_status: tonic::Code::DeadlineExceeded, message: Cow::Owned(msg.to_string()), location: None, } @@ -156,6 +145,16 @@ impl SyntheticHttpResponse { } } + pub fn rate_limited(msg: impl ToString) -> Self { + Self { + http_status: 
http::StatusCode::TOO_MANY_REQUESTS, + grpc_status: tonic::Code::ResourceExhausted, + close_connection: false, + message: Cow::Owned(msg.to_string()), + location: None, + } + } + pub fn loop_detected(msg: impl ToString) -> Self { Self { http_status: http::StatusCode::LOOP_DETECTED, @@ -227,7 +226,7 @@ impl SyntheticHttpResponse { .version(http::Version::HTTP_2) .header(http::header::CONTENT_LENGTH, "0") .header(http::header::CONTENT_TYPE, GRPC_CONTENT_TYPE) - .header(GRPC_STATUS, code_header(self.grpc_status)); + .header(GRPC_STATUS, super::code_header(self.grpc_status)); if emit_headers { rsp = rsp @@ -326,7 +325,15 @@ where let is_grpc = req .headers() .get(http::header::CONTENT_TYPE) - .and_then(|v| v.to_str().ok().map(|s| s.starts_with(GRPC_CONTENT_TYPE))) + .and_then(|v| { + v.to_str().ok().map(|s| { + s.starts_with( + GRPC_CONTENT_TYPE + .to_str() + .expect("GRPC_CONTENT_TYPE only contains visible ASCII"), + ) + }) + }) .unwrap_or(false); Respond { client, @@ -368,7 +375,7 @@ impl Respond { impl respond::Respond, Error> for Respond where - B: Default + hyper::body::HttpBody, + B: Default + linkerd_proxy_http::Body, R: HttpRescue + Clone, { type Response = http::Response>; @@ -376,19 +383,14 @@ where fn respond(&self, res: Result>) -> Result { let error = match res { Ok(rsp) => { - return Ok(rsp.map(|b| match self { + return Ok(rsp.map(|inner| match self { Respond { is_grpc: true, rescue, emit_headers, .. 
- } => ResponseBody::GrpcRescue { - inner: b, - trailers: None, - rescue: rescue.clone(), - emit_headers: *emit_headers, - }, - _ => ResponseBody::Passthru(b), + } => ResponseBody::grpc_rescue(inner, rescue.clone(), *emit_headers), + _ => ResponseBody::passthru(inner), })); } Err(error) => error, @@ -421,127 +423,3 @@ where Ok(rsp) } } - -// === impl ResponseBody === - -impl Default for ResponseBody { - fn default() -> Self { - ResponseBody::Passthru(B::default()) - } -} - -impl hyper::body::HttpBody for ResponseBody -where - B: hyper::body::HttpBody, - R: HttpRescue, -{ - type Data = B::Data; - type Error = B::Error; - - fn poll_data( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>> { - match self.project() { - ResponseBodyProj::Passthru(inner) => inner.poll_data(cx), - ResponseBodyProj::GrpcRescue { - inner, - trailers, - rescue, - emit_headers, - } => { - // should not be calling poll_data if we have set trailers derived from an error - assert!(trailers.is_none()); - match inner.poll_data(cx) { - Poll::Ready(Some(Err(error))) => { - let SyntheticHttpResponse { - grpc_status, - message, - .. - } = rescue.rescue(error)?; - let t = Self::grpc_trailers(grpc_status, &message, *emit_headers); - *trailers = Some(t); - Poll::Ready(None) - } - data => data, - } - } - } - } - - #[inline] - fn poll_trailers( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll, Self::Error>> { - match self.project() { - ResponseBodyProj::Passthru(inner) => inner.poll_trailers(cx), - ResponseBodyProj::GrpcRescue { - inner, trailers, .. - } => match trailers.take() { - Some(t) => Poll::Ready(Ok(Some(t))), - None => inner.poll_trailers(cx), - }, - } - } - - #[inline] - fn is_end_stream(&self) -> bool { - match self { - Self::Passthru(inner) => inner.is_end_stream(), - Self::GrpcRescue { - inner, trailers, .. 
- } => trailers.is_none() && inner.is_end_stream(), - } - } - - #[inline] - fn size_hint(&self) -> http_body::SizeHint { - match self { - Self::Passthru(inner) => inner.size_hint(), - Self::GrpcRescue { inner, .. } => inner.size_hint(), - } - } -} - -impl ResponseBody { - fn grpc_trailers(code: tonic::Code, message: &str, emit_headers: bool) -> http::HeaderMap { - debug!(grpc.status = ?code, "Synthesizing gRPC trailers"); - let mut t = http::HeaderMap::new(); - t.insert(GRPC_STATUS, code_header(code)); - if emit_headers { - t.insert( - GRPC_MESSAGE, - HeaderValue::from_str(message).unwrap_or_else(|error| { - warn!(%error, "Failed to encode error header"); - HeaderValue::from_static("Unexpected error") - }), - ); - } - t - } -} - -// Copied from tonic, where it's private. -fn code_header(code: tonic::Code) -> HeaderValue { - use tonic::Code; - match code { - Code::Ok => HeaderValue::from_static("0"), - Code::Cancelled => HeaderValue::from_static("1"), - Code::Unknown => HeaderValue::from_static("2"), - Code::InvalidArgument => HeaderValue::from_static("3"), - Code::DeadlineExceeded => HeaderValue::from_static("4"), - Code::NotFound => HeaderValue::from_static("5"), - Code::AlreadyExists => HeaderValue::from_static("6"), - Code::PermissionDenied => HeaderValue::from_static("7"), - Code::ResourceExhausted => HeaderValue::from_static("8"), - Code::FailedPrecondition => HeaderValue::from_static("9"), - Code::Aborted => HeaderValue::from_static("10"), - Code::OutOfRange => HeaderValue::from_static("11"), - Code::Unimplemented => HeaderValue::from_static("12"), - Code::Internal => HeaderValue::from_static("13"), - Code::Unavailable => HeaderValue::from_static("14"), - Code::DataLoss => HeaderValue::from_static("15"), - Code::Unauthenticated => HeaderValue::from_static("16"), - } -} diff --git a/linkerd/app/core/src/http_tracing.rs b/linkerd/app/core/src/http_tracing.rs index 6b24c599e3..b0ddb05ce5 100644 --- a/linkerd/app/core/src/http_tracing.rs +++ 
b/linkerd/app/core/src/http_tracing.rs @@ -1,139 +1,76 @@ use linkerd_error::Error; -use linkerd_opencensus::proto::trace::v1 as oc; use linkerd_stack::layer; -use linkerd_trace_context::{self as trace_context, TraceContext}; -use std::{collections::HashMap, sync::Arc}; -use thiserror::Error; +use linkerd_trace_context::{ + self as trace_context, + export::{ExportSpan, SpanKind, SpanLabels}, + Span, TraceContext, +}; +use std::{str::FromStr, sync::Arc}; use tokio::sync::mpsc; -pub type OpenCensusSink = Option>; -pub type Labels = Arc>; - -/// SpanConverter converts trace_context::Span objects into OpenCensus agent -/// protobuf span objects. SpanConverter receives trace_context::Span objects by -/// implmenting the SpanSink trait. For each span that it receives, it converts -/// it to an OpenCensus span and then sends it on the provided mpsc::Sender. -#[derive(Clone)] -pub struct SpanConverter { - kind: Kind, - sink: mpsc::Sender, - labels: Labels, +#[derive(Debug, Copy, Clone, Default)] +pub enum CollectorProtocol { + #[default] + OpenCensus, + OpenTelemetry, } -#[derive(Debug, Error)] -#[error("ID '{:?} should have {} bytes, but it has {}", self.id, self.expected_size, self.actual_size)] -pub struct IdLengthError { - id: Vec, - expected_size: usize, - actual_size: usize, +impl FromStr for CollectorProtocol { + type Err = (); + + fn from_str(s: &str) -> Result { + if s.eq_ignore_ascii_case("opencensus") { + Ok(Self::OpenCensus) + } else if s.eq_ignore_ascii_case("opentelemetry") { + Ok(Self::OpenTelemetry) + } else { + Err(()) + } + } } +pub type SpanSink = mpsc::Sender; + pub fn server( - sink: OpenCensusSink, - labels: impl Into, + sink: Option, + labels: impl Into, ) -> impl layer::Layer, S>> + Clone { - SpanConverter::layer(Kind::Server, sink, labels) + TraceContext::layer(sink.map(move |sink| SpanConverter { + kind: SpanKind::Server, + sink, + labels: labels.into(), + })) } pub fn client( - sink: OpenCensusSink, - labels: impl Into, + sink: Option, + labels: 
impl Into, ) -> impl layer::Layer, S>> + Clone { - SpanConverter::layer(Kind::Client, sink, labels) -} - -#[derive(Copy, Clone, Debug, PartialEq)] -enum Kind { - Server = 1, - Client = 2, + TraceContext::layer(sink.map(move |sink| SpanConverter { + kind: SpanKind::Client, + sink, + labels: labels.into(), + })) } -impl SpanConverter { - fn layer( - kind: Kind, - sink: OpenCensusSink, - labels: impl Into, - ) -> impl layer::Layer, S>> + Clone { - TraceContext::layer(sink.map(move |sink| Self { - kind, - sink, - labels: labels.into(), - })) - } - - fn mk_span(&self, mut span: trace_context::Span) -> Result { - let mut attributes = HashMap::::new(); - for (k, v) in self.labels.iter() { - attributes.insert( - k.clone(), - oc::AttributeValue { - value: Some(oc::attribute_value::Value::StringValue(truncatable( - v.clone(), - ))), - }, - ); - } - for (k, v) in span.labels.drain() { - attributes.insert( - k.to_string(), - oc::AttributeValue { - value: Some(oc::attribute_value::Value::StringValue(truncatable(v))), - }, - ); - } - Ok(oc::Span { - trace_id: into_bytes(span.trace_id, 16)?, - span_id: into_bytes(span.span_id, 8)?, - tracestate: None, - parent_span_id: into_bytes(span.parent_id, 8)?, - name: Some(truncatable(span.span_name)), - kind: self.kind as i32, - start_time: Some(span.start.into()), - end_time: Some(span.end.into()), - attributes: Some(oc::span::Attributes { - attribute_map: attributes, - dropped_attributes_count: 0, - }), - stack_trace: None, - time_events: None, - links: None, - status: None, // TODO: this is gRPC status; we must read response trailers to populate this - resource: None, - same_process_as_parent_span: Some(self.kind == Kind::Client), - child_span_count: None, - }) - } +#[derive(Clone)] +pub struct SpanConverter { + kind: SpanKind, + sink: SpanSink, + labels: SpanLabels, } impl trace_context::SpanSink for SpanConverter { - #[inline] fn is_enabled(&self) -> bool { true } - fn try_send(&mut self, span: trace_context::Span) -> Result<(), 
Error> { - let span = self.mk_span(span)?; - self.sink.try_send(span).map_err(Into::into) - } -} - -fn into_bytes(id: trace_context::Id, size: usize) -> Result, IdLengthError> { - let bytes: Vec = id.into(); - if bytes.len() == size { - Ok(bytes) - } else { - let actual_size = bytes.len(); - Err(IdLengthError { - id: bytes, - expected_size: size, - actual_size, - }) - } -} - -fn truncatable(value: String) -> oc::TruncatableString { - oc::TruncatableString { - value, - truncated_byte_count: 0, + fn try_send(&mut self, span: Span) -> Result<(), Error> { + self.sink.try_send(ExportSpan { + span, + kind: self.kind, + labels: Arc::clone(&self.labels), + })?; + Ok(()) } } diff --git a/linkerd/app/core/src/lib.rs b/linkerd/app/core/src/lib.rs index fc98a3b621..0d08e2bc2d 100644 --- a/linkerd/app/core/src/lib.rs +++ b/linkerd/app/core/src/lib.rs @@ -32,7 +32,6 @@ pub use drain; pub use ipnet::{IpNet, Ipv4Net, Ipv6Net}; pub use linkerd_addr::{self as addr, Addr, AddrMatch, IpMatch, NameAddr, NameMatch}; pub use linkerd_conditional::Conditional; -pub use linkerd_detect as detect; pub use linkerd_dns; pub use linkerd_error::{cause_ref, is_caused_by, Error, Infallible, Recover, Result}; pub use linkerd_exp_backoff as exp_backoff; @@ -40,6 +39,7 @@ pub use linkerd_http_metrics as http_metrics; pub use linkerd_idle_cache as idle_cache; pub use linkerd_io as io; pub use linkerd_opencensus as opencensus; +pub use linkerd_opentelemetry as opentelemetry; pub use linkerd_service_profiles as profiles; pub use linkerd_stack_metrics as stack_metrics; pub use linkerd_stack_tracing as stack_tracing; @@ -65,7 +65,7 @@ pub struct ProxyRuntime { pub identity: identity::creds::Receiver, pub metrics: metrics::Proxy, pub tap: proxy::tap::Registry, - pub span_sink: http_tracing::OpenCensusSink, + pub span_sink: Option, pub drain: drain::Watch, } diff --git a/linkerd/app/core/src/metrics.rs b/linkerd/app/core/src/metrics.rs index 3fd374031b..3aac6ccc12 100644 --- a/linkerd/app/core/src/metrics.rs 
+++ b/linkerd/app/core/src/metrics.rs @@ -9,14 +9,13 @@ pub use crate::transport::labels::{TargetAddr, TlsAccept}; use crate::{ classify::Class, - control, http_metrics, opencensus, profiles, stack_metrics, - svc::Param, - tls, + control, http_metrics, opencensus, opentelemetry, profiles, proxy, stack_metrics, svc, tls, transport::{self, labels::TlsConnect}, }; use linkerd_addr::Addr; pub use linkerd_metrics::*; use linkerd_proxy_server_policy as policy; +use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue}; use std::{ fmt::{self, Write}, net::SocketAddr, @@ -39,6 +38,7 @@ pub struct Metrics { pub proxy: Proxy, pub control: ControlHttp, pub opencensus: opencensus::metrics::Registry, + pub opentelemetry: opentelemetry::metrics::Registry, } #[derive(Clone, Debug)] @@ -73,7 +73,7 @@ pub struct InboundEndpointLabels { /// A label referencing an inbound `Server` (i.e. for policy). #[derive(Clone, Debug, Eq, PartialEq, Hash)] -pub struct ServerLabel(pub Arc); +pub struct ServerLabel(pub Arc, pub u16); /// Labels referencing an inbound server and authorization. 
#[derive(Clone, Debug, Eq, PartialEq, Hash)] @@ -101,9 +101,32 @@ pub struct OutboundEndpointLabels { pub server_id: tls::ConditionalClientTls, pub authority: Option, pub labels: Option, + pub zone_locality: OutboundZoneLocality, pub target_addr: SocketAddr, } +#[derive(Debug, Copy, Clone, Default, Hash, Eq, PartialEq, EncodeLabelValue)] +pub enum OutboundZoneLocality { + #[default] + Unknown, + Local, + Remote, +} + +impl OutboundZoneLocality { + pub fn new(metadata: &proxy::api_resolve::Metadata) -> Self { + if let Some(is_zone_local) = metadata.is_zone_local() { + if is_zone_local { + OutboundZoneLocality::Local + } else { + OutboundZoneLocality::Remote + } + } else { + OutboundZoneLocality::Unknown + } + } +} + #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct StackLabels { pub direction: Direction, @@ -191,11 +214,13 @@ impl Metrics { }; let (opencensus, opencensus_report) = opencensus::metrics::new(); + let (opentelemetry, opentelemetry_report) = opentelemetry::metrics::new(); let metrics = Metrics { proxy, control, opencensus, + opentelemetry, }; let report = endpoint_report @@ -205,6 +230,7 @@ impl Metrics { .and_report(control_report) .and_report(transport_report) .and_report(opencensus_report) + .and_report(opentelemetry_report) .and_report(stack); (metrics, report) @@ -213,7 +239,7 @@ impl Metrics { // === impl CtlLabels === -impl Param for control::ControlAddr { +impl svc::Param for control::ControlAddr { fn param(&self) -> ControlLabels { ControlLabels { addr: self.addr.clone(), @@ -310,14 +336,32 @@ impl FmtLabels for ServerLabel { fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, - "srv_group=\"{}\",srv_kind=\"{}\",srv_name=\"{}\"", + "srv_group=\"{}\",srv_kind=\"{}\",srv_name=\"{}\",srv_port=\"{}\"", self.0.group(), self.0.kind(), - self.0.name() + self.0.name(), + self.1 ) } } +impl EncodeLabelSet for ServerLabel { + fn encode(&self, mut enc: prometheus_client::encoding::LabelSetEncoder<'_>) -> fmt::Result { + 
prom::EncodeLabelSetMut::encode_label_set(self, &mut enc) + } +} + +impl prom::EncodeLabelSetMut for ServerLabel { + fn encode_label_set(&self, enc: &mut prom::encoding::LabelSetEncoder<'_>) -> fmt::Result { + use prometheus_client::encoding::EncodeLabel; + ("srv_group", self.0.group()).encode(enc.encode_label())?; + ("srv_kind", self.0.kind()).encode(enc.encode_label())?; + ("srv_name", self.0.name()).encode(enc.encode_label())?; + ("srv_port", self.1).encode(enc.encode_label())?; + Ok(()) + } +} + impl FmtLabels for ServerAuthzLabels { fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.server.fmt_labels(f)?; @@ -357,6 +401,12 @@ impl FmtLabels for RouteAuthzLabels { } } +impl svc::Param for OutboundEndpointLabels { + fn param(&self) -> OutboundZoneLocality { + self.zone_locality + } +} + impl FmtLabels for OutboundEndpointLabels { fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { if let Some(a) = self.authority.as_ref() { @@ -391,7 +441,7 @@ impl FmtLabels for Direction { } } -impl<'a> FmtLabels for Authority<'a> { +impl FmtLabels for Authority<'_> { fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "authority=\"{}\"", self.0) } diff --git a/linkerd/app/core/src/transport/labels.rs b/linkerd/app/core/src/transport/labels.rs index 0d03888709..831a625df3 100644 --- a/linkerd/app/core/src/transport/labels.rs +++ b/linkerd/app/core/src/transport/labels.rs @@ -120,7 +120,7 @@ impl<'t> From<&'t tls::ConditionalServerTls> for TlsAccept<'t> { } } -impl<'t> FmtLabels for TlsAccept<'t> { +impl FmtLabels for TlsAccept<'_> { fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self.0 { Conditional::None(tls::NoServerTls::Disabled) => { @@ -148,7 +148,7 @@ impl<'t> From<&'t tls::ConditionalClientTls> for TlsConnect<'t> { } } -impl<'t> FmtLabels for TlsConnect<'t> { +impl FmtLabels for TlsConnect<'_> { fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self.0 { 
Conditional::None(tls::NoClientTls::Disabled) => { @@ -199,18 +199,21 @@ mod tests { negotiated_protocol: None, }), ([192, 0, 2, 4], 40000).into(), - PolicyServerLabel(Arc::new(Meta::Resource { - group: "policy.linkerd.io".into(), - kind: "server".into(), - name: "testserver".into(), - })), + PolicyServerLabel( + Arc::new(Meta::Resource { + group: "policy.linkerd.io".into(), + kind: "server".into(), + name: "testserver".into(), + }), + 40000, + ), ); assert_eq!( labels.to_string(), "direction=\"inbound\",peer=\"src\",\ target_addr=\"192.0.2.4:40000\",target_ip=\"192.0.2.4\",target_port=\"40000\",\ tls=\"true\",client_id=\"foo.id.example.com\",\ - srv_group=\"policy.linkerd.io\",srv_kind=\"server\",srv_name=\"testserver\"" + srv_group=\"policy.linkerd.io\",srv_kind=\"server\",srv_name=\"testserver\",srv_port=\"40000\"" ); } } diff --git a/linkerd/app/gateway/Cargo.toml b/linkerd/app/gateway/Cargo.toml index 86603399ab..c35521c1b8 100644 --- a/linkerd/app/gateway/Cargo.toml +++ b/linkerd/app/gateway/Cargo.toml @@ -1,23 +1,23 @@ [package] name = "linkerd-app-gateway" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [dependencies] -http = "0.2" +http = { workspace = true } futures = { version = "0.3", default-features = false } linkerd-app-core = { path = "../core" } linkerd-app-inbound = { path = "../inbound" } linkerd-app-outbound = { path = "../outbound" } linkerd-proxy-client-policy = { path = "../../proxy/client-policy" } once_cell = "1" -thiserror = "1" +thiserror = "2" tokio = { version = "1", features = ["sync"] } -tonic = { version = "0.10", default-features = false } -tower = { version = "0.4", default-features = false } +tonic = { workspace = true, default-features = false } +tower = { workspace = true, default-features = false } tracing 
= "0.1" [dev-dependencies] @@ -26,6 +26,6 @@ linkerd-app-outbound = { path = "../outbound", features = ["test-util"] } linkerd-proxy-server-policy = { path = "../../proxy/server-policy" } tokio = { version = "1", features = ["rt", "macros"] } tokio-test = "0.4" -tower = { version = "0.4", default-features = false, features = ["util"] } -tower-test = "0.4" +tower = { workspace = true, default-features = false, features = ["util"] } +tower-test = { workspace = true } linkerd-app-test = { path = "../test" } diff --git a/linkerd/app/gateway/src/http.rs b/linkerd/app/gateway/src/http.rs index 85285b2c39..a278c3d8de 100644 --- a/linkerd/app/gateway/src/http.rs +++ b/linkerd/app/gateway/src/http.rs @@ -28,7 +28,7 @@ pub(crate) use self::gateway::NewHttpGateway; pub struct Target { addr: GatewayAddr, routes: watch::Receiver, - version: http::Version, + version: http::Variant, parent: T, } @@ -74,7 +74,7 @@ impl Gateway { T: svc::Param, T: svc::Param, T: svc::Param>>, - T: svc::Param, + T: svc::Param, T: svc::Param, T: Clone + Send + Sync + Unpin + 'static, // Endpoint resolution. 
@@ -164,7 +164,7 @@ fn mk_routes(profile: &profiles::Profile) -> Option { impl svc::router::SelectRoute> for ByRequestVersion { type Key = Target; - type Error = http::version::Unsupported; + type Error = http::UnsupportedVariant; fn select(&self, req: &http::Request) -> Result { let mut t = self.0.clone(); @@ -192,8 +192,8 @@ impl svc::Param for Target { } } -impl svc::Param for Target { - fn param(&self) -> http::Version { +impl svc::Param for Target { + fn param(&self) -> http::Variant { self.version } } diff --git a/linkerd/app/gateway/src/http/gateway.rs b/linkerd/app/gateway/src/http/gateway.rs index 266a440966..2168df6bfd 100644 --- a/linkerd/app/gateway/src/http/gateway.rs +++ b/linkerd/app/gateway/src/http/gateway.rs @@ -66,7 +66,7 @@ where impl tower::Service> for HttpGateway where - B: http::HttpBody + 'static, + B: http::Body + 'static, S: tower::Service, Response = http::Response>, S::Error: Into + 'static, S::Future: Send + 'static, diff --git a/linkerd/app/gateway/src/http/tests.rs b/linkerd/app/gateway/src/http/tests.rs index e545d7f530..fa106bdad9 100644 --- a/linkerd/app/gateway/src/http/tests.rs +++ b/linkerd/app/gateway/src/http/tests.rs @@ -62,7 +62,7 @@ async fn upgraded_request_remains_relative_form() { impl svc::Param for Target { fn param(&self) -> ServerLabel { - ServerLabel(policy::Meta::new_default("test")) + ServerLabel(policy::Meta::new_default("test"), 4143) } } @@ -98,9 +98,9 @@ async fn upgraded_request_remains_relative_form() { } } - impl svc::Param for Target { - fn param(&self) -> http::Version { - http::Version::H2 + impl svc::Param for Target { + fn param(&self) -> http::Variant { + http::Variant::H2 } } @@ -129,6 +129,7 @@ async fn upgraded_request_remains_relative_form() { }), }]))]), }, + local_rate_limit: Arc::new(Default::default()), }; let (policy, tx) = inbound::policy::AllowPolicy::for_test(self.param(), policy); tokio::spawn(async move { diff --git a/linkerd/app/gateway/src/opaq.rs b/linkerd/app/gateway/src/opaq.rs 
index 3424a799bb..d46f70506b 100644 --- a/linkerd/app/gateway/src/opaq.rs +++ b/linkerd/app/gateway/src/opaq.rs @@ -1,10 +1,15 @@ use super::{server::Opaq, Gateway}; use inbound::{GatewayAddr, GatewayDomainInvalid}; -use linkerd_app_core::{io, profiles, svc, tls, transport::addrs::*, Error}; +use linkerd_app_core::{io, svc, tls, transport::addrs::*, Error}; use linkerd_app_inbound as inbound; use linkerd_app_outbound as outbound; +use tokio::sync::watch; -pub type Target = outbound::opaq::Logical; +#[derive(Clone, Debug)] +pub struct Target { + addr: GatewayAddr, + routes: watch::Receiver, +} impl Gateway { /// Wrap the provided outbound opaque stack with inbound authorization and @@ -33,18 +38,7 @@ impl Gateway { .push_filter( |(_, opaq): (_, Opaq)| -> Result<_, GatewayDomainInvalid> { // Fail connections were not resolved. - let profile = svc::Param::>::param(&*opaq) - .ok_or(GatewayDomainInvalid)?; - if let Some(profiles::LogicalAddr(addr)) = profile.logical_addr() { - Ok(outbound::opaq::Logical::Route(addr, profile)) - } else if let Some((addr, metadata)) = profile.endpoint() { - Ok(outbound::opaq::Logical::Forward( - Remote(ServerAddr(addr)), - metadata, - )) - } else { - Err(GatewayDomainInvalid) - } + Target::try_from(opaq) }, ) // Authorize connections to the gateway. @@ -52,3 +46,47 @@ impl Gateway { .arc_new_tcp() } } + +impl TryFrom> for Target +where + T: svc::Param, +{ + type Error = GatewayDomainInvalid; + + fn try_from(opaq: Opaq) -> Result { + use svc::Param; + + let addr: GatewayAddr = (**opaq).param(); + let Some(profile) = (*opaq).param() else { + // The gateway address must be resolvable via the profile API. 
+ return Err(GatewayDomainInvalid); + }; + let routes = outbound::opaq::routes_from_discovery( + addr.0.clone().into(), + Some(profile), + (*opaq).param(), + ); + + Ok(Target { addr, routes }) + } +} + +impl svc::Param> for Target { + fn param(&self) -> watch::Receiver { + self.routes.clone() + } +} + +impl PartialEq for Target { + fn eq(&self, other: &Self) -> bool { + self.addr == other.addr + } +} + +impl Eq for Target {} + +impl std::hash::Hash for Target { + fn hash(&self, state: &mut H) { + self.addr.hash(state); + } +} diff --git a/linkerd/app/gateway/src/server.rs b/linkerd/app/gateway/src/server.rs index 2a958142d2..050a36dcb1 100644 --- a/linkerd/app/gateway/src/server.rs +++ b/linkerd/app/gateway/src/server.rs @@ -11,7 +11,7 @@ use tokio::sync::watch; /// Target for HTTP stacks. #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct Http { - version: http::Version, + version: http::Variant, parent: outbound::Discovery, } @@ -61,13 +61,13 @@ impl Gateway { |parent: outbound::Discovery| -> Result<_, GatewayDomainInvalid> { if let Some(proto) = (*parent).param() { let version = match proto { - SessionProtocol::Http1 => http::Version::Http1, - SessionProtocol::Http2 => http::Version::H2, + SessionProtocol::Http1 => http::Variant::Http1, + SessionProtocol::Http2 => http::Variant::H2, }; - return Ok(svc::Either::A(Http { parent, version })); + return Ok(svc::Either::Left(Http { parent, version })); } - Ok(svc::Either::B(Opaq(parent))) + Ok(svc::Either::Right(Opaq(parent))) }, opaq, ) @@ -154,8 +154,8 @@ impl std::ops::Deref for Http { } } -impl svc::Param for Http { - fn param(&self) -> http::Version { +impl svc::Param for Http { + fn param(&self) -> http::Variant { self.version } } diff --git a/linkerd/app/inbound/Cargo.toml b/linkerd/app/inbound/Cargo.toml index cad07a4f26..9e7f697c1d 100644 --- a/linkerd/app/inbound/Cargo.toml +++ b/linkerd/app/inbound/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "linkerd-app-inbound" -version = "0.1.0" -authors = 
["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } description = """ Configures and runs the inbound proxy """ @@ -18,8 +18,8 @@ test-util = [ ] [dependencies] -bytes = "1" -http = "0.2" +bytes = { workspace = true } +http = { workspace = true } futures = { version = "0.3", default-features = false } linkerd-app-core = { path = "../core" } linkerd-app-test = { path = "../test", optional = true } @@ -30,14 +30,14 @@ linkerd-meshtls-rustls = { path = "../../meshtls/rustls", optional = true } linkerd-proxy-client-policy = { path = "../../proxy/client-policy" } linkerd-tonic-stream = { path = "../../tonic-stream" } linkerd-tonic-watch = { path = "../../tonic-watch" } -linkerd2-proxy-api = { version = "0.13", features = ["inbound"] } +linkerd2-proxy-api = { workspace = true, features = ["inbound"] } once_cell = "1" parking_lot = "0.12" rangemap = "1" -thiserror = "1" +thiserror = "2" tokio = { version = "1", features = ["sync"] } -tonic = { version = "0.10", default-features = false } -tower = { version = "0.4", features = ["util"] } +tonic = { workspace = true, default-features = false } +tower = { workspace = true, features = ["util"] } tracing = "0.1" [dependencies.linkerd-proxy-server-policy] @@ -45,21 +45,33 @@ path = "../../proxy/server-policy" features = ["proto"] [target.'cfg(fuzzing)'.dependencies] -hyper = { version = "0.14", features = ["http1", "http2"] } +hyper = { workspace = true, features = ["http1", "http2"] } linkerd-app-test = { path = "../test" } arbitrary = { version = "1", features = ["derive"] } libfuzzer-sys = { version = "0.4", features = ["arbitrary-derive"] } +linkerd-meshtls-rustls = { path = "../../meshtls/rustls", features = [ + "test-util", +] } [dev-dependencies] -hyper = { version = "0.14", features = ["http1", "http2"] } +http-body-util = { 
workspace = true } +hyper = { workspace = true, features = ["http1", "http2"] } +hyper-util = { workspace = true } linkerd-app-test = { path = "../test" } linkerd-http-metrics = { path = "../../http/metrics", features = ["test-util"] } +linkerd-http-box = { path = "../../http/box" } linkerd-idle-cache = { path = "../../idle-cache", features = ["test-util"] } linkerd-io = { path = "../../io", features = ["tokio-test"] } linkerd-meshtls = { path = "../../meshtls", features = ["rustls"] } linkerd-meshtls-rustls = { path = "../../meshtls/rustls", features = [ "test-util", ] } +linkerd-proxy-server-policy = { path = "../../proxy/server-policy", features = [ + "test-util", +] } linkerd-tracing = { path = "../../tracing", features = ["ansi"] } tokio = { version = "1", features = ["full", "macros"] } tokio-test = "0.4" + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ['cfg(fuzzing)'] } diff --git a/linkerd/app/inbound/fuzz/Cargo.toml b/linkerd/app/inbound/fuzz/Cargo.toml index af7e226c92..f26cb91322 100644 --- a/linkerd/app/inbound/fuzz/Cargo.toml +++ b/linkerd/app/inbound/fuzz/Cargo.toml @@ -1,18 +1,18 @@ - [package] name = "linkerd-app-inbound-fuzz" -version = "0.0.0" +version = { workspace = true } authors = ["Automatically generated"] -publish = false -edition = "2021" +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [package.metadata] cargo-fuzz = true [target.'cfg(fuzzing)'.dependencies] arbitrary = { version = "1", features = ["derive"] } -hyper = { version = "0.14", features = ["http1", "http2"] } -http = "0.2" +hyper = { version = "0.14", features = ["deprecated", "http1", "http2"] } +http = { workspace = true } libfuzzer-sys = { version = "0.4", features = ["arbitrary-derive"] } linkerd-app-core = { path = "../../core" } linkerd-app-inbound = { path = ".." 
} diff --git a/linkerd/app/inbound/src/accept.rs b/linkerd/app/inbound/src/accept.rs index 04dc57db9b..cd16c25ac4 100644 --- a/linkerd/app/inbound/src/accept.rs +++ b/linkerd/app/inbound/src/accept.rs @@ -53,12 +53,12 @@ impl Inbound { move |t: T| -> Result<_, Error> { let addr: OrigDstAddr = t.param(); if addr.port() == proxy_port { - return Ok(svc::Either::B(t)); + return Ok(svc::Either::Right(t)); } let policy = policies.get_policy(addr); tracing::debug!(policy = ?&*policy.borrow(), "Accepted"); - Ok(svc::Either::A(Accept { + Ok(svc::Either::Left(Accept { client_addr: t.param(), orig_dst_addr: addr, policy, @@ -138,6 +138,7 @@ mod tests { kind: "server".into(), name: "testsrv".into(), }), + local_rate_limit: Default::default(), }, None, ); @@ -181,7 +182,11 @@ mod tests { } fn inbound() -> Inbound<()> { - Inbound::new(test_util::default_config(), test_util::runtime().0) + Inbound::new( + test_util::default_config(), + test_util::runtime().0, + &mut Default::default(), + ) } fn new_panic(msg: &'static str) -> svc::ArcNewTcp { diff --git a/linkerd/app/inbound/src/detect.rs b/linkerd/app/inbound/src/detect.rs index e77b85b1cc..bbab744cea 100644 --- a/linkerd/app/inbound/src/detect.rs +++ b/linkerd/app/inbound/src/detect.rs @@ -3,8 +3,8 @@ use crate::{ Inbound, }; use linkerd_app_core::{ - detect, identity, io, - metrics::ServerLabel, + identity, io, + metrics::{prom, ServerLabel}, proxy::http, svc, tls, transport::{ @@ -20,6 +20,10 @@ use tracing::info; #[cfg(test)] mod tests; +#[derive(Clone, Debug)] +pub struct MetricsFamilies(pub HttpDetectMetrics); +pub type HttpDetectMetrics = http::DetectMetricsFamilies; + #[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct Forward { client_addr: Remote, @@ -31,7 +35,7 @@ pub(crate) struct Forward { #[derive(Clone, Debug)] pub(crate) struct Http { tls: Tls, - http: http::Version, + http: http::Variant, } #[derive(Clone, Debug)] @@ -48,9 +52,6 @@ struct Detect { tls: Tls, } -#[derive(Copy, Clone, Debug)] -struct 
ConfigureHttpDetect; - #[derive(Clone)] struct TlsParams { timeout: tls::server::Timeout, @@ -64,7 +65,11 @@ type TlsIo = tls::server::Io>, I> impl Inbound> { /// Builds a stack that terminates mesh TLS and detects whether the traffic is HTTP (as hinted /// by policy). - pub(crate) fn push_detect(self, forward: F) -> Inbound> + pub(crate) fn push_detect( + self, + MetricsFamilies(metrics): MetricsFamilies, + forward: F, + ) -> Inbound> where T: svc::Param + svc::Param> + svc::Param, T: Clone + Send + 'static, @@ -75,14 +80,18 @@ impl Inbound> { FSvc::Error: Into, FSvc::Future: Send, { - self.push_detect_http(forward.clone()) + self.push_detect_http(metrics, forward.clone()) .push_detect_tls(forward) } /// Builds a stack that handles HTTP detection once TLS detection has been performed. If the /// connection is determined to be HTTP, the inner stack is used; otherwise the connection is /// passed to the provided 'forward' stack. - fn push_detect_http(self, forward: F) -> Inbound> + fn push_detect_http( + self, + metrics: HttpDetectMetrics, + forward: F, + ) -> Inbound> where I: io::AsyncRead + io::AsyncWrite + io::PeerAddr, I: Debug + Send + Sync + Unpin + 'static, @@ -111,42 +120,59 @@ impl Inbound> { .push_switch( |(detected, Detect { tls, .. })| -> Result<_, Infallible> { match detected { - Ok(Some(http)) => Ok(svc::Either::A(Http { http, tls })), - Ok(None) => Ok(svc::Either::B(tls)), + http::Detection::Http(http) => { + Ok(svc::Either::Left(Http { http, tls })) + } + http::Detection::NotHttp => Ok(svc::Either::Right(tls)), // When HTTP detection fails, forward the connection to the application as // an opaque TCP stream. - Err(timeout) => match tls.policy.protocol() { - Protocol::Http1 { .. } => { - // If the protocol was hinted to be HTTP/1.1 but detection - // failed, we'll usually be handling HTTP/1, but we may actually - // be handling HTTP/2 via protocol upgrade. 
Our options are: - // handle the connection as HTTP/1, assuming it will be rare for - // a proxy to initiate TLS, etc and not send the 16B of - // connection header; or we can handle it as opaque--but there's - // no chance the server will be able to handle the H2 protocol - // upgrade. So, it seems best to assume it's HTTP/1 and let the - // proxy handle the protocol error if we're in an edge case. - info!(%timeout, "Handling connection as HTTP/1 due to policy"); - Ok(svc::Either::A(Http { - http: http::Version::Http1, - tls, - })) - } - // Otherwise, the protocol hint must have been `Detect` or the - // protocol was updated after detection was initiated, otherwise we - // would have avoided detection below. Continue handling the - // connection as if it were opaque. - _ => { - info!(%timeout, "Handling connection as opaque"); - Ok(svc::Either::B(tls)) + http::Detection::ReadTimeout(timeout) => { + match tls.policy.protocol() { + Protocol::Http1 { .. } => { + // If the protocol was hinted to be HTTP/1.1 but detection + // failed, we'll usually be handling HTTP/1, but we may actually + // be handling HTTP/2 via protocol upgrade. Our options are: + // handle the connection as HTTP/1, assuming it will be rare for + // a proxy to initiate TLS, etc and not send the 16B of + // connection header; or we can handle it as opaque--but there's + // no chance the server will be able to handle the H2 protocol + // upgrade. So, it seems best to assume it's HTTP/1 and let the + // proxy handle the protocol error if we're in an edge case. + info!( + ?timeout, + "Handling connection as HTTP/1 due to policy" + ); + Ok(svc::Either::Left(Http { + http: http::Variant::Http1, + tls, + })) + } + // Otherwise, the protocol hint must have + // been `Detect` or the protocol was updated + // after detection was initiated, otherwise + // we would have avoided detection below. + // Continue handling the connection as if it + // were opaque. 
+ _ => { + info!( + ?timeout, + "Handling connection as opaque due to policy" + ); + Ok(svc::Either::Right(tls)) + } } - }, + } } }, forward.into_inner(), ) .lift_new_with_target() - .push(detect::NewDetectService::layer(ConfigureHttpDetect)) + .push(http::NewDetect::layer( + move |Detect { timeout, tls }: &Detect| http::DetectParams { + read_timeout: *timeout, + metrics: metrics.metrics(tls.policy.server_label()), + }, + )) .arc_new_tcp(); http.push_on_service(svc::MapTargetLayer::new(io::BoxedIo::new)) @@ -159,7 +185,7 @@ impl Inbound> { move |tls: Tls| -> Result<_, Infallible> { let http = match tls.policy.protocol() { Protocol::Detect { timeout, .. } => { - return Ok(svc::Either::B(Detect { timeout, tls })); + return Ok(svc::Either::Right(Detect { timeout, tls })); } // Meshed HTTP/1 services may actually be transported over HTTP/2 connections // between proxies, so we have to do detection. @@ -167,18 +193,18 @@ impl Inbound> { // TODO(ver) outbound clients should hint this with ALPN so we don't // have to detect this situation. Protocol::Http1 { .. } if tls.status.is_some() => { - return Ok(svc::Either::B(Detect { + return Ok(svc::Either::Right(Detect { timeout: detect_timeout, tls, })); } // Unmeshed services don't use protocol upgrading, so we can use the // hint without further detection. - Protocol::Http1 { .. } => http::Version::Http1, - Protocol::Http2 { .. } | Protocol::Grpc { .. } => http::Version::H2, + Protocol::Http1 { .. } => http::Variant::Http1, + Protocol::Http2 { .. } | Protocol::Grpc { .. } => http::Variant::H2, _ => unreachable!("opaque protocols must not hit the HTTP stack"), }; - Ok(svc::Either::A(Http { http, tls })) + Ok(svc::Either::Left(Http { http, tls })) }, detect.into_inner(), ) @@ -232,10 +258,10 @@ impl Inbound>> { // whether app TLS was employed, but we use this as a signal that we should // not perform additional protocol detection. if matches!(protocol, Protocol::Tls { .. 
}) { - return Ok(svc::Either::B(tls)); + return Ok(svc::Either::Right(tls)); } - Ok(svc::Either::A(tls)) + Ok(svc::Either::Left(tls)) }, forward .clone() @@ -259,14 +285,14 @@ impl Inbound>> { if matches!(policy.protocol(), Protocol::Opaque { .. }) { const TLS_PORT_SKIPPED: tls::ConditionalServerTls = tls::ConditionalServerTls::None(tls::NoServerTls::PortSkipped); - return Ok(svc::Either::B(Tls { + return Ok(svc::Either::Right(Tls { client_addr: t.param(), orig_dst_addr: t.param(), status: TLS_PORT_SKIPPED, policy, })); } - Ok(svc::Either::A(t)) + Ok(svc::Either::Left(t)) }, forward .push_on_service(svc::MapTargetLayer::new(io::BoxedIo::new)) @@ -332,18 +358,10 @@ impl svc::Param for Tls { } } -// === impl ConfigureHttpDetect === - -impl svc::ExtractParam, Detect> for ConfigureHttpDetect { - fn extract_param(&self, detect: &Detect) -> detect::Config { - detect::Config::from_timeout(detect.timeout) - } -} - // === impl Http === -impl svc::Param for Http { - fn param(&self) -> http::Version { +impl svc::Param for Http { + fn param(&self) -> http::Variant { self.http } } @@ -442,3 +460,13 @@ impl svc::InsertParam for TlsParams { (tls, target) } } + +// === impl MetricsFamilies === + +impl MetricsFamilies { + pub fn register(reg: &mut prom::Registry) -> Self { + Self(http::DetectMetricsFamilies::register( + reg.sub_registry_with_prefix("http"), + )) + } +} diff --git a/linkerd/app/inbound/src/detect/tests.rs b/linkerd/app/inbound/src/detect/tests.rs index 0657ec733a..170050f30c 100644 --- a/linkerd/app/inbound/src/detect/tests.rs +++ b/linkerd/app/inbound/src/detect/tests.rs @@ -13,6 +13,12 @@ const HTTP1: &[u8] = b"GET / HTTP/1.1\r\nhost: example.com\r\n\r\n"; const HTTP2: &[u8] = b"PRI * HTTP/2.0\r\n"; const NOT_HTTP: &[u8] = b"foo\r\nbar\r\nblah\r\n"; +const RESULTS_NOT_HTTP: &str = "results_total{result=\"not_http\",srv_group=\"policy.linkerd.io\",srv_kind=\"server\",srv_name=\"testsrv\",srv_port=\"1000\"}"; +const RESULTS_HTTP1: &str = 
"results_total{result=\"http/1\",srv_group=\"policy.linkerd.io\",srv_kind=\"server\",srv_name=\"testsrv\",srv_port=\"1000\"}"; +const RESULTS_HTTP2: &str = "results_total{result=\"http/2\",srv_group=\"policy.linkerd.io\",srv_kind=\"server\",srv_name=\"testsrv\",srv_port=\"1000\"}"; +const RESULTS_READ_TIMEOUT: &str = "results_total{result=\"read_timeout\",srv_group=\"policy.linkerd.io\",srv_kind=\"server\",srv_name=\"testsrv\",srv_port=\"1000\"}"; +const RESULTS_ERROR: &str = "results_total{result=\"error\",srv_group=\"policy.linkerd.io\",srv_kind=\"server\",srv_name=\"testsrv\",srv_port=\"1000\"}"; + fn authzs() -> Arc<[Authorization]> { Arc::new([Authorization { authentication: Authentication::Unauthenticated, @@ -35,11 +41,41 @@ fn allow(protocol: Protocol) -> AllowPolicy { kind: "server".into(), name: "testsrv".into(), }), + local_rate_limit: Arc::new(Default::default()), }, ); allow } +macro_rules! assert_contains_metric { + ($registry:expr, $metric:expr, $value:expr) => {{ + let mut buf = String::new(); + prom::encoding::text::encode_registry(&mut buf, $registry).expect("encode registry failed"); + let lines = buf.split_terminator('\n').collect::>(); + assert_eq!( + lines.iter().find(|l| l.starts_with($metric)), + Some(&&*format!("{} {}", $metric, $value)), + "metric '{}' not found in:\n{:?}", + $metric, + buf + ); + }}; +} + +macro_rules! 
assert_not_contains_metric { + ($registry:expr, $pattern:expr) => {{ + let mut buf = String::new(); + prom::encoding::text::encode_registry(&mut buf, $registry).expect("encode registry failed"); + let lines = buf.split_terminator('\n').collect::>(); + assert!( + !lines.iter().any(|l| l.starts_with($pattern)), + "metric '{}' found in:\n{:?}", + $pattern, + buf + ); + }}; +} + #[tokio::test(flavor = "current_thread")] async fn detect_tls_opaque() { let _trace = trace::test::trace_init(); @@ -76,14 +112,21 @@ async fn detect_http_non_http() { let (ior, mut iow) = io::duplex(100); iow.write_all(NOT_HTTP).await.unwrap(); + let mut registry = prom::Registry::default(); inbound() .with_stack(new_panic("http stack must not be used")) - .push_detect_http(new_ok()) + .push_detect_http(super::HttpDetectMetrics::register(&mut registry), new_ok()) .into_inner() .new_service(target) .oneshot(ior) .await .expect("should succeed"); + + assert_contains_metric!(®istry, RESULTS_NOT_HTTP, 1); + assert_contains_metric!(®istry, RESULTS_HTTP1, 0); + assert_contains_metric!(®istry, RESULTS_HTTP2, 0); + assert_contains_metric!(®istry, RESULTS_READ_TIMEOUT, 0); + assert_contains_metric!(®istry, RESULTS_ERROR, 0); } #[tokio::test(flavor = "current_thread")] @@ -107,14 +150,24 @@ async fn detect_http() { let (ior, mut iow) = io::duplex(100); iow.write_all(HTTP1).await.unwrap(); + let mut registry = prom::Registry::default(); inbound() .with_stack(new_ok()) - .push_detect_http(new_panic("tcp stack must not be used")) + .push_detect_http( + super::HttpDetectMetrics::register(&mut registry), + new_panic("tcp stack must not be used"), + ) .into_inner() .new_service(target) .oneshot(ior) .await .expect("should succeed"); + + assert_contains_metric!(®istry, RESULTS_NOT_HTTP, 0); + assert_contains_metric!(®istry, RESULTS_HTTP1, 1); + assert_contains_metric!(®istry, RESULTS_HTTP2, 0); + assert_contains_metric!(®istry, RESULTS_READ_TIMEOUT, 0); + assert_contains_metric!(®istry, RESULTS_ERROR, 0); } 
#[tokio::test(flavor = "current_thread")] @@ -133,14 +186,24 @@ async fn hinted_http1() { let (ior, mut iow) = io::duplex(100); iow.write_all(HTTP1).await.unwrap(); + let mut registry = prom::Registry::default(); inbound() .with_stack(new_ok()) - .push_detect_http(new_panic("tcp stack must not be used")) + .push_detect_http( + super::HttpDetectMetrics::register(&mut registry), + new_panic("tcp stack must not be used"), + ) .into_inner() .new_service(target) .oneshot(ior) .await .expect("should succeed"); + + assert_contains_metric!(®istry, RESULTS_NOT_HTTP, 0); + assert_contains_metric!(®istry, RESULTS_HTTP1, 1); + assert_contains_metric!(®istry, RESULTS_HTTP2, 0); + assert_contains_metric!(®istry, RESULTS_READ_TIMEOUT, 0); + assert_contains_metric!(®istry, RESULTS_ERROR, 0); } #[tokio::test(flavor = "current_thread")] @@ -159,14 +222,24 @@ async fn hinted_http1_supports_http2() { let (ior, mut iow) = io::duplex(100); iow.write_all(HTTP2).await.unwrap(); + let mut registry = prom::Registry::default(); inbound() .with_stack(new_ok()) - .push_detect_http(new_panic("tcp stack must not be used")) + .push_detect_http( + super::HttpDetectMetrics::register(&mut registry), + new_panic("tcp stack must not be used"), + ) .into_inner() .new_service(target) .oneshot(ior) .await .expect("should succeed"); + + assert_contains_metric!(®istry, RESULTS_NOT_HTTP, 0); + assert_contains_metric!(®istry, RESULTS_HTTP1, 0); + assert_contains_metric!(®istry, RESULTS_HTTP2, 1); + assert_contains_metric!(®istry, RESULTS_READ_TIMEOUT, 0); + assert_contains_metric!(®istry, RESULTS_ERROR, 0); } #[tokio::test(flavor = "current_thread")] @@ -184,14 +257,25 @@ async fn hinted_http2() { let (ior, _) = io::duplex(100); + let mut registry = prom::Registry::default(); inbound() .with_stack(new_ok()) - .push_detect_http(new_panic("tcp stack must not be used")) + .push_detect_http( + super::HttpDetectMetrics::register(&mut registry), + new_panic("tcp stack must not be used"), + ) .into_inner() 
.new_service(target) .oneshot(ior) .await .expect("should succeed"); + + // No detection is performed when HTTP/2 is hinted, so no metrics are recorded. + assert_not_contains_metric!(®istry, RESULTS_NOT_HTTP); + assert_not_contains_metric!(®istry, RESULTS_HTTP1); + assert_not_contains_metric!(®istry, RESULTS_HTTP2); + assert_not_contains_metric!(®istry, RESULTS_READ_TIMEOUT); + assert_not_contains_metric!(®istry, RESULTS_ERROR); } fn client_id() -> tls::ClientId { @@ -209,7 +293,11 @@ fn orig_dst_addr() -> OrigDstAddr { } fn inbound() -> Inbound<()> { - Inbound::new(test_util::default_config(), test_util::runtime().0) + Inbound::new( + test_util::default_config(), + test_util::runtime().0, + &mut Default::default(), + ) } fn new_panic(msg: &'static str) -> svc::ArcNewTcp { diff --git a/linkerd/app/inbound/src/direct.rs b/linkerd/app/inbound/src/direct.rs index 5025556ffd..5d0b7f22e6 100644 --- a/linkerd/app/inbound/src/direct.rs +++ b/linkerd/app/inbound/src/direct.rs @@ -15,6 +15,10 @@ use std::fmt::Debug; use thiserror::Error; use tracing::{debug_span, info_span}; +mod metrics; + +pub use self::metrics::MetricsFamilies; + /// Creates I/O errors when a connection cannot be forwarded because no transport /// header was present. 
#[derive(Debug, Default)] @@ -25,8 +29,8 @@ struct RefusedNoHeader; pub struct RefusedNoIdentity(()); #[derive(Debug, Error)] -#[error("a named target must be provided on gateway connections")] -struct RefusedNoTarget; +#[error("direct connections require transport header negotiation")] +struct TransportHeaderRequired(()); #[derive(Debug, Clone)] pub(crate) struct LocalTcp { @@ -93,7 +97,7 @@ impl Inbound { self, policies: impl policy::GetPolicy + Clone + Send + Sync + 'static, gateway: svc::ArcNewTcp>, - http: svc::ArcNewTcp>>, + http: svc::ArcNewTcp>>>, ) -> Inbound> where T: Param> + Param, @@ -108,6 +112,7 @@ impl Inbound { { self.map_stack(|config, rt, inner| { let detect_timeout = config.proxy.detect_protocol_timeout; + let metrics = rt.metrics.direct.clone(); let identity = rt .identity @@ -135,7 +140,14 @@ impl Inbound { // forwarding, or we may be processing an HTTP gateway connection. HTTP gateway // connections that have a transport header must provide a target name as a part of // the header. - .push_switch(Ok::, http) + .push_switch( + Ok::, + svc::stack(http) + .push(transport::metrics::NewServer::layer( + rt.metrics.proxy.transport.clone(), + )) + .into_inner(), + ) .push_switch( { let policies = policies.clone(); @@ -145,14 +157,14 @@ impl Inbound { port, name: None, protocol, - } => Ok(svc::Either::A({ + } => Ok(svc::Either::Left({ // When the transport header targets an alternate port (but does // not identify an alternate target name), we check the new // target's policy (rather than the inbound proxy's address). 
let addr = (client.local_addr.ip(), port).into(); let policy = policies.get_policy(OrigDstAddr(addr)); match protocol { - None => svc::Either::A(LocalTcp { + None => svc::Either::Left(LocalTcp { server_addr: Remote(ServerAddr(addr)), client_addr: client.client_addr, client_id: client.client_id, @@ -162,7 +174,7 @@ impl Inbound { // When TransportHeader includes the protocol, but does not // include an alternate name we go through the Inbound HTTP // stack. - svc::Either::B(LocalHttp { + svc::Either::Right(LocalHttp { addr: Remote(ServerAddr(addr)), policy, protocol, @@ -176,7 +188,7 @@ impl Inbound { port, name: Some(name), protocol, - } => Ok(svc::Either::B({ + } => Ok(svc::Either::Right({ // When the transport header provides an alternate target, the // connection is a gateway connection. We check the _gateway // address's_ policy (rather than the target address). @@ -204,6 +216,7 @@ impl Inbound { ) .check_new_service::<(TransportHeader, ClientInfo), _>() // Use ALPN to determine whether a transport header should be read. 
+ .push(metrics::NewRecord::layer(metrics)) .push(svc::ArcNewService::layer()) .push(NewTransportHeaderServer::layer(detect_timeout)) .check_new_service::() @@ -215,7 +228,7 @@ impl Inbound { if client.header_negotiated() { Ok(client) } else { - Err(RefusedNoTarget.into()) + Err(TransportHeaderRequired(()).into()) } }) .push(svc::ArcNewService::layer()) @@ -347,11 +360,11 @@ impl svc::Param for LocalHttp { } } -impl svc::Param for LocalHttp { - fn param(&self) -> http::Version { +impl svc::Param for LocalHttp { + fn param(&self) -> http::Variant { match self.protocol { - SessionProtocol::Http1 => http::Version::Http1, - SessionProtocol::Http2 => http::Version::H2, + SessionProtocol::Http1 => http::Variant::Http1, + SessionProtocol::Http2 => http::Variant::H2, } } } diff --git a/linkerd/app/inbound/src/direct/metrics.rs b/linkerd/app/inbound/src/direct/metrics.rs new file mode 100644 index 0000000000..5730c094cd --- /dev/null +++ b/linkerd/app/inbound/src/direct/metrics.rs @@ -0,0 +1,91 @@ +use super::ClientInfo; +use linkerd_app_core::{ + metrics::prom::{self, EncodeLabelSetMut}, + svc, tls, + transport_header::{SessionProtocol, TransportHeader}, +}; + +#[cfg(test)] +mod tests; + +#[derive(Clone, Debug)] +pub struct NewRecord { + inner: N, + metrics: MetricsFamilies, +} + +#[derive(Clone, Debug, Default)] +pub struct MetricsFamilies { + connections: prom::Family, +} + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +struct Labels { + header: TransportHeader, + client_id: tls::ClientId, +} + +impl MetricsFamilies { + pub fn register(reg: &mut prom::Registry) -> Self { + let connections = prom::Family::default(); + reg.register( + "connections", + "TCP connections with transport headers", + connections.clone(), + ); + + Self { connections } + } +} + +impl NewRecord { + pub fn layer(metrics: MetricsFamilies) -> impl svc::layer::Layer + Clone { + svc::layer::mk(move |inner| Self { + inner, + metrics: metrics.clone(), + }) + } +} + +impl svc::NewService<(TransportHeader, 
ClientInfo)> for NewRecord +where + N: svc::NewService<(TransportHeader, ClientInfo)>, +{ + type Service = N::Service; + + fn new_service(&self, (header, client): (TransportHeader, ClientInfo)) -> Self::Service { + self.metrics + .connections + .get_or_create(&Labels { + header: header.clone(), + client_id: client.client_id.clone(), + }) + .inc(); + + self.inner.new_service((header, client)) + } +} + +impl prom::EncodeLabelSetMut for Labels { + fn encode_label_set(&self, enc: &mut prom::encoding::LabelSetEncoder<'_>) -> std::fmt::Result { + use prom::encoding::EncodeLabel; + ( + "session_protocol", + self.header.protocol.as_ref().map(|p| match p { + SessionProtocol::Http1 => "http/1", + SessionProtocol::Http2 => "http/2", + }), + ) + .encode(enc.encode_label())?; + ("target_port", self.header.port).encode(enc.encode_label())?; + ("target_name", self.header.name.as_deref()).encode(enc.encode_label())?; + ("client_id", self.client_id.to_str()).encode(enc.encode_label())?; + Ok(()) + } +} + +impl prom::encoding::EncodeLabelSet for Labels { + fn encode(&self, mut enc: prom::encoding::LabelSetEncoder<'_>) -> Result<(), std::fmt::Error> { + self.encode_label_set(&mut enc) + } +} diff --git a/linkerd/app/inbound/src/direct/metrics/tests.rs b/linkerd/app/inbound/src/direct/metrics/tests.rs new file mode 100644 index 0000000000..163b6e11ab --- /dev/null +++ b/linkerd/app/inbound/src/direct/metrics/tests.rs @@ -0,0 +1,115 @@ +use super::*; +use crate::direct::ClientInfo; +use futures::future; +use linkerd_app_core::{ + io, + metrics::prom, + svc, tls, + transport::addrs::{ClientAddr, OrigDstAddr, Remote}, + transport_header::{SessionProtocol, TransportHeader}, + Error, +}; +use std::str::FromStr; + +fn new_ok() -> svc::ArcNewTcp { + svc::ArcNewService::new(|_| svc::BoxService::new(svc::mk(|_| future::ok::<(), Error>(())))) +} + +macro_rules! 
assert_counted { + ($registry:expr, $proto:expr, $port:expr, $name:expr, $value:expr) => {{ + let mut buf = String::new(); + prom::encoding::text::encode_registry(&mut buf, $registry).expect("encode registry failed"); + let metric = format!("connections_total{{session_protocol=\"{}\",target_port=\"{}\",target_name=\"{}\",client_id=\"test.client\"}}", $proto, $port, $name); + assert_eq!( + buf.split_terminator('\n') + .find(|l| l.starts_with(&*metric)), + Some(&*format!("{metric} {}", $value)), + "metric '{metric}' not found in:\n{buf}" + ); + }}; +} + +// Added helper to setup and run the test +fn run_metric_test(header: TransportHeader) -> prom::Registry { + let mut registry = prom::Registry::default(); + let families = MetricsFamilies::register(&mut registry); + let new_record = svc::layer::Layer::layer(&NewRecord::layer(families.clone()), new_ok()); + // common client info + let client_id = tls::ClientId::from_str("test.client").unwrap(); + let client_addr = Remote(ClientAddr(([127, 0, 0, 1], 40000).into())); + let local_addr = OrigDstAddr(([127, 0, 0, 1], 4143).into()); + let client_info = ClientInfo { + client_id: client_id.clone(), + alpn: Some(tls::NegotiatedProtocol("transport.l5d.io/v1".into())), + client_addr, + local_addr, + }; + let _svc = svc::NewService::new_service(&new_record, (header.clone(), client_info.clone())); + registry +} + +#[test] +fn records_metrics_http1_local() { + let header = TransportHeader { + port: 8080, + name: None, + protocol: Some(SessionProtocol::Http1), + }; + let registry = run_metric_test(header); + assert_counted!(®istry, "http/1", 8080, "", 1); +} + +#[test] +fn records_metrics_http2_local() { + let header = TransportHeader { + port: 8081, + name: None, + protocol: Some(SessionProtocol::Http2), + }; + let registry = run_metric_test(header); + assert_counted!(®istry, "http/2", 8081, "", 1); +} + +#[test] +fn records_metrics_opaq_local() { + let header = TransportHeader { + port: 8082, + name: None, + protocol: None, + }; + 
let registry = run_metric_test(header); + assert_counted!(®istry, "", 8082, "", 1); +} + +#[test] +fn records_metrics_http1_gateway() { + let header = TransportHeader { + port: 8080, + name: Some("mysvc.myns.svc.cluster.local".parse().unwrap()), + protocol: Some(SessionProtocol::Http1), + }; + let registry = run_metric_test(header); + assert_counted!(®istry, "http/1", 8080, "mysvc.myns.svc.cluster.local", 1); +} + +#[test] +fn records_metrics_http2_gateway() { + let header = TransportHeader { + port: 8081, + name: Some("mysvc.myns.svc.cluster.local".parse().unwrap()), + protocol: Some(SessionProtocol::Http2), + }; + let registry = run_metric_test(header); + assert_counted!(®istry, "http/2", 8081, "mysvc.myns.svc.cluster.local", 1); +} + +#[test] +fn records_metrics_opaq_gateway() { + let header = TransportHeader { + port: 8082, + name: Some("mysvc.myns.svc.cluster.local".parse().unwrap()), + protocol: None, + }; + let registry = run_metric_test(header); + assert_counted!(®istry, "", 8082, "mysvc.myns.svc.cluster.local", 1); +} diff --git a/linkerd/app/inbound/src/http.rs b/linkerd/app/inbound/src/http.rs index 395bfbea00..d896d44b3e 100644 --- a/linkerd/app/inbound/src/http.rs +++ b/linkerd/app/inbound/src/http.rs @@ -18,7 +18,7 @@ pub mod fuzz { test_util::{support::connect::Connect, *}, Config, Inbound, }; - use hyper::{client::conn::Builder as ClientBuilder, Body, Request, Response}; + use hyper::{Body, Request, Response}; use libfuzzer_sys::arbitrary::Arbitrary; use linkerd_app_core::{ identity, io, @@ -41,9 +41,8 @@ pub mod fuzz { } pub async fn fuzz_entry_raw(requests: Vec) { - let mut server = hyper::server::conn::Http::new(); - server.http1_only(true); - let mut client = ClientBuilder::new(); + let server = hyper::server::conn::http1::Builder::new(); + let mut client = hyper::client::conn::http1::Builder::new(); let connect = support::connect().endpoint_fn_boxed(Target::addr(), hello_fuzz_server(server)); let profiles = profile::resolver(); @@ -55,7 +54,7 
@@ pub mod fuzz { let cfg = default_config(); let (rt, _shutdown) = runtime(); let server = build_fuzz_server(cfg, rt, profiles, connect).new_service(Target::HTTP1); - let (mut client, bg) = http_util::connect_and_accept(&mut client, server).await; + let (mut client, bg) = http_util::connect_and_accept_http1(&mut client, server).await; // Now send all of the requests for inp in requests.iter() { @@ -74,7 +73,7 @@ pub mod fuzz { .header(header_name, header_value) .body(Body::default()) { - let rsp = http_util::http_request(&mut client, req).await; + let rsp = client.send_request(req).await; tracing::info!(?rsp); if let Ok(rsp) = rsp { let body = http_util::body_to_string(rsp.into_body()).await; @@ -86,18 +85,18 @@ pub mod fuzz { } } - drop(client); // It's okay if the background task returns an error, as this would // indicate that the proxy closed the connection --- which it will do on // invalid inputs. We want to ensure that the proxy doesn't crash in the // face of these inputs, and the background task will panic in this // case. 
- let res = bg.await; + drop(client); + let res = bg.join_all().await; tracing::info!(?res, "background tasks completed") } fn hello_fuzz_server( - http: hyper::server::conn::Http, + http: hyper::server::conn::http1::Builder, ) -> impl Fn(Remote) -> io::Result { move |_endpoint| { let (client_io, server_io) = support::io::duplex(4096); @@ -163,12 +162,12 @@ pub mod fuzz { } #[derive(Clone, Debug)] - struct Target(http::Version); + struct Target(http::Variant); // === impl Target === impl Target { - const HTTP1: Self = Self(http::Version::Http1); + const HTTP1: Self = Self(http::Variant::Http1); fn addr() -> SocketAddr { ([127, 0, 0, 1], 80).into() @@ -193,8 +192,8 @@ pub mod fuzz { } } - impl svc::Param for Target { - fn param(&self) -> http::Version { + impl svc::Param for Target { + fn param(&self) -> http::Variant { self.0 } } @@ -228,6 +227,9 @@ pub mod fuzz { kind: "server".into(), name: "testsrv".into(), }), + local_rate_limit: Arc::new( + linkerd_proxy_server_policy::LocalRateLimit::default(), + ), }, ); policy @@ -236,11 +238,14 @@ pub mod fuzz { impl svc::Param for Target { fn param(&self) -> policy::ServerLabel { - policy::ServerLabel(Arc::new(policy::Meta::Resource { - group: "policy.linkerd.io".into(), - kind: "server".into(), - name: "testsrv".into(), - })) + policy::ServerLabel( + Arc::new(policy::Meta::Resource { + group: "policy.linkerd.io".into(), + kind: "server".into(), + name: "testsrv".into(), + }), + 1000, + ) } } diff --git a/linkerd/app/inbound/src/http/router.rs b/linkerd/app/inbound/src/http/router.rs index c431a33378..ec38be71f5 100644 --- a/linkerd/app/inbound/src/http/router.rs +++ b/linkerd/app/inbound/src/http/router.rs @@ -33,7 +33,7 @@ struct Logical { /// The request's logical destination. Used for profile discovery. 
logical: Option, addr: Remote, - http: http::Version, + http: http::Variant, tls: tls::ConditionalServerTls, permit: policy::HttpRoutePermit, labels: tap::Labels, @@ -69,7 +69,7 @@ struct LogicalError { impl Inbound { pub(crate) fn push_http_router(self, profiles: P) -> Inbound> where - T: Param + T: Param + Param> + Param> + Param @@ -83,6 +83,7 @@ impl Inbound { { self.map_stack(|config, rt, connect| { let allow_profile = config.allow_discovery.clone(); + let unsafe_authority_labels = config.unsafe_authority_labels; let h1_params = config.proxy.connect.http1; let h2_params = config.proxy.connect.http2.clone(); @@ -105,8 +106,8 @@ impl Inbound { addr: t.addr, permit: t.permit, params: match t.http { - http::Version::Http1 => http::client::Params::Http1(h1_params), - http::Version::H2 => http::client::Params::H2(h2_params.clone()) + http::Variant::Http1 => http::client::Params::Http1(h1_params), + http::Variant::H2 => http::client::Params::H2(h2_params.clone()) }, } }) @@ -122,7 +123,9 @@ impl Inbound { rt.metrics .proxy .http_endpoint - .to_layer::(), + .to_layer_via::( + endpoint_labels(unsafe_authority_labels), + ), ) .push_on_service(http_tracing::client(rt.span_sink.clone(), super::trace_labels())) .push_on_service(http::BoxResponse::layer()) @@ -163,14 +166,14 @@ impl Inbound { |(rx, logical): (Option, Logical)| -> Result<_, Infallible> { if let Some(rx) = rx { if let Some(addr) = rx.logical_addr() { - return Ok(svc::Either::A(Profile { + return Ok(svc::Either::Left(Profile { addr, logical, profiles: rx, })); } } - Ok(svc::Either::B(logical)) + Ok(svc::Either::Right(logical)) }, http.clone().into_inner(), ) @@ -189,7 +192,7 @@ impl Inbound { // discovery (so that we skip the profile stack above). 
let addr = match logical.logical.clone() { Some(addr) => addr, - None => return Ok(svc::Either::B((None, logical))), + None => return Ok(svc::Either::Right((None, logical))), }; if !allow_profile.matches(addr.name()) { tracing::debug!( @@ -197,9 +200,9 @@ impl Inbound { suffixes = %allow_profile, "Skipping discovery, address not in configured DNS suffixes", ); - return Ok(svc::Either::B((None, logical))); + return Ok(svc::Either::Right((None, logical))); } - Ok(svc::Either::A(logical)) + Ok(svc::Either::Left(logical)) }, router .check_new_service::<(Option, Logical), http::Request<_>>() @@ -387,13 +390,17 @@ impl Param for Logical { } } -impl Param for Logical { - fn param(&self) -> metrics::EndpointLabels { +fn endpoint_labels( + unsafe_authority_labels: bool, +) -> impl svc::ExtractParam + Clone { + move |t: &Logical| -> metrics::EndpointLabels { metrics::InboundEndpointLabels { - tls: self.tls.clone(), - authority: self.logical.as_ref().map(|d| d.as_http_authority()), - target_addr: self.addr.into(), - policy: self.permit.labels.clone(), + tls: t.tls.clone(), + authority: unsafe_authority_labels + .then(|| t.logical.as_ref().map(|d| d.as_http_authority())) + .flatten(), + target_addr: t.addr.into(), + policy: t.permit.labels.clone(), } .into() } diff --git a/linkerd/app/inbound/src/http/server.rs b/linkerd/app/inbound/src/http/server.rs index 06149d2be8..6316e3ea06 100644 --- a/linkerd/app/inbound/src/http/server.rs +++ b/linkerd/app/inbound/src/http/server.rs @@ -1,6 +1,6 @@ use super::set_identity_header::NewSetIdentityHeader; use crate::{policy, Inbound}; -pub use linkerd_app_core::proxy::http::{normalize_uri, Version}; +pub use linkerd_app_core::proxy::http::{normalize_uri, Variant}; use linkerd_app_core::{ config::ProxyConfig, errors, http_tracing, io, @@ -31,7 +31,7 @@ impl Inbound { pub fn push_http_server(self) -> Inbound> where // Connection target. 
- T: Param + T: Param + Param + Param + Param @@ -95,7 +95,7 @@ impl Inbound { pub fn push_http_tcp_server(self) -> Inbound> where // Connection target. - T: Param, + T: Param, T: Clone + Send + Unpin + 'static, // Server-side socket. I: io::AsyncRead + io::AsyncWrite + io::PeerAddr + Send + Unpin + 'static, @@ -203,6 +203,10 @@ impl errors::HttpRescue for ServerRescue { )); } + if errors::is_caused_by::(&*error) { + return Ok(errors::SyntheticHttpResponse::rate_limited(error)); + } + if errors::is_caused_by::(&*error) { return Ok(errors::SyntheticHttpResponse::not_found(error)); } diff --git a/linkerd/app/inbound/src/http/tests.rs b/linkerd/app/inbound/src/http/tests.rs index f5c9351240..aeda68b4a0 100644 --- a/linkerd/app/inbound/src/http/tests.rs +++ b/linkerd/app/inbound/src/http/tests.rs @@ -6,21 +6,22 @@ use crate::{ }, Config, Inbound, }; -use hyper::{body::HttpBody, client::conn::Builder as ClientBuilder, Body, Request, Response}; +use hyper::{Request, Response}; use linkerd_app_core::{ classify, - errors::respond::L5D_PROXY_ERROR, + errors::header::L5D_PROXY_ERROR, identity, io, metrics, - proxy::http, - svc::{self, NewService, Param}, + proxy::http::{self, BoxBody}, + svc::{self, http::TokioExecutor, NewService, Param}, tls, transport::{ClientAddr, OrigDstAddr, Remote, ServerAddr}, - NameAddr, ProxyRuntime, + Error, NameAddr, ProxyRuntime, }; use linkerd_app_test::connect::ConnectFuture; use linkerd_tracing::test::trace_init; use std::{net::SocketAddr, sync::Arc}; use tokio::time; +use tower::ServiceExt; use tracing::Instrument; fn build_server( @@ -32,7 +33,7 @@ fn build_server( where I: io::AsyncRead + io::AsyncWrite + io::PeerAddr + Send + Unpin + 'static, { - Inbound::new(cfg, rt) + Inbound::new(cfg, rt, &mut Default::default()) .with_stack(connect) .map_stack(|cfg, _, s| { s.push_map_target(|t| Param::>::param(&t)) @@ -46,9 +47,10 @@ where #[tokio::test(flavor = "current_thread")] async fn unmeshed_http1_hello_world() { - let mut server = 
hyper::server::conn::Http::new(); - server.http1_only(true); - let mut client = ClientBuilder::new(); + let mut server = hyper::server::conn::http1::Builder::new(); + server.timer(hyper_util::rt::TokioTimer::new()); + let mut client = hyper::client::conn::http1::Builder::new(); + let _trace = trace_init(); // Build a mock "connector" that returns the upstream "server" IO. @@ -63,29 +65,38 @@ async fn unmeshed_http1_hello_world() { let cfg = default_config(); let (rt, _shutdown) = runtime(); let server = build_server(cfg, rt, profiles, connect).new_service(Target::UNMESHED_HTTP1); - let (mut client, bg) = http_util::connect_and_accept(&mut client, server).await; + let (mut client, bg) = http_util::connect_and_accept_http1(&mut client, server).await; let req = Request::builder() .method(http::Method::GET) .uri("http://foo.svc.cluster.local:5550") - .body(Body::default()) + .body(BoxBody::default()) .unwrap(); - let rsp = http_util::http_request(&mut client, req).await.unwrap(); + let rsp = client + .send_request(req) + .await + .expect("HTTP client request failed"); + tracing::info!(?rsp); assert_eq!(rsp.status(), http::StatusCode::OK); let body = http_util::body_to_string(rsp.into_body()).await.unwrap(); assert_eq!(body, "Hello world!"); + // Wait for all of the background tasks to complete, panicking if any returned an error. 
drop(client); - bg.await.expect("background task failed"); + bg.join_all() + .await + .into_iter() + .collect::, Error>>() + .expect("background task failed"); } #[tokio::test(flavor = "current_thread")] async fn downgrade_origin_form() { // Reproduces https://github.com/linkerd/linkerd2/issues/5298 - let mut server = hyper::server::conn::Http::new(); - server.http1_only(true); - let mut client = ClientBuilder::new(); - client.http2_only(true); + let mut server = hyper::server::conn::http1::Builder::new(); + server.timer(hyper_util::rt::TokioTimer::new()); + let mut client = hyper::client::conn::http2::Builder::new(TokioExecutor::new()); + client.timer(hyper_util::rt::TokioTimer::new()); let _trace = trace_init(); // Build a mock "connector" that returns the upstream "server" IO. @@ -100,30 +111,67 @@ async fn downgrade_origin_form() { let cfg = default_config(); let (rt, _shutdown) = runtime(); let server = build_server(cfg, rt, profiles, connect).new_service(Target::UNMESHED_H2); - let (mut client, bg) = http_util::connect_and_accept(&mut client, server).await; + let (mut client, bg) = { + tracing::info!(settings = ?client, "connecting client with"); + let (client_io, server_io) = io::duplex(4096); + + let (client, conn) = client + .handshake(hyper_util::rt::TokioIo::new(client_io)) + .await + .expect("Client must connect"); + + let mut bg = tokio::task::JoinSet::new(); + bg.spawn( + async move { + server.oneshot(server_io).await?; + tracing::info!("proxy serve task complete"); + Ok(()) + } + .instrument(tracing::info_span!("proxy")), + ); + bg.spawn( + async move { + conn.await?; + tracing::info!("client background complete"); + Ok(()) + } + .instrument(tracing::info_span!("client_bg")), + ); + + (client, bg) + }; let req = Request::builder() .method(http::Method::GET) .uri("/") .header(http::header::HOST, "foo.svc.cluster.local") .header("l5d-orig-proto", "HTTP/1.1") - .body(Body::default()) + .body(BoxBody::empty()) .unwrap(); - let rsp = 
http_util::http_request(&mut client, req).await.unwrap(); + let rsp = client + .send_request(req) + .await + .expect("HTTP client request failed"); + tracing::info!(?rsp); assert_eq!(rsp.status(), http::StatusCode::OK); let body = http_util::body_to_string(rsp.into_body()).await.unwrap(); assert_eq!(body, "Hello world!"); + // Wait for all of the background tasks to complete, panicking if any returned an error. drop(client); - bg.await.expect("background task failed"); + bg.join_all() + .await + .into_iter() + .collect::, Error>>() + .expect("background task failed"); } #[tokio::test(flavor = "current_thread")] async fn downgrade_absolute_form() { - let mut server = hyper::server::conn::Http::new(); - server.http1_only(true); - let mut client = ClientBuilder::new(); - client.http2_only(true); + let mut client = hyper::client::conn::http2::Builder::new(TokioExecutor::new()); + client.timer(hyper_util::rt::TokioTimer::new()); + let mut server = hyper::server::conn::http1::Builder::new(); + server.timer(hyper_util::rt::TokioTimer::new()); let _trace = trace_init(); // Build a mock "connector" that returns the upstream "server" IO. 
@@ -138,22 +186,60 @@ async fn downgrade_absolute_form() { let cfg = default_config(); let (rt, _shutdown) = runtime(); let server = build_server(cfg, rt, profiles, connect).new_service(Target::UNMESHED_H2); - let (mut client, bg) = http_util::connect_and_accept(&mut client, server).await; + + let (mut client, bg) = { + tracing::info!(settings = ?client, "connecting client with"); + let (client_io, server_io) = io::duplex(4096); + + let (client, conn) = client + .handshake(hyper_util::rt::TokioIo::new(client_io)) + .await + .expect("Client must connect"); + + let mut bg = tokio::task::JoinSet::new(); + bg.spawn( + async move { + server.oneshot(server_io).await?; + tracing::info!("proxy serve task complete"); + Ok(()) + } + .instrument(tracing::info_span!("proxy")), + ); + bg.spawn( + async move { + conn.await?; + tracing::info!("client background complete"); + Ok(()) + } + .instrument(tracing::info_span!("client_bg")), + ); + + (client, bg) + }; let req = Request::builder() .method(http::Method::GET) .uri("http://foo.svc.cluster.local:5550/") .header(http::header::HOST, "foo.svc.cluster.local") .header("l5d-orig-proto", "HTTP/1.1; absolute-form") - .body(Body::default()) + .body(BoxBody::empty()) .unwrap(); - let rsp = http_util::http_request(&mut client, req).await.unwrap(); + let rsp = client + .send_request(req) + .await + .expect("HTTP client request failed"); + tracing::info!(?rsp); assert_eq!(rsp.status(), http::StatusCode::OK); let body = http_util::body_to_string(rsp.into_body()).await.unwrap(); assert_eq!(body, "Hello world!"); + // Wait for all of the background tasks to complete, panicking if any returned an error. 
drop(client); - bg.await.expect("background task failed"); + bg.join_all() + .await + .into_iter() + .collect::, Error>>() + .expect("background task failed"); } #[tokio::test(flavor = "current_thread")] @@ -165,7 +251,7 @@ async fn http1_bad_gateway_meshed_response_error_header() { // Build a client using the connect that always errors so that responses // are BAD_GATEWAY. - let mut client = ClientBuilder::new(); + let mut client = hyper::client::conn::http1::Builder::new(); let profiles = profile::resolver(); let profile_tx = profiles.profile_tx(NameAddr::from_str_and_port("foo.svc.cluster.local", 5550).unwrap()); @@ -173,25 +259,34 @@ async fn http1_bad_gateway_meshed_response_error_header() { let cfg = default_config(); let (rt, _shutdown) = runtime(); let server = build_server(cfg, rt, profiles, connect).new_service(Target::meshed_http1()); - let (mut client, bg) = http_util::connect_and_accept(&mut client, server).await; + let (mut client, bg) = http_util::connect_and_accept_http1(&mut client, server).await; // Send a request and assert that it is a BAD_GATEWAY with the expected // header message. let req = Request::builder() .method(http::Method::GET) .uri("http://foo.svc.cluster.local:5550") - .body(Body::default()) + .body(BoxBody::default()) .unwrap(); - let response = http_util::http_request(&mut client, req).await.unwrap(); - assert_eq!(response.status(), http::StatusCode::BAD_GATEWAY); + let rsp = client + .send_request(req) + .await + .expect("HTTP client request failed"); + tracing::info!(?rsp); + assert_eq!(rsp.status(), http::StatusCode::BAD_GATEWAY); // NOTE: this does not include a stack error context for that endpoint // because we don't build a real HTTP endpoint stack, which adds error // context to this error, and the client rescue layer is below where the // logical error context is added. 
- check_error_header(response.headers(), "server is not listening"); + check_error_header(rsp.headers(), "client error (Connect)"); + // Wait for all of the background tasks to complete, panicking if any returned an error. drop(client); - bg.await.expect("background task failed"); + bg.join_all() + .await + .into_iter() + .collect::, Error>>() + .expect("background task failed"); } #[tokio::test(flavor = "current_thread")] @@ -203,7 +298,7 @@ async fn http1_bad_gateway_unmeshed_response() { // Build a client using the connect that always errors so that responses // are BAD_GATEWAY. - let mut client = ClientBuilder::new(); + let mut client = hyper::client::conn::http1::Builder::new(); let profiles = profile::resolver(); let profile_tx = profiles.profile_tx(NameAddr::from_str_and_port("foo.svc.cluster.local", 5550).unwrap()); @@ -211,24 +306,33 @@ async fn http1_bad_gateway_unmeshed_response() { let cfg = default_config(); let (rt, _shutdown) = runtime(); let server = build_server(cfg, rt, profiles, connect).new_service(Target::UNMESHED_HTTP1); - let (mut client, bg) = http_util::connect_and_accept(&mut client, server).await; + let (mut client, bg) = http_util::connect_and_accept_http1(&mut client, server).await; // Send a request and assert that it is a BAD_GATEWAY with the expected // header message. 
let req = Request::builder() .method(http::Method::GET) .uri("http://foo.svc.cluster.local:5550") - .body(Body::default()) + .body(BoxBody::default()) .unwrap(); - let response = http_util::http_request(&mut client, req).await.unwrap(); - assert_eq!(response.status(), http::StatusCode::BAD_GATEWAY); + let rsp = client + .send_request(req) + .await + .expect("HTTP client request failed"); + tracing::info!(?rsp); + assert_eq!(rsp.status(), http::StatusCode::BAD_GATEWAY); assert!( - response.headers().get(L5D_PROXY_ERROR).is_none(), + rsp.headers().get(L5D_PROXY_ERROR).is_none(), "response must not contain L5D_PROXY_ERROR header" ); + // Wait for all of the background tasks to complete, panicking if any returned an error. drop(client); - bg.await.expect("background task failed"); + bg.join_all() + .await + .into_iter() + .collect::, Error>>() + .expect("background task failed"); } #[tokio::test(flavor = "current_thread")] @@ -238,12 +342,11 @@ async fn http1_connect_timeout_meshed_response_error_header() { // Build a mock connect that sleeps longer than the default inbound // connect timeout. - let server = hyper::server::conn::Http::new(); - let connect = support::connect().endpoint(Target::addr(), connect_timeout(server)); + let connect = support::connect().endpoint(Target::addr(), connect_timeout()); // Build a client using the connect that always sleeps so that responses // are GATEWAY_TIMEOUT. 
- let mut client = ClientBuilder::new(); + let mut client = hyper::client::conn::http1::Builder::new(); let profiles = profile::resolver(); let profile_tx = profiles.profile_tx(NameAddr::from_str_and_port("foo.svc.cluster.local", 5550).unwrap()); @@ -251,26 +354,35 @@ async fn http1_connect_timeout_meshed_response_error_header() { let cfg = default_config(); let (rt, _shutdown) = runtime(); let server = build_server(cfg, rt, profiles, connect).new_service(Target::meshed_http1()); - let (mut client, bg) = http_util::connect_and_accept(&mut client, server).await; + let (mut client, bg) = http_util::connect_and_accept_http1(&mut client, server).await; // Send a request and assert that it is a GATEWAY_TIMEOUT with the // expected header message. let req = Request::builder() .method(http::Method::GET) .uri("http://foo.svc.cluster.local:5550") - .body(Body::default()) + .body(BoxBody::default()) .unwrap(); - let response = http_util::http_request(&mut client, req).await.unwrap(); - assert_eq!(response.status(), http::StatusCode::GATEWAY_TIMEOUT); + let rsp = client + .send_request(req) + .await + .expect("HTTP client request failed"); + tracing::info!(?rsp); + assert_eq!(rsp.status(), http::StatusCode::GATEWAY_TIMEOUT); // NOTE: this does not include a stack error context for that endpoint // because we don't build a real HTTP endpoint stack, which adds error // context to this error, and the client rescue layer is below where the // logical error context is added. - check_error_header(response.headers(), "connect timed out after 1s"); + check_error_header(rsp.headers(), "client error (Connect)"); + // Wait for all of the background tasks to complete, panicking if any returned an error. 
drop(client); - bg.await.expect("background task failed"); + bg.join_all() + .await + .into_iter() + .collect::, Error>>() + .expect("background task failed"); } #[tokio::test(flavor = "current_thread")] @@ -280,12 +392,11 @@ async fn http1_connect_timeout_unmeshed_response_error_header() { // Build a mock connect that sleeps longer than the default inbound // connect timeout. - let server = hyper::server::conn::Http::new(); - let connect = support::connect().endpoint(Target::addr(), connect_timeout(server)); + let connect = support::connect().endpoint(Target::addr(), connect_timeout()); // Build a client using the connect that always sleeps so that responses // are GATEWAY_TIMEOUT. - let mut client = ClientBuilder::new(); + let mut client = hyper::client::conn::http1::Builder::new(); let profiles = profile::resolver(); let profile_tx = profiles.profile_tx(NameAddr::from_str_and_port("foo.svc.cluster.local", 5550).unwrap()); @@ -293,24 +404,33 @@ async fn http1_connect_timeout_unmeshed_response_error_header() { let cfg = default_config(); let (rt, _shutdown) = runtime(); let server = build_server(cfg, rt, profiles, connect).new_service(Target::UNMESHED_HTTP1); - let (mut client, bg) = http_util::connect_and_accept(&mut client, server).await; + let (mut client, bg) = http_util::connect_and_accept_http1(&mut client, server).await; // Send a request and assert that it is a GATEWAY_TIMEOUT with the // expected header message. 
let req = Request::builder() .method(http::Method::GET) .uri("http://foo.svc.cluster.local:5550") - .body(Body::default()) + .body(BoxBody::empty()) .unwrap(); - let response = http_util::http_request(&mut client, req).await.unwrap(); - assert_eq!(response.status(), http::StatusCode::GATEWAY_TIMEOUT); + let rsp = client + .send_request(req) + .await + .expect("HTTP client request failed"); + tracing::info!(?rsp); + assert_eq!(rsp.status(), http::StatusCode::GATEWAY_TIMEOUT); assert!( - response.headers().get(L5D_PROXY_ERROR).is_none(), + rsp.headers().get(L5D_PROXY_ERROR).is_none(), "response must not contain L5D_PROXY_ERROR header" ); + // Wait for all of the background tasks to complete, panicking if any returned an error. drop(client); - bg.await.expect("background task failed"); + bg.join_all() + .await + .into_iter() + .collect::, Error>>() + .expect("background task failed"); } #[tokio::test(flavor = "current_thread")] @@ -321,8 +441,8 @@ async fn h2_response_meshed_error_header() { let connect = support::connect().endpoint_fn_boxed(Target::addr(), connect_error()); // Build a client using the connect that always errors. - let mut client = ClientBuilder::new(); - client.http2_only(true); + let mut client = hyper::client::conn::http2::Builder::new(TokioExecutor::new()); + client.timer(hyper_util::rt::TokioTimer::new()); let profiles = profile::resolver(); let profile_tx = profiles.profile_tx(NameAddr::from_str_and_port("foo.svc.cluster.local", 5550).unwrap()); @@ -330,25 +450,28 @@ async fn h2_response_meshed_error_header() { let cfg = default_config(); let (rt, _shutdown) = runtime(); let server = build_server(cfg, rt, profiles, connect).new_service(Target::meshed_h2()); - let (mut client, bg) = http_util::connect_and_accept(&mut client, server).await; + let (mut client, bg) = http_util::connect_and_accept_http2(&mut client, server).await; // Send a request and assert that it is SERVICE_UNAVAILABLE with the // expected header message. 
let req = Request::builder() .method(http::Method::GET) .uri("http://foo.svc.cluster.local:5550") - .body(Body::default()) + .body(BoxBody::empty()) .unwrap(); - let response = http_util::http_request(&mut client, req).await.unwrap(); - assert_eq!(response.status(), http::StatusCode::GATEWAY_TIMEOUT); + let rsp = client + .send_request(req) + .await + .expect("HTTP client request failed"); + tracing::info!(?rsp); + assert_eq!(rsp.status(), http::StatusCode::GATEWAY_TIMEOUT); - check_error_header(response.headers(), "service in fail-fast"); + check_error_header(rsp.headers(), "service in fail-fast"); // Drop the client and discard the result of awaiting the proxy background // task. The result is discarded because it hits an error that is related // to the mock implementation and has no significance to the test. - drop(client); - let _ = bg.await; + let _ = bg.join_all().await; } #[tokio::test(flavor = "current_thread")] @@ -359,8 +482,8 @@ async fn h2_response_unmeshed_error_header() { let connect = support::connect().endpoint_fn_boxed(Target::addr(), connect_error()); // Build a client using the connect that always errors. - let mut client = ClientBuilder::new(); - client.http2_only(true); + let mut client = hyper::client::conn::http2::Builder::new(TokioExecutor::new()); + client.timer(hyper_util::rt::TokioTimer::new()); let profiles = profile::resolver(); let profile_tx = profiles.profile_tx(NameAddr::from_str_and_port("foo.svc.cluster.local", 5550).unwrap()); @@ -368,27 +491,30 @@ async fn h2_response_unmeshed_error_header() { let cfg = default_config(); let (rt, _shutdown) = runtime(); let server = build_server(cfg, rt, profiles, connect).new_service(Target::UNMESHED_H2); - let (mut client, bg) = http_util::connect_and_accept(&mut client, server).await; + let (mut client, bg) = http_util::connect_and_accept_http2(&mut client, server).await; // Send a request and assert that it is SERVICE_UNAVAILABLE with the // expected header message. 
let req = Request::builder() .method(http::Method::GET) .uri("http://foo.svc.cluster.local:5550") - .body(Body::default()) + .body(BoxBody::default()) .unwrap(); - let response = http_util::http_request(&mut client, req).await.unwrap(); - assert_eq!(response.status(), http::StatusCode::GATEWAY_TIMEOUT); + let rsp = client + .send_request(req) + .await + .expect("HTTP client request failed"); + tracing::info!(?rsp); + assert_eq!(rsp.status(), http::StatusCode::GATEWAY_TIMEOUT); assert!( - response.headers().get(L5D_PROXY_ERROR).is_none(), + rsp.headers().get(L5D_PROXY_ERROR).is_none(), "response must not contain L5D_PROXY_ERROR header" ); // Drop the client and discard the result of awaiting the proxy background // task. The result is discarded because it hits an error that is related // to the mock implementation and has no significance to the test. - drop(client); - let _ = bg.await; + let _ = bg.join_all().await; } #[tokio::test(flavor = "current_thread")] @@ -399,8 +525,8 @@ async fn grpc_meshed_response_error_header() { let connect = support::connect().endpoint_fn_boxed(Target::addr(), connect_error()); // Build a client using the connect that always errors. - let mut client = ClientBuilder::new(); - client.http2_only(true); + let mut client = hyper::client::conn::http2::Builder::new(TokioExecutor::new()); + client.timer(hyper_util::rt::TokioTimer::new()); let profiles = profile::resolver(); let profile_tx = profiles.profile_tx(NameAddr::from_str_and_port("foo.svc.cluster.local", 5550).unwrap()); @@ -408,7 +534,7 @@ async fn grpc_meshed_response_error_header() { let cfg = default_config(); let (rt, _shutdown) = runtime(); let server = build_server(cfg, rt, profiles, connect).new_service(Target::meshed_h2()); - let (mut client, bg) = http_util::connect_and_accept(&mut client, server).await; + let (mut client, bg) = http_util::connect_and_accept_http2(&mut client, server).await; // Send a request and assert that it is OK with the expected header // message. 
@@ -416,18 +542,21 @@ async fn grpc_meshed_response_error_header() { .method(http::Method::GET) .uri("http://foo.svc.cluster.local:5550") .header(http::header::CONTENT_TYPE, "application/grpc") - .body(Body::default()) + .body(BoxBody::default()) .unwrap(); - let response = http_util::http_request(&mut client, req).await.unwrap(); - assert_eq!(response.status(), http::StatusCode::OK); + let rsp = client + .send_request(req) + .await + .expect("HTTP client request failed"); + tracing::info!(?rsp); + assert_eq!(rsp.status(), http::StatusCode::OK); - check_error_header(response.headers(), "service in fail-fast"); + check_error_header(rsp.headers(), "service in fail-fast"); // Drop the client and discard the result of awaiting the proxy background // task. The result is discarded because it hits an error that is related // to the mock implementation and has no significance to the test. - drop(client); - let _ = bg.await; + let _ = bg.join_all().await; } #[tokio::test(flavor = "current_thread")] @@ -438,8 +567,8 @@ async fn grpc_unmeshed_response_error_header() { let connect = support::connect().endpoint_fn_boxed(Target::addr(), connect_error()); // Build a client using the connect that always errors. 
- let mut client = ClientBuilder::new(); - client.http2_only(true); + let mut client = hyper::client::conn::http2::Builder::new(TokioExecutor::new()); + client.timer(hyper_util::rt::TokioTimer::new()); let profiles = profile::resolver(); let profile_tx = profiles.profile_tx(NameAddr::from_str_and_port("foo.svc.cluster.local", 5550).unwrap()); @@ -447,7 +576,7 @@ async fn grpc_unmeshed_response_error_header() { let cfg = default_config(); let (rt, _shutdown) = runtime(); let server = build_server(cfg, rt, profiles, connect).new_service(Target::UNMESHED_H2); - let (mut client, bg) = http_util::connect_and_accept(&mut client, server).await; + let (mut client, bg) = http_util::connect_and_accept_http2(&mut client, server).await; // Send a request and assert that it is OK with the expected header // message. @@ -455,20 +584,23 @@ async fn grpc_unmeshed_response_error_header() { .method(http::Method::GET) .uri("http://foo.svc.cluster.local:5550") .header(http::header::CONTENT_TYPE, "application/grpc") - .body(Body::default()) + .body(BoxBody::default()) .unwrap(); - let response = http_util::http_request(&mut client, req).await.unwrap(); - assert_eq!(response.status(), http::StatusCode::OK); + let rsp = client + .send_request(req) + .await + .expect("HTTP client request failed"); + tracing::info!(?rsp); + assert_eq!(rsp.status(), http::StatusCode::OK); assert!( - response.headers().get(L5D_PROXY_ERROR).is_none(), + rsp.headers().get(L5D_PROXY_ERROR).is_none(), "response must not contain L5D_PROXY_ERROR header" ); // Drop the client and discard the result of awaiting the proxy background // task. The result is discarded because it hits an error that is related // to the mock implementation and has no significance to the test. - drop(client); - let _ = bg.await; + let _ = bg.join_all().await; } #[tokio::test(flavor = "current_thread")] @@ -477,8 +609,8 @@ async fn grpc_response_class() { // Build a mock connector serves a gRPC server that returns errors. 
let connect = { - let mut server = hyper::server::conn::Http::new(); - server.http2_only(true); + let mut server = hyper::server::conn::http2::Builder::new(TokioExecutor::new()); + server.timer(hyper_util::rt::TokioTimer::new()); support::connect().endpoint_fn_boxed( Target::addr(), grpc_status_server(server, tonic::Code::Unknown), @@ -486,8 +618,8 @@ async fn grpc_response_class() { }; // Build a client using the connect that always errors. - let mut client = ClientBuilder::new(); - client.http2_only(true); + let mut client = hyper::client::conn::http2::Builder::new(TokioExecutor::new()); + client.timer(hyper_util::rt::TokioTimer::new()); let profiles = profile::resolver(); let profile_tx = profiles.profile_tx(NameAddr::from_str_and_port("foo.svc.cluster.local", 5550).unwrap()); @@ -500,7 +632,7 @@ async fn grpc_response_class() { .http_endpoint .into_report(time::Duration::from_secs(3600)); let server = build_server(cfg, rt, profiles, connect).new_service(Target::meshed_h2()); - let (mut client, bg) = http_util::connect_and_accept(&mut client, server).await; + let (mut client, bg) = http_util::connect_and_accept_http2(&mut client, server).await; // Send a request and assert that it is OK with the expected header // message. 
@@ -508,29 +640,43 @@ async fn grpc_response_class() { .method(http::Method::POST) .uri("http://foo.svc.cluster.local:5550") .header(http::header::CONTENT_TYPE, "application/grpc") - .body(Body::default()) + .body(BoxBody::default()) .unwrap(); - let mut response = http_util::http_request(&mut client, req).await.unwrap(); - assert_eq!(response.status(), http::StatusCode::OK); + let rsp = client + .send_request(req) + .await + .expect("HTTP client request failed"); + tracing::info!(?rsp); + assert_eq!(rsp.status(), http::StatusCode::OK); - response.body_mut().data().await; - let trls = response.body_mut().trailers().await.unwrap().unwrap(); + use http_body_util::BodyExt; + let mut body = rsp.into_body(); + let trls = body + .frame() + .await + .unwrap() + .unwrap() + .into_trailers() + .expect("trailers frame"); assert_eq!(trls.get("grpc-status").unwrap().to_str().unwrap(), "2"); let response_total = metrics .get_response_total( &metrics::EndpointLabels::Inbound(metrics::InboundEndpointLabels { tls: Target::meshed_h2().1, - authority: Some("foo.svc.cluster.local:5550".parse().unwrap()), + authority: None, target_addr: "127.0.0.1:80".parse().unwrap(), policy: metrics::RouteAuthzLabels { route: metrics::RouteLabels { - server: metrics::ServerLabel(Arc::new(policy::Meta::Resource { - group: "policy.linkerd.io".into(), - kind: "server".into(), - name: "testsrv".into(), - })), + server: metrics::ServerLabel( + Arc::new(policy::Meta::Resource { + group: "policy.linkerd.io".into(), + kind: "server".into(), + name: "testsrv".into(), + }), + 80, + ), route: policy::Meta::new_default("default"), }, authz: Arc::new(policy::Meta::Resource { @@ -546,24 +692,124 @@ async fn grpc_response_class() { .expect("response_total not found"); assert_eq!(response_total, 1.0); - drop((client, bg)); + drop(bg); +} + +#[tokio::test(flavor = "current_thread")] +async fn unsafe_authority_labels_true() { + let _trace = trace_init(); + + let mut cfg = default_config(); + 
cfg.unsafe_authority_labels = true; + test_unsafe_authority_labels(cfg, Some("foo.svc.cluster.local:5550".parse().unwrap())).await; +} + +#[tokio::test(flavor = "current_thread")] +async fn unsafe_authority_labels_false() { + let _trace = trace_init(); + + let cfg = default_config(); + test_unsafe_authority_labels(cfg, None).await; +} + +async fn test_unsafe_authority_labels( + cfg: Config, + expected_authority: Option, +) { + let connect = { + let mut server = hyper::server::conn::http1::Builder::new(); + server.timer(hyper_util::rt::TokioTimer::new()); + support::connect().endpoint_fn_boxed(Target::addr(), hello_server(server)) + }; + + // Build a client using the connect that always errors. + let mut client = hyper::client::conn::http1::Builder::new(); + let profiles = profile::resolver(); + let profile_tx = + profiles.profile_tx(NameAddr::from_str_and_port("foo.svc.cluster.local", 5550).unwrap()); + profile_tx.send(profile::Profile::default()).unwrap(); + + let (rt, _shutdown) = runtime(); + let metrics = rt + .metrics + .clone() + .http_endpoint + .into_report(time::Duration::from_secs(3600)); + let server = build_server(cfg, rt, profiles, connect).new_service(Target::meshed_http1()); + let (mut client, bg) = http_util::connect_and_accept_http1(&mut client, server).await; + + // Send a request and assert that it is OK with the expected header + // message. 
+ let req = Request::builder() + .method(http::Method::POST) + .uri("http://foo.svc.cluster.local:5550") + .header(http::header::CONTENT_TYPE, "text/plain") + .body(BoxBody::default()) + .unwrap(); + + let rsp = client + .send_request(req) + .await + .expect("HTTP client request failed"); + tracing::info!(?rsp); + assert_eq!(rsp.status(), http::StatusCode::OK); + + use http_body_util::BodyExt; + let mut body = rsp.into_body(); + while let Some(Ok(_)) = body.frame().await {} + + tracing::info!("{metrics:#?}"); + let response_total = metrics + .get_response_total( + &metrics::EndpointLabels::Inbound(metrics::InboundEndpointLabels { + tls: Target::meshed_http1().1, + authority: expected_authority, + target_addr: "127.0.0.1:80".parse().unwrap(), + policy: metrics::RouteAuthzLabels { + route: metrics::RouteLabels { + server: metrics::ServerLabel( + Arc::new(policy::Meta::Resource { + group: "policy.linkerd.io".into(), + kind: "server".into(), + name: "testsrv".into(), + }), + 80, + ), + route: policy::Meta::new_default("default"), + }, + authz: Arc::new(policy::Meta::Resource { + group: "policy.linkerd.io".into(), + kind: "serverauthorization".into(), + name: "testsaz".into(), + }), + }, + }), + Some(http::StatusCode::OK), + &classify::Class::Http(Ok(http::StatusCode::OK)), + ) + .expect("response_total not found"); + assert_eq!(response_total, 1.0); + + drop(bg); } #[tracing::instrument] fn hello_server( - http: hyper::server::conn::Http, + server: hyper::server::conn::http1::Builder, ) -> impl Fn(Remote) -> io::Result { move |endpoint| { let span = tracing::info_span!("hello_server", ?endpoint); let _e = span.enter(); tracing::info!("mock connecting"); let (client_io, server_io) = support::io::duplex(4096); - let hello_svc = hyper::service::service_fn(|request: Request| async move { - tracing::info!(?request); - Ok::<_, io::Error>(Response::new(Body::from("Hello world!"))) - }); + let hello_svc = + hyper::service::service_fn(|request: Request| async move { + 
tracing::info!(?request); + Ok::<_, io::Error>(Response::new(BoxBody::from_static("Hello world!"))) + }); tokio::spawn( - http.serve_connection(server_io, hello_svc) + server + .serve_connection(hyper_util::rt::TokioIo::new(server_io), hello_svc) .in_current_span(), ); Ok(io::BoxedIo::new(client_io)) @@ -572,7 +818,7 @@ fn hello_server( #[tracing::instrument] fn grpc_status_server( - http: hyper::server::conn::Http, + server: hyper::server::conn::http2::Builder, status: tonic::Code, ) -> impl Fn(Remote) -> io::Result { move |endpoint| { @@ -581,26 +827,33 @@ fn grpc_status_server( tracing::info!("mock connecting"); let (client_io, server_io) = support::io::duplex(4096); tokio::spawn( - http.serve_connection( - server_io, - hyper::service::service_fn(move |request: Request| async move { - tracing::info!(?request); - let (mut tx, rx) = Body::channel(); - tokio::spawn(async move { - let mut trls = ::http::HeaderMap::new(); - trls.insert("grpc-status", (status as u32).to_string().parse().unwrap()); - tx.send_trailers(trls).await - }); - Ok::<_, io::Error>( - http::Response::builder() - .version(::http::Version::HTTP_2) - .header("content-type", "application/grpc") - .body(rx) - .unwrap(), - ) - }), - ) - .in_current_span(), + server + .serve_connection( + hyper_util::rt::TokioIo::new(server_io), + hyper::service::service_fn( + move |request: Request| async move { + tracing::info!(?request); + let (mut tx, rx) = + http_body_util::channel::Channel::::new(1024); + tokio::spawn(async move { + let mut trls = ::http::HeaderMap::new(); + trls.insert( + "grpc-status", + (status as u32).to_string().parse().unwrap(), + ); + tx.send_trailers(trls).await + }); + Ok::<_, io::Error>( + http::Response::builder() + .version(::http::Version::HTTP_2) + .header("content-type", "application/grpc") + .body(rx) + .unwrap(), + ) + }, + ), + ) + .in_current_span(), ); Ok(io::BoxedIo::new(client_io)) } @@ -617,9 +870,7 @@ fn connect_error() -> impl Fn(Remote) -> io::Result { } 
#[tracing::instrument] -fn connect_timeout( - http: hyper::server::conn::Http, -) -> Box) -> ConnectFuture + Send> { +fn connect_timeout() -> Box) -> ConnectFuture + Send> { Box::new(move |endpoint| { let span = tracing::info_span!("connect_timeout", ?endpoint); Box::pin( @@ -636,7 +887,7 @@ fn connect_timeout( } #[derive(Clone, Debug)] -struct Target(http::Version, tls::ConditionalServerTls); +struct Target(http::Variant, tls::ConditionalServerTls); #[track_caller] fn check_error_header(hdrs: &::http::HeaderMap, expected: &str) { @@ -655,17 +906,17 @@ fn check_error_header(hdrs: &::http::HeaderMap, expected: &str) { impl Target { const UNMESHED_HTTP1: Self = Self( - http::Version::Http1, + http::Variant::Http1, tls::ConditionalServerTls::None(tls::NoServerTls::NoClientHello), ); const UNMESHED_H2: Self = Self( - http::Version::H2, + http::Variant::H2, tls::ConditionalServerTls::None(tls::NoServerTls::NoClientHello), ); fn meshed_http1() -> Self { Self( - http::Version::Http1, + http::Variant::Http1, tls::ConditionalServerTls::Some(tls::ServerTls::Established { client_id: Some(tls::ClientId( "foosa.barns.serviceaccount.identity.linkerd.cluster.local" @@ -679,7 +930,7 @@ impl Target { fn meshed_h2() -> Self { Self( - http::Version::H2, + http::Variant::H2, tls::ConditionalServerTls::Some(tls::ServerTls::Established { client_id: Some(tls::ClientId( "foosa.barns.serviceaccount.identity.linkerd.cluster.local" @@ -714,8 +965,8 @@ impl svc::Param> for Target { } } -impl svc::Param for Target { - fn param(&self) -> http::Version { +impl svc::Param for Target { + fn param(&self) -> http::Variant { self.0 } } @@ -748,6 +999,7 @@ impl svc::Param for Target { kind: "server".into(), name: "testsrv".into(), }), + local_rate_limit: Default::default(), }, ); policy @@ -756,11 +1008,14 @@ impl svc::Param for Target { impl svc::Param for Target { fn param(&self) -> policy::ServerLabel { - policy::ServerLabel(Arc::new(policy::Meta::Resource { - group: "policy.linkerd.io".into(), - 
kind: "server".into(), - name: "testsrv".into(), - })) + policy::ServerLabel( + Arc::new(policy::Meta::Resource { + group: "policy.linkerd.io".into(), + kind: "server".into(), + name: "testsrv".into(), + }), + 80, + ) } } diff --git a/linkerd/app/inbound/src/lib.rs b/linkerd/app/inbound/src/lib.rs index 95e03ee7e3..80772332e6 100644 --- a/linkerd/app/inbound/src/lib.rs +++ b/linkerd/app/inbound/src/lib.rs @@ -18,12 +18,17 @@ mod server; #[cfg(any(test, feature = "test-util", fuzzing))] pub mod test_util; -pub use self::{metrics::InboundMetrics, policy::DefaultPolicy}; +#[cfg(fuzzing)] +pub use self::http::fuzz as http_fuzz; +pub use self::{ + detect::MetricsFamilies as DetectMetrics, metrics::InboundMetrics, policy::DefaultPolicy, +}; use linkerd_app_core::{ config::{ConnectConfig, ProxyConfig, QueueConfig}, drain, - http_tracing::OpenCensusSink, + http_tracing::SpanSink, identity, io, + metrics::prom, proxy::{tap, tcp}, svc, transport::{self, Remote, ServerAddr}, @@ -33,9 +38,6 @@ use std::{fmt::Debug, time::Duration}; use thiserror::Error; use tracing::debug_span; -#[cfg(fuzzing)] -pub use self::http::fuzz as http_fuzz; - #[derive(Clone, Debug)] pub struct Config { pub allow_discovery: NameMatch, @@ -53,6 +55,9 @@ pub struct Config { /// Configures how HTTP requests are buffered *for each inbound port*. pub http_request_queue: QueueConfig, + + /// Enables unsafe authority labels. 
+ pub unsafe_authority_labels: bool, } #[derive(Clone)] @@ -67,7 +72,7 @@ struct Runtime { metrics: InboundMetrics, identity: identity::creds::Receiver, tap: tap::Registry, - span_sink: OpenCensusSink, + span_sink: Option, drain: drain::Watch, } @@ -149,9 +154,9 @@ impl Inbound { } impl Inbound<()> { - pub fn new(config: Config, runtime: ProxyRuntime) -> Self { + pub fn new(config: Config, runtime: ProxyRuntime, prom: &mut prom::Registry) -> Self { let runtime = Runtime { - metrics: InboundMetrics::new(runtime.metrics), + metrics: InboundMetrics::new(runtime.metrics, prom), identity: runtime.identity, tap: runtime.tap, span_sink: runtime.span_sink, @@ -167,7 +172,11 @@ impl Inbound<()> { #[cfg(any(test, feature = "test-util"))] pub fn for_test() -> (Self, drain::Signal) { let (rt, drain) = test_util::runtime(); - let this = Self::new(test_util::default_config(), rt); + let this = Self::new( + test_util::default_config(), + rt, + &mut prom::Registry::default(), + ); (this, drain) } @@ -201,6 +210,7 @@ impl Inbound<()> { // forwarding and HTTP proxying). let ConnectConfig { ref keepalive, + ref user_timeout, ref timeout, .. } = config.proxy.connect; @@ -209,7 +219,7 @@ impl Inbound<()> { #[error("inbound connection must not target port {0}")] struct Loop(u16); - svc::stack(transport::ConnectTcp::new(*keepalive)) + svc::stack(transport::ConnectTcp::new(*keepalive, *user_timeout)) // Limits the time we wait for a connection to be established. .push_connect_timeout(*timeout) // Prevent connections that would target the inbound proxy port from looping. diff --git a/linkerd/app/inbound/src/metrics.rs b/linkerd/app/inbound/src/metrics.rs index 6f803eb6d1..4c4ad8f33b 100644 --- a/linkerd/app/inbound/src/metrics.rs +++ b/linkerd/app/inbound/src/metrics.rs @@ -25,16 +25,27 @@ pub struct InboundMetrics { /// Holds metrics that are common to both inbound and outbound proxies. 
These metrics are /// reported separately pub proxy: Proxy, + + pub detect: crate::detect::MetricsFamilies, + pub direct: crate::direct::MetricsFamilies, } impl InboundMetrics { - pub(crate) fn new(proxy: Proxy) -> Self { + pub(crate) fn new(proxy: Proxy, reg: &mut prom::Registry) -> Self { + let detect = + crate::detect::MetricsFamilies::register(reg.sub_registry_with_prefix("tcp_detect")); + let direct = crate::direct::MetricsFamilies::register( + reg.sub_registry_with_prefix("tcp_transport_header"), + ); + Self { http_authz: authz::HttpAuthzMetrics::default(), http_errors: error::HttpErrorMetrics::default(), tcp_authz: authz::TcpAuthzMetrics::default(), tcp_errors: error::TcpErrorMetrics::default(), proxy, + detect, + direct, } } } diff --git a/linkerd/app/inbound/src/metrics/authz.rs b/linkerd/app/inbound/src/metrics/authz.rs index d79694e9d9..5f53c1e097 100644 --- a/linkerd/app/inbound/src/metrics/authz.rs +++ b/linkerd/app/inbound/src/metrics/authz.rs @@ -1,4 +1,4 @@ -use crate::policy::{AllowPolicy, HttpRoutePermit, ServerPermit}; +use crate::policy::{AllowPolicy, HttpRoutePermit, Meta, ServerPermit}; use linkerd_app_core::{ metrics::{ metrics, Counter, FmtLabels, FmtMetrics, RouteAuthzLabels, RouteLabels, ServerAuthzLabels, @@ -21,6 +21,10 @@ metrics! 
{ "The total number of inbound HTTP requests that could not be associated with a route" }, + inbound_http_local_ratelimit_total: Counter { + "The total number of inbound HTTP requests that were rate-limited" + }, + inbound_tcp_authz_allow_total: Counter { "The total number of inbound TCP connections that were authorized" }, @@ -43,6 +47,7 @@ struct HttpInner { allow: Mutex>, deny: Mutex>, route_not_found: Mutex>, + http_local_rate_limit: Mutex>, } #[derive(Debug, Default)] @@ -52,6 +57,13 @@ struct TcpInner { terminate: Mutex>, } +#[derive(Clone, Debug, Eq, PartialEq, Hash)] +pub struct HTTPLocalRateLimitLabels { + pub server: ServerLabel, + pub rate_limit: Option>, + pub scope: &'static str, +} + #[derive(Debug, Hash, PartialEq, Eq)] struct Key { target: TargetAddr, @@ -63,6 +75,7 @@ type ServerKey = Key; type ServerAuthzKey = Key; type RouteKey = Key; type RouteAuthzKey = Key; +type HttpLocalRateLimitKey = Key; // === impl HttpAuthzMetrics === @@ -98,6 +111,20 @@ impl HttpAuthzMetrics { .or_default() .incr(); } + + pub fn ratelimit( + &self, + labels: HTTPLocalRateLimitLabels, + dst: OrigDstAddr, + tls: tls::ConditionalServerTls, + ) { + self.0 + .http_local_rate_limit + .lock() + .entry(HttpLocalRateLimitKey::new(labels, dst, tls)) + .or_default() + .incr(); + } } impl FmtMetrics for HttpAuthzMetrics { @@ -140,6 +167,19 @@ impl FmtMetrics for HttpAuthzMetrics { } drop(route_not_found); + let local_ratelimit = self.0.http_local_rate_limit.lock(); + if !local_ratelimit.is_empty() { + inbound_http_local_ratelimit_total.fmt_help(f)?; + inbound_http_local_ratelimit_total.fmt_scopes( + f, + local_ratelimit + .iter() + .map(|(k, c)| ((k.target, (&k.labels, TlsAccept(&k.tls))), c)), + |c| c, + )?; + } + drop(local_ratelimit); + Ok(()) } } @@ -202,6 +242,26 @@ impl FmtMetrics for TcpAuthzMetrics { } } +// === impl HTTPLocalRateLimitLabels === + +impl FmtLabels for HTTPLocalRateLimitLabels { + fn fmt_labels(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + 
self.server.fmt_labels(f)?; + if let Some(rl) = &self.rate_limit { + write!( + f, + ",ratelimit_group=\"{}\",ratelimit_kind=\"{}\",ratelimit_name=\"{}\",ratelimit_scope=\"{}\"", + rl.group(), + rl.kind(), + rl.name(), + self.scope, + ) + } else { + write!(f, ",ratelimit_scope=\"{}\"", self.scope) + } + } +} + // === impl Key === impl Key { diff --git a/linkerd/app/inbound/src/policy.rs b/linkerd/app/inbound/src/policy.rs index 176c92551d..db33f7a37f 100644 --- a/linkerd/app/inbound/src/policy.rs +++ b/linkerd/app/inbound/src/policy.rs @@ -5,6 +5,8 @@ mod http; mod store; mod tcp; +use crate::metrics::authz::HTTPLocalRateLimitLabels; + pub(crate) use self::store::Store; pub use self::{ config::Config, @@ -27,7 +29,8 @@ pub use linkerd_proxy_server_policy::{ authz::Suffix, grpc::Route as GrpcRoute, http::{filter::Redirection, Route as HttpRoute}, - route, Authentication, Authorization, Meta, Protocol, RoutePolicy, ServerPolicy, + route, Authentication, Authorization, Meta, Protocol, RateLimitError, RoutePolicy, + ServerPolicy, }; use std::sync::Arc; use thiserror::Error; @@ -44,7 +47,7 @@ pub trait GetPolicy { fn get_policy(&self, dst: OrigDstAddr) -> AllowPolicy; } -#[derive(Clone, Debug, PartialEq, Eq)] +#[derive(Clone, Debug)] pub enum DefaultPolicy { Allow(ServerPolicy), Deny, @@ -90,6 +93,7 @@ impl From for ServerPolicy { DefaultPolicy::Allow(p) => p, DefaultPolicy::Deny => ServerPolicy { protocol: Protocol::Opaque(Arc::new([])), + local_rate_limit: Default::default(), meta: Meta::new_default("deny"), }, } @@ -129,7 +133,21 @@ impl AllowPolicy { #[inline] pub fn server_label(&self) -> ServerLabel { - ServerLabel(self.server.borrow().meta.clone()) + ServerLabel(self.server.borrow().meta.clone(), self.dst.port()) + } + + pub fn ratelimit_label(&self, error: &RateLimitError) -> HTTPLocalRateLimitLabels { + use RateLimitError::*; + + let scope = match error { + Total(_) => "total", + PerIdentity(_) | Override(_) => "identity", + }; + HTTPLocalRateLimitLabels { + 
server: self.server_label(), + rate_limit: self.server.borrow().local_rate_limit.meta(), + scope, + } } async fn changed(&mut self) { @@ -202,7 +220,7 @@ impl ServerPermit { protocol: server.protocol.clone(), labels: ServerAuthzLabels { authz: authz.meta.clone(), - server: ServerLabel(server.meta.clone()), + server: ServerLabel(server.meta.clone(), dst.port()), }, } } diff --git a/linkerd/app/inbound/src/policy/api.rs b/linkerd/app/inbound/src/policy/api.rs index a15a68a19c..11bc576b60 100644 --- a/linkerd/app/inbound/src/policy/api.rs +++ b/linkerd/app/inbound/src/policy/api.rs @@ -34,8 +34,7 @@ static INVALID_POLICY: once_cell::sync::OnceCell = once_cell::sync impl Api where S: tonic::client::GrpcService + Clone, - S::ResponseBody: - http::HttpBody + Default + Send + 'static, + S::ResponseBody: http::Body + Send + 'static, { pub(super) fn new( workload: Arc, @@ -60,8 +59,7 @@ impl Service for Api where S: tonic::client::GrpcService, S: Clone + Send + Sync + 'static, - S::ResponseBody: - http::HttpBody + Default + Send + 'static, + S::ResponseBody: http::Body + Send + 'static, S::Future: Send + 'static, { type Response = diff --git a/linkerd/app/inbound/src/policy/config.rs b/linkerd/app/inbound/src/policy/config.rs index 4249132386..c9c5bd3756 100644 --- a/linkerd/app/inbound/src/policy/config.rs +++ b/linkerd/app/inbound/src/policy/config.rs @@ -42,8 +42,8 @@ impl Config { where C: tonic::client::GrpcService, C: Clone + Unpin + Send + Sync + 'static, - C::ResponseBody: http::HttpBody, - C::ResponseBody: Default + Send + 'static, + C::ResponseBody: http::Body, + C::ResponseBody: Send + 'static, C::Future: Send, { match self { diff --git a/linkerd/app/inbound/src/policy/defaults.rs b/linkerd/app/inbound/src/policy/defaults.rs index e9104b53a7..29a5dd01da 100644 --- a/linkerd/app/inbound/src/policy/defaults.rs +++ b/linkerd/app/inbound/src/policy/defaults.rs @@ -36,6 +36,15 @@ pub fn cluster_unauthenticated( ) } +pub fn audit(timeout: Duration) -> ServerPolicy { + 
mk( + "audit", + all_nets(), + Authentication::Unauthenticated, + timeout, + ) +} + pub fn all_mtls_unauthenticated(timeout: Duration) -> ServerPolicy { mk( "all-tls-unauthenticated", @@ -79,5 +88,6 @@ fn mk( ServerPolicy { meta: Meta::new_default(name), protocol, + local_rate_limit: Default::default(), } } diff --git a/linkerd/app/inbound/src/policy/http.rs b/linkerd/app/inbound/src/policy/http.rs index 7f446f7778..e3ac922f48 100644 --- a/linkerd/app/inbound/src/policy/http.rs +++ b/linkerd/app/inbound/src/policy/http.rs @@ -9,7 +9,7 @@ use linkerd_app_core::{ svc::{self, ServiceExt}, tls, transport::{ClientAddr, OrigDstAddr, Remote}, - Error, Result, + Conditional, Error, Result, }; use linkerd_proxy_server_policy::{grpc, http, route::RouteMatch}; use std::{sync::Arc, task}; @@ -171,6 +171,8 @@ where } }; + try_fut!(self.check_rate_limit()); + future::Either::Left( self.inner .new_service((permit, self.target.clone())) @@ -202,7 +204,25 @@ impl HttpPolicyService { .iter() .find(|a| super::is_authorized(a, self.connection.client, &self.connection.tls)) { - Some(authz) => authz, + Some(authz) => { + if authz.meta.is_audit() { + tracing::info!( + server.group = %labels.server.0.group(), + server.kind = %labels.server.0.kind(), + server.name = %labels.server.0.name(), + route.group = %labels.route.group(), + route.kind = %labels.route.kind(), + route.name = %labels.route.name(), + client.tls = ?self.connection.tls, + client.ip = %self.connection.client.ip(), + authz.group = %authz.meta.group(), + authz.kind = %authz.meta.kind(), + authz.name = %authz.meta.name(), + "Request allowed", + ); + } + authz + } None => { tracing::info!( server.group = %labels.server.0.group(), @@ -269,6 +289,28 @@ impl HttpPolicyService { .route_not_found(labels, self.connection.dst, self.connection.tls.clone()); HttpRouteNotFound(()).into() } + + fn check_rate_limit(&self) -> Result<()> { + let id = match self.connection.tls { + Conditional::Some(tls::ServerTls::Established { + client_id: 
Some(tls::ClientId(ref id)), + .. + }) => Some(id), + _ => None, + }; + self.policy + .borrow() + .local_rate_limit + .check(id) + .map_err(|err| { + self.metrics.ratelimit( + self.policy.ratelimit_label(&err), + self.connection.dst, + self.connection.tls.clone(), + ); + err.into() + }) + } } fn apply_http_filters( diff --git a/linkerd/app/inbound/src/policy/http/tests.rs b/linkerd/app/inbound/src/policy/http/tests.rs index 0369207938..e3a14a0240 100644 --- a/linkerd/app/inbound/src/policy/http/tests.rs +++ b/linkerd/app/inbound/src/policy/http/tests.rs @@ -1,6 +1,8 @@ use super::*; use crate::policy::{Authentication, Authorization, Meta, Protocol, ServerPolicy}; use linkerd_app_core::{svc::Service, Infallible}; +use linkerd_http_box::BoxBody; +use linkerd_proxy_server_policy::{LocalRateLimit, RateLimitError}; macro_rules! conn { ($client:expr, $dst:expr) => {{ @@ -19,7 +21,7 @@ macro_rules! conn { } macro_rules! new_svc { - ($proto:expr, $conn:expr, $rsp:expr) => {{ + ($proto:expr, $conn:expr, $rsp:expr, $rl: expr) => {{ let (policy, tx) = AllowPolicy::for_test( $conn.dst, ServerPolicy { @@ -29,6 +31,7 @@ macro_rules! new_svc { kind: "Server".into(), name: "testsrv".into(), }), + local_rate_limit: Arc::new($rl), }, ); let svc = HttpPolicyService { @@ -38,7 +41,7 @@ macro_rules! new_svc { metrics: HttpAuthzMetrics::default(), inner: |(permit, _): (HttpRoutePermit, ())| { let f = $rsp; - svc::mk(move |req: ::http::Request| { + svc::mk(move |req: ::http::Request| { futures::future::ready((f)(permit.clone(), req)) }) }, @@ -46,19 +49,28 @@ macro_rules! 
new_svc { (svc, tx) }}; - ($proto:expr) => {{ + ($proto:expr, $conn:expr, $rsp:expr) => {{ + new_svc!($proto, $conn, $rsp, Default::default()) + }}; + + ($proto:expr, $rl:expr) => {{ new_svc!( $proto, conn!(), - |permit: HttpRoutePermit, _req: ::http::Request| { + |permit: HttpRoutePermit, _req: ::http::Request| { let mut rsp = ::http::Response::builder() - .body(hyper::Body::default()) + .body(BoxBody::default()) .unwrap(); rsp.extensions_mut().insert(permit.clone()); Ok::<_, Infallible>(rsp) - } + }, + $rl ) }}; + + ($proto:expr) => {{ + new_svc!($proto, Default::default()) + }}; } #[tokio::test(flavor = "current_thread")] @@ -108,11 +120,7 @@ async fn http_route() { // Test that authorization policies allow requests: let rsp = svc - .call( - ::http::Request::builder() - .body(hyper::Body::default()) - .unwrap(), - ) + .call(::http::Request::builder().body(BoxBody::default()).unwrap()) .await .expect("serves"); let permit = rsp @@ -126,7 +134,7 @@ async fn http_route() { .call( ::http::Request::builder() .method(::http::Method::POST) - .body(hyper::Body::default()) + .body(BoxBody::default()) .unwrap(), ) .await @@ -138,7 +146,7 @@ async fn http_route() { .call( ::http::Request::builder() .method(::http::Method::DELETE) - .body(hyper::Body::default()) + .body(BoxBody::default()) .unwrap(), ) .await @@ -197,15 +205,12 @@ async fn http_route() { }, ], }])), + local_rate_limit: Arc::new(Default::default()), }) .expect("must send"); assert!(svc - .call( - ::http::Request::builder() - .body(hyper::Body::default()) - .unwrap(), - ) + .call(::http::Request::builder().body(BoxBody::default()).unwrap(),) .await .expect_err("fails") .is::()); @@ -214,7 +219,7 @@ async fn http_route() { .call( ::http::Request::builder() .method(::http::Method::POST) - .body(hyper::Body::default()) + .body(BoxBody::default()) .unwrap(), ) .await @@ -225,7 +230,7 @@ async fn http_route() { .call( ::http::Request::builder() .method(::http::Method::DELETE) - .body(hyper::Body::default()) + 
.body(BoxBody::default()) .unwrap(), ) .await @@ -273,14 +278,14 @@ async fn http_filter_header() { }, }], }])); - let inner = |permit: HttpRoutePermit, req: ::http::Request| -> Result<_> { + let inner = |permit: HttpRoutePermit, req: ::http::Request| -> Result<_> { assert_eq!(req.headers().len(), 1); assert_eq!( req.headers().get("testkey"), Some(&"testval".parse().unwrap()) ); let mut rsp = ::http::Response::builder() - .body(hyper::Body::default()) + .body(BoxBody::default()) .unwrap(); rsp.extensions_mut().insert(permit); Ok(rsp) @@ -288,11 +293,7 @@ async fn http_filter_header() { let (mut svc, _tx) = new_svc!(proto, conn!(), inner); let rsp = svc - .call( - ::http::Request::builder() - .body(hyper::Body::default()) - .unwrap(), - ) + .call(::http::Request::builder().body(BoxBody::default()).unwrap()) .await .expect("serves"); let permit = rsp @@ -342,16 +343,12 @@ async fn http_filter_inject_failure() { }], }])); let inner = |_: HttpRoutePermit, - _: ::http::Request| - -> Result<::http::Response> { unreachable!() }; + _: ::http::Request| + -> Result<::http::Response> { unreachable!() }; let (mut svc, _tx) = new_svc!(proto, conn!(), inner); let err = svc - .call( - ::http::Request::builder() - .body(hyper::Body::default()) - .unwrap(), - ) + .call(::http::Request::builder().body(BoxBody::default()).unwrap()) .await .expect_err("fails"); assert_eq!( @@ -363,6 +360,82 @@ async fn http_filter_inject_failure() { ); } +#[tokio::test(flavor = "current_thread")] +async fn rate_limit_allow() { + use linkerd_app_core::{Ipv4Net, Ipv6Net}; + + let rmeta = Meta::new_default("default"); + + // Rate-limit with plenty of room for two consecutive requests + let rl = LocalRateLimit::new_no_overrides_for_test(Some(10), Some(5)); + + let authorizations = Arc::new([Authorization { + meta: rmeta.clone(), + networks: vec![Ipv4Net::default().into(), Ipv6Net::default().into()], + authentication: Authentication::Unauthenticated, + }]); + + let (mut svc, _tx) = new_svc!( + 
Protocol::Http1(Arc::new([http::default(authorizations.clone())])), + rl + ); + + // First request should be allowed + let rsp = svc + .call(::http::Request::builder().body(BoxBody::default()).unwrap()) + .await + .expect("serves"); + assert_eq!(rsp.status(), ::http::StatusCode::OK); + + // Second request should be allowed as well + let rsp = svc + .call(::http::Request::builder().body(BoxBody::default()).unwrap()) + .await + .expect("serves"); + assert_eq!(rsp.status(), ::http::StatusCode::OK); +} + +#[tokio::test(flavor = "current_thread")] +async fn rate_limit_deny() { + use linkerd_app_core::{Ipv4Net, Ipv6Net}; + + let rmeta = Meta::new_default("default"); + + // Rate-limit with room for only one request per second + let rl = LocalRateLimit::new_no_overrides_for_test(Some(10), Some(1)); + + let authorizations = Arc::new([Authorization { + meta: rmeta.clone(), + networks: vec![Ipv4Net::default().into(), Ipv6Net::default().into()], + authentication: Authentication::Unauthenticated, + }]); + + let (mut svc, _tx) = new_svc!( + Protocol::Http1(Arc::new([http::default(authorizations.clone())])), + rl + ); + + // First request should be allowed + let rsp = svc + .call(::http::Request::builder().body(BoxBody::default()).unwrap()) + .await + .expect("serves"); + assert_eq!(rsp.status(), ::http::StatusCode::OK); + + // Second request should be denied + let rsp = svc + .call(::http::Request::builder().body(BoxBody::default()).unwrap()) + .await + .expect_err("should deny"); + let err = rsp + .downcast_ref::() + .expect("rate limit error"); + match err { + RateLimitError::PerIdentity(rps) => assert_eq!(rps, &std::num::NonZeroU32::new(1).unwrap()), + _ => panic!("unexpected error"), + }; +} + #[tokio::test(flavor = "current_thread")] async fn grpc_route() { use linkerd_proxy_server_policy::grpc::{ @@ -422,7 +495,7 @@ async fn grpc_route() { ::http::Request::builder() .uri("/foo.bar.bah/baz") .method(::http::Method::POST) - .body(hyper::Body::default()) + 
.body(BoxBody::default()) .unwrap(), ) .await @@ -438,7 +511,7 @@ async fn grpc_route() { ::http::Request::builder() .uri("/foo.bar.bah/qux") .method(::http::Method::POST) - .body(hyper::Body::default()) + .body(BoxBody::default()) .unwrap(), ) .await @@ -450,7 +523,7 @@ async fn grpc_route() { ::http::Request::builder() .uri("/boo.bar.bah/bah") .method(::http::Method::POST) - .body(hyper::Body::default()) + .body(BoxBody::default()) .unwrap(), ) .await @@ -502,14 +575,14 @@ async fn grpc_filter_header() { }, }], }])); - let inner = |permit: HttpRoutePermit, req: ::http::Request| -> Result<_> { + let inner = |permit: HttpRoutePermit, req: ::http::Request| -> Result<_> { assert_eq!(req.headers().len(), 1); assert_eq!( req.headers().get("testkey"), Some(&"testval".parse().unwrap()) ); let mut rsp = ::http::Response::builder() - .body(hyper::Body::default()) + .body(BoxBody::default()) .unwrap(); rsp.extensions_mut().insert(permit); Ok(rsp) @@ -521,7 +594,7 @@ async fn grpc_filter_header() { ::http::Request::builder() .uri("/foo.bar.bah/baz") .method(::http::Method::POST) - .body(hyper::Body::default()) + .body(BoxBody::default()) .unwrap(), ) .await @@ -579,8 +652,8 @@ async fn grpc_filter_inject_failure() { }], }])); let inner = |_: HttpRoutePermit, - _: ::http::Request| - -> Result<::http::Response> { unreachable!() }; + _: ::http::Request| + -> Result<::http::Response> { unreachable!() }; let (mut svc, _tx) = new_svc!(proto, conn!(), inner); let err = svc @@ -588,7 +661,7 @@ async fn grpc_filter_inject_failure() { ::http::Request::builder() .uri("/foo.bar.bah/baz") .method(::http::Method::POST) - .body(hyper::Body::default()) + .body(BoxBody::default()) .unwrap(), ) .await diff --git a/linkerd/app/inbound/src/policy/store.rs b/linkerd/app/inbound/src/policy/store.rs index 50bc155133..14b5653261 100644 --- a/linkerd/app/inbound/src/policy/store.rs +++ b/linkerd/app/inbound/src/policy/store.rs @@ -77,8 +77,7 @@ impl Store { S: tonic::client::GrpcService, S: Clone + 
Send + Sync + 'static, S::Future: Send, - S::ResponseBody: - http::HttpBody + Default + Send + 'static, + S::ResponseBody: http::Body + Send + 'static, { let opaque_default = Self::make_opaque(default.clone()); // The initial set of policies never expire from the cache. @@ -142,8 +141,7 @@ where S: tonic::client::GrpcService, S: Clone + Send + Sync + 'static, S::Future: Send, - S::ResponseBody: - http::HttpBody + Default + Send + 'static, + S::ResponseBody: http::Body + Send + 'static, { fn get_policy(&self, dst: OrigDstAddr) -> AllowPolicy { // Lookup the policy for the target port in the cache. If it doesn't diff --git a/linkerd/app/inbound/src/policy/tcp.rs b/linkerd/app/inbound/src/policy/tcp.rs index 9cf0b74317..2defa5288f 100644 --- a/linkerd/app/inbound/src/policy/tcp.rs +++ b/linkerd/app/inbound/src/policy/tcp.rs @@ -194,6 +194,19 @@ fn check_authorized( { for authz in &**authzs { if super::is_authorized(authz, client_addr, tls) { + if authz.meta.is_audit() { + tracing::info!( + server.group = %server.meta.group(), + server.kind = %server.meta.kind(), + server.name = %server.meta.name(), + client.tls = ?tls, + client.ip = %client_addr.ip(), + authz.group = %authz.meta.group(), + authz.kind = %authz.meta.kind(), + authz.name = %authz.meta.name(), + "Request allowed", + ); + } return Ok(ServerPermit::new(dst, server, authz)); } } diff --git a/linkerd/app/inbound/src/policy/tcp/tests.rs b/linkerd/app/inbound/src/policy/tcp/tests.rs index 957c51d736..4fdb825e93 100644 --- a/linkerd/app/inbound/src/policy/tcp/tests.rs +++ b/linkerd/app/inbound/src/policy/tcp/tests.rs @@ -26,6 +26,7 @@ async fn unauthenticated_allowed() { kind: "server".into(), name: "test".into(), }), + local_rate_limit: Arc::new(Default::default()), }; let tls = tls::ConditionalServerTls::None(tls::NoServerTls::NoClientHello); @@ -42,11 +43,14 @@ async fn unauthenticated_allowed() { kind: "serverauthorization".into(), name: "unauth".into() }), - server: ServerLabel(Arc::new(Meta::Resource { - 
group: "policy.linkerd.io".into(), - kind: "server".into(), - name: "test".into() - })) + server: ServerLabel( + Arc::new(Meta::Resource { + group: "policy.linkerd.io".into(), + kind: "server".into(), + name: "test".into() + }), + 1000 + ) }, } ); @@ -75,6 +79,7 @@ async fn authenticated_identity() { kind: "server".into(), name: "test".into(), }), + local_rate_limit: Arc::new(Default::default()), }; let tls = tls::ConditionalServerTls::Some(tls::ServerTls::Established { @@ -94,11 +99,14 @@ async fn authenticated_identity() { kind: "serverauthorization".into(), name: "tls-auth".into() }), - server: ServerLabel(Arc::new(Meta::Resource { - group: "policy.linkerd.io".into(), - kind: "server".into(), - name: "test".into() - })) + server: ServerLabel( + Arc::new(Meta::Resource { + group: "policy.linkerd.io".into(), + kind: "server".into(), + name: "test".into() + }), + 1000 + ) } } ); @@ -138,6 +146,7 @@ async fn authenticated_suffix() { kind: "server".into(), name: "test".into(), }), + local_rate_limit: Arc::new(Default::default()), }; let tls = tls::ConditionalServerTls::Some(tls::ServerTls::Established { @@ -156,11 +165,14 @@ async fn authenticated_suffix() { kind: "serverauthorization".into(), name: "tls-auth".into() }), - server: ServerLabel(Arc::new(Meta::Resource { - group: "policy.linkerd.io".into(), - kind: "server".into(), - name: "test".into() - })), + server: ServerLabel( + Arc::new(Meta::Resource { + group: "policy.linkerd.io".into(), + kind: "server".into(), + name: "test".into() + }), + 1000 + ), } } ); @@ -197,6 +209,7 @@ async fn tls_unauthenticated() { kind: "server".into(), name: "test".into(), }), + local_rate_limit: Arc::new(Default::default()), }; let tls = tls::ConditionalServerTls::Some(tls::ServerTls::Established { @@ -215,11 +228,14 @@ async fn tls_unauthenticated() { kind: "serverauthorization".into(), name: "tls-unauth".into() }), - server: ServerLabel(Arc::new(Meta::Resource { - group: "policy.linkerd.io".into(), - kind: "server".into(), - 
name: "test".into() - })), + server: ServerLabel( + Arc::new(Meta::Resource { + group: "policy.linkerd.io".into(), + kind: "server".into(), + name: "test".into() + }), + 1000 + ), } } ); diff --git a/linkerd/app/inbound/src/server.rs b/linkerd/app/inbound/src/server.rs index 5dd58ad1d3..b8458fcce3 100644 --- a/linkerd/app/inbound/src/server.rs +++ b/linkerd/app/inbound/src/server.rs @@ -29,8 +29,8 @@ impl Inbound<()> { where C: tonic::client::GrpcService, C: Clone + Unpin + Send + Sync + 'static, - C::ResponseBody: http::HttpBody, - C::ResponseBody: Default + Send + 'static, + C::ResponseBody: http::Body, + C::ResponseBody: Send + 'static, C::Future: Send, { self.config @@ -55,6 +55,8 @@ impl Inbound<()> { I: Debug + Unpin + Send + Sync + 'static, P: profiles::GetProfile, { + let detect_metrics = self.runtime.metrics.detect.clone(); + // Handles connections to ports that can't be determined to be HTTP. let forward = self .clone() @@ -97,7 +99,7 @@ impl Inbound<()> { // Determines how to handle an inbound connection, dispatching it to the appropriate // stack. 
http.push_http_tcp_server() - .push_detect(forward) + .push_detect(detect_metrics, forward) .push_accept(addr.port(), policies, direct) .into_inner() } diff --git a/linkerd/app/inbound/src/test_util.rs b/linkerd/app/inbound/src/test_util.rs index efd962300a..103a6d871d 100644 --- a/linkerd/app/inbound/src/test_util.rs +++ b/linkerd/app/inbound/src/test_util.rs @@ -10,7 +10,7 @@ use linkerd_app_core::{ http::{h1, h2}, tap, }, - transport::{DualListenAddr, Keepalive}, + transport::{DualListenAddr, Keepalive, UserTimeout}, ProxyRuntime, }; pub use linkerd_app_test as support; @@ -46,6 +46,7 @@ pub fn default_config() -> Config { kind: "server".into(), name: "testsrv".into(), }), + local_rate_limit: Arc::new(Default::default()), } .into(), ports: Default::default(), @@ -59,10 +60,12 @@ pub fn default_config() -> Config { server: config::ServerConfig { addr: DualListenAddr(([0, 0, 0, 0], 0).into(), None), keepalive: Keepalive(None), + user_timeout: UserTimeout(None), http2: h2::ServerParams::default(), }, connect: config::ConnectConfig { keepalive: Keepalive(None), + user_timeout: UserTimeout(None), timeout: Duration::from_secs(1), backoff: exp_backoff::ExponentialBackoff::try_new( Duration::from_millis(100), @@ -86,6 +89,7 @@ pub fn default_config() -> Config { }, discovery_idle_timeout: Duration::from_secs(20), profile_skip_timeout: Duration::from_secs(1), + unsafe_authority_labels: false, } } diff --git a/linkerd/app/integration/Cargo.toml b/linkerd/app/integration/Cargo.toml index 3d083fb4be..a419743a87 100644 --- a/linkerd/app/integration/Cargo.toml +++ b/linkerd/app/integration/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "linkerd-app-integration" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } description = """ Proxy integration tests @@ 
-17,23 +17,24 @@ default = [] flakey = [] [dependencies] -bytes = "1" +bytes = { workspace = true } futures = { version = "0.3", default-features = false, features = ["executor"] } -h2 = "0.3" -http = "0.2" -http-body = "0.4" -hyper = { version = "0.14", features = [ +h2 = { workspace = true } +http = { workspace = true } +http-body = { workspace = true } +http-body-util = { workspace = true } +hyper = { workspace = true, features = [ "http1", "http2", - "stream", "client", "server", ] } +hyper-util = { workspace = true, features = ["service"] } ipnet = "2" linkerd-app = { path = "..", features = ["allow-loopback"] } linkerd-app-core = { path = "../core" } linkerd-metrics = { path = "../../metrics", features = ["test_util"] } -linkerd2-proxy-api = { version = "0.13", features = [ +linkerd2-proxy-api = { workspace = true, features = [ "destination", "arbitrary", ] } @@ -45,10 +46,10 @@ regex = "1" socket2 = "0.5" tokio = { version = "1", features = ["io-util", "net", "rt", "macros"] } tokio-stream = { version = "0.1", features = ["sync"] } -tokio-rustls = "0.24" -rustls-pemfile = "1.0" -tower = { version = "0.4", default-features = false } -tonic = { version = "0.10", features = ["transport"], default-features = false } +tokio-rustls = { workspace = true } +rustls-pemfile = "2.2" +tower = { workspace = true, default-features = false } +tonic = { workspace = true, features = ["transport"], default-features = false } tracing = "0.1" tracing-subscriber = { version = "0.3", default-features = false, features = [ "fmt", diff --git a/linkerd/app/integration/src/client.rs b/linkerd/app/integration/src/client.rs index ed5c5763f3..11d39ad2b3 100644 --- a/linkerd/app/integration/src/client.rs +++ b/linkerd/app/integration/src/client.rs @@ -1,26 +1,28 @@ use super::*; -use linkerd_app_core::proxy::http::TracingExecutor; +use http::{Request, Response}; +use linkerd_app_core::{proxy::http::TokioExecutor, svc::http::BoxBody}; use parking_lot::Mutex; use std::io; -use 
tokio::net::TcpStream; -use tokio::task::JoinHandle; +use tokio::{net::TcpStream, task::JoinHandle}; use tokio_rustls::rustls::{self, ClientConfig}; use tracing::info_span; -type ClientError = hyper::Error; -type Request = http::Request; -type Response = http::Response; -type Sender = mpsc::UnboundedSender<(Request, oneshot::Sender>)>; +type ClientError = hyper_util::client::legacy::Error; +type Sender = mpsc::UnboundedSender<( + Request, + oneshot::Sender, ClientError>>, +)>; #[derive(Clone)] pub struct TlsConfig { client_config: Arc, - name: rustls::ServerName, + name: rustls::pki_types::ServerName<'static>, } impl TlsConfig { - pub fn new(client_config: Arc, name: &str) -> Self { - let name = rustls::ServerName::try_from(name).expect("name must be a valid DNS name"); + pub fn new(client_config: Arc, name: &'static str) -> Self { + let name = + rustls::pki_types::ServerName::try_from(name).expect("name must be a valid DNS name"); TlsConfig { client_config, name, @@ -74,9 +76,6 @@ pub fn http2_tls>(addr: SocketAddr, auth: T, tls: TlsConfig) -> Client::new(addr, auth.into(), Run::Http2, Some(tls)) } -pub fn tcp(addr: SocketAddr) -> tcp::TcpClient { - tcp::client(addr) -} pub struct Client { addr: SocketAddr, run: Run, @@ -132,11 +131,19 @@ impl Client { pub fn request( &self, builder: http::request::Builder, - ) -> impl Future> + Send + Sync + 'static { - self.send_req(builder.body(Bytes::new().into()).unwrap()) + ) -> impl Future, ClientError>> + Send + 'static + { + let req = builder.body(BoxBody::empty()).unwrap(); + self.send_req(req) } - pub async fn request_body(&self, req: Request) -> Response { + pub async fn request_body(&self, req: Request) -> Response + where + B: Body + Send + 'static, + B::Data: Send + 'static, + B::Error: Into, + { + let req = req.map(BoxBody::new); self.send_req(req).await.expect("response") } @@ -152,11 +159,16 @@ impl Client { } } - #[tracing::instrument(skip(self))] - pub(crate) fn send_req( + #[tracing::instrument(skip(self, 
req))] + pub(crate) fn send_req( &self, - mut req: Request, - ) -> impl Future> + Send + Sync + 'static { + mut req: Request, + ) -> impl Future, ClientError>> + Send + 'static + where + B: Body + Send + 'static, + B::Data: Send + 'static, + B::Error: Into, + { if req.uri().scheme().is_none() { if self.tls.is_some() { *req.uri_mut() = format!("https://{}{}", self.authority, req.uri().path()) @@ -170,7 +182,8 @@ impl Client { } tracing::debug!(headers = ?req.headers(), "request"); let (tx, rx) = oneshot::channel(); - let _ = self.tx.send((req.map(Into::into), tx)); + let req = req.map(BoxBody::new); + let _ = self.tx.send((req, tx)); async { rx.await.expect("request cancelled") }.in_current_span() } @@ -220,13 +233,17 @@ enum Run { Http2, } +pub type Running = Pin + Send + 'static>>; + fn run( addr: SocketAddr, version: Run, tls: Option, ) -> (Sender, JoinHandle<()>, Running) { - let (tx, rx) = - mpsc::unbounded_channel::<(Request, oneshot::Sender>)>(); + let (tx, rx) = mpsc::unbounded_channel::<( + Request, + oneshot::Sender, ClientError>>, + )>(); let test_name = thread_name(); let absolute_uris = if let Run::Http1 { absolute_uris } = version { @@ -235,7 +252,12 @@ fn run( false }; - let (running_tx, running) = running(); + let (running_tx, running) = { + let (tx, rx) = oneshot::channel(); + let rx = Box::pin(rx.map(|_| ())); + (tx, rx) + }; + let conn = Conn { addr, absolute_uris, @@ -250,10 +272,9 @@ fn run( let span = info_span!("test client", peer_addr = %addr, ?version, test = %test_name); let work = async move { - let client = hyper::Client::builder() + let client = hyper_util::client::legacy::Client::builder(TokioExecutor::new()) .http2_only(http2_only) - .executor(TracingExecutor) - .build::(conn); + .build::(conn); tracing::trace!("client task started"); let mut rx = rx; let (drain_tx, drain) = drain::channel(); @@ -263,7 +284,6 @@ fn run( // instance would remain un-dropped. 
async move { while let Some((req, cb)) = rx.recv().await { - let req = req.map(hyper::Body::from); tracing::trace!(?req); let req = client.request(req); tokio::spawn( @@ -295,9 +315,11 @@ struct Conn { } impl tower::Service for Conn { - type Response = RunningIo; + type Response = hyper_util::rt::TokioIo; type Error = io::Error; - type Future = Pin> + Send + 'static>>; + type Future = Pin< + Box>> + Send + 'static>, + >; fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) @@ -327,19 +349,19 @@ impl tower::Service for Conn { } else { Box::pin(io) as Pin> }; - Ok(RunningIo { + Ok(hyper_util::rt::TokioIo::new(RunningIo { io, abs_form, _running: Some(running), - }) + })) }) } } -impl hyper::client::connect::Connection for RunningIo { - fn connected(&self) -> hyper::client::connect::Connected { +impl hyper_util::client::legacy::connect::Connection for RunningIo { + fn connected(&self) -> hyper_util::client::legacy::connect::Connected { // Setting `proxy` to true will configure Hyper to use absolute-form // URIs on this connection. 
- hyper::client::connect::Connected::new().proxy(self.abs_form) + hyper_util::client::legacy::connect::Connected::new().proxy(self.abs_form) } } diff --git a/linkerd/app/integration/src/controller.rs b/linkerd/app/integration/src/controller.rs index 13d1344af7..82fa6a8ad8 100644 --- a/linkerd/app/integration/src/controller.rs +++ b/linkerd/app/integration/src/controller.rs @@ -2,7 +2,7 @@ use super::*; pub use linkerd2_proxy_api::destination as pb; use linkerd2_proxy_api::net; -use linkerd_app_core::proxy::http::TracingExecutor; +use linkerd_app_core::proxy::http::TokioExecutor; use parking_lot::Mutex; use std::collections::VecDeque; use std::net::IpAddr; @@ -343,7 +343,7 @@ pub(crate) async fn run( delay: Option + Send>>>, ) -> Listening where - T: tower::Service, Response = http::Response>, + T: tower::Service, Response = http::Response>, T: Clone + Send + 'static, T::Error: Into>, T::Future: Send, @@ -372,12 +372,16 @@ where let _ = listening_tx.send(()); } - let mut http = hyper::server::conn::Http::new().with_executor(TracingExecutor); - http.http2_only(true); + let mut http = hyper::server::conn::http2::Builder::new(TokioExecutor::new()); loop { let (sock, addr) = listener.accept().await?; let span = tracing::debug_span!("conn", %addr).or_current(); - let serve = http.serve_connection(sock, svc.clone()); + let serve = http + .timer(hyper_util::rt::TokioTimer::new()) + .serve_connection( + hyper_util::rt::TokioIo::new(sock), + hyper_util::service::TowerToHyperService::new(svc.clone()), + ); let f = async move { serve.await.map_err(|error| { tracing::error!( @@ -530,6 +534,7 @@ impl From for pb::Update { tls_identity, authority_override: None, http2: None, + resource_ref: None, }], metric_labels: set_labels, })), @@ -610,7 +615,11 @@ pub fn retry_budget( } pub fn dst_override(authority: String, weight: u32) -> pb::WeightedDst { - pb::WeightedDst { authority, weight } + pb::WeightedDst { + authority, + weight, + backend_ref: None, + } } pub fn route() -> 
RouteBuilder { diff --git a/linkerd/app/integration/src/identity.rs b/linkerd/app/integration/src/identity.rs index f2ba49766c..afc373852b 100644 --- a/linkerd/app/integration/src/identity.rs +++ b/linkerd/app/integration/src/identity.rs @@ -8,7 +8,7 @@ use std::{ }; use linkerd2_proxy_api::identity as pb; -use tokio_rustls::rustls; +use tokio_rustls::rustls::{self, pki_types::CertificateDer, server::WebPkiClientVerifier}; use tonic as grpc; pub struct Identity { @@ -36,7 +36,7 @@ type Certify = Box< static TLS_VERSIONS: &[&rustls::SupportedProtocolVersion] = &[&rustls::version::TLS13]; static TLS_SUPPORTED_CIPHERSUITES: &[rustls::SupportedCipherSuite] = - &[rustls::cipher_suite::TLS13_CHACHA20_POLY1305_SHA256]; + &[rustls::crypto::ring::cipher_suite::TLS13_CHACHA20_POLY1305_SHA256]; struct Certificates { pub leaf: Vec, @@ -50,11 +50,17 @@ impl Certificates { { let f = fs::File::open(p)?; let mut r = io::BufReader::new(f); - let mut certs = rustls_pemfile::certs(&mut r) + let mut certs = rustls_pemfile::certs(&mut r); + let leaf = certs + .next() + .expect("no leaf cert in pemfile") + .map_err(|_| io::Error::new(io::ErrorKind::Other, "rustls error reading certs"))? 
+ .as_ref() + .to_vec(); + let intermediates = certs + .map(|cert| cert.map(|cert| cert.as_ref().to_vec())) + .collect::, _>>() .map_err(|_| io::Error::new(io::ErrorKind::Other, "rustls error reading certs"))?; - let mut certs = certs.drain(..); - let leaf = certs.next().expect("no leaf cert in pemfile"); - let intermediates = certs.collect(); Ok(Certificates { leaf, @@ -62,11 +68,14 @@ impl Certificates { }) } - pub fn chain(&self) -> Vec { + pub fn chain(&self) -> Vec> { let mut chain = Vec::with_capacity(self.intermediates.len() + 1); chain.push(self.leaf.clone()); chain.extend(self.intermediates.clone()); - chain.into_iter().map(rustls::Certificate).collect() + chain + .into_iter() + .map(rustls::pki_types::CertificateDer::from) + .collect() } pub fn response(&self) -> pb::CertifyResponse { @@ -79,43 +88,49 @@ impl Certificates { } impl Identity { - fn load_key

(p: P) -> rustls::PrivateKey + fn load_key

(p: P) -> rustls::pki_types::PrivateKeyDer<'static> where P: AsRef, { let p8 = fs::read(&p).expect("read key"); - rustls::PrivateKey(p8) + rustls::pki_types::PrivateKeyDer::try_from(p8).expect("decode key") } fn configs( trust_anchors: &str, certs: &Certificates, - key: rustls::PrivateKey, + key: rustls::pki_types::PrivateKeyDer<'static>, ) -> (Arc, Arc) { use std::io::Cursor; let mut roots = rustls::RootCertStore::empty(); - let trust_anchors = - rustls_pemfile::certs(&mut Cursor::new(trust_anchors)).expect("error parsing pemfile"); - let (added, skipped) = roots.add_parsable_certificates(&trust_anchors[..]); + let trust_anchors = rustls_pemfile::certs(&mut Cursor::new(trust_anchors)) + .map(|bytes| bytes.map(CertificateDer::from)) + .collect::, _>>() + .expect("error parsing pemfile"); + let (added, skipped) = roots.add_parsable_certificates(trust_anchors); assert_ne!(added, 0, "trust anchors must include at least one cert"); assert_eq!(skipped, 0, "no certs in pemfile should be invalid"); - let client_config = rustls::ClientConfig::builder() - .with_cipher_suites(TLS_SUPPORTED_CIPHERSUITES) - .with_safe_default_kx_groups() + let mut provider = rustls::crypto::ring::default_provider(); + provider.cipher_suites = TLS_SUPPORTED_CIPHERSUITES.to_vec(); + let provider = Arc::new(provider); + + let client_config = rustls::ClientConfig::builder_with_provider(provider.clone()) .with_protocol_versions(TLS_VERSIONS) .expect("client config must be valid") .with_root_certificates(roots.clone()) .with_no_client_auth(); - let server_config = rustls::ServerConfig::builder() - .with_cipher_suites(TLS_SUPPORTED_CIPHERSUITES) - .with_safe_default_kx_groups() + let client_cert_verifier = + WebPkiClientVerifier::builder_with_provider(Arc::new(roots), provider.clone()) + .allow_unauthenticated() + .build() + .expect("server verifier must be valid"); + + let server_config = rustls::ServerConfig::builder_with_provider(provider) .with_protocol_versions(TLS_VERSIONS) .expect("server 
config must be valid") - .with_client_cert_verifier(Arc::new( - rustls::server::AllowAnyAnonymousOrAuthenticatedClient::new(roots), - )) + .with_client_cert_verifier(client_cert_verifier) .with_single_cert(certs.chain(), key) .unwrap(); diff --git a/linkerd/app/integration/src/lib.rs b/linkerd/app/integration/src/lib.rs index 671fa69e7e..1b19ff5856 100644 --- a/linkerd/app/integration/src/lib.rs +++ b/linkerd/app/integration/src/lib.rs @@ -26,9 +26,9 @@ pub use bytes::{Buf, BufMut, Bytes}; pub use futures::stream::{Stream, StreamExt}; pub use futures::{future, FutureExt, TryFuture, TryFutureExt}; pub use http::{HeaderMap, Request, Response, StatusCode}; -pub use http_body::Body as HttpBody; +pub use http_body::Body; pub use linkerd_app as app; -pub use linkerd_app_core::{drain, Addr}; +pub use linkerd_app_core::{drain, Addr, Error}; pub use linkerd_app_test::*; pub use linkerd_tracing::test::*; use socket2::Socket; @@ -50,8 +50,6 @@ pub use tower::Service; pub const ENV_TEST_PATIENCE_MS: &str = "RUST_TEST_PATIENCE_MS"; pub const DEFAULT_TEST_PATIENCE: Duration = Duration::from_millis(15); -pub type Error = Box; - /// Retry an assertion up to a specified number of times, waiting /// `RUST_TEST_PATIENCE_MS` between retries. /// @@ -73,7 +71,7 @@ pub type Error = Box; macro_rules! assert_eventually { ($cond:expr, retries: $retries:expr, $($arg:tt)+) => { { - use std::{env, u64}; + use std::{env}; use std::str::FromStr; use tokio::time::{Instant, Duration}; use tracing::Instrument as _; @@ -219,15 +217,6 @@ impl Shutdown { pub type ShutdownRx = Pin + Send>>; -/// A channel used to signal when a Client's related connection is running or closed. 
-pub fn running() -> (oneshot::Sender<()>, Running) { - let (tx, rx) = oneshot::channel(); - let rx = Box::pin(rx.map(|_| ())); - (tx, rx) -} - -pub type Running = Pin + Send + Sync + 'static>>; - pub fn s(bytes: &[u8]) -> &str { ::std::str::from_utf8(bytes).unwrap() } @@ -267,7 +256,7 @@ impl fmt::Display for HumanDuration { pub async fn cancelable( drain: drain::Watch, - f: impl Future> + Send + 'static, + f: impl Future>, ) -> Result<(), E> { tokio::select! { res = f => res, diff --git a/linkerd/app/integration/src/metrics.rs b/linkerd/app/integration/src/metrics.rs index 7faa8a4759..36ac497685 100644 --- a/linkerd/app/integration/src/metrics.rs +++ b/linkerd/app/integration/src/metrics.rs @@ -192,8 +192,8 @@ impl MetricMatch { } pub async fn assert_in(&self, client: &crate::client::Client) { + use std::env; use std::str::FromStr; - use std::{env, u64}; use tokio::time::{Duration, Instant}; use tracing::Instrument as _; const MAX_RETRIES: usize = 5; diff --git a/linkerd/app/integration/src/policy.rs b/linkerd/app/integration/src/policy.rs index 14760ded14..977fee2565 100644 --- a/linkerd/app/integration/src/policy.rs +++ b/linkerd/app/integration/src/policy.rs @@ -2,6 +2,7 @@ use super::*; pub use api::{inbound, outbound}; use api::{inbound::inbound_server_policies_server, outbound::outbound_policies_server}; use futures::stream; +use http_body_util::combinators::UnsyncBoxBody; use linkerd2_proxy_api as api; use parking_lot::Mutex; use std::collections::VecDeque; @@ -34,6 +35,9 @@ pub struct InboundSender(Tx); #[derive(Debug, Clone)] pub struct OutboundSender(Tx); +#[derive(Clone)] +struct RoutesSvc(grpc::service::Routes); + type Tx = mpsc::UnboundedSender>; type Rx = UnboundedReceiverStream>; type WatchStream = Pin> + Send + Sync + 'static>>; @@ -45,6 +49,7 @@ pub fn all_unauthenticated() -> inbound::Server { inbound::proxy_protocol::Detect { timeout: Some(Duration::from_secs(10).try_into().unwrap()), http_routes: vec![], + http_local_rate_limit: None, }, )), 
}), @@ -118,11 +123,11 @@ pub fn outbound_default(dst: impl ToString) -> outbound::OutboundPolicy { timeout: Some(Duration::from_secs(10).try_into().unwrap()), http1: Some(proxy_protocol::Http1 { routes: vec![route.clone()], - failure_accrual: None, + ..Default::default() }), http2: Some(proxy_protocol::Http2 { routes: vec![route], - failure_accrual: None, + ..Default::default() }), opaque: Some(proxy_protocol::Opaque { routes: vec![outbound_default_opaque_route(dst)], @@ -150,7 +155,7 @@ pub fn outbound_default_http_route(dst: impl ToString) -> outbound::HttpRoute { }], filters: Vec::new(), backends: Some(http_first_available(std::iter::once(backend(dst)))), - request_timeout: None, + ..Default::default() }], } } @@ -167,10 +172,12 @@ pub fn outbound_default_opaque_route(dst: impl ToString) -> outbound::OpaqueRout distribution::FirstAvailable { backends: vec![opaque_route::RouteBackend { backend: Some(backend(dst)), + filters: Vec::new(), }], }, )), }), + filters: Vec::new(), }], } } @@ -214,7 +221,7 @@ pub fn http_first_available( .map(|backend| http_route::RouteBackend { backend: Some(backend), filters: Vec::new(), - request_timeout: None, + ..Default::default() }) .collect(), }, @@ -305,7 +312,7 @@ impl Controller { Server(Arc::new(self.outbound)), )) .into_service(); - controller::run(svc, "support policy controller", None).await + controller::run(RoutesSvc(svc), "support policy controller", None).await } } @@ -506,6 +513,33 @@ impl Inner { } } +// === impl RoutesSvc === + +impl Service> for RoutesSvc { + type Response = + >>>::Response; + type Error = + >>>::Error; + type Future = + >>>::Future; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + let Self(routes) = self; + routes.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + use http_body_util::{combinators::UnsyncBoxBody, BodyExt}; + + let Self(routes) = self; + let req = req.map(|body| { + UnsyncBoxBody::new(body.map_err(|err| 
grpc::Status::from_error(Box::new(err)))) + }); + + routes.call(req) + } +} + fn grpc_no_results() -> grpc::Status { grpc::Status::new( grpc::Code::NotFound, diff --git a/linkerd/app/integration/src/proxy.rs b/linkerd/app/integration/src/proxy.rs index 9df84451dc..fd7caa2d28 100644 --- a/linkerd/app/integration/src/proxy.rs +++ b/linkerd/app/integration/src/proxy.rs @@ -1,7 +1,9 @@ use super::*; use linkerd_app_core::{ svc::Param, - transport::{listen, orig_dst, Keepalive, ListenAddr, Local, OrigDstAddr, ServerAddr}, + transport::{ + listen, orig_dst, Keepalive, ListenAddr, Local, OrigDstAddr, ServerAddr, UserTimeout, + }, Result, }; use std::{collections::HashSet, thread}; @@ -68,7 +70,7 @@ struct MockDualOrigDst { impl listen::Bind for MockOrigDst where - T: Param + Param, + T: Param + Param + Param, { type Addrs = orig_dst::Addrs; type BoundAddrs = Local; @@ -118,7 +120,7 @@ impl fmt::Debug for MockOrigDst { impl listen::Bind for MockDualOrigDst where - T: Param + Param, + T: Param + Param + Param, { type Addrs = orig_dst::Addrs; type BoundAddrs = (Local, Option>); diff --git a/linkerd/app/integration/src/server.rs b/linkerd/app/integration/src/server.rs index cca6fae2c1..0099e84ef4 100644 --- a/linkerd/app/integration/src/server.rs +++ b/linkerd/app/integration/src/server.rs @@ -1,5 +1,7 @@ -use super::app_core::svc::http::TracingExecutor; +use super::app_core::svc::http::TokioExecutor; use super::*; +use http::{Request, Response}; +use linkerd_app_core::svc::http::BoxBody; use std::{ io, sync::atomic::{AtomicUsize, Ordering}, @@ -12,23 +14,35 @@ pub fn new() -> Server { } pub fn http1() -> Server { - Server::http1() + Server { + routes: Default::default(), + version: Run::Http1, + tls: None, + } } pub fn http1_tls(tls: Arc) -> Server { - Server::http1_tls(tls) + Server { + routes: Default::default(), + version: Run::Http1, + tls: Some(tls), + } } pub fn http2() -> Server { - Server::http2() + Server { + routes: Default::default(), + version: Run::Http2, + tls: 
None, + } } pub fn http2_tls(tls: Arc) -> Server { - Server::http2_tls(tls) -} - -pub fn tcp() -> tcp::TcpServer { - tcp::server() + Server { + routes: Default::default(), + version: Run::Http2, + tls: Some(tls), + } } pub struct Server { @@ -45,9 +59,8 @@ pub struct Listening { pub(super) http_version: Option, } -type Request = http::Request; -type Response = http::Response; -type RspFuture = Pin> + Send + Sync + 'static>>; +type RspFuture = + Pin, Error>> + Send + 'static>>; impl Listening { pub fn connections(&self) -> usize { @@ -92,29 +105,6 @@ impl Listening { } impl Server { - fn new(run: Run, tls: Option>) -> Self { - Server { - routes: HashMap::new(), - version: run, - tls, - } - } - fn http1() -> Self { - Server::new(Run::Http1, None) - } - - fn http1_tls(tls: Arc) -> Self { - Server::new(Run::Http1, Some(tls)) - } - - fn http2() -> Self { - Server::new(Run::Http2, None) - } - - fn http2_tls(tls: Arc) -> Self { - Server::new(Run::Http2, Some(tls)) - } - /// Return a string body as a 200 OK response, with the string as /// the response body. pub fn route(mut self, path: &str, resp: &str) -> Self { @@ -126,11 +116,11 @@ impl Server { /// to send back. pub fn route_fn(self, path: &str, cb: F) -> Self where - F: Fn(Request) -> Response + Send + Sync + 'static, + F: Fn(Request) -> Response + Send + Sync + 'static, { self.route_async(path, move |req| { let res = cb(req); - async move { Ok::<_, BoxError>(res) } + async move { Ok::<_, Error>(res) } }) } @@ -138,9 +128,9 @@ impl Server { /// a response to send back. 
pub fn route_async(mut self, path: &str, cb: F) -> Self where - F: Fn(Request) -> U + Send + Sync + 'static, - U: TryFuture + Send + Sync + 'static, - U::Error: Into + Send + 'static, + F: Fn(Request) -> U + Send + Sync + 'static, + U: TryFuture> + Send + 'static, + U::Error: Into + Send + 'static, { let func = move |req| Box::pin(cb(req).map_err(Into::into)) as RspFuture; self.routes.insert(path.into(), Route(Box::new(func))); @@ -148,16 +138,17 @@ impl Server { } pub fn route_with_latency(self, path: &str, resp: &str, latency: Duration) -> Self { - let resp = Bytes::from(resp.to_string()); + let body = resp.to_owned(); self.route_async(path, move |_| { - let resp = resp.clone(); + let body = body.clone(); async move { tokio::time::sleep(latency).await; - Ok::<_, BoxError>( + Ok::<_, Error>( http::Response::builder() - .status(200) - .body(hyper::Body::from(resp.clone())) - .unwrap(), + .status(StatusCode::OK) + .body(http_body_util::Full::new(Bytes::from(body.clone()))) + .unwrap() + .map(BoxBody::new), ) } }) @@ -193,12 +184,7 @@ impl Server { drain.clone(), async move { tracing::info!("support server running"); - let mut new_svc = NewSvc(Arc::new(self.routes)); - let mut http = hyper::server::conn::Http::new().with_executor(TracingExecutor); - match self.version { - Run::Http1 => http.http1_only(true), - Run::Http2 => http.http2_only(true), - }; + let svc = Svc(Arc::new(self.routes)); if let Some(delay) = delay { let _ = listening_tx.take().unwrap().send(()); delay.await; @@ -217,27 +203,41 @@ impl Server { let sock = accept_connection(sock, tls_config.clone()) .instrument(span.clone()) .await?; - let http = http.clone(); let srv_conn_count = srv_conn_count.clone(); - let svc = new_svc.call(()); + let svc = svc.clone(); let f = async move { tracing::trace!("serving..."); - let svc = svc.await; - tracing::trace!("service acquired"); srv_conn_count.fetch_add(1, Ordering::Release); - let svc = svc.map_err(|e| { - tracing::error!("support/server new_service error: 
{}", e) - })?; - let result = http - .serve_connection(sock, svc) - .await - .map_err(|e| tracing::error!("support/server error: {}", e)); + use hyper_util::{rt::TokioIo, service::TowerToHyperService}; + let (sock, svc) = (TokioIo::new(sock), TowerToHyperService::new(svc)); + let result = match self.version { + Run::Http1 => hyper::server::conn::http1::Builder::new() + .timer(hyper_util::rt::TokioTimer::new()) + .serve_connection(sock, svc) + .await + .map_err(|e| tracing::error!("support/server error: {}", e)), + Run::Http2 => { + hyper::server::conn::http2::Builder::new(TokioExecutor::new()) + .timer(hyper_util::rt::TokioTimer::new()) + .serve_connection(sock, svc) + .await + .map_err(|e| tracing::error!("support/server error: {}", e)) + } + }; tracing::trace!(?result, "serve done"); result }; - tokio::spawn( - cancelable(drain.clone(), f).instrument(span.clone().or_current()), - ); + // let fut = Box::pin(cancelable(drain.clone(), f).instrument(span.clone().or_current())) + let drain = drain.clone(); + tokio::spawn(async move { + tokio::select! 
{ + res = f => res, + _ = drain.signaled() => { + tracing::debug!("canceled!"); + Ok(()) + } + } + }); } } .instrument( @@ -266,17 +266,19 @@ pub(super) enum Run { Http2, } -struct Route(Box RspFuture + Send + Sync>); +struct Route(Box) -> RspFuture + Send + Sync>); impl Route { fn string(body: &str) -> Route { - let body = Bytes::from(body.to_string()); + let body = http_body_util::Full::new(Bytes::from(body.to_string())); Route(Box::new(move |_| { + let body = body.clone(); Box::pin(future::ok( http::Response::builder() - .status(200) - .body(hyper::Body::from(body.clone())) - .unwrap(), + .status(StatusCode::OK) + .body(body) + .unwrap() + .map(BoxBody::new), )) })) } @@ -288,58 +290,53 @@ impl std::fmt::Debug for Route { } } -type BoxError = Box; - -#[derive(Debug)] +#[derive(Clone, Debug)] struct Svc(Arc>); impl Svc { - fn route(&mut self, req: Request) -> RspFuture { + fn route( + &mut self, + req: Request, + ) -> impl Future, crate::app_core::Error>> + Send + where + B: Body + Send + Sync + 'static, + B::Data: Send + 'static, + B::Error: std::error::Error + Send + Sync + 'static, + { match self.0.get(req.uri().path()) { Some(Route(ref func)) => { tracing::trace!(path = %req.uri().path(), "found route for path"); - func(req) + func(req.map(BoxBody::new)) } None => { tracing::warn!("server 404: {:?}", req.uri().path()); - let res = http::Response::builder() - .status(404) - .body(Default::default()) - .unwrap(); - Box::pin(async move { Ok(res) }) + Box::pin(futures::future::ok( + http::Response::builder() + .status(StatusCode::NOT_FOUND) + .body(BoxBody::empty()) + .unwrap(), + )) } } } } -impl tower::Service for Svc { - type Response = Response; - type Error = BoxError; +impl tower::Service> for Svc +where + B: Body + Send + Sync + 'static, + B::Data: Send, + B::Error: std::error::Error + Send + Sync, +{ + type Response = Response; + type Error = Error; type Future = RspFuture; fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) } - 
fn call(&mut self, req: Request) -> Self::Future { - self.route(req) - } -} - -#[derive(Debug)] -struct NewSvc(Arc>); - -impl Service<()> for NewSvc { - type Response = Svc; - type Error = ::std::io::Error; - type Future = future::Ready>; - - fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn call(&mut self, _: ()) -> Self::Future { - future::ok(Svc(Arc::clone(&self.0))) + fn call(&mut self, req: Request) -> Self::Future { + Box::pin(self.route(req)) } } @@ -357,7 +354,6 @@ async fn accept_connection( _running: None, }) } - None => Ok(RunningIo { io: Box::pin(io), abs_form: false, diff --git a/linkerd/app/integration/src/tap.rs b/linkerd/app/integration/src/tap.rs index 14a2a84de1..019cf11698 100644 --- a/linkerd/app/integration/src/tap.rs +++ b/linkerd/app/integration/src/tap.rs @@ -2,6 +2,7 @@ use super::*; use futures::stream; use http_body::Body; use linkerd2_proxy_api::tap as pb; +use linkerd_app_core::svc::http::BoxBody; pub fn client(addr: SocketAddr) -> Client { let api = pb::tap_client::TapClient::new(SyncSvc(client::http2(addr, "localhost"))); @@ -106,7 +107,6 @@ pub trait TapEventExt { //fn id(&self) -> (u32, u64); fn event(&self) -> &pb::tap_event::http::Event; - fn request_init_method(&self) -> String; fn request_init_authority(&self) -> &str; fn request_init_path(&self) -> &str; @@ -134,16 +134,6 @@ impl TapEventExt for pb::TapEvent { } } - fn request_init_method(&self) -> String { - match self.event() { - pb::tap_event::http::Event::RequestInit(_ev) => { - //TODO: ugh - unimplemented!("method"); - } - e => panic!("not RequestInit event: {:?}", e), - } - } - fn request_init_authority(&self) -> &str { match self.event() { pb::tap_event::http::Event::RequestInit(ev) => &ev.authority, @@ -188,15 +178,14 @@ impl TapEventExt for pb::TapEvent { struct SyncSvc(client::Client); type ResponseFuture = - Pin, String>> + Send>>; + Pin, String>> + Send>>; impl tower::Service> for SyncSvc where - B: Body + Send + 'static, - 
B::Data: Send + 'static, - B::Error: Send + 'static, + B: Body, + B::Error: std::fmt::Debug, { - type Response = http::Response; + type Response = http::Response; type Error = String; type Future = ResponseFuture; @@ -205,20 +194,31 @@ where } fn call(&mut self, req: http::Request) -> Self::Future { - // this is okay to do because the body should always be complete, we - // just can't prove it. - let req = futures::executor::block_on(async move { - let (parts, body) = req.into_parts(); - let body = match hyper::body::to_bytes(body).await { - Ok(body) => body, - Err(_) => unreachable!("body should not fail"), - }; - http::Request::from_parts(parts, body) - }); - Box::pin( - self.0 - .send_req(req.map(Into::into)) - .map_err(|err| err.to_string()), - ) + use http_body_util::Full; + let Self(client) = self; + let req = req.map(Self::collect_body).map(Full::new).map(BoxBody::new); + let fut = client.send_req(req).map_err(|err| err.to_string()); + Box::pin(fut) + } +} + +impl SyncSvc { + /// Collects the given [`Body`], returning a [`Bytes`]. + /// + /// NB: This blocks the current thread until the provided body has been collected. This is + /// an acceptable practice in test code for the sake of simplicity, because we will always + /// provide [`SyncSvc`] with bodies that are complete. 
+ fn collect_body(body: B) -> Bytes + where + B: Body, + B::Error: std::fmt::Debug, + { + futures::executor::block_on(async move { + use http_body_util::BodyExt; + body.collect() + .await + .expect("body should not fail") + .to_bytes() + }) } } diff --git a/linkerd/app/integration/src/tcp.rs b/linkerd/app/integration/src/tcp.rs index ff487f8ad4..b44504e187 100644 --- a/linkerd/app/integration/src/tcp.rs +++ b/linkerd/app/integration/src/tcp.rs @@ -1,10 +1,11 @@ use super::*; -use std::collections::VecDeque; -use std::io; -use std::net::TcpListener as StdTcpListener; -use std::sync::atomic::{AtomicUsize, Ordering}; -use tokio::net::TcpStream; -use tokio::task::JoinHandle; +use std::{ + collections::VecDeque, + io, + net::TcpListener as StdTcpListener, + sync::atomic::{AtomicUsize, Ordering}, +}; +use tokio::{net::TcpStream, task::JoinHandle}; type TcpConnSender = mpsc::UnboundedSender<( Option>, @@ -148,10 +149,6 @@ impl TcpServer { } impl TcpConn { - pub fn target_addr(&self) -> SocketAddr { - self.addr - } - pub async fn read(&self) -> Vec { self.try_read() .await diff --git a/linkerd/app/integration/src/test_env.rs b/linkerd/app/integration/src/test_env.rs index da7886cd96..c364998fb9 100644 --- a/linkerd/app/integration/src/test_env.rs +++ b/linkerd/app/integration/src/test_env.rs @@ -1,6 +1,7 @@ use linkerd_app::env::{EnvError, Strings}; use std::collections::HashMap; +/// An implementation of [`Strings`] that wraps a [`HashMap`] for use in tests. #[derive(Clone, Default)] pub struct TestEnv { values: HashMap<&'static str, String>, } // === impl TestEnv === impl TestEnv { + /// Puts a new key-value pair in the test environment. pub fn put(&mut self, key: &'static str, value: String) { self.values.insert(key, value); } + /// Returns true if this environment contains the given key. pub fn contains_key(&self, key: &'static str) -> bool { self.values.contains_key(key) } + /// Removes a key-value pair from the test environment. 
pub fn remove(&mut self, key: &'static str) { self.values.remove(key); } + /// Extends this test environment using the other given [`TestEnv`]. pub fn extend(&mut self, other: TestEnv) { self.values.extend(other.values); } diff --git a/linkerd/app/integration/src/tests/client_policy.rs b/linkerd/app/integration/src/tests/client_policy.rs index 50de7fa640..ef04b0d3e5 100644 --- a/linkerd/app/integration/src/tests/client_policy.rs +++ b/linkerd/app/integration/src/tests/client_policy.rs @@ -63,11 +63,11 @@ async fn empty_http1_route() { hosts: Vec::new(), rules: Vec::new(), }], - failure_accrual: None, + ..Default::default() }), http2: Some(proxy_protocol::Http2 { routes: vec![policy::outbound_default_http_route(&dst)], - failure_accrual: None, + ..Default::default() }), opaque: Some(proxy_protocol::Opaque { routes: vec![policy::outbound_default_opaque_route(&dst)], @@ -148,7 +148,7 @@ async fn empty_http2_route() { timeout: Some(Duration::from_secs(10).try_into().unwrap()), http1: Some(proxy_protocol::Http1 { routes: vec![policy::outbound_default_http_route(&dst)], - failure_accrual: None, + ..Default::default() }), http2: Some(proxy_protocol::Http2 { routes: vec![outbound::HttpRoute { @@ -156,7 +156,7 @@ async fn empty_http2_route() { hosts: Vec::new(), rules: Vec::new(), }], - failure_accrual: None, + ..Default::default() }), opaque: Some(proxy_protocol::Opaque { routes: vec![policy::outbound_default_opaque_route(&dst)], @@ -223,7 +223,7 @@ async fn header_based_routing() { backends: Some(policy::http_first_available(std::iter::once( policy::backend(dst), ))), - request_timeout: None, + ..Default::default() }; let route = outbound::HttpRoute { @@ -237,7 +237,7 @@ async fn header_based_routing() { backends: Some(policy::http_first_available(std::iter::once( policy::backend(&dst_world), ))), - request_timeout: None, + ..Default::default() }, // x-hello-city: sf | x-hello-city: san francisco mk_header_rule( @@ -266,11 +266,11 @@ async fn header_based_routing() { 
timeout: Some(Duration::from_secs(10).try_into().unwrap()), http1: Some(proxy_protocol::Http1 { routes: vec![route.clone()], - failure_accrual: None, + ..Default::default() }), http2: Some(proxy_protocol::Http2 { routes: vec![route], - failure_accrual: None, + ..Default::default() }), opaque: Some(proxy_protocol::Opaque { routes: vec![policy::outbound_default_opaque_route(&dst_world)], @@ -400,8 +400,7 @@ async fn path_based_routing() { backends: Some(policy::http_first_available(std::iter::once( policy::backend(dst), ))), - - request_timeout: None, + ..Default::default() }; let route = outbound::HttpRoute { @@ -415,7 +414,7 @@ async fn path_based_routing() { backends: Some(policy::http_first_available(std::iter::once( policy::backend(&dst_world), ))), - request_timeout: None, + ..Default::default() }, // /goodbye/* mk_path_rule( @@ -449,11 +448,11 @@ async fn path_based_routing() { timeout: Some(Duration::from_secs(10).try_into().unwrap()), http1: Some(proxy_protocol::Http1 { routes: vec![route.clone()], - failure_accrual: None, + ..Default::default() }), http2: Some(proxy_protocol::Http2 { routes: vec![route], - failure_accrual: None, + ..Default::default() }), opaque: Some(proxy_protocol::Opaque { routes: vec![policy::outbound_default_opaque_route(&dst_world)], diff --git a/linkerd/app/integration/src/tests/discovery.rs b/linkerd/app/integration/src/tests/discovery.rs index 67caf75c73..49604da6d3 100644 --- a/linkerd/app/integration/src/tests/discovery.rs +++ b/linkerd/app/integration/src/tests/discovery.rs @@ -481,16 +481,17 @@ mod http2 { let res = fut.await.expect("beta response"); assert_eq!(res.status(), http::StatusCode::OK); - assert_eq!( - String::from_utf8( - hyper::body::to_bytes(res.into_body()) - .await - .unwrap() - .to_vec(), - ) - .unwrap(), - "beta" - ); + + let body = { + let body = res.into_body(); + let body = http_body_util::BodyExt::collect(body) + .await + .unwrap() + .to_bytes() + .to_vec(); + String::from_utf8(body).unwrap() + }; + 
assert_eq!(body, "beta"); } } diff --git a/linkerd/app/integration/src/tests/identity.rs b/linkerd/app/integration/src/tests/identity.rs index 38048bbe79..cdc26921c0 100644 --- a/linkerd/app/integration/src/tests/identity.rs +++ b/linkerd/app/integration/src/tests/identity.rs @@ -24,7 +24,7 @@ async fn nonblocking_identity_detection() { let msg1 = "custom tcp hello\n"; let msg2 = "custom tcp bye"; - let srv = server::tcp() + let srv = crate::tcp::server() .accept(move |read| { assert_eq!(read, msg1.as_bytes()); msg2 @@ -33,7 +33,7 @@ async fn nonblocking_identity_detection() { .await; let proxy = proxy.inbound(srv).run_with_test_env(env).await; - let client = client::tcp(proxy.inbound); + let client = crate::tcp::client(proxy.inbound); // Create an idle connection and then an active connection. Ensure that // protocol detection on the idle connection does not block communication on diff --git a/linkerd/app/integration/src/tests/profile_dst_overrides.rs b/linkerd/app/integration/src/tests/profile_dst_overrides.rs index 5cb6e69311..431a225548 100644 --- a/linkerd/app/integration/src/tests/profile_dst_overrides.rs +++ b/linkerd/app/integration/src/tests/profile_dst_overrides.rs @@ -1,5 +1,6 @@ use crate::*; use linkerd2_proxy_api::destination as pb; +use linkerd_app_core::svc::http::BoxBody; use std::sync::atomic::{AtomicUsize, Ordering}; struct Service { @@ -14,11 +15,17 @@ impl Service { let counter = response_counter.clone(); let svc = server::http1() .route_fn("/load-profile", |_| { - Response::builder().status(201).body("".into()).unwrap() + Response::builder() + .status(201) + .body(BoxBody::empty()) + .unwrap() }) .route_fn("/", move |_req| { counter.fetch_add(1, Ordering::SeqCst); - Response::builder().status(200).body(name.into()).unwrap() + Response::builder() + .status(200) + .body(BoxBody::from_static(name)) + .unwrap() }) .run() .await; diff --git a/linkerd/app/integration/src/tests/profiles.rs b/linkerd/app/integration/src/tests/profiles.rs index 
8505bc1a60..a35b240127 100644 --- a/linkerd/app/integration/src/tests/profiles.rs +++ b/linkerd/app/integration/src/tests/profiles.rs @@ -1,3 +1,5 @@ +use linkerd_app_core::svc::http::BoxBody; + use crate::*; use std::sync::atomic::{AtomicUsize, Ordering}; @@ -71,7 +73,10 @@ impl TestBuilder { // This route is just called by the test setup, to trigger the proxy // to start fetching the ServiceProfile. .route_fn("/load-profile", |_| { - Response::builder().status(201).body("".into()).unwrap() + Response::builder() + .status(201) + .body(BoxBody::empty()) + .unwrap() }); if self.default_routes { @@ -121,7 +126,7 @@ impl TestBuilder { ::std::thread::sleep(Duration::from_secs(1)); Response::builder() .status(200) - .body("slept".into()) + .body(BoxBody::from_static("slept")) .unwrap() }) .route_async("/0.5", move |req| { @@ -129,14 +134,20 @@ impl TestBuilder { async move { // Read the entire body before responding, so that the // client doesn't fail when writing it out. - let _body = hyper::body::to_bytes(req.into_body()).await; - tracing::debug!(body = ?_body.as_ref().map(|body| body.len()), "recieved body"); + let body = http_body_util::BodyExt::collect(req.into_body()) + .await + .map(http_body_util::Collected::to_bytes); + let bytes = body.as_ref().map(Bytes::len); + tracing::debug!(?bytes, "recieved body"); Ok::<_, Error>(if fail { - Response::builder().status(533).body("nope".into()).unwrap() + Response::builder() + .status(533) + .body(BoxBody::from_static("nope")) + .unwrap() } else { Response::builder() .status(200) - .body("retried".into()) + .body(BoxBody::from_static("retried")) .unwrap() }) } @@ -144,11 +155,14 @@ impl TestBuilder { .route_fn("/0.5/sleep", move |_req| { ::std::thread::sleep(Duration::from_secs(1)); if counter2.fetch_add(1, Ordering::Relaxed) % 2 == 0 { - Response::builder().status(533).body("nope".into()).unwrap() + Response::builder() + .status(533) + .body(BoxBody::from_static("nope")) + .unwrap() } else { Response::builder() 
.status(200) - .body("retried".into()) + .body(BoxBody::from_static("retried")) .unwrap() } }) @@ -156,12 +170,15 @@ impl TestBuilder { if counter3.fetch_add(1, Ordering::Relaxed) % 2 == 0 { Response::builder() .status(533) - .body(vec![b'x'; 1024 * 100].into()) + .body(BoxBody::new(http_body_util::Full::new(Bytes::from(vec![ + b'x'; + 1024 * 100 + ])))) .unwrap() } else { Response::builder() .status(200) - .body("retried".into()) + .body(BoxBody::from_static("retried")) .unwrap() } }) @@ -182,6 +199,8 @@ impl TestBuilder { } mod cross_version { + use std::convert::Infallible; + use super::*; pub(super) async fn retry_if_profile_allows(version: server::Server) { @@ -245,7 +264,7 @@ mod cross_version { let req = client .request_builder("/0.5") .method(http::Method::POST) - .body("req has a body".into()) + .body(BoxBody::from_static("req has a body")) .unwrap(); let res = client.request_body(req).await; assert_eq!(res.status(), 200); @@ -266,7 +285,7 @@ mod cross_version { let req = client .request_builder("/0.5") .method(http::Method::PUT) - .body("req has a body".into()) + .body(BoxBody::from_static("req has a body")) .unwrap(); let res = client.request_body(req).await; assert_eq!(res.status(), 200); @@ -284,13 +303,14 @@ mod cross_version { .await; let client = test.client; - let (mut tx, body) = hyper::body::Body::channel(); + let (mut tx, body) = http_body_util::channel::Channel::::new(1024); let req = client .request_builder("/0.5") .method("POST") .body(body) .unwrap(); - let res = tokio::spawn(async move { client.request_body(req).await }); + let fut = client.send_req(req); + let res = tokio::spawn(fut); tx.send_data(Bytes::from_static(b"hello")) .await .expect("the whole body should be read"); @@ -298,7 +318,7 @@ mod cross_version { .await .expect("the whole body should be read"); drop(tx); - let res = res.await.unwrap(); + let res = res.await.unwrap().unwrap(); assert_eq!(res.status(), 200); } @@ -361,7 +381,9 @@ mod cross_version { let req = client 
.request_builder("/0.5") .method("POST") - .body(hyper::Body::from(&[1u8; 64 * 1024 + 1][..])) + .body(BoxBody::new(http_body_util::Full::new(Bytes::from( + &[1u8; 64 * 1024 + 1][..], + )))) .unwrap(); let res = client.request_body(req).await; assert_eq!(res.status(), 533); @@ -383,13 +405,14 @@ mod cross_version { .await; let client = test.client; - let (mut tx, body) = hyper::body::Body::channel(); + let (mut tx, body) = http_body_util::channel::Channel::::new(1024); let req = client .request_builder("/0.5") .method("POST") .body(body) .unwrap(); - let res = tokio::spawn(async move { client.request_body(req).await }); + let fut = client.send_req(req); + let res = tokio::spawn(fut); // send a 32k chunk tx.send_data(Bytes::from(&[1u8; 32 * 1024][..])) .await @@ -403,7 +426,7 @@ mod cross_version { .await .expect("the whole body should be read"); drop(tx); - let res = res.await.unwrap(); + let res = res.await.unwrap().unwrap(); assert_eq!(res.status(), 533); } @@ -587,6 +610,8 @@ mod http2 { } mod grpc_retry { + use std::convert::Infallible; + use super::*; use http::header::{HeaderName, HeaderValue}; static GRPC_STATUS: HeaderName = HeaderName::from_static("grpc-status"); @@ -610,7 +635,7 @@ mod grpc_retry { let rsp = Response::builder() .header(GRPC_STATUS.clone(), header) .status(200) - .body(hyper::Body::empty()) + .body(BoxBody::empty()) .unwrap(); tracing::debug!(headers = ?rsp.headers()); rsp @@ -658,9 +683,16 @@ mod grpc_retry { let mut trailers = HeaderMap::with_capacity(1); trailers.insert(GRPC_STATUS.clone(), status); tracing::debug!(?trailers); - let (mut tx, body) = hyper::body::Body::channel(); + let (mut tx, body) = + http_body_util::channel::Channel::::new(1024); tx.send_trailers(trailers).await.unwrap(); - Ok::<_, Error>(Response::builder().status(200).body(body).unwrap()) + Ok::<_, Error>( + Response::builder() + .status(200) + .body(body) + .unwrap() + .map(BoxBody::new), + ) } } }); @@ -701,10 +733,17 @@ mod grpc_retry { let mut trailers = 
HeaderMap::with_capacity(1); trailers.insert(GRPC_STATUS.clone(), GRPC_STATUS_OK.clone()); tracing::debug!(?trailers); - let (mut tx, body) = hyper::body::Body::channel(); + let (mut tx, body) = + http_body_util::channel::Channel::::new(1024); tx.send_data("hello world".into()).await.unwrap(); tx.send_trailers(trailers).await.unwrap(); - Ok::<_, Error>(Response::builder().status(200).body(body).unwrap()) + Ok::<_, Error>( + Response::builder() + .status(200) + .body(body) + .unwrap() + .map(BoxBody::new), + ) } } }); @@ -749,13 +788,20 @@ mod grpc_retry { let mut trailers = HeaderMap::with_capacity(1); trailers.insert(GRPC_STATUS.clone(), GRPC_STATUS_OK.clone()); tracing::debug!(?trailers); - let (mut tx, body) = hyper::body::Body::channel(); + let (mut tx, body) = + http_body_util::channel::Channel::::new(1024); tokio::spawn(async move { tx.send_data("hello".into()).await.unwrap(); tx.send_data("world".into()).await.unwrap(); tx.send_trailers(trailers).await.unwrap(); }); - Ok::<_, Error>(Response::builder().status(200).body(body).unwrap()) + Ok::<_, Error>( + Response::builder() + .status(200) + .body(body) + .unwrap() + .map(BoxBody::new), + ) } } }); @@ -787,21 +833,38 @@ mod grpc_retry { assert_eq!(retries.load(Ordering::Relaxed), 1); } - async fn data(body: &mut hyper::Body) -> Bytes { + async fn data(body: &mut B) -> B::Data + where + B: http_body::Body + Unpin, + B::Data: std::fmt::Debug, + B::Error: std::fmt::Debug, + { + use http_body_util::BodyExt; let data = body - .data() + .frame() .await - .expect("body data frame must not be eaten") - .unwrap(); + .expect("a result") + .expect("a frame") + .into_data() + .expect("a chunk of data"); tracing::info!(?data); data } - async fn trailers(body: &mut hyper::Body) -> http::HeaderMap { + + async fn trailers(body: &mut B) -> http::HeaderMap + where + B: http_body::Body + Unpin, + B::Error: std::fmt::Debug, + { + use http_body_util::BodyExt; let trailers = body - .trailers() + .frame() .await - .expect("trailers 
future should not fail") - .expect("response should have trailers"); + .expect("a result") + .expect("a frame") + .into_trailers() + .ok() + .expect("a trailers frame"); tracing::info!(?trailers); trailers } diff --git a/linkerd/app/integration/src/tests/shutdown.rs b/linkerd/app/integration/src/tests/shutdown.rs index f07f0672da..e8adcfa14e 100644 --- a/linkerd/app/integration/src/tests/shutdown.rs +++ b/linkerd/app/integration/src/tests/shutdown.rs @@ -1,3 +1,5 @@ +use linkerd_app_core::svc::http::BoxBody; + use crate::*; #[tokio::test] @@ -26,10 +28,13 @@ async fn h2_exercise_goaways_connections() { let (shdn, rx) = shutdown_signal(); - let body = Bytes::from(vec![b'1'; RESPONSE_SIZE]); + let body = http_body_util::Full::new(Bytes::from(vec![b'1'; RESPONSE_SIZE])); let srv = server::http2() .route_fn("/", move |_req| { - Response::builder().body(body.clone().into()).unwrap() + Response::builder() + .body(body.clone()) + .unwrap() + .map(BoxBody::new) }) .run() .await; @@ -48,8 +53,10 @@ async fn h2_exercise_goaways_connections() { let bodies = resps .into_iter() - .map(|resp| { - hyper::body::aggregate(resp.into_body()) + .map(Response::into_body) + .map(|body| { + http_body_util::BodyExt::collect(body) + .map_ok(http_body_util::Collected::aggregate) // Make sure the bodies weren't cut off .map_ok(|buf| assert_eq!(buf.remaining(), RESPONSE_SIZE)) }) @@ -70,7 +77,7 @@ async fn http1_closes_idle_connections() { let (shdn, rx) = shutdown_signal(); const RESPONSE_SIZE: usize = 1024 * 16; - let body = Bytes::from(vec![b'1'; RESPONSE_SIZE]); + let body = http_body_util::Full::new(Bytes::from(vec![b'1'; RESPONSE_SIZE])); let shdn = Arc::new(Mutex::new(Some(shdn))); let srv = server::http1() @@ -78,7 +85,10 @@ async fn http1_closes_idle_connections() { // Trigger a shutdown signal while the request is made // but a response isn't returned yet. 
shdn.lock().take().expect("only 1 request").signal(); - Response::builder().body(body.clone().into()).unwrap() + Response::builder() + .body(body.clone()) + .unwrap() + .map(BoxBody::new) }) .run() .await; @@ -99,7 +109,7 @@ async fn tcp_waits_for_proxies_to_close() { let msg1 = "custom tcp hello\n"; let msg2 = "custom tcp bye"; - let srv = server::tcp() + let srv = crate::tcp::server() // Trigger a shutdown while TCP stream is busy .accept_fut(move |mut sock| { async move { @@ -115,7 +125,7 @@ async fn tcp_waits_for_proxies_to_close() { .await; let proxy = proxy::new().inbound(srv).shutdown_signal(rx).run().await; - let client = client::tcp(proxy.inbound); + let client = crate::tcp::client(proxy.inbound); let tcp_client = client.connect().await; diff --git a/linkerd/app/integration/src/tests/tap.rs b/linkerd/app/integration/src/tests/tap.rs index 8736953c31..31d14929d6 100644 --- a/linkerd/app/integration/src/tests/tap.rs +++ b/linkerd/app/integration/src/tests/tap.rs @@ -253,10 +253,13 @@ async fn grpc_headers_end() { .unwrap(); assert_eq!(res.status(), 200); assert_eq!(res.headers()["grpc-status"], "1"); - assert_eq!( - hyper::body::to_bytes(res.into_body()).await.unwrap().len(), - 0 - ); + let body = res.into_body(); + let bytes = http_body_util::BodyExt::collect(body) + .await + .unwrap() + .to_bytes() + .len(); + assert_eq!(bytes, 0); let event = events.skip(2).next().await.expect("2nd").expect("stream"); assert_eq!(event.response_end_eos_grpc(), 1); diff --git a/linkerd/app/integration/src/tests/telemetry.rs b/linkerd/app/integration/src/tests/telemetry.rs index 3b466a0a0c..fc74455097 100644 --- a/linkerd/app/integration/src/tests/telemetry.rs +++ b/linkerd/app/integration/src/tests/telemetry.rs @@ -57,9 +57,7 @@ impl Fixture { let client = client::new(proxy.inbound, "tele.test.svc.cluster.local"); let tcp_dst_labels = metrics::labels().label("direction", "inbound"); let tcp_src_labels = tcp_dst_labels.clone().label("target_addr", orig_dst); - let labels = 
tcp_dst_labels - .clone() - .label("authority", "tele.test.svc.cluster.local"); + let labels = tcp_dst_labels.clone().label("target_port", orig_dst.port()); let tcp_src_labels = tcp_src_labels.label("peer", "src"); let tcp_dst_labels = tcp_dst_labels.label("peer", "dst"); Fixture { @@ -121,7 +119,7 @@ impl TcpFixture { const BYE_MSG: &'static str = "custom tcp bye"; async fn server() -> server::Listening { - server::tcp() + crate::tcp::server() .accept(move |read| { assert_eq!(read, Self::HELLO_MSG.as_bytes()); TcpFixture::BYE_MSG @@ -147,7 +145,7 @@ impl TcpFixture { .run() .await; - let client = client::tcp(proxy.inbound); + let client = crate::tcp::client(proxy.inbound); let metrics = client::http1(proxy.admin, "localhost"); let src_labels = metrics::labels() @@ -186,7 +184,7 @@ impl TcpFixture { .run() .await; - let client = client::tcp(proxy.outbound); + let client = crate::tcp::client(proxy.outbound); let metrics = client::http1(proxy.admin, "localhost"); let src_labels = metrics::labels() @@ -294,7 +292,7 @@ async fn metrics_endpoint_outbound_response_count() { test_http_count("response_total", Fixture::outbound()).await } -async fn test_http_count(metric: &str, fixture: impl Future) { +async fn test_http_count(metric_name: &str, fixture: impl Future) { let _trace = trace_init(); let Fixture { client, @@ -307,9 +305,13 @@ async fn test_http_count(metric: &str, fixture: impl Future) { .. 
} = fixture.await; - let metric = labels.metric(metric); + let metric = labels.metric(metric_name); - assert!(metric.is_not_in(metrics.get("/metrics").await)); + let scrape = metrics.get("/metrics").await; + assert!( + metric.is_not_in(scrape), + "{metric:?} should not be in /metrics" + ); info!("client.get(/)"); assert_eq!(client.get("/").await, "hello"); @@ -321,6 +323,7 @@ async fn test_http_count(metric: &str, fixture: impl Future) { mod response_classification { use super::Fixture; use crate::*; + use linkerd_app_core::svc::http::BoxBody; use tracing::info; const REQ_STATUS_HEADER: &str = "x-test-status-requested"; @@ -355,7 +358,7 @@ mod response_classification { // TODO: tests for grpc statuses unreachable!("not called in test") } else { - Response::new("".into()) + Response::new(BoxBody::empty()) }; *rsp.status_mut() = status; rsp @@ -1304,9 +1307,13 @@ async fn metrics_compression() { ); } - let mut body = hyper::body::aggregate(resp.into_body()) - .await - .expect("response body concat"); + let mut body = { + let body = resp.into_body(); + http_body_util::BodyExt::collect(body) + .await + .expect("response body concat") + .aggregate() + }; let mut decoder = flate2::read::GzDecoder::new(std::io::Cursor::new( body.copy_to_bytes(body.remaining()), )); diff --git a/linkerd/app/integration/src/tests/telemetry/log_stream.rs b/linkerd/app/integration/src/tests/telemetry/log_stream.rs index 0e2e962cdf..e2fb7ed37a 100644 --- a/linkerd/app/integration/src/tests/telemetry/log_stream.rs +++ b/linkerd/app/integration/src/tests/telemetry/log_stream.rs @@ -177,7 +177,7 @@ async fn get_log_stream( client .request_builder(&format!("{}?{}", PATH, filter)) .method(http::Method::GET) - .body(hyper::Body::from(filter)) + .body(http_body_util::Full::new(Bytes::from(filter))) .unwrap(), ) .await; @@ -199,7 +199,7 @@ async fn query_log_stream( client .request_builder(PATH) .method("QUERY") - .body(hyper::Body::from(filter)) + .body(http_body_util::Full::new(Bytes::from(filter))) 
.unwrap(), ) .await; @@ -210,17 +210,26 @@ async fn query_log_stream( /// Spawns a task to collect all the logs in a streaming body and parse them as /// JSON. -fn collect_logs( - mut body: hyper::Body, -) -> (JoinHandle>, oneshot::Sender<()>) { +fn collect_logs(mut body: B) -> (JoinHandle>, oneshot::Sender<()>) +where + B: Body + Send + Unpin + 'static, + B::Error: std::error::Error, +{ + use http_body_util::BodyExt; let (done_tx, done_rx) = oneshot::channel(); let result = tokio::spawn(async move { let mut result = Vec::new(); let logs = &mut result; let fut = async move { - while let Some(res) = body.data().await { + while let Some(res) = body.frame().await { let chunk = match res { - Ok(chunk) => chunk, + Ok(frame) => { + if let Ok(data) = frame.into_data() { + data + } else { + break; + } + } Err(e) => { println!("body failed: {}", e); break; diff --git a/linkerd/app/integration/src/tests/telemetry/tcp_errors.rs b/linkerd/app/integration/src/tests/telemetry/tcp_errors.rs index 6aed1daa46..fdfaf8ba7c 100644 --- a/linkerd/app/integration/src/tests/telemetry/tcp_errors.rs +++ b/linkerd/app/integration/src/tests/telemetry/tcp_errors.rs @@ -113,7 +113,7 @@ async fn inbound_timeout() { let _trace = trace_init(); let (proxy, metrics) = Test::default().run().await; - let client = client::tcp(proxy.inbound); + let client = crate::tcp::client(proxy.inbound); let _tcp_client = client.connect().await; @@ -133,7 +133,7 @@ async fn inbound_io_err() { let _trace = trace_init(); let (proxy, metrics) = Test::default().run().await; - let client = client::tcp(proxy.inbound); + let client = crate::tcp::client(proxy.inbound); let tcp_client = client.connect().await; @@ -167,7 +167,7 @@ async fn inbound_success() { "foo.ns1.svc.cluster.local", client_config.clone(), ); - let no_tls_client = client::tcp(proxy.inbound); + let no_tls_client = crate::tcp::client(proxy.inbound); let metric = metric(&proxy) .label("error", "tls detection timeout") @@ -198,7 +198,7 @@ async fn 
inbound_multi() { let _trace = trace_init(); let (proxy, metrics) = Test::default().run().await; - let client = client::tcp(proxy.inbound); + let client = crate::tcp::client(proxy.inbound); let metric = metric(&proxy); let timeout_metric = metric.clone().label("error", "tls detection timeout"); @@ -244,7 +244,7 @@ async fn inbound_direct_multi() { let proxy = proxy::new().inbound(srv).inbound_direct(); let (proxy, metrics) = Test::new(proxy).run().await; - let client = client::tcp(proxy.inbound); + let client = crate::tcp::client(proxy.inbound); let metric = metrics::metric(METRIC).label("target_addr", proxy.inbound); let timeout_metric = metric.clone().label("error", "tls detection timeout"); @@ -291,7 +291,7 @@ async fn inbound_invalid_ip() { .run() .await; - let client = client::tcp(proxy.inbound); + let client = crate::tcp::client(proxy.inbound); let metric = metric(&proxy) .label("error", "unexpected") .label("target_addr", fake_ip); @@ -354,7 +354,7 @@ async fn inbound_direct_success() { .await; let tls_client = client::http1(proxy2.outbound, auth); - let no_tls_client = client::tcp(proxy1.inbound); + let no_tls_client = crate::tcp::client(proxy1.inbound); let metric = metrics::metric(METRIC) .label("target_addr", proxy1.inbound) diff --git a/linkerd/app/integration/src/tests/transparency.rs b/linkerd/app/integration/src/tests/transparency.rs index d72f8446c6..45845b29cc 100644 --- a/linkerd/app/integration/src/tests/transparency.rs +++ b/linkerd/app/integration/src/tests/transparency.rs @@ -1,4 +1,5 @@ use crate::*; +use linkerd_app_core::svc::http::{BoxBody, TokioExecutor}; use std::error::Error as _; use tokio::time::timeout; @@ -52,7 +53,7 @@ async fn outbound_tcp() { let msg1 = "custom tcp hello\n"; let msg2 = "custom tcp bye"; - let srv = server::tcp() + let srv = crate::tcp::server() .accept(move |read| { assert_eq!(read, msg1.as_bytes()); msg2 @@ -69,7 +70,7 @@ async fn outbound_tcp() { .run() .await; - let client = client::tcp(proxy.outbound); + let 
client = crate::tcp::client(proxy.outbound); let tcp_client = client.connect().await; @@ -90,7 +91,7 @@ async fn outbound_tcp_external() { let msg1 = "custom tcp hello\n"; let msg2 = "custom tcp bye"; - let srv = server::tcp() + let srv = crate::tcp::server() .accept(move |read| { assert_eq!(read, msg1.as_bytes()); msg2 @@ -108,7 +109,7 @@ async fn outbound_tcp_external() { .run() .await; - let client = client::tcp(proxy.outbound); + let client = crate::tcp::client(proxy.outbound); let tcp_client = client.connect().await; @@ -129,7 +130,7 @@ async fn inbound_tcp() { let msg1 = "custom tcp hello\n"; let msg2 = "custom tcp bye"; - let srv = server::tcp() + let srv = crate::tcp::server() .accept(move |read| { assert_eq!(read, msg1.as_bytes()); msg2 @@ -138,7 +139,7 @@ async fn inbound_tcp() { .await; let proxy = proxy::new().inbound(srv).run().await; - let client = client::tcp(proxy.inbound); + let client = crate::tcp::client(proxy.inbound); let tcp_client = client.connect().await; @@ -200,7 +201,7 @@ async fn test_inbound_server_speaks_first(env: TestEnv) { let _trace = trace_init(); let (tx, rx) = mpsc::channel(1); - let srv = server::tcp() + let srv = crate::tcp::server() .accept_fut(move |sock| serve_server_first(sock, tx)) .run() .await; @@ -230,7 +231,7 @@ async fn inbound_tcp_server_first_no_discovery() { let _trace = trace_init(); let (tx, rx) = mpsc::channel(1); - let srv = server::tcp() + let srv = crate::tcp::server() .accept_fut(move |sock| serve_server_first(sock, tx)) .run() .await; @@ -302,7 +303,7 @@ async fn outbound_opaque_tcp_server_first() { let _trace = trace_init(); let (tx, rx) = mpsc::channel(1); - let srv = server::tcp() + let srv = crate::tcp::server() .accept_fut(move |sock| serve_server_first(sock, tx)) .run() .await; @@ -359,7 +360,7 @@ async fn serve_server_first(mut sock: tokio::net::TcpStream, tx: mpsc::Sender<() async fn server_first_client(addr: SocketAddr, mut rx: mpsc::Receiver<()>) { const TIMEOUT: Duration = 
Duration::from_secs(5); - let client = client::tcp(addr); + let client = crate::tcp::client(addr); let tcp_client = client.connect().await; @@ -382,7 +383,7 @@ async fn tcp_connections_close_if_client_closes() { let (tx, mut rx) = mpsc::channel::<()>(1); - let srv = server::tcp() + let srv = crate::tcp::server() .accept_fut(move |mut sock| { async move { let _tx = tx; @@ -401,7 +402,7 @@ async fn tcp_connections_close_if_client_closes() { .await; let proxy = proxy::new().inbound(srv).run().await; - let client = client::tcp(proxy.inbound); + let client = crate::tcp::client(proxy.inbound); let tcp_client = client.connect().await; tcp_client.write(msg1).await; @@ -492,7 +493,7 @@ macro_rules! http1_tests { assert_eq!(req.headers().get("host").unwrap(), host); Response::builder() .version(http::Version::HTTP_10) - .body("".into()) + .body(linkerd_app_core::svc::http::BoxBody::empty()) .unwrap() }) .run() @@ -529,7 +530,7 @@ macro_rules! http1_tests { .route_fn("/", move |req| { assert_eq!(req.headers()["host"], host); assert_eq!(req.uri().to_string(), format!("http://{}/", auth)); - Response::new("".into()) + Response::new(linkerd_app_core::svc::http::BoxBody::empty()) }) .run() .await; @@ -581,7 +582,7 @@ macro_rules! http1_tests { let chatproto_req = "[chatproto-c]{send}: hi all\n"; let chatproto_res = "[chatproto-s]{recv}: welcome!\n"; - let srv = server::tcp() + let srv = crate::tcp::server() .accept_fut(move |mut sock| { async move { // Read upgrade_req... @@ -607,7 +608,7 @@ macro_rules! http1_tests { let mk = $proxy; let proxy = mk(srv).await; - let client = client::tcp(proxy.inbound); + let client = crate::tcp::client(proxy.inbound); let tcp_client = client.connect().await; @@ -730,7 +731,7 @@ macro_rules! http1_tests { let tunneled_req = b"{send}: hi all\n"; let tunneled_res = b"{recv}: welcome!\n"; - let srv = server::tcp() + let srv = crate::tcp::server() .accept_fut(move |mut sock| { async move { // Read connect_req... @@ -763,7 +764,7 @@ macro_rules! 
http1_tests { let mk = $proxy; let proxy = mk(srv).await; - let client = client::tcp(proxy.inbound); + let client = crate::tcp::client(proxy.inbound); let tcp_client = client.connect().await; @@ -822,7 +823,7 @@ macro_rules! http1_tests { let tunneled_req = b"{send}: hi all\n"; let tunneled_res = b"{recv}: welcome!\n"; - let srv = server::tcp() + let srv = crate::tcp::server() .accept_fut(move |mut sock| { async move { // Read connect_req... @@ -855,7 +856,7 @@ macro_rules! http1_tests { let mk = $proxy; let proxy = mk(srv).await; - let client = client::tcp(proxy.inbound); + let client = crate::tcp::client(proxy.inbound); let tcp_client = client.connect().await; @@ -886,7 +887,7 @@ macro_rules! http1_tests { async fn http11_connect_bad_requests() { let _trace = trace_init(); - let srv = server::tcp() + let srv = crate::tcp::server() .accept(move |_sock| -> Vec { unreachable!("shouldn't get through the proxy"); }) @@ -897,7 +898,7 @@ macro_rules! http1_tests { // A TCP client is used since the HTTP client would stop these requests // from ever touching the network. - let client = client::tcp(proxy.inbound); + let client = crate::tcp::client(proxy.inbound); let bad_uris = vec!["/origin-form", "/", "http://test/bar", "http://test", "*"]; @@ -969,7 +970,7 @@ macro_rules! http1_tests { let req = client .request_builder("/") .method("POST") - .body("hello".into()) + .body(linkerd_app_core::svc::http::BoxBody::from_static("hello")) .unwrap(); let resp = client.request_body(req).await; @@ -992,7 +993,7 @@ macro_rules! http1_tests { Ok::<_, std::io::Error>( Response::builder() .header("transfer-encoding", "chunked") - .body("world".into()) + .body(linkerd_app_core::svc::http::BoxBody::from_static("world")) .unwrap(), ) }) @@ -1006,7 +1007,7 @@ macro_rules! 
http1_tests { .request_builder("/") .method("POST") .header("transfer-encoding", "chunked") - .body("hello".into()) + .body(linkerd_app_core::svc::http::BoxBody::from_static("hello")) .unwrap(); let resp = client.request_body(req).await; @@ -1031,7 +1032,7 @@ macro_rules! http1_tests { } else { StatusCode::OK }; - let mut res = Response::new("".into()); + let mut res = Response::new(linkerd_app_core::svc::http::BoxBody::empty()); *res.status_mut() = status; res }) @@ -1070,7 +1071,7 @@ macro_rules! http1_tests { Response::builder() .status(status) .header("content-length", "0") - .body("".into()) + .body(linkerd_app_core::svc::http::BoxBody::empty()) .unwrap() }) .run() @@ -1116,7 +1117,10 @@ macro_rules! http1_tests { }) .unwrap_or(200); - Response::builder().status(status).body("".into()).unwrap() + Response::builder() + .status(status) + .body(linkerd_app_core::svc::http::BoxBody::empty()) + .unwrap() }) .run() .await; @@ -1179,7 +1183,7 @@ macro_rules! http1_tests { assert_eq!(req.method(), "HEAD"); Response::builder() .header("content-length", "55") - .body("".into()) + .body(linkerd_app_core::svc::http::BoxBody::empty()) .unwrap() }) .run() @@ -1209,7 +1213,7 @@ macro_rules! 
http1_tests { let _trace = trace_init(); // test both http/1.0 and 1.1 - let srv = server::tcp() + let srv = crate::tcp::server() .accept(move |_read| { "\ HTTP/1.0 200 OK\r\n\ @@ -1389,14 +1393,14 @@ async fn http10_without_host() { assert_eq!(req.uri().to_string(), "/"); Response::builder() .version(http::Version::HTTP_10) - .body("".into()) + .body(BoxBody::empty()) .unwrap() }) .run() .await; let proxy = proxy::new().inbound(srv).run().await; - let client = client::tcp(proxy.inbound); + let client = crate::tcp::client(proxy.inbound); let tcp_client = client.connect().await; @@ -1586,7 +1590,7 @@ async fn http2_request_without_authority() { let srv = server::http2() .route_fn("/", |req| { assert_eq!(req.uri().authority(), None); - Response::new("".into()) + Response::new(BoxBody::empty()) }) .run() .await; @@ -1601,15 +1605,15 @@ async fn http2_request_without_authority() { let io = tokio::net::TcpStream::connect(&addr) .await .expect("connect error"); - let (mut client, conn) = hyper::client::conn::Builder::new() - .http2_only(true) - .handshake(io) + let (mut client, conn) = hyper::client::conn::http2::Builder::new(TokioExecutor::new()) + .timer(hyper_util::rt::TokioTimer::new()) + .handshake(hyper_util::rt::TokioIo::new(io)) .await .expect("handshake error"); tokio::spawn(conn.map_err(|e| tracing::info!("conn error: {:?}", e))); - let req = Request::new(hyper::Body::empty()); + let req = Request::new(BoxBody::empty()); // these properties are specifically what we want, and set by default assert_eq!(req.uri(), "/"); assert_eq!(req.version(), http::Version::HTTP_11); @@ -1633,12 +1637,14 @@ async fn http2_rst_stream_is_propagated() { let proxy = proxy::new().inbound(srv).run().await; let client = client::http2(proxy.inbound, "transparency.example.com"); - let err: hyper::Error = client + let err: hyper_util::client::legacy::Error = client .request(client.request_builder("/")) .await .expect_err("client request should error"); let rst = err + .source() + 
.expect("error should have a source") .source() .expect("error should have a source") .downcast_ref::() diff --git a/linkerd/app/outbound/Cargo.toml b/linkerd/app/outbound/Cargo.toml index 0d4a68c889..071db77e51 100644 --- a/linkerd/app/outbound/Cargo.toml +++ b/linkerd/app/outbound/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "linkerd-app-outbound" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } description = """ Configures and runs the outbound proxy """ @@ -13,50 +13,67 @@ Configures and runs the outbound proxy default = [] allow-loopback = [] test-subscriber = [] -test-util = ["linkerd-app-test", "linkerd-meshtls-rustls/test-util"] +test-util = ["linkerd-app-test", "linkerd-meshtls-rustls/test-util", "dep:http-body"] + +prometheus-client-rust-242 = [] # TODO [dependencies] ahash = "0.8" -bytes = "1" -http = "0.2" +bytes = { workspace = true } +http = { workspace = true } +http-body = { workspace = true, optional = true } futures = { version = "0.3", default-features = false } -linkerd2-proxy-api = { version = "0.13", features = ["outbound"] } +linkerd2-proxy-api = { workspace = true, features = ["outbound"] } once_cell = "1" parking_lot = "0.12" -prometheus-client = "0.22" -thiserror = "1" +pin-project = "1" +prometheus-client = { workspace = true } +thiserror = "2" tokio = { version = "1", features = ["sync"] } -tonic = { version = "0.10", default-features = false } -tower = { version = "0.4", features = ["util"] } +tonic = { workspace = true, default-features = false } +tower = { workspace = true, features = ["util"] } tracing = "0.1" -pin-project = "1" linkerd-app-core = { path = "../core" } linkerd-app-test = { path = "../test", optional = true } linkerd-distribute = { path = "../../distribute" } linkerd-http-classify = { path = 
"../../http/classify" } +linkerd-http-prom = { path = "../../http/prom" } linkerd-http-retry = { path = "../../http/retry" } linkerd-http-route = { path = "../../http/route" } linkerd-identity = { path = "../../identity" } linkerd-meshtls-rustls = { path = "../../meshtls/rustls", optional = true } +linkerd-opaq-route = { path = "../../opaq-route" } linkerd-proxy-client-policy = { path = "../../proxy/client-policy", features = [ "proto", ] } linkerd-retry = { path = "../../retry" } +linkerd-tls-route = { path = "../../tls/route" } linkerd-tonic-stream = { path = "../../tonic-stream" } linkerd-tonic-watch = { path = "../../tonic-watch" } [dev-dependencies] -hyper = { version = "0.14", features = ["http1", "http2"] } +futures-util = "0.3" +http-body = { workspace = true } +http-body-util = { workspace = true, features = ["channel"] } +hyper = { workspace = true, features = ["http1", "http2"] } +hyper-util = { workspace = true } +tokio = { version = "1", features = ["macros", "sync", "time"] } +tokio-rustls = { workspace = true } +tokio-test = "0.4" +tower-test = { workspace = true } + linkerd-app-test = { path = "../test", features = ["client-policy"] } +linkerd-http-box = { path = "../../http/box" } +linkerd-http-prom = { path = "../../http/prom", features = ["test-util"] } linkerd-io = { path = "../../io", features = ["tokio-test"] } linkerd-meshtls = { path = "../../meshtls", features = ["rustls"] } linkerd-meshtls-rustls = { path = "../../meshtls/rustls", features = [ "test-util", ] } +linkerd-mock-http-body = { path = "../../mock/http-body" } linkerd-stack = { path = "../../stack", features = ["test-util"] } linkerd-tracing = { path = "../../tracing", features = ["ansi"] } -parking_lot = "0.12" -tokio = { version = "1", features = ["macros", "sync", "time"] } -tokio-test = "0.4" -tower-test = "0.4" + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ['cfg(fuzzing)'] } diff --git a/linkerd/app/outbound/src/discover.rs 
b/linkerd/app/outbound/src/discover.rs index 51a3787580..196a6e87c5 100644 --- a/linkerd/app/outbound/src/discover.rs +++ b/linkerd/app/outbound/src/discover.rs @@ -234,18 +234,18 @@ fn policy_for_backend( static NO_OPAQ_FILTERS: Lazy> = Lazy::new(|| Arc::new([])); let opaque = policy::opaq::Opaque { - policy: Some(policy::opaq::Policy { - meta: meta.clone(), - filters: NO_OPAQ_FILTERS.clone(), - failure_policy: Default::default(), - request_timeout: None, - distribution: policy::RouteDistribution::FirstAvailable(Arc::new([ - policy::RouteBackend { - filters: NO_OPAQ_FILTERS.clone(), - backend: backend.clone(), - request_timeout: None, - }, - ])), + routes: Some(policy::opaq::Route { + policy: policy::opaq::Policy { + meta: meta.clone(), + filters: NO_OPAQ_FILTERS.clone(), + params: Default::default(), + distribution: policy::RouteDistribution::FirstAvailable(Arc::new([ + policy::RouteBackend { + filters: NO_OPAQ_FILTERS.clone(), + backend: backend.clone(), + }, + ])), + }, }), }; @@ -256,13 +256,11 @@ fn policy_for_backend( policy: policy::http::Policy { meta: meta.clone(), filters: NO_HTTP_FILTERS.clone(), - failure_policy: Default::default(), - request_timeout: None, + params: Default::default(), distribution: policy::RouteDistribution::FirstAvailable(Arc::new([ policy::RouteBackend { filters: NO_HTTP_FILTERS.clone(), backend: backend.clone(), - request_timeout: None, }, ])), }, @@ -331,6 +329,15 @@ impl svc::Param> for Discovery { } } +impl svc::Param for Discovery +where + T: svc::Param, +{ + fn param(&self) -> OrigDstAddr { + self.parent.param() + } +} + impl svc::Param>> for Discovery { fn param(&self) -> Option> { self.profile.clone().map(Into::into) diff --git a/linkerd/app/outbound/src/http.rs b/linkerd/app/outbound/src/http.rs index a76cfbb674..c39bdfc6f2 100644 --- a/linkerd/app/outbound/src/http.rs +++ b/linkerd/app/outbound/src/http.rs @@ -32,8 +32,8 @@ pub struct Http(T); #[derive(Clone, Debug, Default)] pub struct HttpMetrics { balancer: 
concrete::BalancerMetrics, - http_route: policy::RouteMetrics, - grpc_route: policy::RouteMetrics, + http_route: policy::HttpRouteMetrics, + grpc_route: policy::GrpcRouteMetrics, } pub fn spawn_routes( @@ -89,7 +89,7 @@ impl Outbound>>> pub fn push_http_cached(self, resolve: R) -> Outbound> where // Logical HTTP target. - T: svc::Param, + T: svc::Param, T: svc::Param>, T: Clone + Debug + PartialEq + Eq + Hash + Send + Sync + 'static, // Endpoint resolution. @@ -109,11 +109,11 @@ impl Outbound>>> // === impl Http === -impl svc::Param for Http +impl svc::Param for Http where - T: svc::Param, + T: svc::Param, { - fn param(&self) -> http::Version { + fn param(&self) -> http::Variant { self.0.param() } } @@ -132,12 +132,12 @@ where impl HttpMetrics { pub fn register(registry: &mut prom::Registry) -> Self { let http = registry.sub_registry_with_prefix("http"); - let http_route = policy::RouteMetrics::register(http.sub_registry_with_prefix("route")); + let http_route = policy::HttpRouteMetrics::register(http.sub_registry_with_prefix("route")); let balancer = concrete::BalancerMetrics::register(http.sub_registry_with_prefix("balancer")); let grpc = registry.sub_registry_with_prefix("grpc"); - let grpc_route = policy::RouteMetrics::register(grpc.sub_registry_with_prefix("route")); + let grpc_route = policy::GrpcRouteMetrics::register(grpc.sub_registry_with_prefix("route")); Self { balancer, diff --git a/linkerd/app/outbound/src/http/concrete.rs b/linkerd/app/outbound/src/http/concrete.rs index 1dcc9468fb..557e2fccc4 100644 --- a/linkerd/app/outbound/src/http/concrete.rs +++ b/linkerd/app/outbound/src/http/concrete.rs @@ -2,10 +2,14 @@ //! and distributes HTTP requests among them. 
use super::{balance::EwmaConfig, client, handle_proxy_error_headers}; -use crate::{http, stack_labels, BackendRef, Outbound, ParentRef}; +use crate::{ + http, stack_labels, + zone::{tcp_zone_labels, TcpZoneLabels}, + BackendRef, Outbound, ParentRef, +}; use linkerd_app_core::{ config::{ConnectConfig, QueueConfig}, - metrics::{prefix_labels, EndpointLabels, OutboundEndpointLabels}, + metrics::{prefix_labels, EndpointLabels, OutboundEndpointLabels, OutboundZoneLocality}, profiles, proxy::{ api_resolve::{ConcreteAddr, Metadata, ProtocolHint}, @@ -29,10 +33,13 @@ pub use self::balance::BalancerMetrics; pub enum Dispatch { Balance(NameAddr, EwmaConfig), Forward(Remote, Metadata), - Fail { message: Arc }, + /// A backend dispatcher that explicitly fails all requests. + Fail { + message: Arc, + }, } -/// A backend dispatcher explicitly fails all requests. +/// A backend dispatcher that explicitly fails all requests. #[derive(Debug, thiserror::Error)] #[error("{0}")] pub struct DispatcherFailed(Arc); @@ -113,28 +120,30 @@ impl Outbound { move |parent: T| -> Result<_, Infallible> { Ok(match parent.param() { Dispatch::Balance(addr, ewma) => { - svc::Either::A(svc::Either::A(balance::Balance { + svc::Either::Left(svc::Either::Left(balance::Balance { addr, ewma, parent, queue, })) } - Dispatch::Forward(addr, metadata) => svc::Either::A(svc::Either::B({ - let is_local = inbound_ips.contains(&addr.ip()); - let http2 = http2.override_from(metadata.http2_client_params()); - Endpoint { - is_local, - addr, - metadata, - parent, - queue, - close_server_connection_on_remote_proxy_error: true, - http1, - http2, - } - })), - Dispatch::Fail { message } => svc::Either::B(message), + Dispatch::Forward(addr, metadata) => { + svc::Either::Left(svc::Either::Right({ + let is_local = inbound_ips.contains(&addr.ip()); + let http2 = http2.override_from(metadata.http2_client_params()); + Endpoint { + is_local, + addr, + metadata, + parent, + queue, + close_server_connection_on_remote_proxy_error: 
true, + http1, + http2, + } + })) + } + Dispatch::Fail { message } => svc::Either::Right(message), }) }, svc::stack(fail).check_new_clone().into_inner(), @@ -212,12 +221,25 @@ where OutboundEndpointLabels { authority: self.parent.param(), labels: prefix_labels("dst", self.metadata.labels().iter()), + zone_locality: self.param(), server_id: self.param(), target_addr: self.addr.into(), } } } +impl svc::Param for Endpoint { + fn param(&self) -> OutboundZoneLocality { + OutboundZoneLocality::new(&self.metadata) + } +} + +impl svc::Param for Endpoint { + fn param(&self) -> TcpZoneLabels { + tcp_zone_labels(self.param()) + } +} + impl svc::Param for Endpoint where T: svc::Param>, @@ -257,23 +279,23 @@ impl svc::Param for Endpoint { } } -impl svc::Param for Endpoint +impl svc::Param for Endpoint where - T: svc::Param, + T: svc::Param, { - fn param(&self) -> http::Version { + fn param(&self) -> http::Variant { self.parent.param() } } impl svc::Param for Endpoint where - T: svc::Param, + T: svc::Param, { fn param(&self) -> client::Params { match self.param() { - http::Version::H2 => client::Params::H2(self.http2.clone()), - http::Version::Http1 => { + http::Variant::H2 => client::Params::H2(self.http2.clone()), + http::Variant::Http1 => { // When the target is local (i.e. 
same as source of traffic) // then do not perform a protocol upgrade to HTTP/2 if self.is_local { diff --git a/linkerd/app/outbound/src/http/endpoint.rs b/linkerd/app/outbound/src/http/endpoint.rs index 60fcd62f97..8556f96b7f 100644 --- a/linkerd/app/outbound/src/http/endpoint.rs +++ b/linkerd/app/outbound/src/http/endpoint.rs @@ -4,7 +4,7 @@ use super::{ handle_proxy_error_headers::{self, NewHandleProxyErrorHeaders}, NewRequireIdentity, }; -use crate::{tcp::tagged_transport, Outbound}; +use crate::{tcp::tagged_transport, zone::TcpZoneLabels, Outbound}; use linkerd_app_core::{ classify, config, errors, http_tracing, metrics, proxy::{api_resolve::ProtocolHint, http, tap}, @@ -20,7 +20,7 @@ mod tests; #[derive(Clone, Debug)] pub struct Connect { - version: http::Version, + version: http::Variant, inner: T, } @@ -44,7 +44,7 @@ impl Outbound { T: svc::Param, T: Clone + Send + Sync + 'static, // Http endpoint body. - B: http::HttpBody + std::fmt::Debug + Default + Send + 'static, + B: http::Body + std::fmt::Debug + Default + Unpin + Send + 'static, B::Data: Send + 'static, // TCP endpoint stack. C: svc::MakeConnection> + Clone + Send + Sync + Unpin + 'static, @@ -81,7 +81,7 @@ impl Outbound> { T: tap::Inspect, T: Clone + Send + Sync + 'static, // Http endpoint body. - B: http::HttpBody + std::fmt::Debug + Default + Send + 'static, + B: http::Body + std::fmt::Debug + Default + Send + 'static, B::Data: Send + 'static, { self.map_stack(|config, rt, inner| { @@ -103,12 +103,13 @@ impl Outbound> { // is only done when the `Closable` parameter is set to true. // This module always strips error headers from responses. .push(NewHandleProxyErrorHeaders::layer()) + .push_on_service(http::BoxRequest::layer()) + .push_on_service(http::EnforceTimeouts::layer()) // Handle connection-level errors eagerly so that we can report 5XX failures in tap // and metrics. 
HTTP error metrics are not incremented here so that errors are not // double-counted--i.e., endpoint metrics track these responses and error metrics // track proxy errors that occur higher in the stack. .push(ClientRescue::layer(config.emit_headers)) - .push_on_service(http::BoxRequest::layer()) .push(tap::NewTapHttp::layer(rt.tap.clone())) .push( rt.metrics @@ -190,8 +191,8 @@ where } match self.version { - http::Version::Http1 => Some(SessionProtocol::Http1), - http::Version::H2 => Some(SessionProtocol::Http2), + http::Variant::Http1 => Some(SessionProtocol::Http1), + http::Variant::H2 => Some(SessionProtocol::Http2), } } } @@ -237,6 +238,13 @@ impl> svc::Param f } } +impl> svc::Param for Connect { + #[inline] + fn param(&self) -> TcpZoneLabels { + self.inner.param() + } +} + // === impl EndpointError === impl From<(&T, Error)> for EndpointError diff --git a/linkerd/app/outbound/src/http/endpoint/tests.rs b/linkerd/app/outbound/src/http/endpoint/tests.rs index e11bfac400..a65db50021 100644 --- a/linkerd/app/outbound/src/http/endpoint/tests.rs +++ b/linkerd/app/outbound/src/http/endpoint/tests.rs @@ -1,12 +1,14 @@ use super::*; use crate::{http, tcp, test_util::*}; use ::http::header::{CONNECTION, UPGRADE}; +use linkerd_app_core::metrics::OutboundZoneLocality; use linkerd_app_core::{ io, proxy::api_resolve::ProtocolHint, - svc::{NewService, ServiceExt}, + svc::{http::TokioExecutor, NewService, ServiceExt}, Infallible, }; +use linkerd_http_box::BoxBody; use std::net::SocketAddr; static WAS_ORIG_PROTO: &str = "request-orig-proto"; @@ -33,7 +35,7 @@ async fn http11_forward() { let svc = stack.new_service(Endpoint { addr: Remote(ServerAddr(addr)), - version: http::Version::Http1, + version: http::Variant::Http1, hint: ProtocolHint::Unknown, }); @@ -69,7 +71,7 @@ async fn http2_forward() { let svc = stack.new_service(Endpoint { addr: Remote(ServerAddr(addr)), - version: http::Version::H2, + version: http::Variant::H2, hint: ProtocolHint::Unknown, }); @@ -107,7 +109,7 
@@ async fn orig_proto_upgrade() { let svc = stack.new_service(Endpoint { addr: Remote(ServerAddr(addr)), - version: http::Version::Http1, + version: http::Variant::Http1, hint: ProtocolHint::Http2, }); @@ -163,7 +165,7 @@ async fn orig_proto_skipped_on_http_upgrade() { let svc = stack.new_service(Endpoint { addr: Remote(ServerAddr(addr)), - version: http::Version::Http1, + version: http::Variant::Http1, hint: ProtocolHint::Http2, }); @@ -174,7 +176,7 @@ async fn orig_proto_skipped_on_http_upgrade() { // The request has upgrade headers .header(CONNECTION, "upgrade") .header(UPGRADE, "linkerdrocks") - .body(hyper::Body::default()) + .body(BoxBody::default()) .unwrap(); let rsp = svc.oneshot(req).await.unwrap(); assert_eq!(rsp.status(), http::StatusCode::NO_CONTENT); @@ -205,7 +207,7 @@ async fn orig_proto_http2_noop() { let svc = stack.new_service(Endpoint { addr: Remote(ServerAddr(addr)), - version: http::Version::H2, + version: http::Variant::H2, hint: ProtocolHint::Http2, }); @@ -235,18 +237,28 @@ fn serve(version: ::http::Version) -> io::Result { .fold(rsp, |rsp, orig_proto| { rsp.header(WAS_ORIG_PROTO, orig_proto) }); - future::ok::<_, Infallible>(rsp.body(hyper::Body::default()).unwrap()) + future::ok::<_, Infallible>(rsp.body(BoxBody::default()).unwrap()) }); - let mut http = hyper::server::conn::Http::new(); + let (client_io, server_io) = io::duplex(4096); match version { - ::http::Version::HTTP_10 | ::http::Version::HTTP_11 => http.http1_only(true), - ::http::Version::HTTP_2 => http.http2_only(true), + ::http::Version::HTTP_10 | ::http::Version::HTTP_11 => { + let mut http = hyper::server::conn::http1::Builder::new(); + let fut = http + .timer(hyper_util::rt::TokioTimer::new()) + .serve_connection(hyper_util::rt::TokioIo::new(server_io), svc); + tokio::spawn(fut); + } + ::http::Version::HTTP_2 => { + let mut http = hyper::server::conn::http2::Builder::new(TokioExecutor::new()); + let fut = http + .timer(hyper_util::rt::TokioTimer::new()) + 
.serve_connection(hyper_util::rt::TokioIo::new(server_io), svc); + tokio::spawn(fut); + } _ => unreachable!("unsupported HTTP version {:?}", version), }; - let (client_io, server_io) = io::duplex(4096); - tokio::spawn(http.serve_connection(server_io, svc)); Ok(io::BoxedIo::new(client_io)) } @@ -254,7 +266,7 @@ fn serve(version: ::http::Version) -> io::Result { struct Endpoint { addr: Remote, hint: ProtocolHint, - version: http::Version, + version: http::Variant, } // === impl Endpoint === @@ -300,6 +312,7 @@ impl svc::Param for Endpoint { metrics::OutboundEndpointLabels { authority: None, labels: None, + zone_locality: OutboundZoneLocality::Unknown, server_id: self.param(), target_addr: self.addr.into(), } @@ -312,8 +325,8 @@ impl svc::Param for Endpoint { } } -impl svc::Param for Endpoint { - fn param(&self) -> http::Version { +impl svc::Param for Endpoint { + fn param(&self) -> http::Variant { self.version } } @@ -321,8 +334,8 @@ impl svc::Param for Endpoint { impl svc::Param for Endpoint { fn param(&self) -> http::client::Params { match self.version { - http::Version::H2 => http::client::Params::H2(Default::default()), - http::Version::Http1 => match self.hint { + http::Variant::H2 => http::client::Params::H2(Default::default()), + http::Variant::Http1 => match self.hint { ProtocolHint::Unknown | ProtocolHint::Opaque => { http::client::Params::Http1(http::h1::PoolSettings { max_idle: 1, diff --git a/linkerd/app/outbound/src/http/handle_proxy_error_headers.rs b/linkerd/app/outbound/src/http/handle_proxy_error_headers.rs index b6e6a429b6..69fdaf3112 100644 --- a/linkerd/app/outbound/src/http/handle_proxy_error_headers.rs +++ b/linkerd/app/outbound/src/http/handle_proxy_error_headers.rs @@ -1,6 +1,6 @@ use futures::prelude::*; use linkerd_app_core::{ - errors::respond::{L5D_PROXY_CONNECTION, L5D_PROXY_ERROR}, + errors::header::{L5D_PROXY_CONNECTION, L5D_PROXY_ERROR}, proxy::http::ClientHandle, svc, tls, }; @@ -158,6 +158,7 @@ fn update_response(rsp: &mut 
http::Response, closable: bool) -> bool { mod test { use super::*; use linkerd_app_core::{svc::ServiceExt, Infallible}; + use linkerd_http_box::BoxBody; use linkerd_tracing::test; use tokio::time; @@ -175,19 +176,19 @@ mod test { // with the l5d-proxy-error header. let mut req = http::Request::builder() .uri("http://foo.example.com") - .body(hyper::Body::default()) + .body(BoxBody::default()) .unwrap(); let (handle, closed) = ClientHandle::new(([192, 0, 2, 3], 50000).into()); req.extensions_mut().insert(handle); let svc = HandleProxyErrorHeaders::for_test( true, - svc::mk(|_: http::Request| { + svc::mk(|_: http::Request| { future::ok::<_, Infallible>( http::Response::builder() .status(http::StatusCode::BAD_GATEWAY) .header(L5D_PROXY_CONNECTION, "close") - .body(hyper::Body::default()) + .body(BoxBody::default()) .unwrap(), ) }), @@ -212,19 +213,19 @@ mod test { // with the l5d-proxy-error header. let mut req = http::Request::builder() .uri("http://foo.example.com") - .body(hyper::Body::default()) + .body(BoxBody::default()) .unwrap(); let (handle, closed) = ClientHandle::new(([192, 0, 2, 3], 50000).into()); req.extensions_mut().insert(handle); let svc = HandleProxyErrorHeaders::for_test( false, - svc::mk(|_: http::Request| { + svc::mk(|_: http::Request| { future::ok::<_, Infallible>( http::Response::builder() .status(http::StatusCode::BAD_GATEWAY) .header(L5D_PROXY_CONNECTION, "close") - .body(hyper::Body::default()) + .body(BoxBody::default()) .unwrap(), ) }), diff --git a/linkerd/app/outbound/src/http/logical.rs b/linkerd/app/outbound/src/http/logical.rs index bb7d752e51..713ee46bb3 100644 --- a/linkerd/app/outbound/src/http/logical.rs +++ b/linkerd/app/outbound/src/http/logical.rs @@ -148,7 +148,7 @@ where ); let parent_ref = ParentRef::from(ep.clone()); let backend_ref = BackendRef::from(ep); - svc::Either::A(Concrete { + svc::Either::Left(Concrete { parent_ref, backend_ref, target: concrete::Dispatch::Forward(remote, meta), @@ -157,8 +157,10 @@ where 
failure_accrual: Default::default(), }) } - Self::Profile(profile) => svc::Either::B(svc::Either::A(profile)), - Self::Policy(policy) => svc::Either::B(svc::Either::B(policy)), + Self::Profile(profile) => { + svc::Either::Right(svc::Either::Left(profile)) + } + Self::Policy(policy) => svc::Either::Right(svc::Either::Right(policy)), }) }, // Switch profile and policy routing. @@ -216,11 +218,11 @@ where // === impl Concrete === -impl svc::Param for Concrete +impl svc::Param for Concrete where - T: svc::Param, + T: svc::Param, { - fn param(&self) -> http::Version { + fn param(&self) -> http::Variant { self.parent.param() } } diff --git a/linkerd/app/outbound/src/http/logical/policy.rs b/linkerd/app/outbound/src/http/logical/policy.rs index 254f3dfa49..029ab42f34 100644 --- a/linkerd/app/outbound/src/http/logical/policy.rs +++ b/linkerd/app/outbound/src/http/logical/policy.rs @@ -8,7 +8,7 @@ mod router; mod tests; pub use self::{ - route::{errors, RouteMetrics}, + route::{errors, GrpcRouteMetrics, HttpRouteMetrics}, router::{GrpcParams, HttpParams}, }; pub use linkerd_proxy_client_policy::{ClientPolicy, FailureAccrual}; @@ -50,8 +50,8 @@ where /// routing configurations to route requests over cached inner backend /// services. pub(super) fn layer( - http_metrics: route::RouteMetrics, - grpc_metrics: route::RouteMetrics, + http_metrics: route::HttpRouteMetrics, + grpc_metrics: route::GrpcRouteMetrics, ) -> impl svc::Layer> + Clone where // Inner stack. 
@@ -72,8 +72,8 @@ where http.push_switch( |pp: Policy| { Ok::<_, Infallible>(match pp { - Self::Http(http) => svc::Either::A(http), - Self::Grpc(grpc) => svc::Either::B(grpc), + Self::Http(http) => svc::Either::Left(http), + Self::Grpc(grpc) => svc::Either::Right(grpc), }) }, grpc.into_inner(), diff --git a/linkerd/app/outbound/src/http/logical/policy/route.rs b/linkerd/app/outbound/src/http/logical/policy/route.rs index 7ba5290989..ca50549e0e 100644 --- a/linkerd/app/outbound/src/http/logical/policy/route.rs +++ b/linkerd/app/outbound/src/http/logical/policy/route.rs @@ -1,21 +1,21 @@ use super::super::Concrete; -use crate::RouteRef; -use linkerd_app_core::{classify, metrics::prom, proxy::http, svc, Addr, Error, Result}; +use crate::{ParentRef, RouteRef}; +use linkerd_app_core::{classify, proxy::http, svc, Addr, Error, Result}; use linkerd_distribute as distribute; use linkerd_http_route as http_route; use linkerd_proxy_client_policy as policy; use std::{fmt::Debug, hash::Hash, sync::Arc}; pub(crate) mod backend; +pub(crate) mod extensions; pub(crate) mod filters; +pub(crate) mod metrics; +pub(crate) mod retry; pub(crate) use self::backend::{Backend, MatchedBackend}; pub use self::filters::errors; -#[derive(Clone, Debug, Default)] -pub struct RouteMetrics { - backend: backend::RouteBackendMetrics, -} +pub use self::metrics::{GrpcRouteMetrics, HttpRouteMetrics}; /// A target type that includes a summary of exactly how a request was matched. /// This match state is required to apply route filters. 
@@ -28,33 +28,38 @@ pub(crate) struct Matched { } #[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub(crate) struct Route { +pub(crate) struct Route { pub(super) parent: T, pub(super) addr: Addr, + pub(super) parent_ref: ParentRef, pub(super) route_ref: RouteRef, pub(super) filters: Arc<[F]>, pub(super) distribution: BackendDistribution, - pub(super) failure_policy: E, - pub(super) request_timeout: Option, + pub(super) params: P, } -pub(crate) type MatchedRoute = Matched>; +pub(crate) type MatchedRoute = Matched>; pub(crate) type Http = MatchedRoute< T, http_route::http::r#match::RequestMatch, policy::http::Filter, - policy::http::StatusRanges, + policy::http::RouteParams, >; pub(crate) type Grpc = MatchedRoute< T, http_route::grpc::r#match::RouteMatch, policy::grpc::Filter, - policy::grpc::Codes, + policy::grpc::RouteParams, >; pub(crate) type BackendDistribution = distribute::Distribution>; pub(crate) type NewDistribute = distribute::NewDistribute, (), N>; +pub type Metrics = metrics::RouteMetrics< + ::StreamLabel, + ::StreamLabel, +>; + /// Wraps errors with route metadata. #[derive(Debug, thiserror::Error)] #[error("route {}: {source}", route.0)] @@ -64,31 +69,9 @@ struct RouteError { source: Error, } -// === impl RouteMetrics === - -impl RouteMetrics { - pub fn register(reg: &mut prom::Registry) -> Self { - Self { - backend: backend::RouteBackendMetrics::register( - reg.sub_registry_with_prefix("backend"), - ), - } - } - - #[cfg(test)] - pub(crate) fn request_count( - &self, - p: crate::ParentRef, - r: RouteRef, - b: crate::BackendRef, - ) -> backend::RequestCount { - self.backend.request_count(p, r, b) - } -} - // === impl MatchedRoute === -impl MatchedRoute +impl MatchedRoute where // Parent target. T: Debug + Eq + Hash, @@ -98,18 +81,22 @@ where // Request filter. F: Debug + Eq + Hash, F: Clone + Send + Sync + 'static, - // Failure policy. - E: Clone + Send + Sync + 'static, + // Route params. 
+ P: Clone + Send + Sync + 'static, // Assert that filters can be applied. Self: filters::Apply, Self: svc::Param, + Self: svc::Param, + Self: metrics::MkStreamLabel, + Self: svc::ExtractParam>, MatchedBackend: filters::Apply, + MatchedBackend: metrics::MkStreamLabel, { /// Builds a route stack that applies policy filters to requests and /// distributes requests over each route's backends. These [`Concrete`] /// backends are expected to be cached/shared by the inner stack. pub(crate) fn layer( - metrics: RouteMetrics, + metrics: Metrics>, ) -> impl svc::Layer> + Clone where // Inner stack. @@ -130,14 +117,24 @@ where .push(MatchedBackend::layer(metrics.backend.clone())) .lift_new_with_target() .push(NewDistribute::layer()) + .check_new::() + .check_new_service::>() // The router does not take the backend's availability into // consideration, so we must eagerly fail requests to prevent // leaking tasks onto the runtime. .push_on_service(svc::LoadShed::layer()) - // TODO(ver) attach the `E` typed failure policy to requests. .push(filters::NewApplyFilters::::layer()) - // Sets an optional request timeout. - .push(http::NewTimeout::layer()) + .push(retry::NewHttpRetry::::layer(metrics.retry.clone())) + .check_new::() + .check_new_service::>() + // Set request extensions based on the route configuration + // AND/OR headers + .push(extensions::NewSetExtensions::layer()) + .push(metrics::layer(&metrics.requests, &metrics.body_data)) + .check_new::() + .check_new_service::>() + // Configure a classifier to use in the endpoint stack. + // TODO(ver) move this into NewSetExtensions? 
.push(classify::NewClassify::layer()) .push(svc::NewMapErr::layer_with(|rt: &Self| { let route = rt.params.route_ref.clone(); @@ -152,17 +149,13 @@ where } } -impl svc::Param> for MatchedRoute { +impl svc::Param> for MatchedRoute { fn param(&self) -> BackendDistribution { self.params.distribution.clone() } } -impl svc::Param for MatchedRoute { - fn param(&self) -> http::timeout::ResponseTimeout { - http::timeout::ResponseTimeout(self.params.request_timeout) - } -} +// === impl Http === impl filters::Apply for Http { #[inline] @@ -176,14 +169,59 @@ impl filters::Apply for Http { } } +impl svc::ExtractParam> for Http { + fn extract_param(&self, req: &http::Request) -> metrics::labels::Route { + metrics::labels::Route::new( + self.params.parent_ref.clone(), + self.params.route_ref.clone(), + self.params.params.export_hostname_labels.then(|| req.uri()), + ) + } +} + +impl metrics::MkStreamLabel for Http { + type StatusLabels = metrics::labels::HttpRouteRsp; + type DurationLabels = metrics::labels::Route; + type StreamLabel = metrics::LabelHttpRouteRsp; + + fn mk_stream_labeler(&self, req: &::http::Request) -> Option { + let parent = self.params.parent_ref.clone(); + let route = self.params.route_ref.clone(); + let uri = self.params.params.export_hostname_labels.then(|| req.uri()); + Some(metrics::LabelHttpRsp::from(metrics::labels::Route::new( + parent, route, uri, + ))) + } +} + +impl svc::Param for Http { + fn param(&self) -> extensions::Params { + let retry = self.params.params.retry.clone(); + extensions::Params { + timeouts: self.params.params.timeouts.clone(), + retry: retry.map(|r| retry::RetryPolicy { + max_retries: r.max_retries as _, + max_request_bytes: r.max_request_bytes, + timeout: r.timeout, + backoff: r.backoff, + retryable_http_statuses: Some(r.status_ranges), + retryable_grpc_statuses: None, + }), + allow_l5d_request_headers: self.params.params.allow_l5d_request_headers, + } + } +} + impl svc::Param for Http { fn param(&self) -> classify::Request { 
classify::Request::ClientPolicy(classify::ClientPolicy::Http( - self.params.failure_policy.clone(), + policy::http::StatusRanges::default(), )) } } +// === impl Grpc === + impl filters::Apply for Grpc { #[inline] fn apply_request(&self, req: &mut ::http::Request) -> Result<()> { @@ -196,10 +234,53 @@ impl filters::Apply for Grpc { } } +impl svc::ExtractParam> for Grpc { + fn extract_param(&self, req: &http::Request) -> metrics::labels::Route { + metrics::labels::Route::new( + self.params.parent_ref.clone(), + self.params.route_ref.clone(), + self.params.params.export_hostname_labels.then(|| req.uri()), + ) + } +} + +impl metrics::MkStreamLabel for Grpc { + type StatusLabels = metrics::labels::GrpcRouteRsp; + type DurationLabels = metrics::labels::Route; + type StreamLabel = metrics::LabelGrpcRouteRsp; + + fn mk_stream_labeler(&self, req: &::http::Request) -> Option { + let parent = self.params.parent_ref.clone(); + let route = self.params.route_ref.clone(); + let uri = self.params.params.export_hostname_labels.then(|| req.uri()); + Some(metrics::LabelGrpcRsp::from(metrics::labels::Route::new( + parent, route, uri, + ))) + } +} + +impl svc::Param for Grpc { + fn param(&self) -> extensions::Params { + let retry = self.params.params.retry.clone(); + extensions::Params { + timeouts: self.params.params.timeouts.clone(), + retry: retry.map(|r| retry::RetryPolicy { + max_retries: r.max_retries as _, + max_request_bytes: r.max_request_bytes, + timeout: r.timeout, + backoff: r.backoff, + retryable_http_statuses: None, + retryable_grpc_statuses: Some(r.codes), + }), + allow_l5d_request_headers: self.params.params.allow_l5d_request_headers, + } + } +} + impl svc::Param for Grpc { fn param(&self) -> classify::Request { - classify::Request::ClientPolicy(classify::ClientPolicy::Grpc( - self.params.failure_policy.clone(), - )) + classify::Request::ClientPolicy( + classify::ClientPolicy::Grpc(policy::grpc::Codes::default()), + ) } } diff --git 
a/linkerd/app/outbound/src/http/logical/policy/route/backend.rs b/linkerd/app/outbound/src/http/logical/policy/route/backend.rs index 1ee9b2f258..9005830546 100644 --- a/linkerd/app/outbound/src/http/logical/policy/route/backend.rs +++ b/linkerd/app/outbound/src/http/logical/policy/route/backend.rs @@ -1,22 +1,18 @@ use super::{super::Concrete, filters}; use crate::{BackendRef, ParentRef, RouteRef}; use linkerd_app_core::{proxy::http, svc, Error, Result}; +use linkerd_http_prom::record_response::MkStreamLabel; use linkerd_http_route as http_route; use linkerd_proxy_client_policy as policy; use std::{fmt::Debug, hash::Hash, sync::Arc}; -mod count_reqs; -mod metrics; - -pub use self::count_reqs::RequestCount; -pub use self::metrics::RouteBackendMetrics; +pub(super) mod metrics; #[derive(Debug, PartialEq, Eq, Hash)] pub(crate) struct Backend { pub(crate) route_ref: RouteRef, pub(crate) concrete: Concrete, pub(crate) filters: Arc<[F]>, - pub(crate) request_timeout: Option, } pub(crate) type MatchedBackend = super::Matched>; @@ -25,6 +21,8 @@ pub(crate) type Http = pub(crate) type Grpc = MatchedBackend; +pub type Metrics = metrics::RouteBackendMetrics<::StreamLabel>; + /// Wraps errors with backend metadata. #[derive(Debug, thiserror::Error)] #[error("backend {}: {source}", backend.0)] @@ -42,17 +40,16 @@ impl Clone for Backend { route_ref: self.route_ref.clone(), filters: self.filters.clone(), concrete: self.concrete.clone(), - request_timeout: self.request_timeout, } } } -// === impl Matched === +// === impl MatchedBackend === -impl From<(Backend, super::MatchedRoute)> +impl From<(Backend, super::MatchedRoute)> for MatchedBackend { - fn from((params, route): (Backend, super::MatchedRoute)) -> Self { + fn from((params, route): (Backend, super::MatchedRoute)) -> Self { MatchedBackend { r#match: route.r#match, params, @@ -71,7 +68,7 @@ where F: Clone + Send + Sync + 'static, // Assert that filters can be applied. 
Self: filters::Apply, - RouteBackendMetrics: svc::ExtractParam, + Self: metrics::MkStreamLabel, { /// Builds a stack that applies per-route-backend policy filters over an /// inner [`Concrete`] stack. @@ -79,7 +76,7 @@ where /// This [`MatchedBackend`] must implement [`filters::Apply`] to apply these /// filters. pub(crate) fn layer( - metrics: RouteBackendMetrics, + metrics: Metrics, ) -> impl svc::Layer> + Clone where // Inner stack. @@ -102,8 +99,7 @@ where }| concrete, ) .push(filters::NewApplyFilters::::layer()) - .push(http::NewTimeout::layer()) - .push(count_reqs::NewCountRequests::layer_via(metrics.clone())) + .push(metrics::layer(&metrics)) .push(svc::NewMapErr::layer_with(|t: &Self| { let backend = t.params.concrete.backend_ref.clone(); move |source| { @@ -119,12 +115,6 @@ where } } -impl svc::Param for MatchedBackend { - fn param(&self) -> http::ResponseTimeout { - http::ResponseTimeout(self.params.request_timeout) - } -} - impl svc::Param for MatchedBackend { fn param(&self) -> ParentRef { self.params.concrete.parent_ref.clone() @@ -143,6 +133,8 @@ impl svc::Param for MatchedBackend { } } +// === impl Http === + impl filters::Apply for Http { #[inline] fn apply_request(&self, req: &mut ::http::Request) -> Result<()> { @@ -155,6 +147,23 @@ impl filters::Apply for Http { } } +impl metrics::MkStreamLabel for Http { + type StatusLabels = metrics::labels::HttpRouteBackendRsp; + type DurationLabels = metrics::labels::RouteBackend; + type StreamLabel = metrics::LabelHttpRouteBackendRsp; + + fn mk_stream_labeler(&self, _: &::http::Request) -> Option { + let parent = self.params.concrete.parent_ref.clone(); + let route = self.params.route_ref.clone(); + let backend = self.params.concrete.backend_ref.clone(); + Some(metrics::LabelHttpRsp::from( + metrics::labels::RouteBackend::from((parent, route, backend)), + )) + } +} + +// === impl Grpc === + impl filters::Apply for Grpc { #[inline] fn apply_request(&self, req: &mut ::http::Request) -> Result<()> { @@ -165,3 
+174,18 @@ impl filters::Apply for Grpc { filters::apply_grpc_response(&self.params.filters, rsp) } } + +impl metrics::MkStreamLabel for Grpc { + type StatusLabels = metrics::labels::GrpcRouteBackendRsp; + type DurationLabels = metrics::labels::RouteBackend; + type StreamLabel = metrics::LabelGrpcRouteBackendRsp; + + fn mk_stream_labeler(&self, _: &::http::Request) -> Option { + let parent = self.params.concrete.parent_ref.clone(); + let route = self.params.route_ref.clone(); + let backend = self.params.concrete.backend_ref.clone(); + Some(metrics::LabelGrpcRsp::from( + metrics::labels::RouteBackend::from((parent, route, backend)), + )) + } +} diff --git a/linkerd/app/outbound/src/http/logical/policy/route/backend/count_reqs.rs b/linkerd/app/outbound/src/http/logical/policy/route/backend/count_reqs.rs deleted file mode 100644 index 384e16d918..0000000000 --- a/linkerd/app/outbound/src/http/logical/policy/route/backend/count_reqs.rs +++ /dev/null @@ -1,110 +0,0 @@ -use linkerd_app_core::{metrics::prom, svc}; -use std::task::{Context, Poll}; - -#[derive(Clone, Debug)] -pub struct RequestCountFamilies(prom::Family); - -#[derive(Clone, Debug)] -pub struct RequestCount(prom::Counter); - -#[derive(Clone, Debug)] -pub struct NewCountRequests { - inner: N, - extract: X, -} - -#[derive(Clone, Debug)] -pub struct CountRequests { - inner: S, - requests: prom::Counter, -} - -impl RequestCountFamilies -where - L: prom::encoding::EncodeLabelSet + std::fmt::Debug + std::hash::Hash, - L: Eq + Clone + Send + Sync + 'static, -{ - pub fn register(registry: &mut prom::Registry) -> Self { - let requests = prom::Family::default(); - registry.register( - "requests", - "The total number of requests dispatched", - requests.clone(), - ); - Self(requests) - } - - pub fn metrics(&self, labels: &L) -> RequestCount { - RequestCount(self.0.get_or_create(labels).clone()) - } -} - -// === impl NewCountRequests === - -impl NewCountRequests { - pub fn new(extract: X, inner: N) -> Self { - Self { 
extract, inner } - } - - pub fn layer_via(extract: X) -> impl svc::Layer + Clone { - svc::layer::mk(move |inner| Self::new(extract.clone(), inner)) - } -} - -impl svc::NewService for NewCountRequests -where - X: svc::ExtractParam, - N: svc::NewService, -{ - type Service = CountRequests; - - fn new_service(&self, target: T) -> Self::Service { - let RequestCount(counter) = self.extract.extract_param(&target); - let inner = self.inner.new_service(target); - CountRequests::new(counter, inner) - } -} - -// === impl CountRequests === - -impl CountRequests { - fn new(requests: prom::Counter, inner: S) -> Self { - Self { requests, inner } - } -} - -impl svc::Service> for CountRequests -where - S: svc::Service>, -{ - type Response = S::Response; - type Error = S::Error; - type Future = S::Future; - - #[inline] - fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { - self.inner.poll_ready(cx) - } - - fn call(&mut self, req: http::Request) -> Self::Future { - self.requests.inc(); - self.inner.call(req) - } -} - -impl Default for RequestCountFamilies -where - L: prom::encoding::EncodeLabelSet + std::fmt::Debug + std::hash::Hash, - L: Eq + Clone + Send + Sync + 'static, -{ - fn default() -> Self { - Self(prom::Family::default()) - } -} - -impl RequestCount { - #[cfg(test)] - pub fn get(&self) -> u64 { - self.0.get() - } -} diff --git a/linkerd/app/outbound/src/http/logical/policy/route/backend/metrics.rs b/linkerd/app/outbound/src/http/logical/policy/route/backend/metrics.rs index 9f909183e3..ed7f0cded0 100644 --- a/linkerd/app/outbound/src/http/logical/policy/route/backend/metrics.rs +++ b/linkerd/app/outbound/src/http/logical/policy/route/backend/metrics.rs @@ -1,61 +1,161 @@ use crate::{BackendRef, ParentRef, RouteRef}; -use linkerd_app_core::{ - metrics::prom::{self, encoding::*, EncodeLabelSetMut}, - svc, +use linkerd_app_core::{metrics::prom, svc}; +use linkerd_http_prom::{ + body_data::response::{BodyDataMetrics, NewRecordBodyData, ResponseBodyFamilies}, + 
record_response::{self, NewResponseDuration, StreamLabel}, + NewCountRequests, RequestCount, RequestCountFamilies, }; -#[derive(Clone, Debug, Default)] -pub struct RouteBackendMetrics { - metrics: super::count_reqs::RequestCountFamilies, +pub use super::super::metrics::*; +pub use linkerd_http_prom::record_response::MkStreamLabel; + +#[cfg(test)] +mod tests; + +#[derive(Debug)] +pub struct RouteBackendMetrics { + requests: RequestCountFamilies, + responses: ResponseMetrics, + body_metrics: ResponseBodyFamilies, +} + +type ResponseMetrics = record_response::ResponseMetrics< + ::DurationLabels, + ::StatusLabels, +>; + +pub fn layer( + metrics: &RouteBackendMetrics, +) -> impl svc::Layer< + N, + Service = NewRecordBodyData< + ExtractRecordBodyDataParams, + NewCountRequests< + ExtractRequestCount, + NewResponseDuration>, N>, + >, + >, +> + Clone +where + T: MkStreamLabel, + N: svc::NewService, + NewRecordBodyData< + ExtractRecordBodyDataParams, + NewCountRequests< + ExtractRequestCount, + NewResponseDuration>, N>, + >, + >: svc::NewService, + NewCountRequests< + ExtractRequestCount, + NewResponseDuration>, N>, + >: svc::NewService, + NewResponseDuration>, N>: + svc::NewService, +{ + let RouteBackendMetrics { + requests, + responses, + body_metrics, + } = metrics.clone(); + + svc::layer::mk(move |inner| { + use svc::Layer; + NewRecordBodyData::layer_via(ExtractRecordBodyDataParams(body_metrics.clone())).layer( + NewCountRequests::layer_via(ExtractRequestCount(requests.clone())).layer( + NewRecordDuration::layer_via(ExtractRecordDurationParams(responses.clone())) + .layer(inner), + ), + ) + }) } -#[derive(Clone, Debug, Hash, PartialEq, Eq)] -struct RouteBackendLabels(ParentRef, RouteRef, BackendRef); +#[derive(Clone, Debug)] +pub struct ExtractRequestCount(RequestCountFamilies); + +#[derive(Clone, Debug)] +pub struct ExtractRecordBodyDataParams(ResponseBodyFamilies); // === impl RouteBackendMetrics === -impl RouteBackendMetrics { - pub fn register(reg: &mut 
prom::Registry) -> Self { +impl RouteBackendMetrics { + pub fn register(reg: &mut prom::Registry, histo: impl IntoIterator) -> Self { + let requests = RequestCountFamilies::register(reg); + let responses = record_response::ResponseMetrics::register(reg, histo); + let body_metrics = ResponseBodyFamilies::register(reg); Self { - metrics: super::count_reqs::RequestCountFamilies::register(reg), + requests, + responses, + body_metrics, } } #[cfg(test)] - pub(crate) fn request_count( + pub(crate) fn backend_request_count( &self, p: ParentRef, r: RouteRef, b: BackendRef, - ) -> super::count_reqs::RequestCount { - self.metrics.metrics(&RouteBackendLabels(p, r, b)) + ) -> linkerd_http_prom::RequestCount { + self.requests.metrics(&labels::RouteBackend(p, r, b)) + } + + #[cfg(test)] + pub(crate) fn get_statuses(&self, l: &L::StatusLabels) -> prom::Counter { + self.responses.get_statuses(l) + } + + #[cfg(test)] + pub(crate) fn get_response_body_metrics( + &self, + l: &labels::RouteBackend, + ) -> linkerd_http_prom::body_data::response::BodyDataMetrics { + self.body_metrics.metrics(l) } } -impl svc::ExtractParam for RouteBackendMetrics +impl Default for RouteBackendMetrics { + fn default() -> Self { + Self { + requests: Default::default(), + responses: Default::default(), + body_metrics: Default::default(), + } + } +} + +impl Clone for RouteBackendMetrics { + fn clone(&self) -> Self { + Self { + requests: self.requests.clone(), + responses: self.responses.clone(), + body_metrics: self.body_metrics.clone(), + } + } +} + +// === impl ExtractRequestCount === + +impl svc::ExtractParam for ExtractRequestCount where T: svc::Param + svc::Param + svc::Param, { - fn extract_param(&self, t: &T) -> super::count_reqs::RequestCount { - self.metrics - .metrics(&RouteBackendLabels(t.param(), t.param(), t.param())) + fn extract_param(&self, t: &T) -> RequestCount { + self.0 + .metrics(&labels::RouteBackend(t.param(), t.param(), t.param())) } } -// === impl RouteBackendLabels === +// === impl 
ExtractRecordBodyDataParams === -impl EncodeLabelSetMut for RouteBackendLabels { - fn encode_label_set(&self, enc: &mut LabelSetEncoder<'_>) -> std::fmt::Result { - let Self(parent, route, backend) = self; - parent.encode_label_set(enc)?; - route.encode_label_set(enc)?; - backend.encode_label_set(enc)?; - Ok(()) - } -} +impl svc::ExtractParam for ExtractRecordBodyDataParams +where + T: svc::Param + svc::Param + svc::Param, +{ + fn extract_param(&self, t: &T) -> BodyDataMetrics { + let Self(families) = self; + let labels = labels::RouteBackend(t.param(), t.param(), t.param()); -impl EncodeLabelSet for RouteBackendLabels { - fn encode(&self, mut enc: LabelSetEncoder<'_>) -> std::fmt::Result { - self.encode_label_set(&mut enc) + families.metrics(&labels) } } diff --git a/linkerd/app/outbound/src/http/logical/policy/route/backend/metrics/tests.rs b/linkerd/app/outbound/src/http/logical/policy/route/backend/metrics/tests.rs new file mode 100644 index 0000000000..c1a30f2511 --- /dev/null +++ b/linkerd/app/outbound/src/http/logical/policy/route/backend/metrics/tests.rs @@ -0,0 +1,517 @@ +use super::{ + super::{Backend, Grpc, Http}, + labels, + test_util::*, + LabelGrpcRouteBackendRsp, LabelHttpRouteBackendRsp, RouteBackendMetrics, +}; +use crate::http::{concrete, logical::Concrete}; +use bytes::Buf; +use linkerd_app_core::{ + svc::{self, http::BoxBody, Layer, NewService}, + transport::{Remote, ServerAddr}, + Error, +}; +use linkerd_proxy_client_policy as policy; + +#[tokio::test(flavor = "current_thread", start_paused = true)] +async fn http_request_statuses() { + let _trace = linkerd_tracing::test::trace_init(); + + let metrics = super::RouteBackendMetrics::default(); + let parent_ref = crate::ParentRef(policy::Meta::new_default("parent")); + let route_ref = crate::RouteRef(policy::Meta::new_default("route")); + let backend_ref = crate::BackendRef(policy::Meta::new_default("backend")); + let (mut svc, mut handle) = + mock_http_route_backend_metrics(&metrics, &parent_ref, 
&route_ref, &backend_ref); + + let route_backend = + labels::RouteBackend(parent_ref.clone(), route_ref.clone(), backend_ref.clone()); + + let requests = + metrics.backend_request_count(parent_ref.clone(), route_ref.clone(), backend_ref.clone()); + assert_eq!(requests.get(), 0); + + // Send one request and ensure it's counted. + let ok = metrics.get_statuses(&labels::Rsp( + route_backend.clone(), + labels::HttpRsp { + status: Some(http::StatusCode::OK), + error: None, + }, + )); + send_assert_incremented(&ok, &mut handle, &mut svc, Default::default(), |tx| { + tx.send_response( + http::Response::builder() + .status(200) + .body(BoxBody::default()) + .unwrap(), + ) + }) + .await; + assert_eq!(requests.get(), 1); + + // Send another request and ensure it's counted with a different response + // status. + let no_content = metrics.get_statuses(&labels::Rsp( + route_backend.clone(), + labels::HttpRsp { + status: Some(http::StatusCode::NO_CONTENT), + error: None, + }, + )); + send_assert_incremented( + &no_content, + &mut handle, + &mut svc, + Default::default(), + |tx| { + tx.send_response( + http::Response::builder() + .status(204) + .body(BoxBody::default()) + .unwrap(), + ) + }, + ) + .await; + assert_eq!(requests.get(), 2); + + // Emit a response with an error and ensure it's counted. + let unknown = metrics.get_statuses(&labels::Rsp( + route_backend.clone(), + labels::HttpRsp { + status: None, + error: Some(labels::Error::Unknown), + }, + )); + send_assert_incremented(&unknown, &mut handle, &mut svc, Default::default(), |tx| { + tx.send_error("a spooky ghost") + }) + .await; + assert_eq!(requests.get(), 3); + + // Emit a successful response with a body that fails and ensure that both + // the status and error are recorded. 
+ let mixed = metrics.get_statuses(&labels::Rsp( + route_backend.clone(), + labels::HttpRsp { + status: Some(http::StatusCode::OK), + error: Some(labels::Error::Unknown), + }, + )); + send_assert_incremented(&mixed, &mut handle, &mut svc, Default::default(), |tx| { + tx.send_response( + http::Response::builder() + .status(200) + .body(BoxBody::new(MockBody::error("a spooky ghost"))) + .unwrap(), + ) + }) + .await; + assert_eq!(requests.get(), 4); + + assert_eq!(unknown.get(), 1); + assert_eq!(ok.get(), 1); + assert_eq!(no_content.get(), 1); + assert_eq!(mixed.get(), 1); +} + +/// Tests that metrics count frames in the backend response body. +#[tokio::test(flavor = "current_thread", start_paused = true)] +async fn body_data_layer_records_frames() -> Result<(), Error> { + use http_body::Body; + use linkerd_app_core::proxy::http; + use linkerd_http_prom::body_data::response::BodyDataMetrics; + use tower::{Service, ServiceExt}; + + let _trace = linkerd_tracing::test::trace_init(); + + let metrics = super::RouteBackendMetrics::default(); + let parent_ref = crate::ParentRef(policy::Meta::new_default("parent")); + let route_ref = crate::RouteRef(policy::Meta::new_default("route")); + let backend_ref = crate::BackendRef(policy::Meta::new_default("backend")); + + let (mut svc, mut handle) = + mock_http_route_backend_metrics(&metrics, &parent_ref, &route_ref, &backend_ref); + handle.allow(1); + + // Create a request. + let req = { + let empty = BoxBody::empty(); + let body = BoxBody::new(empty); + http::Request::builder().method("DOOT").body(body).unwrap() + }; + + // Call the service once it is ready to accept a request. + tracing::info!("calling service"); + svc.ready().await.expect("ready"); + let call = svc.call(req); + let (req, send_resp) = handle.next_request().await.unwrap(); + debug_assert_eq!(req.method().as_str(), "DOOT"); + + // Acquire the counters for this backend. 
+ tracing::info!("acquiring response body metrics"); + let labels = labels::RouteBackend(parent_ref.clone(), route_ref.clone(), backend_ref.clone()); + let BodyDataMetrics { + // TODO(kate): currently, histograms do not expose their observation count or sum. so, + // we're left unable to exercise these metrics until prometheus/client_rust#242 lands. + // - https://github.com/prometheus/client_rust/pull/241 + // - https://github.com/prometheus/client_rust/pull/242 + #[cfg(feature = "prometheus-client-rust-242")] + frame_size, + .. + } = metrics.get_response_body_metrics(&labels); + + // Before we've sent a response, the counter should be zero. + #[cfg(feature = "prometheus-client-rust-242")] + { + assert_eq!(frame_size.count(), 0); + assert_eq!(frame_size.sum(), 0); + } + + // Create a response whose body is backed by a channel that we can send chunks to, send it. + tracing::info!("sending response"); + let mut resp_tx = { + let (tx, body) = http_body_util::channel::Channel::::new(1024); + let body = BoxBody::new(body); + let resp = http::Response::builder() + .status(http::StatusCode::IM_A_TEAPOT) + .body(body) + .unwrap(); + send_resp.send_response(resp); + tx + }; + + // Before we've sent any bytes, the counter should be zero. + #[cfg(feature = "prometheus-client-rust-242")] + { + assert_eq!(frame_size.count(), 0); + assert_eq!(frame_size.sum(), 0); + } + + // On the client end, poll our call future and await the response. + tracing::info!("polling service future"); + let (parts, body) = call.await?.into_parts(); + debug_assert_eq!(parts.status, 418); + + let mut body = Box::pin(body); + + /// Returns the next chunk from a boxed body. 
+ async fn read_chunk(body: &mut std::pin::Pin>) -> Result, Error> { + use std::task::{Context, Poll}; + let mut ctx = Context::from_waker(futures_util::task::noop_waker_ref()); + let data = match body.as_mut().poll_frame(&mut ctx) { + Poll::Ready(Some(Ok(d))) => d, + _ => panic!("next chunk should be ready"), + }; + let chunk = data.into_data().ok().expect("data frame").chunk().to_vec(); + Ok(chunk) + } + + { + // Send a chunk, confirm that our counters are incremented. + tracing::info!("sending first chunk"); + resp_tx.send_data("hello".into()).await?; + let chunk = read_chunk(&mut body).await?; + debug_assert_eq!("hello".as_bytes(), chunk, "should get same value back out"); + #[cfg(feature = "prometheus-client-rust-242")] + assert_eq!(frame_size.count(), 1); + #[cfg(feature = "prometheus-client-rust-242")] + assert_eq!(frame_size.sum(), 5); + } + + { + // Send another chunk, confirm that our counters are incremented once more. + tracing::info!("sending second chunk"); + resp_tx.send_data(", world!".into()).await?; + let chunk = read_chunk(&mut body).await?; + debug_assert_eq!( + ", world!".as_bytes(), + chunk, + "should get same value back out" + ); + #[cfg(feature = "prometheus-client-rust-242")] + assert_eq!(frame_size.count(), 2); + #[cfg(feature = "prometheus-client-rust-242")] + assert_eq!(frame_size.sum(), 5 + 8); + } + + { + // Close the body, show that the counters remain at the same values. 
+ use std::task::{Context, Poll}; + tracing::info!("closing response body"); + drop(resp_tx); + let mut ctx = Context::from_waker(futures_util::task::noop_waker_ref()); + match body.as_mut().poll_frame(&mut ctx) { + Poll::Ready(None) => {} + _ => panic!("got unexpected poll result"), + }; + #[cfg(feature = "prometheus-client-rust-242")] + assert_eq!(frame_size.count(), 2); + #[cfg(feature = "prometheus-client-rust-242")] + assert_eq!(frame_size.sum(), 5 + 8); + } + + Ok(()) +} + +#[tokio::test(flavor = "current_thread", start_paused = true)] +async fn grpc_request_statuses_ok() { + let _trace = linkerd_tracing::test::trace_init(); + + let metrics = super::RouteBackendMetrics::default(); + let parent_ref = crate::ParentRef(policy::Meta::new_default("parent")); + let route_ref = crate::RouteRef(policy::Meta::new_default("route")); + let backend_ref = crate::BackendRef(policy::Meta::new_default("backend")); + let (mut svc, mut handle) = + mock_grpc_route_backend_metrics(&metrics, &parent_ref, &route_ref, &backend_ref); + + let requests = + metrics.backend_request_count(parent_ref.clone(), route_ref.clone(), backend_ref.clone()); + assert_eq!(requests.get(), 0); + + let ok = metrics.get_statuses(&labels::Rsp( + labels::RouteBackend(parent_ref.clone(), route_ref.clone(), backend_ref.clone()), + labels::GrpcRsp { + status: Some(tonic::Code::Ok), + error: None, + }, + )); + send_assert_incremented( + &ok, + &mut handle, + &mut svc, + http::Request::builder() + .method("POST") + .uri("http://host/svc/method") + .body(Default::default()) + .unwrap(), + |tx| { + tx.send_response( + http::Response::builder() + .body(BoxBody::new(MockBody::grpc_status(0))) + .unwrap(), + ) + }, + ) + .await; + assert_eq!(requests.get(), 1); +} + +#[tokio::test(flavor = "current_thread", start_paused = true)] +async fn grpc_request_statuses_not_found() { + let _trace = linkerd_tracing::test::trace_init(); + + let metrics = super::RouteBackendMetrics::default(); + let parent_ref = 
crate::ParentRef(policy::Meta::new_default("parent")); + let route_ref = crate::RouteRef(policy::Meta::new_default("route")); + let backend_ref = crate::BackendRef(policy::Meta::new_default("backend")); + let (mut svc, mut handle) = + mock_grpc_route_backend_metrics(&metrics, &parent_ref, &route_ref, &backend_ref); + + let not_found = metrics.get_statuses(&labels::Rsp( + labels::RouteBackend(parent_ref.clone(), route_ref.clone(), backend_ref.clone()), + labels::GrpcRsp { + status: Some(tonic::Code::NotFound), + error: None, + }, + )); + send_assert_incremented( + ¬_found, + &mut handle, + &mut svc, + http::Request::builder() + .method("POST") + .uri("http://host/svc/method") + .body(Default::default()) + .unwrap(), + |tx| { + tx.send_response( + http::Response::builder() + .body(BoxBody::new(MockBody::grpc_status(5))) + .unwrap(), + ) + }, + ) + .await; +} + +#[tokio::test(flavor = "current_thread", start_paused = true)] +async fn grpc_request_statuses_error_response() { + let _trace = linkerd_tracing::test::trace_init(); + + let metrics = super::RouteBackendMetrics::default(); + let parent_ref = crate::ParentRef(policy::Meta::new_default("parent")); + let route_ref = crate::RouteRef(policy::Meta::new_default("route")); + let backend_ref = crate::BackendRef(policy::Meta::new_default("backend")); + let (mut svc, mut handle) = + mock_grpc_route_backend_metrics(&metrics, &parent_ref, &route_ref, &backend_ref); + + let unknown = metrics.get_statuses(&labels::Rsp( + labels::RouteBackend(parent_ref.clone(), route_ref.clone(), backend_ref.clone()), + labels::GrpcRsp { + status: None, + error: Some(labels::Error::Unknown), + }, + )); + send_assert_incremented( + &unknown, + &mut handle, + &mut svc, + http::Request::builder() + .method("POST") + .uri("http://host/svc/method") + .body(Default::default()) + .unwrap(), + |tx| tx.send_error("a spooky ghost"), + ) + .await; +} + +#[tokio::test(flavor = "current_thread", start_paused = true)] +async fn 
grpc_request_statuses_error_body() { + let _trace = linkerd_tracing::test::trace_init(); + + let metrics = super::RouteBackendMetrics::default(); + let parent_ref = crate::ParentRef(policy::Meta::new_default("parent")); + let route_ref = crate::RouteRef(policy::Meta::new_default("route")); + let backend_ref = crate::BackendRef(policy::Meta::new_default("backend")); + let (mut svc, mut handle) = + mock_grpc_route_backend_metrics(&metrics, &parent_ref, &route_ref, &backend_ref); + + let unknown = metrics.get_statuses(&labels::Rsp( + labels::RouteBackend(parent_ref.clone(), route_ref.clone(), backend_ref.clone()), + labels::GrpcRsp { + status: None, + error: Some(labels::Error::Unknown), + }, + )); + send_assert_incremented( + &unknown, + &mut handle, + &mut svc, + http::Request::builder() + .method("POST") + .uri("http://host/svc/method") + .body(Default::default()) + .unwrap(), + |tx| { + tx.send_response( + http::Response::builder() + .body(BoxBody::new(MockBody::error("a spooky ghost"))) + .unwrap(), + ) + }, + ) + .await; +} + +// === Util === + +fn mock_http_route_backend_metrics( + metrics: &RouteBackendMetrics, + parent_ref: &crate::ParentRef, + route_ref: &crate::RouteRef, + backend_ref: &crate::BackendRef, +) -> (svc::BoxHttp, Handle) { + let req = http::Request::builder().body(()).unwrap(); + let (r#match, _) = policy::route::find( + &[policy::http::Route { + hosts: vec![], + rules: vec![policy::route::Rule { + matches: vec![policy::http::r#match::MatchRequest::default()], + policy: policy::http::Policy { + meta: route_ref.0.clone(), + filters: [].into(), + distribution: policy::RouteDistribution::Empty, + params: Default::default(), + }, + }], + }], + &req, + ) + .expect("find default route"); + + let (tx, handle) = tower_test::mock::pair::, http::Response>(); + let svc = super::layer(metrics) + .layer(move |_t: Http<()>| tx.clone()) + .new_service(Http { + r#match, + params: Backend { + route_ref: route_ref.clone(), + filters: [].into(), + concrete: 
Concrete { + target: concrete::Dispatch::Forward( + Remote(ServerAddr(std::net::SocketAddr::new( + [0, 0, 0, 0].into(), + 8080, + ))), + Default::default(), + ), + authority: None, + failure_accrual: Default::default(), + parent: (), + parent_ref: parent_ref.clone(), + backend_ref: backend_ref.clone(), + }, + }, + }); + + (svc::BoxHttp::new(svc), handle) +} + +fn mock_grpc_route_backend_metrics( + metrics: &RouteBackendMetrics, + parent_ref: &crate::ParentRef, + route_ref: &crate::RouteRef, + backend_ref: &crate::BackendRef, +) -> (svc::BoxHttp, Handle) { + let req = http::Request::builder() + .method("POST") + .uri("http://host/svc/method") + .body(()) + .unwrap(); + let (r#match, _) = policy::route::find( + &[policy::grpc::Route { + hosts: vec![], + rules: vec![policy::route::Rule { + matches: vec![policy::grpc::r#match::MatchRoute::default()], + policy: policy::grpc::Policy { + meta: route_ref.0.clone(), + filters: [].into(), + distribution: policy::RouteDistribution::Empty, + params: Default::default(), + }, + }], + }], + &req, + ) + .expect("find default route"); + + let (tx, handle) = tower_test::mock::pair::, http::Response>(); + let svc = super::layer(metrics) + .layer(move |_t: Grpc<()>| tx.clone()) + .new_service(Grpc { + r#match, + params: Backend { + route_ref: route_ref.clone(), + filters: [].into(), + concrete: Concrete { + target: concrete::Dispatch::Forward( + Remote(ServerAddr(std::net::SocketAddr::new( + [0, 0, 0, 0].into(), + 8080, + ))), + Default::default(), + ), + authority: None, + failure_accrual: Default::default(), + parent: (), + parent_ref: parent_ref.clone(), + backend_ref: backend_ref.clone(), + }, + }, + }); + + (svc::BoxHttp::new(svc), handle) +} diff --git a/linkerd/app/outbound/src/http/logical/policy/route/extensions.rs b/linkerd/app/outbound/src/http/logical/policy/route/extensions.rs new file mode 100644 index 0000000000..59ca528a4f --- /dev/null +++ b/linkerd/app/outbound/src/http/logical/policy/route/extensions.rs @@ -0,0 
+1,265 @@ +use super::retry::RetryPolicy; +use linkerd_app_core::{config::ExponentialBackoff, proxy::http, svc}; +use linkerd_proxy_client_policy as policy; +use std::task::{Context, Poll}; +use tokio::time; + +#[derive(Clone, Debug)] +pub struct Params { + pub retry: Option, + pub timeouts: policy::http::Timeouts, + pub allow_l5d_request_headers: bool, +} + +// A request extension that marks the number of times a request has been +// attempted. +#[derive(Clone, Debug)] +pub struct Attempt(pub std::num::NonZeroU16); + +#[derive(Clone, Debug)] +pub struct NewSetExtensions { + inner: N, +} + +#[derive(Clone, Debug)] +pub struct SetExtensions { + inner: S, + params: Params, +} + +// === impl NewSetExtensions === + +impl NewSetExtensions { + pub fn layer() -> impl svc::Layer + Clone { + svc::layer::mk(|inner| Self { inner }) + } +} + +impl svc::NewService for NewSetExtensions +where + T: svc::Param, + N: svc::NewService, +{ + type Service = SetExtensions; + + fn new_service(&self, target: T) -> Self::Service { + let params = target.param(); + let inner = self.inner.new_service(target); + SetExtensions { params, inner } + } +} + +// === impl SetExtensions === + +impl svc::Service> for SetExtensions +where + S: svc::Service>, +{ + type Response = S::Response; + type Error = S::Error; + type Future = S::Future; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, mut req: http::Request) -> Self::Future { + let retry = self.configure_retry(req.headers_mut()); + + // Ensure that we get response headers within the retry timeout. Note + // that this may be cleared super::retry::RetryPolicy::set_extensions. 
+ let mut timeouts = self.configure_timeouts(req.headers_mut()); + timeouts.response_headers = retry.as_ref().and_then(|r| r.timeout); + + tracing::debug!(?retry, ?timeouts, "Initializing route extensions"); + if let Some(retry) = retry { + let _prior = req.extensions_mut().insert(retry); + debug_assert!(_prior.is_none(), "RetryPolicy must only be configured once"); + } + + let _prior = req.extensions_mut().insert(timeouts); + debug_assert!( + _prior.is_none(), + "StreamTimeouts must only be configured once" + ); + + let _prior = req.extensions_mut().insert(Attempt(1.try_into().unwrap())); + debug_assert!(_prior.is_none(), "Attempts must only be configured once"); + + self.inner.call(req) + } +} + +impl SetExtensions { + fn configure_retry(&self, req: &mut http::HeaderMap) -> Option { + if !self.params.allow_l5d_request_headers { + return self.params.retry.clone(); + } + + let user_retry_http = req + .remove("l5d-retry-http") + .and_then(|val| val.to_str().ok().and_then(parse_http_conditions)); + let user_retry_grpc = req + .remove("l5d-retry-grpc") + .and_then(|val| val.to_str().ok().and_then(parse_grpc_conditions)); + let user_retry_limit = req + .remove("l5d-retry-limit") + .and_then(|val| val.to_str().ok().and_then(|v| v.parse::().ok())); + let user_retry_timeout = req.remove("l5d-retry-timeout").and_then(parse_duration); + + if let Some(retry) = self.params.retry.clone() { + return Some(RetryPolicy { + timeout: user_retry_timeout.or(retry.timeout), + retryable_http_statuses: user_retry_http.or(retry.retryable_http_statuses.clone()), + retryable_grpc_statuses: user_retry_grpc.or(retry.retryable_grpc_statuses.clone()), + max_retries: user_retry_limit.unwrap_or(retry.max_retries), + ..retry + }); + } + + match ( + user_retry_http, + user_retry_grpc, + user_retry_limit, + user_retry_timeout, + ) { + (None, None, None, None) => None, + (retryable_http_statuses, retryable_grpc_statuses, retry_limit, timeout) => { + Some(RetryPolicy { + timeout, + 
retryable_http_statuses, + retryable_grpc_statuses, + max_retries: retry_limit.unwrap_or(1), + max_request_bytes: 64 * 1024, + backoff: Some(ExponentialBackoff::new_unchecked( + std::time::Duration::from_millis(25), + std::time::Duration::from_millis(250), + 1.0, + )), + }) + } + } + } + + fn configure_timeouts(&self, req: &mut http::HeaderMap) -> http::StreamTimeouts { + let mut timeouts = http::StreamTimeouts { + response_headers: None, + response_end: self.params.timeouts.response, + idle: self.params.timeouts.idle, + limit: self.params.timeouts.request.map(Into::into), + }; + + if !self.params.allow_l5d_request_headers { + return timeouts; + } + + // Accept both a shorthand and longer, more explicit version, the + // latter taking precedence. + if let Some(t) = req.remove("l5d-timeout").and_then(parse_duration) { + timeouts.limit = Some(t.into()); + } + if let Some(t) = req.remove("l5d-request-timeout").and_then(parse_duration) { + timeouts.limit = Some(t.into()); + } + + if let Some(t) = req.remove("l5d-response-timeout").and_then(parse_duration) { + timeouts.response_end = Some(t); + } + + timeouts + } +} + +fn parse_http_conditions(s: &str) -> Option { + fn to_code(s: &str) -> Option { + let code = s.parse::().ok()?; + if (100..600).contains(&code) { + Some(code) + } else { + None + } + } + + Some(policy::http::StatusRanges( + s.split(',') + .filter_map(|cond| { + if cond.eq_ignore_ascii_case("5xx") { + return Some(500..=599); + } + if cond.eq_ignore_ascii_case("gateway-error") { + return Some(502..=504); + } + + if let Some(code) = to_code(cond) { + return Some(code..=code); + } + if let Some((start, end)) = cond.split_once('-') { + if let (Some(s), Some(e)) = (to_code(start), to_code(end)) { + if s <= e { + return Some(s..=e); + } + } + } + + None + }) + .collect(), + )) +} + +fn parse_grpc_conditions(s: &str) -> Option { + Some(policy::grpc::Codes(std::sync::Arc::new( + s.split(',') + .filter_map(|cond| { + if cond.eq_ignore_ascii_case("cancelled") { + 
return Some(tonic::Code::Cancelled as u16); + } + if cond.eq_ignore_ascii_case("deadline-exceeded") { + return Some(tonic::Code::DeadlineExceeded as u16); + } + if cond.eq_ignore_ascii_case("internal") { + return Some(tonic::Code::Internal as u16); + } + if cond.eq_ignore_ascii_case("resource-exhausted") { + return Some(tonic::Code::ResourceExhausted as u16); + } + if cond.eq_ignore_ascii_case("unavailable") { + return Some(tonic::Code::Unavailable as u16); + } + None + }) + .collect(), + ))) +} + +// Copied from the policy controller so that we handle the same duration values +// as we do in the YAML config. +fn parse_duration(hv: http::HeaderValue) -> Option { + #[inline] + fn parse(s: &str) -> Option { + let s = s.trim(); + let offset = s.rfind(|c: char| c.is_ascii_digit())?; + let (magnitude, unit) = s.split_at(offset + 1); + let magnitude = magnitude.parse::().ok()?; + + let mul = match unit { + "" if magnitude == 0 => 0, + "ms" => 1, + "s" => 1000, + "m" => 1000 * 60, + "h" => 1000 * 60 * 60, + "d" => 1000 * 60 * 60 * 24, + _ => return None, + }; + + let ms = magnitude.checked_mul(mul)?; + Some(time::Duration::from_millis(ms)) + } + let s = hv.to_str().ok()?; + let Some(d) = parse(s) else { + tracing::debug!("Invalid duration: {:?}", s); + return None; + }; + Some(d) +} diff --git a/linkerd/app/outbound/src/http/logical/policy/route/metrics.rs b/linkerd/app/outbound/src/http/logical/policy/route/metrics.rs new file mode 100644 index 0000000000..8096b8ef14 --- /dev/null +++ b/linkerd/app/outbound/src/http/logical/policy/route/metrics.rs @@ -0,0 +1,280 @@ +use super::{backend::metrics as backend, retry}; +use linkerd_app_core::{ + metrics::prom::{self, EncodeLabelSetMut}, + proxy::http, + svc, +}; +use linkerd_http_prom::{ + body_data::request::{NewRecordBodyData, RequestBodyFamilies}, + record_response::{self, StreamLabel}, +}; + +pub use linkerd_http_prom::record_response::MkStreamLabel; + +pub mod labels; +#[cfg(test)] +pub(super) mod test_util; 
+#[cfg(test)] +mod tests; + +pub type RequestMetrics = record_response::RequestMetrics< + ::DurationLabels, + ::StatusLabels, +>; + +#[derive(Debug)] +pub struct RouteMetrics { + pub(super) retry: retry::RouteRetryMetrics, + pub(super) requests: RequestMetrics, + pub(super) backend: backend::RouteBackendMetrics, + pub(super) body_data: RequestBodyFamilies, +} + +pub type HttpRouteMetrics = RouteMetrics; +pub type GrpcRouteMetrics = RouteMetrics; + +/// Tracks HTTP streams to produce response labels. +#[derive(Clone, Debug)] +pub struct LabelHttpRsp { + parent: L, + status: Option, + error: Option, +} + +/// Tracks gRPC streams to produce response labels. +#[derive(Clone, Debug)] +pub struct LabelGrpcRsp { + parent: L, + status: Option, + error: Option, +} + +pub type LabelHttpRouteRsp = LabelHttpRsp; +pub type LabelGrpcRouteRsp = LabelGrpcRsp; + +pub type LabelHttpRouteBackendRsp = LabelHttpRsp; +pub type LabelGrpcRouteBackendRsp = LabelGrpcRsp; + +pub type NewRecordDuration = + record_response::NewRecordResponse, M, N>; + +#[derive(Clone, Debug)] +pub struct ExtractRecordDurationParams(pub M); + +pub fn layer( + metrics: &RequestMetrics, + body_data: &RequestBodyFamilies, +) -> impl svc::Layer< + N, + Service = NewRecordBodyData< + NewRecordDuration, N>, + (), + T, + labels::Route, + >, +> +where + T: Clone + MkStreamLabel, + T: svc::ExtractParam>, +{ + let record = NewRecordDuration::layer_via(ExtractRecordDurationParams(metrics.clone())); + let body_data = NewRecordBodyData::new((), body_data.clone()); + + svc::layer::mk(move |inner| { + use svc::Layer; + body_data.layer(record.layer(inner)) + }) +} + +// === impl RouteMetrics === + +impl RouteMetrics { + // There are two histograms for which we need to register metrics: request + // durations, measured on routes, and response durations, measured on + // route-backends. 
+ // + // Response duration is probably the more meaninful metric + // operationally--and it includes more backend metadata--so we opt to + // preserve higher fidelity for response durations (especially for lower + // values). + // + // We elide several buckets for request durations to be conservative about + // the costs of tracking these two largely overlapping histograms + const REQUEST_BUCKETS: &'static [f64] = &[0.05, 0.5, 1.0, 10.0]; + const RESPONSE_BUCKETS: &'static [f64] = &[0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 10.0]; +} + +impl Default for RouteMetrics { + fn default() -> Self { + Self { + requests: Default::default(), + backend: Default::default(), + retry: Default::default(), + body_data: Default::default(), + } + } +} + +impl Clone for RouteMetrics { + fn clone(&self) -> Self { + Self { + requests: self.requests.clone(), + backend: self.backend.clone(), + retry: self.retry.clone(), + body_data: self.body_data.clone(), + } + } +} + +impl RouteMetrics { + pub fn register(reg: &mut prom::Registry) -> Self { + let requests = RequestMetrics::::register(reg, Self::REQUEST_BUCKETS.iter().copied()); + + let backend = backend::RouteBackendMetrics::register( + reg.sub_registry_with_prefix("backend"), + Self::RESPONSE_BUCKETS.iter().copied(), + ); + + let retry = retry::RouteRetryMetrics::register(reg.sub_registry_with_prefix("retry")); + let body_data = RequestBodyFamilies::register(reg); + + Self { + requests, + backend, + retry, + body_data, + } + } + + #[cfg(test)] + pub(crate) fn backend_request_count( + &self, + p: crate::ParentRef, + r: crate::RouteRef, + b: crate::BackendRef, + ) -> linkerd_http_prom::RequestCount { + self.backend.backend_request_count(p, r, b) + } +} + +// === impl ExtractRequestDurationParams === + +impl svc::ExtractParam, T> for ExtractRecordDurationParams +where + T: Clone + MkStreamLabel, + M: Clone, +{ + fn extract_param(&self, target: &T) -> record_response::Params { + record_response::Params { + labeler: target.clone(), + metric: 
self.0.clone(), + } + } +} + +// === impl LabelHttpRsp === + +impl

From

for LabelHttpRsp

{ + fn from(parent: P) -> Self { + Self { + parent, + status: None, + error: None, + } + } +} + +impl

StreamLabel for LabelHttpRsp

+where + P: EncodeLabelSetMut + Clone + Eq + std::fmt::Debug + std::hash::Hash + Send + Sync + 'static, +{ + type StatusLabels = labels::Rsp; + type DurationLabels = P; + + fn init_response(&mut self, rsp: &http::Response) { + self.status = Some(rsp.status()); + } + + fn end_response(&mut self, res: Result, &linkerd_app_core::Error>) { + if let Err(e) = res { + match labels::Error::new_or_status(e) { + Ok(l) => self.error = Some(l), + Err(code) => match http::StatusCode::from_u16(code) { + Ok(s) => self.status = Some(s), + // This is kind of pathological, so mark it as an unkown error. + Err(_) => self.error = Some(labels::Error::Unknown), + }, + } + } + } + + fn status_labels(&self) -> Self::StatusLabels { + labels::Rsp( + self.parent.clone(), + labels::HttpRsp { + status: self.status, + error: self.error, + }, + ) + } + + fn duration_labels(&self) -> Self::DurationLabels { + self.parent.clone() + } +} + +// === impl LabelGrpcRsp === + +impl

From

for LabelGrpcRsp

{ + fn from(parent: P) -> Self { + Self { + parent, + status: None, + error: None, + } + } +} + +impl

StreamLabel for LabelGrpcRsp

+where + P: EncodeLabelSetMut + Clone + Eq + std::fmt::Debug + std::hash::Hash + Send + Sync + 'static, +{ + type StatusLabels = labels::Rsp; + type DurationLabels = P; + + fn init_response(&mut self, rsp: &http::Response) { + self.status = rsp + .headers() + .get("grpc-status") + .map(|v| tonic::Code::from_bytes(v.as_bytes())); + } + + fn end_response(&mut self, res: Result, &linkerd_app_core::Error>) { + match res { + Ok(Some(trailers)) => { + self.status = trailers + .get("grpc-status") + .map(|v| tonic::Code::from_bytes(v.as_bytes())); + } + Ok(None) => {} + Err(e) => match labels::Error::new_or_status(e) { + Ok(l) => self.error = Some(l), + Err(code) => self.status = Some(tonic::Code::from_i32(i32::from(code))), + }, + } + } + + fn status_labels(&self) -> Self::StatusLabels { + labels::Rsp( + self.parent.clone(), + labels::GrpcRsp { + status: self.status, + error: self.error, + }, + ) + } + + fn duration_labels(&self) -> Self::DurationLabels { + self.parent.clone() + } +} diff --git a/linkerd/app/outbound/src/http/logical/policy/route/metrics/labels.rs b/linkerd/app/outbound/src/http/logical/policy/route/metrics/labels.rs new file mode 100644 index 0000000000..46ecd7665d --- /dev/null +++ b/linkerd/app/outbound/src/http/logical/policy/route/metrics/labels.rs @@ -0,0 +1,298 @@ +//! Prometheus label types. 
+use linkerd_app_core::{ + dns, errors, metrics::prom::EncodeLabelSetMut, proxy::http, Error as BoxError, +}; +use prometheus_client::encoding::*; + +use crate::{BackendRef, ParentRef, RouteRef}; + +#[derive(Clone, Debug, Hash, PartialEq, Eq)] +pub struct Route { + parent: ParentRef, + route: RouteRef, + hostname: Option, +} + +#[derive(Clone, Debug, Hash, PartialEq, Eq)] +pub struct RouteBackend(pub ParentRef, pub RouteRef, pub BackendRef); + +#[derive(Clone, Debug, Hash, PartialEq, Eq)] +pub struct Rsp(pub P, pub L); + +pub type RouteRsp = Rsp; +pub type HttpRouteRsp = RouteRsp; +pub type GrpcRouteRsp = RouteRsp; + +pub type RouteBackendRsp = Rsp; +pub type HttpRouteBackendRsp = RouteBackendRsp; +pub type GrpcRouteBackendRsp = RouteBackendRsp; + +#[derive(Clone, Debug, Hash, PartialEq, Eq)] +pub struct HttpRsp { + pub status: Option, + pub error: Option, +} + +#[derive(Clone, Debug, Hash, PartialEq, Eq)] +pub struct GrpcRsp { + pub status: Option, + pub error: Option, +} + +#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] +pub enum Error { + FailFast, + LoadShed, + RequestTimeout, + ResponseHeadersTimeout, + ResponseStreamTimeout, + IdleTimeout, + Cancel, + Refused, + EnhanceYourCalm, + Reset, + GoAway, + Io, + Unknown, +} + +// === impl Route === + +impl Route { + pub fn new(parent: ParentRef, route: RouteRef, uri: Option<&http::uri::Uri>) -> Self { + let hostname = uri + .and_then(http::uri::Uri::host) + .and_then(|h| dns::Name::try_from_ascii(h.as_bytes()).ok()); + + Self { + parent, + route, + hostname, + } + } + + #[cfg(test)] + pub(super) fn new_with_name( + parent: ParentRef, + route: RouteRef, + hostname: Option, + ) -> Self { + Self { + parent, + route, + hostname, + } + } +} + +impl EncodeLabelSetMut for Route { + fn encode_label_set(&self, enc: &mut LabelSetEncoder<'_>) -> std::fmt::Result { + let Self { + parent, + route, + hostname, + } = self; + + parent.encode_label_set(enc)?; + route.encode_label_set(enc)?; + ("hostname", 
hostname.as_deref()).encode(enc.encode_label())?; + + Ok(()) + } +} + +impl EncodeLabelSet for Route { + fn encode(&self, mut enc: LabelSetEncoder<'_>) -> std::fmt::Result { + self.encode_label_set(&mut enc) + } +} + +// === impl RouteBackend === + +impl From<(ParentRef, RouteRef, BackendRef)> for RouteBackend { + fn from((parent, route, backend): (ParentRef, RouteRef, BackendRef)) -> Self { + Self(parent, route, backend) + } +} + +impl EncodeLabelSetMut for RouteBackend { + fn encode_label_set(&self, enc: &mut LabelSetEncoder<'_>) -> std::fmt::Result { + let Self(parent, route, backend) = self; + parent.encode_label_set(enc)?; + route.encode_label_set(enc)?; + backend.encode_label_set(enc)?; + Ok(()) + } +} + +impl EncodeLabelSet for RouteBackend { + fn encode(&self, mut enc: LabelSetEncoder<'_>) -> std::fmt::Result { + self.encode_label_set(&mut enc) + } +} + +// === impl Rsp === + +impl EncodeLabelSetMut for Rsp { + fn encode_label_set(&self, enc: &mut LabelSetEncoder<'_>) -> std::fmt::Result { + let Self(route, rsp) = self; + route.encode_label_set(enc)?; + rsp.encode_label_set(enc)?; + Ok(()) + } +} + +impl EncodeLabelSet for Rsp { + fn encode(&self, mut enc: LabelSetEncoder<'_>) -> std::fmt::Result { + self.encode_label_set(&mut enc) + } +} + +// === impl HttpRsp === + +impl EncodeLabelSetMut for HttpRsp { + fn encode_label_set(&self, enc: &mut LabelSetEncoder<'_>) -> std::fmt::Result { + let Self { status, error } = self; + + ("http_status", status.map(|c| c.as_u16())).encode(enc.encode_label())?; + ("error", *error).encode(enc.encode_label())?; + + Ok(()) + } +} + +impl EncodeLabelSet for HttpRsp { + fn encode(&self, mut enc: LabelSetEncoder<'_>) -> std::fmt::Result { + self.encode_label_set(&mut enc) + } +} + +// === impl GrpcRsp === + +impl EncodeLabelSetMut for GrpcRsp { + fn encode_label_set(&self, enc: &mut LabelSetEncoder<'_>) -> std::fmt::Result { + let Self { status, error } = self; + + ( + "grpc_status", + match 
status.unwrap_or(tonic::Code::Unknown) { + tonic::Code::Ok => "OK", + tonic::Code::Cancelled => "CANCELLED", + tonic::Code::InvalidArgument => "INVALID_ARGUMENT", + tonic::Code::DeadlineExceeded => "DEADLINE_EXCEEDED", + tonic::Code::NotFound => "NOT_FOUND", + tonic::Code::AlreadyExists => "ALREADY_EXISTS", + tonic::Code::PermissionDenied => "PERMISSION_DENIED", + tonic::Code::ResourceExhausted => "RESOURCE_EXHAUSTED", + tonic::Code::FailedPrecondition => "FAILED_PRECONDITION", + tonic::Code::Aborted => "ABORTED", + tonic::Code::OutOfRange => "OUT_OF_RANGE", + tonic::Code::Unimplemented => "UNIMPLEMENTED", + tonic::Code::Internal => "INTERNAL", + tonic::Code::Unavailable => "UNAVAILABLE", + tonic::Code::DataLoss => "DATA_LOSS", + tonic::Code::Unauthenticated => "UNAUTHENTICATED", + _ => "UNKNOWN", + }, + ) + .encode(enc.encode_label())?; + + ("error", *error).encode(enc.encode_label())?; + + Ok(()) + } +} + +impl EncodeLabelSet for GrpcRsp { + fn encode(&self, mut enc: LabelSetEncoder<'_>) -> std::fmt::Result { + self.encode_label_set(&mut enc) + } +} + +// === impl Error === + +impl Error { + pub fn new_or_status(error: &BoxError) -> Result { + use super::super::super::errors as policy; + use crate::http::h2::{H2Error, Reason}; + + // No available backend can be found for a request. + if errors::is_caused_by::(&**error) { + return Ok(Self::FailFast); + } + if errors::is_caused_by::(&**error) { + return Ok(Self::LoadShed); + } + + if let Some(policy::HttpRouteRedirect { status, .. }) = errors::cause_ref(&**error) { + return Err(status.as_u16()); + } + + // Policy-driven request failures. + if let Some(policy::HttpRouteInjectedFailure { status, .. }) = errors::cause_ref(&**error) { + return Err(status.as_u16()); + } + if let Some(policy::GrpcRouteInjectedFailure { code, .. 
}) = errors::cause_ref(&**error) { + return Err(*code); + } + + use http::stream_timeouts::{ + ResponseHeadersTimeoutError, ResponseStreamTimeoutError, StreamDeadlineError, + StreamIdleError, + }; + if errors::is_caused_by::(&**error) { + return Ok(Self::ResponseHeadersTimeout); + } + if errors::is_caused_by::(&**error) { + return Ok(Self::ResponseStreamTimeout); + } + if errors::is_caused_by::(&**error) { + return Ok(Self::RequestTimeout); + } + if errors::is_caused_by::(&**error) { + return Ok(Self::IdleTimeout); + } + + // HTTP/2 errors. + if let Some(h2e) = errors::cause_ref::(&**error) { + if h2e.is_reset() { + match h2e.reason() { + Some(Reason::CANCEL) => return Ok(Self::Cancel), + Some(Reason::REFUSED_STREAM) => return Ok(Self::Refused), + Some(Reason::ENHANCE_YOUR_CALM) => return Ok(Self::EnhanceYourCalm), + _ => return Ok(Self::Reset), + } + } + if h2e.is_go_away() { + return Ok(Self::GoAway); + } + if h2e.is_io() { + return Ok(Self::Io); + } + } + + tracing::debug!(?error, "Unlabeled error"); + Ok(Self::Unknown) + } +} + +impl EncodeLabelValue for Error { + fn encode(&self, enc: &mut LabelValueEncoder<'_>) -> std::fmt::Result { + use std::fmt::Write; + match self { + Self::FailFast => enc.write_str("FAIL_FAST"), + Self::LoadShed => enc.write_str("LOAD_SHED"), + Self::RequestTimeout => enc.write_str("REQUEST_TIMEOUT"), + Self::ResponseHeadersTimeout => enc.write_str("RESPONSE_HEADERS_TIMEOUT"), + Self::ResponseStreamTimeout => enc.write_str("RESPONSE_STREAM_TIMEOUT"), + Self::IdleTimeout => enc.write_str("IDLE_TIMEOUT"), + Self::Cancel => enc.write_str("CANCEL"), + Self::Refused => enc.write_str("REFUSED"), + Self::EnhanceYourCalm => enc.write_str("ENHANCE_YOUR_CALM"), + Self::Reset => enc.write_str("RESET"), + Self::GoAway => enc.write_str("GO_AWAY"), + Self::Io => enc.write_str("IO"), + Self::Unknown => enc.write_str("UNKNOWN"), + } + } +} diff --git a/linkerd/app/outbound/src/http/logical/policy/route/metrics/test_util.rs 
b/linkerd/app/outbound/src/http/logical/policy/route/metrics/test_util.rs new file mode 100644 index 0000000000..9814acf087 --- /dev/null +++ b/linkerd/app/outbound/src/http/logical/policy/route/metrics/test_util.rs @@ -0,0 +1,38 @@ +use http::Response; +use http_body::Body; +use http_body_util::BodyExt; +use linkerd_app_core::{ + metrics::prom::Counter, + svc::{self, http::BoxBody, Service, ServiceExt}, +}; + +pub use crate::test_util::MockBody; + +pub async fn send_assert_incremented( + counter: &Counter, + handle: &mut Handle, + svc: &mut svc::BoxHttp, + req: http::Request, + send: impl FnOnce(SendResponse), +) { + handle.allow(1); + let init = counter.get(); + svc.ready().await.expect("ready"); + let mut call = svc.call(req); + let (_req, tx) = tokio::select! { + _ = (&mut call) => unreachable!(), + res = handle.next_request() => res.unwrap(), + }; + assert_eq!(counter.get(), init); + send(tx); + if let Ok(mut body) = call.await.map(Response::into_body) { + if !body.is_end_stream() { + assert_eq!(counter.get(), 0); + while let Some(Ok(_)) = body.frame().await {} + } + } + assert_eq!(counter.get(), init + 1); +} + +pub type Handle = tower_test::mock::Handle, http::Response>; +pub type SendResponse = tower_test::mock::SendResponse>; diff --git a/linkerd/app/outbound/src/http/logical/policy/route/metrics/tests.rs b/linkerd/app/outbound/src/http/logical/policy/route/metrics/tests.rs new file mode 100644 index 0000000000..b497e549e0 --- /dev/null +++ b/linkerd/app/outbound/src/http/logical/policy/route/metrics/tests.rs @@ -0,0 +1,1197 @@ +use super::{ + super::{Grpc, Http, Route}, + labels, + test_util::*, + LabelGrpcRouteRsp, LabelHttpRouteRsp, RequestMetrics, +}; +use bytes::{Buf, Bytes}; +use http_body::Body; +use http_body_util::BodyExt; +use linkerd_app_core::{ + dns, + svc::{ + self, + http::{uri::Uri, BoxBody}, + Layer, NewService, + }, +}; +use linkerd_http_prom::body_data::request::RequestBodyFamilies; +use linkerd_proxy_client_policy as policy; +use 
std::task::Poll; + +static GRPC_STATUS: http::HeaderName = http::HeaderName::from_static("grpc-status"); +static GRPC_STATUS_OK: http::HeaderValue = http::HeaderValue::from_static("0"); + +#[tokio::test(flavor = "current_thread", start_paused = true)] +async fn http_request_statuses() { + const EXPORT_HOSTNAME_LABELS: bool = true; + let _trace = linkerd_tracing::test::trace_init(); + + let super::HttpRouteMetrics { + requests, + body_data, + .. + } = super::HttpRouteMetrics::default(); + let parent_ref = crate::ParentRef(policy::Meta::new_default("parent")); + let route_ref = crate::RouteRef(policy::Meta::new_default("route")); + let (mut svc, mut handle) = mock_http_route_metrics( + &requests, + &body_data, + &parent_ref, + &route_ref, + EXPORT_HOSTNAME_LABELS, + ); + + // Send one request and ensure it's counted. + let ok = requests.get_statuses(&labels::Rsp( + labels::Route::new(parent_ref.clone(), route_ref.clone(), None), + labels::HttpRsp { + status: Some(http::StatusCode::OK), + error: None, + }, + )); + send_assert_incremented(&ok, &mut handle, &mut svc, Default::default(), |tx| { + tx.send_response( + http::Response::builder() + .status(200) + .body(BoxBody::default()) + .unwrap(), + ) + }) + .await; + + // Send another request and ensure it's counted with a different response + // status. + let no_content = requests.get_statuses(&labels::Rsp( + labels::Route::new(parent_ref.clone(), route_ref.clone(), None), + labels::HttpRsp { + status: Some(http::StatusCode::NO_CONTENT), + error: None, + }, + )); + send_assert_incremented( + &no_content, + &mut handle, + &mut svc, + Default::default(), + |tx| { + tx.send_response( + http::Response::builder() + .status(204) + .body(BoxBody::default()) + .unwrap(), + ) + }, + ) + .await; + + // Emit a response with an error and ensure it's counted. 
+ let unknown = requests.get_statuses(&labels::Rsp( + labels::Route::new(parent_ref.clone(), route_ref.clone(), None), + labels::HttpRsp { + status: None, + error: Some(labels::Error::Unknown), + }, + )); + send_assert_incremented(&unknown, &mut handle, &mut svc, Default::default(), |tx| { + tx.send_error("a spooky ghost") + }) + .await; + + // Emit a successful response with a body that fails and ensure that both + // the status and error are recorded. + let mixed = requests.get_statuses(&labels::Rsp( + labels::Route::new(parent_ref, route_ref, None), + labels::HttpRsp { + status: Some(http::StatusCode::OK), + error: Some(labels::Error::Unknown), + }, + )); + send_assert_incremented(&mixed, &mut handle, &mut svc, Default::default(), |tx| { + tx.send_response( + http::Response::builder() + .status(200) + .body(BoxBody::new(MockBody::error("a spooky ghost"))) + .unwrap(), + ) + }) + .await; + + assert_eq!(unknown.get(), 1); + assert_eq!(ok.get(), 1); + assert_eq!(no_content.get(), 1); + assert_eq!(mixed.get(), 1); +} + +#[tokio::test(flavor = "current_thread", start_paused = true)] +async fn http_request_hostnames() { + const EXPORT_HOSTNAME_LABELS: bool = true; + const HOST_1: &str = "great.website"; + const URI_1_1: &str = "https://great.website/path/to/index.html#fragment"; + const URI_1_2: &str = "https://great.website/another/index.html"; + const HOST_2: &str = "different.website"; + const URI_2: &str = "https://different.website/index.html"; + const URI_3: &str = "https://[3fff::]/index.html"; + + let _trace = linkerd_tracing::test::trace_init(); + + let super::HttpRouteMetrics { + requests, + body_data, + .. 
+ } = super::HttpRouteMetrics::default(); + let parent_ref = crate::ParentRef(policy::Meta::new_default("parent")); + let route_ref = crate::RouteRef(policy::Meta::new_default("route")); + let (mut svc, mut handle) = mock_http_route_metrics( + &requests, + &body_data, + &parent_ref, + &route_ref, + EXPORT_HOSTNAME_LABELS, + ); + + let get_counter = |host: Option<&'static str>, status: Option| { + requests.get_statuses(&labels::Rsp( + labels::Route::new_with_name( + parent_ref.clone(), + route_ref.clone(), + host.map(str::parse::).map(Result::unwrap), + ), + labels::HttpRsp { + status, + error: None, + }, + )) + }; + + let host1_ok = get_counter(Some(HOST_1), Some(http::StatusCode::OK)); + let host1_teapot = get_counter(Some(HOST_1), Some(http::StatusCode::IM_A_TEAPOT)); + let host2_ok = get_counter(Some(HOST_2), Some(http::StatusCode::OK)); + let unlabeled_ok = get_counter(None, Some(http::StatusCode::OK)); + + // Send one request and ensure it's counted. + send_assert_incremented( + &host1_ok, + &mut handle, + &mut svc, + http::Request::builder() + .uri(URI_1_1) + .body(BoxBody::default()) + .unwrap(), + |tx| { + tx.send_response( + http::Response::builder() + .status(200) + .body(BoxBody::default()) + .unwrap(), + ) + }, + ) + .await; + assert_eq!(host1_ok.get(), 1); + assert_eq!(host1_teapot.get(), 0); + assert_eq!(host2_ok.get(), 0); + + // Send another request to a different path on the same host. + send_assert_incremented( + &host1_teapot, + &mut handle, + &mut svc, + http::Request::builder() + .uri(URI_1_2) + .body(BoxBody::default()) + .unwrap(), + |tx| { + tx.send_response( + http::Response::builder() + .status(418) + .body(BoxBody::default()) + .unwrap(), + ) + }, + ) + .await; + assert_eq!(host1_ok.get(), 1); + assert_eq!(host1_teapot.get(), 1); + assert_eq!(host2_ok.get(), 0); + + // Send a request to a different host. 
+ send_assert_incremented( + &host2_ok, + &mut handle, + &mut svc, + http::Request::builder() + .uri(URI_2) + .body(BoxBody::default()) + .unwrap(), + |tx| { + tx.send_response( + http::Response::builder() + .status(200) + .body(BoxBody::default()) + .unwrap(), + ) + }, + ) + .await; + assert_eq!(host1_ok.get(), 1); + assert_eq!(host1_teapot.get(), 1); + assert_eq!(host2_ok.get(), 1); + + // Send a request to a url with an ip address host component, show that it is not labeled. + send_assert_incremented( + &unlabeled_ok, + &mut handle, + &mut svc, + http::Request::builder() + .uri(URI_3) + .body(BoxBody::default()) + .unwrap(), + |tx| { + tx.send_response( + http::Response::builder() + .status(200) + .body(BoxBody::default()) + .unwrap(), + ) + }, + ) + .await; +} + +#[tokio::test(flavor = "current_thread", start_paused = true)] +async fn http_request_hostnames_disabled() { + const EXPORT_HOSTNAME_LABELS: bool = false; + const HOST_1: &str = "great.website"; + const URI_1_1: &str = "https://great.website/path/to/index.html#fragment"; + const HOST_2: &str = "different.website"; + const URI_2: &str = "https://different.website/index.html"; + const URI_3: &str = "https://[3fff::]/index.html"; + + let _trace = linkerd_tracing::test::trace_init(); + + let super::HttpRouteMetrics { + requests, + body_data, + .. 
+ } = super::HttpRouteMetrics::default(); + let parent_ref = crate::ParentRef(policy::Meta::new_default("parent")); + let route_ref = crate::RouteRef(policy::Meta::new_default("route")); + let (mut svc, mut handle) = mock_http_route_metrics( + &requests, + &body_data, + &parent_ref, + &route_ref, + EXPORT_HOSTNAME_LABELS, + ); + + let get_counter = |host: Option<&'static str>, status: Option| { + requests.get_statuses(&labels::Rsp( + labels::Route::new_with_name( + parent_ref.clone(), + route_ref.clone(), + host.map(str::parse::).map(Result::unwrap), + ), + labels::HttpRsp { + status, + error: None, + }, + )) + }; + + let host1_ok = get_counter(Some(HOST_1), Some(http::StatusCode::OK)); + let host2_ok = get_counter(Some(HOST_2), Some(http::StatusCode::OK)); + let unlabeled_ok = get_counter(None, Some(http::StatusCode::OK)); + + // Send one request and ensure it's counted. + send_assert_incremented( + &unlabeled_ok, + &mut handle, + &mut svc, + http::Request::builder() + .uri(URI_1_1) + .body(BoxBody::default()) + .unwrap(), + |tx| { + tx.send_response( + http::Response::builder() + .status(200) + .body(BoxBody::default()) + .unwrap(), + ) + }, + ) + .await; + assert_eq!(host1_ok.get(), 0); + assert_eq!(host2_ok.get(), 0); + assert_eq!(unlabeled_ok.get(), 1); + + // Send a request to a different host. + send_assert_incremented( + &unlabeled_ok, + &mut handle, + &mut svc, + http::Request::builder() + .uri(URI_2) + .body(BoxBody::default()) + .unwrap(), + |tx| { + tx.send_response( + http::Response::builder() + .status(200) + .body(BoxBody::default()) + .unwrap(), + ) + }, + ) + .await; + assert_eq!(host1_ok.get(), 0); + assert_eq!(host2_ok.get(), 0); + assert_eq!(unlabeled_ok.get(), 2); + + // Send a request to a url with an ip address host component, show that it is not labeled. 
+ send_assert_incremented( + &unlabeled_ok, + &mut handle, + &mut svc, + http::Request::builder() + .uri(URI_3) + .body(BoxBody::default()) + .unwrap(), + |tx| { + tx.send_response( + http::Response::builder() + .status(200) + .body(BoxBody::default()) + .unwrap(), + ) + }, + ) + .await; + assert_eq!(unlabeled_ok.get(), 3); +} + +#[tokio::test(flavor = "current_thread", start_paused = true)] +async fn http_route_request_body_frames() { + use linkerd_http_prom::body_data::request::BodyDataMetrics; + + const EXPORT_HOSTNAME_LABELS: bool = true; + let _trace = linkerd_tracing::test::trace_init(); + + let super::HttpRouteMetrics { + requests, + body_data, + .. + } = super::HttpRouteMetrics::default(); + let parent_ref = crate::ParentRef(policy::Meta::new_default("parent")); + let route_ref = crate::RouteRef(policy::Meta::new_default("route")); + let (mut svc, mut handle) = mock_http_route_metrics( + &requests, + &body_data, + &parent_ref, + &route_ref, + EXPORT_HOSTNAME_LABELS, + ); + handle.allow(1); + + let labels = labels::Route::new( + parent_ref, + route_ref, + Some(&Uri::from_static("http://frame.count.test/")), + ); + let BodyDataMetrics { + // TODO(kate): currently, histograms do not expose their observation count or sum. so, + // we're left unable to exercise these metrics until prometheus/client_rust#242 lands. + // - https://github.com/prometheus/client_rust/pull/241 + // - https://github.com/prometheus/client_rust/pull/242 + #[cfg(feature = "prometheus-client-rust-242")] + frame_size, + .. + } = body_data.metrics(&labels); + + // Create a request whose body is backed by a channel that we can send chunks to. 
+ tracing::info!("creating request"); + let (req, tx) = { + let (tx, body) = + http_body_util::channel::Channel::::new(1024); + let body = BoxBody::new(body); + let req = http::Request::builder() + .uri("http://frame.count.test") + .method("BARK") + .body(body) + .unwrap(); + (req, tx) + }; + + // Before the service has been called, the counters should be zero. + #[cfg(feature = "prometheus-client-rust-242")] + { + assert_eq!(frame_size.count(), 0); + assert_eq!(frame_size.sum(), 0); + } + + // Call the service. + tracing::info!("sending request to service"); + let (fut, resp_tx, rx) = { + use tower::{Service, ServiceExt}; + tracing::info!("calling service"); + let fut = svc.ready().await.expect("ready").call(req); + let (req, send_resp) = handle.next_request().await.unwrap(); + let (parts, rx) = req.into_parts(); + debug_assert_eq!(parts.method.as_str(), "BARK"); + (fut, send_resp, rx) + }; + + // Before the client has sent any body chunks, the counters should be zero. + #[cfg(feature = "prometheus-client-rust-242")] + { + assert_eq!(frame_size.count(), 0); + assert_eq!(frame_size.sum(), 0); + } + + // Send a response back to the client. + tracing::info!("sending request to service"); + let resp = { + use http::{Response, StatusCode}; + let body = BoxBody::from_static("earl grey"); + let resp = Response::builder() + .status(StatusCode::IM_A_TEAPOT) + .body(body) + .unwrap(); + resp_tx.send_response(resp); + fut.await.expect("resp") + }; + + // The counters should still be zero. + #[cfg(feature = "prometheus-client-rust-242")] + { + assert_eq!(frame_size.count(), 0); + assert_eq!(frame_size.sum(), 0); + } + + // Read the response body. 
+ tracing::info!("reading response body"); + { + use http_body_util::BodyExt; + let (parts, body) = resp.into_parts(); + debug_assert_eq!(parts.status, 418); + let bytes = body.collect().await.expect("resp body").to_bytes(); + debug_assert_eq!(bytes, "earl grey"); + } + + // Reading the response body should not affect the counters should still be zero. + #[cfg(feature = "prometheus-client-rust-242")] + { + assert_eq!(frame_size.count(), 0); + assert_eq!(frame_size.sum(), 0); + } + + /// Returns the next chunk from a boxed body. + async fn read_chunk(body: &mut std::pin::Pin>) -> Vec { + use { + bytes::Buf, + http_body::Body, + std::task::{Context, Poll}, + }; + let mut ctx = Context::from_waker(futures_util::task::noop_waker_ref()); + let frame = match body.as_mut().poll_frame(&mut ctx) { + Poll::Ready(Some(Ok(f))) => f, + _ => panic!("next chunk should be ready"), + }; + frame.into_data().ok().expect("data frame").chunk().to_vec() + } + + // And now, send request body bytes. + tracing::info!("sending request body bytes"); + { + // Get the client's sending half, and the server's receiving half of the request body. + let (mut tx, mut rx) = (tx, Box::pin(rx)); + + tx.send_data(b"milk".as_slice().into()).await.unwrap(); + let chunk = read_chunk(&mut rx).await; + debug_assert_eq!(chunk, b"milk"); + #[cfg(feature = "prometheus-client-rust-242")] + assert_eq!(frames_total.get(), 1); // bytes are counted once polled. 
+ #[cfg(feature = "prometheus-client-rust-242")] + assert_eq!(frames_bytes.get(), 4); + + tx.send_data(b"syrup".as_slice().into()).await.unwrap(); + let chunk = read_chunk(&mut rx).await; + debug_assert_eq!(chunk, b"syrup"); + #[cfg(feature = "prometheus-client-rust-242")] + assert_eq!(frames_total.get(), 2); + #[cfg(feature = "prometheus-client-rust-242")] + assert_eq!(frames_bytes.get(), 4 + 5); + } + + tracing::info!("passed"); +} + +#[tokio::test(flavor = "current_thread", start_paused = true)] +async fn http_response_body_drop_on_eos() { + use linkerd_app_core::svc::{Service, ServiceExt}; + + const EXPORT_HOSTNAME_LABELS: bool = false; + let _trace = linkerd_tracing::test::trace_init(); + + let super::HttpRouteMetrics { + requests, + body_data, + .. + } = super::HttpRouteMetrics::default(); + let parent_ref = crate::ParentRef(policy::Meta::new_default("parent")); + let route_ref = crate::RouteRef(policy::Meta::new_default("route")); + let (mut svc, mut handle) = mock_http_route_metrics( + &requests, + &body_data, + &parent_ref, + &route_ref, + EXPORT_HOSTNAME_LABELS, + ); + + // Define a request and a response. + let req = http::Request::default(); + let rsp = http::Response::builder() + .status(200) + .body(BoxBody::from_static("contents")) + .unwrap(); + + // Two counters for 200 responses that do/don't have an error. + let ok = requests.get_statuses(&labels::Rsp( + labels::Route::new(parent_ref.clone(), route_ref.clone(), None), + labels::HttpRsp { + status: Some(http::StatusCode::OK), + error: None, + }, + )); + let err = requests.get_statuses(&labels::Rsp( + labels::Route::new(parent_ref.clone(), route_ref.clone(), None), + labels::HttpRsp { + status: Some(http::StatusCode::OK), + error: Some(labels::Error::Unknown), + }, + )); + debug_assert_eq!(ok.get(), 0); + debug_assert_eq!(err.get(), 0); + + // Send the request, and obtain the response. 
+ let mut body = { + handle.allow(1); + svc.ready().await.expect("ready"); + let mut call = svc.call(req); + let (_req, tx) = tokio::select! { + _ = (&mut call) => unreachable!(), + res = handle.next_request() => res.unwrap(), + }; + assert_eq!(ok.get(), 0); + tx.send_response(rsp); + call.await.unwrap().into_body() + }; + + // The counters are not incremented yet. + assert_eq!(ok.get(), 0); + assert_eq!(err.get(), 0); + + // Poll a frame out of the body. + let data = body + .frame() + .await + .expect("yields a result") + .expect("yields a frame") + .into_data() + .ok() + .expect("yields data"); + assert_eq!(data.chunk(), "contents".as_bytes()); + assert_eq!(data.remaining(), "contents".len()); + + // Show that the body reports itself as being complete. + debug_assert!(body.is_end_stream()); + assert_eq!(ok.get(), 1); + assert_eq!(err.get(), 0); +} + +#[tokio::test(flavor = "current_thread", start_paused = true)] +async fn http_response_body_drop_early() { + use linkerd_app_core::svc::{Service, ServiceExt}; + + const EXPORT_HOSTNAME_LABELS: bool = false; + let _trace = linkerd_tracing::test::trace_init(); + + let super::HttpRouteMetrics { + requests, + body_data, + .. + } = super::HttpRouteMetrics::default(); + let parent_ref = crate::ParentRef(policy::Meta::new_default("parent")); + let route_ref = crate::RouteRef(policy::Meta::new_default("route")); + let (mut svc, mut handle) = mock_http_route_metrics( + &requests, + &body_data, + &parent_ref, + &route_ref, + EXPORT_HOSTNAME_LABELS, + ); + + // Define a request and a response. + let req = http::Request::default(); + let rsp = http::Response::builder() + .status(200) + .body(BoxBody::from_static("contents")) + .unwrap(); + + // Two counters for 200 responses that do/don't have an error. 
+ let ok = requests.get_statuses(&labels::Rsp( + labels::Route::new(parent_ref.clone(), route_ref.clone(), None), + labels::HttpRsp { + status: Some(http::StatusCode::OK), + error: None, + }, + )); + let err = requests.get_statuses(&labels::Rsp( + labels::Route::new(parent_ref.clone(), route_ref.clone(), None), + labels::HttpRsp { + status: Some(http::StatusCode::OK), + error: Some(labels::Error::Unknown), + }, + )); + debug_assert_eq!(ok.get(), 0); + debug_assert_eq!(err.get(), 0); + + // Send the request, and obtain the response. + let body = { + handle.allow(1); + svc.ready().await.expect("ready"); + let mut call = svc.call(req); + let (_req, tx) = tokio::select! { + _ = (&mut call) => unreachable!(), + res = handle.next_request() => res.unwrap(), + }; + assert_eq!(ok.get(), 0); + tx.send_response(rsp); + call.await.unwrap().into_body() + }; + + // The counters are not incremented yet. + assert_eq!(ok.get(), 0); + assert_eq!(err.get(), 0); + + // The body reports an error if it was not completed. + drop(body); + assert_eq!(ok.get(), 0); + assert_eq!(err.get(), 1); +} + +#[tokio::test(flavor = "current_thread", start_paused = true)] +async fn grpc_request_statuses_ok() { + const EXPORT_HOSTNAME_LABELS: bool = true; + let _trace = linkerd_tracing::test::trace_init(); + + let super::GrpcRouteMetrics { + requests, + body_data, + .. + } = super::GrpcRouteMetrics::default(); + let parent_ref = crate::ParentRef(policy::Meta::new_default("parent")); + let route_ref = crate::RouteRef(policy::Meta::new_default("route")); + + let (mut svc, mut handle) = mock_grpc_route_metrics( + &requests, + &body_data, + &parent_ref, + &route_ref, + EXPORT_HOSTNAME_LABELS, + ); + + // Send one request and ensure it's counted. 
+ let ok = requests.get_statuses(&labels::Rsp( + labels::Route::new( + parent_ref.clone(), + route_ref.clone(), + Some(&Uri::from_static(MOCK_GRPC_REQ_URI)), + ), + labels::GrpcRsp { + status: Some(tonic::Code::Ok), + error: None, + }, + )); + send_assert_incremented( + &ok, + &mut handle, + &mut svc, + http::Request::builder() + .method("POST") + .uri("http://host/svc/method") + .body(Default::default()) + .unwrap(), + |tx| { + tx.send_response( + http::Response::builder() + .body(BoxBody::new(MockBody::grpc_status(0))) + .unwrap(), + ) + }, + ) + .await; +} + +#[tokio::test(flavor = "current_thread", start_paused = true)] +async fn grpc_request_statuses_not_found() { + const EXPORT_HOSTNAME_LABELS: bool = true; + let _trace = linkerd_tracing::test::trace_init(); + + let super::GrpcRouteMetrics { + requests, + body_data, + .. + } = super::GrpcRouteMetrics::default(); + let parent_ref = crate::ParentRef(policy::Meta::new_default("parent")); + let route_ref = crate::RouteRef(policy::Meta::new_default("route")); + + let (mut svc, mut handle) = mock_grpc_route_metrics( + &requests, + &body_data, + &parent_ref, + &route_ref, + EXPORT_HOSTNAME_LABELS, + ); + + // Send another request and ensure it's counted with a different response + // status. 
+ let not_found = requests.get_statuses(&labels::Rsp( + labels::Route::new( + parent_ref.clone(), + route_ref.clone(), + Some(&Uri::from_static(MOCK_GRPC_REQ_URI)), + ), + labels::GrpcRsp { + status: Some(tonic::Code::NotFound), + error: None, + }, + )); + send_assert_incremented( + ¬_found, + &mut handle, + &mut svc, + http::Request::builder() + .method("POST") + .uri("http://host/svc/method") + .body(Default::default()) + .unwrap(), + |tx| { + tx.send_response( + http::Response::builder() + .body(BoxBody::new(MockBody::grpc_status(5))) + .unwrap(), + ) + }, + ) + .await; +} + +#[tokio::test(flavor = "current_thread", start_paused = true)] +async fn grpc_request_statuses_error_response() { + const EXPORT_HOSTNAME_LABELS: bool = true; + let _trace = linkerd_tracing::test::trace_init(); + + let super::GrpcRouteMetrics { + requests, + body_data, + .. + } = super::GrpcRouteMetrics::default(); + let parent_ref = crate::ParentRef(policy::Meta::new_default("parent")); + let route_ref = crate::RouteRef(policy::Meta::new_default("route")); + let (mut svc, mut handle) = mock_grpc_route_metrics( + &requests, + &body_data, + &parent_ref, + &route_ref, + EXPORT_HOSTNAME_LABELS, + ); + + let unknown = requests.get_statuses(&labels::Rsp( + labels::Route::new( + parent_ref.clone(), + route_ref.clone(), + Some(&Uri::from_static(MOCK_GRPC_REQ_URI)), + ), + labels::GrpcRsp { + status: None, + error: Some(labels::Error::Unknown), + }, + )); + send_assert_incremented( + &unknown, + &mut handle, + &mut svc, + http::Request::builder() + .method("POST") + .uri("http://host/svc/method") + .body(Default::default()) + .unwrap(), + |tx| tx.send_error("a spooky ghost"), + ) + .await; +} + +#[tokio::test(flavor = "current_thread", start_paused = true)] +async fn grpc_request_statuses_error_body() { + const EXPORT_HOSTNAME_LABELS: bool = true; + let _trace = linkerd_tracing::test::trace_init(); + + let super::GrpcRouteMetrics { + requests, + body_data, + .. 
+ } = super::GrpcRouteMetrics::default(); + let parent_ref = crate::ParentRef(policy::Meta::new_default("parent")); + let route_ref = crate::RouteRef(policy::Meta::new_default("route")); + let (mut svc, mut handle) = mock_grpc_route_metrics( + &requests, + &body_data, + &parent_ref, + &route_ref, + EXPORT_HOSTNAME_LABELS, + ); + + let unknown = requests.get_statuses(&labels::Rsp( + labels::Route::new( + parent_ref.clone(), + route_ref.clone(), + Some(&Uri::from_static(MOCK_GRPC_REQ_URI)), + ), + labels::GrpcRsp { + status: None, + error: Some(labels::Error::Unknown), + }, + )); + send_assert_incremented( + &unknown, + &mut handle, + &mut svc, + http::Request::builder() + .method("POST") + .uri("http://host/svc/method") + .body(Default::default()) + .unwrap(), + |tx| { + tx.send_response( + http::Response::builder() + .body(BoxBody::new(MockBody::error("a spooky ghost"))) + .unwrap(), + ) + }, + ) + .await; +} + +#[tokio::test(flavor = "current_thread", start_paused = true)] +async fn grpc_response_body_drop_on_eos() { + use linkerd_app_core::svc::{Service, ServiceExt}; + + const EXPORT_HOSTNAME_LABELS: bool = false; + let _trace = linkerd_tracing::test::trace_init(); + + let super::GrpcRouteMetrics { + requests, + body_data, + .. + } = super::GrpcRouteMetrics::default(); + let parent_ref = crate::ParentRef(policy::Meta::new_default("parent")); + let route_ref = crate::RouteRef(policy::Meta::new_default("route")); + let (mut svc, mut handle) = mock_grpc_route_metrics( + &requests, + &body_data, + &parent_ref, + &route_ref, + EXPORT_HOSTNAME_LABELS, + ); + + // Define a request and a response. 
+ let req = http::Request::default(); + let rsp = http::Response::builder() + .status(200) + .body({ + let data = Poll::Ready(Some(Ok(Bytes::from_static(b"contents")))); + let trailers = { + let mut trailers = http::HeaderMap::with_capacity(1); + trailers.insert(GRPC_STATUS.clone(), GRPC_STATUS_OK.clone()); + Poll::Ready(Some(Ok(trailers))) + }; + let body = linkerd_mock_http_body::MockBody::default() + .then_yield_data(data) + .then_yield_trailer(trailers); + BoxBody::new(body) + }) + .unwrap(); + + // Two counters for 200 responses that do/don't have an error. + let ok = requests.get_statuses(&labels::Rsp( + labels::Route::new(parent_ref.clone(), route_ref.clone(), None), + labels::GrpcRsp { + status: Some(tonic::Code::Ok), + error: None, + }, + )); + let err = requests.get_statuses(&labels::Rsp( + labels::Route::new(parent_ref.clone(), route_ref.clone(), None), + labels::GrpcRsp { + status: Some(tonic::Code::Ok), + error: Some(labels::Error::Unknown), + }, + )); + debug_assert_eq!(ok.get(), 0); + debug_assert_eq!(err.get(), 0); + + // Send the request, and obtain the response. + let mut body = { + handle.allow(1); + svc.ready().await.expect("ready"); + let mut call = svc.call(req); + let (_req, tx) = tokio::select! { + _ = (&mut call) => unreachable!(), + res = handle.next_request() => res.unwrap(), + }; + assert_eq!(ok.get(), 0); + tx.send_response(rsp); + call.await.unwrap().into_body() + }; + + // The counters are not incremented yet. + assert_eq!(ok.get(), 0); + assert_eq!(err.get(), 0); + + // Poll a frame out of the body. + let data = body + .frame() + .await + .expect("yields a result") + .expect("yields a frame") + .into_data() + .ok() + .expect("yields data"); + assert_eq!(data.chunk(), "contents".as_bytes()); + assert_eq!(data.remaining(), "contents".len()); + + // Poll the trailers out of the body. 
+ let trls = body + .frame() + .await + .expect("yields a result") + .expect("yields a frame") + .into_trailers() + .ok() + .expect("yields trailers"); + assert_eq!(trls.get(&GRPC_STATUS).unwrap(), GRPC_STATUS_OK); + + // Show that the body reports itself as being complete. + debug_assert!(body.is_end_stream()); + assert_eq!(ok.get(), 1); + assert_eq!(err.get(), 0); +} + +#[tokio::test(flavor = "current_thread", start_paused = true)] +async fn grpc_response_body_drop_early() { + use linkerd_app_core::svc::{Service, ServiceExt}; + + const EXPORT_HOSTNAME_LABELS: bool = false; + let _trace = linkerd_tracing::test::trace_init(); + + let super::GrpcRouteMetrics { + requests, + body_data, + .. + } = super::GrpcRouteMetrics::default(); + let parent_ref = crate::ParentRef(policy::Meta::new_default("parent")); + let route_ref = crate::RouteRef(policy::Meta::new_default("route")); + let (mut svc, mut handle) = mock_grpc_route_metrics( + &requests, + &body_data, + &parent_ref, + &route_ref, + EXPORT_HOSTNAME_LABELS, + ); + + // Define a request and a response. + let req = http::Request::default(); + let rsp = http::Response::builder() + .status(200) + .body({ + let data = Poll::Ready(Some(Ok(Bytes::from_static(b"contents")))); + let trailers = { + let mut trailers = http::HeaderMap::with_capacity(1); + trailers.insert(GRPC_STATUS.clone(), GRPC_STATUS_OK.clone()); + Poll::Ready(Some(Ok(trailers))) + }; + let body = linkerd_mock_http_body::MockBody::default() + .then_yield_data(data) + .then_yield_trailer(trailers); + BoxBody::new(body) + }) + .unwrap(); + + // Two counters for 200 responses that do/don't have an error. 
+ let ok = requests.get_statuses(&labels::Rsp( + labels::Route::new(parent_ref.clone(), route_ref.clone(), None), + labels::GrpcRsp { + status: Some(tonic::Code::Ok), + error: None, + }, + )); + let err = requests.get_statuses(&labels::Rsp( + labels::Route::new(parent_ref.clone(), route_ref.clone(), None), + labels::GrpcRsp { + status: None, + error: Some(labels::Error::Unknown), + }, + )); + debug_assert_eq!(ok.get(), 0); + debug_assert_eq!(err.get(), 0); + + // Send the request, and obtain the response. + let mut body = { + handle.allow(1); + svc.ready().await.expect("ready"); + let mut call = svc.call(req); + let (_req, tx) = tokio::select! { + _ = (&mut call) => unreachable!(), + res = handle.next_request() => res.unwrap(), + }; + assert_eq!(ok.get(), 0); + tx.send_response(rsp); + call.await.unwrap().into_body() + }; + + // The counters are not incremented yet. + assert_eq!(ok.get(), 0); + assert_eq!(err.get(), 0); + + // Poll a frame out of the body. + let data = body + .frame() + .await + .expect("yields a result") + .expect("yields a frame") + .into_data() + .ok() + .expect("yields data"); + assert_eq!(data.chunk(), "contents".as_bytes()); + assert_eq!(data.remaining(), "contents".len()); + + // The counters are not incremented yet. + debug_assert!(!body.is_end_stream()); + assert_eq!(ok.get(), 0); + assert_eq!(err.get(), 0); + + // Then, drop the body without polling the trailers. 
+ drop(body); + assert_eq!(ok.get(), 0); + assert_eq!(err.get(), 1); +} + +// === Utils === + +const MOCK_GRPC_REQ_URI: &str = "http://host/svc/method"; + +pub fn mock_http_route_metrics( + metrics: &RequestMetrics, + body_data: &RequestBodyFamilies, + parent_ref: &crate::ParentRef, + route_ref: &crate::RouteRef, + export_hostname_labels: bool, +) -> (svc::BoxHttp, Handle) { + let req = http::Request::builder().body(()).unwrap(); + let (r#match, _) = policy::route::find( + &[policy::http::Route { + hosts: vec![], + rules: vec![policy::route::Rule { + matches: vec![policy::http::r#match::MatchRequest::default()], + policy: policy::http::Policy { + meta: route_ref.0.clone(), + filters: [].into(), + distribution: policy::RouteDistribution::Empty, + params: policy::http::RouteParams { + export_hostname_labels, + ..Default::default() + }, + }, + }], + }], + &req, + ) + .expect("find default route"); + + let (tx, handle) = tower_test::mock::pair::, http::Response>(); + let svc = super::layer(metrics, body_data) + .layer(move |_t: Http<()>| tx.clone()) + .new_service(Http { + r#match, + params: Route { + parent: (), + addr: std::net::SocketAddr::new([0, 0, 0, 0].into(), 8080).into(), + parent_ref: parent_ref.clone(), + route_ref: route_ref.clone(), + filters: [].into(), + distribution: Default::default(), + params: policy::http::RouteParams { + export_hostname_labels, + ..Default::default() + }, + }, + }); + + (svc::BoxHttp::new(svc), handle) +} + +pub fn mock_grpc_route_metrics( + metrics: &RequestMetrics, + body_data: &RequestBodyFamilies, + parent_ref: &crate::ParentRef, + route_ref: &crate::RouteRef, + export_hostname_labels: bool, +) -> (svc::BoxHttp, Handle) { + let req = http::Request::builder() + .method("POST") + .uri(MOCK_GRPC_REQ_URI) + .body(()) + .unwrap(); + let (r#match, _) = policy::route::find( + &[policy::grpc::Route { + hosts: vec![], + rules: vec![policy::route::Rule { + matches: vec![policy::grpc::r#match::MatchRoute::default()], + policy: 
policy::grpc::Policy { + meta: route_ref.0.clone(), + filters: [].into(), + distribution: policy::RouteDistribution::Empty, + params: policy::grpc::RouteParams { + export_hostname_labels, + ..Default::default() + }, + }, + }], + }], + &req, + ) + .expect("find default route"); + + let (tx, handle) = tower_test::mock::pair::, http::Response>(); + let svc = super::layer(metrics, body_data) + .layer(move |_t: Grpc<()>| tx.clone()) + .new_service(Grpc { + r#match, + params: Route { + parent: (), + addr: std::net::SocketAddr::new([0, 0, 0, 0].into(), 8080).into(), + parent_ref: parent_ref.clone(), + route_ref: route_ref.clone(), + filters: [].into(), + distribution: Default::default(), + params: policy::grpc::RouteParams { + export_hostname_labels, + ..Default::default() + }, + }, + }); + + (svc::BoxHttp::new(svc), handle) +} diff --git a/linkerd/app/outbound/src/http/logical/policy/route/retry.rs b/linkerd/app/outbound/src/http/logical/policy/route/retry.rs new file mode 100644 index 0000000000..8a7fe8aff9 --- /dev/null +++ b/linkerd/app/outbound/src/http/logical/policy/route/retry.rs @@ -0,0 +1,156 @@ +use super::{extensions, metrics::labels::Route as RouteLabels}; +use futures::future::{Either, Ready}; +use linkerd_app_core::{ + cause_ref, classify, + exp_backoff::ExponentialBackoff, + is_caused_by, + proxy::http::{self, stream_timeouts::ResponseTimeoutError}, + svc::{self, http::h2}, + Error, Result, +}; +use linkerd_http_retry::{self as retry, peek_trailers::PeekTrailersBody}; +use linkerd_proxy_client_policy as policy; +use tokio::time; + +pub type NewHttpRetry = retry::NewHttpRetry; + +#[derive(Clone, Debug)] +pub struct RetryPolicy { + pub timeout: Option, + pub retryable_http_statuses: Option, + pub retryable_grpc_statuses: Option, + + pub max_retries: usize, + pub max_request_bytes: usize, + pub backoff: Option, +} + +pub type RouteRetryMetrics = retry::MetricFamilies; + +// === impl RetryPolicy === + +impl svc::Param for RetryPolicy { + fn param(&self) -> 
retry::Params { + retry::Params { + max_retries: self.max_retries, + max_request_bytes: self.max_request_bytes, + backoff: self.backoff, + } + } +} + +impl retry::Policy for RetryPolicy { + type Future = Either>; + + fn is_retryable(&self, res: Result<&::http::Response, &Error>) -> bool { + let rsp = match res { + Ok(rsp) => rsp, + Err(error) => { + let retryable = Self::retryable_error(error); + tracing::debug!(retryable, %error); + return retryable; + } + }; + + if let Some(codes) = self.retryable_grpc_statuses.as_ref() { + let grpc_status = Self::grpc_status(rsp); + let retryable = grpc_status.map_or(false, |c| codes.contains(c)); + tracing::debug!(retryable, grpc.status = ?grpc_status); + if retryable { + return true; + } + } + + if let Some(statuses) = self.retryable_http_statuses.as_ref() { + let retryable = statuses.contains(rsp.status()); + tracing::debug!(retryable, http.status = %rsp.status()); + if retryable { + return true; + } + } + + false + } + + fn set_extensions(&self, dst: &mut ::http::Extensions, src: &::http::Extensions) { + let attempt = if let Some(extensions::Attempt(n)) = src.get::() { + n.saturating_add(1) + } else { + // There was an already a first attmept (the original extensions). + 2.try_into().unwrap() + }; + dst.insert(extensions::Attempt(attempt)); + + if let Some(mut timeouts) = src.get::().cloned() { + // If retries are exhausted, remove the response headers timeout, + // since we're not blocked on making a decision on a retry decision. + // This may give the request additional time to be satisfied. + let retries_remain = u16::from(attempt) as usize <= self.max_retries; + if !retries_remain { + tracing::debug!("Exhausted retries; removing response headers timeout"); + timeouts.response_headers = None; + } + dst.insert(timeouts); + } + + // The HTTP server sets a ClientHandle with the client's address and a means + // to close the server-side connection. 
+ if let Some(client_handle) = src.get::().cloned() { + dst.insert(client_handle); + } + + // The legacy response classifier is set for the endpoint stack to use. + // This informs endpoint-level behavior (failure accrual, etc.). + // TODO(ver): This should ultimately be eliminated in favor of + // failure-accrual specific configuration. The endpoint metrics should + // be migrated, ultimately... + if let Some(classify) = src.get::().cloned() { + dst.insert(classify); + } + } +} + +impl RetryPolicy { + fn grpc_status(rsp: &http::Response) -> Option { + if let Some(header) = rsp.headers().get("grpc-status") { + return Some(header.to_str().ok()?.parse::().ok()?.into()); + } + + let Some(trailer) = rsp.body().peek_trailers() else { + tracing::debug!("No trailers"); + return None; + }; + let status = trailer.get("grpc-status")?; + Some(status.to_str().ok()?.parse::().ok()?.into()) + } + + fn retryable_error(error: &Error) -> bool { + // While LoadShed errors are not retryable, FailFast errors are, since + // retrying may put us in another backend that is available. + if is_caused_by::(&**error) { + return false; + } + if is_caused_by::(&**error) { + return true; + } + + if matches!( + cause_ref::(&**error), + Some(ResponseTimeoutError::Headers(_)) + ) { + return true; + } + + // HTTP/2 errors + if let Some(h2e) = cause_ref::(&**error) { + if matches!(h2e.reason(), Some(h2::Reason::REFUSED_STREAM)) { + return true; + } + } + + // TODO(ver) connection errors require changes to the endpoint stack so + // that they can be inspected. here. 
+ + false + } +} diff --git a/linkerd/app/outbound/src/http/logical/policy/router.rs b/linkerd/app/outbound/src/http/logical/policy/router.rs index cb517849f8..85435ba01a 100644 --- a/linkerd/app/outbound/src/http/logical/policy/router.rs +++ b/linkerd/app/outbound/src/http/logical/policy/router.rs @@ -21,9 +21,9 @@ pub struct Params { } pub type HttpParams = - Params; + Params; pub type GrpcParams = - Params; + Params; #[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct Router { @@ -34,15 +34,15 @@ pub(crate) struct Router { } pub(super) type Http = - Router; + Router; pub(super) type Grpc = - Router; + Router; type NewBackendCache = distribute::NewBackendCache, (), N, S>; // === impl Router === -impl Router +impl Router where // Parent target type. T: Clone + Debug + Eq + Hash + Send + Sync + 'static, @@ -53,24 +53,29 @@ where // Request filter. F: Debug + Eq + Hash, F: Clone + Send + Sync + 'static, - // Failure policy. - E: Debug + Eq + Hash, - E: Clone + Send + Sync + 'static, + // Route policy. + P: Debug + Eq + Hash, + P: Clone + Send + Sync + 'static, // Assert that we can route for the given match and filter types. Self: svc::router::SelectRoute< http::Request, - Key = route::MatchedRoute, + Key = route::MatchedRoute, Error = NoRoute, >, - route::MatchedRoute: route::filters::Apply + svc::Param, - route::MatchedBackend: route::filters::Apply, - route::backend::RouteBackendMetrics: - svc::ExtractParam>, + route::MatchedRoute: route::filters::Apply + + svc::Param + + svc::Param + + route::metrics::MkStreamLabel + + svc::ExtractParam>, + route::MatchedBackend: route::filters::Apply + route::metrics::MkStreamLabel, { /// Builds a stack that applies routes to distribute requests over a cached /// set of inner services so that. pub(super) fn layer( - metrics: route::RouteMetrics, + metrics: route::Metrics< + route::MatchedRoute, + route::MatchedBackend, + >, ) -> impl svc::Layer> + Clone where // Inner stack. 
@@ -100,14 +105,14 @@ where } } -impl From<(Params, T)> for Router +impl From<(Params, T)> for Router where T: Eq + Hash + Clone + Debug, M: Clone, F: Clone, - E: Clone, + P: Clone, { - fn from((rts, parent): (Params, T)) -> Self { + fn from((rts, parent): (Params, T)) -> Self { let Params { addr, meta: parent_ref, @@ -118,6 +123,7 @@ where let mk_concrete = { let parent = parent.clone(); + let parent_ref = parent_ref.clone(); move |backend_ref: BackendRef, target: concrete::Dispatch| { // XXX With policies we don't have a top-level authority name at // the moment. So, instead, we use the concrete addr used for @@ -168,7 +174,6 @@ where route_ref: route_ref.clone(), filters, concrete, - request_timeout: rb.request_timeout, } }; @@ -189,23 +194,27 @@ where } }; - let mk_policy = |policy::RoutePolicy:: { - meta, - filters, - distribution, - failure_policy, - request_timeout, - }| { - let route_ref = RouteRef(meta); - let distribution = mk_distribution(&route_ref, &distribution); - route::Route { - addr: addr.clone(), - parent: parent.clone(), - route_ref, - filters, - failure_policy, - distribution, - request_timeout, + let mk_policy = { + let addr = addr.clone(); + let parent = parent.clone(); + let parent_ref = parent_ref.clone(); + move |policy::RoutePolicy:: { + meta, + filters, + distribution, + params, + }| { + let route_ref = RouteRef(meta); + let distribution = mk_distribution(&route_ref, &distribution); + route::Route { + addr: addr.clone(), + parent: parent.clone(), + parent_ref: parent_ref.clone(), + route_ref, + filters, + distribution, + params, + } } }; @@ -274,7 +283,7 @@ where } } -impl svc::Param for Router +impl svc::Param for Router where T: Eq + Hash + Clone + Debug, { @@ -283,7 +292,7 @@ where } } -impl svc::Param>> for Router +impl svc::Param>> for Router where T: Eq + Hash + Clone + Debug, { diff --git a/linkerd/app/outbound/src/http/logical/policy/tests.rs b/linkerd/app/outbound/src/http/logical/policy/tests.rs index f2b0e62e29..ccecc61008 
100644 --- a/linkerd/app/outbound/src/http/logical/policy/tests.rs +++ b/linkerd/app/outbound/src/http/logical/policy/tests.rs @@ -47,12 +47,10 @@ async fn header_based_route() { section: None, }), filters: Arc::new([]), - failure_policy: Default::default(), - request_timeout: None, + params: Default::default(), distribution: policy::RouteDistribution::FirstAvailable(Arc::new([policy::RouteBackend { filters: Arc::new([]), backend, - request_timeout: None, }])), }; @@ -125,17 +123,17 @@ async fn header_based_route() { failure_accrual: Default::default(), }); - let metrics = RouteMetrics::default(); + let metrics = HttpRouteMetrics::default(); let router = Policy::layer(metrics.clone(), Default::default()) .layer(inner) .new_service(Policy::from((routes, ()))); - let default_reqs = metrics.request_count( + let default_reqs = metrics.backend_request_count( parent_ref.clone(), default_route_ref.clone(), default_backend_ref.clone(), ); - let special_reqs = metrics.request_count( + let special_reqs = metrics.backend_request_count( parent_ref.clone(), special_route_ref.clone(), special_backend_ref.clone(), @@ -211,8 +209,7 @@ async fn http_filter_request_headers() { matches: vec![route::http::MatchRequest::default()], policy: policy::RoutePolicy { meta: policy::Meta::new_default("turtles"), - failure_policy: Default::default(), - request_timeout: None, + params: Default::default(), filters: Arc::new([policy::http::Filter::RequestHeaders( policy::http::filter::ModifyHeader { add: vec![(PIZZA.clone(), TUBULAR.clone())], @@ -228,7 +225,6 @@ async fn http_filter_request_headers() { ..Default::default() }, )]), - request_timeout: None, }, ])), }, diff --git a/linkerd/app/outbound/src/http/logical/profile.rs b/linkerd/app/outbound/src/http/logical/profile.rs index 897e95e0e9..447c47426d 100644 --- a/linkerd/app/outbound/src/http/logical/profile.rs +++ b/linkerd/app/outbound/src/http/logical/profile.rs @@ -2,11 +2,11 @@ use super::{ super::{concrete, retry}, CanonicalDstHeader, 
Concrete, NoRoute, }; -use crate::{policy, BackendRef, ParentRef, UNKNOWN_META}; +use crate::{service_meta, BackendRef, ParentRef, UNKNOWN_META}; use linkerd_app_core::{ classify, metrics, proxy::http::{self, balance}, - svc, Error, NameAddr, + svc, Error, }; use linkerd_distribute as distribute; use std::{fmt::Debug, hash::Hash, sync::Arc, time}; @@ -107,26 +107,6 @@ where targets, } = routes; - fn service_meta(addr: &NameAddr) -> Option> { - let mut parts = addr.name().split('.'); - - let name = parts.next()?; - let namespace = parts.next()?; - - if !parts.next()?.eq_ignore_ascii_case("svc") { - return None; - } - - Some(Arc::new(policy::Meta::Resource { - group: "core".to_string(), - kind: "Service".to_string(), - namespace: namespace.to_string(), - name: name.to_string(), - section: None, - port: Some(addr.port().try_into().ok()?), - })) - } - let parent_meta = service_meta(&addr).unwrap_or_else(|| UNKNOWN_META.clone()); // Create concrete targets for all of the profile's routes. diff --git a/linkerd/app/outbound/src/http/logical/tests.rs b/linkerd/app/outbound/src/http/logical/tests.rs index d6473f4e25..d1980a01b3 100644 --- a/linkerd/app/outbound/src/http/logical/tests.rs +++ b/linkerd/app/outbound/src/http/logical/tests.rs @@ -1,459 +1,32 @@ -use super::*; +use super::{policy, Outbound, ParentRef, Routes}; use crate::test_util::*; -use ::http::StatusCode; use linkerd_app_core::{ - errors, exp_backoff::ExponentialBackoff, svc::NewService, svc::ServiceExt, trace, + proxy::http::{self, Body, BoxBody, StatusCode}, + svc::{self, NewService, ServiceExt}, + transport::addrs::*, + Error, NameAddr, Result, }; use linkerd_proxy_client_policy as client_policy; use parking_lot::Mutex; use std::{collections::HashMap, net::SocketAddr, sync::Arc, time::Duration}; -use tokio::{task, time}; +use tokio::sync::watch; use tracing::Instrument; -const AUTHORITY: &str = "logical.test.svc.cluster.local"; -const PORT: u16 = 666; +mod basic; +mod failure_accrual; +mod headers; +mod 
retries; +mod timeouts; type Request = http::Request; type Response = http::Response; -#[tokio::test(flavor = "current_thread")] -async fn routes() { - let _trace = trace::test::trace_init(); - - let addr = SocketAddr::new([192, 0, 2, 41].into(), PORT); - let dest: NameAddr = format!("{AUTHORITY}:{PORT}") - .parse::() - .expect("dest addr is valid"); - let (svc, mut handle) = tower_test::mock::pair(); - let connect = HttpConnect::default().service(addr, svc); - let resolve = support::resolver().endpoint_exists(dest.clone(), addr, Default::default()); - let (rt, _shutdown) = runtime(); - let stack = Outbound::new(default_config(), rt, &mut Default::default()) - .with_stack(svc::ArcNewService::new(connect)) - .push_http_cached(resolve) - .into_inner(); - - let backend = default_backend(&dest); - let (_route_tx, routes) = - watch::channel(Routes::Policy(policy::Params::Http(policy::HttpParams { - addr: dest.into(), - meta: ParentRef(client_policy::Meta::new_default("parent")), - backends: Arc::new([backend.clone()]), - routes: Arc::new([default_route(backend)]), - failure_accrual: client_policy::FailureAccrual::None, - }))); - let target = Target { - num: 1, - version: http::Version::H2, - routes, - }; - let svc = stack.new_service(target); - - handle.allow(1); - let rsp = send_req(svc.clone(), http::Request::get("/")); - serve_req(&mut handle, mk_rsp(StatusCode::OK, "good")).await; - assert_eq!( - rsp.await.expect("request must succeed").status(), - http::StatusCode::OK - ); -} - -#[tokio::test(flavor = "current_thread", start_paused = true)] -async fn consecutive_failures_accrue() { - let _trace = trace::test::with_default_filter(format!("{},trace", trace::test::DEFAULT_LOG)); - - let addr = SocketAddr::new([192, 0, 2, 41].into(), PORT); - let dest: NameAddr = format!("{AUTHORITY}:{PORT}") - .parse::() - .expect("dest addr is valid"); - let (svc, mut handle) = tower_test::mock::pair(); - let connect = HttpConnect::default().service(addr, svc); - let resolve = 
support::resolver().endpoint_exists(dest.clone(), addr, Default::default()); - let (rt, _shutdown) = runtime(); - let cfg = default_config(); - let stack = Outbound::new(cfg.clone(), rt, &mut Default::default()) - .with_stack(svc::ArcNewService::new(connect)) - .push_http_cached(resolve) - .into_inner(); - - let backend = default_backend(&dest); - // Ensure that the probe delay is longer than the failfast timeout, so that - // the service is only probed after it has entered failfast when the gate - // shuts. - let min_backoff = cfg.http_request_queue.failfast_timeout + Duration::from_secs(1); - let backoff = ExponentialBackoff::try_new( - min_backoff, - min_backoff * 6, - // no jitter --- ensure the test is deterministic - 0.0, - ) - .unwrap(); - let mut backoffs = backoff.stream(); - let (_route_tx, routes) = - watch::channel(Routes::Policy(policy::Params::Http(policy::HttpParams { - addr: dest.into(), - meta: ParentRef(client_policy::Meta::new_default("parent")), - backends: Arc::new([backend.clone()]), - routes: Arc::new([default_route(backend)]), - failure_accrual: client_policy::FailureAccrual::ConsecutiveFailures { - max_failures: 3, - backoff, - }, - }))); - let target = Target { - num: 1, - version: http::Version::H2, - routes, - }; - let svc = stack.new_service(target); - - tracing::info!("Sending good request"); - handle.allow(1); - let rsp = send_req(svc.clone(), http::Request::get("/")); - serve_req(&mut handle, mk_rsp(StatusCode::OK, "good")).await; - assert_rsp(rsp, StatusCode::OK, "good").await; - - // fail 3 requests so that we hit the consecutive failures accrual limit - for i in 1..=3 { - tracing::info!("Sending bad request {i}/3"); - handle.allow(1); - let rsp = send_req(svc.clone(), http::Request::get("/")); - serve_req( - &mut handle, - mk_rsp(StatusCode::INTERNAL_SERVER_ERROR, "bad"), - ) - .await; - assert_rsp(rsp, StatusCode::INTERNAL_SERVER_ERROR, "bad").await; - } - - // Ensure that the error is because of the breaker, and not because the 
- // underlying service doesn't poll ready. - tracing::info!("Sending request while in failfast"); - handle.allow(1); - // We are now in failfast. - let error = send_req(svc.clone(), http::Request::get("/")) - .await - .expect_err("service should be in failfast"); - assert!( - errors::is_caused_by::(error.as_ref()), - "service should be in failfast" - ); - - tracing::info!("Sending request while in loadshed"); - let error = send_req(svc.clone(), http::Request::get("/")) - .await - .expect_err("service should be in failfast"); - assert!( - errors::is_caused_by::(error.as_ref()), - "service should be in failfast" - ); - - // After the probation period, a subsequent request should be failed by - // hitting the service. - tracing::info!("Waiting for probation"); - backoffs.next().await; - task::yield_now().await; - - tracing::info!("Sending a bad request while in probation"); - handle.allow(1); - let rsp = send_req(svc.clone(), http::Request::get("/")); - tracing::info!("Serving response"); - tokio::time::timeout( - time::Duration::from_secs(10), - serve_req( - &mut handle, - mk_rsp(StatusCode::INTERNAL_SERVER_ERROR, "bad"), - ), - ) - .await - .expect("no timeouts"); - assert_rsp(rsp, StatusCode::INTERNAL_SERVER_ERROR, "bad").await; - - // We are now in failfast. 
- tracing::info!("Sending a failfast request while the circuit is broken"); - handle.allow(1); - let error = send_req(svc.clone(), http::Request::get("/")) - .await - .expect_err("service should be in failfast"); - assert!( - errors::is_caused_by::(error.as_ref()), - "service should be in failfast" - ); - - // Wait out the probation period again - tracing::info!("Waiting for probation again"); - backoffs.next().await; - task::yield_now().await; - - // The probe request succeeds - tracing::info!("Sending a good request while in probation"); - handle.allow(1); - let rsp = send_req(svc.clone(), http::Request::get("/")); - tokio::time::timeout( - time::Duration::from_secs(10), - serve_req(&mut handle, mk_rsp(StatusCode::OK, "good")), - ) - .await - .expect("no timeouts"); - assert_rsp(rsp, StatusCode::OK, "good").await; - - // The gate is now open again - tracing::info!("Sending a final good request"); - handle.allow(1); - let rsp = send_req(svc.clone(), http::Request::get("/")); - tokio::time::timeout( - time::Duration::from_secs(10), - serve_req(&mut handle, mk_rsp(StatusCode::OK, "good")), - ) - .await - .expect("no timeouts"); - assert_rsp(rsp, StatusCode::OK, "good").await; -} - -#[tokio::test(flavor = "current_thread", start_paused = true)] -async fn balancer_doesnt_select_tripped_breakers() { - let _trace = trace::test::with_default_filter(format!( - "{},linkerd_app_outbound=trace,linkerd_stack=trace,linkerd2_proxy_http_balance=trace", - trace::test::DEFAULT_LOG - )); - - let addr1 = SocketAddr::new([192, 0, 2, 41].into(), PORT); - let addr2 = SocketAddr::new([192, 0, 2, 42].into(), PORT); - let dest: NameAddr = format!("{AUTHORITY}:{PORT}") - .parse::() - .expect("dest addr is valid"); - let (svc1, mut handle1) = tower_test::mock::pair(); - let (svc2, mut handle2) = tower_test::mock::pair(); - let connect = HttpConnect::default() - .service(addr1, svc1) - .service(addr2, svc2); - let resolve = support::resolver(); - let mut dest_tx = 
resolve.endpoint_tx(dest.clone()); - dest_tx - .add([(addr1, Default::default()), (addr2, Default::default())]) - .unwrap(); - let (rt, _shutdown) = runtime(); - let cfg = default_config(); - let stack = Outbound::new(cfg.clone(), rt, &mut Default::default()) - .with_stack(svc::ArcNewService::new(connect)) - .push_http_cached(resolve) - .into_inner(); - - let backend = default_backend(&dest); - // Ensure that the probe delay is longer than the failfast timeout, so that - // the service is only probed after it has entered failfast when the gate - // shuts. - let min_backoff = cfg.http_request_queue.failfast_timeout + Duration::from_secs(1); - let backoff = ExponentialBackoff::try_new( - min_backoff, - min_backoff * 6, - // no jitter --- ensure the test is deterministic - 0.0, - ) - .unwrap(); - let (_route_tx, routes) = - watch::channel(Routes::Policy(policy::Params::Http(policy::HttpParams { - addr: dest.into(), - meta: ParentRef(client_policy::Meta::new_default("parent")), - backends: Arc::new([backend.clone()]), - routes: Arc::new([default_route(backend)]), - failure_accrual: client_policy::FailureAccrual::ConsecutiveFailures { - max_failures: 3, - backoff, - }, - }))); - let target = Target { - num: 1, - version: http::Version::H2, - routes, - }; - let svc = stack.new_service(target); - - // fail 3 requests so that we hit the consecutive failures accrual limit - let mut failed = 0; - while failed < 3 { - handle1.allow(1); - handle2.allow(1); - tracing::info!(failed); - let rsp = send_req(svc.clone(), http::Request::get("/")); - let (expected_status, expected_body) = tokio::select! 
{ - _ = serve_req(&mut handle1, mk_rsp(StatusCode::OK, "endpoint 1")) => { - tracing::info!("Balancer selected good endpoint"); - (StatusCode::OK, "endpoint 1") - } - _ = serve_req(&mut handle2, mk_rsp(StatusCode::INTERNAL_SERVER_ERROR, "endpoint 2")) => { - tracing::info!("Balancer selected bad endpoint"); - failed += 1; - (StatusCode::INTERNAL_SERVER_ERROR, "endpoint 2") - } - }; - assert_rsp(rsp, expected_status, expected_body).await; - task::yield_now().await; - } - - handle1.allow(1); - handle2.allow(1); - let rsp = send_req(svc.clone(), http::Request::get("/")); - // The load balancer will select endpoint 1, because endpoint 2 isn't ready. - serve_req(&mut handle1, mk_rsp(StatusCode::OK, "endpoint 1")).await; - assert_rsp(rsp, StatusCode::OK, "endpoint 1").await; - - // The load balancer should continue selecting the non-failing endpoint. - for _ in 0..8 { - handle1.allow(1); - handle2.allow(1); - let rsp = send_req(svc.clone(), http::Request::get("/")); - serve_req(&mut handle1, mk_rsp(StatusCode::OK, "endpoint 1")).await; - assert_rsp(rsp, StatusCode::OK, "endpoint 1").await; - } -} - -#[tokio::test(flavor = "current_thread")] -async fn route_request_timeout() { - tokio::time::pause(); - let _trace = trace::test::trace_init(); - const REQUEST_TIMEOUT: Duration = std::time::Duration::from_secs(2); - - let addr = SocketAddr::new([192, 0, 2, 41].into(), PORT); - let dest: NameAddr = format!("{AUTHORITY}:{PORT}") - .parse::() - .expect("dest addr is valid"); - let (svc, mut handle) = tower_test::mock::pair(); - let connect = HttpConnect::default().service(addr, svc); - let resolve = support::resolver().endpoint_exists(dest.clone(), addr, Default::default()); - let (rt, _shutdown) = runtime(); - let stack = Outbound::new(default_config(), rt, &mut Default::default()) - .with_stack(svc::ArcNewService::new(connect)) - .push_http_cached(resolve) - .into_inner(); - - let (_route_tx, routes) = { - let backend = default_backend(&dest); - // Set a request timeout for 
the route, and no backend request timeout - // on the backend. - let route = timeout_route(backend.clone(), Some(REQUEST_TIMEOUT), None); - watch::channel(Routes::Policy(policy::Params::Http(policy::HttpParams { - addr: dest.into(), - meta: ParentRef(client_policy::Meta::new_default("parent")), - backends: Arc::new([backend]), - routes: Arc::new([route]), - failure_accrual: client_policy::FailureAccrual::None, - }))) - }; - let target = Target { - num: 1, - version: http::Version::H2, - routes, - }; - let svc = stack.new_service(target); - - handle.allow(1); - let rsp = send_req(svc.clone(), http::Request::get("/")); - serve_req(&mut handle, mk_rsp(StatusCode::OK, "good")).await; - assert_eq!( - rsp.await.expect("request must succeed").status(), - http::StatusCode::OK - ); - - // now, time out... - let rsp = send_req(svc.clone(), http::Request::get("/")); - tokio::time::sleep(REQUEST_TIMEOUT).await; - let error = rsp.await.expect_err("request must fail with a timeout"); - assert!( - error.is::(), - "error must originate in the logical stack" - ); - assert!(errors::is_caused_by::( - error.as_ref() - )); -} - -#[tokio::test(flavor = "current_thread")] -async fn backend_request_timeout() { - tokio::time::pause(); - let _trace = trace::test::trace_init(); - // must be less than the `default_config` failfast timeout, or we'll hit - // that instead. 
- const ROUTE_REQUEST_TIMEOUT: Duration = std::time::Duration::from_secs(2); - const BACKEND_REQUEST_TIMEOUT: Duration = std::time::Duration::from_secs(1); - - let addr = SocketAddr::new([192, 0, 2, 41].into(), PORT); - let dest: NameAddr = format!("{AUTHORITY}:{PORT}") - .parse::() - .expect("dest addr is valid"); - let (svc, mut handle) = tower_test::mock::pair(); - let connect = HttpConnect::default().service(addr, svc); - let resolve = support::resolver().endpoint_exists(dest.clone(), addr, Default::default()); - let (rt, _shutdown) = runtime(); - let stack = Outbound::new(default_config(), rt, &mut Default::default()) - .with_stack(svc::ArcNewService::new(connect)) - .push_http_cached(resolve) - .into_inner(); - - let (_route_tx, routes) = { - let backend = default_backend(&dest); - // Set both a route request timeout and a backend request timeout. - let route = timeout_route( - backend.clone(), - Some(ROUTE_REQUEST_TIMEOUT), - Some(BACKEND_REQUEST_TIMEOUT), - ); - watch::channel(Routes::Policy(policy::Params::Http(policy::HttpParams { - addr: dest.into(), - meta: ParentRef(client_policy::Meta::new_default("parent")), - backends: Arc::new([backend]), - routes: Arc::new([route]), - failure_accrual: client_policy::FailureAccrual::None, - }))) - }; - let target = Target { - num: 1, - version: http::Version::H2, - routes, - }; - let svc = stack.new_service(target); - - handle.allow(1); - let rsp = send_req(svc.clone(), http::Request::get("/")); - serve_req(&mut handle, mk_rsp(StatusCode::OK, "good")).await; - assert_eq!( - rsp.await.expect("request must succeed").status(), - http::StatusCode::OK - ); - - // Now, time out... - let rsp = send_req(svc.clone(), http::Request::get("/")); - // Wait until we actually get the request --- this timeout only starts once - // the service has been acquired. 
- handle.allow(1); - let (_, send_rsp) = handle - .next_request() - .await - .expect("service must receive request"); - tokio::time::sleep(BACKEND_REQUEST_TIMEOUT + Duration::from_millis(1)).await; - // Still send a response, so that if we didn't hit the backend timeout - // timeout, we don't hit the route timeout and succeed incorrectly. - send_rsp.send_response(mk_rsp(StatusCode::OK, "good")); - let error = rsp.await.expect_err("request must fail with a timeout"); - assert!(errors::is_caused_by::( - error.as_ref() - )); - - // The route request timeout should still apply to time spent before - // the backend is acquired. - let rsp = send_req(svc.clone(), http::Request::get("/")); - tokio::time::sleep(ROUTE_REQUEST_TIMEOUT + Duration::from_millis(1)).await; - handle.allow(1); - let error = rsp.await.expect_err("request must fail with a timeout"); - assert!(errors::is_caused_by::( - error.as_ref() - )); -} +// === Utils === #[derive(Clone, Debug)] struct Target { num: usize, - version: http::Version, + version: http::Variant, routes: watch::Receiver, } @@ -480,8 +53,8 @@ impl std::hash::Hash for Target { } } -impl svc::Param for Target { - fn param(&self) -> http::Version { +impl svc::Param for Target { + fn param(&self) -> http::Variant { self.version } } @@ -515,14 +88,15 @@ impl>> svc::NewService for HttpConnect { } } +// === Utils === + #[track_caller] fn send_req( svc: impl svc::Service + Send + 'static, - builder: ::http::request::Builder, -) -> impl Future> + Send + 'static { - let mut req = builder.body(http::BoxBody::default()).unwrap(); + mut req: ::http::Request, +) -> impl Future> + Send + 'static { let span = tracing::info_span!( "send_req", "{} {} {:?}", @@ -546,37 +120,79 @@ fn send_req( async move { rsp.await.expect("request task must not panic") } } -fn mk_rsp(status: StatusCode, body: impl ToString) -> Response { - http::Response::builder() +async fn mk_rsp(status: StatusCode, body: impl ToString) -> Result { + Ok(http::Response::builder() 
.status(status) .body(http::BoxBody::new(body.to_string())) - .unwrap() + .unwrap()) +} + +async fn mk_grpc_rsp(code: tonic::Code) -> Result { + Ok(http::Response::builder() + .version(::http::Version::HTTP_2) + .header("content-type", "application/grpc") + .body(BoxBody::new(MockBody::grpc_status(code as u8))) + .unwrap()) } async fn assert_rsp( - rsp: impl Future>, + rsp: impl Future>, status: StatusCode, expected_body: T, ) where bytes::Bytes: PartialEq, { + use http_body_util::BodyExt; let rsp = rsp.await.expect("response must not fail"); assert_eq!(rsp.status(), status, "expected status code to be {status}"); - let body = hyper::body::to_bytes(rsp.into_body()) + let body = rsp + .into_body() + .collect() .await - .expect("body must not fail"); + .expect("body must not fail") + .to_bytes(); assert_eq!(body, expected_body, "expected body to be {expected_body:?}"); } -async fn serve_req(handle: &mut tower_test::mock::Handle, rsp: Response) { - tracing::debug!("Awaiting request"); - let (req, send_rsp) = handle +async fn serve( + handle: &mut tower_test::mock::Handle, + call: impl Future> + Send + 'static, +) { + let (req, tx) = handle .next_request() .await .expect("service must receive request"); tracing::debug!(?req, "Received request"); - send_rsp.send_response(rsp); - tracing::debug!(?req, "Response sent"); + + // Ensure the whole request is processed. 
+ let (parts, mut body) = req.into_parts(); + if !body.is_end_stream() { + use http_body_util::BodyExt; + while body + .frame() + .await + .transpose() + .expect("request body must not error") + .is_some() + {} + } + drop((parts, body)); + + tokio::spawn( + async move { + let res = call.await; + tracing::debug!(?res, "Sending response"); + match res { + Ok(rsp) => tx.send_response(rsp), + Err(e) => tx.send_error(e), + } + } + .in_current_span(), + ); +} + +fn http_get() -> http::Request { + http::Request::get("/").body(Default::default()).unwrap() } fn default_backend(path: impl ToString) -> client_policy::Backend { @@ -615,43 +231,93 @@ fn default_route(backend: client_policy::Backend) -> client_policy::http::Route policy: Policy { meta: Meta::new_default("test_route"), filters: NO_FILTERS.clone(), - failure_policy: Default::default(), - request_timeout: None, + params: http::RouteParams::default(), distribution: RouteDistribution::FirstAvailable(Arc::new([RouteBackend { filters: NO_FILTERS.clone(), backend, - request_timeout: None, }])), }, }], } } -fn timeout_route( +type Handle = tower_test::mock::Handle; + +fn mock_http(params: client_policy::http::RouteParams) -> (svc::BoxCloneHttp, Handle) { + let dest = "example.com:1234".parse::().unwrap(); + let backend = default_backend(&dest); + let route = mk_route(backend.clone(), params); + mock(policy::Params::Http(policy::HttpParams { + addr: dest.into(), + meta: ParentRef(client_policy::Meta::new_default("parent")), + backends: Arc::new([backend]), + routes: Arc::new([route]), + failure_accrual: client_policy::FailureAccrual::None, + })) +} + +fn mock_grpc(params: client_policy::grpc::RouteParams) -> (svc::BoxCloneHttp, Handle) { + let dest = "example.com:1234".parse::().unwrap(); + let backend = default_backend(&dest); + let route = mk_route(backend.clone(), params); + mock(policy::Params::Grpc(policy::GrpcParams { + addr: dest.into(), + meta: ParentRef(client_policy::Meta::new_default("parent")), + backends: 
Arc::new([backend]), + routes: Arc::new([route]), + failure_accrual: client_policy::FailureAccrual::None, + })) +} + +fn mock(params: policy::Params) -> (svc::BoxCloneHttp, Handle) { + let (inner, handle) = tower_test::mock::pair(); + + let addr = SocketAddr::new([192, 0, 2, 41].into(), 1234); + let connect = HttpConnect::default().service(addr, inner); + let resolve = support::resolver().endpoint_exists( + params.addr().name_addr().unwrap().clone(), + addr, + Default::default(), + ); + let (rt, shutdown) = runtime(); + let stack = Outbound::new(default_config(), rt, &mut Default::default()) + .with_stack(svc::ArcNewService::new(connect)) + .push_http_cached(resolve) + .into_inner(); + + let (tx, routes) = watch::channel(Routes::Policy(params)); + tokio::spawn(async move { + tx.closed().await; + drop(shutdown); + }); + + let svc = stack.new_service(Target { + num: 1, + version: http::Variant::H2, + routes, + }); + + (svc, handle) +} + +fn mk_route( backend: client_policy::Backend, - route_timeout: Option, - backend_timeout: Option, -) -> client_policy::http::Route { - use client_policy::{ - http::{self, Filter, Policy, Route, Rule}, - Meta, RouteBackend, RouteDistribution, - }; - use once_cell::sync::Lazy; - static NO_FILTERS: Lazy> = Lazy::new(|| Arc::new([])); - Route { + params: P, +) -> client_policy::route::Route> { + use client_policy::*; + + route::Route { hosts: vec![], - rules: vec![Rule { - matches: vec![http::r#match::MatchRequest::default()], - policy: Policy { - meta: Meta::new_default("test_route"), - filters: NO_FILTERS.clone(), - failure_policy: Default::default(), - request_timeout: route_timeout, + rules: vec![route::Rule { + matches: vec![M::default()], + policy: RoutePolicy { + meta: Meta::new_default("route"), + filters: [].into(), distribution: RouteDistribution::FirstAvailable(Arc::new([RouteBackend { - filters: NO_FILTERS.clone(), + filters: [].into(), backend, - request_timeout: backend_timeout, }])), + params, }, }], } diff --git 
a/linkerd/app/outbound/src/http/logical/tests/basic.rs b/linkerd/app/outbound/src/http/logical/tests/basic.rs new file mode 100644 index 0000000000..be2986b7a4 --- /dev/null +++ b/linkerd/app/outbound/src/http/logical/tests/basic.rs @@ -0,0 +1,52 @@ +use super::*; +use linkerd_app_core::{ + proxy::http::{self, StatusCode}, + svc, trace, NameAddr, +}; +use linkerd_proxy_client_policy as client_policy; +use std::{net::SocketAddr, sync::Arc}; +use tokio::sync::watch; + +#[tokio::test(flavor = "current_thread", start_paused = true)] +async fn routes() { + let _trace = trace::test::trace_init(); + + const AUTHORITY: &str = "logical.test.svc.cluster.local"; + const PORT: u16 = 666; + let addr = SocketAddr::new([192, 0, 2, 41].into(), PORT); + let dest: NameAddr = format!("{AUTHORITY}:{PORT}") + .parse::() + .expect("dest addr is valid"); + let (svc, mut handle) = tower_test::mock::pair(); + let connect = HttpConnect::default().service(addr, svc); + let resolve = support::resolver().endpoint_exists(dest.clone(), addr, Default::default()); + let (rt, _shutdown) = runtime(); + let stack = Outbound::new(default_config(), rt, &mut Default::default()) + .with_stack(svc::ArcNewService::new(connect)) + .push_http_cached(resolve) + .into_inner(); + + let backend = default_backend(&dest); + let (_route_tx, routes) = + watch::channel(Routes::Policy(policy::Params::Http(policy::HttpParams { + addr: dest.into(), + meta: ParentRef(client_policy::Meta::new_default("parent")), + backends: Arc::new([backend.clone()]), + routes: Arc::new([default_route(backend)]), + failure_accrual: client_policy::FailureAccrual::None, + }))); + let target = Target { + num: 1, + version: http::Variant::H2, + routes, + }; + let svc = stack.new_service(target); + + handle.allow(1); + let rsp = send_req(svc.clone(), http_get()); + serve(&mut handle, mk_rsp(StatusCode::OK, "good")).await; + assert_eq!( + rsp.await.expect("request must succeed").status(), + http::StatusCode::OK + ); +} diff --git 
a/linkerd/app/outbound/src/http/logical/tests/failure_accrual.rs b/linkerd/app/outbound/src/http/logical/tests/failure_accrual.rs new file mode 100644 index 0000000000..5c8bf979c4 --- /dev/null +++ b/linkerd/app/outbound/src/http/logical/tests/failure_accrual.rs @@ -0,0 +1,264 @@ +use super::*; +use linkerd_app_core::{ + errors, + exp_backoff::ExponentialBackoff, + proxy::http::{self, StatusCode}, + svc, trace, NameAddr, +}; +use linkerd_proxy_client_policy as client_policy; +use std::{net::SocketAddr, sync::Arc, time::Duration}; +use tokio::{sync::watch, task, time}; +use tracing::info; + +const AUTHORITY: &str = "logical.test.svc.cluster.local"; +const PORT: u16 = 666; + +#[tokio::test(flavor = "current_thread", start_paused = true)] +async fn consecutive_failures_accrue() { + let _trace = trace::test::with_default_filter(format!("{},trace", trace::test::DEFAULT_LOG)); + + let addr = SocketAddr::new([192, 0, 2, 41].into(), PORT); + let dest: NameAddr = format!("{AUTHORITY}:{PORT}") + .parse::() + .expect("dest addr is valid"); + let (svc, mut handle) = tower_test::mock::pair(); + let connect = HttpConnect::default().service(addr, svc); + let resolve = support::resolver().endpoint_exists(dest.clone(), addr, Default::default()); + let (rt, _shutdown) = runtime(); + let cfg = default_config(); + let stack = Outbound::new(cfg.clone(), rt, &mut Default::default()) + .with_stack(svc::ArcNewService::new(connect)) + .push_http_cached(resolve) + .into_inner(); + + let backend = default_backend(&dest); + // Ensure that the probe delay is longer than the failfast timeout, so that + // the service is only probed after it has entered failfast when the gate + // shuts. 
+ let min_backoff = cfg.http_request_queue.failfast_timeout + Duration::from_secs(1); + let backoff = ExponentialBackoff::try_new( + min_backoff, + min_backoff * 6, + // no jitter --- ensure the test is deterministic + 0.0, + ) + .unwrap(); + let mut backoffs = backoff.stream(); + let (_route_tx, routes) = + watch::channel(Routes::Policy(policy::Params::Http(policy::HttpParams { + addr: dest.into(), + meta: ParentRef(client_policy::Meta::new_default("parent")), + backends: Arc::new([backend.clone()]), + routes: Arc::new([default_route(backend)]), + failure_accrual: client_policy::FailureAccrual::ConsecutiveFailures { + max_failures: 3, + backoff, + }, + }))); + let target = Target { + num: 1, + version: http::Variant::H2, + routes, + }; + let svc = stack.new_service(target); + + info!("Sending good request"); + handle.allow(1); + let rsp = send_req(svc.clone(), http_get()); + serve(&mut handle, mk_rsp(StatusCode::OK, "good")).await; + assert_rsp(rsp, StatusCode::OK, "good").await; + + // fail 3 requests so that we hit the consecutive failures accrual limit + for i in 1..=3 { + info!("Sending bad request {i}/3"); + handle.allow(1); + let rsp = send_req(svc.clone(), http_get()); + serve( + &mut handle, + mk_rsp(StatusCode::INTERNAL_SERVER_ERROR, "bad"), + ) + .await; + assert_rsp(rsp, StatusCode::INTERNAL_SERVER_ERROR, "bad").await; + } + + // Ensure that the error is because of the breaker, and not because the + // underlying service doesn't poll ready. + info!("Sending request while in failfast"); + handle.allow(1); + // We are now in failfast. 
+ let error = send_req(svc.clone(), http_get()) + .await + .expect_err("service should be in failfast"); + assert!( + errors::is_caused_by::(error.as_ref()), + "service should be in failfast" + ); + + info!("Sending request while in loadshed"); + let error = send_req(svc.clone(), http_get()) + .await + .expect_err("service should be in failfast"); + assert!( + errors::is_caused_by::(error.as_ref()), + "service should be in failfast" + ); + + // After the probation period, a subsequent request should be failed by + // hitting the service. + info!("Waiting for probation"); + backoffs.next().await; + task::yield_now().await; + + info!("Sending a bad request while in probation"); + handle.allow(1); + let rsp = send_req(svc.clone(), http_get()); + info!("Serving response"); + tokio::time::timeout( + time::Duration::from_secs(10), + serve( + &mut handle, + mk_rsp(StatusCode::INTERNAL_SERVER_ERROR, "bad"), + ), + ) + .await + .expect("no timeouts"); + assert_rsp(rsp, StatusCode::INTERNAL_SERVER_ERROR, "bad").await; + + // We are now in failfast. 
+ info!("Sending a failfast request while the circuit is broken"); + handle.allow(1); + let error = send_req(svc.clone(), http_get()) + .await + .expect_err("service should be in failfast"); + assert!( + errors::is_caused_by::(error.as_ref()), + "service should be in failfast" + ); + + // Wait out the probation period again + info!("Waiting for probation again"); + backoffs.next().await; + task::yield_now().await; + + // The probe request succeeds + info!("Sending a good request while in probation"); + handle.allow(1); + let rsp = send_req(svc.clone(), http_get()); + tokio::time::timeout( + time::Duration::from_secs(10), + serve(&mut handle, mk_rsp(StatusCode::OK, "good")), + ) + .await + .expect("no timeouts"); + assert_rsp(rsp, StatusCode::OK, "good").await; + + // The gate is now open again + info!("Sending a final good request"); + handle.allow(1); + let rsp = send_req(svc.clone(), http_get()); + tokio::time::timeout( + time::Duration::from_secs(10), + serve(&mut handle, mk_rsp(StatusCode::OK, "good")), + ) + .await + .expect("no timeouts"); + assert_rsp(rsp, StatusCode::OK, "good").await; +} + +#[tokio::test(flavor = "current_thread", start_paused = true)] +async fn balancer_doesnt_select_tripped_breakers() { + let _trace = trace::test::with_default_filter(format!( + "{},linkerd_app_outbound=trace,linkerd_stack=trace,linkerd2_proxy_http_balance=trace", + trace::test::DEFAULT_LOG + )); + + let addr1 = SocketAddr::new([192, 0, 2, 41].into(), PORT); + let addr2 = SocketAddr::new([192, 0, 2, 42].into(), PORT); + let dest: NameAddr = format!("{AUTHORITY}:{PORT}") + .parse::() + .expect("dest addr is valid"); + let (svc1, mut handle1) = tower_test::mock::pair(); + let (svc2, mut handle2) = tower_test::mock::pair(); + let connect = HttpConnect::default() + .service(addr1, svc1) + .service(addr2, svc2); + let resolve = support::resolver(); + let mut dest_tx = resolve.endpoint_tx(dest.clone()); + dest_tx + .add([(addr1, Default::default()), (addr2, 
Default::default())]) + .unwrap(); + let (rt, _shutdown) = runtime(); + let cfg = default_config(); + let stack = Outbound::new(cfg.clone(), rt, &mut Default::default()) + .with_stack(svc::ArcNewService::new(connect)) + .push_http_cached(resolve) + .into_inner(); + + let backend = default_backend(&dest); + // Ensure that the probe delay is longer than the failfast timeout, so that + // the service is only probed after it has entered failfast when the gate + // shuts. + let min_backoff = cfg.http_request_queue.failfast_timeout + Duration::from_secs(1); + let backoff = ExponentialBackoff::try_new( + min_backoff, + min_backoff * 6, + // no jitter --- ensure the test is deterministic + 0.0, + ) + .unwrap(); + let (_route_tx, routes) = + watch::channel(Routes::Policy(policy::Params::Http(policy::HttpParams { + addr: dest.into(), + meta: ParentRef(client_policy::Meta::new_default("parent")), + backends: Arc::new([backend.clone()]), + routes: Arc::new([default_route(backend)]), + failure_accrual: client_policy::FailureAccrual::ConsecutiveFailures { + max_failures: 3, + backoff, + }, + }))); + let target = Target { + num: 1, + version: http::Variant::H2, + routes, + }; + let svc = stack.new_service(target); + + // fail 3 requests so that we hit the consecutive failures accrual limit + let mut failed = 0; + while failed < 3 { + handle1.allow(1); + handle2.allow(1); + info!(failed); + let rsp = send_req(svc.clone(), http_get()); + let (expected_status, expected_body) = tokio::select! 
{ + _ = serve(&mut handle1, mk_rsp(StatusCode::OK, "endpoint 1")) => { + info!("Balancer selected good endpoint"); + (StatusCode::OK, "endpoint 1") + } + _ = serve(&mut handle2, mk_rsp(StatusCode::INTERNAL_SERVER_ERROR, "endpoint 2")) => { + info!("Balancer selected bad endpoint"); + failed += 1; + (StatusCode::INTERNAL_SERVER_ERROR, "endpoint 2") + } + }; + assert_rsp(rsp, expected_status, expected_body).await; + task::yield_now().await; + } + + handle1.allow(1); + handle2.allow(1); + let rsp = send_req(svc.clone(), http_get()); + // The load balancer will select endpoint 1, because endpoint 2 isn't ready. + serve(&mut handle1, mk_rsp(StatusCode::OK, "endpoint 1")).await; + assert_rsp(rsp, StatusCode::OK, "endpoint 1").await; + + // The load balancer should continue selecting the non-failing endpoint. + for _ in 0..8 { + handle1.allow(1); + handle2.allow(1); + let rsp = send_req(svc.clone(), http_get()); + serve(&mut handle1, mk_rsp(StatusCode::OK, "endpoint 1")).await; + assert_rsp(rsp, StatusCode::OK, "endpoint 1").await; + } +} diff --git a/linkerd/app/outbound/src/http/logical/tests/headers.rs b/linkerd/app/outbound/src/http/logical/tests/headers.rs new file mode 100644 index 0000000000..cc4eccd6cc --- /dev/null +++ b/linkerd/app/outbound/src/http/logical/tests/headers.rs @@ -0,0 +1,255 @@ +use super::*; +use http_body_util::BodyExt; +use linkerd_app_core::{proxy::http::StatusCode, trace}; +use linkerd_proxy_client_policy::http::RouteParams as HttpParams; +use tokio::time; +use tracing::{info, Instrument}; + +// === HTTP === + +#[tokio::test(flavor = "current_thread", start_paused = true)] +async fn http() { + let _trace = trace::test::trace_init(); + + const TIMEOUT: time::Duration = time::Duration::from_secs(2); + let (svc, mut handle) = mock_http(HttpParams { + allow_l5d_request_headers: true, + ..Default::default() + }); + + info!("Sending a request that will initially fail and then succeed"); + tokio::spawn( + async move { + handle.allow(3); + serve(&mut 
handle, async move { + info!("Failing the first request"); + mk_rsp(StatusCode::INTERNAL_SERVER_ERROR, "").await + }) + .await; + serve(&mut handle, async move { + info!("Delaying the second request"); + time::sleep(TIMEOUT * 2).await; + mk_rsp(StatusCode::IM_A_TEAPOT, "").await + }) + .await; + serve(&mut handle, async move { + info!("Serving the third request"); + mk_rsp(StatusCode::NO_CONTENT, "").await + }) + .await; + handle + } + .in_current_span(), + ); + + info!("Verifying that we see the successful response"); + let rsp = time::timeout( + TIMEOUT * 4, + send_req( + svc.clone(), + http::Request::get("/") + .header("l5d-retry-limit", "2") + .header("l5d-retry-http", "5xx") + .header("l5d-retry-timeout", "100ms") + .body(Default::default()) + .unwrap(), + ), + ) + .await + .expect("response"); + assert_eq!(rsp.expect("response").status(), StatusCode::NO_CONTENT); +} + +#[tokio::test(flavor = "current_thread", start_paused = true)] +async fn http_requires_allow() { + let _trace = trace::test::trace_init(); + + const TIMEOUT: time::Duration = time::Duration::from_secs(2); + let (svc, mut handle) = mock_http(HttpParams { + allow_l5d_request_headers: false, + ..Default::default() + }); + + tokio::spawn( + async move { + handle.allow(2); + serve(&mut handle, mk_rsp(StatusCode::INTERNAL_SERVER_ERROR, "")).await; + serve(&mut handle, mk_rsp(StatusCode::NO_CONTENT, "")).await; + handle + } + .in_current_span(), + ); + + info!("Sending a request that will initially fail and then succeed"); + let rsp = time::timeout( + TIMEOUT, + send_req( + svc.clone(), + http::Request::get("/") + .header("l5d-retry-limit", "1") + .header("l5d-retry-http", "5xx") + .body(Default::default()) + .unwrap(), + ), + ) + .await + .expect("response"); + + info!("Verifying that we see the successful response"); + assert_eq!( + rsp.expect("response").status(), + StatusCode::INTERNAL_SERVER_ERROR + ); +} + +// === gRPC === + +#[tokio::test(flavor = "current_thread", start_paused = true)] +async 
fn grpc() { + let _trace = trace::test::trace_init(); + + const TIMEOUT: time::Duration = time::Duration::from_secs(2); + let (svc, mut handle) = mock_http(HttpParams { + allow_l5d_request_headers: true, + ..Default::default() + }); + + info!("Sending a request that will initially fail and then succeed"); + tokio::spawn( + async move { + handle.allow(3); + serve(&mut handle, async move { + info!("Failing the first request"); + mk_grpc_rsp(tonic::Code::Unavailable).await + }) + .await; + serve(&mut handle, async move { + info!("Delaying the second request"); + time::sleep(TIMEOUT).await; + mk_grpc_rsp(tonic::Code::NotFound).await + }) + .await; + serve(&mut handle, async move { + info!("Serving the third request"); + mk_grpc_rsp(tonic::Code::Ok).await + }) + .await; + handle + } + .in_current_span(), + ); + + info!("Verifying that we see the successful response"); + let (parts, mut body) = time::timeout( + TIMEOUT * 4, + send_req( + svc.clone(), + http::Request::get("/") + .version(::http::Version::HTTP_2) + .header("content-type", "application/grpc") + .header("l5d-retry-limit", "2") + .header("l5d-retry-grpc", "unavailable") + .header("l5d-retry-timeout", "100ms") + .body(Default::default()) + .unwrap(), + ), + ) + .await + .expect("response") + .expect("response") + .into_parts(); + assert_eq!(parts.status, StatusCode::OK); + let trailers = body + .frame() + .await + .expect("a result") + .expect("a frame") + .into_trailers() + .ok() + .expect("trailers frame"); + assert_eq!( + trailers + .get("grpc-status") + .expect("grpc-status") + .to_str() + .unwrap() + .parse::() + .unwrap(), + tonic::Code::Ok as u8 + ); +} + +#[tokio::test(flavor = "current_thread", start_paused = true)] +async fn grpc_requires_allow() { + let _trace = trace::test::trace_init(); + + const TIMEOUT: time::Duration = time::Duration::from_secs(2); + let (svc, mut handle) = mock_http(HttpParams { + allow_l5d_request_headers: false, + ..Default::default() + }); + + info!("Sending a request that 
will initially fail and then succeed"); + tokio::spawn( + async move { + handle.allow(3); + serve(&mut handle, async move { + info!("Failing the first request"); + mk_grpc_rsp(tonic::Code::Unavailable).await + }) + .await; + serve(&mut handle, async move { + info!("Delaying the second request"); + time::sleep(TIMEOUT).await; + mk_grpc_rsp(tonic::Code::NotFound).await + }) + .await; + serve(&mut handle, async move { + info!("Serving the third request"); + mk_grpc_rsp(tonic::Code::Ok).await + }) + .await; + handle + } + .in_current_span(), + ); + + info!("Verifying that we see the successful response"); + let (parts, mut body) = time::timeout( + TIMEOUT * 4, + send_req( + svc.clone(), + http::Request::get("/") + .version(::http::Version::HTTP_2) + .header("content-type", "application/grpc") + .header("l5d-retry-limit", "2") + .header("l5d-retry-grpc", "unavailable") + .header("l5d-retry-timeout", "100ms") + .body(Default::default()) + .unwrap(), + ), + ) + .await + .expect("response") + .expect("response") + .into_parts(); + assert_eq!(parts.status, StatusCode::OK); + let trailers = body + .frame() + .await + .expect("a result") + .expect("a frame") + .into_trailers() + .ok() + .expect("trailers frame"); + assert_eq!( + trailers + .get("grpc-status") + .expect("grpc-status") + .to_str() + .unwrap() + .parse::() + .unwrap(), + tonic::Code::Unavailable as u8 + ); +} diff --git a/linkerd/app/outbound/src/http/logical/tests/retries.rs b/linkerd/app/outbound/src/http/logical/tests/retries.rs new file mode 100644 index 0000000000..08a647941d --- /dev/null +++ b/linkerd/app/outbound/src/http/logical/tests/retries.rs @@ -0,0 +1,452 @@ +use super::*; +use http_body_util::BodyExt; +use linkerd_app_core::{ + errors, + proxy::http::{self, StatusCode}, + svc::http::stream_timeouts::StreamDeadlineError, + trace, +}; +use linkerd_proxy_client_policy::{ + self as client_policy, + grpc::{Codes, RouteParams as GrpcParams}, + http::{RouteParams as HttpParams, Timeouts}, +}; +use 
std::collections::BTreeSet; +use tokio::time; +use tonic::Code; +use tracing::{info, Instrument}; + +#[tokio::test(flavor = "current_thread", start_paused = true)] +async fn http_5xx() { + let _trace = trace::test::trace_init(); + + const TIMEOUT: time::Duration = time::Duration::from_secs(2); + let (svc, mut handle) = mock_http(HttpParams { + timeouts: Timeouts { + request: Some(TIMEOUT), + ..Default::default() + }, + retry: Some(client_policy::http::Retry { + max_retries: 1, + status_ranges: Default::default(), + max_request_bytes: 1000, + timeout: None, + backoff: None, + }), + ..Default::default() + }); + + tokio::spawn( + async move { + handle.allow(2); + serve(&mut handle, mk_rsp(StatusCode::INTERNAL_SERVER_ERROR, "")).await; + serve(&mut handle, mk_rsp(StatusCode::NO_CONTENT, "")).await; + handle + } + .in_current_span(), + ); + + info!("Sending a request that will initially fail and then succeed"); + let rsp = time::timeout(TIMEOUT, send_req(svc.clone(), http_get())) + .await + .expect("response"); + info!("Verifying that we see the successful response"); + assert_eq!(rsp.expect("response").status(), StatusCode::NO_CONTENT); +} + +#[tokio::test(flavor = "current_thread", start_paused = true)] +async fn http_5xx_limited() { + let _trace = trace::test::trace_init(); + + const TIMEOUT: time::Duration = time::Duration::from_secs(2); + let (svc, mut handle) = mock_http(HttpParams { + timeouts: Timeouts { + request: Some(TIMEOUT), + ..Default::default() + }, + retry: Some(client_policy::http::Retry { + max_retries: 2, + status_ranges: Default::default(), + max_request_bytes: 1000, + timeout: None, + backoff: None, + }), + ..Default::default() + }); + + info!("Sending a request that will initially fail and then succeed"); + tokio::spawn( + async move { + handle.allow(3); + serve(&mut handle, async move { + info!("Failing the first request"); + mk_rsp(StatusCode::INTERNAL_SERVER_ERROR, "").await + }) + .await; + serve(&mut handle, async move { + info!("Failing the 
second request"); + mk_rsp(StatusCode::INTERNAL_SERVER_ERROR, "").await + }) + .await; + serve(&mut handle, async move { + info!("Failing the third request"); + mk_rsp(StatusCode::GATEWAY_TIMEOUT, "").await + }) + .await; + info!("Prepping the fourth request (shouldn't be served)"); + serve(&mut handle, async move { + mk_rsp(StatusCode::NO_CONTENT, "").await + }) + .await; + handle + } + .in_current_span(), + ); + + info!("Verifying that the response fails with the expected error"); + let rsp = time::timeout(TIMEOUT, send_req(svc.clone(), http_get())) + .await + .expect("response"); + assert_eq!(rsp.expect("response").status(), StatusCode::GATEWAY_TIMEOUT); +} + +#[tokio::test(flavor = "current_thread", start_paused = true)] +async fn http_timeout() { + let _trace = trace::test::trace_init(); + + const TIMEOUT: time::Duration = time::Duration::from_secs(2); + let (svc, mut handle) = mock_http(HttpParams { + timeouts: Timeouts { + request: Some(TIMEOUT), + ..Default::default() + }, + retry: Some(client_policy::http::Retry { + max_retries: 1, + status_ranges: Default::default(), + max_request_bytes: 1000, + timeout: Some(TIMEOUT / 4), + backoff: None, + }), + ..Default::default() + }); + + info!("Sending a request that will initially timeout and then succeed"); + tokio::spawn( + async move { + handle.allow(2); + + serve(&mut handle, async move { + info!("Delaying the first request"); + time::sleep(TIMEOUT / 2).await; + mk_rsp(StatusCode::NOT_FOUND, "").await + }) + .await; + + serve(&mut handle, async move { + info!("Serving the second request"); + mk_rsp(StatusCode::NO_CONTENT, "").await + }) + .await; + + handle + } + .in_current_span(), + ); + + info!("Verifying that the response fails with the expected error"); + let rsp = time::timeout(TIMEOUT, send_req(svc.clone(), http_get())) + .await + .expect("response"); + assert_eq!(rsp.expect("response").status(), StatusCode::NO_CONTENT); +} + +#[tokio::test(flavor = "current_thread", start_paused = true)] +async fn 
http_timeout_on_limit() { + let _trace = trace::test::trace_init(); + + const TIMEOUT: time::Duration = time::Duration::from_secs(2); + let (svc, mut handle) = mock_http(HttpParams { + timeouts: Timeouts { + request: Some(TIMEOUT), + ..Default::default() + }, + retry: Some(client_policy::http::Retry { + max_retries: 1, + status_ranges: Default::default(), + max_request_bytes: 1000, + timeout: Some(TIMEOUT / 4), + backoff: None, + }), + ..Default::default() + }); + + tokio::spawn( + async move { + handle.allow(2); + + serve(&mut handle, async move { + info!("Delaying the first request"); + time::sleep(TIMEOUT / 3).await; + mk_rsp(StatusCode::NOT_FOUND, "").await + }) + .await; + + serve(&mut handle, async move { + info!("Delaying the second request"); + time::sleep(TIMEOUT / 3).await; + mk_rsp(StatusCode::NO_CONTENT, "").await + }) + .await; + + handle + } + .in_current_span(), + ); + + info!("Testing that a retry timeout does not apply when max retries is reached"); + let rsp = time::timeout(TIMEOUT, send_req(svc.clone(), http_get())) + .await + .expect("response"); + + info!("Verifying that the initial request was retried"); + assert_eq!(rsp.expect("response").status(), StatusCode::NO_CONTENT); +} + +#[tokio::test(flavor = "current_thread", start_paused = true)] +async fn http_timeout_with_request_timeout() { + let _trace = trace::test::trace_init(); + + const TIMEOUT: time::Duration = time::Duration::from_millis(100); + let (svc, mut handle) = mock_http(HttpParams { + timeouts: Timeouts { + request: Some(TIMEOUT * 5), + ..Default::default() + }, + retry: Some(client_policy::http::Retry { + max_retries: 2, + status_ranges: Default::default(), + max_request_bytes: 1000, + timeout: Some(TIMEOUT), + backoff: None, + }), + ..Default::default() + }); + + info!("Sending a request that will initially timeout and then succeed"); + tokio::spawn( + async move { + handle.allow(6); + + // First request. 
+ + serve(&mut handle, async move { + info!("Delaying the first request"); + time::sleep(TIMEOUT * 2).await; + mk_rsp(StatusCode::IM_A_TEAPOT, "").await + }) + .await; + + serve(&mut handle, async move { + info!("Delaying the second request"); + time::sleep(TIMEOUT * 2).await; + mk_rsp(StatusCode::IM_A_TEAPOT, "").await + }) + .await; + + serve(&mut handle, async move { + info!("Delaying the third request"); + mk_rsp(StatusCode::NO_CONTENT, "").await + }) + .await; + + // Second request + + serve(&mut handle, async move { + info!("Delaying the fourth request"); + time::sleep(TIMEOUT * 2).await; + mk_rsp(StatusCode::IM_A_TEAPOT, "").await + }) + .await; + + serve(&mut handle, async move { + info!("Delaying the fifth request"); + time::sleep(TIMEOUT * 2).await; + mk_rsp(StatusCode::IM_A_TEAPOT, "").await + }) + .await; + + serve(&mut handle, async move { + info!("Delaying the sixth request"); + time::sleep(TIMEOUT * 5).await; + mk_rsp(StatusCode::NO_CONTENT, "").await + }) + .await; + + handle + } + .in_current_span(), + ); + + info!("Verifying that the response succeeds despite retry timeouts"); + let rsp = time::timeout(TIMEOUT * 10, send_req(svc.clone(), http_get())) + .await + .expect("response timed out") + .expect("response ok"); + assert_eq!(rsp.status(), StatusCode::NO_CONTENT); + + info!("Verifying that retried requests fail with a request timeout"); + let error = time::timeout(TIMEOUT * 10, send_req(svc.clone(), http_get())) + .await + .expect("response timed out") + .expect_err("response should timeout"); + assert!(errors::is_caused_by::(&*error)); +} + +#[tokio::test(flavor = "current_thread", start_paused = true)] +async fn grpc_internal() { + let _trace = trace::test::with_default_filter("linkerd=debug"); + + const TIMEOUT: time::Duration = time::Duration::from_millis(100); + let (svc, mut handle) = mock_grpc(GrpcParams { + timeouts: Timeouts { + request: Some(TIMEOUT), + ..Default::default() + }, + retry: Some(client_policy::grpc::Retry { + 
max_retries: 1, + codes: Codes( + Some(Code::Internal as u16) + .into_iter() + .collect::>() + .into(), + ), + max_request_bytes: 1000, + timeout: None, + backoff: None, + }), + ..Default::default() + }); + + info!("Sending a request that will initially fail and then succeed"); + tokio::spawn( + async move { + handle.allow(2); + info!("Failing the first request"); + serve(&mut handle, mk_grpc_rsp(tonic::Code::Internal)).await; + info!("Serving the second request"); + serve(&mut handle, mk_grpc_rsp(tonic::Code::Ok)).await; + handle + } + .in_current_span(), + ); + + info!("Verifying that we see the successful response"); + let (parts, mut body) = time::timeout( + TIMEOUT * 10, + send_req( + svc.clone(), + http::Request::post("/svc/method") + .body(Default::default()) + .unwrap(), + ), + ) + .await + .expect("response") + .expect("response ok") + .into_parts(); + assert_eq!(parts.status, StatusCode::OK); + let trailers = loop { + match body.frame().await { + Some(Ok(frame)) => { + if let Ok(trailers) = frame.into_trailers() { + break trailers; + } else { + continue; + } + } + None | Some(Err(_)) => panic!("body did not yield trailers"), + } + }; + assert_eq!( + trailers + .get("grpc-status") + .expect("grpc-status") + .to_str() + .unwrap(), + "0" + ); +} + +#[tokio::test(flavor = "current_thread", start_paused = true)] +async fn grpc_timeout() { + let _trace = trace::test::with_default_filter("linkerd=debug"); + + const TIMEOUT: time::Duration = time::Duration::from_millis(100); + let (svc, mut handle) = mock_grpc(GrpcParams { + timeouts: Timeouts { + request: Some(TIMEOUT * 5), + ..Default::default() + }, + retry: Some(client_policy::grpc::Retry { + max_retries: 1, + timeout: Some(TIMEOUT), + codes: Codes(Default::default()), + max_request_bytes: 1000, + backoff: None, + }), + ..Default::default() + }); + + info!("Sending a request that will initially fail and then succeed"); + tokio::spawn( + async move { + handle.allow(2); + info!("Delaying the first request"); + 
serve(&mut handle, async move { + time::sleep(TIMEOUT * 2).await; + mk_grpc_rsp(tonic::Code::NotFound).await + }) + .await; + info!("Serving the second request"); + serve(&mut handle, mk_grpc_rsp(tonic::Code::Ok)).await; + handle + } + .in_current_span(), + ); + + info!("Verifying that we see the successful response"); + let (parts, mut body) = time::timeout( + TIMEOUT * 10, + send_req( + svc.clone(), + http::Request::post("/svc/method") + .body(Default::default()) + .unwrap(), + ), + ) + .await + .expect("response") + .expect("response ok") + .into_parts(); + assert_eq!(parts.status, StatusCode::OK); + let trailers = loop { + match body.frame().await { + Some(Ok(frame)) => { + if let Ok(trailers) = frame.into_trailers() { + break trailers; + } else { + continue; + } + } + None | Some(Err(_)) => panic!("body did not yield trailers"), + } + }; + assert_eq!( + trailers + .get("grpc-status") + .expect("grpc-status") + .to_str() + .unwrap(), + "0" + ); +} diff --git a/linkerd/app/outbound/src/http/logical/tests/timeouts.rs b/linkerd/app/outbound/src/http/logical/tests/timeouts.rs new file mode 100644 index 0000000000..80ebc0b6da --- /dev/null +++ b/linkerd/app/outbound/src/http/logical/tests/timeouts.rs @@ -0,0 +1,302 @@ +use super::{super::LogicalError, *}; +use http_body_util::BodyExt; +use linkerd_app_core::{ + errors, + proxy::http::{ + self, + stream_timeouts::{BodyTimeoutError, ResponseTimeoutError}, + BoxBody, + }, + trace, +}; +use linkerd_proxy_client_policy::{self as client_policy, http::Timeouts}; +use tokio::time; +use tracing::{info, Instrument}; + +#[tokio::test(flavor = "current_thread", start_paused = true)] +async fn request_timeout_response_headers() { + let _trace = trace::test::trace_init(); + + const TIMEOUT: time::Duration = time::Duration::from_secs(2); + let (svc, mut handle) = mock_http(client_policy::http::RouteParams { + timeouts: Timeouts { + request: Some(TIMEOUT), + ..Default::default() + }, + ..Default::default() + }); + + info!("Sending 
a request that does not respond within the timeout"); + handle.allow(1); + let call = send_req(svc.clone(), http_get()); + serve(&mut handle, async move { + time::sleep(TIMEOUT * 2).await; + Ok(http::Response::builder() + .status(204) + .body(http::BoxBody::default()) + .unwrap()) + }) + .await; + + info!("Verifying that the response fails with the expected error"); + let error = time::timeout(TIMEOUT * 4, call) + .await + .expect("request must fail with a timeout") + .expect_err("request must fail with a timeout"); + assert!( + error.is::(), + "error must originate in the logical stack" + ); + assert!( + matches!( + errors::cause_ref(error.as_ref()), + Some(ResponseTimeoutError::Lifetime(_)), + ), + "expected response timeout, got {error}" + ); +} + +#[tokio::test(flavor = "current_thread", start_paused = true)] +async fn request_timeout_request_body() { + let _trace = trace::test::trace_init(); + + const TIMEOUT: time::Duration = time::Duration::from_secs(2); + let (svc, mut handle) = mock_http(client_policy::http::RouteParams { + timeouts: Timeouts { + request: Some(TIMEOUT), + ..Default::default() + }, + ..Default::default() + }); + + info!("Sending a request that does not respond within the timeout"); + handle.allow(1); + let call = send_req( + svc.clone(), + http::Request::builder() + .method("POST") + .body(BoxBody::new(MockBody::pending())) + .unwrap(), + ); + + info!("Verifying that the response fails with the expected error"); + let error = time::timeout(TIMEOUT * 2, call) + .await + .expect("request must fail with a timeout") + .expect_err("request must fail with a timeout"); + assert!( + error.is::(), + "error must originate in the logical stack" + ); + assert!( + matches!( + errors::cause_ref(error.as_ref()), + Some(ResponseTimeoutError::Lifetime(_)), + ), + "expected response timeout, got {error}" + ); +} + +#[tokio::test(flavor = "current_thread", start_paused = true)] +async fn request_timeout_response_body() { + let _trace = 
trace::test::trace_init(); + + const TIMEOUT: time::Duration = time::Duration::from_secs(2); + let (svc, mut handle) = mock_http(client_policy::http::RouteParams { + timeouts: Timeouts { + request: Some(TIMEOUT), + ..Default::default() + }, + ..Default::default() + }); + + info!("Sending a request that responds immediately but does not complete"); + handle.allow(1); + let call = send_req(svc.clone(), http_get()); + serve( + &mut handle, + future::ok( + http::Response::builder() + .status(200) + .body(BoxBody::new(MockBody::pending())) + .unwrap(), + ), + ) + .await; + + info!("Verifying that the request body times out with the expected stream error"); + let mut rsp = call.await.unwrap().into_body(); + let error = time::timeout(TIMEOUT * 2, rsp.frame()) + .await + .expect("should timeout internally") + .expect("should timeout internally") + .err() + .expect("should timeout internally"); + assert!( + matches!( + errors::cause_ref(error.as_ref()), + Some(BodyTimeoutError::Lifetime(_)), + ), + "expected response timeout, got {error:?}" + ); +} + +#[tokio::test(flavor = "current_thread", start_paused = true)] +async fn response_timeout_response_headers() { + let _trace = trace::test::with_default_filter("linkerd=trace"); + + const TIMEOUT: time::Duration = time::Duration::from_secs(2); + let (svc, mut handle) = mock_http(client_policy::http::RouteParams { + timeouts: Timeouts { + response: Some(TIMEOUT), + ..Default::default() + }, + ..Default::default() + }); + + info!("Sending a request that does not respond within the timeout"); + handle.allow(1); + let call = send_req(svc.clone(), http_get()); + serve(&mut handle, async move { + time::sleep(TIMEOUT * 2).await; + Ok(http::Response::builder() + .status(204) + .body(http::BoxBody::default()) + .unwrap()) + }) + .await; + + info!("Verifying that the response fails with the expected error"); + let error = time::timeout(TIMEOUT * 4, call) + .await + .expect("should timeout internally") + .expect_err("should timeout 
internally"); + assert!( + matches!( + errors::cause_ref(error.as_ref()), + Some(ResponseTimeoutError::Response(_)), + ), + "expected response timeout, got {error:?}" + ); +} + +#[tokio::test(flavor = "current_thread", start_paused = true)] +async fn response_timeout_response_body() { + let _trace = trace::test::trace_init(); + + const TIMEOUT: time::Duration = time::Duration::from_secs(2); + let (svc, mut handle) = mock_http(client_policy::http::RouteParams { + timeouts: Timeouts { + response: Some(TIMEOUT), + ..Default::default() + }, + ..Default::default() + }); + + tokio::spawn( + async move { + handle.allow(1); + serve(&mut handle, async move { + info!("Serving a response that never completes"); + Ok(http::Response::builder() + .status(200) + .body(http::BoxBody::new(MockBody::pending())) + .unwrap()) + }) + .await; + } + .in_current_span(), + ); + + info!("Sending a request that responds immediately but does not complete"); + let mut rsp = send_req(svc.clone(), http_get()).await.unwrap().into_body(); + + info!("Verifying that the request body times out with the expected stream error"); + let error = time::timeout(TIMEOUT * 2, rsp.frame()) + .await + .expect("should timeout internally") + .expect("should timeout internally") + .err() + .expect("should timeout internally"); + assert!( + matches!( + errors::cause_ref(error.as_ref()), + Some(BodyTimeoutError::Response(_)), + ), + "expected response timeout, got {error:?}" + ); +} + +#[tokio::test(flavor = "current_thread", start_paused = true)] +async fn response_timeout_ignores_request_body() { + let _trace = trace::test::with_default_filter("linkerd=trace"); + + const TIMEOUT: time::Duration = time::Duration::from_secs(2); + let (svc, mut handle) = mock_http(client_policy::http::RouteParams { + timeouts: Timeouts { + response: Some(TIMEOUT), + ..Default::default() + }, + ..Default::default() + }); + + info!("Sending a request that exceeds the response timeout"); + handle.allow(1); + let call = 
send_req(svc.clone(), http_get()); + serve(&mut handle, async move { + info!("Serving a response that never completes"); + Ok(http::Response::builder() + .status(200) + .body(http::BoxBody::new(MockBody::pending())) + .unwrap()) + }) + .await; + + info!("Verifying that the response succeeds despite slow request time"); + time::timeout(TIMEOUT * 4, call) + .await + .expect("should succeed") + .expect("should succed"); +} + +#[tokio::test(flavor = "current_thread", start_paused = true)] +async fn idle_timeout_response_body() { + let _trace = trace::test::trace_init(); + + const TIMEOUT: time::Duration = time::Duration::from_secs(2); + let (svc, mut handle) = mock_http(client_policy::http::RouteParams { + timeouts: Timeouts { + idle: Some(TIMEOUT), + ..Default::default() + }, + ..Default::default() + }); + + info!("Sending a request that is served immediately with a body that does not update"); + handle.allow(1); + let call = send_req(svc.clone(), http_get()); + serve(&mut handle, async move { + info!("Serving a response that never completes"); + Ok(http::Response::builder() + .status(200) + .body(http::BoxBody::new(MockBody::pending())) + .unwrap()) + }) + .await; + + info!("Verifying that the request body times out with the expected stream error"); + let mut rsp = call.await.unwrap().into_body(); + let error = time::timeout(TIMEOUT * 2, rsp.frame()) + .await + .expect("should timeout internally") + .expect("should timeout internally") + .err() + .expect("should timeout internally"); + assert!( + matches!( + errors::cause_ref(error.as_ref()), + Some(BodyTimeoutError::Idle(_)), + ), + "expected idle timeout, got {error:?}" + ); +} diff --git a/linkerd/app/outbound/src/http/retry.rs b/linkerd/app/outbound/src/http/retry.rs index 625ff19648..1eca23c730 100644 --- a/linkerd/app/outbound/src/http/retry.rs +++ b/linkerd/app/outbound/src/http/retry.rs @@ -4,13 +4,13 @@ use linkerd_app_core::{ http_metrics::retries::Handle, metrics::{self, ProfileRouteLabels}, 
profiles::{self, http::Route}, - proxy::http::{ClientHandle, EraseResponse, HttpBody}, + proxy::http::{Body, ClientHandle, EraseResponse}, svc::{layer, Either, Param}, - Error, + Error, Result, }; use linkerd_http_classify::{Classify, ClassifyEos, ClassifyResponse}; use linkerd_http_retry::{ - with_trailers::{self, WithTrailers}, + peek_trailers::{self, PeekTrailersBody}, ReplayBody, }; use linkerd_retry as retry; @@ -19,11 +19,10 @@ use std::sync::Arc; pub fn layer( metrics: metrics::HttpProfileRouteRetry, ) -> impl layer::Layer>> + Clone { - retry::layer(NewRetryPolicy::new(metrics)) - // Because we wrap the response body type on retries, we must include a - // `Proxy` middleware for unifying the response body types of the retry - // and non-retry services. - .with_proxy(EraseResponse::new(())) + // Because we wrap the response body type on retries, we must include a + // `Proxy` middleware for unifying the response body types of the retry + // and non-retry services. + retry::NewRetry::layer(NewRetryPolicy::new(metrics), EraseResponse::new(())) } #[derive(Clone, Debug)] @@ -34,7 +33,7 @@ pub struct NewRetryPolicy { #[derive(Clone, Debug)] pub struct RetryPolicy { metrics: Handle, - budget: Arc, + budget: Arc, response_classes: profiles::http::ResponseClasses, } @@ -68,20 +67,23 @@ where // === impl Retry === -impl retry::Policy>, http::Response>, E> +impl + retry::Policy>, http::Response>, Error> for RetryPolicy where - A: HttpBody + Unpin, - A::Error: Into, - B: HttpBody + Unpin, + ReqB: Body + Unpin, + ReqB::Error: Into, + RspB: Body + Unpin, { - type Future = future::Ready; + type Future = future::Ready<()>; fn retry( - &self, - req: &http::Request>, - result: Result<&http::Response>, &E>, + &mut self, + req: &mut http::Request>, + result: &mut Result>, Error>, ) -> Option { + use retry::Budget as _; + let retryable = match result { Err(_) => false, Ok(rsp) => { @@ -89,12 +91,21 @@ where let is_failure = classify::Request::from(self.response_classes.clone()) 
.classify(req) .start(rsp) - .eos(rsp.body().trailers()) + .eos(rsp.body().peek_trailers()) .is_failure(); // did the body exceed the maximum length limit? let exceeded_max_len = req.body().is_capped(); - let retryable = is_failure && !exceeded_max_len; - tracing::trace!(is_failure, exceeded_max_len, retryable); + let retryable = if let Some(false) = exceeded_max_len { + // If the body hasn't exceeded our length limit, we should + // retry the request if it's a failure of some sort. + is_failure + } else { + // We received a response before the request body was fully + // finished streaming. To be safe, we will consider this + // as an unretryable request. + false + }; + tracing::trace!(is_failure, ?exceeded_max_len, retryable); retryable } }; @@ -104,19 +115,19 @@ where return None; } - let withdrew = self.budget.withdraw().is_ok(); + let withdrew = self.budget.withdraw(); self.metrics.incr_retryable(withdrew); if !withdrew { return None; } - Some(future::ready(self.clone())) + Some(future::ready(())) } fn clone_request( - &self, - req: &http::Request>, - ) -> Option>> { + &mut self, + req: &http::Request>, + ) -> Option>> { // Since the body is already wrapped in a ReplayBody, it must not be obviously too large to // buffer/clone. 
let mut clone = http::Request::new(req.body().clone()); @@ -139,25 +150,27 @@ where } } -impl retry::PrepareRetry, http::Response, E> for RetryPolicy +impl retry::PrepareRetry, http::Response> for RetryPolicy where - A: HttpBody + Unpin, - A::Error: Into, - B: HttpBody + Unpin + Send + 'static, - B::Data: Unpin + Send, - B::Error: Unpin + Send, + ReqB: Body + Unpin, + ReqB::Error: Into, + RspB: Body + Unpin + Send + 'static, + RspB::Data: Unpin + Send, + RspB::Error: Unpin + Send, { - type RetryRequest = http::Request>; - type RetryResponse = http::Response>; + type RetryRequest = http::Request>; + type RetryResponse = http::Response>; type ResponseFuture = future::Map< - with_trailers::WithTrailersFuture, - fn(http::Response>) -> Result>, E>, + peek_trailers::WithPeekTrailersBody, + fn( + http::Response>, + ) -> Result>>, >; fn prepare_request( - &self, - req: http::Request, - ) -> Either> { + self, + req: http::Request, + ) -> Either<(Self, Self::RetryRequest), http::Request> { let (head, body) = req.into_parts(); let replay_body = match ReplayBody::try_new(body, MAX_BUFFERED_BYTES) { Ok(body) => body, @@ -166,18 +179,18 @@ where size = body.size_hint().lower(), "Body is too large to buffer" ); - return Either::B(http::Request::from_parts(head, body)); + return Either::Right(http::Request::from_parts(head, body)); } }; // The body may still be too large to be buffered if the body's length was not known. // `ReplayBody` handles this gracefully. - Either::A(http::Request::from_parts(head, replay_body)) + Either::Left((self, http::Request::from_parts(head, replay_body))) } /// If the response is HTTP/2, return a future that checks for a `TRAILERS` /// frame immediately after the first frame of the response. 
- fn prepare_response(rsp: http::Response) -> Self::ResponseFuture { - WithTrailers::map_response(rsp).map(Ok) + fn prepare_response(rsp: http::Response) -> Self::ResponseFuture { + PeekTrailersBody::map_response(rsp).map(Ok) } } diff --git a/linkerd/app/outbound/src/http/server.rs b/linkerd/app/outbound/src/http/server.rs index 91e150aa35..70cc87cbee 100644 --- a/linkerd/app/outbound/src/http/server.rs +++ b/linkerd/app/outbound/src/http/server.rs @@ -47,7 +47,7 @@ impl Outbound> { .check_new_service::>() .push(ServerRescue::layer(config.emit_headers)) .check_new_service::>() - // Initiates OpenCensus tracing. + // Initiates OpenTelemetry tracing. .push_on_service(http_tracing::server(rt.span_sink.clone(), trace_labels())) .push_on_service(http::BoxResponse::layer()) // Convert origin form HTTP/1 URIs to absolute form for Hyper's @@ -68,7 +68,7 @@ impl Outbound { > where // Target - T: svc::Param, + T: svc::Param, T: Clone + Send + Unpin + 'static, // Server-side socket I: io::AsyncRead + io::AsyncWrite + io::PeerAddr + Send + Unpin + 'static, @@ -121,6 +121,24 @@ impl errors::HttpRescue for ServerRescue { fn rescue(&self, error: Error) -> Result { use super::logical::policy::errors as policy; + // No available backend can be found for a request. + if errors::is_caused_by::(&*error) { + // XXX(ver) This should probably be SERVICE_UNAVAILABLE, because + // this is basically no different from a LoadShedError, but that + // would be a change in behavior. + return Ok(errors::SyntheticHttpResponse::gateway_timeout(error)); + } + if errors::is_caused_by::(&*error) { + return Ok(errors::SyntheticHttpResponse::unavailable(error)); + } + + // Handle policy-driven timeouts. + if errors::is_caused_by::(&*error) { + return Ok(errors::SyntheticHttpResponse::gateway_timeout_nonfatal( + error, + )); + } + // A profile configured request timeout was encountered. 
if errors::is_caused_by::(&*error) { return Ok(errors::SyntheticHttpResponse::gateway_timeout(error)); @@ -132,16 +150,6 @@ impl errors::HttpRescue for ServerRescue { return Ok(errors::SyntheticHttpResponse::bad_gateway(error)); } - // No available backend can be found for a request. - if errors::is_caused_by::(&*error) { - // XXX(ver) This should probably be SERVICE_UNAVAILABLE, because - // this is basically no different from a LoadShedError, but that - // would be a change in behavior. - return Ok(errors::SyntheticHttpResponse::gateway_timeout(error)); - } - if errors::is_caused_by::(&*error) { - return Ok(errors::SyntheticHttpResponse::unavailable(error)); - } if errors::is_caused_by::(&*error) { return Ok(errors::SyntheticHttpResponse::bad_gateway(error)); } @@ -188,7 +196,7 @@ impl errors::HttpRescue for ServerRescue { impl svc::ExtractParam for ExtractServerParams where - T: svc::Param, + T: svc::Param, { #[inline] fn extract_param(&self, t: &T) -> http::ServerParams { diff --git a/linkerd/app/outbound/src/ingress.rs b/linkerd/app/outbound/src/ingress.rs index 06534f73fd..bba788d57c 100644 --- a/linkerd/app/outbound/src/ingress.rs +++ b/linkerd/app/outbound/src/ingress.rs @@ -1,6 +1,6 @@ use crate::{http, opaq, policy, Discovery, Outbound, ParentRef}; use linkerd_app_core::{ - detect, errors, io, profiles, + errors, io, profiles, proxy::{ api_resolve::{ConcreteAddr, Metadata}, core::Resolve, @@ -19,11 +19,14 @@ use tracing::Instrument; #[derive(Clone, Debug, PartialEq, Eq, Hash)] struct Http { parent: T, - version: http::Version, + version: http::Variant, } -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -struct Opaq(Discovery); +#[derive(Clone, Debug)] +struct Opaq { + orig_dst: OrigDstAddr, + routes: watch::Receiver, +} #[derive(Clone, Debug)] struct SelectTarget(Http); @@ -91,7 +94,7 @@ impl Outbound<()> { let discover = discover.clone(); self.to_tcp_connect() .push_opaq_cached(resolve.clone()) - .map_stack(|_, _, stk| stk.push_map_target(Opaq)) + 
.map_stack(|_, _, stk| stk.push_map_target(Opaq::from)) .push_discover(svc::mk(move |OrigDstAddr(addr)| { discover.clone().oneshot(DiscoverAddr(addr.into())) })) @@ -271,7 +274,14 @@ impl Outbound { NSvc::Future: Send, { self.map_stack(|config, rt, inner| { - let detect_http = config.proxy.detect_http(); + let detect_params = http::DetectParams { + read_timeout: config.proxy.detect_protocol_timeout, + metrics: rt + .metrics + .prom + .http_detect + .metrics(ParentRef(policy::Meta::new_default("ingress"))), + }; // Route requests with destinations that can be discovered via the // `l5d-dst-override` header through the (load balanced) logical @@ -289,10 +299,12 @@ impl Outbound { .push_on_service( svc::layers() .push(http::BoxRequest::layer()) - .push(http::strip_header::request::layer(DST_OVERRIDE_HEADER)) + .push(http::strip_header::request::layer(DST_OVERRIDE_HEADER)), ) .lift_new() - .push(svc::NewOneshotRoute::layer_via(|t: &Http| SelectTarget(t.clone()))) + .push(svc::NewOneshotRoute::layer_via(|t: &Http| { + SelectTarget(t.clone()) + })) .check_new_service::, http::Request<_>>(); // HTTP detection is **always** performed. 
If detection fails, then we @@ -304,26 +316,29 @@ impl Outbound { let h2 = config.proxy.server.http2.clone(); let drain = rt.drain.clone(); move |http: &Http| http::ServerParams { - version: http.version, - http2: h2.clone(), - drain: drain.clone() + version: http.version, + http2: h2.clone(), + drain: drain.clone(), } })) .check_new_service::, I>() .push_switch( - |(detected, target): (detect::Result, T)| -> Result<_, Infallible> { - if let Some(version) = detect::allow_timeout(detected) { - return Ok(svc::Either::A(Http { - version, - parent: target, - })); + |(detected, parent): (http::Detection, T)| -> Result<_, Infallible> { + match detected { + http::Detection::Http(version) => { + return Ok(svc::Either::Left(Http { version, parent })); + } + http::Detection::ReadTimeout(timeout) => { + tracing::info!("Continuing after timeout: {timeout:?}"); + } + _ => {} } - Ok(svc::Either::B(target)) + Ok(svc::Either::Right(parent)) }, fallback, ) .lift_new_with_target() - .push(detect::NewDetectService::layer(detect_http)) + .push(http::NewDetect::layer(svc::CloneParam::from(detect_params))) .arc_new_tcp() }) } @@ -351,8 +366,8 @@ where // Use the request's version. let version = match req.version() { - ::http::Version::HTTP_2 => http::Version::H2, - ::http::Version::HTTP_10 | ::http::Version::HTTP_11 => http::Version::Http1, + ::http::Version::HTTP_2 => http::Variant::H2, + ::http::Version::HTTP_10 | ::http::Version::HTTP_11 => http::Variant::Http1, _ => unreachable!("Only HTTP/1 and HTTP/2 are supported"), }; @@ -365,8 +380,8 @@ where // === impl Http === -impl svc::Param for Http { - fn param(&self) -> http::Version { +impl svc::Param for Http { + fn param(&self) -> http::Variant { self.version } } @@ -517,7 +532,7 @@ fn mk_profile_routes( fn policy_routes( addr: Addr, - version: http::Version, + version: http::Variant, policy: &policy::ClientPolicy, ) -> Option { let meta = ParentRef(policy.parent.clone()); @@ -528,8 +543,8 @@ fn policy_routes( .. 
} => { let (routes, failure_accrual) = match version { - http::Version::Http1 => (http1.routes.clone(), http1.failure_accrual), - http::Version::H2 => (http2.routes.clone(), http2.failure_accrual), + http::Variant::Http1 => (http1.routes.clone(), http1.failure_accrual), + http::Variant::H2 => (http2.routes.clone(), http2.failure_accrual), }; Some(http::Routes::Policy(http::policy::Params::Http( http::policy::HttpParams { @@ -600,42 +615,41 @@ impl std::hash::Hash for Logical { } } -// === impl Opaq === +impl From> for Opaq +where + T: svc::Param, +{ + fn from(discovery: Discovery) -> Self { + use svc::Param; -impl std::ops::Deref for Opaq { - type Target = T; + let orig_dst: OrigDstAddr = discovery.param(); + let routes = opaq::routes_from_discovery( + Addr::Socket(orig_dst.into()), + discovery.param(), + discovery.param(), + ); - fn deref(&self) -> &Self::Target { - &self.0 + Self { routes, orig_dst } } } -impl svc::Param> for Opaq -where - T: svc::Param, -{ - fn param(&self) -> Remote { - let OrigDstAddr(addr) = (*self.0).param(); - Remote(ServerAddr(addr)) +impl PartialEq for Opaq { + fn eq(&self, other: &Self) -> bool { + self.orig_dst == other.orig_dst } } -impl svc::Param for Opaq -where - T: svc::Param, -{ - fn param(&self) -> opaq::Logical { - if let Some(profile) = svc::Param::>::param(&self.0) { - if let Some(profiles::LogicalAddr(addr)) = profile.logical_addr() { - return opaq::Logical::Route(addr, profile); - } +impl Eq for Opaq {} - if let Some((addr, metadata)) = profile.endpoint() { - return opaq::Logical::Forward(Remote(ServerAddr(addr)), metadata); - } - } +impl std::hash::Hash for Opaq { + fn hash(&self, state: &mut H) { + self.orig_dst.hash(state); + } +} - opaq::Logical::Forward(self.param(), Default::default()) +impl svc::Param> for Opaq { + fn param(&self) -> watch::Receiver { + self.routes.clone() } } diff --git a/linkerd/app/outbound/src/lib.rs b/linkerd/app/outbound/src/lib.rs index 75ee6c70d5..17405e2bb0 100644 --- 
a/linkerd/app/outbound/src/lib.rs +++ b/linkerd/app/outbound/src/lib.rs @@ -10,7 +10,7 @@ use linkerd_app_core::{ config::{ProxyConfig, QueueConfig}, drain, exp_backoff::ExponentialBackoff, - http_tracing::OpenCensusSink, + http_tracing::SpanSink, identity, io, metrics::prom, profiles, @@ -21,9 +21,9 @@ use linkerd_app_core::{ tap, }, svc::{self, ServiceExt}, - tls, + tls::ConnectMeta as TlsConnectMeta, transport::addrs::*, - AddrMatch, Error, ProxyRuntime, + AddrMatch, Error, NameAddr, ProxyRuntime, }; use linkerd_tonic_stream::ReceiveLimits; use std::{ @@ -46,6 +46,8 @@ mod sidecar; pub mod tcp; #[cfg(any(test, feature = "test-util"))] pub mod test_util; +pub mod tls; +mod zone; pub use self::discover::{spawn_synthesized_profile_policy, synthesize_forward_policy, Discovery}; use self::metrics::OutboundMetrics; @@ -95,11 +97,11 @@ struct Runtime { metrics: OutboundMetrics, identity: identity::NewClient, tap: tap::Registry, - span_sink: OpenCensusSink, + span_sink: Option, drain: drain::Watch, } -pub type ConnectMeta = tls::ConnectMeta>; +pub type ConnectMeta = TlsConnectMeta>; /// A reference to a frontend/apex resource, usually a service. 
#[derive(Clone, Debug, PartialEq, Eq, Hash)] @@ -141,20 +143,27 @@ impl Outbound<()> { client: C, backoff: ExponentialBackoff, limits: ReceiveLimits, + export_hostname_labels: bool, ) -> impl policy::GetPolicy where C: tonic::client::GrpcService, C: Clone + Unpin + Send + Sync + 'static, - C::ResponseBody: proxy::http::HttpBody, - C::ResponseBody: Default + Send + 'static, + C::ResponseBody: proxy::http::Body, + C::ResponseBody: Send + 'static, C::Future: Send, { - policy::Api::new(workload, limits, Duration::from_secs(10), client) - .into_watch(backoff) - .map_result(|response| match response { - Err(e) => Err(e.into()), - Ok(rsp) => Ok(rsp.into_inner()), - }) + policy::Api::new( + workload, + limits, + Duration::from_secs(10), + export_hostname_labels, + client, + ) + .into_watch(backoff) + .map_result(|res| match res { + Err(e) => Err(e.into()), + Ok(rsp) => Ok(rsp.into_inner()), + }) } #[cfg(any(test, feature = "test-util"))] @@ -340,3 +349,23 @@ impl EndpointRef { static UNKNOWN_META: once_cell::sync::Lazy> = once_cell::sync::Lazy::new(|| policy::Meta::new_default("unknown")); + +pub(crate) fn service_meta(addr: &NameAddr) -> Option> { + let mut parts = addr.name().split('.'); + + let name = parts.next()?; + let namespace = parts.next()?; + + if !parts.next()?.eq_ignore_ascii_case("svc") { + return None; + } + + Some(Arc::new(policy::Meta::Resource { + group: "core".to_string(), + kind: "Service".to_string(), + namespace: namespace.to_string(), + name: name.to_string(), + section: None, + port: Some(addr.port().try_into().ok()?), + })) +} diff --git a/linkerd/app/outbound/src/metrics.rs b/linkerd/app/outbound/src/metrics.rs index 7a7fef5a47..8f1f7a2041 100644 --- a/linkerd/app/outbound/src/metrics.rs +++ b/linkerd/app/outbound/src/metrics.rs @@ -16,6 +16,7 @@ use linkerd_app_core::{ use std::fmt::Write; pub(crate) mod error; +pub(crate) mod transport; pub use linkerd_app_core::{metrics::*, proxy::balance}; /// Holds LEGACY outbound proxy metrics. 
@@ -35,8 +36,12 @@ pub struct OutboundMetrics { #[derive(Clone, Debug, Default)] pub(crate) struct PromMetrics { + pub(crate) protocol: crate::protocol::MetricsFamilies, + pub(crate) http_detect: crate::http::DetectMetricsFamilies, pub(crate) http: crate::http::HttpMetrics, pub(crate) opaq: crate::opaq::OpaqMetrics, + pub(crate) tls: crate::tls::TlsMetrics, + pub(crate) zone: crate::zone::TcpZoneMetrics, } #[derive(Clone, Debug, Hash, PartialEq, Eq)] @@ -47,7 +52,7 @@ pub struct BalancerMetricsParams(balance::MetricFamilies); struct ScopedKey<'a, 'b>(&'a str, &'b str); -// === impl BalancerMetricsPaarams === +// === impl BalancerMetricsParams === impl BalancerMetricsParams where @@ -80,17 +85,35 @@ where Self(balance::MetricFamilies::default()) } } + // === impl PromMetrics === impl PromMetrics { pub fn register(registry: &mut prom::Registry) -> Self { + let protocol = crate::protocol::MetricsFamilies::register( + registry.sub_registry_with_prefix("tcp_protocol"), + ); + let http_detect = crate::http::DetectMetricsFamilies::register( + // Scoped consistently with the inbound metrics. + registry.sub_registry_with_prefix("tcp_detect_http"), + ); + // NOTE: HTTP metrics are scoped internally, since this configures both // HTTP and gRPC scopes. 
let http = crate::http::HttpMetrics::register(registry); let opaq = crate::opaq::OpaqMetrics::register(registry.sub_registry_with_prefix("tcp")); + let zone = crate::zone::TcpZoneMetrics::register(registry.sub_registry_with_prefix("tcp")); + let tls = crate::tls::TlsMetrics::register(registry.sub_registry_with_prefix("tls")); - Self { http, opaq } + Self { + protocol, + http_detect, + http, + opaq, + tls, + zone, + } } } diff --git a/linkerd/app/outbound/src/metrics/transport.rs b/linkerd/app/outbound/src/metrics/transport.rs new file mode 100644 index 0000000000..c2a2c6889e --- /dev/null +++ b/linkerd/app/outbound/src/metrics/transport.rs @@ -0,0 +1,263 @@ +use crate::{opaq, tls}; +use linkerd_app_core::{ + io, + metrics::prom::{self, encoding::*, registry::Registry, EncodeLabelSetMut, Family}, + svc::{layer, NewService, Param, Service}, + Error, +}; +use std::{fmt::Debug, hash::Hash}; +use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, +}; + +#[derive(Clone, Debug)] +pub(crate) struct TransportRouteMetricsFamily { + open: Family, + close: Family, prom::Counter>, +} + +#[derive(Clone, Debug)] +struct TransportRouteMetrics { + open: prom::Counter, + close_no_err: prom::Counter, + close_forbidden: prom::Counter, + close_invalid_backend: prom::Counter, + close_invalid_policy: prom::Counter, + close_unexpected: prom::Counter, +} + +#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] +enum ErrorKind { + Forbidden, + InvalidBackend, + InvalidPolicy, + Unexpected, +} + +#[derive(Clone, Debug, Hash, PartialEq, Eq)] +struct ConnectionsClosedLabels { + labels: L, + error: Option, +} + +#[derive(Clone, Debug)] +pub(crate) struct NewTransportRouteMetrics { + inner: N, + family: TransportRouteMetricsFamily, +} + +#[derive(Clone, Debug)] +pub(crate) struct TransportRouteMetricsService { + inner: S, + metrics: TransportRouteMetrics, +} +// === impl TransportRouteMetricsFamily === + +impl Default for TransportRouteMetricsFamily +where + L: Clone + Hash + Eq, +{ + fn 
default() -> Self { + Self { + open: prom::Family::default(), + close: prom::Family::default(), + } + } +} + +impl TransportRouteMetricsFamily +where + L: Clone + Hash + Eq + EncodeLabelSetMut + Debug + Send + Sync + 'static, +{ + pub(crate) fn register(registry: &mut Registry) -> Self { + let open = prom::Family::::default(); + registry.register("open", "The number of connections opened", open.clone()); + + let close = prom::Family::, prom::Counter>::default(); + registry.register("close", "The number of connections closed", close.clone()); + + Self { open, close } + } + + fn closed_counter(&self, labels: &L, error: Option) -> prom::Counter { + self.close + .get_or_create(&ConnectionsClosedLabels { + labels: labels.clone(), + error, + }) + .clone() + } + + fn metrics(&self, labels: L) -> TransportRouteMetrics { + TransportRouteMetrics { + open: self.open.get_or_create(&labels).clone(), + close_no_err: self.closed_counter(&labels, None), + close_forbidden: self.closed_counter(&labels, Some(ErrorKind::Forbidden)), + close_invalid_backend: self.closed_counter(&labels, Some(ErrorKind::InvalidBackend)), + close_invalid_policy: self.closed_counter(&labels, Some(ErrorKind::InvalidPolicy)), + close_unexpected: self.closed_counter(&labels, Some(ErrorKind::Unexpected)), + } + } +} + +impl ErrorKind { + fn mk(err: &(dyn std::error::Error + 'static)) -> Self { + if err.is::() { + ErrorKind::Forbidden + } else if err.is::() { + ErrorKind::InvalidBackend + } else if err.is::() { + ErrorKind::InvalidPolicy + } else if err.is::() { + ErrorKind::Forbidden + } else if err.is::() { + ErrorKind::InvalidBackend + } else if err.is::() { + ErrorKind::InvalidPolicy + } else if let Some(e) = err.source() { + Self::mk(e) + } else { + ErrorKind::Unexpected + } + } +} + +impl std::fmt::Display for ErrorKind { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Forbidden => write!(f, "forbidden"), + Self::InvalidBackend => write!(f, 
"invalid_backend"), + Self::InvalidPolicy => write!(f, "invalid_policy"), + Self::Unexpected => write!(f, "unexpected"), + } + } +} + +// === impl ConnectionsClosedLabels === + +impl EncodeLabelSetMut for ConnectionsClosedLabels +where + L: Clone + Hash + Eq + EncodeLabelSetMut + Debug + Send + Sync + 'static, +{ + fn encode_label_set(&self, enc: &mut LabelSetEncoder<'_>) -> std::fmt::Result { + self.labels.encode_label_set(enc)?; + match self.error { + Some(error) => ("error", error.to_string()).encode(enc.encode_label())?, + None => ("error", "").encode(enc.encode_label())?, + } + + Ok(()) + } +} + +impl EncodeLabelSet for ConnectionsClosedLabels +where + L: Clone + Hash + Eq + EncodeLabelSetMut + Debug + Send + Sync + 'static, +{ + fn encode(&self, mut enc: LabelSetEncoder<'_>) -> std::fmt::Result { + self.encode_label_set(&mut enc) + } +} + +// === impl NewTransportRouteMetrics === + +impl NewTransportRouteMetrics { + pub fn layer( + family: TransportRouteMetricsFamily, + ) -> impl layer::Layer + Clone { + layer::mk(move |inner| Self { + inner, + family: family.clone(), + }) + } +} + +impl NewService for NewTransportRouteMetrics +where + N: NewService, + L: Clone + Hash + Eq + EncodeLabelSetMut + Debug + Send + Sync + 'static, + T: Param + Clone, +{ + type Service = TransportRouteMetricsService; + + fn new_service(&self, target: T) -> Self::Service { + let labels: L = target.param(); + let metrics = self.family.metrics(labels); + let svc = self.inner.new_service(target); + TransportRouteMetricsService::new(svc, metrics) + } +} + +// === impl TransportRouteMetricsService === + +impl TransportRouteMetricsService { + fn new(inner: S, metrics: TransportRouteMetrics) -> Self { + Self { inner, metrics } + } +} + +impl Service for TransportRouteMetricsService +where + I: io::AsyncRead + io::AsyncWrite + Send + 'static, + S: Service + Send + Clone + 'static, + S::Error: Into, + S::Future: Send, +{ + type Response = S::Response; + type Error = Error; + type Future = 
Pin> + Send + 'static>>; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx).map_err(Into::into) + } + + fn call(&mut self, io: I) -> Self::Future { + let metrics = self.metrics.clone(); + + self.metrics.inc_open(); + let call = self.inner.call(io); + + Box::pin(async move { + match call.await.map_err(Into::into) { + Ok(result) => { + metrics.inc_closed(None); + Ok(result) + } + Err(error) => { + metrics.inc_closed(Some(&*error)); + Err(error) + } + } + }) + } +} + +// === impl TransportRouteMetrics === + +impl TransportRouteMetrics { + fn inc_open(&self) { + self.open.inc(); + } + fn inc_closed(&self, err: Option<&(dyn std::error::Error + 'static)>) { + match err.map(ErrorKind::mk) { + Some(ErrorKind::Forbidden) => { + self.close_forbidden.inc(); + } + Some(ErrorKind::InvalidBackend) => { + self.close_invalid_backend.inc(); + } + Some(ErrorKind::InvalidPolicy) => { + self.close_invalid_policy.inc(); + } + Some(ErrorKind::Unexpected) => { + self.close_unexpected.inc(); + } + None => { + self.close_no_err.inc(); + } + } + } +} diff --git a/linkerd/app/outbound/src/opaq.rs b/linkerd/app/outbound/src/opaq.rs index a1a402fd7c..c126aa2d2d 100644 --- a/linkerd/app/outbound/src/opaq.rs +++ b/linkerd/app/outbound/src/opaq.rs @@ -1,4 +1,4 @@ -use crate::{tcp, Outbound}; +use crate::{policy, service_meta, tcp, Outbound, ParentRef, UNKNOWN_META}; use linkerd_app_core::{ io, metrics::prom, @@ -9,21 +9,24 @@ use linkerd_app_core::{ }, svc, transport::addrs::*, - Error, + Addr, Error, NameAddr, }; -use std::{fmt::Debug, hash::Hash}; +use once_cell::sync::Lazy; +use std::{fmt::Debug, hash::Hash, sync::Arc}; +use tokio::sync::watch; mod concrete; mod logical; -pub use self::logical::Logical; +pub use self::logical::{route::filters::errors::*, Concrete, Logical, Routes}; #[derive(Clone, Debug, PartialEq, Eq, Hash)] -struct Opaq(Logical); +struct Opaq(T); #[derive(Clone, Debug, Default)] pub struct OpaqMetrics { balance: 
concrete::BalancerMetrics, + route: logical::route::TcpRouteMetrics, } // === impl Outbound === @@ -36,8 +39,8 @@ impl Outbound { pub fn push_opaq_cached(self, resolve: R) -> Outbound> where // Opaque target - T: svc::Param, - T: Clone + Send + Sync + 'static, + T: Clone + Debug + PartialEq + Eq + Hash + Send + Sync + 'static, + T: svc::Param>, // Server-side connection I: io::AsyncRead + io::AsyncWrite + io::PeerAddr, I: Debug + Send + Sync + Unpin + 'static, @@ -57,7 +60,7 @@ impl Outbound { stk.push_new_idle_cached(config.discovery_idle_timeout) // Use a dedicated target type to configure parameters for // the opaque stack. It also helps narrow the cache key. - .push_map_target(|t: T| Opaq(t.param())) + .push_map_target(Opaq) .arc_new_clone_tcp() }) } @@ -65,18 +68,12 @@ impl Outbound { // === impl Opaq === -impl svc::Param for Opaq { - fn param(&self) -> Logical { - self.0.clone() - } -} - -impl svc::Param> for Opaq { - fn param(&self) -> Option { - match self.0.param() { - Logical::Route(_, rx) => Some(rx), - _ => None, - } +impl svc::Param> for Opaq +where + T: svc::Param>, +{ + fn param(&self) -> watch::Receiver { + self.0.param() } } @@ -86,6 +83,176 @@ impl OpaqMetrics { pub fn register(registry: &mut prom::Registry) -> Self { let balance = concrete::BalancerMetrics::register(registry.sub_registry_with_prefix("balancer")); - Self { balance } + let route = + logical::route::TcpRouteMetrics::register(registry.sub_registry_with_prefix("route")); + Self { balance, route } } } + +/// Given both profiles and policy information, this function constructs `opaq::Routes``. +/// The decision on whether profiles or policy should be used is made by inspecting the +/// returned profiles and checking whether there are any targets defined. This is done +/// in order to support traffic splits. Everything else should be delivered through client +/// policy. 
+pub fn routes_from_discovery( + addr: Addr, + profile: Option, + mut policy: policy::Receiver, +) -> watch::Receiver { + let profile_addr = if let Some(mut rx) = profile.map(watch::Receiver::from) { + let profile = rx.borrow_and_update(); + if let Some(profiles::LogicalAddr(addr)) = profile.addr.clone() { + if profile.has_targets() { + tracing::debug!(%addr, "Using ServiceProfile"); + let init = routes_from_profile(addr.clone(), &profile); + drop(profile); + return spawn_routes(rx, init, move |profile: &profiles::Profile| { + Some(routes_from_profile(addr.clone(), profile)) + }); + } + } + + profile + .addr + .clone() + .map(|profiles::LogicalAddr(pa)| Addr::Name(pa)) + } else { + None + }; + + // Prefer a named address if the given address is a socket address. + let addr = match addr { + Addr::Name(na) => Addr::Name(na), + addr => profile_addr.unwrap_or(addr), + }; + + tracing::debug!(%addr, "Using ClientPolicy routes"); + let init = routes_from_policy(addr.clone(), &policy.borrow_and_update()) + .expect("initial policy must be opaque"); + + spawn_routes(policy, init, move |policy: &policy::ClientPolicy| { + routes_from_policy(addr.clone(), policy) + }) +} + +fn routes_from_policy(addr: Addr, policy: &policy::ClientPolicy) -> Option { + let routes = match policy.protocol { + policy::Protocol::Opaque(policy::opaq::Opaque { ref routes }) => routes.clone(), + // we support a detect stack to cover the case when we do detection and fallback to opaq + policy::Protocol::Detect { ref opaque, .. 
} => opaque.routes.clone(), + _ => { + tracing::info!("Ignoring a discovery update that changed a route from opaq"); + return None; + } + }; + + Some(Routes { + logical: Logical { + addr, + meta: ParentRef(policy.parent.clone()), + }, + routes, + backends: policy.backends.clone(), + }) +} + +fn routes_from_profile(addr: NameAddr, profile: &profiles::Profile) -> Routes { + // TODO: make configurable + let queue = { + policy::Queue { + capacity: 100, + failfast_timeout: std::time::Duration::from_secs(3), + } + }; + + const EWMA: policy::Load = policy::Load::PeakEwma(policy::PeakEwma { + default_rtt: std::time::Duration::from_millis(30), + decay: std::time::Duration::from_secs(10), + }); + + // TODO(ver) use resource metadata from the profile response. + let parent_meta = service_meta(&addr).unwrap_or_else(|| UNKNOWN_META.clone()); + + let backends: Vec<(policy::RouteBackend, u32)> = profile + .targets + .iter() + .map(|target| { + // TODO(ver) use resource metadata from the profile response. + let backend_meta = service_meta(&target.addr).unwrap_or_else(|| UNKNOWN_META.clone()); + let backend = policy::RouteBackend { + backend: policy::Backend { + meta: backend_meta, + queue, + dispatcher: policy::BackendDispatcher::BalanceP2c( + EWMA, + policy::EndpointDiscovery::DestinationGet { + path: target.addr.to_string(), + }, + ), + }, + filters: std::sync::Arc::new([]), + }; + + (backend, target.weight) + }) + .collect(); + + let distribution = policy::RouteDistribution::RandomAvailable(backends.clone().into()); + + static ROUTE_META: Lazy> = + Lazy::new(|| policy::Meta::new_default("serviceprofile")); + let route = policy::opaq::Route { + policy: policy::opaq::Policy { + // TODO(ver) use resource metadata from the profile response. 
+ meta: ROUTE_META.clone(), + params: (), + filters: std::sync::Arc::new([]), + distribution, + }, + }; + + Routes { + logical: Logical { + addr: addr.into(), + meta: ParentRef(parent_meta), + }, + backends: backends.into_iter().map(|(b, _)| b.backend).collect(), + routes: Some(route), + } +} + +fn spawn_routes( + mut route_rx: watch::Receiver, + init: Routes, + mut mk: impl FnMut(&T) -> Option + Send + Sync + 'static, +) -> watch::Receiver +where + T: Send + Sync + 'static, +{ + let (tx, rx) = watch::channel(init); + + tokio::spawn(async move { + loop { + let res = tokio::select! { + biased; + _ = tx.closed() => return, + res = route_rx.changed() => res, + }; + + if res.is_err() { + // Drop the `tx` sender when the profile sender is + // dropped. + return; + } + + if let Some(routes) = (mk)(&*route_rx.borrow_and_update()) { + if tx.send(routes).is_err() { + // Drop the `tx` sender when all of its receivers are dropped. + return; + } + } + } + }); + + rx +} diff --git a/linkerd/app/outbound/src/opaq/concrete.rs b/linkerd/app/outbound/src/opaq/concrete.rs index 1c01fd7f30..b0f43806a7 100644 --- a/linkerd/app/outbound/src/opaq/concrete.rs +++ b/linkerd/app/outbound/src/opaq/concrete.rs @@ -1,12 +1,18 @@ -use crate::{metrics::BalancerMetricsParams, stack_labels, BackendRef, Outbound, ParentRef}; +use super::Logical; +use crate::{ + metrics::BalancerMetricsParams, + stack_labels, + zone::{tcp_zone_labels, TcpZoneLabels}, + BackendRef, Outbound, ParentRef, +}; use linkerd_app_core::{ config::QueueConfig, drain, io, metrics::{ self, prom::{self, EncodeLabelSetMut}, + OutboundZoneLocality, }, - profiles, proxy::{ api_resolve::{ConcreteAddr, Metadata}, core::Resolve, @@ -25,10 +31,18 @@ use tracing::info_span; /// Parameter configuring dispatcher behavior. 
#[derive(Clone, Debug, PartialEq, Eq, Hash)] pub enum Dispatch { - Balance(NameAddr, NameAddr, balance::EwmaConfig), + Balance(NameAddr, balance::EwmaConfig), Forward(Remote, Metadata), + /// A backend dispatcher that explicitly fails all requests. + Fail { + message: Arc, + }, } +#[derive(Debug, thiserror::Error)] +#[error("{0}")] +pub struct DispatcherFailed(Arc); + /// Wraps errors encountered in this module. #[derive(Debug, thiserror::Error)] #[error("concrete service {addr}: {source}")] @@ -52,15 +66,17 @@ pub type BalancerMetrics = BalancerMetricsParams; /// A target configuring a load balancer stack. #[derive(Clone, Debug, PartialEq, Eq)] struct Balance { - logical: NameAddr, - concrete: NameAddr, + addr: NameAddr, ewma: balance::EwmaConfig, queue: QueueConfig, parent: T, } +// TODO: Use crate::metrics::ConcreteLabels once we do not need the logical and concrete labels anymore #[derive(Clone, Debug, Hash, PartialEq, Eq)] pub struct ConcreteLabels { + parent: ParentRef, + backend: BackendRef, logical: Arc, concrete: Arc, } @@ -68,7 +84,8 @@ pub struct ConcreteLabels { impl prom::EncodeLabelSetMut for ConcreteLabels { fn encode_label_set(&self, enc: &mut prom::encoding::LabelSetEncoder<'_>) -> std::fmt::Result { use prom::encoding::EncodeLabel; - + self.parent.encode_label_set(enc)?; + self.backend.encode_label_set(enc)?; ("logical", &*self.logical).encode(enc.encode_label())?; ("concrete", &*self.concrete).encode(enc.encode_label())?; Ok(()) @@ -81,11 +98,18 @@ impl prom::encoding::EncodeLabelSet for ConcreteLabels { } } -impl svc::ExtractParam> for BalancerMetricsParams { +impl svc::ExtractParam> for BalancerMetricsParams +where + T: svc::Param, + T: svc::Param, +{ fn extract_param(&self, bal: &Balance) -> balance::Metrics { + let Logical { addr, meta: parent } = bal.parent.param(); self.metrics(&ConcreteLabels { - logical: bal.logical.to_string().into(), - concrete: bal.concrete.to_string().into(), + parent, + logical: addr.to_string().into(), + backend: 
bal.parent.param(), + concrete: bal.addr.to_string().into(), }) } } @@ -112,9 +136,12 @@ impl Outbound { >, > where - // Logical target.c + // Logical target + T: svc::Param, T: svc::Param, T: Clone + Debug + Send + Sync + 'static, + T: svc::Param, + T: svc::Param, // Server-side socket. I: io::AsyncRead + io::AsyncWrite + Debug + Send + Unpin + 'static, // Endpoint resolution. @@ -128,7 +155,7 @@ impl Outbound { C: Send + Sync + 'static, { let resolve = - svc::MapTargetLayer::new(|t: Balance| -> ConcreteAddr { ConcreteAddr(t.concrete) }) + svc::MapTargetLayer::new(|t: Balance| -> ConcreteAddr { ConcreteAddr(t.addr) }) .layer(resolve.into_service()); self.map_stack(|config, rt, inner| { @@ -157,6 +184,10 @@ impl Outbound { ) .instrument(|e: &Endpoint| info_span!("endpoint", addr = %e.addr)); + let fail = svc::ArcNewService::new(|message: Arc| { + svc::mk(move |_| futures::future::ready(Err(DispatcherFailed(message.clone())))) + }); + let inbound_ips = config.inbound_ips.clone(); let balance = endpoint .push_map_target( @@ -183,28 +214,34 @@ impl Outbound { .stack .layer(stack_labels("opaq", "balance")), ) - .instrument(|t: &Balance| info_span!("balance", addr = %t.concrete)); + .instrument(|t: &Balance| info_span!("balance", addr = %t.addr)); balance + .push_switch(Ok::<_, Infallible>, forward.into_inner()) .push_switch( move |parent: T| -> Result<_, Infallible> { Ok(match parent.param() { - Dispatch::Balance(logical, concrete, ewma) => svc::Either::A(Balance { - logical, - concrete, - ewma, - queue, - parent, - }), - Dispatch::Forward(addr, meta) => svc::Either::B(Endpoint { - addr, - is_local: false, - metadata: meta, - parent, - }), + Dispatch::Balance(addr, ewma) => { + svc::Either::Left(svc::Either::Left(Balance { + addr, + ewma, + queue, + parent, + })) + } + + Dispatch::Forward(addr, meta) => { + svc::Either::Left(svc::Either::Right(Endpoint { + addr, + is_local: false, + metadata: meta, + parent, + })) + } + Dispatch::Fail { message } => 
svc::Either::Right(message), }) }, - forward.into_inner(), + svc::stack(fail).check_new_clone().into_inner(), ) .push_on_service(tcp::Forward::layer()) .push_on_service(drain::Retain::layer(rt.drain.clone())) @@ -218,7 +255,7 @@ impl Outbound { impl From<(&Balance, Error)> for ConcreteError { fn from((target, source): (&Balance, Error)) -> Self { Self { - addr: target.concrete.clone(), + addr: target.addr.clone(), source, } } @@ -303,7 +340,7 @@ impl svc::Param> for Endpoint { impl svc::Param for Endpoint where - T: svc::Param>, + T: svc::Param, { fn param(&self) -> transport::labels::Key { transport::labels::Key::OutboundClient(self.param()) @@ -312,26 +349,41 @@ where impl svc::Param for Endpoint where - T: svc::Param>, + T: svc::Param, { fn param(&self) -> metrics::OutboundEndpointLabels { let authority = self .parent .param() - .as_ref() - .map(|profiles::LogicalAddr(a)| a.as_http_authority()); + .addr + .name_addr() + .map(|a| a.as_http_authority()); + metrics::OutboundEndpointLabels { authority, labels: metrics::prefix_labels("dst", self.metadata.labels().iter()), + zone_locality: self.param(), server_id: self.param(), target_addr: self.addr.into(), } } } +impl svc::Param for Endpoint { + fn param(&self) -> OutboundZoneLocality { + OutboundZoneLocality::new(&self.metadata) + } +} + +impl svc::Param for Endpoint { + fn param(&self) -> TcpZoneLabels { + tcp_zone_labels(self.param()) + } +} + impl svc::Param for Endpoint where - T: svc::Param>, + T: svc::Param, { fn param(&self) -> metrics::EndpointLabels { metrics::EndpointLabels::from(svc::Param::::param(self)) diff --git a/linkerd/app/outbound/src/opaq/logical.rs b/linkerd/app/outbound/src/opaq/logical.rs index 83216d03db..15044ba27f 100644 --- a/linkerd/app/outbound/src/opaq/logical.rs +++ b/linkerd/app/outbound/src/opaq/logical.rs @@ -1,30 +1,35 @@ use super::concrete; -use crate::Outbound; -use linkerd_app_core::{ - io, - profiles::{self, Profile}, - proxy::{api_resolve::Metadata, tcp::balance}, - svc, - 
transport::addrs::*, - Error, Infallible, NameAddr, -}; -use linkerd_distribute as distribute; -use std::{fmt::Debug, hash::Hash, time}; +use crate::{BackendRef, Outbound, ParentRef}; +use linkerd_app_core::{io, svc, Addr, Error}; +use linkerd_proxy_client_policy as client_policy; +use std::{fmt::Debug, hash::Hash, sync::Arc}; use tokio::sync::watch; +pub mod route; +pub mod router; + #[cfg(test)] mod tests; -#[derive(Clone, Debug)] -pub enum Logical { - Route(NameAddr, profiles::Receiver), - Forward(Remote, Metadata), +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub struct Logical { + pub meta: ParentRef, + pub addr: Addr, +} + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub struct Routes { + pub logical: Logical, + pub routes: Option, + pub backends: Arc<[client_policy::Backend]>, } #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct Concrete { target: concrete::Dispatch, parent: T, + logical: Logical, + backend_ref: BackendRef, } #[derive(Debug, thiserror::Error)] @@ -32,39 +37,13 @@ pub struct Concrete { pub struct NoRoute; #[derive(Debug, thiserror::Error)] -#[error("logical service {addr}: {source}")] +#[error("logical service {0}: {source}", logical.addr)] pub struct LogicalError { - addr: NameAddr, + logical: Logical, #[source] source: Error, } -#[derive(Clone, Debug, PartialEq, Eq)] -struct Params { - parent: T, - route: RouteParams, - backends: distribute::Backends>, -} - -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -struct RouteParams { - parent: T, - distribution: Distribution, -} - -type NewBackendCache = distribute::NewBackendCache, (), N, S>; -type NewDistribute = distribute::NewDistribute, (), N>; -type Distribution = distribute::Distribution>; - -#[derive(Clone, Debug)] -struct Routable { - parent: T, - addr: NameAddr, - profile: profiles::Receiver, -} - -// === impl Outbound === - impl Outbound { /// Builds a `NewService` that produces a router service for each logical /// target. 
@@ -77,7 +56,7 @@ impl Outbound { pub fn push_opaq_logical(self) -> Outbound> where // Opaque logical target. - T: svc::Param, + T: svc::Param>, T: Eq + Hash + Clone + Debug + Send + Sync + 'static, // Server-side socket. I: io::AsyncRead + io::AsyncWrite + Debug + Send + Unpin + 'static, @@ -87,173 +66,31 @@ impl Outbound { NSvc::Future: Send, NSvc::Error: Into, { - self.map_stack(|_, _, concrete| { - let route = svc::layers() - .lift_new() - .push(NewDistribute::layer()) - // The router does not take the backend's availability into - // consideration, so we must eagerly fail requests to prevent - // leaking tasks onto the runtime. - .push_on_service(svc::LoadShed::layer()); + self.map_stack(|_config, rt, concrete| { + let metrics = rt.metrics.prom.opaq.route.clone(); - // A `NewService`--instantiated once per logical target--that caches - // a set of concrete services so that, as the watch provides new - // `Params`, we can reuse inner services. - let router = svc::layers() - // Each `RouteParams` provides a `Distribution` that is used to - // choose a concrete service for a given route. - .lift_new() - .push(NewBackendCache::layer()) - // Lazily cache a service for each `RouteParams` - // returned from the `SelectRoute` impl. - .push_on_service(route) - .push(svc::NewOneshotRoute::, _, _>::layer_cached()); - - // For each `Routable` target, watch its `Profile`, maintaining a - // cache of all concrete services used by the router. concrete - .clone() - // Share the concrete stack with each router stack. .lift_new() - // Rebuild this router stack every time the profile changes. 
- .push_on_service(router) - .push(svc::NewSpawnWatch::::layer_into::>()) - .push(svc::NewMapErr::layer_from_target::()) - .push_switch( - |parent: T| -> Result<_, Infallible> { - Ok(match parent.param() { - Logical::Route(addr, profile) => svc::Either::A(Routable { - addr, - parent, - profile, - }), - Logical::Forward(addr, meta) => svc::Either::B(Concrete { - target: concrete::Dispatch::Forward(addr, meta), - parent, - }), - }) - }, - concrete.into_inner(), - ) + .push_on_service(router::Router::layer(metrics.clone())) + .push_on_service(svc::NewMapErr::layer_from_target::()) + // Rebuild the inner router stack every time the watch changes. + .push(svc::NewSpawnWatch::::layer_into::< + router::Router, + >()) .arc_new_clone_tcp() }) } } -// === impl Routable === - -impl svc::Param> for Routable { - fn param(&self) -> watch::Receiver { - self.profile.clone().into() - } -} - -// === impl Params === +// === impl LogicalError === -impl From<(Profile, Routable)> for Params +impl From<(&router::Router, Error)> for LogicalError where T: Eq + Hash + Clone + Debug, { - fn from((profile, routable): (Profile, Routable)) -> Self { - const EWMA: balance::EwmaConfig = balance::EwmaConfig { - default_rtt: time::Duration::from_millis(30), - decay: time::Duration::from_secs(10), - }; - - // Create concrete targets for all of the profile's routes. 
- let (backends, distribution) = if profile.targets.is_empty() { - let concrete = Concrete { - target: concrete::Dispatch::Balance(routable.addr.clone(), routable.addr, EWMA), - parent: routable.parent.clone(), - }; - let backends = std::iter::once(concrete.clone()).collect(); - let distribution = Distribution::first_available(std::iter::once(concrete)); - (backends, distribution) - } else { - let backends = profile - .targets - .iter() - .map(|t| Concrete { - target: concrete::Dispatch::Balance( - routable.addr.clone(), - t.addr.clone(), - EWMA, - ), - parent: routable.parent.clone(), - }) - .collect(); - let distribution = Distribution::random_available(profile.targets.iter().cloned().map( - |profiles::Target { addr, weight }| { - let concrete = Concrete { - target: concrete::Dispatch::Balance(routable.addr.clone(), addr, EWMA), - parent: routable.parent.clone(), - }; - (concrete, weight) - }, - )) - .expect("distribution must be valid"); - - (backends, distribution) - }; - - let route = RouteParams { - parent: routable.parent.clone(), - distribution, - }; - - Self { - parent: routable.parent, - backends, - route, - } - } -} - -impl svc::Param>> for Params -where - T: Clone + Eq + Hash + Debug, -{ - fn param(&self) -> distribute::Backends> { - self.backends.clone() - } -} - -impl svc::Param for Params -where - T: svc::Param, - T: Clone + Eq + Hash + Debug, -{ - fn param(&self) -> profiles::LogicalAddr { - self.parent.param() - } -} - -impl svc::router::SelectRoute for Params -where - T: Clone + Eq + Hash + Debug, -{ - type Key = RouteParams; - type Error = std::convert::Infallible; - - fn select(&self, _: &I) -> Result { - Ok(self.route.clone()) - } -} - -// === impl RouteParams === - -impl svc::Param> for RouteParams { - fn param(&self) -> Distribution { - self.distribution.clone() - } -} - -// === impl LogicalError === - -impl From<(&Routable, Error)> for LogicalError { - fn from((target, source): (&Routable, Error)) -> Self { + fn from((target, source): 
(&router::Router, Error)) -> Self { Self { - addr: target.addr.clone(), + logical: target.logical.clone(), source, } } @@ -261,55 +98,26 @@ impl From<(&Routable, Error)> for LogicalError { // === impl Concrete === -impl std::ops::Deref for Concrete { - type Target = T; - - fn deref(&self) -> &Self::Target { - &self.parent - } -} - -impl svc::Param> for Concrete -where - T: svc::Param>, -{ - fn param(&self) -> Option { - (**self).param()?.logical_addr() - } -} - impl svc::Param for Concrete { fn param(&self) -> concrete::Dispatch { self.target.clone() } } -// === impl Logical === - -impl std::cmp::PartialEq for Logical { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (Self::Route(laddr, _), Self::Route(raddr, _)) => laddr == raddr, - (Self::Forward(laddr, lmeta), Self::Forward(raddr, rmeta)) => { - laddr == raddr && lmeta == rmeta - } - _ => false, - } +impl svc::Param for Concrete { + fn param(&self) -> Logical { + self.logical.clone() } } -impl std::cmp::Eq for Logical {} +impl svc::Param for Concrete { + fn param(&self) -> ParentRef { + self.logical.meta.clone() + } +} -impl std::hash::Hash for Logical { - fn hash(&self, state: &mut H) { - match self { - Self::Route(addr, _) => { - addr.hash(state); - } - Self::Forward(addr, meta) => { - addr.hash(state); - meta.hash(state); - } - } +impl svc::Param for Concrete { + fn param(&self) -> BackendRef { + self.backend_ref.clone() } } diff --git a/linkerd/app/outbound/src/opaq/logical/route.rs b/linkerd/app/outbound/src/opaq/logical/route.rs new file mode 100644 index 0000000000..2bf6d15690 --- /dev/null +++ b/linkerd/app/outbound/src/opaq/logical/route.rs @@ -0,0 +1,182 @@ +use super::{super::Concrete, Logical}; +use crate::{ + metrics::transport::{NewTransportRouteMetrics, TransportRouteMetricsFamily}, + ParentRef, RouteRef, +}; +use linkerd_app_core::{io, metrics::prom, svc, Addr, Error}; +use linkerd_distribute as distribute; +use linkerd_proxy_client_policy as policy; +use std::{fmt::Debug, 
hash::Hash, sync::Arc}; + +pub(crate) mod filters; + +pub type TcpRouteMetrics = TransportRouteMetricsFamily; + +#[derive(Debug, PartialEq, Eq, Hash)] +pub(crate) struct Backend { + pub(crate) route_ref: RouteRef, + pub(crate) concrete: Concrete, + pub(super) filters: Arc<[policy::opaq::Filter]>, +} + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub(crate) struct MatchedRoute { + pub(super) params: Route, +} + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub(crate) struct Route { + pub(super) parent: T, + pub(super) logical: Logical, + pub(super) route_ref: RouteRef, + pub(super) filters: Arc<[policy::opaq::Filter]>, + pub(super) distribution: BackendDistribution, +} + +#[derive(Clone, Debug, Hash, PartialEq, Eq)] +pub struct RouteLabels { + parent: ParentRef, + route: RouteRef, + addr: Addr, +} + +pub(crate) type BackendDistribution = distribute::Distribution>; +pub(crate) type NewDistribute = distribute::NewDistribute, (), N>; + +/// Wraps errors with route metadata. +#[derive(Debug, thiserror::Error)] +#[error("route {}: {source}", route.0)] +struct RouteError { + route: RouteRef, + #[source] + source: Error, +} + +// === impl Backend === + +impl Clone for Backend { + fn clone(&self) -> Self { + Self { + route_ref: self.route_ref.clone(), + concrete: self.concrete.clone(), + filters: self.filters.clone(), + } + } +} + +// === impl MatchedRoute === + +impl MatchedRoute +where + // Parent target. + T: Debug + Eq + Hash, + T: Clone + Send + Sync + 'static, +{ + /// Builds a route stack that applies policy filters to requests and + /// distributes requests over each route's backends. These [`Concrete`] + /// backends are expected to be cached/shared by the inner stack. + pub(crate) fn layer( + metrics: TcpRouteMetrics, + ) -> impl svc::Layer> + Clone + where + I: io::AsyncRead + io::AsyncWrite + Debug + Send + Unpin + 'static, + // Inner stack. 
+ N: svc::NewService, Service = NSvc> + Clone + Send + Sync + 'static, + NSvc: svc::Service + Clone + Send + Sync + 'static, + NSvc::Future: Send, + NSvc::Error: Into, + { + svc::layer::mk(move |inner| { + svc::stack(inner) + .push_map_target(|t| t) + .push_map_target(|b: Backend| b.concrete) + // apply backend filters + .push(filters::NewApplyFilters::layer()) + .lift_new() + .push(NewDistribute::layer()) + // The router does not take the backend's availability into + // consideration, so we must eagerly fail requests to prevent + // leaking tasks onto the runtime. + .push_on_service(svc::LoadShed::layer()) + // apply route level filters + .push(filters::NewApplyFilters::layer()) + .push(svc::NewMapErr::layer_with(|rt: &Self| { + let route = rt.params.route_ref.clone(); + move |source| RouteError { + route: route.clone(), + source, + } + })) + .push(NewTransportRouteMetrics::layer(metrics.clone())) + .arc_new_clone_tcp() + .into_inner() + }) + } +} + +impl svc::Param> for MatchedRoute { + fn param(&self) -> BackendDistribution { + self.params.distribution.clone() + } +} + +impl svc::Param> for MatchedRoute { + fn param(&self) -> Arc<[policy::opaq::Filter]> { + self.params.filters.clone() + } +} + +impl svc::Param> for Backend { + fn param(&self) -> Arc<[policy::opaq::Filter]> { + self.filters.clone() + } +} + +impl svc::Param for MatchedRoute +where + T: Eq + Hash + Clone + Debug, +{ + fn param(&self) -> RouteLabels { + RouteLabels { + route: self.params.route_ref.clone(), + parent: self.params.logical.meta.clone(), + addr: self.params.logical.addr.clone(), + } + } +} + +// === impl RouteLabels === + +impl prom::EncodeLabelSetMut for RouteLabels { + fn encode_label_set(&self, enc: &mut prom::encoding::LabelSetEncoder<'_>) -> std::fmt::Result { + use prom::encoding::*; + let Self { + parent, + route, + addr, + } = self; + + parent.encode_label_set(enc)?; + route.encode_label_set(enc)?; + + ( + "target_ip", + match addr { + Addr::Socket(ref a) => 
Some(a.ip().to_string()), + Addr::Name(_) => None, + }, + ) + .encode(enc.encode_label())?; + + ("target_port", addr.port()).encode(enc.encode_label())?; + + Ok(()) + } +} + +impl prom::encoding::EncodeLabelSet for RouteLabels { + fn encode(&self, mut enc: prom::encoding::LabelSetEncoder<'_>) -> std::fmt::Result { + use prom::EncodeLabelSetMut; + self.encode_label_set(&mut enc) + } +} diff --git a/linkerd/app/outbound/src/opaq/logical/route/filters.rs b/linkerd/app/outbound/src/opaq/logical/route/filters.rs new file mode 100644 index 0000000000..0cf4305b19 --- /dev/null +++ b/linkerd/app/outbound/src/opaq/logical/route/filters.rs @@ -0,0 +1,111 @@ +use futures::{future, TryFutureExt}; +use linkerd_app_core::{io, svc, Error}; +use linkerd_proxy_client_policy::opaq; +use std::{ + fmt::Debug, + sync::Arc, + task::{Context, Poll}, +}; + +#[derive(Clone, Debug)] +pub struct NewApplyFilters { + inner: N, +} + +#[derive(Clone, Debug)] +pub struct ApplyFilters { + inner: S, + filters: Arc<[opaq::Filter]>, +} + +// === impl NewApplyFilters === + +impl NewApplyFilters { + pub fn layer() -> impl svc::layer::Layer + Clone { + svc::layer::mk(move |inner| Self { inner }) + } +} + +impl svc::NewService for NewApplyFilters +where + N: svc::NewService, + T: svc::Param>, +{ + type Service = ApplyFilters; + + fn new_service(&self, target: T) -> Self::Service { + let filters: Arc<[opaq::Filter]> = target.param(); + let svc = self.inner.new_service(target); + ApplyFilters { + inner: svc, + filters, + } + } +} + +// === impl ApplyFilters === + +impl ApplyFilters { + fn apply_filters(&self) -> Result<(), Error> { + if let Some(filter) = self.filters.iter().next() { + match filter { + opaq::Filter::Forbidden => { + return Err(errors::TCPForbiddenRoute.into()); + } + + opaq::Filter::Invalid(message) => { + return Err(errors::TCPInvalidBackend(message.clone()).into()); + } + + opaq::Filter::InternalError(message) => { + return Err(errors::TCPInvalidPolicy(message).into()); + } + } + } + + 
Ok(()) + } +} + +impl svc::Service for ApplyFilters +where + I: io::AsyncRead + io::AsyncWrite + Send + 'static, + S: svc::Service + Send + Clone + 'static, + S::Error: Into, + S::Future: Send, +{ + type Response = S::Response; + type Error = Error; + type Future = future::Either< + future::ErrInto, + future::Ready>, + >; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx).map_err(Into::into) + } + + fn call(&mut self, io: I) -> Self::Future { + if let Err(e) = self.apply_filters() { + return future::Either::Right(future::err(e)); + } + future::Either::Left(self.inner.call(io).err_into()) + } +} + +pub mod errors { + use super::*; + + #[derive(Debug, thiserror::Error)] + #[error("forbidden TCP route")] + pub struct TCPForbiddenRoute; + + #[derive(Debug, thiserror::Error)] + #[error("invalid TCP backend: {0}")] + pub struct TCPInvalidBackend(pub Arc); + + #[derive(Debug, thiserror::Error)] + #[error("invalid client policy: {0}")] + pub struct TCPInvalidPolicy(pub &'static str); +} diff --git a/linkerd/app/outbound/src/opaq/logical/router.rs b/linkerd/app/outbound/src/opaq/logical/router.rs new file mode 100644 index 0000000000..c9006dc579 --- /dev/null +++ b/linkerd/app/outbound/src/opaq/logical/router.rs @@ -0,0 +1,197 @@ +use super::{ + super::{concrete, Concrete}, + route, Logical, NoRoute, +}; +use crate::{BackendRef, EndpointRef, RouteRef, ServerAddr}; +use linkerd_app_core::{io, proxy::http, svc, transport::addrs::*, Error, NameAddr, Result}; +use linkerd_distribute as distribute; +use linkerd_opaq_route as opaq_route; +use linkerd_proxy_client_policy as policy; +use std::{fmt::Debug, hash::Hash}; + +#[derive(Clone, Debug, PartialEq, Eq)] +pub(crate) struct Router { + pub(super) parent: T, + pub(super) logical: Logical, + pub(super) routes: Option>>, + pub(super) backends: distribute::Backends>, +} + +type NewBackendCache = distribute::NewBackendCache, (), N, S>; + +// === impl Router === +impl Router +where + 
// Parent target type. + T: Eq + Hash + Clone + Debug + Send + Sync + 'static, +{ + pub fn layer( + metrics: route::TcpRouteMetrics, + ) -> impl svc::Layer> + Clone + where + I: io::AsyncRead + io::AsyncWrite + Debug + Send + Unpin + 'static, + // Concrete stack. + N: svc::NewService, Service = NSvc> + Clone + Send + Sync + 'static, + NSvc: svc::Service + Clone + Send + Sync + 'static, + NSvc::Future: Send, + NSvc::Error: Into, + { + svc::layer::mk(move |inner| { + svc::stack(inner) + .lift_new() + // Each route builds over concrete backends. All of these + // backends are cached here and shared across routes. + .push(NewBackendCache::layer()) + .push_on_service(route::MatchedRoute::layer(metrics.clone())) + .push(svc::NewOneshotRoute::::layer_cached()) + .arc_new_clone_tcp() + .into_inner() + }) + } +} + +impl From<(crate::opaq::Routes, T)> for Router +where + T: Eq + Hash + Clone + Debug, +{ + fn from((rts, parent): (crate::opaq::Routes, T)) -> Self { + let crate::opaq::Routes { + logical, + routes, + backends, + } = rts; + + let mk_concrete = { + let parent = parent.clone(); + let logical = logical.clone(); + + move |backend_ref: BackendRef, target: concrete::Dispatch| Concrete { + target, + parent: parent.clone(), + backend_ref, + logical: logical.clone(), + } + }; + + let mk_dispatch = move |bke: &policy::Backend| match bke.dispatcher { + policy::BackendDispatcher::BalanceP2c( + policy::Load::PeakEwma(policy::PeakEwma { decay, default_rtt }), + policy::EndpointDiscovery::DestinationGet { ref path }, + ) => mk_concrete( + BackendRef(bke.meta.clone()), + concrete::Dispatch::Balance( + path.parse::() + .expect("destination must be a nameaddr"), + http::balance::EwmaConfig { decay, default_rtt }, + ), + ), + policy::BackendDispatcher::Forward(addr, ref md) => mk_concrete( + EndpointRef::new(md, addr.port().try_into().expect("port must not be 0")).into(), + concrete::Dispatch::Forward(Remote(ServerAddr(addr)), md.clone()), + ), + policy::BackendDispatcher::Fail { 
ref message } => mk_concrete( + BackendRef(policy::Meta::new_default("fail")), + concrete::Dispatch::Fail { + message: message.clone(), + }, + ), + }; + + let mk_route_backend = + |route_ref: &RouteRef, rb: &policy::RouteBackend| { + let concrete = mk_dispatch(&rb.backend); + route::Backend { + route_ref: route_ref.clone(), + filters: rb.filters.clone(), + concrete, + } + }; + + let mk_distribution = + |rr: &RouteRef, d: &policy::RouteDistribution| match d { + policy::RouteDistribution::Empty => route::BackendDistribution::Empty, + policy::RouteDistribution::FirstAvailable(backends) => { + route::BackendDistribution::first_available( + backends.iter().map(|b| mk_route_backend(rr, b)), + ) + } + policy::RouteDistribution::RandomAvailable(backends) => { + route::BackendDistribution::random_available( + backends + .iter() + .map(|(rb, weight)| (mk_route_backend(rr, rb), *weight)), + ) + .expect("distribution must be valid") + } + }; + + let mk_policy = |policy::RoutePolicy:: { + meta, + distribution, + filters, + .. 
+ }| { + let route_ref = RouteRef(meta); + let logical = logical.clone(); + + let distribution = mk_distribution(&route_ref, &distribution); + route::Route { + logical, + parent: parent.clone(), + route_ref, + filters, + distribution, + } + }; + + let routes = routes.as_ref().map(|route| opaq_route::Route { + policy: mk_policy(route.policy.clone()), + }); + + let backends = backends.iter().map(mk_dispatch).collect(); + + Self { + routes, + backends, + parent, + logical, + } + } +} + +impl svc::router::SelectRoute for Router +where + T: Clone + Eq + Hash + Debug, +{ + type Key = route::MatchedRoute; + type Error = NoRoute; + + fn select(&self, _: &I) -> Result { + tracing::trace!("Selecting Opaq route"); + let Some(ref route) = self.routes else { + return Err(NoRoute); + }; + let params = route.policy.clone(); + tracing::debug!(meta = ?params.route_ref, "Selected route"); + + Ok(route::MatchedRoute { params }) + } +} + +impl svc::Param for Router +where + T: Eq + Hash + Clone + Debug, +{ + fn param(&self) -> Logical { + self.logical.clone() + } +} + +impl svc::Param>> for Router +where + T: Eq + Hash + Clone + Debug, +{ + fn param(&self) -> distribute::Backends> { + self.backends.clone() + } +} diff --git a/linkerd/app/outbound/src/opaq/logical/tests.rs b/linkerd/app/outbound/src/opaq/logical/tests.rs index 57be41efef..ca4d6e1bf7 100644 --- a/linkerd/app/outbound/src/opaq/logical/tests.rs +++ b/linkerd/app/outbound/src/opaq/logical/tests.rs @@ -1,13 +1,23 @@ -use super::*; +use super::{client_policy as policy, *}; +use crate::opaq::{self, policy::Receiver as PolicyReceiver}; use crate::test_util::*; use io::AsyncWriteExt; use linkerd_app_core::{ errors::{self, FailFastError}, io::AsyncReadExt, + profiles, svc::{NewService, ServiceExt}, + transport::{ClientAddr, Local, Remote, ServerAddr}, + Addr, NameAddr, }; use std::net::SocketAddr; -use tokio::time; +use tokio::{sync::watch, time}; + +#[derive(Clone, Debug)] +struct Target { + addr: Addr, + routes: 
watch::Receiver, +} /// Tests that the logical stack forwards connections to services with a single endpoint. #[tokio::test] @@ -17,11 +27,10 @@ async fn forward() { // We create a logical target to be resolved to endpoints. let laddr = "xyz.example.com:4444".parse::().unwrap(); - let (_tx, rx) = tokio::sync::watch::channel(Profile { - addr: Some(profiles::LogicalAddr(laddr.clone())), - ..Default::default() - }); - let logical = Logical::Route(laddr.clone(), rx.into()); + let addr = Addr::Socket("1.2.3.4:444".parse().unwrap()); + + let (_tx, policy_rx) = watch::channel(default_service_policy(laddr.clone())); + let target = Target::new(policy_rx, None, addr); // The resolution resolves a single endpoint. let ep_addr = SocketAddr::new([192, 0, 2, 30].into(), 3333); @@ -31,7 +40,7 @@ async fn forward() { // Build the TCP logical stack with a mocked connector. let (rt, _shutdown) = runtime(); let stack = Outbound::new(default_config(), rt, &mut Default::default()) - .with_stack(svc::mk(move |ep: concrete::Endpoint>| { + .with_stack(svc::mk(move |ep: concrete::Endpoint>| { let Remote(ServerAddr(ea)) = svc::Param::param(&ep); assert_eq!(ea, ep_addr); let mut io = support::io(); @@ -47,7 +56,7 @@ async fn forward() { let mut io = support::io(); io.read(b"hola").write(b"mundo"); stack - .new_service(logical.clone()) + .new_service(target.clone()) .oneshot(io.build()) .await .expect("forwarding must not fail"); @@ -68,11 +77,10 @@ async fn balances() { // We create a logical target to be resolved to endpoints. 
let laddr = "xyz.example.com:4444".parse::().unwrap(); - let (_tx, rx) = tokio::sync::watch::channel(Profile { - addr: Some(profiles::LogicalAddr(laddr.clone())), - ..Default::default() - }); - let logical = Logical::Route(laddr.clone(), rx.into()); + let addr = Addr::Socket("1.2.3.4:444".parse().unwrap()); + + let (_tx, policy_rx) = watch::channel(default_service_policy(laddr.clone())); + let target = Target::new(policy_rx, None, addr); // The resolution resolves a single endpoint. let ep0_addr = SocketAddr::new([192, 0, 2, 30].into(), 3333); @@ -86,7 +94,7 @@ async fn balances() { let (rt, _shutdown) = runtime(); let svc = Outbound::new(default_config(), rt, &mut Default::default()) .with_stack(svc::mk( - move |ep: concrete::Endpoint>| match svc::Param::param(&ep) { + move |ep: concrete::Endpoint>| match svc::Param::param(&ep) { Remote(ServerAddr(addr)) if addr == ep0_addr => { tracing::debug!(%addr, "writing ep0"); let mut io = support::io(); @@ -107,7 +115,7 @@ async fn balances() { .push_opaq_concrete(resolve) .push_opaq_logical() .into_inner() - .new_service(logical); + .new_service(target); // We add a single endpoint to the balancer and it is used: @@ -203,3 +211,80 @@ fn spawn_io() -> ( }); (server_io, task) } + +fn default_service_policy(addr: NameAddr) -> policy::ClientPolicy { + let meta = policy::Meta::new_default("test"); + let queue = { + policy::Queue { + capacity: 100, + failfast_timeout: std::time::Duration::from_secs(3), + } + }; + + let load = policy::Load::PeakEwma(policy::PeakEwma { + default_rtt: std::time::Duration::from_millis(30), + decay: std::time::Duration::from_secs(10), + }); + + let backend = policy::Backend { + meta: meta.clone(), + queue, + dispatcher: policy::BackendDispatcher::BalanceP2c( + load, + policy::EndpointDiscovery::DestinationGet { + path: addr.to_string(), + }, + ), + }; + + let opaque = policy::opaq::Opaque { + routes: Some(policy::opaq::Route { + policy: policy::opaq::Policy { + distribution: 
policy::RouteDistribution::FirstAvailable(Arc::new([ + policy::RouteBackend { + backend: backend.clone(), + filters: Arc::new([]), + }, + ])), + filters: Arc::new([]), + meta: meta.clone(), + params: (), + }, + }), + }; + + policy::ClientPolicy { + parent: meta, + protocol: policy::Protocol::Opaque(opaque), + backends: Arc::new([backend]), + } +} + +// === impl Target === + +impl Target { + pub fn new(policy: PolicyReceiver, profile: Option, addr: Addr) -> Self { + let routes = opaq::routes_from_discovery(addr.clone(), profile, policy); + Self { addr, routes } + } +} + +impl svc::Param> for Target { + fn param(&self) -> watch::Receiver { + self.routes.clone() + } +} + +impl std::cmp::PartialEq for Target { + fn eq(&self, other: &Self) -> bool { + self.addr == other.addr + } +} + +impl std::cmp::Eq for Target {} + +impl std::hash::Hash for Target { + fn hash(&self, state: &mut H) { + self.addr.hash(state); + } +} diff --git a/linkerd/app/outbound/src/policy/api.rs b/linkerd/app/outbound/src/policy/api.rs index bcbad3c30e..0e6e5a1c05 100644 --- a/linkerd/app/outbound/src/policy/api.rs +++ b/linkerd/app/outbound/src/policy/api.rs @@ -8,7 +8,7 @@ use linkerd_app_core::{ svc::Service, Addr, Error, Recover, Result, }; -use linkerd_proxy_client_policy::ClientPolicy; +use linkerd_proxy_client_policy::{ClientPolicy, ClientPolicyOverrides}; use linkerd_tonic_stream::{LimitReceiveFuture, ReceiveLimits}; use linkerd_tonic_watch::StreamWatch; use std::sync::Arc; @@ -19,6 +19,7 @@ pub(crate) struct Api { workload: Arc, limits: ReceiveLimits, default_detect_timeout: time::Duration, + export_hostname_labels: bool, client: Client, } @@ -33,19 +34,20 @@ static INVALID_POLICY: once_cell::sync::OnceCell = once_cell::sync impl Api where S: tonic::client::GrpcService + Clone, - S::ResponseBody: - http::HttpBody + Default + Send + 'static, + S::ResponseBody: http::Body + Send + 'static, { pub(crate) fn new( workload: Arc, limits: ReceiveLimits, default_detect_timeout: time::Duration, + 
export_hostname_labels: bool, client: S, ) -> Self { Self { workload, limits, default_detect_timeout, + export_hostname_labels, client: Client::new(client), } } @@ -59,8 +61,7 @@ impl Service for Api where S: tonic::client::GrpcService, S: Clone + Send + Sync + 'static, - S::ResponseBody: - http::HttpBody + Default + Send + 'static, + S::ResponseBody: http::Body + Send + 'static, S::Future: Send + 'static, { type Response = @@ -88,6 +89,9 @@ where }; let detect_timeout = self.default_detect_timeout; + let overrides = ClientPolicyOverrides { + export_hostname_labels: self.export_hostname_labels, + }; let limits = self.limits; let mut client = self.client.clone(); Box::pin(async move { @@ -98,7 +102,7 @@ where // If the server returned an invalid client policy, we // default to using an invalid policy that causes all // requests to report an internal error. - let policy = ClientPolicy::try_from(up).unwrap_or_else(|error| { + let policy = ClientPolicy::try_from(overrides, up).unwrap_or_else(|error| { tracing::warn!(%error, "Client policy misconfigured"); INVALID_POLICY .get_or_init(|| ClientPolicy::invalid(detect_timeout)) diff --git a/linkerd/app/outbound/src/protocol.rs b/linkerd/app/outbound/src/protocol.rs index b9904cd42e..d793ca683b 100644 --- a/linkerd/app/outbound/src/protocol.rs +++ b/linkerd/app/outbound/src/protocol.rs @@ -1,20 +1,27 @@ -use crate::{http, Outbound}; -use linkerd_app_core::{detect, io, svc, Error, Infallible}; +use crate::{http, Outbound, ParentRef}; +use linkerd_app_core::{io, svc, Error, Infallible}; use std::{fmt::Debug, hash::Hash}; +mod metrics; +#[cfg(test)] +mod tests; + +pub use self::metrics::MetricsFamilies; + #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct Http { - version: http::Version, + version: http::Variant, parent: T, } /// Parameter type indicating how the proxy should handle a connection. 
-#[derive(Copy, Clone, Debug, Eq, PartialEq)] +#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] pub enum Protocol { Http1, Http2, Detect, Opaque, + Tls, } // === impl Outbound === @@ -29,10 +36,12 @@ impl Outbound { pub fn push_protocol( self, http: svc::ArcNewCloneHttp>, + tls: svc::ArcNewCloneTcp>>, ) -> Outbound> where // Target type indicating whether detection should be skipped. T: svc::Param, + T: svc::Param, T: Eq + Hash + Clone + Debug + Send + Sync + 'static, // Server-side socket. I: io::AsyncRead + io::AsyncWrite + io::PeerAddr, @@ -60,13 +69,22 @@ impl Outbound { .arc_new_tcp() }); - let detect = http.clone().map_stack(|config, _, http| { + let detect = http.clone().map_stack(|config, rt, http| { + let read_timeout = config.proxy.detect_protocol_timeout; + let metrics = rt.metrics.prom.http_detect.clone(); + http.push_switch( - |(result, parent): (detect::Result, T)| -> Result<_, Infallible> { - Ok(match detect::allow_timeout(result) { - Some(version) => svc::Either::A(Http { version, parent }), - None => svc::Either::B(parent), - }) + |(detected, parent): (http::Detection, T)| -> Result<_, Infallible> { + match detected { + http::Detection::Http(version) => { + return Ok(svc::Either::Left(Http { version, parent })); + } + http::Detection::ReadTimeout(timeout) => { + tracing::info!("Continuing after timeout: {timeout:?}"); + } + _ => {} + } + Ok(svc::Either::Right(parent)) }, opaq.clone().into_inner(), ) @@ -75,33 +93,51 @@ impl Outbound { // unexpected reason) the inner service is not ready. 
.push_on_service(svc::LoadShed::layer()) .push_on_service(svc::MapTargetLayer::new(io::EitherIo::Right)) - .lift_new_with_target::<(detect::Result, T)>() - .push(detect::NewDetectService::layer(config.proxy.detect_http())) + .lift_new_with_target::<(http::Detection, T)>() + .push(http::NewDetect::layer(move |parent: &T| { + http::DetectParams { + read_timeout, + metrics: metrics.metrics(parent.param()), + } + })) .arc_new_tcp() }); - http.map_stack(|_, _, http| { + http.map_stack(|_, rt, http| { // First separate traffic that needs protocol detection. Then switch // between traffic that is known to be HTTP or opaque. - http.push_switch(Ok::<_, Infallible>, opaq.clone().into_inner()) + let known = http.push_switch( + Ok::<_, Infallible>, + opaq.clone() + .push_switch(Ok::<_, Infallible>, tls.clone()) + .into_inner(), + ); + + known .push_on_service(svc::MapTargetLayer::new(io::EitherIo::Left)) .push_switch( |parent: T| -> Result<_, Infallible> { match parent.param() { - Protocol::Http1 => Ok(svc::Either::A(svc::Either::A(Http { - version: http::Version::Http1, + Protocol::Http1 => Ok(svc::Either::Left(svc::Either::Left(Http { + version: http::Variant::Http1, parent, }))), - Protocol::Http2 => Ok(svc::Either::A(svc::Either::A(Http { - version: http::Version::H2, + Protocol::Http2 => Ok(svc::Either::Left(svc::Either::Left(Http { + version: http::Variant::H2, parent, }))), - Protocol::Opaque => Ok(svc::Either::A(svc::Either::B(parent))), - Protocol::Detect => Ok(svc::Either::B(parent)), + Protocol::Opaque => Ok(svc::Either::Left(svc::Either::Right( + svc::Either::Left(parent), + ))), + Protocol::Tls => Ok(svc::Either::Left(svc::Either::Right( + svc::Either::Right(parent), + ))), + Protocol::Detect => Ok(svc::Either::Right(parent)), } }, detect.into_inner(), ) + .push(metrics::NewRecord::layer(rt.metrics.prom.protocol.clone())) .arc_new_tcp() }) } @@ -109,14 +145,14 @@ impl Outbound { // === impl Http === -impl From<(http::Version, T)> for Http { - fn from((version, 
parent): (http::Version, T)) -> Self { +impl From<(http::Variant, T)> for Http { + fn from((version, parent): (http::Variant, T)) -> Self { Self { version, parent } } } -impl svc::Param for Http { - fn param(&self) -> http::Version { +impl svc::Param for Http { + fn param(&self) -> http::Variant { self.version } } diff --git a/linkerd/app/outbound/src/protocol/metrics.rs b/linkerd/app/outbound/src/protocol/metrics.rs new file mode 100644 index 0000000000..ff30b10f69 --- /dev/null +++ b/linkerd/app/outbound/src/protocol/metrics.rs @@ -0,0 +1,125 @@ +use super::Protocol; +use crate::ParentRef; +use linkerd_app_core::{ + metrics::prom::{self, EncodeLabelSetMut}, + svc, +}; + +#[derive(Clone, Debug)] +pub struct NewRecord { + inner: N, + metrics: MetricsFamilies, +} + +#[derive(Clone, Debug)] +pub struct Record { + inner: S, + counter: prom::Counter, +} + +#[derive(Clone, Debug, Default)] +pub struct MetricsFamilies { + connections: prom::Family, +} + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +struct Labels { + protocol: Protocol, + parent_ref: ParentRef, +} + +// === impl MetricsFamilies === + +impl MetricsFamilies { + pub fn register(reg: &mut prom::Registry) -> Self { + let connections = prom::Family::default(); + reg.register( + "connections", + "Outbound TCP connections by protocol configuration", + connections.clone(), + ); + + Self { connections } + } +} + +// === impl NewRecord === + +impl NewRecord { + pub fn layer(metrics: MetricsFamilies) -> impl svc::layer::Layer + Clone { + svc::layer::mk(move |inner| Self { + inner, + metrics: metrics.clone(), + }) + } +} + +impl svc::NewService for NewRecord +where + T: svc::Param, + T: svc::Param, + N: svc::NewService, +{ + type Service = Record; + + fn new_service(&self, target: T) -> Self::Service { + let counter = (*self.metrics.connections.get_or_create(&Labels { + protocol: target.param(), + parent_ref: target.param(), + })) + .clone(); + + let inner = self.inner.new_service(target); + Record { inner, counter } 
+ } +} + +// === impl Record === + +impl svc::Service for Record +where + S: svc::Service, +{ + type Response = S::Response; + type Error = S::Error; + type Future = S::Future; + + fn poll_ready( + &mut self, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, io: I) -> Self::Future { + self.counter.inc(); + self.inner.call(io) + } +} + +// === impl Labels === + +impl prom::EncodeLabelSetMut for Labels { + fn encode_label_set(&self, enc: &mut prom::encoding::LabelSetEncoder<'_>) -> std::fmt::Result { + use prom::encoding::EncodeLabel; + + let protocol = match self.protocol { + Protocol::Http1 => "http/1", + Protocol::Http2 => "http/2", + Protocol::Detect => "detect", + Protocol::Opaque => "opaq", + Protocol::Tls => "tls", + }; + + ("protocol", protocol).encode(enc.encode_label())?; + self.parent_ref.encode_label_set(enc)?; + + Ok(()) + } +} + +impl prom::encoding::EncodeLabelSet for Labels { + fn encode(&self, mut enc: prom::encoding::LabelSetEncoder<'_>) -> Result<(), std::fmt::Error> { + self.encode_label_set(&mut enc) + } +} diff --git a/linkerd/app/outbound/src/protocol/tests.rs b/linkerd/app/outbound/src/protocol/tests.rs new file mode 100644 index 0000000000..f33990df6c --- /dev/null +++ b/linkerd/app/outbound/src/protocol/tests.rs @@ -0,0 +1,223 @@ +use super::*; +use futures::future; +use linkerd_app_core::{ + io, + metrics::prom, + svc::{Layer, NewService, Service, ServiceExt}, + trace, Error, +}; +use linkerd_proxy_client_policy::Meta; +use std::sync::Arc; + +// Mock target type that implements the required params +#[derive(Clone, Debug)] +struct MockTarget { + protocol: Protocol, + parent_ref: ParentRef, +} + +impl MockTarget { + fn new(port: u16, protocol: Protocol) -> Self { + Self { + protocol, + parent_ref: ParentRef(Arc::new(Meta::Resource { + group: "".into(), + kind: "Service".into(), + namespace: "myns".into(), + name: "mysvc".into(), + port: port.try_into().ok(), + section: None, + 
})), + } + } +} + +impl svc::Param for MockTarget { + fn param(&self) -> Protocol { + self.protocol + } +} + +impl svc::Param for MockTarget { + fn param(&self) -> ParentRef { + self.parent_ref.clone() + } +} + +// Test helpers +fn new_ok() -> svc::ArcNewTcp { + svc::ArcNewService::new(|_| svc::BoxService::new(svc::mk(|_| future::ok::<(), Error>(())))) +} + +// Metric assertion helpers +macro_rules! assert_counted { + ($registry:expr, $proto:expr, $port:expr, $value:expr) => {{ + let mut buf = String::new(); + prom::encoding::text::encode_registry(&mut buf, $registry).expect("encode registry failed"); + let lines = buf.split_terminator('\n').collect::>(); + let metric = format!( + "connections_total{{protocol=\"{}\",parent_group=\"\",parent_kind=\"Service\",parent_namespace=\"myns\",parent_name=\"mysvc\",parent_port=\"{}\",parent_section_name=\"\"}}", + $proto, $port + ); + assert_eq!( + lines.iter().find(|l| l.starts_with(&metric)), + Some(&&*format!("{metric} {}", $value)), + "metric '{metric}' not found in:\n{buf}", + ); + }}; +} + +// Test each protocol type +#[tokio::test(flavor = "current_thread")] +async fn http1() { + let _trace = trace::test::trace_init(); + + let target = MockTarget::new(8080, Protocol::Http1); + let (io, _) = io::duplex(100); + + let mut registry = prom::Registry::default(); + let metrics = MetricsFamilies::register(&mut registry); + + metrics::NewRecord::layer(metrics.clone()) + .layer(new_ok()) + .new_service(target) + .oneshot(io::BoxedIo::new(io)) + .await + .expect("service must not fail"); + + assert_counted!(®istry, "http/1", 8080, 1); +} + +#[tokio::test(flavor = "current_thread")] +async fn http2() { + let _trace = trace::test::trace_init(); + + let target = MockTarget::new(8081, Protocol::Http2); + let (io, _) = io::duplex(100); + + let mut registry = prom::Registry::default(); + let metrics = MetricsFamilies::register(&mut registry); + + metrics::NewRecord::layer(metrics.clone()) + .layer(new_ok()) + .new_service(target) + 
.oneshot(io::BoxedIo::new(io)) + .await + .expect("service must not fail"); + + assert_counted!(®istry, "http/2", 8081, 1); +} + +#[tokio::test(flavor = "current_thread")] +async fn opaque() { + let _trace = trace::test::trace_init(); + + let (io, _) = io::duplex(100); + + let mut registry = prom::Registry::default(); + let metrics = MetricsFamilies::register(&mut registry); + + metrics::NewRecord::layer(metrics.clone()) + .layer(new_ok()) + .new_service(MockTarget::new(8082, Protocol::Opaque)) + .oneshot(io::BoxedIo::new(io)) + .await + .expect("service must not fail"); + + assert_counted!(®istry, "opaq", 8082, 1); +} + +#[tokio::test(flavor = "current_thread")] +async fn detect() { + let _trace = trace::test::trace_init(); + + let (io, _) = io::duplex(100); + + let mut registry = prom::Registry::default(); + let metrics = MetricsFamilies::register(&mut registry); + + metrics::NewRecord::layer(metrics.clone()) + .layer(new_ok()) + .new_service(MockTarget::new(8083, Protocol::Detect)) + .oneshot(io::BoxedIo::new(io)) + .await + .expect("service must not fail"); + + assert_counted!(®istry, "detect", 8083, 1); +} + +#[tokio::test(flavor = "current_thread")] +async fn tls() { + let _trace = trace::test::trace_init(); + + let (io, _) = io::duplex(100); + + let mut registry = prom::Registry::default(); + let metrics = MetricsFamilies::register(&mut registry); + + metrics::NewRecord::layer(metrics.clone()) + .layer(new_ok()) + .new_service(MockTarget::new(8084, Protocol::Tls)) + .oneshot(io::BoxedIo::new(io)) + .await + .expect("service must not fail"); + + assert_counted!(®istry, "tls", 8084, 1); +} + +#[tokio::test(flavor = "current_thread")] +async fn http1_x3() { + let _trace = trace::test::trace_init(); + + let target = MockTarget::new(8085, Protocol::Http1); + + let mut registry = prom::Registry::default(); + let metrics = MetricsFamilies::register(&mut registry); + let mut svc = metrics::NewRecord::layer(metrics.clone()) + .layer(new_ok()) + .new_service(target); 
+ + // Make three connections + for _ in 0..3 { + let (io, _) = io::duplex(100); + svc.ready().await.expect("ready"); + svc.call(io::BoxedIo::new(io)) + .await + .expect("service must not fail"); + } + + assert_counted!(®istry, "http/1", 8085, 3); +} + +#[tokio::test(flavor = "current_thread")] +async fn multiple() { + let _trace = trace::test::trace_init(); + + let mut registry = prom::Registry::default(); + let metrics = MetricsFamilies::register(&mut registry); + + // Make one connection of each type + let protocols = vec![ + (8090, Protocol::Http1), + (8091, Protocol::Http2), + (8092, Protocol::Opaque), + (8093, Protocol::Detect), + (8094, Protocol::Tls), + ]; + + for (port, protocol) in protocols { + let (io, _) = io::duplex(100); + metrics::NewRecord::layer(metrics.clone()) + .layer(new_ok()) + .new_service(MockTarget::new(port, protocol)) + .oneshot(io::BoxedIo::new(io)) + .await + .expect("service must not fail"); + } + + assert_counted!(®istry, "http/1", 8090, 1); + assert_counted!(®istry, "http/2", 8091, 1); + assert_counted!(®istry, "opaq", 8092, 1); + assert_counted!(®istry, "detect", 8093, 1); + assert_counted!(®istry, "tls", 8094, 1); +} diff --git a/linkerd/app/outbound/src/sidecar.rs b/linkerd/app/outbound/src/sidecar.rs index 25a046474c..d04d5c6543 100644 --- a/linkerd/app/outbound/src/sidecar.rs +++ b/linkerd/app/outbound/src/sidecar.rs @@ -1,7 +1,7 @@ use crate::{ http, opaq, policy, protocol::{self, Protocol}, - Discovery, Outbound, ParentRef, + tls, Discovery, Outbound, ParentRef, }; use linkerd_app_core::{ io, profiles, @@ -11,7 +11,7 @@ use linkerd_app_core::{ }, svc, transport::addrs::*, - Error, + Addr, Error, }; use std::fmt::Debug; use tokio::sync::watch; @@ -28,10 +28,22 @@ struct Sidecar { #[derive(Clone, Debug)] struct HttpSidecar { orig_dst: OrigDstAddr, - version: http::Version, + version: http::Variant, routes: watch::Receiver, } +#[derive(Clone, Debug)] +struct TlsSidecar { + orig_dst: OrigDstAddr, + routes: watch::Receiver, +} + 
+#[derive(Clone, Debug)] +struct OpaqSidecar { + orig_dst: OrigDstAddr, + routes: watch::Receiver, +} + // === impl Outbound === impl Outbound<()> { @@ -52,7 +64,20 @@ impl Outbound<()> { R: Resolve, R::Resolution: Unpin, { - let opaq = self.to_tcp_connect().push_opaq_cached(resolve.clone()); + let opaq = self.clone().with_stack( + self.to_tcp_connect() + .push_opaq_cached(resolve.clone()) + .into_stack() + .push_map_target(OpaqSidecar::from) + .arc_new_clone_tcp(), + ); + + let tls = self + .to_tcp_connect() + .push_tls_cached(resolve.clone()) + .into_stack() + .push_map_target(TlsSidecar::from) + .arc_new_clone_tcp(); let http = self .to_tcp_connect() @@ -64,7 +89,8 @@ impl Outbound<()> { .push_map_target(HttpSidecar::from) .arc_new_clone_http(); - opaq.push_protocol(http.into_inner()) + opaq.clone() + .push_protocol(http.into_inner(), tls.into_inner()) // Use a dedicated target type to bind discovery results to the // outbound sidecar stack configuration. .map_stack(move |_, _, stk| stk.push_map_target(Sidecar::from)) @@ -108,12 +134,6 @@ impl svc::Param> for Sidecar { } } -impl svc::Param> for Sidecar { - fn param(&self) -> Option { - self.profile.clone()?.logical_addr() - } -} - impl svc::Param> for Sidecar { fn param(&self) -> Option { self.profile.clone() @@ -131,26 +151,16 @@ impl svc::Param for Sidecar { match self.policy.borrow().protocol { policy::Protocol::Http1(_) => Protocol::Http1, policy::Protocol::Http2(_) | policy::Protocol::Grpc(_) => Protocol::Http2, - policy::Protocol::Opaque(_) | policy::Protocol::Tls(_) => Protocol::Opaque, + policy::Protocol::Opaque(_) => Protocol::Opaque, + policy::Protocol::Tls(_) => Protocol::Tls, policy::Protocol::Detect { .. 
} => Protocol::Detect, } } } -impl svc::Param for Sidecar { - fn param(&self) -> opaq::Logical { - if let Some(profile) = self.profile.clone() { - if let Some(profiles::LogicalAddr(addr)) = profile.logical_addr() { - return opaq::Logical::Route(addr, profile); - } - - if let Some((addr, metadata)) = profile.endpoint() { - return opaq::Logical::Forward(Remote(ServerAddr(addr)), metadata); - } - } - - let OrigDstAddr(addr) = self.orig_dst; - opaq::Logical::Forward(Remote(ServerAddr(addr)), Default::default()) +impl svc::Param for Sidecar { + fn param(&self) -> ParentRef { + ParentRef(self.policy.borrow().parent.clone()) } } @@ -173,7 +183,7 @@ impl std::hash::Hash for Sidecar { impl From> for HttpSidecar { fn from(parent: protocol::Http) -> Self { let orig_dst = parent.orig_dst; - let version = svc::Param::::param(&parent); + let version = svc::Param::::param(&parent); let mut policy = parent.policy.clone(); if let Some(mut profile) = parent.profile.clone().map(watch::Receiver::from) { @@ -211,7 +221,7 @@ impl From> for HttpSidecar { impl HttpSidecar { fn mk_policy_routes( OrigDstAddr(orig_dst): OrigDstAddr, - version: http::Version, + version: http::Variant, policy: &policy::ClientPolicy, ) -> Option { let parent_ref = ParentRef(policy.parent.clone()); @@ -227,8 +237,8 @@ impl HttpSidecar { ref http2, .. 
} => match version { - http::Version::Http1 => (http1.routes.clone(), http1.failure_accrual), - http::Version::H2 => (http2.routes.clone(), http2.failure_accrual), + http::Variant::Http1 => (http1.routes.clone(), http1.failure_accrual), + http::Variant::H2 => (http2.routes.clone(), http2.failure_accrual), }, policy::Protocol::Http1(policy::http::Http1 { ref routes, @@ -280,8 +290,8 @@ impl HttpSidecar { } } -impl svc::Param for HttpSidecar { - fn param(&self) -> http::Version { +impl svc::Param for HttpSidecar { + fn param(&self) -> http::Variant { self.version } } @@ -326,3 +336,98 @@ impl std::hash::Hash for HttpSidecar { self.version.hash(state); } } + +// === impl TlsSidecar === + +impl From for TlsSidecar { + fn from(parent: Sidecar) -> Self { + let orig_dst = parent.orig_dst; + let mut policy = parent.policy.clone(); + + let init = Self::mk_policy_routes(orig_dst, &policy.borrow_and_update()) + .expect("initial policy must be tls"); + let routes = tls::spawn_routes(policy, init, move |policy: &policy::ClientPolicy| { + Self::mk_policy_routes(orig_dst, policy) + }); + TlsSidecar { orig_dst, routes } + } +} + +impl TlsSidecar { + fn mk_policy_routes( + OrigDstAddr(orig_dst): OrigDstAddr, + policy: &policy::ClientPolicy, + ) -> Option { + let parent_ref = ParentRef(policy.parent.clone()); + let routes = match policy.protocol { + policy::Protocol::Tls(policy::tls::Tls { ref routes }) => routes.clone(), + _ => { + tracing::info!("Ignoring a discovery update that changed a route from TLS"); + return None; + } + }; + + Some(tls::Routes { + addr: orig_dst.into(), + meta: parent_ref, + routes, + backends: policy.backends.clone(), + }) + } +} + +impl svc::Param> for TlsSidecar { + fn param(&self) -> watch::Receiver { + self.routes.clone() + } +} + +impl std::cmp::PartialEq for TlsSidecar { + fn eq(&self, other: &Self) -> bool { + self.orig_dst == other.orig_dst + } +} + +impl std::cmp::Eq for TlsSidecar {} + +impl std::hash::Hash for TlsSidecar { + fn hash(&self, 
state: &mut H) { + self.orig_dst.hash(state); + } +} + +// === impl OpaqSidecar === + +impl From for OpaqSidecar { + fn from(parent: Sidecar) -> Self { + let routes = opaq::routes_from_discovery( + Addr::Socket(parent.orig_dst.into()), + parent.profile, + parent.policy, + ); + OpaqSidecar { + orig_dst: parent.orig_dst, + routes, + } + } +} + +impl svc::Param> for OpaqSidecar { + fn param(&self) -> watch::Receiver { + self.routes.clone() + } +} + +impl std::cmp::PartialEq for OpaqSidecar { + fn eq(&self, other: &Self) -> bool { + self.orig_dst == other.orig_dst + } +} + +impl std::cmp::Eq for OpaqSidecar {} + +impl std::hash::Hash for OpaqSidecar { + fn hash(&self, state: &mut H) { + self.orig_dst.hash(state); + } +} diff --git a/linkerd/app/outbound/src/tcp.rs b/linkerd/app/outbound/src/tcp.rs index eb433ba6d0..331969b6d8 100644 --- a/linkerd/app/outbound/src/tcp.rs +++ b/linkerd/app/outbound/src/tcp.rs @@ -1,3 +1,4 @@ +pub use self::connect::Connect; use crate::Outbound; use linkerd_app_core::{ io, svc, @@ -9,9 +10,6 @@ mod connect; mod endpoint; pub mod tagged_transport; -pub use self::connect::Connect; -pub use linkerd_app_core::proxy::tcp::Forward; - #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] pub struct Accept { orig_dst: OrigDstAddr, diff --git a/linkerd/app/outbound/src/tcp/connect.rs b/linkerd/app/outbound/src/tcp/connect.rs index 16f4944e1f..1b1aa6066a 100644 --- a/linkerd/app/outbound/src/tcp/connect.rs +++ b/linkerd/app/outbound/src/tcp/connect.rs @@ -21,7 +21,10 @@ pub struct PreventLoopback(S); impl Outbound<()> { pub fn to_tcp_connect(&self) -> Outbound> { - let connect = PreventLoopback(ConnectTcp::new(self.config.proxy.connect.keepalive)); + let connect = PreventLoopback(ConnectTcp::new( + self.config.proxy.connect.keepalive, + self.config.proxy.connect.user_timeout, + )); self.clone().with_stack(connect) } } diff --git a/linkerd/app/outbound/src/tcp/endpoint.rs b/linkerd/app/outbound/src/tcp/endpoint.rs index 3a9c6a8dd3..4986004595 100644 
--- a/linkerd/app/outbound/src/tcp/endpoint.rs +++ b/linkerd/app/outbound/src/tcp/endpoint.rs @@ -1,5 +1,5 @@ use super::{tagged_transport::TaggedTransport, *}; -use crate::ConnectMeta; +use crate::{zone::TcpZoneLabels, ConnectMeta}; use linkerd_app_core::{proxy::http, tls, transport_header::SessionProtocol}; impl Outbound { @@ -22,6 +22,7 @@ impl Outbound { T: svc::Param>, T: svc::Param>, T: svc::Param, + T: svc::Param, // Connector stack. C: svc::MakeConnection, Error = io::Error>, C: Clone + Send + 'static, @@ -45,6 +46,9 @@ impl Outbound { .push(transport::metrics::Client::layer( rt.metrics.proxy.transport.clone(), )) + .push(transport::metrics::zone::client::ZoneMetricsClient::layer( + rt.metrics.prom.zone.clone(), + )) }) } } diff --git a/linkerd/app/outbound/src/test_util.rs b/linkerd/app/outbound/src/test_util.rs index 64d2f04afc..526861d4f8 100644 --- a/linkerd/app/outbound/src/test_util.rs +++ b/linkerd/app/outbound/src/test_util.rs @@ -7,7 +7,7 @@ use linkerd_app_core::{ http::{h1, h2}, tap, }, - transport::{DualListenAddr, Keepalive}, + transport::{DualListenAddr, Keepalive, UserTimeout}, IpMatch, IpNet, ProxyRuntime, }; pub use linkerd_app_test as support; @@ -26,10 +26,12 @@ pub(crate) fn default_config() -> Config { server: config::ServerConfig { addr: DualListenAddr(([0, 0, 0, 0], 0).into(), None), keepalive: Keepalive(None), + user_timeout: UserTimeout(None), http2: h2::ServerParams::default(), }, connect: config::ConnectConfig { keepalive: Keepalive(None), + user_timeout: UserTimeout(None), timeout: Duration::from_secs(1), backoff: exp_backoff::ExponentialBackoff::try_new( Duration::from_millis(100), @@ -66,3 +68,94 @@ pub(crate) fn runtime() -> (ProxyRuntime, drain::Signal) { }; (runtime, drain_tx) } + +pub use self::mock_body::MockBody; + +mod mock_body { + use bytes::Bytes; + use http_body::Frame; + use linkerd_app_core::proxy::http::Body; + use linkerd_app_core::{Error, Result}; + use std::{ + future::Future, + pin::Pin, + task::{Context, 
Poll}, + }; + + #[derive(Default)] + #[pin_project::pin_project] + pub struct MockBody { + #[pin] + data: Option>>, + #[pin] + trailers: Option>>>, + } + + impl MockBody { + pub fn new(data: impl Future> + Send + 'static) -> Self { + Self { + data: Some(Box::pin(data)), + trailers: None, + } + } + + /// Returns a [`MockBody`] that never yields any data. + pub fn pending() -> Self { + let fut = futures::future::pending(); + Self::new(fut) + } + + /// Returns a [`MockBody`] that yields an error when polled. + pub fn error(msg: &'static str) -> Self { + let err = Err(msg.into()); + let fut = futures::future::ready(err); + Self::new(fut) + } + + /// Returns a [`MockBody`] that yields this gRPC code in its trailers section. + pub fn grpc_status(code: u8) -> Self { + let trailers = { + let mut trailers = http::HeaderMap::with_capacity(1); + let status = code.to_string().parse().unwrap(); + trailers.insert("grpc-status", status); + trailers + }; + let fut = futures::future::ready(Some(Ok(trailers))); + + Self { + data: None, + trailers: Some(Box::pin(fut)), + } + } + } + + impl Body for MockBody { + type Data = Bytes; + type Error = Error; + + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + let mut this = self.project(); + + if let Some(rx) = this.data.as_mut().as_pin_mut() { + let ready = futures::ready!(rx.poll(cx)); + *this.data = None; + return Poll::Ready(ready.err().map(Err)); + } + + if let Some(rx) = this.trailers.as_mut().as_pin_mut() { + let ready = futures::ready!(rx.poll(cx)).map(|o| o.map(Frame::trailers)); + *this.trailers = None; + return Poll::Ready(ready); + } + + Poll::Ready(None) + } + + fn is_end_stream(&self) -> bool { + self.data.is_none() && self.trailers.is_none() + } + } +} diff --git a/linkerd/app/outbound/src/tls.rs b/linkerd/app/outbound/src/tls.rs new file mode 100644 index 0000000000..a8e195e4d6 --- /dev/null +++ b/linkerd/app/outbound/src/tls.rs @@ -0,0 +1,137 @@ +use crate::{tcp, Outbound}; 
+use linkerd_app_core::{ + io, + metrics::prom, + proxy::{ + api_resolve::{ConcreteAddr, Metadata}, + core::Resolve, + }, + svc, + tls::{NewDetectRequiredSni, ServerName}, + transport::addrs::*, + Error, +}; +use std::{fmt::Debug, hash::Hash}; +use tokio::sync::watch; + +mod concrete; +mod logical; + +pub use self::logical::{route::filters::errors::*, Concrete, Routes}; + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +struct Tls { + sni: ServerName, + parent: T, +} + +pub fn spawn_routes( + mut route_rx: watch::Receiver, + init: Routes, + mut mk: impl FnMut(&T) -> Option + Send + Sync + 'static, +) -> watch::Receiver +where + T: Send + Sync + 'static, +{ + let (tx, rx) = watch::channel(init); + + tokio::spawn(async move { + loop { + let res = tokio::select! { + biased; + _ = tx.closed() => return, + res = route_rx.changed() => res, + }; + + if res.is_err() { + // Drop the `tx` sender when the profile sender is + // dropped. + return; + } + + if let Some(routes) = (mk)(&*route_rx.borrow_and_update()) { + if tx.send(routes).is_err() { + // Drop the `tx` sender when all of its receivers are dropped. + return; + } + } + } + }); + + rx +} + +#[derive(Clone, Debug, Default)] +pub struct TlsMetrics { + balance: concrete::BalancerMetrics, + route: logical::route::TlsRouteMetrics, +} + +// === impl Outbound === + +impl Outbound { + /// Builds a stack that proxies TLS connections. + /// + /// This stack uses caching so that a router/load-balancer may be reused + /// across multiple connections. + pub fn push_tls_cached(self, resolve: R) -> Outbound> + where + // Tls target + T: Clone + Debug + PartialEq + Eq + Hash + Send + Sync + 'static, + T: svc::Param>, + // Server-side connection + I: io::AsyncRead + io::AsyncWrite + io::PeerAddr + io::Peek, + I: Debug + Send + Sync + Unpin + 'static, + // Endpoint discovery + R: Resolve, + R::Resolution: Unpin, + // TCP endpoint stack. 
+ C: svc::MakeConnection, Error = io::Error>, + C: Clone + Send + Sync + Unpin + 'static, + C::Connection: Send + Unpin, + C::Future: Send + Unpin, + { + self.push_tcp_endpoint() + .push_tls_concrete(resolve) + .push_tls_logical() + .map_stack(|config, _rt, stk| { + stk.push_new_idle_cached(config.discovery_idle_timeout) + // Use a dedicated target type to configure parameters for + // the TLS stack. It also helps narrow the cache key. + .push_map_target(|(sni, parent): (ServerName, T)| Tls { sni, parent }) + .push(NewDetectRequiredSni::layer( + config.proxy.detect_protocol_timeout, + )) + .arc_new_clone_tcp() + }) + } +} + +// === impl Tls === + +impl svc::Param for Tls { + fn param(&self) -> ServerName { + self.sni.clone() + } +} + +impl svc::Param> for Tls +where + T: svc::Param>, +{ + fn param(&self) -> watch::Receiver { + self.parent.param() + } +} + +// === impl TlsMetrics === + +impl TlsMetrics { + pub fn register(registry: &mut prom::Registry) -> Self { + let balance = + concrete::BalancerMetrics::register(registry.sub_registry_with_prefix("balancer")); + let route = + logical::route::TlsRouteMetrics::register(registry.sub_registry_with_prefix("route")); + Self { balance, route } + } +} diff --git a/linkerd/app/outbound/src/tls/concrete.rs b/linkerd/app/outbound/src/tls/concrete.rs new file mode 100644 index 0000000000..aa31a97347 --- /dev/null +++ b/linkerd/app/outbound/src/tls/concrete.rs @@ -0,0 +1,387 @@ +use crate::{ + metrics::BalancerMetricsParams, + stack_labels, + zone::{tcp_zone_labels, TcpZoneLabels}, + BackendRef, Outbound, ParentRef, +}; +use linkerd_app_core::{ + config::QueueConfig, + drain, io, + metrics::{ + self, + prom::{self, EncodeLabelSetMut}, + OutboundZoneLocality, + }, + proxy::{ + api_resolve::{ConcreteAddr, Metadata}, + core::Resolve, + http::AuthorityOverride, + tcp::{self, balance}, + }, + svc::{self, layer::Layer}, + tls::{self, ServerName}, + transport::{self, addrs::*}, + transport_header::SessionProtocol, + Error, 
Infallible, NameAddr, +}; +use std::{fmt::Debug, net::SocketAddr, sync::Arc}; +use tracing::info_span; + +/// Parameter configuring dispatcher behavior. +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub enum Dispatch { + Balance(NameAddr, balance::EwmaConfig), + Forward(Remote, Metadata), + /// A backend dispatcher that explicitly fails all requests. + Fail { + message: Arc, + }, +} + +#[derive(Debug, thiserror::Error)] +#[error("{0}")] +pub struct DispatcherFailed(Arc); + +/// Wraps errors encountered in this module. +#[derive(Debug, thiserror::Error)] +#[error("concrete service {addr}: {source}")] +pub struct ConcreteError { + addr: NameAddr, + #[source] + source: Error, +} + +/// Inner stack target type. +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct Endpoint { + addr: Remote, + is_local: bool, + metadata: Metadata, + parent: T, +} + +pub type BalancerMetrics = BalancerMetricsParams; + +/// A target configuring a load balancer stack. +#[derive(Clone, Debug, PartialEq, Eq)] +struct Balance { + concrete: NameAddr, + ewma: balance::EwmaConfig, + queue: QueueConfig, + parent: T, +} + +#[derive(Clone, Debug, Hash, PartialEq, Eq)] +pub struct ConcreteLabels { + concrete: Arc, +} + +impl prom::EncodeLabelSetMut for ConcreteLabels { + fn encode_label_set(&self, enc: &mut prom::encoding::LabelSetEncoder<'_>) -> std::fmt::Result { + use prom::encoding::EncodeLabel; + + ("concrete", &*self.concrete).encode(enc.encode_label())?; + Ok(()) + } +} + +impl prom::encoding::EncodeLabelSet for ConcreteLabels { + fn encode(&self, mut enc: prom::encoding::LabelSetEncoder<'_>) -> std::fmt::Result { + self.encode_label_set(&mut enc) + } +} + +impl svc::ExtractParam> for BalancerMetricsParams { + fn extract_param(&self, bal: &Balance) -> balance::Metrics { + self.metrics(&ConcreteLabels { + concrete: bal.concrete.to_string().into(), + }) + } +} + +// === impl Outbound === + +impl Outbound { + /// Builds a [`svc::NewService`] stack that builds buffered tls services + /// for 
`T`-typed concrete targets. Connections may be load balanced across + /// a discovered set of replicas or forwarded to a single endpoint, + /// depending on the value of the `Dispatch` parameter. + /// + /// When a balancer has no available inner services, it goes into + /// 'failfast'. While in failfast, buffered requests are failed and the + /// service becomes unavailable so callers may choose alternate concrete + /// services. + pub fn push_tls_concrete( + self, + resolve: R, + ) -> Outbound< + svc::ArcNewService< + T, + impl svc::Service + Clone, + >, + > + where + // Logical target + T: svc::Param, + T: Clone + Debug + Send + Sync + 'static, + T: svc::Param, + // Server-side socket. + I: io::AsyncRead + io::AsyncWrite + Debug + Send + Unpin + 'static, + // Endpoint resolution. + R: Resolve, + R::Resolution: Unpin, + // Endpoint connector. + C: svc::MakeConnection> + Clone + Send + 'static, + C::Connection: Send + Unpin, + C::Metadata: Send + Unpin, + C::Future: Send, + C: Send + Sync + 'static, + { + let resolve = + svc::MapTargetLayer::new(|t: Balance| -> ConcreteAddr { ConcreteAddr(t.concrete) }) + .layer(resolve.into_service()); + + self.map_stack(|config, rt, inner| { + let queue = config.tcp_connection_queue; + + let connect = inner + .push(svc::stack::WithoutConnectionMetadata::layer()) + .push_new_thunk(); + + let forward = connect + .clone() + .push_on_service(rt.metrics.proxy.stack.layer(stack_labels("tls", "forward"))) + .instrument(|e: &Endpoint| info_span!("forward", addr = %e.addr)); + + let endpoint = connect + .push_on_service( + rt.metrics + .proxy + .stack + .layer(stack_labels("tls", "endpoint")), + ) + .instrument(|e: &Endpoint| info_span!("endpoint", addr = %e.addr)); + + let fail = svc::ArcNewService::new(|message: Arc| { + svc::mk(move |_| futures::future::ready(Err(DispatcherFailed(message.clone())))) + }); + + let inbound_ips = config.inbound_ips.clone(); + let balance = endpoint + .push_map_target( + move |((addr, metadata), target): 
((SocketAddr, Metadata), Balance)| { + tracing::trace!(%addr, ?metadata, ?target, "Resolved endpoint"); + let is_local = inbound_ips.contains(&addr.ip()); + Endpoint { + addr: Remote(ServerAddr(addr)), + metadata, + is_local, + parent: target.parent, + } + }, + ) + .lift_new_with_target() + .push(tcp::NewBalance::layer( + resolve, + rt.metrics.prom.tls.balance.clone(), + )) + .push(svc::NewMapErr::layer_from_target::()) + .push_on_service(rt.metrics.proxy.stack.layer(stack_labels("tls", "balance"))) + .instrument(|t: &Balance| info_span!("balance", addr = %t.concrete)); + + balance + .push_switch(Ok::<_, Infallible>, forward.into_inner()) + .push_switch( + move |parent: T| -> Result<_, Infallible> { + Ok(match parent.param() { + Dispatch::Balance(concrete, ewma) => { + svc::Either::Left(svc::Either::Left(Balance { + concrete, + ewma, + queue, + parent, + })) + } + + Dispatch::Forward(addr, meta) => { + svc::Either::Left(svc::Either::Right(Endpoint { + addr, + is_local: false, + metadata: meta, + parent, + })) + } + Dispatch::Fail { message } => svc::Either::Right(message), + }) + }, + svc::stack(fail).check_new_clone().into_inner(), + ) + .push_on_service(tcp::Forward::layer()) + .push_on_service(drain::Retain::layer(rt.drain.clone())) + .push(svc::ArcNewService::layer()) + }) + } +} + +// === impl ConcreteError === + +impl From<(&Balance, Error)> for ConcreteError { + fn from((target, source): (&Balance, Error)) -> Self { + Self { + addr: target.concrete.clone(), + source, + } + } +} + +// === impl Balance === + +impl std::ops::Deref for Balance { + type Target = T; + + fn deref(&self) -> &Self::Target { + &self.parent + } +} + +impl svc::Param for Balance { + fn param(&self) -> balance::EwmaConfig { + self.ewma + } +} + +impl svc::Param for Balance { + fn param(&self) -> svc::queue::Capacity { + svc::queue::Capacity(self.queue.capacity) + } +} + +impl svc::Param for Balance { + fn param(&self) -> svc::queue::Timeout { + 
svc::queue::Timeout(self.queue.failfast_timeout) + } +} + +impl> svc::Param for Balance { + fn param(&self) -> ParentRef { + self.parent.param() + } +} + +impl> svc::Param for Balance { + fn param(&self) -> BackendRef { + self.parent.param() + } +} + +// === impl Endpoint === + +impl svc::Param> for Endpoint { + fn param(&self) -> Remote { + self.addr + } +} + +impl svc::Param> for Endpoint { + fn param(&self) -> Option { + if self.is_local { + return None; + } + self.metadata + .tagged_transport_port() + .map(crate::tcp::tagged_transport::PortOverride) + } +} + +impl svc::Param> for Endpoint { + fn param(&self) -> Option { + if self.is_local { + return None; + } + self.metadata + .authority_override() + .cloned() + .map(AuthorityOverride) + } +} + +impl svc::Param> for Endpoint { + fn param(&self) -> Option { + None + } +} + +impl svc::Param for Endpoint +where + T: svc::Param, +{ + fn param(&self) -> transport::labels::Key { + transport::labels::Key::OutboundClient(self.param()) + } +} + +impl svc::Param for Endpoint +where + T: svc::Param, +{ + fn param(&self) -> metrics::OutboundEndpointLabels { + metrics::OutboundEndpointLabels { + authority: None, + labels: metrics::prefix_labels("dst", self.metadata.labels().iter()), + zone_locality: self.param(), + server_id: self.param(), + target_addr: self.addr.into(), + } + } +} + +impl svc::Param for Endpoint { + fn param(&self) -> OutboundZoneLocality { + OutboundZoneLocality::new(&self.metadata) + } +} + +impl svc::Param for Endpoint { + fn param(&self) -> TcpZoneLabels { + tcp_zone_labels(self.param()) + } +} + +impl svc::Param for Endpoint +where + T: svc::Param, +{ + fn param(&self) -> metrics::EndpointLabels { + metrics::EndpointLabels::from(svc::Param::::param(self)) + } +} + +impl svc::Param for Endpoint { + fn param(&self) -> tls::ConditionalClientTls { + if self.is_local { + return tls::ConditionalClientTls::None(tls::NoClientTls::Loopback); + } + + // If we're transporting an opaque protocol OR we're 
communicating with + // a gateway, then set an ALPN value indicating support for a transport + // header. + let use_transport_header = self.metadata.tagged_transport_port().is_some() + || self.metadata.authority_override().is_some(); + self.metadata + .identity() + .cloned() + .map(move |mut client_tls| { + client_tls.alpn = if use_transport_header { + use linkerd_app_core::transport_header::PROTOCOL; + Some(tls::client::AlpnProtocols(vec![PROTOCOL.into()])) + } else { + None + }; + + tls::ConditionalClientTls::Some(client_tls) + }) + .unwrap_or(tls::ConditionalClientTls::None( + tls::NoClientTls::NotProvidedByServiceDiscovery, + )) + } +} diff --git a/linkerd/app/outbound/src/tls/logical.rs b/linkerd/app/outbound/src/tls/logical.rs new file mode 100644 index 0000000000..b37ab7c415 --- /dev/null +++ b/linkerd/app/outbound/src/tls/logical.rs @@ -0,0 +1,114 @@ +use super::concrete; +use crate::{BackendRef, Outbound, ParentRef}; +use linkerd_app_core::{io, svc, tls::ServerName, Addr, Error}; +use linkerd_proxy_client_policy as client_policy; +use std::{fmt::Debug, hash::Hash, sync::Arc}; +use tokio::sync::watch; + +pub mod route; +pub mod router; + +#[cfg(test)] +mod tests; + +/// Indicates the address used for logical routing. 
+#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub struct LogicalAddr(pub Addr); + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct Routes { + pub addr: Addr, + pub meta: ParentRef, + pub routes: Arc<[client_policy::tls::Route]>, + pub backends: Arc<[client_policy::Backend]>, +} + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub struct Concrete { + target: concrete::Dispatch, + parent: T, + parent_ref: ParentRef, + backend_ref: BackendRef, +} + +#[derive(Debug, thiserror::Error)] +#[error("no route")] +pub struct NoRoute; + +#[derive(Debug, thiserror::Error)] +#[error("logical service {addr}: {source}")] +pub struct LogicalError { + addr: Addr, + #[source] + source: Error, +} + +impl Outbound { + /// Builds a `NewService` that produces a router service for each logical + /// target. + /// + /// The router uses discovery information (provided on the target) to + /// support per-connection routing over a set of concrete inner services. + /// Only available inner services are used for routing. When there are no + /// available backends, requests are failed with a [`svc::stack::LoadShedError`]. + pub fn push_tls_logical(self) -> Outbound> + where + // Logical target. + T: svc::Param>, + T: svc::Param, + T: Eq + Hash + Clone + Debug + Send + Sync + 'static, + // Concrete stack. + I: io::AsyncRead + io::AsyncWrite + Debug + Send + Unpin + 'static, + // Concrete stack. + N: svc::NewService, Service = NSvc> + Clone + Send + Sync + 'static, + NSvc: svc::Service + Clone + Send + Sync + 'static, + NSvc::Future: Send, + NSvc::Error: Into, + { + self.map_stack(|_config, rt, concrete| { + let metrics = rt.metrics.prom.tls.route.clone(); + + concrete + .lift_new() + .push_on_service(svc::layer::mk(move |concrete: N| { + svc::stack(concrete.clone()) + .push(router::Router::layer(metrics.clone())) + .push(svc::NewMapErr::layer_from_target::()) + .arc_new_clone_tcp() + .into_inner() + })) + // Rebuild the inner router stack every time the watch changes. 
+ .push(svc::NewSpawnWatch::::layer_into::< + router::Router, + >()) + .arc_new_clone_tcp() + }) + } +} + +// === impl LogicalError === + +impl From<(&router::Router, Error)> for LogicalError +where + T: Eq + Hash + Clone + Debug, +{ + fn from((target, source): (&router::Router, Error)) -> Self { + let LogicalAddr(addr) = svc::Param::param(target); + Self { addr, source } + } +} + +impl svc::Param for Concrete { + fn param(&self) -> concrete::Dispatch { + self.target.clone() + } +} + +impl svc::Param for Concrete +where + T: svc::Param, +{ + fn param(&self) -> ServerName { + self.parent.param() + } +} diff --git a/linkerd/app/outbound/src/tls/logical/route.rs b/linkerd/app/outbound/src/tls/logical/route.rs new file mode 100644 index 0000000000..de50c9e162 --- /dev/null +++ b/linkerd/app/outbound/src/tls/logical/route.rs @@ -0,0 +1,180 @@ +use super::super::Concrete; +use crate::{ + metrics::transport::{NewTransportRouteMetrics, TransportRouteMetricsFamily}, + ParentRef, RouteRef, +}; +use linkerd_app_core::{io, metrics::prom, svc, tls::ServerName, Addr, Error}; +use linkerd_distribute as distribute; +use linkerd_proxy_client_policy as policy; +use linkerd_tls_route as tls_route; +use std::{fmt::Debug, hash::Hash, sync::Arc}; + +pub(crate) mod filters; + +pub type TlsRouteMetrics = TransportRouteMetricsFamily; + +#[derive(Debug, PartialEq, Eq, Hash)] +pub(crate) struct Backend { + pub(crate) route_ref: RouteRef, + pub(crate) concrete: Concrete, + pub(super) filters: Arc<[policy::tls::Filter]>, +} + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub(crate) struct MatchedRoute { + pub(super) r#match: tls_route::RouteMatch, + pub(super) params: Route, +} + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub(crate) struct Route { + pub(super) parent: T, + pub(super) addr: Addr, + pub(super) parent_ref: ParentRef, + pub(super) route_ref: RouteRef, + pub(super) filters: Arc<[policy::tls::Filter]>, + pub(super) distribution: BackendDistribution, + pub(super) params: 
policy::tls::RouteParams, +} + +#[derive(Clone, Debug, Hash, PartialEq, Eq)] +pub struct RouteLabels { + parent: ParentRef, + route: RouteRef, + hostname: Option, +} + +pub(crate) type BackendDistribution = distribute::Distribution>; +pub(crate) type NewDistribute = distribute::NewDistribute, (), N>; + +/// Wraps errors with route metadata. +#[derive(Debug, thiserror::Error)] +#[error("route {}: {source}", route.0)] +struct RouteError { + route: RouteRef, + #[source] + source: Error, +} + +// === impl Backend === + +impl Clone for Backend { + fn clone(&self) -> Self { + Self { + route_ref: self.route_ref.clone(), + concrete: self.concrete.clone(), + filters: self.filters.clone(), + } + } +} + +// === impl MatchedRoute === + +impl MatchedRoute +where + // Parent target. + T: Debug + Eq + Hash, + T: Clone + Send + Sync + 'static, + T: svc::Param, +{ + /// Builds a route stack that applies policy filters to requests and + /// distributes requests over each route's backends. These [`Concrete`] + /// backends are expected to be cached/shared by the inner stack. + pub(crate) fn layer( + metrics: TlsRouteMetrics, + ) -> impl svc::Layer> + Clone + where + I: io::AsyncRead + io::AsyncWrite + Debug + Send + Unpin + 'static, + // Inner stack. + N: svc::NewService, Service = NSvc> + Clone + Send + Sync + 'static, + NSvc: svc::Service + Clone + Send + Sync + 'static, + NSvc::Future: Send, + NSvc::Error: Into, + { + svc::layer::mk(move |inner| { + svc::stack(inner) + .push_map_target(|t| t) + .push_map_target(|b: Backend| b.concrete) + // apply backend filters + .push(filters::NewApplyFilters::layer()) + .lift_new() + .push(NewDistribute::layer()) + // The router does not take the backend's availability into + // consideration, so we must eagerly fail requests to prevent + // leaking tasks onto the runtime. 
+ .push_on_service(svc::LoadShed::layer()) + // apply route level filters + .push(filters::NewApplyFilters::layer()) + .push(svc::NewMapErr::layer_with(|rt: &Self| { + let route = rt.params.route_ref.clone(); + move |source| RouteError { + route: route.clone(), + source, + } + })) + .push(NewTransportRouteMetrics::layer(metrics.clone())) + .arc_new_clone_tcp() + .into_inner() + }) + } +} + +impl svc::Param> for MatchedRoute { + fn param(&self) -> BackendDistribution { + self.params.distribution.clone() + } +} + +impl svc::Param> for MatchedRoute { + fn param(&self) -> Arc<[policy::tls::Filter]> { + self.params.filters.clone() + } +} + +impl svc::Param> for Backend { + fn param(&self) -> Arc<[policy::tls::Filter]> { + self.filters.clone() + } +} + +impl svc::Param for MatchedRoute +where + T: Eq + Hash + Clone + Debug, + T: svc::Param, +{ + fn param(&self) -> RouteLabels { + RouteLabels { + route: self.params.route_ref.clone(), + parent: self.params.parent_ref.clone(), + hostname: self + .params + .params + .export_hostname_labels + .then(|| self.params.parent.param()), + } + } +} + +// === impl RouteLabels === + +impl prom::EncodeLabelSetMut for RouteLabels { + fn encode_label_set(&self, enc: &mut prom::encoding::LabelSetEncoder<'_>) -> std::fmt::Result { + use prom::encoding::*; + let Self { + parent, + route, + hostname, + } = self; + parent.encode_label_set(enc)?; + route.encode_label_set(enc)?; + ("hostname", hostname.as_deref().map(|n| n.as_str())).encode(enc.encode_label())?; + Ok(()) + } +} + +impl prom::encoding::EncodeLabelSet for RouteLabels { + fn encode(&self, mut enc: prom::encoding::LabelSetEncoder<'_>) -> std::fmt::Result { + use prom::EncodeLabelSetMut; + self.encode_label_set(&mut enc) + } +} diff --git a/linkerd/app/outbound/src/tls/logical/route/filters.rs b/linkerd/app/outbound/src/tls/logical/route/filters.rs new file mode 100644 index 0000000000..408291b875 --- /dev/null +++ b/linkerd/app/outbound/src/tls/logical/route/filters.rs @@ -0,0 
+1,111 @@ +use futures::{future, TryFutureExt}; +use linkerd_app_core::{io, svc, Error}; +use linkerd_proxy_client_policy::tls; +use std::{ + fmt::Debug, + sync::Arc, + task::{Context, Poll}, +}; + +#[derive(Clone, Debug)] +pub struct NewApplyFilters { + inner: N, +} + +#[derive(Clone, Debug)] +pub struct ApplyFilters { + inner: S, + filters: Arc<[tls::Filter]>, +} + +// === impl NewApplyFilters === + +impl NewApplyFilters { + pub fn layer() -> impl svc::layer::Layer + Clone { + svc::layer::mk(move |inner| Self { inner }) + } +} + +impl svc::NewService for NewApplyFilters +where + N: svc::NewService, + T: svc::Param>, +{ + type Service = ApplyFilters; + + fn new_service(&self, target: T) -> Self::Service { + let filters: Arc<[tls::Filter]> = target.param(); + let svc = self.inner.new_service(target); + ApplyFilters { + inner: svc, + filters, + } + } +} + +// === impl ApplyFilters === + +impl ApplyFilters { + fn apply_filters(&self) -> Result<(), Error> { + if let Some(filter) = self.filters.iter().next() { + match filter { + tls::Filter::Forbidden => { + return Err(errors::TLSForbiddenRoute.into()); + } + + tls::Filter::Invalid(message) => { + return Err(errors::TLSInvalidBackend(message.clone()).into()); + } + + tls::Filter::InternalError(message) => { + return Err(errors::TLSInvalidPolicy(message).into()); + } + } + } + + Ok(()) + } +} + +impl svc::Service for ApplyFilters +where + I: io::AsyncRead + io::AsyncWrite + Send + 'static, + S: svc::Service + Send + Clone + 'static, + S::Error: Into, + S::Future: Send, +{ + type Response = S::Response; + type Error = Error; + type Future = future::Either< + future::ErrInto, + future::Ready>, + >; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx).map_err(Into::into) + } + + fn call(&mut self, io: I) -> Self::Future { + if let Err(e) = self.apply_filters() { + return future::Either::Right(future::err(e)); + } + future::Either::Left(self.inner.call(io).err_into()) + } +} 
+ +pub mod errors { + use super::*; + + #[derive(Debug, thiserror::Error)] + #[error("forbidden TLS route")] + pub struct TLSForbiddenRoute; + + #[derive(Debug, thiserror::Error)] + #[error("invalid TLS backend: {0}")] + pub struct TLSInvalidBackend(pub Arc); + + #[derive(Debug, thiserror::Error)] + #[error("invalid client policy: {0}")] + pub struct TLSInvalidPolicy(pub &'static str); +} diff --git a/linkerd/app/outbound/src/tls/logical/router.rs b/linkerd/app/outbound/src/tls/logical/router.rs new file mode 100644 index 0000000000..a40d68c084 --- /dev/null +++ b/linkerd/app/outbound/src/tls/logical/router.rs @@ -0,0 +1,214 @@ +use super::{ + super::{concrete, Concrete}, + route, LogicalAddr, NoRoute, +}; +use crate::{BackendRef, EndpointRef, RouteRef}; +use linkerd_app_core::{ + io, proxy::http, svc, tls::ServerName, transport::addrs::*, Addr, Error, NameAddr, Result, +}; +use linkerd_distribute as distribute; +use linkerd_proxy_client_policy as policy; +use linkerd_tls_route as tls_route; +use std::{fmt::Debug, hash::Hash, sync::Arc}; + +#[derive(Clone, Debug, PartialEq, Eq)] +pub(crate) struct Router { + pub(super) parent: T, + pub(super) addr: Addr, + pub(super) routes: Arc<[tls_route::Route>]>, + pub(super) backends: distribute::Backends>, +} + +type NewBackendCache = distribute::NewBackendCache, (), N, S>; + +// === impl Router === + +impl Router +where + // Parent target type. + T: Eq + Hash + Clone + Debug + Send + Sync + 'static, + T: svc::Param, +{ + pub fn layer( + metrics: route::TlsRouteMetrics, + ) -> impl svc::Layer> + Clone + where + I: io::AsyncRead + io::AsyncWrite + Debug + Send + Unpin + 'static, + // Concrete stack. + N: svc::NewService, Service = NSvc> + Clone + Send + Sync + 'static, + NSvc: svc::Service + Clone + Send + Sync + 'static, + NSvc::Future: Send, + NSvc::Error: Into, + { + svc::layer::mk(move |inner| { + svc::stack(inner) + .lift_new() + // Each route builds over concrete backends. 
All of these + // backends are cached here and shared across routes. + .push(NewBackendCache::layer()) + .push_on_service(route::MatchedRoute::layer(metrics.clone())) + .push(svc::NewOneshotRoute::::layer_cached()) + .arc_new_clone_tcp() + .into_inner() + }) + } +} + +impl From<(crate::tls::Routes, T)> for Router +where + T: Eq + Hash + Clone + Debug, +{ + fn from((rts, parent): (crate::tls::Routes, T)) -> Self { + let crate::tls::Routes { + addr, + meta: parent_ref, + routes, + backends, + } = rts; + + let mk_concrete = { + let parent = parent.clone(); + let parent_ref = parent_ref.clone(); + + move |backend_ref: BackendRef, target: concrete::Dispatch| Concrete { + target, + parent: parent.clone(), + backend_ref, + parent_ref: parent_ref.clone(), + } + }; + + let mk_dispatch = move |bke: &policy::Backend| match bke.dispatcher { + policy::BackendDispatcher::BalanceP2c( + policy::Load::PeakEwma(policy::PeakEwma { decay, default_rtt }), + policy::EndpointDiscovery::DestinationGet { ref path }, + ) => mk_concrete( + BackendRef(bke.meta.clone()), + concrete::Dispatch::Balance( + path.parse::() + .expect("destination must be a nameaddr"), + http::balance::EwmaConfig { decay, default_rtt }, + ), + ), + policy::BackendDispatcher::Forward(addr, ref md) => mk_concrete( + EndpointRef::new(md, addr.port().try_into().expect("port must not be 0")).into(), + concrete::Dispatch::Forward(Remote(ServerAddr(addr)), md.clone()), + ), + policy::BackendDispatcher::Fail { ref message } => mk_concrete( + BackendRef(policy::Meta::new_default("fail")), + concrete::Dispatch::Fail { + message: message.clone(), + }, + ), + }; + + let mk_route_backend = + |route_ref: &RouteRef, rb: &policy::RouteBackend| { + let concrete = mk_dispatch(&rb.backend); + route::Backend { + route_ref: route_ref.clone(), + filters: rb.filters.clone(), + concrete, + } + }; + + let mk_distribution = + |rr: &RouteRef, d: &policy::RouteDistribution| match d { + policy::RouteDistribution::Empty => 
route::BackendDistribution::Empty, + policy::RouteDistribution::FirstAvailable(backends) => { + route::BackendDistribution::first_available( + backends.iter().map(|b| mk_route_backend(rr, b)), + ) + } + policy::RouteDistribution::RandomAvailable(backends) => { + route::BackendDistribution::random_available( + backends + .iter() + .map(|(rb, weight)| (mk_route_backend(rr, rb), *weight)), + ) + .expect("distribution must be valid") + } + }; + + let mk_policy = |policy::tls::Policy { + meta, + distribution, + filters, + params, + }| { + let route_ref = RouteRef(meta); + let parent_ref = parent_ref.clone(); + + let distribution = mk_distribution(&route_ref, &distribution); + route::Route { + addr: addr.clone(), + parent: parent.clone(), + parent_ref: parent_ref.clone(), + route_ref, + filters, + distribution, + params, + } + }; + + let routes = routes + .iter() + .map(|route| tls_route::Route { + snis: route.snis.clone(), + policy: mk_policy(route.policy.clone()), + }) + .collect(); + + let backends = backends.iter().map(mk_dispatch).collect(); + + Self { + routes, + backends, + addr, + parent, + } + } +} + +impl svc::router::SelectRoute for Router +where + T: Clone + Eq + Hash + Debug, + T: svc::Param, +{ + type Key = route::MatchedRoute; + type Error = NoRoute; + + fn select(&self, _: &I) -> Result { + use linkerd_tls_route::SessionInfo; + + let server_name: ServerName = self.parent.param(); + tracing::trace!("Selecting TLS route for {:?}", server_name); + let si = SessionInfo { sni: server_name }; + let (r#match, params) = policy::tls::find(&self.routes, si).ok_or(NoRoute)?; + tracing::debug!(meta = ?params.route_ref, "Selected route"); + tracing::trace!(?r#match); + + Ok(route::MatchedRoute { + r#match, + params: params.clone(), + }) + } +} + +impl svc::Param for Router +where + T: Eq + Hash + Clone + Debug, +{ + fn param(&self) -> LogicalAddr { + LogicalAddr(self.addr.clone()) + } +} + +impl svc::Param>> for Router +where + T: Eq + Hash + Clone + Debug, +{ + fn 
param(&self) -> distribute::Backends> { + self.backends.clone() + } +} diff --git a/linkerd/app/outbound/src/tls/logical/tests.rs b/linkerd/app/outbound/src/tls/logical/tests.rs new file mode 100644 index 0000000000..29a763ea85 --- /dev/null +++ b/linkerd/app/outbound/src/tls/logical/tests.rs @@ -0,0 +1,232 @@ +use super::{Outbound, ParentRef, Routes}; +use crate::test_util::*; +use linkerd_app_core::{ + io, + svc::{self, NewService}, + transport::addrs::*, + Result, +}; +use linkerd_app_test::{AsyncReadExt, AsyncWriteExt}; +use linkerd_proxy_client_policy::{self as client_policy, tls::sni}; +use parking_lot::Mutex; +use std::{ + collections::HashMap, + net::SocketAddr, + sync::Arc, + task::{Context, Poll}, + time::Duration, +}; +use tokio::sync::watch; +use tokio_rustls::rustls::pki_types::DnsName; + +mod basic; + +const REQUEST: &[u8] = b"who r u?"; +type Reponse = tokio::task::JoinHandle>; + +#[derive(Clone, Debug)] +struct Target { + num: usize, + routes: watch::Receiver, +} + +#[derive(Clone, Debug)] + +struct MockServer { + io: support::io::Builder, + addr: SocketAddr, +} + +#[derive(Clone, Debug, Default)] +struct ConnectTcp { + srvs: Arc>>, +} + +// === impl MockServer === + +impl MockServer { + fn new( + addr: SocketAddr, + service_name: &str, + client_hello: Vec, + ) -> (Self, io::DuplexStream, Reponse) { + let mut io = support::io(); + + io.write(&client_hello) + .write(REQUEST) + .read(service_name.as_bytes()); + + let server = MockServer { io, addr }; + let (io, response) = spawn_io(client_hello); + + (server, io, response) + } +} + +// === impl Target === + +impl PartialEq for Target { + fn eq(&self, other: &Self) -> bool { + self.num == other.num + } +} + +impl Eq for Target {} + +impl std::hash::Hash for Target { + fn hash(&self, state: &mut H) { + self.num.hash(state); + } +} + +impl svc::Param> for Target { + fn param(&self) -> watch::Receiver { + self.routes.clone() + } +} + +// === impl ConnectTcp === + +impl ConnectTcp { + fn add_server(&mut 
self, s: MockServer) { + self.srvs.lock().insert(s.addr, s); + } +} + +impl>> svc::Service for ConnectTcp { + type Response = (support::io::Mock, Local); + type Error = io::Error; + type Future = future::Ready>; + + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, t: T) -> Self::Future { + let Remote(ServerAddr(addr)) = t.param(); + let mut mock = self + .srvs + .lock() + .remove(&addr) + .expect("tried to connect to an unexpected address"); + + assert_eq!(addr, mock.addr); + let local = Local(ClientAddr(addr)); + future::ok::<_, support::io::Error>((mock.io.build(), local)) + } +} + +fn spawn_io( + client_hello: Vec, +) -> ( + io::DuplexStream, + tokio::task::JoinHandle>, +) { + let (mut client_io, server_io) = io::duplex(100); + let task = tokio::spawn(async move { + client_io.write_all(&client_hello).await?; + client_io.write_all(REQUEST).await?; + + let mut buf = String::with_capacity(100); + client_io.read_to_string(&mut buf).await?; + Ok(buf) + }); + (server_io, task) +} + +fn default_backend(addr: SocketAddr) -> client_policy::Backend { + use client_policy::{Backend, BackendDispatcher, EndpointMetadata, Meta, Queue}; + Backend { + meta: Meta::new_default("test"), + queue: Queue { + capacity: 100, + failfast_timeout: Duration::from_secs(10), + }, + dispatcher: BackendDispatcher::Forward(addr, EndpointMetadata::default()), + } +} + +fn sni_route(backend: client_policy::Backend, sni: sni::MatchSni) -> client_policy::tls::Route { + use client_policy::{ + tls::{Filter, Policy, Route}, + Meta, RouteBackend, RouteDistribution, + }; + use once_cell::sync::Lazy; + static NO_FILTERS: Lazy> = Lazy::new(|| Arc::new([])); + Route { + snis: vec![sni], + policy: Policy { + meta: Meta::new_default("test_route"), + filters: NO_FILTERS.clone(), + params: Default::default(), + distribution: RouteDistribution::FirstAvailable(Arc::new([RouteBackend { + filters: NO_FILTERS.clone(), + backend, + }])), + }, + } +} + +// 
generates a sample ClientHello TLS message for testing +fn generate_client_hello(sni: &str) -> Vec { + use tokio_rustls::rustls::{ + internal::msgs::{ + base::Payload, + codec::{Codec, Reader}, + enums::Compression, + handshake::{ + ClientExtension, ClientHelloPayload, HandshakeMessagePayload, HandshakePayload, + Random, ServerName, SessionId, + }, + message::{MessagePayload, PlainMessage}, + }, + CipherSuite, ContentType, HandshakeType, ProtocolVersion, + }; + + let sni = DnsName::try_from(sni.to_string()).unwrap(); + let sni = trim_hostname_trailing_dot_for_sni(&sni); + + let mut server_name_bytes = vec![]; + 0u8.encode(&mut server_name_bytes); // encode the type first + (sni.as_ref().len() as u16).encode(&mut server_name_bytes); // then the length as u16 + server_name_bytes.extend_from_slice(sni.as_ref().as_bytes()); // then the server name itself + + let server_name = + ServerName::read(&mut Reader::init(&server_name_bytes)).expect("Server name is valid"); + + let hs_payload = HandshakeMessagePayload { + typ: HandshakeType::ClientHello, + payload: HandshakePayload::ClientHello(ClientHelloPayload { + client_version: ProtocolVersion::TLSv1_2, + random: Random::from([0; 32]), + session_id: SessionId::read(&mut Reader::init(&[0])).unwrap(), + cipher_suites: vec![CipherSuite::TLS_NULL_WITH_NULL_NULL], + compression_methods: vec![Compression::Null], + extensions: vec![ClientExtension::ServerName(vec![server_name])], + }), + }; + + let mut hs_payload_bytes = Vec::default(); + MessagePayload::handshake(hs_payload).encode(&mut hs_payload_bytes); + + let message = PlainMessage { + typ: ContentType::Handshake, + version: ProtocolVersion::TLSv1_2, + payload: Payload::Owned(hs_payload_bytes), + }; + + message.into_unencrypted_opaque().encode() +} + +fn trim_hostname_trailing_dot_for_sni(dns_name: &DnsName<'_>) -> DnsName<'static> { + let dns_name_str = dns_name.as_ref(); + + // RFC6066: "The hostname is represented as a byte string using + // ASCII encoding without a 
trailing dot" + if dns_name_str.ends_with('.') { + let trimmed = &dns_name_str[0..dns_name_str.len() - 1]; + DnsName::try_from(trimmed).unwrap().to_owned() + } else { + dns_name.to_owned() + } +} diff --git a/linkerd/app/outbound/src/tls/logical/tests/basic.rs b/linkerd/app/outbound/src/tls/logical/tests/basic.rs new file mode 100644 index 0000000000..1eb1150e7c --- /dev/null +++ b/linkerd/app/outbound/src/tls/logical/tests/basic.rs @@ -0,0 +1,73 @@ +use super::*; +use crate::tls::Tls; +use linkerd_app_core::{ + svc::ServiceExt, + tls::{NewDetectRequiredSni, ServerName}, + trace, NameAddr, +}; +use linkerd_proxy_client_policy as client_policy; +use std::{net::SocketAddr, str::FromStr, sync::Arc}; +use tokio::sync::watch; + +#[tokio::test(flavor = "current_thread", start_paused = true)] +async fn routes() { + let _trace = trace::test::trace_init(); + + const AUTHORITY: &str = "logical.test.svc.cluster.local"; + const PORT: u16 = 666; + let addr = SocketAddr::new([192, 0, 2, 41].into(), PORT); + let dest: NameAddr = format!("{AUTHORITY}:{PORT}") + .parse::() + .expect("dest addr is valid"); + let resolve = support::resolver().endpoint_exists(dest.clone(), addr, Default::default()); + let (rt, _shutdown) = runtime(); + + let client_hello = generate_client_hello(AUTHORITY); + let (srv, io, rsp) = MockServer::new(addr, AUTHORITY, client_hello); + + let mut connect = ConnectTcp::default(); + connect.add_server(srv); + + let stack = Outbound::new(default_config(), rt, &mut Default::default()) + .with_stack(connect) + .push_tls_concrete(resolve) + .push_tls_logical() + .map_stack(|config, _rt, stk| { + stk.push_new_idle_cached(config.discovery_idle_timeout) + .push_map_target(|(sni, parent): (ServerName, _)| Tls { sni, parent }) + .push(NewDetectRequiredSni::layer(Duration::from_secs(1))) + .arc_new_clone_tcp() + }) + .into_inner(); + + let correct_backend = default_backend(addr); + let correct_route = sni_route( + correct_backend.clone(), + 
sni::MatchSni::Exact(AUTHORITY.into()), + ); + + let wrong_addr = SocketAddr::new([0, 0, 0, 0].into(), PORT); + let wrong_backend = default_backend(wrong_addr); + let wrong_route_1 = sni_route( + wrong_backend.clone(), + sni::MatchSni::from_str("foo").unwrap(), + ); + let wrong_route_2 = sni_route( + wrong_backend.clone(), + sni::MatchSni::from_str("*.test.svc.cluster.local").unwrap(), + ); + + let (_route_tx, routes) = watch::channel(Routes { + addr: addr.into(), + backends: Arc::new([correct_backend, wrong_backend]), + routes: Arc::new([correct_route, wrong_route_1, wrong_route_2]), + meta: ParentRef(client_policy::Meta::new_default("parent")), + }); + + let target = Target { num: 1, routes }; + let svc = stack.new_service(target); + + svc.oneshot(io).await.unwrap(); + let msg = rsp.await.unwrap().unwrap(); + assert_eq!(msg, AUTHORITY); +} diff --git a/linkerd/app/outbound/src/zone.rs b/linkerd/app/outbound/src/zone.rs new file mode 100644 index 0000000000..1c1a1ffa85 --- /dev/null +++ b/linkerd/app/outbound/src/zone.rs @@ -0,0 +1,41 @@ +use linkerd_app_core::{metrics::OutboundZoneLocality, transport::metrics::zone}; +use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue}; +use std::{fmt::Debug, hash::Hash}; + +pub type TcpZoneMetrics = zone::TcpZoneMetricsParams; +pub type TcpZoneLabels = zone::TcpZoneLabels; + +#[derive(Debug, Copy, Clone, Hash, Eq, PartialEq, EncodeLabelSet)] +pub struct TcpZoneOpLabel { + op: TcpZoneOp, + zone_locality: OutboundZoneLocality, +} + +impl TcpZoneOpLabel { + fn new_send_labels(zone_locality: OutboundZoneLocality) -> Self { + Self { + op: TcpZoneOp::Send, + zone_locality, + } + } + + fn new_recv_labels(zone_locality: OutboundZoneLocality) -> Self { + Self { + op: TcpZoneOp::Recv, + zone_locality, + } + } +} + +pub fn tcp_zone_labels(zone_locality: OutboundZoneLocality) -> TcpZoneLabels { + zone::TcpZoneLabels { + recv_labels: TcpZoneOpLabel::new_recv_labels(zone_locality), + send_labels: 
TcpZoneOpLabel::new_send_labels(zone_locality), + } +} + +#[derive(Debug, Copy, Clone, Hash, Eq, PartialEq, EncodeLabelValue)] +enum TcpZoneOp { + Send, + Recv, +} diff --git a/linkerd/app/src/env.rs b/linkerd/app/src/env.rs index 32ee39a092..bdb2986cf1 100644 --- a/linkerd/app/src/env.rs +++ b/linkerd/app/src/env.rs @@ -1,11 +1,12 @@ -use crate::{dns, gateway, identity, inbound, oc_collector, outbound, policy, spire}; +use crate::{dns, gateway, identity, inbound, outbound, policy, spire, trace_collector}; use linkerd_app_core::{ addr, config::*, control::{Config as ControlConfig, ControlAddr}, + http_tracing::CollectorProtocol, proxy::http::{h1, h2}, tls, - transport::{DualListenAddr, Keepalive, ListenAddr}, + transport::{DualListenAddr, Keepalive, ListenAddr, UserTimeout}, AddrMatch, Conditional, IpNet, }; use std::{ @@ -19,7 +20,7 @@ use tracing::{debug, error, info, warn}; mod control; mod http2; -mod opencensus; +mod trace; mod types; use self::types::*; @@ -94,6 +95,9 @@ pub enum ParseError { InvalidTrustAnchors, #[error("not a valid port policy: {0}")] InvalidPortPolicy(String), + + #[error("authority labels may only be set to 'unsafe'")] + NotAnAuthorityLabelsSetting, } // Environment variables to look at when loading the configuration @@ -105,6 +109,8 @@ pub const ENV_ADMIN_LISTEN_ADDR: &str = "LINKERD2_PROXY_ADMIN_LISTEN_ADDR"; pub const ENV_METRICS_RETAIN_IDLE: &str = "LINKERD2_PROXY_METRICS_RETAIN_IDLE"; +pub const ENV_SHUTDOWN_ENDPOINT_ENABLED: &str = "LINKERD2_PROXY_SHUTDOWN_ENDPOINT_ENABLED"; + const ENV_INGRESS_MODE: &str = "LINKERD2_PROXY_INGRESS_MODE"; const ENV_INBOUND_HTTP_QUEUE_CAPACITY: &str = "LINKERD2_PROXY_INBOUND_HTTP_QUEUE_CAPACITY"; @@ -127,6 +133,12 @@ const ENV_OUTBOUND_ACCEPT_KEEPALIVE: &str = "LINKERD2_PROXY_OUTBOUND_ACCEPT_KEEP const ENV_INBOUND_CONNECT_KEEPALIVE: &str = "LINKERD2_PROXY_INBOUND_CONNECT_KEEPALIVE"; const ENV_OUTBOUND_CONNECT_KEEPALIVE: &str = "LINKERD2_PROXY_OUTBOUND_CONNECT_KEEPALIVE"; +const 
ENV_INBOUND_ACCEPT_USER_TIMEOUT: &str = "LINKERD2_PROXY_INBOUND_ACCEPT_USER_TIMEOUT"; +const ENV_OUTBOUND_ACCEPT_USER_TIMEOUT: &str = "LINKERD2_PROXY_OUTBOUND_ACCEPT_USER_TIMEOUT"; + +const ENV_INBOUND_CONNECT_USER_TIMEOUT: &str = "LINKERD2_PROXY_INBOUND_CONNECT_USER_TIMEOUT"; +const ENV_OUTBOUND_CONNECT_USER_TIMEOUT: &str = "LINKERD2_PROXY_OUTBOUND_CONNECT_USER_TIMEOUT"; + const ENV_INBOUND_MAX_IDLE_CONNS_PER_ENDPOINT: &str = "LINKERD2_PROXY_MAX_IDLE_CONNS_PER_ENDPOINT"; const ENV_OUTBOUND_MAX_IDLE_CONNS_PER_ENDPOINT: &str = "LINKERD2_PROXY_OUTBOUND_MAX_IDLE_CONNS_PER_ENDPOINT"; @@ -137,7 +149,18 @@ pub const ENV_OUTBOUND_MAX_IN_FLIGHT: &str = "LINKERD2_PROXY_OUTBOUND_MAX_IN_FLI const ENV_OUTBOUND_DISABLE_INFORMATIONAL_HEADERS: &str = "LINKERD2_PROXY_OUTBOUND_DISABLE_INFORMATIONAL_HEADERS"; -pub const ENV_TRACE_ATTRIBUTES_PATH: &str = "LINKERD2_PROXY_TRACE_ATTRIBUTES_PATH"; +const ENV_OUTBOUND_METRICS_HOSTNAME_LABELS: &str = + "LINKERD2_PROXY_OUTBOUND_METRICS_HOSTNAME_LABELS"; +const ENV_INBOUND_METRICS_AUTHORITY_LABELS: &str = + "LINKERD2_PROXY_INBOUND_METRICS_AUTHORITY_LABELS"; + +const ENV_TRACE_ATTRIBUTES_PATH: &str = "LINKERD2_PROXY_TRACE_ATTRIBUTES_PATH"; +const ENV_TRACE_PROTOCOL: &str = "LINKERD2_PROXY_TRACE_PROTOCOL"; +const ENV_TRACE_SERVICE_NAME: &str = "LINKERD2_PROXY_TRACE_SERVICE_NAME"; +const ENV_TRACE_EXTRA_ATTRIBUTES: &str = "LINKERD2_PROXY_TRACE_EXTRA_ATTRIBUTES"; +// This doesn't have the LINKERD2_ prefix because it is a conventional env var from OpenTelemetry: +// https://opentelemetry.io/docs/specs/otel/configuration/sdk-environment-variables/#general-sdk-configuration +const ENV_OTEL_TRACE_ATTRIBUTES: &str = "OTEL_RESOURCE_ATTRIBUTES"; /// Constrains which destination names may be used for profile/route discovery. 
/// @@ -230,7 +253,6 @@ pub const ENV_DESTINATION_PROFILE_INITIAL_TIMEOUT: &str = "LINKERD2_PROXY_DESTINATION_PROFILE_INITIAL_TIMEOUT"; pub const ENV_TAP_SVC_NAME: &str = "LINKERD2_PROXY_TAP_SVC_NAME"; -const ENV_RESOLV_CONF: &str = "LINKERD2_PROXY_RESOLV_CONF"; /// Configures a minimum value for the TTL of DNS lookups. /// @@ -282,8 +304,6 @@ const DEFAULT_OUTBOUND_CONNECT_BACKOFF: ExponentialBackoff = const DEFAULT_CONTROL_QUEUE_CAPACITY: usize = 100; const DEFAULT_CONTROL_FAILFAST_TIMEOUT: Duration = Duration::from_secs(10); -const DEFAULT_RESOLV_CONF: &str = "/etc/resolv.conf"; - const DEFAULT_INITIAL_STREAM_WINDOW_SIZE: u32 = 65_535; // Protocol default const DEFAULT_INITIAL_CONNECTION_WINDOW_SIZE: u32 = 1048576; // 1MB ~ 16 streams at capacity @@ -372,6 +392,16 @@ pub fn parse_config(strings: &S) -> Result let inbound_connect_keepalive = parse(strings, ENV_INBOUND_CONNECT_KEEPALIVE, parse_duration); let outbound_connect_keepalive = parse(strings, ENV_OUTBOUND_CONNECT_KEEPALIVE, parse_duration); + let inbound_accept_user_timeout = + parse(strings, ENV_INBOUND_ACCEPT_USER_TIMEOUT, parse_duration); + let outbound_accept_user_timeout = + parse(strings, ENV_OUTBOUND_ACCEPT_USER_TIMEOUT, parse_duration); + + let inbound_connect_user_timeout = + parse(strings, ENV_INBOUND_CONNECT_USER_TIMEOUT, parse_duration); + let outbound_connect_user_timeout = + parse(strings, ENV_OUTBOUND_CONNECT_USER_TIMEOUT, parse_duration); + let shutdown_grace_period = parse(strings, ENV_SHUTDOWN_GRACE_PERIOD, parse_duration); let inbound_discovery_idle_timeout = @@ -397,9 +427,9 @@ pub fn parse_config(strings: &S) -> Result let control_receive_limits = control::mk_receive_limits(strings)?; - // DNS + let shutdown_endpoint_enabled = parse(strings, ENV_SHUTDOWN_ENDPOINT_ENABLED, parse_bool); - let resolv_conf_path = strings.get(ENV_RESOLV_CONF); + // DNS let dns_min_ttl = parse(strings, ENV_DNS_MIN_TTL, parse_duration); let dns_max_ttl = parse(strings, ENV_DNS_MAX_TTL, parse_duration); @@ 
-408,7 +438,11 @@ pub fn parse_config(strings: &S) -> Result let hostname = strings.get(ENV_HOSTNAME); - let oc_attributes_file_path = strings.get(ENV_TRACE_ATTRIBUTES_PATH); + let trace_attributes_file_path = strings.get(ENV_TRACE_ATTRIBUTES_PATH); + let trace_extra_attributes = strings.get(ENV_TRACE_EXTRA_ATTRIBUTES); + let trace_otel_attributes = strings.get(ENV_OTEL_TRACE_ATTRIBUTES); + let trace_protocol = strings.get(ENV_TRACE_PROTOCOL); + let trace_service_name = strings.get(ENV_TRACE_SERVICE_NAME); let trace_collector_addr = parse_control_addr(strings, ENV_TRACE_COLLECTOR_SVC_BASE); @@ -473,9 +507,11 @@ pub fn parse_config(strings: &S) -> Result }; let keepalive = Keepalive(outbound_accept_keepalive?); + let user_timeout = UserTimeout(outbound_accept_user_timeout?); let server = ServerConfig { addr, keepalive, + user_timeout, http2: http2::parse_server(strings, "LINKERD2_PROXY_OUTBOUND_SERVER_HTTP2")?, }; let discovery_idle_timeout = @@ -483,6 +519,7 @@ pub fn parse_config(strings: &S) -> Result let max_idle = outbound_max_idle_per_endpoint?.unwrap_or(DEFAULT_OUTBOUND_MAX_IDLE_CONNS_PER_ENDPOINT); let keepalive = Keepalive(outbound_connect_keepalive?); + let user_timeout = UserTimeout(outbound_connect_user_timeout?); let connection_pool_timeout = parse( strings, ENV_OUTBOUND_HTTP1_CONNECTION_POOL_IDLE_TIMEOUT, @@ -491,6 +528,7 @@ pub fn parse_config(strings: &S) -> Result let connect = ConnectConfig { keepalive, + user_timeout, timeout: outbound_connect_timeout?.unwrap_or(DEFAULT_OUTBOUND_CONNECT_TIMEOUT), backoff: parse_backoff( strings, @@ -561,9 +599,11 @@ pub fn parse_config(strings: &S) -> Result None, ); let keepalive = Keepalive(inbound_accept_keepalive?); + let user_timeout = UserTimeout(inbound_accept_user_timeout?); let server = ServerConfig { addr, keepalive, + user_timeout, http2: http2::parse_server(strings, "LINKERD2_PROXY_INBOUND_SERVER_HTTP2")?, }; let discovery_idle_timeout = @@ -577,8 +617,10 @@ pub fn parse_config(strings: &S) -> Result 
)? .unwrap_or(DEFAULT_INBOUND_HTTP1_CONNECTION_POOL_IDLE_TIMEOUT); let keepalive = Keepalive(inbound_connect_keepalive?); + let user_timeout = UserTimeout(inbound_connect_user_timeout?); let connect = ConnectConfig { keepalive, + user_timeout, timeout: inbound_connect_timeout?.unwrap_or(DEFAULT_INBOUND_CONNECT_TIMEOUT), backoff: parse_backoff( strings, @@ -601,6 +643,17 @@ pub fn parse_config(strings: &S) -> Result let detect_protocol_timeout = inbound_detect_timeout?.unwrap_or(DEFAULT_INBOUND_DETECT_TIMEOUT); + let unsafe_authority_labels = parse(strings, ENV_INBOUND_METRICS_AUTHORITY_LABELS, |s| { + if s.is_empty() { + Ok(false) + } else if s.eq_ignore_ascii_case("unsafe") { + Ok(true) + } else { + Err(ParseError::NotAnAuthorityLabelsSetting) + } + })? + .unwrap_or(false); + // Ensure that connections that directly target the inbound port are secured (unless // identity is disabled). let policy = { @@ -687,6 +740,7 @@ pub fn parse_config(strings: &S) -> Result failfast_timeout: inbound_http_failfast_timeout? .unwrap_or(DEFAULT_INBOUND_HTTP_FAILFAST_TIMEOUT), }, + unsafe_authority_labels, } }; @@ -752,11 +806,14 @@ pub fn parse_config(strings: &S) -> Result }, } }; + let export_hostname_labels = + parse(strings, ENV_OUTBOUND_METRICS_HOSTNAME_LABELS, parse_bool)?.unwrap_or(false); policy::Config { control, workload, limits, + export_hostname_labels, } }; @@ -765,6 +822,7 @@ pub fn parse_config(strings: &S) -> Result server: ServerConfig { addr: DualListenAddr(admin_listener_addr, None), keepalive: inbound.proxy.server.keepalive, + user_timeout: inbound.proxy.server.user_timeout, http2: inbound.proxy.server.http2.clone(), }, @@ -773,18 +831,16 @@ pub fn parse_config(strings: &S) -> Result // configuration. #[cfg(feature = "pprof")] enable_profiling: true, + enable_shutdown: shutdown_endpoint_enabled?.unwrap_or_default(), }; let dns = dns::Config { min_ttl: dns_min_ttl?, max_ttl: dns_max_ttl?, - resolv_conf_path: resolv_conf_path? 
- .unwrap_or_else(|| DEFAULT_RESOLV_CONF.into()) - .into(), }; - let oc_collector = match trace_collector_addr? { - None => oc_collector::Config::Disabled, + let trace_collector = match trace_collector_addr? { + None => trace_collector::Config::Disabled, Some(addr) => { let connect = if addr.addr.is_loopback() { inbound.proxy.connect.clone() @@ -796,16 +852,35 @@ pub fn parse_config(strings: &S) -> Result } else { outbound.http_request_queue.failfast_timeout }; - let attributes = oc_attributes_file_path + let mut attributes = trace_attributes_file_path .map(|path| match path.and_then(|p| p.parse::().ok()) { - Some(path) => opencensus::read_trace_attributes(&path), + Some(path) => trace::read_trace_attributes(&path), None => HashMap::new(), }) .unwrap_or_default(); + if let Ok(Some(attrs)) = trace_extra_attributes { + if !attrs.is_empty() { + attributes.extend(trace::parse_env_trace_attributes(&attrs)); + } + } + if let Ok(Some(attrs)) = trace_otel_attributes { + if !attrs.is_empty() { + attributes.extend(trace::parse_env_trace_attributes(&attrs)); + } + } + + let trace_protocol = trace_protocol + .map(|proto| proto.and_then(|p| p.parse::().ok())) + .ok() + .flatten() + .unwrap_or_default(); - oc_collector::Config::Enabled(Box::new(oc_collector::EnabledConfig { + let trace_service_name = trace_service_name.ok().flatten(); + + trace_collector::Config::Enabled(Box::new(trace_collector::EnabledConfig { attributes, hostname: hostname?, + service_name: trace_service_name, control: ControlConfig { addr, connect, @@ -814,6 +889,7 @@ pub fn parse_config(strings: &S) -> Result failfast_timeout, }, }, + kind: trace_protocol, })) } }; @@ -824,6 +900,7 @@ pub fn parse_config(strings: &S) -> Result config: ServerConfig { addr: DualListenAddr(addr, None), keepalive: inbound.proxy.server.keepalive, + user_timeout: inbound.proxy.server.user_timeout, http2: inbound.proxy.server.http2.clone(), }, }) @@ -892,7 +969,7 @@ pub fn parse_config(strings: &S) -> Result dns, dst, tap, - 
oc_collector, + trace_collector, policy, identity, outbound, @@ -1020,6 +1097,8 @@ fn parse_default_policy( ) .into()), + "audit" => Ok(inbound::policy::defaults::audit(detect_timeout).into()), + name => Err(ParseError::InvalidPortPolicy(name.to_string())), } } diff --git a/linkerd/app/src/env/opencensus.rs b/linkerd/app/src/env/trace.rs similarity index 95% rename from linkerd/app/src/env/opencensus.rs rename to linkerd/app/src/env/trace.rs index 98b9c4e3f0..ca1eab54a8 100644 --- a/linkerd/app/src/env/opencensus.rs +++ b/linkerd/app/src/env/trace.rs @@ -14,6 +14,10 @@ pub(super) fn read_trace_attributes(path: &std::path::Path) -> HashMap HashMap { + parse_attrs(attrs) +} + fn parse_attrs(attrs: &str) -> HashMap { attrs .lines() diff --git a/linkerd/app/src/env/types.rs b/linkerd/app/src/env/types.rs index dad1a52ab5..5205249717 100644 --- a/linkerd/app/src/env/types.rs +++ b/linkerd/app/src/env/types.rs @@ -113,9 +113,8 @@ pub(super) fn parse_port_range_set(s: &str) -> Result, Pa let low = parse_number::(low)?; if let Some(high) = parts.next() { let high = high.trim(); - let high = parse_number::(high).map_err(|e| { + let high = parse_number::(high).inspect_err(|_| { error!("Not a valid port range: {part}"); - e })?; if high < low { error!("Not a valid port range: {part}; {high} is greater than {low}"); diff --git a/linkerd/app/src/lib.rs b/linkerd/app/src/lib.rs index aa73893851..d182c2dc1f 100644 --- a/linkerd/app/src/lib.rs +++ b/linkerd/app/src/lib.rs @@ -7,10 +7,10 @@ pub mod dst; pub mod env; pub mod identity; -pub mod oc_collector; pub mod policy; pub mod spire; pub mod tap; +pub mod trace_collector; pub use self::metrics::Metrics; use futures::{future, Future, FutureExt}; @@ -29,12 +29,13 @@ pub use linkerd_app_core::{metrics, trace, transport::BindTcp, BUILD_INFO}; use linkerd_app_gateway as gateway; use linkerd_app_inbound::{self as inbound, Inbound}; use linkerd_app_outbound::{self as outbound, Outbound}; +pub use linkerd_workers::Workers; use 
std::pin::Pin; use tokio::{ sync::mpsc, time::{self, Duration}, }; -use tracing::{debug, info, info_span, Instrument}; +use tracing::{debug, error, info, info_span, Instrument}; /// Spawns a sidecar proxy. /// @@ -60,7 +61,7 @@ pub struct Config { pub policy: policy::Config, pub admin: admin::Config, pub tap: tap::Config, - pub oc_collector: oc_collector::Config, + pub trace_collector: trace_collector::Config, /// Grace period for graceful shutdowns. /// @@ -75,7 +76,7 @@ pub struct App { dst: ControlAddr, identity: identity::Identity, inbound_addr: Local, - oc_collector: oc_collector::OcCollector, + trace_collector: trace_collector::TraceCollector, outbound_addr: Local, outbound_addr_additional: Option>, start_proxy: Pin + Send + 'static>>, @@ -123,7 +124,7 @@ impl Config { policy, identity, inbound, - oc_collector, + trace_collector, outbound, gateway, tap, @@ -133,7 +134,7 @@ impl Config { let (metrics, report) = Metrics::new(admin.metrics_retain_idle); debug!("Building DNS client"); - let dns = dns.build(); + let dns = dns.build(registry.sub_registry_with_prefix("control_dns")); // Ensure that we've obtained a valid identity before binding any servers. debug!("Building Identity client"); @@ -143,7 +144,11 @@ impl Config { ); info_span!("identity").in_scope(|| { - identity.build(dns.resolver.clone(), metrics.control.clone(), id_metrics) + identity.build( + dns.resolver("identity"), + metrics.control.clone(), + id_metrics, + ) })? 
}; @@ -161,7 +166,7 @@ impl Config { let control_metrics = ControlMetrics::register(registry.sub_registry_with_prefix("control_destination")); let metrics = metrics.control.clone(); - let dns = dns.resolver.clone(); + let dns = dns.resolver("destination"); info_span!("dst").in_scope(|| { dst.build( dns, @@ -173,10 +178,11 @@ impl Config { }?; debug!("Building Policy client"); + let export_hostname_labels = policy.export_hostname_labels; let policies = { let control_metrics = ControlMetrics::register(registry.sub_registry_with_prefix("control_policy")); - let dns = dns.resolver.clone(); + let dns = dns.resolver("policy"); let metrics = metrics.control.clone(); info_span!("policy").in_scope(|| { policy.build( @@ -188,16 +194,27 @@ impl Config { }) }?; - debug!(config = ?oc_collector, "Building client"); - let oc_collector = { - let control_metrics = - ControlMetrics::register(registry.sub_registry_with_prefix("opencensus")); + debug!(config = ?trace_collector, "Building trace collector"); + let trace_collector = { + let control_metrics = if let Some(prefix) = trace_collector.metrics_prefix() { + ControlMetrics::register(registry.sub_registry_with_prefix(prefix)) + } else { + ControlMetrics::register(&mut prom::Registry::default()) + }; let identity = identity.receiver().new_client(); - let dns = dns.resolver; + let dns = dns.resolver("trace_collector"); let client_metrics = metrics.control.clone(); - let metrics = metrics.opencensus; - info_span!("opencensus").in_scope(|| { - oc_collector.build(identity, dns, metrics, control_metrics, client_metrics) + let otel_metrics = metrics.opentelemetry; + let oc_metrics = metrics.opencensus; + info_span!("tracing").in_scope(|| { + trace_collector.build( + identity, + dns, + oc_metrics, + otel_metrics, + control_metrics, + client_metrics, + ) }) }?; @@ -205,10 +222,14 @@ impl Config { identity: identity.receiver(), metrics: metrics.proxy, tap: tap.registry(), - span_sink: oc_collector.span_sink(), + span_sink: 
trace_collector.span_sink(), drain: drain_rx.clone(), }; - let inbound = Inbound::new(inbound, runtime.clone()); + let inbound = Inbound::new( + inbound, + runtime.clone(), + registry.sub_registry_with_prefix("inbound"), + ); let outbound = Outbound::new( outbound, runtime, @@ -227,6 +248,7 @@ impl Config { policies.client.clone(), policies.backoff, policies.limits, + export_hostname_labels, ); let dst_addr = dst.addr.clone(); @@ -277,7 +299,10 @@ impl Config { }) }; - metrics::process::register(registry.sub_registry_with_prefix("process")); + if let Err(error) = metrics::process::register(registry.sub_registry_with_prefix("process")) + { + error!(%error, "Failed to register process metrics"); + } registry.register("proxy_build_info", "Proxy build info", BUILD_INFO.metric()); let admin = { @@ -309,7 +334,7 @@ impl Config { drain: drain_tx, identity, inbound_addr, - oc_collector, + trace_collector, outbound_addr, outbound_addr_additional, start_proxy, @@ -369,10 +394,10 @@ impl App { self.identity.receiver().local_id().clone() } - pub fn opencensus_addr(&self) -> Option<&ControlAddr> { - match self.oc_collector { - oc_collector::OcCollector::Disabled { .. } => None, - oc_collector::OcCollector::Enabled(ref oc) => Some(&oc.addr), + pub fn tracing_addr(&self) -> Option<&ControlAddr> { + match self.trace_collector { + trace_collector::TraceCollector::Disabled { .. } => None, + crate::trace_collector::TraceCollector::Enabled(ref oc) => Some(&oc.addr), } } @@ -381,7 +406,7 @@ impl App { admin, drain, identity, - oc_collector, + trace_collector: collector, start_proxy, tap, .. 
@@ -446,8 +471,8 @@ impl App { tokio::spawn(serve.instrument(info_span!("tap").or_current())); } - if let oc_collector::OcCollector::Enabled(oc) = oc_collector { - tokio::spawn(oc.task.instrument(info_span!("opencensus").or_current())); + if let trace_collector::TraceCollector::Enabled(collector) = collector { + tokio::spawn(collector.task.instrument(info_span!("tracing"))); } // we don't care if the admin shutdown channel is diff --git a/linkerd/app/src/oc_collector.rs b/linkerd/app/src/oc_collector.rs deleted file mode 100644 index e8f25e23c5..0000000000 --- a/linkerd/app/src/oc_collector.rs +++ /dev/null @@ -1,103 +0,0 @@ -use linkerd_app_core::{ - control, dns, identity, metrics::ControlHttp as HttpMetrics, svc::NewService, Error, -}; -use linkerd_opencensus::{self as opencensus, metrics, proto}; -use std::{collections::HashMap, future::Future, pin::Pin, time::SystemTime}; -use tokio::sync::mpsc; -use tokio_stream::wrappers::ReceiverStream; -use tracing::Instrument; - -#[derive(Clone, Debug)] -pub enum Config { - Disabled, - Enabled(Box), -} - -#[derive(Clone, Debug)] -pub struct EnabledConfig { - pub control: control::Config, - pub attributes: HashMap, - pub hostname: Option, -} - -pub type Task = Pin + Send + 'static>>; - -pub type SpanSink = mpsc::Sender; - -pub enum OcCollector { - Disabled, - Enabled(Box), -} - -pub struct EnabledCollector { - pub addr: control::ControlAddr, - pub span_sink: SpanSink, - pub task: Task, -} - -impl Config { - const SPAN_BUFFER_CAPACITY: usize = 100; - const SERVICE_NAME: &'static str = "linkerd-proxy"; - - pub fn build( - self, - identity: identity::NewClient, - dns: dns::Resolver, - legacy_metrics: metrics::Registry, - control_metrics: control::Metrics, - client_metrics: HttpMetrics, - ) -> Result { - match self { - Config::Disabled => Ok(OcCollector::Disabled), - Config::Enabled(inner) => { - let addr = inner.control.addr.clone(); - let svc = inner - .control - .build(dns, client_metrics, control_metrics, identity) - 
.new_service(()); - - let (span_sink, spans_rx) = mpsc::channel(Self::SPAN_BUFFER_CAPACITY); - let spans_rx = ReceiverStream::new(spans_rx); - - let task = { - use self::proto::agent::common::v1 as oc; - - let node = oc::Node { - identifier: Some(oc::ProcessIdentifier { - host_name: inner.hostname.unwrap_or_default(), - pid: std::process::id(), - start_timestamp: Some(SystemTime::now().into()), - }), - service_info: Some(oc::ServiceInfo { - name: Self::SERVICE_NAME.to_string(), - }), - attributes: inner.attributes, - ..oc::Node::default() - }; - - let addr = addr.clone(); - Box::pin( - opencensus::export_spans(svc, node, spans_rx, legacy_metrics).instrument( - tracing::debug_span!("opencensus", peer.addr = %addr).or_current(), - ), - ) - }; - - Ok(OcCollector::Enabled(Box::new(EnabledCollector { - addr, - task, - span_sink, - }))) - } - } - } -} - -impl OcCollector { - pub fn span_sink(&self) -> Option { - match self { - OcCollector::Disabled => None, - OcCollector::Enabled(inner) => Some(inner.span_sink.clone()), - } - } -} diff --git a/linkerd/app/src/policy.rs b/linkerd/app/src/policy.rs index 4f93d76473..218ebe6b2b 100644 --- a/linkerd/app/src/policy.rs +++ b/linkerd/app/src/policy.rs @@ -15,6 +15,7 @@ pub struct Config { pub control: control::Config, pub workload: String, pub limits: ReceiveLimits, + pub export_hostname_labels: bool, } /// Handles to policy service clients. 
diff --git a/linkerd/app/src/spire.rs b/linkerd/app/src/spire.rs index 5e87d50865..d7872fef25 100644 --- a/linkerd/app/src/spire.rs +++ b/linkerd/app/src/spire.rs @@ -1,12 +1,12 @@ use linkerd_app_core::{exp_backoff::ExponentialBackoff, Error}; use std::sync::Arc; -use tokio::net::UnixStream; use tokio::sync::watch; -use tonic::transport::{Endpoint, Uri}; pub use linkerd_app_core::identity::client::spire as client; +#[cfg(target_os = "linux")] const UNIX_PREFIX: &str = "unix:"; +#[cfg(target_os = "linux")] const TONIC_DEFAULT_URI: &str = "http://[::]:50051"; #[derive(Clone, Debug)] @@ -17,17 +17,27 @@ pub struct Config { // Connects to SPIRE workload API via Unix Domain Socket pub struct Client { + #[cfg_attr(not(target_os = "linux"), allow(dead_code))] config: Config, } // === impl Client === +#[cfg(target_os = "linux")] impl From for Client { fn from(config: Config) -> Self { Self { config } } } +#[cfg(not(target_os = "linux"))] +impl From for Client { + fn from(_: Config) -> Self { + panic!("Spire is supported on Linux only") + } +} + +#[cfg(target_os = "linux")] impl tower::Service<()> for Client { type Response = tonic::Response>; type Error = Error; @@ -44,6 +54,9 @@ impl tower::Service<()> for Client { let socket = self.config.socket_addr.clone(); let backoff = self.config.backoff; Box::pin(async move { + use tokio::net::UnixStream; + use tonic::transport::{Endpoint, Uri}; + // Strip the 'unix:' prefix for tonic compatibility. let stripped_path = socket .strip_prefix(UNIX_PREFIX) @@ -55,7 +68,8 @@ impl tower::Service<()> for Client { // as the request to the `MakeConnection`. let chan = Endpoint::try_from(TONIC_DEFAULT_URI)? 
.connect_with_connector(tower::util::service_fn(move |_: Uri| { - UnixStream::connect(stripped_path.clone()) + use futures::TryFutureExt; + UnixStream::connect(stripped_path.clone()).map_ok(hyper_util::rt::TokioIo::new) })) .await?; @@ -66,3 +80,21 @@ impl tower::Service<()> for Client { }) } } + +#[cfg(not(target_os = "linux"))] +impl tower::Service<()> for Client { + type Response = tonic::Response>; + type Error = Error; + type Future = futures::future::BoxFuture<'static, Result>; + + fn poll_ready( + &mut self, + _cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + unimplemented!("Spire is supported on Linux only") + } + + fn call(&mut self, _req: ()) -> Self::Future { + unimplemented!("Spire is supported on Linux only") + } +} diff --git a/linkerd/app/src/trace_collector.rs b/linkerd/app/src/trace_collector.rs new file mode 100644 index 0000000000..b173848c61 --- /dev/null +++ b/linkerd/app/src/trace_collector.rs @@ -0,0 +1,117 @@ +use linkerd_app_core::{ + control, dns, + http_tracing::{CollectorProtocol, SpanSink}, + identity, + metrics::ControlHttp as HttpMetrics, + opencensus, opentelemetry, + svc::NewService, +}; +use linkerd_error::Error; +use otel_collector::OtelCollectorAttributes; +use std::{collections::HashMap, future::Future, pin::Pin}; + +pub mod oc_collector; +pub mod otel_collector; + +const SPAN_BUFFER_CAPACITY: usize = 100; +const SERVICE_NAME: &str = "linkerd-proxy"; + +#[derive(Clone, Debug)] +pub enum Config { + Disabled, + Enabled(Box), +} + +#[derive(Clone, Debug)] +pub struct EnabledConfig { + pub control: control::Config, + pub attributes: HashMap, + pub hostname: Option, + pub service_name: Option, + pub kind: CollectorProtocol, +} + +pub type Task = Pin + Send + 'static>>; + +pub enum TraceCollector { + Disabled, + Enabled(Box), +} + +pub struct EnabledCollector { + pub addr: control::ControlAddr, + pub kind: CollectorProtocol, + pub span_sink: SpanSink, + pub task: Task, +} + +impl TraceCollector { + pub fn span_sink(&self) 
-> Option { + match self { + TraceCollector::Disabled => None, + TraceCollector::Enabled(inner) => Some(inner.span_sink.clone()), + } + } +} + +impl Config { + pub fn metrics_prefix(&self) -> Option<&'static str> { + match self { + Config::Disabled => None, + Config::Enabled(config) => match config.kind { + CollectorProtocol::OpenCensus => Some("opencensus"), + CollectorProtocol::OpenTelemetry => Some("opentelemetry"), + }, + } + } + + pub fn build( + self, + identity: identity::NewClient, + dns: dns::Resolver, + legacy_oc_metrics: opencensus::metrics::Registry, + legacy_otel_metrics: opentelemetry::metrics::Registry, + control_metrics: control::Metrics, + client_metrics: HttpMetrics, + ) -> Result { + match self { + Config::Disabled => Ok(TraceCollector::Disabled), + Config::Enabled(inner) => { + let addr = inner.control.addr.clone(); + let svc = inner + .control + .build(dns, client_metrics, control_metrics, identity) + .new_service(()); + let svc_name = inner + .service_name + .unwrap_or_else(|| SERVICE_NAME.to_string()); + + let collector = match inner.kind { + CollectorProtocol::OpenCensus => oc_collector::create_collector( + addr.clone(), + inner.hostname, + svc_name, + inner.attributes, + svc, + legacy_oc_metrics, + ), + CollectorProtocol::OpenTelemetry => { + let attributes = OtelCollectorAttributes { + hostname: inner.hostname, + service_name: svc_name, + extra: inner.attributes, + }; + otel_collector::create_collector( + addr.clone(), + attributes, + svc, + legacy_otel_metrics, + ) + } + }; + + Ok(TraceCollector::Enabled(Box::new(collector))) + } + } + } +} diff --git a/linkerd/app/src/trace_collector/oc_collector.rs b/linkerd/app/src/trace_collector/oc_collector.rs new file mode 100644 index 0000000000..96d92856ae --- /dev/null +++ b/linkerd/app/src/trace_collector/oc_collector.rs @@ -0,0 +1,57 @@ +use crate::trace_collector::EnabledCollector; +use linkerd_app_core::{ + control::ControlAddr, http_tracing::CollectorProtocol, proxy::http::Body, Error, +}; 
+use linkerd_opencensus::{self as opencensus, metrics, proto}; +use std::{collections::HashMap, time::SystemTime}; +use tokio::sync::mpsc; +use tokio_stream::wrappers::ReceiverStream; +use tonic::{body::BoxBody, client::GrpcService}; +use tracing::Instrument; + +pub(super) fn create_collector( + addr: ControlAddr, + hostname: Option, + service_name: String, + attributes: HashMap, + svc: S, + legacy_metrics: metrics::Registry, +) -> EnabledCollector +where + S: GrpcService + Clone + Send + 'static, + S::Error: Into, + S::Future: Send, + S::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, +{ + let (span_sink, spans_rx) = mpsc::channel(crate::trace_collector::SPAN_BUFFER_CAPACITY); + let spans_rx = ReceiverStream::new(spans_rx); + + let task = { + use self::proto::agent::common::v1 as oc; + + let node = oc::Node { + identifier: Some(oc::ProcessIdentifier { + host_name: hostname.unwrap_or_default(), + pid: std::process::id(), + start_timestamp: Some(SystemTime::now().into()), + }), + service_info: Some(oc::ServiceInfo { name: service_name }), + attributes, + ..oc::Node::default() + }; + + let addr = addr.clone(); + Box::pin( + opencensus::export_spans(svc, node, spans_rx, legacy_metrics) + .instrument(tracing::debug_span!("opencensus", peer.addr = %addr).or_current()), + ) + }; + + EnabledCollector { + addr, + task, + span_sink, + kind: CollectorProtocol::OpenCensus, + } +} diff --git a/linkerd/app/src/trace_collector/otel_collector.rs b/linkerd/app/src/trace_collector/otel_collector.rs new file mode 100644 index 0000000000..052cb71007 --- /dev/null +++ b/linkerd/app/src/trace_collector/otel_collector.rs @@ -0,0 +1,125 @@ +use super::EnabledCollector; +use linkerd_app_core::{ + control::ControlAddr, http_tracing::CollectorProtocol, proxy::http::Body, Error, +}; +use linkerd_opentelemetry::{ + self as opentelemetry, metrics, + proto::{ + proto::common::v1::{any_value, AnyValue, KeyValue}, + transform::common::ResourceAttributesWithSchema, + }, +}; +use 
std::{ + collections::HashMap, + time::{SystemTime, UNIX_EPOCH}, +}; +use tokio::sync::mpsc; +use tokio_stream::wrappers::ReceiverStream; +use tonic::{body::BoxBody, client::GrpcService}; +use tracing::Instrument; + +pub(super) struct OtelCollectorAttributes { + pub hostname: Option, + pub service_name: String, + pub extra: HashMap, +} + +pub(super) fn create_collector( + addr: ControlAddr, + attributes: OtelCollectorAttributes, + svc: S, + legacy_metrics: metrics::Registry, +) -> EnabledCollector +where + S: GrpcService + Clone + Send + 'static, + S::Error: Into, + S::Future: Send, + S::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, +{ + let (span_sink, spans_rx) = mpsc::channel(crate::trace_collector::SPAN_BUFFER_CAPACITY); + let spans_rx = ReceiverStream::new(spans_rx); + + let mut resources = ResourceAttributesWithSchema::default(); + + resources + .attributes + .0 + .push(attributes.service_name.with_key("service.name")); + resources + .attributes + .0 + .push((std::process::id() as i64).with_key("process.pid")); + + resources.attributes.0.push( + SystemTime::now() + .duration_since(UNIX_EPOCH) + .map(|d| d.as_secs() as i64) + .unwrap_or_else(|e| -(e.duration().as_secs() as i64)) + .with_key("process.start_timestamp"), + ); + resources.attributes.0.push( + attributes + .hostname + .unwrap_or_default() + .with_key("host.name"), + ); + + resources.attributes.0.extend( + attributes + .extra + .into_iter() + .map(|(key, value)| value.with_key(&key)), + ); + + let addr = addr.clone(); + let task = Box::pin( + opentelemetry::export_spans(svc, spans_rx, resources, legacy_metrics) + .instrument(tracing::debug_span!("opentelemetry", peer.addr = %addr).or_current()), + ); + + EnabledCollector { + addr, + task, + span_sink, + kind: CollectorProtocol::OpenTelemetry, + } +} + +trait IntoAnyValue +where + Self: Sized, +{ + fn into_any_value(self) -> AnyValue; + + fn with_key(self, key: &str) -> KeyValue { + KeyValue { + key: key.to_string(), + value: 
Some(self.into_any_value()), + } + } +} + +impl IntoAnyValue for String { + fn into_any_value(self) -> AnyValue { + AnyValue { + value: Some(any_value::Value::StringValue(self)), + } + } +} + +impl IntoAnyValue for &str { + fn into_any_value(self) -> AnyValue { + AnyValue { + value: Some(any_value::Value::StringValue(self.to_string())), + } + } +} + +impl IntoAnyValue for i64 { + fn into_any_value(self) -> AnyValue { + AnyValue { + value: Some(any_value::Value::IntValue(self)), + } + } +} diff --git a/linkerd/app/test/Cargo.toml b/linkerd/app/test/Cargo.toml index 1afee2908c..a9c52fef4e 100644 --- a/linkerd/app/test/Cargo.toml +++ b/linkerd/app/test/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "linkerd-app-test" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } description = """ Proxy test helpers """ @@ -14,10 +14,12 @@ client-policy = ["linkerd-proxy-client-policy", "tonic", "linkerd-http-route"] [dependencies] futures = { version = "0.3", default-features = false } -h2 = "0.3" -http = "0.2" -http-body = "0.4" -hyper = { version = "0.14", features = ["http1", "http2"] } +h2 = { workspace = true } +http = { workspace = true } +http-body = { workspace = true } +http-body-util = { workspace = true } +hyper = { workspace = true, features = ["http1", "http2"] } +hyper-util = { workspace = true, features = ["tokio"] } linkerd-app-core = { path = "../core" } linkerd-http-route = { path = "../../http/route", optional = true } linkerd-identity = { path = "../../identity" } @@ -29,10 +31,10 @@ regex = "1" tokio = { version = "1", features = ["io-util", "net", "rt", "sync"] } tokio-test = "0.4" tokio-stream = { version = "0.1", features = ["sync"] } -tonic = { version = "0.10", default-features = false, optional = true } -tower = { version = "0.4", 
default-features = false } +tonic = { workspace = true, default-features = false, optional = true } +tower = { workspace = true, default-features = false } tracing = "0.1" -thiserror = "1" +thiserror = "2" [dependencies.tracing-subscriber] version = "0.3" diff --git a/linkerd/app/test/src/connect.rs b/linkerd/app/test/src/connect.rs index 4ac7a91916..dcc789bafc 100644 --- a/linkerd/app/test/src/connect.rs +++ b/linkerd/app/test/src/connect.rs @@ -1,3 +1,4 @@ +use crate::io; use linkerd_app_core::{ svc::{Param, Service}, transport::{ClientAddr, Local, Remote, ServerAddr}, @@ -14,11 +15,6 @@ use std::{ }; use tracing::instrument::{Instrument, Instrumented}; -mod io { - pub use linkerd_app_core::io::*; - pub use tokio_test::io::*; -} - type ConnectFn = Box ConnectFuture + Send>; pub type ConnectFuture = diff --git a/linkerd/app/test/src/http_util.rs b/linkerd/app/test/src/http_util.rs index 6e316edb1e..b728644b6f 100644 --- a/linkerd/app/test/src/http_util.rs +++ b/linkerd/app/test/src/http_util.rs @@ -1,170 +1,136 @@ use crate::{ - app_core::{svc, tls, Error}, + app_core::{ + svc::{self, http::TokioExecutor}, + Error, + }, io, ContextError, }; -use futures::FutureExt; -use hyper::{ - body::HttpBody, - client::conn::{Builder as ClientBuilder, SendRequest}, - Body, Request, Response, -}; -use parking_lot::Mutex; -use std::{future::Future, sync::Arc}; -use tokio::task::JoinHandle; -use tower::{util::ServiceExt, Service}; +use http_body::Body; +use tokio::task::JoinSet; +use tower::ServiceExt; use tracing::Instrument; -pub struct Server { - settings: hyper::server::conn::Http, - f: HandleFuture, -} - -type HandleFuture = Box) -> Result, Error>) + Send>; - type BoxServer = svc::BoxTcp; -impl Default for Server { - fn default() -> Self { - Self { - settings: hyper::server::conn::Http::new(), - f: Box::new(|_| { - Ok(Response::builder() - .status(http::status::StatusCode::NOT_FOUND) - .body(Body::empty()) - .expect("known status code is fine")) - }), - } - } -} - -pub async 
fn run_proxy(mut server: BoxServer) -> (io::DuplexStream, JoinHandle>) { +/// Connects a client and server, running a proxy between them. +/// +/// Returns a tuple containing (1) a [`SendRequest`][send] that can be used to transmit a +/// request and await a response, and (2) a [`JoinSet`] running background tasks. +/// +/// [send]: hyper::client::conn::http1::SendRequest +pub async fn connect_and_accept_http1( + client_settings: &mut hyper::client::conn::http1::Builder, + server: BoxServer, +) -> ( + hyper::client::conn::http1::SendRequest, + JoinSet>, +) +where + B: Body + Send + 'static, + B::Data: Send, + B::Error: Into>, +{ + tracing::info!(settings = ?client_settings, "connecting client with"); let (client_io, server_io) = io::duplex(4096); - let f = server - .ready() - .await - .expect("proxy server failed to become ready") - .call(server_io); - - let proxy = async move { - let res = f.await.map_err(Into::into); - drop(server); - tracing::debug!("dropped server"); - tracing::info!(?res, "proxy serve task complete"); - res.map(|_| ()) - } - .instrument(tracing::info_span!("proxy")); - (client_io, tokio::spawn(proxy)) -} -pub async fn connect_client( - client_settings: &mut ClientBuilder, - io: io::DuplexStream, -) -> (SendRequest, JoinHandle>) { let (client, conn) = client_settings - .handshake(io) + .handshake(hyper_util::rt::TokioIo::new(client_io)) .await .expect("Client must connect"); - let client_bg = conn - .map(|res| { - tracing::info!(?res, "Client background complete"); - res.map_err(Into::into) - }) - .instrument(tracing::info_span!("client_bg")); - (client, tokio::spawn(client_bg)) + + let mut bg = tokio::task::JoinSet::new(); + bg.spawn( + async move { + server + .oneshot(server_io) + .await + .map_err(ContextError::ctx("proxy background task failed"))?; + tracing::info!("proxy serve task complete"); + Ok(()) + } + .instrument(tracing::info_span!("proxy")), + ); + bg.spawn( + async move { + conn.await + .map_err(ContextError::ctx("client 
background task failed")) + .map_err(Error::from)?; + tracing::info!("client background complete"); + Ok(()) + } + .instrument(tracing::info_span!("client_bg")), + ); + + (client, bg) } -pub async fn connect_and_accept( - client_settings: &mut ClientBuilder, +/// Connects a client and server, running a proxy between them. +/// +/// Returns a tuple containing (1) a [`SendRequest`][send] that can be used to transmit a +/// request and await a response, and (2) a [`JoinSet`] running background tasks. +/// +/// [send]: hyper::client::conn::http2::SendRequest +pub async fn connect_and_accept_http2( + client_settings: &mut hyper::client::conn::http2::Builder, server: BoxServer, -) -> (SendRequest, impl Future>) { +) -> ( + hyper::client::conn::http2::SendRequest, + JoinSet>, +) +where + B: Body + Unpin + Send + 'static, + B::Data: Send, + B::Error: Into>, +{ tracing::info!(settings = ?client_settings, "connecting client with"); - let (client_io, proxy) = run_proxy(server).await; - let (client, client_bg) = connect_client(client_settings, client_io).await; - let bg = async move { - proxy - .await - .expect("proxy background task panicked") - .map_err(ContextError::ctx("proxy background task failed"))?; - client_bg - .await - .expect("client background task panicked") - .map_err(ContextError::ctx("client background task failed"))?; - Ok(()) - }; - (client, bg) -} + let (client_io, server_io) = io::duplex(4096); -#[tracing::instrument(skip(client))] -pub async fn http_request( - client: &mut SendRequest, - request: Request, -) -> Result, Error> { - let rsp = client - .ready() - .await - .map_err(ContextError::ctx("HTTP client poll_ready failed"))? 
- .call(request) + let (client, conn) = client_settings + .handshake(hyper_util::rt::TokioIo::new(client_io)) .await - .map_err(ContextError::ctx("HTTP client request failed"))?; + .expect("Client must connect"); - tracing::info!(?rsp); + let mut bg = tokio::task::JoinSet::new(); + bg.spawn( + async move { + server + .oneshot(server_io) + .await + .map_err(ContextError::ctx("proxy background task failed"))?; + tracing::info!("proxy serve task complete"); + Ok(()) + } + .instrument(tracing::info_span!("proxy")), + ); + bg.spawn( + async move { + conn.await + .map_err(ContextError::ctx("client background task failed")) + .map_err(Error::from)?; + tracing::info!("client background complete"); + Ok(()) + } + .instrument(tracing::info_span!("client_bg")), + ); - Ok(rsp) + (client, bg) } +/// Collects a request or response body, returning it as a [`String`]. pub async fn body_to_string(body: T) -> Result where - T: HttpBody, + T: Body, T::Error: Into, { - let body = hyper::body::to_bytes(body) + use http_body_util::BodyExt; + let bytes = body + .collect() .await - .map_err(ContextError::ctx("HTTP response body stream failed"))?; - let body = std::str::from_utf8(&body[..]) - .map_err(ContextError::ctx("converting body to string failed"))? - .to_owned(); - Ok(body) -} - -impl Server { - pub fn http1(mut self) -> Self { - self.settings.http1_only(true); - self - } + .map(http_body_util::Collected::to_bytes) + .map_err(ContextError::ctx("HTTP response body stream failed"))? 
+ .to_vec(); - pub fn http2(mut self) -> Self { - self.settings.http2_only(true); - self - } - - pub fn new(mut f: impl (FnMut(Request) -> Response) + Send + 'static) -> Self { - Self { - f: Box::new(move |req| Ok::<_, Error>(f(req))), - ..Default::default() - } - } - - pub fn run(self) -> impl (FnMut(E) -> io::Result) + Send + 'static - where - E: std::fmt::Debug, - E: svc::Param, - { - let Self { f, settings } = self; - let f = Arc::new(Mutex::new(f)); - move |endpoint| { - let span = tracing::debug_span!("server::run", ?endpoint).or_current(); - let _e = span.enter(); - let f = f.clone(); - let (client_io, server_io) = crate::io::duplex(4096); - let svc = hyper::service::service_fn(move |request: Request| { - let f = f.clone(); - async move { - tracing::info!(?request); - f.lock()(request) - } - }); - tokio::spawn(settings.serve_connection(server_io, svc).in_current_span()); - Ok(io::BoxedIo::new(client_io)) - } - } + String::from_utf8(bytes) + .map_err(ContextError::ctx("converting body to string failed")) + .map_err(Into::into) } diff --git a/linkerd/app/test/src/lib.rs b/linkerd/app/test/src/lib.rs index e49cb35b6c..1e6ec51e8b 100644 --- a/linkerd/app/test/src/lib.rs +++ b/linkerd/app/test/src/lib.rs @@ -14,6 +14,10 @@ pub use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; pub use tokio::sync::oneshot; pub use tower::Service; pub use tracing::*; + +/// I/O facilities for tests. +/// +/// Provides [`AsyncRead`] and [`AsyncWrite`] types via [`tokio_test`], and via [`linkerd_io`]. 
pub mod io { pub use linkerd_app_core::io::*; pub use tokio_test::io::*; diff --git a/linkerd/app/test/src/resolver.rs b/linkerd/app/test/src/resolver.rs index 5b5ff17d36..239703ff91 100644 --- a/linkerd/app/test/src/resolver.rs +++ b/linkerd/app/test/src/resolver.rs @@ -136,9 +136,8 @@ impl, E> tower::Service for Dst { .endpoints .lock() .remove(&addr) - .map(|x| { + .inspect(|_| { tracing::trace!("found endpoint for target"); - x }) .unwrap_or_else(|| { tracing::debug!(?addr, "no endpoint configured for"); @@ -188,9 +187,8 @@ impl Profiles { .endpoints .lock() .remove(&addr) - .map(|x| { + .inspect(|_| { tracing::trace!("found endpoint for addr"); - x }) .unwrap_or_else(|| { tracing::debug!(?addr, "no endpoint configured for"); diff --git a/linkerd/app/test/src/resolver/client_policy.rs b/linkerd/app/test/src/resolver/client_policy.rs index c8860425cf..f57d3f9ced 100644 --- a/linkerd/app/test/src/resolver/client_policy.rs +++ b/linkerd/app/test/src/resolver/client_policy.rs @@ -72,12 +72,10 @@ impl ClientPolicies { policy: http::Policy { meta: Meta::new_default("default"), filters: Arc::new([]), - failure_policy: Default::default(), - request_timeout: None, + params: Default::default(), distribution: RouteDistribution::FirstAvailable(Arc::new([RouteBackend { filters: Arc::new([]), backend: backend.clone(), - request_timeout: None, }])), }, }], @@ -94,16 +92,16 @@ impl ClientPolicies { failure_accrual: Default::default(), }, opaque: opaq::Opaque { - policy: Some(opaq::Policy { - meta: Meta::new_default("default"), - filters: Arc::new([]), - failure_policy: Default::default(), - request_timeout: None, - distribution: RouteDistribution::FirstAvailable(Arc::new([RouteBackend { + routes: Some(opaq::Route { + policy: opaq::Policy { + meta: Meta::new_default("default"), filters: Arc::new([]), - backend: backend.clone(), - request_timeout: None, - }])), + params: Default::default(), + distribution: RouteDistribution::FirstAvailable(Arc::new([RouteBackend { + filters: 
Arc::new([]), + backend: backend.clone(), + }])), + }, }), }, }; @@ -123,9 +121,8 @@ impl ClientPolicies { .endpoints .lock() .remove(&addr) - .map(|x| { + .inspect(|_| { tracing::trace!("found policy for addr"); - x }) .ok_or_else(|| { tracing::debug!(?addr, "no policy configured for"); diff --git a/linkerd/conditional/Cargo.toml b/linkerd/conditional/Cargo.toml index 6eb902949d..c32108a0ff 100644 --- a/linkerd/conditional/Cargo.toml +++ b/linkerd/conditional/Cargo.toml @@ -1,8 +1,9 @@ [package] name = "linkerd-conditional" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [dependencies] diff --git a/linkerd/conditional/src/lib.rs b/linkerd/conditional/src/lib.rs index 15a13f9a48..da81040a94 100644 --- a/linkerd/conditional/src/lib.rs +++ b/linkerd/conditional/src/lib.rs @@ -86,7 +86,7 @@ impl Conditional { } } -impl<'a, C, R> Conditional<&'a C, R> +impl Conditional<&C, R> where C: Clone, { diff --git a/linkerd/detect/Cargo.toml b/linkerd/detect/Cargo.toml deleted file mode 100644 index 18392fd10c..0000000000 --- a/linkerd/detect/Cargo.toml +++ /dev/null @@ -1,18 +0,0 @@ -[package] -name = "linkerd-detect" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false - -[dependencies] -async-trait = "0.1" -bytes = "1" -linkerd-error = { path = "../error" } -linkerd-io = { path = "../io" } -linkerd-stack = { path = "../stack" } -tokio = { version = "1", features = ["time"] } -thiserror = "1" -tower = "0.4" -tracing = "0.1" diff --git a/linkerd/detect/src/lib.rs b/linkerd/detect/src/lib.rs deleted file mode 100644 index 5a7f669051..0000000000 --- a/linkerd/detect/src/lib.rs +++ /dev/null @@ -1,187 +0,0 @@ -#![deny(rust_2018_idioms, clippy::disallowed_methods, clippy::disallowed_types)] -#![forbid(unsafe_code)] - 
-use bytes::BytesMut; -use linkerd_error::Error; -use linkerd_io as io; -use linkerd_stack::{layer, ExtractParam, NewService}; -use std::{ - fmt, - future::Future, - pin::Pin, - result::Result as StdResult, - task::{Context, Poll}, -}; -use thiserror::Error; -use tokio::time; -use tower::util::ServiceExt; -use tracing::{debug, info, trace}; - -#[async_trait::async_trait] -pub trait Detect: Clone + Send + Sync + 'static { - type Protocol: Send; - - async fn detect( - &self, - io: &mut I, - buf: &mut BytesMut, - ) -> StdResult, Error>; -} - -pub type Result

= StdResult, DetectTimeoutError

>; - -#[derive(Error)] -#[error("{} protocol detection timed out after {0:?}", std::any::type_name::

())] -pub struct DetectTimeoutError

(time::Duration, std::marker::PhantomData

); - -#[derive(Copy, Clone, Debug)] -pub struct Config { - pub detect: D, - pub capacity: usize, - pub timeout: time::Duration, -} - -#[derive(Copy, Clone, Debug)] -pub struct NewDetectService { - inner: N, - params: P, - _detect: std::marker::PhantomData D>, -} - -#[derive(Copy, Clone, Debug)] -pub struct DetectService { - config: Config, - inner: N, -} - -pub fn allow_timeout

(p: Result

) -> Option

{ - match p { - Ok(p) => p, - Err(e) => { - info!("Continuing after timeout: {}", e); - None - } - } -} - -// === impl Config === - -impl Config { - const DEFAULT_CAPACITY: usize = 1024; - - pub fn from_timeout(timeout: time::Duration) -> Self { - Self { - detect: D::default(), - capacity: Self::DEFAULT_CAPACITY, - timeout, - } - } -} - -// === impl NewDetectService === - -impl NewDetectService { - pub fn new(params: P, inner: N) -> Self { - Self { - inner, - params, - _detect: std::marker::PhantomData, - } - } - - pub fn layer(params: P) -> impl layer::Layer + Clone - where - P: Clone, - { - layer::mk(move |inner| Self::new(params.clone(), inner)) - } -} - -impl NewService for NewDetectService -where - P: ExtractParam, T>, - N: NewService, -{ - type Service = DetectService; - - fn new_service(&self, target: T) -> Self::Service { - let config = self.params.extract_param(&target); - DetectService { - config, - inner: self.inner.new_service(target), - } - } -} - -// === impl DetectService === - -impl tower::Service for DetectService -where - I: Send + 'static, - D: Detect, - D::Protocol: std::fmt::Debug, - N: NewService, Service = NSvc> + Clone + Send + 'static, - NSvc: tower::Service, Response = ()> + Send, - NSvc::Error: Into, - NSvc::Future: Send, -{ - type Response = (); - type Error = Error; - type Future = Pin> + Send + 'static>>; - - #[inline] - fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn call(&mut self, mut io: I) -> Self::Future { - let Config { - detect, - capacity, - timeout, - } = self.config.clone(); - let inner = self.inner.clone(); - Box::pin(async move { - trace!(%capacity, ?timeout, "Starting protocol detection"); - let t0 = time::Instant::now(); - - let mut buf = BytesMut::with_capacity(capacity); - let detected = match time::timeout(timeout, detect.detect(&mut io, &mut buf)).await { - Ok(Ok(protocol)) => { - debug!( - ?protocol, - elapsed = ?time::Instant::now().saturating_duration_since(t0), - 
"Detected protocol", - ); - Ok(protocol) - } - Err(_) => Err(DetectTimeoutError(timeout, std::marker::PhantomData)), - Ok(Err(e)) => return Err(e), - }; - - trace!("Dispatching connection"); - let svc = inner.new_service(detected); - let mut svc = svc.ready_oneshot().await.map_err(Into::into)?; - svc.call(io::PrefixedIo::new(buf.freeze(), io)) - .await - .map_err(Into::into)?; - - trace!("Connection completed"); - // Hold the service until it's done being used so that cache - // idleness is reset. - drop(svc); - - Ok(()) - }) - } -} - -// === impl DetectTimeoutError === - -impl

fmt::Debug for DetectTimeoutError

{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_tuple(std::any::type_name::()) - .field(&self.0) - .finish() - } -} diff --git a/linkerd/distribute/Cargo.toml b/linkerd/distribute/Cargo.toml index 9911b4568a..910ab049c9 100644 --- a/linkerd/distribute/Cargo.toml +++ b/linkerd/distribute/Cargo.toml @@ -1,19 +1,19 @@ [package] name = "linkerd-distribute" -version = "0.1.0" -edition = "2021" -license = "Apache-2.0" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [dependencies] ahash = "0.8" -indexmap = "2" linkerd-stack = { path = "../stack" } parking_lot = "0.12" -rand = { version = "0.8", features = ["small_rng"] } +rand = { version = "0.9", features = ["small_rng"] } tokio = { version = "1", features = ["macros"] } tracing = "0.1" [dev-dependencies] tokio-test = "0.4" -tower-test = "0.4" +tower-test = { workspace = true } diff --git a/linkerd/distribute/src/keys.rs b/linkerd/distribute/src/keys.rs new file mode 100644 index 0000000000..439ff3839e --- /dev/null +++ b/linkerd/distribute/src/keys.rs @@ -0,0 +1,163 @@ +use ahash::{HashMap, HashMapExt}; +use rand::{ + distr::weighted::{Error as WeightedError, WeightedIndex}, + prelude::Distribution as _, + Rng, +}; +use std::hash::Hash; + +/// Uniquely identifies a key/backend pair for a distribution. This allows +/// backends to have the same key and still participate in request distribution. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub(crate) struct KeyId { + idx: usize, +} + +#[derive(Debug)] +pub struct ServiceKeys { + ids: Vec, + keys: HashMap, +} + +pub type WeightedServiceKeys = ServiceKeys>; + +#[derive(Debug, PartialEq, Eq, Hash)] +pub struct WeightedKey { + pub key: K, + pub weight: u32, +} + +pub(crate) struct WeightedKeySelector<'a, K> { + keys: &'a WeightedServiceKeys, + index: WeightedIndex, +} + +// === impl KeyId === + +impl KeyId { + pub(crate) fn new(idx: usize) -> Self { + Self { idx } + } +} + +// === impl UnweightedKeys === + +// PartialEq, Eq, and Hash are all valid to implement for UnweightedKeys since +// there is a defined iteration order for the keys, but it cannot be automatically +// derived for HashMap fields. +impl PartialEq for ServiceKeys { + fn eq(&self, other: &Self) -> bool { + if self.ids != other.ids { + return false; + } + + for id in &self.ids { + if self.keys.get(id) != other.keys.get(id) { + return false; + } + } + + true + } +} + +impl Eq for ServiceKeys {} + +impl Hash for ServiceKeys { + fn hash(&self, state: &mut H) { + self.ids.hash(state); + // Normally we would also hash the length, but self.ids and + // self.keys have the same length + for id in &self.ids { + self.keys.get(id).hash(state); + } + } +} + +impl ServiceKeys { + pub(crate) fn new(iter: impl Iterator) -> Self { + let mut ids = Vec::new(); + let mut keys = HashMap::new(); + for (idx, key) in iter.enumerate() { + let id = KeyId::new(idx); + ids.push(id); + keys.insert(id, key); + } + + Self { ids, keys } + } + + pub(crate) fn is_empty(&self) -> bool { + self.ids.is_empty() + } + + pub(crate) fn len(&self) -> usize { + self.ids.len() + } + + /// Returns the key `K` associated with the given [`KeyId`]. + /// + /// The output of using a [`KeyId`] not produced by the same instance of + /// [`ServiceKeys`] is unspecified, and it is likely to panic. 
+ /// + /// # Panics + /// + /// This will panic if no entry is associated with the given lookup key. + pub(crate) fn get(&self, id: KeyId) -> &K { + self.keys + .get(&id) + .expect("distribution lookup keys must be valid") + } + + fn try_get_id(&self, idx: usize) -> Option { + self.ids.get(idx).copied() + } + + pub(crate) fn iter(&self) -> impl Iterator { + self.ids.iter() + } +} + +// === impl WeightedKeys === + +impl WeightedServiceKeys { + pub(crate) fn into_unweighted(self) -> ServiceKeys { + ServiceKeys { + ids: self.ids, + keys: self + .keys + .into_iter() + .map(|(id, key)| (id, key.key)) + .collect(), + } + } + + pub(crate) fn weighted_index(&self) -> Result, WeightedError> { + WeightedIndex::new(self.ids.iter().map(|&id| self.get(id).weight)) + } + + pub(crate) fn validate_weights(&self) -> Result<(), WeightedError> { + self.weighted_index()?; + Ok(()) + } + + pub(crate) fn selector(&self) -> WeightedKeySelector<'_, K> { + let index = self.weighted_index().expect("distribution must be valid"); + WeightedKeySelector { keys: self, index } + } +} + +// === impl WeightedKeySelector === + +impl WeightedKeySelector<'_, K> { + pub(crate) fn select_weighted(&self, rng: &mut R) -> KeyId { + let idx = self.index.sample(rng); + self.keys + .try_get_id(idx) + .expect("distrubtion must select a valid backend") + } + + pub(crate) fn disable_backend(&mut self, id: KeyId) -> Result<(), WeightedError> { + self.index.update_weights(&[(id.idx, &0)]) + } +} diff --git a/linkerd/distribute/src/lib.rs b/linkerd/distribute/src/lib.rs index 85528bc396..4d22809825 100644 --- a/linkerd/distribute/src/lib.rs +++ b/linkerd/distribute/src/lib.rs @@ -4,13 +4,15 @@ #![forbid(unsafe_code)] mod cache; +mod keys; mod params; mod service; mod stack; pub use self::{ cache::{BackendCache, NewBackendCache}, - params::{Backends, Distribution, WeightedKeys}, + keys::WeightedServiceKeys, + params::{Backends, Distribution}, service::Distribute, stack::NewDistribute, }; diff --git 
a/linkerd/distribute/src/params.rs b/linkerd/distribute/src/params.rs index d7628e2980..9d7b57e35d 100644 --- a/linkerd/distribute/src/params.rs +++ b/linkerd/distribute/src/params.rs @@ -1,5 +1,8 @@ +use crate::{ + keys::{ServiceKeys, WeightedKey}, + WeightedServiceKeys, +}; use ahash::AHashSet; -use rand::distributions::{WeightedError, WeightedIndex}; use std::{fmt::Debug, hash::Hash, sync::Arc}; #[derive(Debug, Clone, PartialEq, Eq)] @@ -16,17 +19,11 @@ pub enum Distribution { Empty, /// A distribution that uses the first available backend in an ordered list. - FirstAvailable(Arc<[K]>), + FirstAvailable(Arc>), /// A distribution that uses the first available backend when randomly /// selecting over a weighted distribution of backends. - RandomAvailable(Arc>), -} - -#[derive(Debug, PartialEq, Eq, Hash)] -pub struct WeightedKeys { - keys: Vec, - weights: Vec, + RandomAvailable(Arc>), } // === impl Backends === @@ -64,46 +61,29 @@ impl Default for Distribution { } impl Distribution { - pub fn first_available(keys: impl IntoIterator) -> Self { - let keys: Arc<[K]> = keys.into_iter().collect(); + pub fn first_available(iter: impl IntoIterator) -> Self { + let keys = ServiceKeys::new(iter.into_iter()); if keys.is_empty() { return Self::Empty; } - Self::FirstAvailable(keys) + + Self::FirstAvailable(Arc::new(keys)) } pub fn random_available>( iter: T, - ) -> Result { - let (keys, weights): (Vec<_>, Vec<_>) = iter.into_iter().filter(|(_, w)| *w > 0).unzip(); - if keys.len() < 2 { - return Ok(Self::first_available(keys)); - } - // Error if the distribution is invalid. 
- let _index = WeightedIndex::new(weights.iter().copied())?; - Ok(Self::RandomAvailable(Arc::new(WeightedKeys { - keys, - weights, - }))) - } - - pub(crate) fn keys(&self) -> &[K] { - match self { - Self::Empty => &[], - Self::FirstAvailable(keys) => keys, - Self::RandomAvailable(keys) => keys.keys(), + ) -> Result { + let weighted_keys = WeightedServiceKeys::new( + iter.into_iter() + .map(|(key, weight)| WeightedKey { key, weight }), + ); + if weighted_keys.len() < 2 { + return Ok(Self::FirstAvailable(Arc::new( + weighted_keys.into_unweighted(), + ))); } - } -} - -// === impl WeightedKeys === - -impl WeightedKeys { - pub(crate) fn keys(&self) -> &[K] { - &self.keys - } - pub(crate) fn index(&self) -> WeightedIndex { - WeightedIndex::new(self.weights.iter().copied()).expect("distribution must be valid") + weighted_keys.validate_weights()?; + Ok(Self::RandomAvailable(Arc::new(weighted_keys))) } } diff --git a/linkerd/distribute/src/service.rs b/linkerd/distribute/src/service.rs index 30e9246ff3..dbd8f633ff 100644 --- a/linkerd/distribute/src/service.rs +++ b/linkerd/distribute/src/service.rs @@ -1,48 +1,54 @@ -use super::{Distribution, WeightedKeys}; -use indexmap::IndexMap; -use linkerd_stack::Service; -use rand::{ - distributions::{Distribution as _, WeightedError}, - rngs::SmallRng, - SeedableRng, -}; +use self::{first::FirstAvailableSelection, random::RandomAvailableSelection}; +use super::Distribution; +use linkerd_stack::{NewService, Service}; use std::{ hash::Hash, - sync::Arc, task::{Context, Poll}, }; +mod first; +mod random; + /// A service that distributes requests over a set of backends. -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct Distribute { - backends: IndexMap, - selection: Selection, - - /// Stores the index of the backend that has been polled to ready. The - /// service at this index will be used on the next invocation of - /// `Service::call`. 
- ready_idx: Option, + selection: Selection, } /// Holds per-distribution state for a [`Distribute`] service. #[derive(Debug)] -enum Selection { +enum Selection { Empty, - FirstAvailable, - RandomAvailable { - keys: Arc>, - rng: SmallRng, - }, + FirstAvailable(FirstAvailableSelection), + RandomAvailable(RandomAvailableSelection), } // === impl Distribute === impl Distribute { - pub(crate) fn new(backends: IndexMap, dist: Distribution) -> Self { + pub(crate) fn new(dist: Distribution, make_svc: N) -> Self + where + N: for<'a> NewService<&'a K, Service = S>, + { Self { - backends, - selection: dist.into(), - ready_idx: None, + selection: Self::make_selection(&dist, make_svc), + } + } + + fn make_selection(dist: &Distribution, make_svc: N) -> Selection + where + N: for<'a> NewService<&'a K, Service = S>, + { + // Build the backends needed for this distribution, in the required + // order (so that weighted indices align). + match dist { + Distribution::Empty => Selection::Empty, + Distribution::FirstAvailable(keys) => { + Selection::FirstAvailable(FirstAvailableSelection::new(keys, make_svc)) + } + Distribution::RandomAvailable(keys) => { + Selection::RandomAvailable(RandomAvailableSelection::new(keys, make_svc)) + } } } } @@ -62,147 +68,96 @@ where /// readiness. We expect that these inner services should be buffered or /// otherwise drive themselves to readiness (i.e. via SpawnReady). fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { - // If we've already chosen a ready index, then skip polling. 
- if self.ready_idx.is_some() { - return Poll::Ready(Ok(())); - } - - match self.selection { + match &mut self.selection { Selection::Empty => { tracing::debug!("empty distribution will never become ready"); + Poll::Pending } - - Selection::FirstAvailable => { - for (idx, svc) in self.backends.values_mut().enumerate() { - if svc.poll_ready(cx)?.is_ready() { - self.ready_idx = Some(idx); - return Poll::Ready(Ok(())); - } - } - } - - // Choose a random index (via the weighted distribution) to try to - // poll the backend. Continue selecting endpoints until we find one - // that is ready or we've tried all backends in the distribution. - Selection::RandomAvailable { - ref keys, - ref mut rng, - } => { - // Clone the weighted index so that we can zero out the weights - // as pending services are polled. - let mut index = keys.index(); - loop { - // Sample the weighted index to find a backend to try. - let idx = index.sample(rng); - let (_, svc) = self - .backends - .get_index_mut(idx) - .expect("distributions must not reference unknown backends"); - - if svc.poll_ready(cx)?.is_ready() { - self.ready_idx = Some(idx); - return Poll::Ready(Ok(())); - } - - // Zero out the weight of the backend we just tried so that - // it's not selected again. - match index.update_weights(&[(idx, &0)]) { - Ok(()) => {} - Err(WeightedError::AllWeightsZero) => { - // There are no backends remaining. 
- break; - } - Err(error) => { - tracing::error!(%error, "unexpected error updating weights; giving up"); - break; - } - } - } - } + Selection::FirstAvailable(s) => s.poll_ready(cx), + Selection::RandomAvailable(s) => s.poll_ready(cx), } - - debug_assert!(self.ready_idx.is_none()); - tracing::trace!("no ready services in distribution"); - Poll::Pending } fn call(&mut self, req: Req) -> Self::Future { - let idx = self - .ready_idx - .take() - .expect("poll_ready must be called first"); - - let (_, svc) = self.backends.get_index_mut(idx).expect("index must exist"); - - svc.call(req) - } -} - -impl Clone for Distribute { - fn clone(&self) -> Self { - Self { - backends: self.backends.clone(), - selection: self.selection.clone(), - // Clear the ready index so that the new clone must become ready - // independently. - ready_idx: None, + match &mut self.selection { + Selection::Empty => unreachable!("Empty selection is never ready"), + Selection::FirstAvailable(s) => s.call(req), + Selection::RandomAvailable(s) => s.call(req), } } } -impl Default for Distribute { +// === impl Selection === + +impl Default for Selection { /// Returns an empty distribution. This distribution will never become /// ready. 
fn default() -> Self { - Self { - backends: Default::default(), - selection: Selection::Empty, - ready_idx: None, - } + Self::Empty } } -// === impl Selection === - -impl From> for Selection { - fn from(dist: Distribution) -> Self { - match dist { - Distribution::Empty => Self::Empty, - Distribution::FirstAvailable(_) => Self::FirstAvailable, - Distribution::RandomAvailable(keys) => Self::RandomAvailable { - keys, - rng: SmallRng::from_rng(rand::thread_rng()).expect("RNG must initialize"), - }, +impl Clone for Selection { + fn clone(&self) -> Self { + match self { + Self::Empty => Self::Empty, + Self::FirstAvailable(s) => Self::FirstAvailable(s.clone()), + Self::RandomAvailable(s) => Self::RandomAvailable(s.clone()), } } } -impl Clone for Selection { - fn clone(&self) -> Self { - match self { - Self::Empty => Selection::Empty, - Self::FirstAvailable => Self::FirstAvailable, - Self::RandomAvailable { keys, .. } => Self::RandomAvailable { - keys: keys.clone(), - rng: SmallRng::from_rng(rand::thread_rng()).expect("RNG must initialize"), - }, +impl Default for Distribute { + fn default() -> Self { + Self { + selection: Selection::default(), } } } #[cfg(test)] mod tests { + use std::cell::RefCell; + + use crate::keys::KeyId; + use super::*; use tokio_test::*; use tower_test::mock; + fn mock_first_available( + svcs: Vec<(K, S)>, + ) -> Distribute { + // Wrap in RefCell because NewService is only blanked impl'd for Fn, not FnMut + let svcs = RefCell::new(svcs); + let dist = Distribution::first_available(svcs.borrow().iter().map(|(k, _)| k.clone())); + let dist = Distribute::new(dist, |_: &K| svcs.borrow_mut().remove(0).1); + assert!(svcs.borrow().is_empty()); + dist + } + + fn mock_random_available( + svcs: Vec<(K, S, u32)>, + ) -> Distribute { + let svcs = RefCell::new(svcs); + let dist = Distribution::random_available( + svcs.borrow() + .iter() + .map(|(k, _, weight)| (k.clone(), *weight)), + ) + .unwrap(); + let dist = Distribute::new(dist, |_: &K| 
svcs.borrow_mut().remove(0).1); + assert!(svcs.borrow().is_empty()); + dist + } + #[test] fn empty_pending() { let mut dist_svc = mock::Spawn::new(Distribute::<&'static str, mock::Mock<(), ()>>::new( Default::default(), - Default::default(), + |_: &&str| panic!("Empty service should never call make_svc"), )); - assert_eq!(dist_svc.get_ref().backends.len(), 0); + assert!(matches!(dist_svc.get_ref().selection, Selection::Empty)); assert_pending!(dist_svc.poll_ready()); } @@ -210,12 +165,10 @@ mod tests { fn first_available_woken() { let (mulder, mut mulder_ctl) = mock::pair::<(), ()>(); let (scully, mut scully_ctl) = mock::pair::<(), ()>(); - let mut dist_svc = mock::Spawn::new(Distribute::new( - vec![("mulder", mulder), ("scully", scully)] - .into_iter() - .collect(), - Distribution::FirstAvailable(Arc::new(["mulder", "scully"])), - )); + let mut dist_svc = mock::Spawn::new(mock_first_available(vec![ + ("mulder", mulder), + ("scully", scully), + ])); mulder_ctl.allow(0); scully_ctl.allow(0); @@ -228,17 +181,18 @@ mod tests { fn first_available_prefers_first() { let (mulder, mut mulder_ctl) = mock::pair(); let (scully, mut scully_ctl) = mock::pair(); - let mut dist_svc = mock::Spawn::new(Distribute::new( - vec![("mulder", mulder), ("scully", scully)] - .into_iter() - .collect(), - Distribution::FirstAvailable(Arc::new(["mulder", "scully"])), - )); + let mut dist_svc = mock::Spawn::new(mock_first_available(vec![ + ("mulder", mulder), + ("scully", scully), + ])); scully_ctl.allow(1); mulder_ctl.allow(1); assert_ready_ok!(dist_svc.poll_ready()); - assert_eq!(dist_svc.get_ref().ready_idx, Some(0)); + let Selection::FirstAvailable(selection) = &dist_svc.get_ref().selection else { + panic!() + }; + assert_eq!(selection.get_ready_idx(), Some(0)); let mut call = task::spawn(dist_svc.call(())); match assert_ready!(mulder_ctl.poll_request()) { Some(((), rsp)) => rsp.send_response(()), @@ -251,17 +205,18 @@ mod tests { fn first_available_uses_second() { let (mulder, mut 
mulder_ctl) = mock::pair(); let (scully, mut scully_ctl) = mock::pair(); - let mut dist_svc = mock::Spawn::new(Distribute::new( - vec![("mulder", mulder), ("scully", scully)] - .into_iter() - .collect(), - Distribution::FirstAvailable(Arc::new(["mulder", "scully"])), - )); + let mut dist_svc = mock::Spawn::new(mock_first_available(vec![ + ("mulder", mulder), + ("scully", scully), + ])); mulder_ctl.allow(0); scully_ctl.allow(1); assert_ready_ok!(dist_svc.poll_ready()); - assert_eq!(dist_svc.get_ref().ready_idx, Some(1)); + let Selection::FirstAvailable(selection) = &dist_svc.get_ref().selection else { + panic!() + }; + assert_eq!(selection.get_ready_idx(), Some(1)); let mut call = task::spawn(dist_svc.call(())); match assert_ready!(scully_ctl.poll_request()) { Some(((), rsp)) => rsp.send_response(()), @@ -270,18 +225,40 @@ mod tests { assert_ready_ok!(call.poll()); } + #[test] + fn first_available_duplicate_keys() { + let (mulder_1, mut mulder_1_ctl) = mock::pair(); + let (mulder_2, mut mulder_2_ctl) = mock::pair(); + let mut dist_svc = mock::Spawn::new(mock_first_available(vec![ + ("mulder", mulder_1), + ("mulder", mulder_2), + ])); + + mulder_2_ctl.allow(1); + mulder_1_ctl.allow(1); + assert_ready_ok!(dist_svc.poll_ready()); + let Selection::FirstAvailable(selection) = &dist_svc.get_ref().selection else { + panic!() + }; + assert_eq!(selection.get_ready_idx(), Some(0)); + let mut call = task::spawn(dist_svc.call(())); + match assert_ready!(mulder_1_ctl.poll_request()) { + Some(((), rsp)) => rsp.send_response(()), + _ => panic!("expected request"), + } + assert_ready_ok!(call.poll()); + } + #[test] fn random_available_woken() { let (mulder, mut mulder_ctl) = mock::pair::<(), ()>(); let (scully, mut scully_ctl) = mock::pair::<(), ()>(); let (skinner, mut skinner_ctl) = mock::pair::<(), ()>(); - let mut dist_svc = mock::Spawn::new(Distribute::new( - vec![("mulder", mulder), ("scully", scully), ("skinner", skinner)] - .into_iter() - .collect(), - 
Distribution::random_available([("mulder", 1), ("scully", 99998), ("skinner", 1)]) - .unwrap(), - )); + let mut dist_svc = mock::Spawn::new(mock_random_available(vec![ + ("mulder", mulder, 1), + ("scully", scully, 99998), + ("skinner", skinner, 1), + ])); mulder_ctl.allow(0); scully_ctl.allow(0); @@ -296,19 +273,20 @@ mod tests { let (mulder, mut mulder_ctl) = mock::pair(); let (scully, mut scully_ctl) = mock::pair(); let (skinner, mut skinner_ctl) = mock::pair(); - let mut dist_svc = mock::Spawn::new(Distribute::new( - vec![("mulder", mulder), ("scully", scully), ("skinner", skinner)] - .into_iter() - .collect(), - Distribution::random_available([("mulder", 1), ("scully", 99998), ("skinner", 1)]) - .unwrap(), - )); + let mut dist_svc = mock::Spawn::new(mock_random_available(vec![ + ("mulder", mulder, 1), + ("scully", scully, 99998), + ("skinner", skinner, 1), + ])); mulder_ctl.allow(1); scully_ctl.allow(1); skinner_ctl.allow(1); assert_ready_ok!(dist_svc.poll_ready()); - assert_eq!(dist_svc.get_ref().ready_idx, Some(1)); + let Selection::RandomAvailable(selection) = &dist_svc.get_ref().selection else { + panic!() + }; + assert_eq!(selection.get_ready_idx(), Some(KeyId::new(1))); let mut call = task::spawn(dist_svc.call(())); match assert_ready!(scully_ctl.poll_request()) { Some(((), rsp)) => rsp.send_response(()), @@ -322,19 +300,20 @@ mod tests { let (mulder, mut mulder_ctl) = mock::pair(); let (scully, mut scully_ctl) = mock::pair(); let (skinner, mut skinner_ctl) = mock::pair(); - let mut dist_svc = mock::Spawn::new(Distribute::new( - vec![("mulder", mulder), ("scully", scully), ("skinner", skinner)] - .into_iter() - .collect(), - Distribution::random_available([("mulder", 1), ("scully", 99998), ("skinner", 1)]) - .unwrap(), - )); + let mut dist_svc = mock::Spawn::new(mock_random_available(vec![ + ("mulder", mulder, 1), + ("scully", scully, 99998), + ("skinner", skinner, 1), + ])); mulder_ctl.allow(1); scully_ctl.allow(0); skinner_ctl.allow(0); 
assert_ready_ok!(dist_svc.poll_ready()); - assert_eq!(dist_svc.get_ref().ready_idx, Some(0)); + let Selection::RandomAvailable(selection) = &dist_svc.get_ref().selection else { + panic!() + }; + assert_eq!(selection.get_ready_idx(), Some(KeyId::new(0))); let mut call = task::spawn(dist_svc.call(())); match assert_ready!(mulder_ctl.poll_request()) { Some(((), rsp)) => rsp.send_response(()), @@ -342,4 +321,31 @@ mod tests { } assert_ready_ok!(call.poll()); } + + #[test] + fn random_available_duplicate_keys_allowed() { + let (mulder_1, mut mulder_1_ctl) = mock::pair(); + let (mulder_2, mut mulder_2_ctl) = mock::pair(); + let (mulder_3, mut mulder_3_ctl) = mock::pair(); + let mut dist_svc = mock::Spawn::new(mock_random_available(vec![ + ("mulder", mulder_1, 1), + ("mulder", mulder_2, 99998), + ("mulder", mulder_3, 1), + ])); + + mulder_1_ctl.allow(1); + mulder_2_ctl.allow(1); + mulder_3_ctl.allow(1); + assert_ready_ok!(dist_svc.poll_ready()); + let Selection::RandomAvailable(selection) = &dist_svc.get_ref().selection else { + panic!() + }; + assert_eq!(selection.get_ready_idx(), Some(KeyId::new(1))); + let mut call = task::spawn(dist_svc.call(())); + match assert_ready!(mulder_2_ctl.poll_request()) { + Some(((), rsp)) => rsp.send_response(()), + _ => panic!("expected request"), + } + assert_ready_ok!(call.poll()); + } } diff --git a/linkerd/distribute/src/service/first.rs b/linkerd/distribute/src/service/first.rs new file mode 100644 index 0000000000..696b91d227 --- /dev/null +++ b/linkerd/distribute/src/service/first.rs @@ -0,0 +1,80 @@ +use crate::keys::ServiceKeys; +use linkerd_stack::{NewService, Service}; +use std::task::{Context, Poll}; + +#[derive(Debug)] +pub(crate) struct FirstAvailableSelection { + backends: Vec, + + /// Stores the index of the backend that has been polled to ready. The + /// service at this index will be used on the next invocation of + /// `Service::call`. 
+ ready_idx: Option, +} + +impl FirstAvailableSelection { + pub fn new(keys: &ServiceKeys, make_svc: N) -> Self + where + N: for<'a> NewService<&'a K, Service = S>, + { + Self { + backends: keys + .iter() + .map(|&id| make_svc.new_service(keys.get(id))) + .collect(), + ready_idx: None, + } + } + + #[cfg(test)] + pub fn get_ready_idx(&self) -> Option { + self.ready_idx + } +} + +impl Clone for FirstAvailableSelection { + fn clone(&self) -> Self { + Self { + backends: self.backends.clone(), + // Clear the ready index so that the new clone must become ready + // independently. + ready_idx: None, + } + } +} + +impl Service for FirstAvailableSelection +where + S: Service, +{ + type Response = S::Response; + type Error = S::Error; + type Future = S::Future; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + // If we've already chosen a ready index, then skip polling. + if self.ready_idx.is_some() { + return Poll::Ready(Ok(())); + } + + for (idx, svc) in self.backends.iter_mut().enumerate() { + if svc.poll_ready(cx)?.is_ready() { + self.ready_idx = Some(idx); + return Poll::Ready(Ok(())); + } + } + debug_assert!(self.ready_idx.is_none()); + Poll::Pending + } + + fn call(&mut self, req: Req) -> Self::Future { + let idx = self + .ready_idx + .take() + .expect("poll_ready must be called first"); + + let svc = self.backends.get_mut(idx).expect("index must exist"); + + svc.call(req) + } +} diff --git a/linkerd/distribute/src/service/random.rs b/linkerd/distribute/src/service/random.rs new file mode 100644 index 0000000000..a0b916a041 --- /dev/null +++ b/linkerd/distribute/src/service/random.rs @@ -0,0 +1,120 @@ +use crate::{keys::KeyId, WeightedServiceKeys}; +use ahash::HashMap; +use linkerd_stack::{NewService, Service}; +use rand::{distr::weighted, rngs::SmallRng, SeedableRng}; +use std::{ + hash::Hash, + sync::Arc, + task::{Context, Poll}, +}; + +#[derive(Debug)] +pub(crate) struct RandomAvailableSelection { + keys: Arc>, + backends: HashMap, + rng: SmallRng, + 
+ /// Stores the index of the backend that has been polled to ready. The + /// service at this index will be used on the next invocation of + /// `Service::call`. + ready_idx: Option, +} + +fn new_rng() -> SmallRng { + SmallRng::from_rng(&mut rand::rng()) +} + +impl RandomAvailableSelection { + pub fn new(keys: &Arc>, make_svc: N) -> Self + where + N: for<'a> NewService<&'a K, Service = S>, + { + Self { + keys: keys.clone(), + backends: keys + .iter() + .map(|&id| (id, make_svc.new_service(&keys.get(id).key))) + .collect(), + ready_idx: None, + rng: new_rng(), + } + } + + #[cfg(test)] + pub fn get_ready_idx(&self) -> Option { + self.ready_idx + } +} + +impl Clone for RandomAvailableSelection { + fn clone(&self) -> Self { + Self { + keys: self.keys.clone(), + backends: self.backends.clone(), + rng: new_rng(), + // Clear the ready index so that the new clone must become ready + // independently. + ready_idx: None, + } + } +} + +impl Service for RandomAvailableSelection +where + K: Hash + Eq, + S: Service, +{ + type Response = S::Response; + type Error = S::Error; + type Future = S::Future; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + // If we've already chosen a ready index, then skip polling. + if self.ready_idx.is_some() { + return Poll::Ready(Ok(())); + } + + let mut selector = self.keys.selector(); + loop { + let id = selector.select_weighted(&mut self.rng); + let svc = self + .backends + .get_mut(&id) + .expect("distributions must not reference unknown backends"); + + if svc.poll_ready(cx)?.is_ready() { + self.ready_idx = Some(id); + return Poll::Ready(Ok(())); + } + + // Since the backend we just tried isn't ready, zero out the weight + // so that it's not tried again in this round, i.e. subsequent calls + // to `poll_ready` can try this backend again. + match selector.disable_backend(id) { + Ok(()) => {} + Err(weighted::Error::InsufficientNonZero) => { + // There are no backends remaining. 
+ break; + } + Err(error) => { + tracing::error!(%error, "unexpected error updating weights; giving up"); + break; + } + } + } + + debug_assert!(self.ready_idx.is_none()); + Poll::Pending + } + + fn call(&mut self, req: Req) -> Self::Future { + let id = self + .ready_idx + .take() + .expect("poll_ready must be called first"); + + let svc = self.backends.get_mut(&id).expect("index must exist"); + + svc.call(req) + } +} diff --git a/linkerd/distribute/src/stack.rs b/linkerd/distribute/src/stack.rs index e027630d7b..731481fe07 100644 --- a/linkerd/distribute/src/stack.rs +++ b/linkerd/distribute/src/stack.rs @@ -55,16 +55,9 @@ where /// Referencing other keys causes a panic. fn new_service(&self, target: T) -> Self::Service { let dist = self.extract.extract_param(&target); - tracing::debug!(backends = ?dist.keys(), "New distribution"); + tracing::debug!(backends = ?dist, "New distribution"); - // Build the backends needed for this distribution, in the required - // order (so that weighted indices align). 
let newk = self.inner.new_service(target); - let backends = dist - .keys() - .iter() - .map(|k| (k.clone(), newk.new_service(k.clone()))) - .collect(); - Distribute::new(backends, dist) + Distribute::new(dist, |k: &K| newk.new_service(k.clone())) } } diff --git a/linkerd/dns/Cargo.toml b/linkerd/dns/Cargo.toml index 1d37bfb530..e2e7c223d3 100644 --- a/linkerd/dns/Cargo.toml +++ b/linkerd/dns/Cargo.toml @@ -1,16 +1,20 @@ [package] name = "linkerd-dns" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [dependencies] futures = { version = "0.3", default-features = false } +hickory-resolver = "0.25.1" linkerd-dns-name = { path = "./name" } linkerd-error = { path = "../error" } -thiserror = "1" -tracing = "0.1" -hickory-resolver = "0.24.1" +prometheus-client = { workspace = true } +thiserror = "2" tokio = { version = "1", features = ["rt", "sync", "time"] } +tracing = "0.1" + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ['cfg(fuzzing)'] } diff --git a/linkerd/dns/fuzz/Cargo.toml b/linkerd/dns/fuzz/Cargo.toml index 3e00516d61..6f7cfdd21c 100644 --- a/linkerd/dns/fuzz/Cargo.toml +++ b/linkerd/dns/fuzz/Cargo.toml @@ -1,10 +1,10 @@ - [package] name = "linkerd-dns-fuzz" -version = "0.0.0" -authors = ["Linkerd Developers "] -publish = false -edition = "2021" +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [package.metadata] cargo-fuzz = true diff --git a/linkerd/dns/name/Cargo.toml b/linkerd/dns/name/Cargo.toml index f025b58ac2..982c5169b3 100644 --- a/linkerd/dns/name/Cargo.toml +++ b/linkerd/dns/name/Cargo.toml @@ -1,11 +1,11 @@ [package] name = "linkerd-dns-name" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = 
"Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [dependencies] -thiserror = "1" +thiserror = "2" untrusted = "0.9" diff --git a/linkerd/dns/name/src/name.rs b/linkerd/dns/name/src/name.rs index ac86edadc6..2447fafb97 100644 --- a/linkerd/dns/name/src/name.rs +++ b/linkerd/dns/name/src/name.rs @@ -41,7 +41,7 @@ pub struct Name(Arc); /// allowed. /// /// [RFC 5280 Section 7.2]: https://tools.ietf.org/html/rfc5280#section-7.2 -#[derive(Clone, Copy, Debug, Eq, Hash)] +#[derive(Clone, Copy, Debug, Eq)] pub struct NameRef<'a>(&'a str); #[derive(Copy, Clone, Debug, Eq, PartialEq, Error)] @@ -118,8 +118,9 @@ impl<'a> NameRef<'a> { return Err(InvalidName); } - let s = std::str::from_utf8(dns_name).map_err(|_| InvalidName)?; - Ok(Self(s)) + std::str::from_utf8(dns_name) + .map(Self) + .map_err(|_| InvalidName) } pub fn try_from_ascii_str(n: &'a str) -> Result { diff --git a/linkerd/dns/src/lib.rs b/linkerd/dns/src/lib.rs index f830d93b2b..18e64337ac 100644 --- a/linkerd/dns/src/lib.rs +++ b/linkerd/dns/src/lib.rs @@ -2,11 +2,10 @@ #![forbid(unsafe_code)] pub use hickory_resolver::config::ResolverOpts; -use hickory_resolver::{ - config::ResolverConfig, error, proto::rr::rdata, system_conf, AsyncResolver, TokioAsyncResolver, -}; +use hickory_resolver::{config::ResolverConfig, proto::rr::rdata, system_conf, TokioResolver}; use linkerd_dns_name::NameRef; pub use linkerd_dns_name::{InvalidName, Name, Suffix}; +use prometheus_client::metrics::counter::Counter; use std::{fmt, net}; use thiserror::Error; use tokio::time::{self, Instant}; @@ -14,27 +13,40 @@ use tracing::{debug, trace}; #[derive(Clone)] pub struct Resolver { - dns: TokioAsyncResolver, + dns: TokioResolver, + metrics: Option, } pub trait ConfigureResolver { fn configure_resolver(&self, _: &mut ResolverOpts); } +#[derive(Clone)] +pub struct Metrics { + /// A 
[`Counter`] tracking the number of A/AAAA records successfully resolved. + pub a_records_resolved: Counter, + /// A [`Counter`] tracking the number of A/AAAA records not found. + pub a_records_not_found: Counter, + /// A [`Counter`] tracking the number of SRV records successfully resolved. + pub srv_records_resolved: Counter, + /// A [`Counter`] tracking the number of SRV records not found. + pub srv_records_not_found: Counter, +} + #[derive(Debug, Clone, Error)] #[error("invalid SRV record {:?}", self.0)] struct InvalidSrv(rdata::SRV); #[derive(Debug, Error)] #[error("failed to resolve A record: {0}")] -struct ARecordError(#[from] error::ResolveError); +struct ARecordError(#[from] hickory_resolver::ResolveError); #[derive(Debug, Error)] enum SrvRecordError { #[error(transparent)] Invalid(#[from] InvalidSrv), #[error("failed to resolve SRV record: {0}")] - Resolve(#[from] error::ResolveError), + Resolve(#[from] hickory_resolver::ResolveError), } #[derive(Debug, Error)] @@ -64,12 +76,29 @@ impl Resolver { } pub fn new(config: ResolverConfig, mut opts: ResolverOpts) -> Self { - // Disable Trust-DNS's caching. + // Disable Hickory-resolver's caching. opts.cache_size = 0; // This function is synchronous, but needs to be called within the Tokio // 0.2 runtime context, since it gets a handle. - let dns = AsyncResolver::tokio(config, opts); - Resolver { dns } + let provider = hickory_resolver::name_server::TokioConnectionProvider::default(); + let mut builder = hickory_resolver::Resolver::builder_with_config(config, provider); + *builder.options_mut() = opts; + let dns = builder.build(); + /* TODO(kate): this can be used if/when hickory-dns/hickory-dns#2877 is released. + let dns = hickory_resolver::Resolver::builder_with_config(config, provider) + .with_options(opts) + .build(); + */ + + Resolver { dns, metrics: None } + } + + /// Installs a counter tracking the number of A/AAAA records resolved. 
+ pub fn with_metrics(self, metrics: Metrics) -> Self { + Self { + metrics: Some(metrics), + ..self + } } /// Resolves a name to a set of addresses, preferring SRV records to normal A/AAAA @@ -78,17 +107,27 @@ impl Resolver { &self, name: NameRef<'_>, default_port: u16, - ) -> Result<(Vec, time::Sleep), ResolveError> { + ) -> Result<(Vec, Instant), ResolveError> { match self.resolve_srv(name).await { - Ok(res) => Ok(res), + Ok(res) => { + self.metrics.as_ref().map(Metrics::inc_srv_records_resolved); + Ok(res) + } Err(srv_error) => { // If the SRV lookup failed for any reason, fall back to A/AAAA // record resolution. debug!(srv.error = %srv_error, "Falling back to A/AAAA record lookup"); + self.metrics + .as_ref() + .map(Metrics::inc_srv_records_not_found); let (ips, delay) = match self.resolve_a_or_aaaa(name).await { Ok(res) => res, - Err(a_error) => return Err(ResolveError { a_error, srv_error }), + Err(a_error) => { + self.metrics.as_ref().map(Metrics::inc_a_records_not_found); + return Err(ResolveError { a_error, srv_error }); + } }; + self.metrics.as_ref().map(Metrics::inc_a_records_resolved); let addrs = ips .into_iter() .map(|ip| net::SocketAddr::new(ip, default_port)) @@ -101,18 +140,18 @@ impl Resolver { async fn resolve_a_or_aaaa( &self, name: NameRef<'_>, - ) -> Result<(Vec, time::Sleep), ARecordError> { + ) -> Result<(Vec, Instant), ARecordError> { debug!(%name, "Resolving an A/AAAA record"); let lookup = self.dns.lookup_ip(name.as_str()).await?; - let valid_until = Instant::from_std(lookup.valid_until()); let ips = lookup.iter().collect::>(); - Ok((ips, time::sleep_until(valid_until))) + let valid_until = Instant::from_std(lookup.valid_until()); + Ok((ips, valid_until)) } async fn resolve_srv( &self, name: NameRef<'_>, - ) -> Result<(Vec, time::Sleep), SrvRecordError> { + ) -> Result<(Vec, Instant), SrvRecordError> { debug!(%name, "Resolving a SRV record"); let srv = self.dns.srv_lookup(name.as_str()).await?; @@ -121,9 +160,9 @@ impl Resolver { 
.into_iter() .map(Self::srv_to_socket_addr) .collect::>()?; - debug!(ttl = ?valid_until - time::Instant::now(), ?addrs); + debug!(ttl = ?valid_until - Instant::now(), ?addrs); - Ok((addrs, time::sleep_until(valid_until))) + Ok((addrs, valid_until)) } // XXX We need to convert the SRV records to an IP addr manually, @@ -149,7 +188,7 @@ impl Resolver { } } -/// Note: `AsyncResolver` does not implement `Debug`, so we must manually +/// Note: `hickory_resolver::Resolver` does not implement `Debug`, so we must manually /// implement this. impl fmt::Debug for Resolver { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { @@ -165,19 +204,23 @@ impl ResolveError { /// Returns the amount of time that the resolver should wait before /// retrying. pub fn negative_ttl(&self) -> Option { - if let error::ResolveErrorKind::NoRecordsFound { + if let Some(hickory_resolver::proto::ProtoErrorKind::NoRecordsFound { negative_ttl: Some(ttl_secs), .. - } = self.a_error.0.kind() + }) = self + .a_error + .0 + .proto() + .map(hickory_resolver::proto::ProtoError::kind) { return Some(time::Duration::from_secs(*ttl_secs as u64)); } if let SrvRecordError::Resolve(error) = &self.srv_error { - if let error::ResolveErrorKind::NoRecordsFound { + if let Some(hickory_resolver::proto::ProtoErrorKind::NoRecordsFound { negative_ttl: Some(ttl_secs), .. 
- } = error.kind() + }) = error.proto().map(hickory_resolver::proto::ProtoError::kind) { return Some(time::Duration::from_secs(*ttl_secs as u64)); } @@ -187,6 +230,26 @@ impl ResolveError { } } +// === impl Metrics === + +impl Metrics { + fn inc_a_records_resolved(&self) { + self.a_records_resolved.inc(); + } + + fn inc_a_records_not_found(&self) { + self.a_records_not_found.inc(); + } + + fn inc_srv_records_resolved(&self) { + self.srv_records_resolved.inc(); + } + + fn inc_srv_records_not_found(&self) { + self.srv_records_not_found.inc(); + } +} + #[cfg(test)] mod tests { use super::{Name, Resolver, Suffix}; diff --git a/linkerd/duplex/Cargo.toml b/linkerd/duplex/Cargo.toml index a0fe62f89b..384f586e9a 100644 --- a/linkerd/duplex/Cargo.toml +++ b/linkerd/duplex/Cargo.toml @@ -1,13 +1,13 @@ [package] name = "linkerd-duplex" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [dependencies] -bytes = "1" +bytes = { workspace = true } futures = { version = "0.3", default-features = false } tokio = { version = "1", features = ["io-util"] } pin-project = "1" diff --git a/linkerd/errno/Cargo.toml b/linkerd/errno/Cargo.toml index 3990cb47e1..cd149d0ceb 100644 --- a/linkerd/errno/Cargo.toml +++ b/linkerd/errno/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "linkerd-errno" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } diff --git a/linkerd/error-respond/Cargo.toml b/linkerd/error-respond/Cargo.toml index a2f6f8c26a..78aae9f43f 100644 --- a/linkerd/error-respond/Cargo.toml +++ b/linkerd/error-respond/Cargo.toml @@ -1,11 +1,10 @@ [package] 
name = "linkerd-error-respond" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false - +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [dependencies] futures = { version = "0.3", default-features = false } diff --git a/linkerd/error/Cargo.toml b/linkerd/error/Cargo.toml index 7bdd515161..85f610552e 100644 --- a/linkerd/error/Cargo.toml +++ b/linkerd/error/Cargo.toml @@ -1,13 +1,13 @@ [package] name = "linkerd-error" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [dependencies] futures = { version = "0.3", default-features = false } [dev-dependencies] -thiserror = "1" +thiserror = "2" diff --git a/linkerd/exp-backoff/Cargo.toml b/linkerd/exp-backoff/Cargo.toml index c221ca9111..e2de5ec105 100644 --- a/linkerd/exp-backoff/Cargo.toml +++ b/linkerd/exp-backoff/Cargo.toml @@ -1,15 +1,15 @@ [package] name = "linkerd-exp-backoff" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [dependencies] futures = { version = "0.3", default-features = false } -rand = { version = "0.8", features = ["small_rng"] } -thiserror = "1" +rand = { version = "0.9", features = ["small_rng"] } +thiserror = "2" tokio = { version = "1", features = ["time"] } pin-project = "1" diff --git a/linkerd/exp-backoff/src/lib.rs b/linkerd/exp-backoff/src/lib.rs index 45ee8ff465..08ea9d26d3 100644 --- a/linkerd/exp-backoff/src/lib.rs +++ b/linkerd/exp-backoff/src/lib.rs @@ -3,7 +3,7 @@ use 
futures::Stream; use pin_project::pin_project; -use rand::{rngs::SmallRng, thread_rng, SeedableRng}; +use rand::{rngs::SmallRng, SeedableRng}; use std::future::Future; use std::pin::Pin; use std::task::{Context, Poll}; @@ -74,7 +74,7 @@ impl ExponentialBackoff { pub fn stream(&self) -> ExponentialBackoffStream { ExponentialBackoffStream { backoff: *self, - rng: SmallRng::from_rng(&mut thread_rng()).expect("RNG must be valid"), + rng: SmallRng::from_rng(&mut rand::rng()), iterations: 0, sleeping: false, sleep: Box::pin(time::sleep(time::Duration::from_secs(0))), @@ -103,7 +103,7 @@ impl ExponentialBackoff { if self.jitter == 0.0 { time::Duration::default() } else { - let jitter_factor = rng.gen::(); + let jitter_factor = rng.random::(); debug_assert!( jitter_factor > 0.0, "rng returns values between 0.0 and 1.0" @@ -212,7 +212,7 @@ mod tests { Ok(backoff) => backoff, }; - let j = backoff.jitter(base, &mut rand::thread_rng()); + let j = backoff.jitter(base, &mut rand::rng()); if jitter == 0.0 || base_ms == 0 || max_ms == base_ms { TestResult::from_bool(j == time::Duration::default()) } else { diff --git a/linkerd/http/access-log/Cargo.toml b/linkerd/http/access-log/Cargo.toml index 1318bb80a3..399b3c03d8 100644 --- a/linkerd/http/access-log/Cargo.toml +++ b/linkerd/http/access-log/Cargo.toml @@ -1,15 +1,15 @@ [package] name = "linkerd-http-access-log" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } edition = "2018" -publish = false +publish = { workspace = true } [dependencies] futures-core = "0.3" -http = "0.2" -humantime = "2" +http = { workspace = true } +jiff = { version = "0.2", features = ["std"] } pin-project = "1" tokio = { version = "1", features = ["time"] } tracing = "0.1" diff --git a/linkerd/http/access-log/src/lib.rs b/linkerd/http/access-log/src/lib.rs index 1383fd8ebc..97c2da6b43 100644 --- a/linkerd/http/access-log/src/lib.rs 
+++ b/linkerd/http/access-log/src/lib.rs @@ -13,7 +13,7 @@ use std::{ net::SocketAddr, pin::Pin, task::{Context, Poll}, - time::{Duration, SystemTime}, + time::Duration, }; use svc::{NewService, Param}; use tokio::time::Instant; @@ -202,14 +202,14 @@ where .map(|x| span.record("response_bytes", x)); span.record("status", response.status().as_u16()); - span.record("total_ns", &field::display(total_ns)); - span.record("processing_ns", &field::display(processing_ns)); + span.record("total_ns", field::display(total_ns)); + span.record("processing_ns", field::display(processing_ns)); Poll::Ready(Ok(response)) } } #[inline] -fn now() -> humantime::Rfc3339Timestamp { - humantime::format_rfc3339(SystemTime::now()) +fn now() -> String { + jiff::Timestamp::now().to_string() } diff --git a/linkerd/http/box/Cargo.toml b/linkerd/http/box/Cargo.toml index 3f6f6a8feb..ccb5952414 100644 --- a/linkerd/http/box/Cargo.toml +++ b/linkerd/http/box/Cargo.toml @@ -1,16 +1,16 @@ [package] name = "linkerd-http-box" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [dependencies] -bytes = "1" +bytes = { workspace = true } futures = { version = "0.3", default-features = false } -http = "0.2" -http-body = "0.4" +http = { workspace = true } +http-body = { workspace = true } pin-project = "1" linkerd-error = { path = "../../error" } diff --git a/linkerd/http/box/src/body.rs b/linkerd/http/box/src/body.rs index 16f2ff4181..3029748a21 100644 --- a/linkerd/http/box/src/body.rs +++ b/linkerd/http/box/src/body.rs @@ -1,5 +1,4 @@ -use http::{HeaderMap, HeaderValue}; -use http_body::Body; +use http_body::{Body, Frame}; use linkerd_error::Error; use pin_project::pin_project; use std::pin::Pin; @@ -39,6 +38,18 @@ impl BoxBody { inner: Box::pin(Inner(inner)), } } + + /// Returns an 
empty [`BoxBody`]. + /// + /// This is an alias for [`BoxBody::default()`]. + pub fn empty() -> Self { + Self::default() + } + + /// Returns a [`BoxBody`] with the contents of a static string. + pub fn from_static(body: &'static str) -> Self { + Self::new(body.to_string()) + } } impl Body for BoxBody { @@ -51,19 +62,11 @@ impl Body for BoxBody { } #[inline] - fn poll_data( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>> { - self.as_mut().inner.as_mut().poll_data(cx) - } - - #[inline] - fn poll_trailers( + fn poll_frame( mut self: Pin<&mut Self>, cx: &mut Context<'_>, - ) -> Poll>, Self::Error>> { - self.as_mut().inner.as_mut().poll_trailers(cx) + ) -> Poll, Self::Error>>> { + self.as_mut().inner.as_mut().poll_frame(cx) } #[inline] @@ -72,6 +75,17 @@ impl Body for BoxBody { } } +impl Data { + fn new(buf: B) -> Self + where + B: bytes::Buf + Send + 'static, + { + Self { + inner: Box::new(buf), + } + } +} + impl bytes::Buf for Data { fn remaining(&self) -> usize { self.inner.remaining() @@ -104,24 +118,16 @@ where self.0.is_end_stream() } - fn poll_data( + fn poll_frame( self: Pin<&mut Self>, cx: &mut Context<'_>, - ) -> Poll>> { - let opt = futures::ready!(self.project().0.poll_data(cx)); - Poll::Ready(opt.map(|res| { - res.map_err(Into::into).map(|buf| Data { - inner: Box::new(buf), - }) - })) - } + ) -> Poll, Self::Error>>> { + // Poll the inner body `B` for the next frame. 
+ let body = self.project().0; + let frame = futures::ready!(body.poll_frame(cx)); + let frame = frame.map(Self::map_frame); - #[inline] - fn poll_trailers( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>, Self::Error>> { - Poll::Ready(futures::ready!(self.project().0.poll_trailers(cx)).map_err(Into::into)) + Poll::Ready(frame) } #[inline] @@ -130,6 +136,20 @@ where } } +impl Inner +where + B: Body, + B::Data: Send + 'static, + B::Error: Into, +{ + fn map_frame(frame: Result, B::Error>) -> Result, Error> { + match frame { + Ok(f) => Ok(f.map_data(Data::new)), + Err(e) => Err(e.into()), + } + } +} + impl std::fmt::Debug for BoxBody { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("BoxBody").finish() @@ -144,20 +164,13 @@ impl Body for NoBody { true } - fn poll_data( + fn poll_frame( self: Pin<&mut Self>, _: &mut Context<'_>, - ) -> Poll>> { + ) -> Poll, Self::Error>>> { Poll::Ready(None) } - fn poll_trailers( - self: Pin<&mut Self>, - _: &mut Context<'_>, - ) -> Poll>, Self::Error>> { - Poll::Ready(Ok(None)) - } - fn size_hint(&self) -> http_body::SizeHint { http_body::SizeHint::with_exact(0) } diff --git a/linkerd/http/box/src/response.rs b/linkerd/http/box/src/response.rs index 550cbb6b4d..7030a75060 100644 --- a/linkerd/http/box/src/response.rs +++ b/linkerd/http/box/src/response.rs @@ -10,8 +10,12 @@ use std::task::{Context, Poll}; pub struct BoxResponse(S); impl BoxResponse { + pub fn new(inner: S) -> Self { + Self(inner) + } + pub fn layer() -> impl layer::Layer + Copy { - layer::mk(Self) + layer::mk(Self::new) } /// Constructs a boxing layer that erases the inner response type with [`EraseResponse`]. 
diff --git a/linkerd/http/classify/Cargo.toml b/linkerd/http/classify/Cargo.toml index 051f80c32f..a12f860fc3 100644 --- a/linkerd/http/classify/Cargo.toml +++ b/linkerd/http/classify/Cargo.toml @@ -1,12 +1,24 @@ [package] name = "linkerd-http-classify" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [dependencies] -http = "0.2" +futures = { version = "0.3", default-features = false } +http = { workspace = true } +http-body = { workspace = true } +pin-project = "1" +tokio = { version = "1", default-features = false } +tracing = "0.1" linkerd-error = { path = "../../error" } +linkerd-http-box = { path = "../../http/box" } +linkerd-stack = { path = "../../stack" } + +[dev-dependencies] +tokio-test = "0.4" +tower-test = { workspace = true } +linkerd-tracing = { path = "../../tracing", features = ["ansi"] } diff --git a/linkerd/proxy/http/src/classify/channel.rs b/linkerd/http/classify/src/channel.rs similarity index 91% rename from linkerd/proxy/http/src/classify/channel.rs rename to linkerd/http/classify/src/channel.rs index 413887345b..63c4009fbd 100644 --- a/linkerd/proxy/http/src/classify/channel.rs +++ b/linkerd/http/classify/src/channel.rs @@ -1,5 +1,6 @@ use super::{ClassifyEos, ClassifyResponse}; use futures::{prelude::*, ready}; +use http_body::Frame; use linkerd_error::Error; use linkerd_stack::{layer, ExtractParam, NewService, Service}; use pin_project::{pin_project, pinned_drop}; @@ -207,48 +208,42 @@ where // === impl ResponseBody === -impl hyper::body::HttpBody for ResponseBody +impl http_body::Body for ResponseBody where C: ClassifyEos + Unpin, - B: hyper::body::HttpBody, + B: http_body::Body, { type Data = B::Data; type Error = B::Error; - fn poll_data( + fn poll_frame( self: Pin<&mut Self>, cx: &mut Context<'_>, - ) -> Poll>> { + 
) -> Poll, Self::Error>>> { let this = self.project(); - match ready!(this.inner.poll_data(cx)) { - None => Poll::Ready(None), - Some(Ok(data)) => Poll::Ready(Some(Ok(data))), - Some(Err(e)) => { + match ready!(this.inner.poll_frame(cx)) { + None => { + // Classify the stream if it has reached a `None`. if let Some(State { classify, tx }) = this.state.take() { - let _ = tx.try_send(classify.error(&e)); + let _ = tx.try_send(classify.eos(None)); } - Poll::Ready(Some(Err(e))) + Poll::Ready(None) } - } - } - - fn poll_trailers( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll, Self::Error>> { - let this = self.project(); - match ready!(this.inner.poll_trailers(cx)) { - Ok(trls) => { - if let Some(State { classify, tx }) = this.state.take() { - let _ = tx.try_send(classify.eos(trls.as_ref())); + Some(Ok(data)) => { + // Classify the stream if this is a trailers frame. + if let trls @ Some(_) = data.trailers_ref() { + if let Some(State { classify, tx }) = this.state.take() { + let _ = tx.try_send(classify.eos(trls)); + } } - Poll::Ready(Ok(trls)) + Poll::Ready(Some(Ok(data))) } - Err(e) => { + Some(Err(e)) => { + // Classify the stream if an error has been encountered. 
if let Some(State { classify, tx }) = this.state.take() { let _ = tx.try_send(classify.error(&e)); } - Poll::Ready(Err(e)) + Poll::Ready(Some(Err(e))) } } } diff --git a/linkerd/proxy/http/src/classify/gate.rs b/linkerd/http/classify/src/gate.rs similarity index 98% rename from linkerd/proxy/http/src/classify/gate.rs rename to linkerd/http/classify/src/gate.rs index 709a0a135f..726d8bf493 100644 --- a/linkerd/proxy/http/src/classify/gate.rs +++ b/linkerd/http/classify/src/gate.rs @@ -1,4 +1,4 @@ -use crate::classify::{BroadcastClassification, ClassifyResponse}; +use crate::{channel::BroadcastClassification, ClassifyResponse}; use linkerd_stack::{gate, layer, ExtractParam, Gate, NewService}; use std::marker::PhantomData; use tokio::sync::mpsc; diff --git a/linkerd/proxy/http/src/classify/insert.rs b/linkerd/http/classify/src/insert.rs similarity index 100% rename from linkerd/proxy/http/src/classify/insert.rs rename to linkerd/http/classify/src/insert.rs diff --git a/linkerd/http/classify/src/lib.rs b/linkerd/http/classify/src/lib.rs index 5003776444..5640fe43a9 100644 --- a/linkerd/http/classify/src/lib.rs +++ b/linkerd/http/classify/src/lib.rs @@ -3,6 +3,16 @@ use linkerd_error::Error; +pub use self::{ + channel::{BroadcastClassification, NewBroadcastClassification, Tx}, + gate::{NewClassifyGate, NewClassifyGateSet}, + insert::{InsertClassifyResponse, NewInsertClassifyResponse}, +}; + +pub mod channel; +pub mod gate; +mod insert; + /// Determines how a request's response should be classified. 
pub trait Classify { type Class: Clone + Send + Sync + 'static; diff --git a/linkerd/http/detect/Cargo.toml b/linkerd/http/detect/Cargo.toml new file mode 100644 index 0000000000..bdb2d11f53 --- /dev/null +++ b/linkerd/http/detect/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "linkerd-http-detect" +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } + +[dependencies] +bytes = { workspace = true } +httparse = "1" +prometheus-client = { workspace = true } +thiserror = "2" +tokio = { version = "1", features = ["time"] } +tracing = { version = "0.1" } + +linkerd-error = { path = "../../error" } +linkerd-http-variant = { path = "../variant" } +linkerd-io = { path = "../../io" } +linkerd-stack = { path = "../../stack" } + +[dev-dependencies] +tokio-test = "0.4" + +linkerd-tracing = { path = "../../tracing", features = ["ansi"] } diff --git a/linkerd/http/detect/src/lib.rs b/linkerd/http/detect/src/lib.rs new file mode 100644 index 0000000000..a75bb30021 --- /dev/null +++ b/linkerd/http/detect/src/lib.rs @@ -0,0 +1,318 @@ +use bytes::BytesMut; +use linkerd_error::{Error, Result}; +use linkerd_http_variant::Variant; +use linkerd_io::{self as io, AsyncReadExt}; +use linkerd_stack::{self as svc, ServiceExt}; +use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, +}; +use tokio::time; +use tracing::{debug, trace}; + +mod metrics; + +pub use self::metrics::{DetectMetrics, DetectMetricsFamilies}; + +#[derive(Clone, Debug, Default)] +pub struct DetectParams { + pub read_timeout: time::Duration, + pub metrics: metrics::DetectMetrics, +} + +#[derive(Debug, Clone)] +pub enum Detection { + NotHttp, + Http(Variant), + ReadTimeout(time::Duration), +} + +/// Attempts to detect the HTTP version of a stream. +/// +/// This module biases towards availability instead of correctness. I.e. 
instead +/// of buffering until we can be sure that we're dealing with an HTTP stream, we +/// instead perform only a single read and use that data to inform protocol +/// hinting. If a single read doesn't provide enough data to make a decision, we +/// treat the protocol as unknown. +/// +/// This allows us to interoperate with protocols that send very small initial +/// messages. In rare situations, we may fail to properly detect that a stream is +/// HTTP. +#[derive(Clone, Debug)] +pub struct Detect { + params: DetectParams, + inner: N, +} + +#[derive(Clone, Debug)] +pub struct NewDetect { + inner: N, + params: P, +} + +#[derive(Debug, thiserror::Error)] +#[error("read timed out after {0:?}")] +pub struct ReadTimeoutError(pub time::Duration); + +// Coincidentally, both our abbreviated H2 preface and our smallest possible +// HTTP/1 message are 14 bytes. +const H2_PREFACE: &[u8] = b"PRI * HTTP/2.0"; +const SMALLEST_POSSIBLE_HTTP1_REQ: &str = "GET / HTTP/1.1"; + +const READ_CAPACITY: usize = 1024; + +// === impl NewDetect === + +impl NewDetect { + pub fn new(params: P, inner: N) -> Self { + Self { inner, params } + } + + pub fn layer(params: P) -> impl svc::layer::Layer + Clone + where + P: Clone, + { + svc::layer::mk(move |inner| Self::new(params.clone(), inner)) + } +} + +impl svc::NewService for NewDetect +where + P: svc::ExtractParam, + N: svc::NewService, +{ + type Service = Detect; + + fn new_service(&self, target: T) -> Self::Service { + let params = self.params.extract_param(&target); + Detect { + params, + inner: self.inner.new_service(target), + } + } +} + +// === impl Detect === + +impl svc::Service for Detect +where + I: io::AsyncRead + Send + Unpin + 'static, + N: svc::NewService + Clone + Send + 'static, + NSvc: svc::Service, Response = ()> + Send, + NSvc::Error: Into, + NSvc::Future: Send, +{ + type Response = (); + type Error = Error; + type Future = Pin> + Send + 'static>>; + + #[inline] + fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { + 
Poll::Ready(Ok(())) + } + + fn call(&mut self, mut io: I) -> Self::Future { + let params = self.params.clone(); + let inner = self.inner.clone(); + Box::pin(async move { + let t0 = time::Instant::now(); + let mut buf = BytesMut::with_capacity(READ_CAPACITY); + let result = detect(¶ms, &mut io, &mut buf).await; + let elapsed = time::Instant::now().saturating_duration_since(t0); + params.metrics.observe(&result, elapsed); + debug!(?result, ?elapsed, "Detected"); + + let detection = result?; + trace!("Dispatching connection"); + let svc = inner.new_service(detection); + let mut svc = svc.ready_oneshot().await.map_err(Into::into)?; + svc.call(io::PrefixedIo::new(buf.freeze(), io)) + .await + .map_err(Into::into)?; + + trace!("Connection completed"); + // Hold the service until it's done being used so that cache + // idleness is reset. + drop(svc); + + Ok(()) + }) + } +} + +impl Detection { + pub fn variant(&self) -> Option { + match self { + Detection::Http(v) => Some(*v), + _ => None, + } + } +} + +async fn detect( + params: &DetectParams, + io: &mut I, + buf: &mut BytesMut, +) -> io::Result { + debug_assert!(buf.capacity() > 0, "buffer must have capacity"); + + trace!(capacity = buf.capacity(), timeout = ?params.read_timeout, "Reading"); + let sz = match time::timeout(params.read_timeout, io.read_buf(buf)).await { + Ok(res) => res?, + Err(_) => return Ok(Detection::ReadTimeout(params.read_timeout)), + }; + + trace!(sz, "Read"); + if sz == 0 { + return Err(io::Error::new( + io::ErrorKind::UnexpectedEof, + "socket closed before protocol detection", + )); + } + + // HTTP/2 checking is faster because it's a simple string match. If we + // have enough data, check it first. We don't bother matching on the + // entire H2 preface because the first part is enough to get a clear + // signal. 
+ if buf.len() >= H2_PREFACE.len() { + trace!("Checking H2 preface"); + if &buf[..H2_PREFACE.len()] == H2_PREFACE { + return Ok(Detection::Http(Variant::H2)); + } + } + + // Otherwise, we try to parse the data as an HTTP/1 message. + if buf.len() >= SMALLEST_POSSIBLE_HTTP1_REQ.len() { + trace!("Parsing HTTP/1 message"); + if let Ok(_) | Err(httparse::Error::TooManyHeaders) = + httparse::Request::new(&mut [httparse::EMPTY_HEADER; 0]).parse(&buf[..]) + { + return Ok(Detection::Http(Variant::Http1)); + } + } + + Ok(Detection::NotHttp) +} + +#[cfg(test)] +mod tests { + use super::*; + use tokio_test::io; + + const HTTP11_LINE: &[u8] = b"GET / HTTP/1.1\r\n"; + const H2_AND_GARBAGE: &[u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\ngarbage"; + const GARBAGE: &[u8] = + b"garbage garbage garbage garbage garbage garbage garbage garbage garbage garbage garbage garbage garbage garbage garbage garbage garbage"; + + #[tokio::test(flavor = "current_thread", start_paused = true)] + async fn timeout() { + let _trace = linkerd_tracing::test::trace_init(); + + let params = DetectParams { + read_timeout: time::Duration::from_millis(1), + ..Default::default() + }; + let mut buf = BytesMut::with_capacity(1024); + let mut io = io::Builder::new().wait(params.read_timeout * 2).build(); + let kind = detect(¶ms, &mut io, &mut buf).await.unwrap(); + assert!(matches!(kind, Detection::ReadTimeout(_)), "{kind:?}"); + } + + #[tokio::test(flavor = "current_thread", start_paused = true)] + async fn h2() { + let _trace = linkerd_tracing::test::trace_init(); + + let params = DetectParams { + read_timeout: time::Duration::from_millis(1), + ..Default::default() + }; + for read in &[H2_PREFACE, H2_AND_GARBAGE] { + debug!(read = ?std::str::from_utf8(read).unwrap()); + let mut buf = BytesMut::with_capacity(1024); + let mut io = io::Builder::new().read(read).build(); + let kind = detect(¶ms, &mut io, &mut buf).await.unwrap(); + assert_eq!(kind.variant(), Some(Variant::H2), "{kind:?}"); + } + } + + 
#[tokio::test(flavor = "current_thread", start_paused = true)] + async fn http1() { + let _trace = linkerd_tracing::test::trace_init(); + + let params = DetectParams { + read_timeout: time::Duration::from_millis(1), + ..Default::default() + }; + for i in 1..SMALLEST_POSSIBLE_HTTP1_REQ.len() { + debug!(read = ?std::str::from_utf8(&HTTP11_LINE[..i]).unwrap()); + let mut buf = BytesMut::with_capacity(1024); + let mut io = io::Builder::new().read(&HTTP11_LINE[..i]).build(); + let kind = detect(¶ms, &mut io, &mut buf).await.unwrap(); + assert!(matches!(kind, Detection::NotHttp), "{kind:?}"); + } + + debug!(read = ?std::str::from_utf8(HTTP11_LINE).unwrap()); + let mut buf = BytesMut::with_capacity(1024); + let mut io = io::Builder::new().read(HTTP11_LINE).build(); + let kind = detect(¶ms, &mut io, &mut buf).await.unwrap(); + assert_eq!(kind.variant(), Some(Variant::Http1), "{kind:?}"); + + const REQ: &[u8] = b"GET /foo/bar/bar/blah HTTP/1.1\r\nHost: foob.example.com\r\n\r\n"; + for i in SMALLEST_POSSIBLE_HTTP1_REQ.len()..REQ.len() { + debug!(read = ?std::str::from_utf8(&REQ[..i]).unwrap()); + let mut buf = BytesMut::with_capacity(1024); + let mut io = io::Builder::new().read(&REQ[..i]).build(); + let kind = detect(¶ms, &mut io, &mut buf).await.unwrap(); + assert_eq!(kind.variant(), Some(Variant::Http1), "{kind:?}"); + assert_eq!(buf[..], REQ[..i]); + } + + // Starts with a P, like the h2 preface. 
+ const POST: &[u8] = b"POST /foo HTTP/1.1\r\n"; + for i in SMALLEST_POSSIBLE_HTTP1_REQ.len()..POST.len() { + let mut buf = BytesMut::with_capacity(1024); + let mut io = io::Builder::new().read(&POST[..i]).build(); + debug!(read = ?std::str::from_utf8(&POST[..i]).unwrap()); + let kind = detect(¶ms, &mut io, &mut buf).await.unwrap(); + assert_eq!(kind.variant(), Some(Variant::Http1), "{kind:?}"); + assert_eq!(buf[..], POST[..i]); + } + } + + #[tokio::test(flavor = "current_thread", start_paused = true)] + async fn unknown() { + let _trace = linkerd_tracing::test::trace_init(); + + let params = DetectParams { + read_timeout: time::Duration::from_millis(1), + ..Default::default() + }; + let mut buf = BytesMut::with_capacity(1024); + let mut io = io::Builder::new().read(b"foo.bar.blah\r\nbobo").build(); + let kind = detect(¶ms, &mut io, &mut buf).await.unwrap(); + assert!(matches!(kind, Detection::NotHttp), "{kind:?}"); + assert_eq!(&buf[..], b"foo.bar.blah\r\nbobo"); + + let mut buf = BytesMut::with_capacity(1024); + let mut io = io::Builder::new().read(GARBAGE).build(); + let kind = detect(¶ms, &mut io, &mut buf).await.unwrap(); + assert!(matches!(kind, Detection::NotHttp), "{kind:?}"); + assert_eq!(&buf[..], GARBAGE); + } + + #[tokio::test(flavor = "current_thread", start_paused = true)] + async fn empty() { + let _trace = linkerd_tracing::test::trace_init(); + + let params = DetectParams { + read_timeout: time::Duration::from_millis(1), + ..Default::default() + }; + let mut buf = BytesMut::with_capacity(1024); + let mut io = io::Builder::new().build(); + let err = detect(¶ms, &mut io, &mut buf).await.unwrap_err(); + assert_eq!(err.kind(), std::io::ErrorKind::UnexpectedEof, "{err:?}"); + assert_eq!(&buf[..], b""); + } +} diff --git a/linkerd/http/detect/src/metrics.rs b/linkerd/http/detect/src/metrics.rs new file mode 100644 index 0000000000..3b031d01f2 --- /dev/null +++ b/linkerd/http/detect/src/metrics.rs @@ -0,0 +1,195 @@ +use linkerd_http_variant::Variant; +use 
prometheus_client::{ + encoding::EncodeLabelSet, + metrics::{ + counter::Counter, + family::{Family, MetricConstructor}, + histogram::Histogram, + }, + registry::{Registry, Unit}, +}; +use std::{fmt::Debug, hash::Hash}; +use tokio::time; + +#[derive(Clone, Debug)] +pub struct DetectMetricsFamilies +where + L: Clone + Hash + Eq + EncodeLabelSet + Debug + Send + Sync + 'static, +{ + duration: Family, + results: Family, Counter>, +} + +#[derive(Clone, Debug)] +pub struct DetectMetrics { + duration: Histogram, + not_http: Counter, + http1: Counter, + h2: Counter, + read_timeout: Counter, + error: Counter, +} + +#[derive(Clone, Debug, Hash, PartialEq, Eq)] +struct DetectLabels +where + L: Clone + Hash + Eq + EncodeLabelSet + Debug + Send + Sync + 'static, +{ + result: DetectResult, + labels: L, +} + +#[derive(Debug, Copy, Clone, Hash, Eq, PartialEq)] +enum DetectResult { + NotHttp, + Http1, + H2, + ReadTimeout, + Error, +} + +#[derive(Clone, Debug, Default)] +struct MkDurations; + +// === impl DetectMetricsFamilies === + +impl Default for DetectMetricsFamilies +where + L: Clone + Hash + Eq + EncodeLabelSet + Debug + Send + Sync + 'static, +{ + fn default() -> Self { + Self { + duration: Family::new_with_constructor(MkDurations), + results: Family::default(), + } + } +} + +impl DetectMetricsFamilies +where + L: Clone + Hash + Eq + EncodeLabelSet + Debug + Send + Sync + 'static, +{ + pub fn register(reg: &mut Registry) -> Self { + let duration = Family::new_with_constructor(MkDurations); + reg.register_with_unit( + "duration", + "Time taken for protocol detection", + Unit::Seconds, + duration.clone(), + ); + + let results = Family::default(); + reg.register("results", "Protocol detection results", results.clone()); + + Self { duration, results } + } + + pub fn metrics(&self, labels: L) -> DetectMetrics { + let duration = (*self.duration.get_or_create(&labels)).clone(); + + let not_http = (*self.results.get_or_create(&DetectLabels { + result: DetectResult::NotHttp, + 
labels: labels.clone(), + })) + .clone(); + let http1 = (*self.results.get_or_create(&DetectLabels { + result: DetectResult::Http1, + labels: labels.clone(), + })) + .clone(); + let h2 = (*self.results.get_or_create(&DetectLabels { + result: DetectResult::H2, + labels: labels.clone(), + })) + .clone(); + let read_timeout = (*self.results.get_or_create(&DetectLabels { + result: DetectResult::ReadTimeout, + labels: labels.clone(), + })) + .clone(); + let error = (*self.results.get_or_create(&DetectLabels { + result: DetectResult::Error, + labels, + })) + .clone(); + + DetectMetrics { + duration, + not_http, + http1, + h2, + read_timeout, + error, + } + } +} + +// === impl DetectMetrics === + +impl Default for DetectMetrics { + fn default() -> Self { + Self { + duration: MkDurations.new_metric(), + not_http: Counter::default(), + http1: Counter::default(), + h2: Counter::default(), + read_timeout: Counter::default(), + error: Counter::default(), + } + } +} + +impl DetectMetrics { + pub(crate) fn observe( + &self, + result: &std::io::Result, + elapsed: time::Duration, + ) { + match result { + Ok(super::Detection::NotHttp) => self.not_http.inc(), + Ok(super::Detection::Http(Variant::Http1)) => self.http1.inc(), + Ok(super::Detection::Http(Variant::H2)) => self.h2.inc(), + Ok(super::Detection::ReadTimeout(_)) => self.read_timeout.inc(), + Err(_) => self.error.inc(), + }; + self.duration.observe(elapsed.as_secs_f64()); + } +} + +// === impl DetectLabels === + +impl EncodeLabelSet for DetectLabels +where + L: Clone + Hash + Eq + EncodeLabelSet + Debug + Send + Sync + 'static, +{ + fn encode( + &self, + mut enc: prometheus_client::encoding::LabelSetEncoder, + ) -> Result<(), std::fmt::Error> { + use prometheus_client::encoding::EncodeLabel; + + ( + "result", + match self.result { + DetectResult::NotHttp => "not_http", + DetectResult::Http1 => "http/1", + DetectResult::H2 => "http/2", + DetectResult::ReadTimeout => "read_timeout", + DetectResult::Error => "error", + }, + ) + 
.encode(enc.encode_label())?; + + self.labels.encode(enc)?; + + Ok(()) + } +} + +// === impl MkDurations === + +impl MetricConstructor for MkDurations { + fn new_metric(&self) -> Histogram { + Histogram::new([0.001, 0.1]) + } +} diff --git a/linkerd/http/h2/Cargo.toml b/linkerd/http/h2/Cargo.toml index 4a1c99b986..a676051a3d 100644 --- a/linkerd/http/h2/Cargo.toml +++ b/linkerd/http/h2/Cargo.toml @@ -1,6 +1,8 @@ [package] name = "linkerd-http-h2" -version = "0.1.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } description = "HTTP/2-specific configuration types" diff --git a/linkerd/http/insert/Cargo.toml b/linkerd/http/insert/Cargo.toml new file mode 100644 index 0000000000..c58f25014f --- /dev/null +++ b/linkerd/http/insert/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "linkerd-http-insert" +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } +description = """ +Tower middleware to insert parameters into HTTP extensions. +""" + +[dependencies] +futures = { version = "0.3", default-features = false } +http = { workspace = true } +pin-project = "1" +tower = { workspace = true, default-features = false } + +linkerd-stack = { path = "../../stack" } diff --git a/linkerd/proxy/http/src/insert.rs b/linkerd/http/insert/src/lib.rs similarity index 88% rename from linkerd/proxy/http/src/insert.rs rename to linkerd/http/insert/src/lib.rs index dba4f54b0e..173fb78eb0 100644 --- a/linkerd/proxy/http/src/insert.rs +++ b/linkerd/http/insert/src/lib.rs @@ -1,3 +1,7 @@ +//! Tower middleware to insert parameters into HTTP extensions. +//! +//! See [`NewInsert`] and [`NewResponseInsert`]. 
+ use futures::{Future, TryFuture}; use linkerd_stack::{layer, NewService, Param, Proxy}; use std::{ @@ -6,11 +10,16 @@ use std::{ task::{Context, Poll}, }; +/// Lazily yields a value. +/// +/// This is used by the [`Insert`] and [`ResponseInsert`] middleware to yield +/// parameters. pub trait Lazy: Clone { + /// Returns a `V`-typed value. fn value(&self) -> V; } -/// Wraps an HTTP `Service` so that a `P`-typed `Param` is cloned into each +/// Wraps an HTTP `NewService` so that a `P`-typed `Param` is cloned into each /// request's extensions. #[derive(Debug)] pub struct NewInsert { @@ -18,7 +27,7 @@ pub struct NewInsert { _marker: PhantomData P>, } -/// Wraps an HTTP `Service` so that a `P`-typed `Param` is cloned into each +/// Wraps an HTTP `NewService` so that a `P`-typed `Param` is cloned into each /// response's extensions. #[derive(Debug)] pub struct NewResponseInsert { @@ -26,21 +35,27 @@ pub struct NewResponseInsert { _marker: PhantomData P>, } +/// Wraps an HTTP `Service` so that a `P`-typed `Param` is cloned into each +/// request's extensions. pub struct Insert { inner: S, lazy: L, _marker: PhantomData V>, } +/// Wraps an HTTP `Service` so that a `P`-typed `Param` is cloned into each +/// response's extensions. pub struct ResponseInsert { inner: S, lazy: L, _marker: PhantomData V>, } +/// A [`Lazy`] function. #[derive(Clone, Debug)] pub struct FnLazy(F); +/// A [`Lazy`] value. #[derive(Clone, Debug)] pub struct ValLazy(V); @@ -56,6 +71,9 @@ pub struct ResponseInsertFuture { // === impl NewInsert === impl NewInsert { + /// Returns a [`Layer`][tower::layer::Layer]. + /// + /// This layer inserts a `P` parameter into the request extensions. pub fn layer() -> impl tower::layer::Layer + Copy { layer::mk(|inner| Self { inner, @@ -91,6 +109,9 @@ impl Clone for NewInsert { // === impl NewResponseInsert === impl NewResponseInsert { + /// Returns a [`Layer`][tower::layer::Layer]. + /// + /// This layer inserts a `P` parameter into the response extensions. 
pub fn layer() -> impl tower::layer::Layer + Copy { layer::mk(|inner| Self { inner, @@ -258,7 +279,7 @@ impl Future for ResponseInsertFuture where F: TryFuture>, L: Lazy, - V: Send + Sync + 'static, + V: Clone + Send + Sync + 'static, { type Output = Result; diff --git a/linkerd/http/metrics/Cargo.toml b/linkerd/http/metrics/Cargo.toml index 2ea20c79da..5940607bbe 100644 --- a/linkerd/http/metrics/Cargo.toml +++ b/linkerd/http/metrics/Cargo.toml @@ -1,27 +1,27 @@ [package] name = "linkerd-http-metrics" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [features] test-util = [] [dependencies] -bytes = "1" +bytes = { workspace = true } futures = { version = "0.3", default-features = false } -http = "0.2" -http-body = "0.4" -hyper = { version = "0.14", features = ["http1", "http2"] } +http = { workspace = true } +http-body = { workspace = true } +hyper = { workspace = true, features = ["http1", "http2"] } parking_lot = "0.12" pin-project = "1" tokio = { version = "1", features = ["time"] } -tower = "0.4" +tower = { workspace = true } tracing = "0.1" linkerd-error = { path = "../../error" } linkerd-http-classify = { path = "../classify" } -linkerd-metrics = { path = "../../metrics", features = ["linkerd-stack"] } +linkerd-metrics = { path = "../../metrics", features = ["stack"] } linkerd-stack = { path = "../../stack" } diff --git a/linkerd/http/metrics/src/lib.rs b/linkerd/http/metrics/src/lib.rs index c33922b845..cfadf887ab 100644 --- a/linkerd/http/metrics/src/lib.rs +++ b/linkerd/http/metrics/src/lib.rs @@ -93,7 +93,7 @@ where } } -impl<'p, N: fmt::Display> fmt::Display for Prefixed<'p, N> { +impl fmt::Display for Prefixed<'_, N> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { if self.prefix.is_empty() { return self.name.fmt(f); diff 
--git a/linkerd/http/metrics/src/requests.rs b/linkerd/http/metrics/src/requests.rs index 7a0df94b87..83b0af2e87 100644 --- a/linkerd/http/metrics/src/requests.rs +++ b/linkerd/http/metrics/src/requests.rs @@ -59,13 +59,25 @@ impl Requests { pub fn to_layer( &self, - ) -> impl layer::Layer> + Clone + ) -> impl layer::Layer> + Clone where L: ClassifyResponse + Send + Sync + 'static, N: svc::NewService, + { + self.to_layer_via(()) + } + + pub fn to_layer_via( + &self, + params: X, + ) -> impl layer::Layer> + Clone + where + L: ClassifyResponse + Send + Sync + 'static, + N: svc::NewService, + X: Clone, { let reg = self.0.clone(); - NewMetrics::layer(reg) + NewMetrics::layer_via(reg, params) } } diff --git a/linkerd/http/metrics/src/requests/service.rs b/linkerd/http/metrics/src/requests/service.rs index ad1af7ecbc..6745211b24 100644 --- a/linkerd/http/metrics/src/requests/service.rs +++ b/linkerd/http/metrics/src/requests/service.rs @@ -1,6 +1,6 @@ use super::{Metrics, StatusMetrics}; use futures::{ready, TryFuture}; -use http_body::Body; +use http_body::{Body, Frame}; use linkerd_error::Error; use linkerd_http_classify::{ClassifyEos, ClassifyResponse}; use linkerd_metrics::NewMetrics; @@ -18,8 +18,8 @@ use std::{ use tokio::time::Instant; /// Wraps services to record metrics. -pub type NewHttpMetrics = - NewMetrics>, HttpMetrics>; +pub type NewHttpMetrics = + NewMetrics>, HttpMetrics>; /// A middleware that records HTTP metrics. 
#[pin_project] @@ -266,12 +266,12 @@ where self.inner.is_end_stream() } - fn poll_data( + fn poll_frame( self: Pin<&mut Self>, cx: &mut Context<'_>, - ) -> Poll>> { + ) -> Poll, Self::Error>>> { let this = self.project(); - let frame = ready!(this.inner.poll_data(cx)); + let frame = ready!(this.inner.poll_frame(cx)); if let Some(lock) = this.metrics.take() { let now = Instant::now(); @@ -283,13 +283,6 @@ where Poll::Ready(frame) } - fn poll_trailers( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll, Self::Error>> { - self.project().inner.poll_trailers(cx) - } - #[inline] fn size_hint(&self) -> http_body::SizeHint { self.inner.size_hint() @@ -408,38 +401,46 @@ where self.inner.is_end_stream() } - fn poll_data( + fn poll_frame( mut self: Pin<&mut Self>, cx: &mut Context<'_>, - ) -> Poll>> { - let poll = ready!(self.as_mut().project().inner.poll_data(cx)); + ) -> Poll, Self::Error>>> { + // Poll the body for the next frame. + let poll = ready!(self.as_mut().project().inner.poll_frame(cx)); let frame = poll.map(|opt| opt.map_err(|e| self.as_mut().measure_err(e.into()))); + // Update latency metrics if we are tracking body latency. if !(*self.as_mut().project().latency_recorded) { - self.record_latency(); + self.as_mut().record_latency(); } - Poll::Ready(frame) - } - - fn poll_trailers( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll, Self::Error>> { - let trls = ready!(self.as_mut().project().inner.poll_trailers(cx)) - .map_err(|e| self.as_mut().measure_err(e.into()))?; - - if let Some(c) = self - .as_mut() - .project() - .classify - .take() - .map(|c| c.eos(trls.as_ref())) - { - self.record_class(c); + match &frame { + // Classify the stream if we have reached the end of the stream. + None => { + if let Some(classify) = self.as_mut().project().classify.take() { + let class = classify.eos(None); + self.record_class(class); + } + } + // Classify the stream if we have reached a trailers frame. 
+ Some(Ok(frame)) => { + if let trls @ Some(_) = frame.trailers_ref() { + if let Some(classify) = self.as_mut().project().classify.take() { + let class = classify.eos(trls); + self.record_class(class); + } + } + } + // Classify the stream if we have reached an error. + Some(Err(error)) => { + if let Some(classify) = self.as_mut().project().classify.take() { + let class = classify.error(error); + self.record_class(class); + } + } } - Poll::Ready(Ok(trls)) + Poll::Ready(frame) } #[inline] diff --git a/linkerd/http/override-authority/Cargo.toml b/linkerd/http/override-authority/Cargo.toml new file mode 100644 index 0000000000..36d0172917 --- /dev/null +++ b/linkerd/http/override-authority/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "linkerd-http-override-authority" +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } +description = """ +Tower middleware to override request authorities. +""" + +[dependencies] +http = { workspace = true } +tower = { workspace = true, default-features = false } +tracing = "0.1" + +linkerd-stack = { path = "../../stack" } diff --git a/linkerd/proxy/http/src/override_authority.rs b/linkerd/http/override-authority/src/lib.rs similarity index 75% rename from linkerd/proxy/http/src/override_authority.rs rename to linkerd/http/override-authority/src/lib.rs index dc9983d6ac..e4efd3216b 100644 --- a/linkerd/proxy/http/src/override_authority.rs +++ b/linkerd/http/override-authority/src/lib.rs @@ -1,4 +1,7 @@ -use http::{header::AsHeaderName, uri::Authority}; +use http::{ + header::AsHeaderName, + uri::{self, Authority}, +}; use linkerd_stack::{layer, NewService, Param}; use std::{ fmt, @@ -23,6 +26,27 @@ pub struct OverrideAuthority { inner: S, } +/// Sets the [`Authority`] of the given URI. 
+pub fn set_authority(uri: &mut uri::Uri, auth: Authority) { + let mut parts = uri::Parts::from(std::mem::take(uri)); + + parts.authority = Some(auth); + + // If this was an origin-form target (path only), + // then we can't *only* set the authority, as that's + // an illegal target (such as `example.com/docs`). + // + // But don't set a scheme if this was authority-form (CONNECT), + // since that would change its meaning (like `https://example.com`). + if parts.path_and_query.is_some() { + parts.scheme = Some(http::uri::Scheme::HTTP); + } + + let new = http::uri::Uri::from_parts(parts).expect("absolute uri"); + + *uri = new; +} + // === impl NewOverrideAuthority === impl NewOverrideAuthority { diff --git a/linkerd/http/prom/Cargo.toml b/linkerd/http/prom/Cargo.toml new file mode 100644 index 0000000000..2ff2d3a1bf --- /dev/null +++ b/linkerd/http/prom/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "linkerd-http-prom" +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } +description = """ +Tower middleware for Prometheus metrics. 
+""" + +[features] +test-util = [] + +[dependencies] +bytes = { workspace = true } +futures = { version = "0.3", default-features = false } +http = { workspace = true } +http-body = { workspace = true } +parking_lot = "0.12" +pin-project = "1" +prometheus-client = { workspace = true } +thiserror = "2" +tokio = { version = "1", features = ["time"] } + +linkerd-error = { path = "../../error" } +linkerd-http-box = { path = "../box" } +linkerd-metrics = { path = "../../metrics" } +linkerd-stack = { path = "../../stack" } diff --git a/linkerd/http/prom/src/body_data.rs b/linkerd/http/prom/src/body_data.rs new file mode 100644 index 0000000000..237e811e36 --- /dev/null +++ b/linkerd/http/prom/src/body_data.rs @@ -0,0 +1,5 @@ +pub mod request; +pub mod response; + +mod body; +mod metrics; diff --git a/linkerd/http/prom/src/body_data/body.rs b/linkerd/http/prom/src/body_data/body.rs new file mode 100644 index 0000000000..9c1db83500 --- /dev/null +++ b/linkerd/http/prom/src/body_data/body.rs @@ -0,0 +1,70 @@ +use super::metrics::BodyDataMetrics; +use http_body::{Frame, SizeHint}; +use pin_project::pin_project; +use std::{ + pin::Pin, + task::{Context, Poll}, +}; + +/// An instrumented body. +#[pin_project] +pub struct Body { + /// The inner body. + #[pin] + inner: B, + /// Metrics with which the inner body will be instrumented. + metrics: BodyDataMetrics, +} + +impl Body { + /// Returns a new, instrumented body. + pub(crate) fn new(body: B, metrics: BodyDataMetrics) -> Self { + Self { + inner: body, + metrics, + } + } +} + +impl http_body::Body for Body +where + B: http_body::Body, +{ + type Data = B::Data; + type Error = B::Error; + + /// Attempt to pull out the next data buffer of this stream. 
+ fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + let this = self.project(); + let inner = this.inner; + let BodyDataMetrics { frame_size } = this.metrics; + + let frame = std::task::ready!(inner.poll_frame(cx)); + + if let Some(Ok(frame)) = &frame { + if let Some(data) = frame.data_ref() { + // We've polled and yielded a new chunk! Increment our telemetry. + // + // NB: We're careful to call `remaining()` rather than `chunk()`, which + // "can return a shorter slice (this allows non-continuous internal representation)." + let bytes = bytes::Buf::remaining(data); + frame_size.observe(linkerd_metrics::to_f64(bytes as u64)); + } + } + + Poll::Ready(frame) + } + + #[inline] + fn is_end_stream(&self) -> bool { + self.inner.is_end_stream() + } + + #[inline] + fn size_hint(&self) -> SizeHint { + self.inner.size_hint() + } +} diff --git a/linkerd/http/prom/src/body_data/metrics.rs b/linkerd/http/prom/src/body_data/metrics.rs new file mode 100644 index 0000000000..d123cc4a57 --- /dev/null +++ b/linkerd/http/prom/src/body_data/metrics.rs @@ -0,0 +1,132 @@ +//! Prometheus counters for request and response bodies. + +use linkerd_metrics::prom::{ + self, metrics::family::MetricConstructor, Family, Histogram, Registry, Unit, +}; + +/// Counters for request body frames. +#[derive(Clone, Debug)] +pub struct RequestBodyFamilies { + /// Counts the number of request body frames by size. + frame_sizes: Family, +} + +/// Counters for response body frames. +#[derive(Clone, Debug)] +pub struct ResponseBodyFamilies { + /// Counts the number of response body frames by size. + frame_sizes: Family, +} + +/// Counters to instrument a request or response body. +#[derive(Clone, Debug)] +pub struct BodyDataMetrics { + /// Counts the number of request body frames. + pub frame_size: Histogram, +} + +/// A constructor for creating new [`Histogram`]s in a [`Family`]. 
+#[derive(Clone, Copy)] +struct NewHisto; + +// === impl NewHisto === + +impl MetricConstructor for NewHisto { + fn new_metric(&self) -> Histogram { + Histogram::new([128.0, 1024.0, 10240.0]) + } +} + +// === impl RequestBodyFamilies === + +impl Default for RequestBodyFamilies +where + L: Clone + std::hash::Hash + Eq, +{ + fn default() -> Self { + Self { + frame_sizes: Family::new_with_constructor(NewHisto), + } + } +} + +impl RequestBodyFamilies +where + L: prom::encoding::EncodeLabelSet + + std::fmt::Debug + + std::hash::Hash + + Eq + + Clone + + Send + + Sync + + 'static, +{ + /// Registers and returns a new family of body data metrics. + pub fn register(registry: &mut Registry) -> Self { + let frame_sizes = Family::new_with_constructor(NewHisto); + registry.register_with_unit( + "request_frame_size", + "Request data frame sizes", + Unit::Bytes, + frame_sizes.clone(), + ); + + Self { frame_sizes } + } + + /// Returns the [`BodyDataMetrics`] for the given label set. + pub fn metrics(&self, labels: &L) -> BodyDataMetrics { + let Self { frame_sizes } = self; + + let frame_size = frame_sizes.get_or_create(labels).clone(); + + BodyDataMetrics { frame_size } + } +} + +// === impl ResponseBodyFamilies === + +impl Default for ResponseBodyFamilies +where + L: Clone + std::hash::Hash + Eq, +{ + fn default() -> Self { + Self { + frame_sizes: Family::new_with_constructor(NewHisto), + } + } +} + +impl ResponseBodyFamilies +where + L: prom::encoding::EncodeLabelSet + + std::fmt::Debug + + std::hash::Hash + + Eq + + Clone + + Send + + Sync + + 'static, +{ + /// Registers and returns a new family of body data metrics. + pub fn register(registry: &mut Registry) -> Self { + let frame_sizes = Family::new_with_constructor(NewHisto); + registry.register_with_unit( + "response_frame_size", + "Response data frame sizes", + Unit::Bytes, + frame_sizes.clone(), + ); + + Self { frame_sizes } + } + + /// Returns the [`BodyDataMetrics`] for the given label set. 
+ pub fn metrics(&self, labels: &L) -> BodyDataMetrics { + let Self { frame_sizes } = self; + + let frame_size = frame_sizes.get_or_create(labels).clone(); + + BodyDataMetrics { frame_size } + } +} diff --git a/linkerd/http/prom/src/body_data/request.rs b/linkerd/http/prom/src/body_data/request.rs new file mode 100644 index 0000000000..c7947bfe3c --- /dev/null +++ b/linkerd/http/prom/src/body_data/request.rs @@ -0,0 +1,126 @@ +//! Tower middleware to instrument request bodies. + +pub use super::metrics::{BodyDataMetrics, RequestBodyFamilies}; + +use http::{Request, Response}; +use linkerd_error::Error; +use linkerd_http_box::BoxBody; +use linkerd_stack::{self as svc, layer::Layer, ExtractParam, NewService, Service}; +use std::marker::PhantomData; + +/// A [`NewService`] that creates [`RecordBodyData`] services. +#[derive(Clone, Debug)] +pub struct NewRecordBodyData { + /// The inner [`NewService`]. + inner: N, + extract: X, + metrics: RequestBodyFamilies, + marker: PhantomData, +} + +/// Tracks body frames for an inner `S`-typed [`Service`]. +#[derive(Clone, Debug)] +pub struct RecordBodyData { + /// The inner [`Service`]. + inner: S, + extract: ReqX, + metrics: RequestBodyFamilies, +} + +// === impl NewRecordBodyData === + +impl NewRecordBodyData +where + X: Clone, + L: Clone, +{ + /// Returns a [`Layer`] that tracks body chunks. + /// + /// This uses an `X`-typed [`ExtractParam`] implementation to extract service parameters + /// from a `T`-typed target. 
+ pub fn new(extract: X, metrics: RequestBodyFamilies) -> impl Layer { + svc::layer::mk(move |inner| Self { + inner, + extract: extract.clone(), + metrics: metrics.clone(), + marker: PhantomData, + }) + } +} + +impl NewService for NewRecordBodyData +where + N: NewService, + X: ExtractParam, + L: Clone, +{ + type Service = RecordBodyData; + + fn new_service(&self, target: T) -> Self::Service { + let Self { + inner, + extract, + metrics, + marker: _, + } = self; + + let extract = extract.extract_param(&target); + let inner = inner.new_service(target); + let metrics = metrics.clone(); + + RecordBodyData { + inner, + extract, + metrics, + } + } +} + +// === impl RecordBodyData === + +impl Service> for RecordBodyData +where + S: Service, Response = Response>, + S::Future: Send + 'static, + ReqB: http_body::Body + Send + 'static, + ReqB::Data: Send + 'static, + ReqB::Error: Into, + ReqX: ExtractParam>, + L: linkerd_metrics::prom::encoding::EncodeLabelSet + + std::fmt::Debug + + std::hash::Hash + + Eq + + Clone + + Send + + Sync + + 'static, +{ + type Response = S::Response; + type Error = S::Error; + type Future = S::Future; + + #[inline] + fn poll_ready( + &mut self, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + let Self { + inner, + extract, + metrics, + } = self; + + let req = { + let labels = extract.extract_param(&req); + let metrics = metrics.metrics(&labels); + let instrument = |b| super::body::Body::new(b, metrics); + req.map(instrument).map(BoxBody::new) + }; + + inner.call(req) + } +} diff --git a/linkerd/http/prom/src/body_data/response.rs b/linkerd/http/prom/src/body_data/response.rs new file mode 100644 index 0000000000..6463f3ba44 --- /dev/null +++ b/linkerd/http/prom/src/body_data/response.rs @@ -0,0 +1,94 @@ +//! Tower middleware to instrument response bodies. 
+ +pub use super::metrics::{BodyDataMetrics, ResponseBodyFamilies}; + +use http::{Request, Response}; +use http_body::Body; +use linkerd_error::Error; +use linkerd_http_box::BoxBody; +use linkerd_stack::{self as svc, layer::Layer, ExtractParam, NewService, Service}; +use std::{future::Future, pin::Pin}; + +/// A [`NewService`] that creates [`RecordBodyData`] services. +#[derive(Clone, Debug)] +pub struct NewRecordBodyData { + /// The [`ExtractParam`] strategy for obtaining our parameters. + extract: X, + /// The inner [`NewService`]. + inner: N, +} + +/// Tracks body frames for an inner `S`-typed [`Service`]. +#[derive(Clone, Debug)] +pub struct RecordBodyData { + /// The inner [`Service`]. + inner: S, + /// The metrics to be affixed to the response body. + metrics: BodyDataMetrics, +} + +// === impl NewRecordBodyData === + +impl NewRecordBodyData { + /// Returns a [`Layer`] that tracks body chunks. + /// + /// This uses an `X`-typed [`ExtractParam`] implementation to extract service parameters + /// from a `T`-typed target. 
+ pub fn layer_via(extract: X) -> impl Layer { + svc::layer::mk(move |inner| Self { + extract: extract.clone(), + inner, + }) + } +} + +impl NewService for NewRecordBodyData +where + X: ExtractParam, + N: NewService, +{ + type Service = RecordBodyData; + + fn new_service(&self, target: T) -> Self::Service { + let Self { extract, inner } = self; + + let metrics = extract.extract_param(&target); + let inner = inner.new_service(target); + + RecordBodyData { inner, metrics } + } +} + +// === impl RecordBodyData === + +impl Service> for RecordBodyData +where + S: Service, Response = Response>, + S::Future: Send + 'static, + RespB: Body + Send + 'static, + RespB::Data: Send + 'static, + RespB::Error: Into, +{ + type Response = Response; + type Error = S::Error; + type Future = Pin> + Send>>; + + #[inline] + fn poll_ready( + &mut self, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + use futures::{FutureExt, TryFutureExt}; + + let Self { inner, metrics } = self; + let metrics = metrics.clone(); + inner + .call(req) + .map_ok(|rsp| rsp.map(|b| BoxBody::new(super::body::Body::new(b, metrics)))) + .boxed() + } +} diff --git a/linkerd/http/prom/src/count_reqs.rs b/linkerd/http/prom/src/count_reqs.rs new file mode 100644 index 0000000000..49e7dbde6b --- /dev/null +++ b/linkerd/http/prom/src/count_reqs.rs @@ -0,0 +1,139 @@ +//! A Tower middleware for counting requests processed by a service. + +use linkerd_stack as svc; +use prometheus_client::{ + encoding::EncodeLabelSet, + metrics::{counter::Counter, family::Family}, + registry::Registry, +}; +use std::task::{Context, Poll}; + +/// A [`Family`] of counters with `L`-encoded labels. +/// +/// See [`EncodeLabelSet`] for more information about encoding labels. +#[derive(Clone, Debug)] +pub struct RequestCountFamilies(Family); + +// A single [`Counter`] that tracks the number of requests. 
+#[derive(Clone, Debug)] +pub struct RequestCount(Counter); + +/// A [`NewService`][svc::NewService] that creates [`RequestCount`] services. +#[derive(Clone, Debug)] +pub struct NewCountRequests { + /// The inner [`NewService`][svc::NewService]. + inner: N, + /// The [`ExtractParam`][svc::ExtractParam] strategy for obtaining our parameters. + extract: X, +} + +/// Counts requests for an inner `S`-typed [`Service`][svc::Service]. +/// +/// This will increment its counter when [`call()`][svc::Service::call]ed, before calling the inner +/// service. +#[derive(Clone, Debug)] +pub struct CountRequests { + inner: S, + requests: Counter, +} + +// === impl NewCountRequests === + +impl NewCountRequests { + pub fn new(extract: X, inner: N) -> Self { + Self { extract, inner } + } + + /// Returns a [`Layer`][svc::layer::Layer] that counts requests. + /// + /// This uses an `X`-typed [`ExtractParam`][svc::ExtractParam] implementation to extract + /// [`RequestCount`] from a `T`-typed target. + pub fn layer_via(extract: X) -> impl svc::layer::Layer + Clone { + svc::layer::mk(move |inner| Self::new(extract.clone(), inner)) + } +} + +impl svc::NewService for NewCountRequests +where + X: svc::ExtractParam, + N: svc::NewService, +{ + type Service = CountRequests; + + fn new_service(&self, target: T) -> Self::Service { + let rc = self.extract.extract_param(&target); + let inner = self.inner.new_service(target); + CountRequests::new(rc, inner) + } +} + +// === impl CountRequests === + +impl CountRequests { + pub(crate) fn new(RequestCount(requests): RequestCount, inner: S) -> Self { + Self { requests, inner } + } +} + +impl svc::Service> for CountRequests +where + S: svc::Service>, +{ + type Response = S::Response; + type Error = S::Error; + type Future = S::Future; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: http::Request) -> Self::Future { + // We received a request! 
Increment the counter and then call the inner service `S`. + self.requests.inc(); + self.inner.call(req) + } +} + +// === impl RequestCountFamilies === + +impl Default for RequestCountFamilies +where + L: EncodeLabelSet + std::fmt::Debug + std::hash::Hash, + L: Eq + Clone, +{ + fn default() -> Self { + Self(Family::default()) + } +} + +impl RequestCountFamilies +where + L: EncodeLabelSet + std::fmt::Debug + std::hash::Hash, + L: Eq + Clone + Send + Sync + 'static, +{ + /// Registers this family of counters with the given [`Registry`]. + pub fn register(registry: &mut Registry) -> Self { + let requests = Family::default(); + registry.register( + "requests", + "The total number of requests dispatched", + requests.clone(), + ); + Self(requests) + } + + /// Returns a [`RequestCount`] for the given label set. + pub fn metrics(&self, labels: &L) -> RequestCount { + RequestCount(self.0.get_or_create(labels).clone()) + } +} + +// === impl RequestCount === + +impl RequestCount { + /// Returns the current value of the counter. 
+ pub fn get(&self) -> u64 { + self.0.get() + } +} diff --git a/linkerd/http/prom/src/lib.rs b/linkerd/http/prom/src/lib.rs new file mode 100644 index 0000000000..51ee223934 --- /dev/null +++ b/linkerd/http/prom/src/lib.rs @@ -0,0 +1,8 @@ +#![deny(rust_2018_idioms, clippy::disallowed_methods, clippy::disallowed_types)] +#![forbid(unsafe_code)] + +pub mod body_data; +mod count_reqs; +pub mod record_response; + +pub use self::count_reqs::{CountRequests, NewCountRequests, RequestCount, RequestCountFamilies}; diff --git a/linkerd/http/prom/src/record_response.rs b/linkerd/http/prom/src/record_response.rs new file mode 100644 index 0000000000..cfdc134411 --- /dev/null +++ b/linkerd/http/prom/src/record_response.rs @@ -0,0 +1,330 @@ +use http_body::Body; +use linkerd_error::Error; +use linkerd_http_box::BoxBody; +use linkerd_metrics::prom::Counter; +use linkerd_stack as svc; +use prometheus_client::{ + encoding::EncodeLabelSet, + metrics::{ + family::{Family, MetricConstructor}, + histogram::Histogram, + }, +}; +use std::{ + future::Future, + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; +use tokio::{sync::oneshot, time}; + +mod request; +mod response; + +pub use self::{ + request::{NewRequestDuration, RecordRequestDuration, RequestMetrics}, + response::{NewResponseDuration, RecordResponseDuration, ResponseMetrics}, +}; + +/// A strategy for labeling request/responses streams for status and duration +/// metrics. +/// +/// This is specifically to support higher-cardinality status counters and +/// lower-cardinality stream duration histograms. 
+pub trait MkStreamLabel { + type DurationLabels: EncodeLabelSet + + Clone + + Eq + + std::fmt::Debug + + std::hash::Hash + + Send + + Sync + + 'static; + type StatusLabels: EncodeLabelSet + + Clone + + Eq + + std::fmt::Debug + + std::hash::Hash + + Send + + Sync + + 'static; + + type StreamLabel: StreamLabel< + DurationLabels = Self::DurationLabels, + StatusLabels = Self::StatusLabels, + >; + + /// Returns None when the request should not be recorded. + fn mk_stream_labeler(&self, req: &http::Request) -> Option; +} + +pub trait StreamLabel: Send + 'static { + type DurationLabels: EncodeLabelSet + + Clone + + Eq + + std::fmt::Debug + + std::hash::Hash + + Send + + Sync + + 'static; + type StatusLabels: EncodeLabelSet + + Clone + + Eq + + std::fmt::Debug + + std::hash::Hash + + Send + + Sync + + 'static; + + fn init_response(&mut self, rsp: &http::Response); + fn end_response(&mut self, trailers: Result, &Error>); + + fn status_labels(&self) -> Self::StatusLabels; + fn duration_labels(&self) -> Self::DurationLabels; +} + +/// A set of parameters that can be used to construct a `RecordResponse` layer. +pub struct Params { + pub labeler: L, + pub metric: M, +} + +#[derive(Clone, Debug, thiserror::Error)] +#[error("request was cancelled before completion")] +pub struct RequestCancelled(()); + +/// Builds RecordResponse instances by extracing M-typed parameters from stack +/// targets +#[derive(Clone, Debug)] +pub struct NewRecordResponse { + inner: N, + extract: X, + _marker: std::marker::PhantomData (L, M)>, +} + +/// A Service that can record a request/response durations. +#[derive(Clone, Debug)] +pub struct RecordResponse { + inner: S, + labeler: L, + metric: M, +} + +#[pin_project::pin_project] +pub struct ResponseFuture +where + L: StreamLabel, +{ + #[pin] + inner: F, + state: Option>, +} + +/// Notifies the response labeler when the response body is flushed. 
+#[pin_project::pin_project(PinnedDrop)] +struct ResponseBody { + #[pin] + inner: BoxBody, + state: Option>, +} + +struct ResponseState { + labeler: L, + statuses: Family, + duration: DurationFamily, + start: oneshot::Receiver, +} + +type DurationFamily = Family; + +#[derive(Clone, Debug)] +struct MkDurationHistogram(Arc<[f64]>); + +// === impl MkDurationHistogram === + +impl MetricConstructor for MkDurationHistogram { + fn new_metric(&self) -> Histogram { + Histogram::new(self.0.iter().copied()) + } +} + +// === impl NewRecordResponse === + +impl NewRecordResponse +where + M: MkStreamLabel, +{ + pub fn new(extract: X, inner: N) -> Self { + Self { + extract, + inner, + _marker: std::marker::PhantomData, + } + } + + pub fn layer_via(extract: X) -> impl svc::layer::Layer + Clone + where + X: Clone, + { + svc::layer::mk(move |inner| Self::new(extract.clone(), inner)) + } +} + +impl NewRecordResponse +where + M: MkStreamLabel, +{ + pub fn layer() -> impl svc::layer::Layer + Clone { + Self::layer_via(()) + } +} + +impl svc::NewService for NewRecordResponse +where + L: MkStreamLabel, + X: svc::ExtractParam, T>, + N: svc::NewService, +{ + type Service = RecordResponse; + + fn new_service(&self, target: T) -> Self::Service { + let Params { labeler, metric } = self.extract.extract_param(&target); + let inner = self.inner.new_service(target); + RecordResponse::new(labeler, metric, inner) + } +} + +// === impl RecordResponse === + +impl RecordResponse +where + L: MkStreamLabel, +{ + pub(crate) fn new(labeler: L, metric: M, inner: S) -> Self { + Self { + inner, + labeler, + metric, + } + } +} + +// === impl ResponseFuture === + +impl Future for ResponseFuture +where + L: StreamLabel, + F: Future, Error>>, +{ + type Output = Result, Error>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + let res = futures::ready!(this.inner.poll(cx)).map_err(Into::into); + let mut state = this.state.take(); + match res { + Ok(rsp) => { + if let 
Some(ResponseState { labeler, .. }) = state.as_mut() { + labeler.init_response(&rsp); + } + + let (head, inner) = rsp.into_parts(); + if inner.is_end_stream() { + end_stream(&mut state, Ok(None)); + } + Poll::Ready(Ok(http::Response::from_parts( + head, + BoxBody::new(ResponseBody { inner, state }), + ))) + } + Err(error) => { + end_stream(&mut state, Err(&error)); + Poll::Ready(Err(error)) + } + } + } +} + +// === impl ResponseBody === + +impl http_body::Body for ResponseBody +where + L: StreamLabel, +{ + type Data = ::Data; + type Error = Error; + + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + let mut this = self.project(); + + // Poll the inner body for the next frame. + let poll = this.inner.as_mut().poll_frame(cx); + let frame = futures::ready!(poll).map(|res| res.map_err(Error::from)); + + match &frame { + Some(Ok(frame)) => { + if let trls @ Some(_) = frame.trailers_ref() { + end_stream(this.state, Ok(trls)); + } else if this.inner.is_end_stream() { + end_stream(this.state, Ok(None)); + } + } + Some(Err(error)) => end_stream(this.state, Err(error)), + None => end_stream(this.state, Ok(None)), + } + + Poll::Ready(frame) + } + + fn is_end_stream(&self) -> bool { + // If the inner response state is still in place, the end of the stream has not been + // classified and recorded yet. 
+ self.state.is_none() + } +} + +#[pin_project::pinned_drop] +impl PinnedDrop for ResponseBody +where + L: StreamLabel, +{ + fn drop(self: Pin<&mut Self>) { + let this = self.project(); + if this.state.is_some() { + end_stream(this.state, Err(&RequestCancelled(()).into())); + } + } +} + +fn end_stream( + state: &mut Option>, + res: Result, &Error>, +) where + L: StreamLabel, +{ + let Some(ResponseState { + duration, + statuses: total, + mut start, + mut labeler, + }) = state.take() + else { + return; + }; + + labeler.end_response(res); + + total.get_or_create(&labeler.status_labels()).inc(); + + let elapsed = if let Ok(start) = start.try_recv() { + time::Instant::now().saturating_duration_since(start) + } else { + time::Duration::ZERO + }; + duration + .get_or_create(&labeler.duration_labels()) + .observe(elapsed.as_secs_f64()); +} diff --git a/linkerd/http/prom/src/record_response/request.rs b/linkerd/http/prom/src/record_response/request.rs new file mode 100644 index 0000000000..44bff77fe8 --- /dev/null +++ b/linkerd/http/prom/src/record_response/request.rs @@ -0,0 +1,132 @@ +use linkerd_error::Error; +use linkerd_http_box::BoxBody; +use linkerd_metrics::prom::Counter; +use linkerd_stack as svc; +use prometheus_client::{ + encoding::EncodeLabelSet, + metrics::family::Family, + registry::{Registry, Unit}, +}; +use std::{ + sync::Arc, + task::{Context, Poll}, +}; +use tokio::{sync::oneshot, time}; + +use super::{DurationFamily, MkDurationHistogram, MkStreamLabel}; + +/// Metrics type that tracks completed requests. 
+#[derive(Debug)] +pub struct RequestMetrics { + duration: DurationFamily, + statuses: Family, +} + +pub type NewRequestDuration = super::NewRecordResponse< + L, + X, + RequestMetrics<::DurationLabels, ::StatusLabels>, + N, +>; + +pub type RecordRequestDuration = super::RecordResponse< + L, + RequestMetrics<::DurationLabels, ::StatusLabels>, + S, +>; + +// === impl RequestMetrics === + +impl RequestMetrics +where + DurL: EncodeLabelSet + Clone + Eq + std::fmt::Debug + std::hash::Hash + Send + Sync + 'static, + StatL: EncodeLabelSet + Clone + Eq + std::fmt::Debug + std::hash::Hash + Send + Sync + 'static, +{ + pub fn register(reg: &mut Registry, histo: impl IntoIterator) -> Self { + let duration = + DurationFamily::new_with_constructor(MkDurationHistogram(histo.into_iter().collect())); + reg.register_with_unit( + "request_duration", + "The time between request initialization and response completion", + Unit::Seconds, + duration.clone(), + ); + + let statuses = Family::default(); + reg.register( + "request_statuses", + "Completed request-response streams", + statuses.clone(), + ); + + Self { duration, statuses } + } +} + +#[cfg(feature = "test-util")] +impl RequestMetrics +where + StatL: EncodeLabelSet + Clone + Eq + std::fmt::Debug + std::hash::Hash + Send + Sync + 'static, + DurL: EncodeLabelSet + Clone + Eq + std::fmt::Debug + std::hash::Hash + Send + Sync + 'static, +{ + pub fn get_statuses(&self, labels: &StatL) -> Counter { + (*self.statuses.get_or_create(labels)).clone() + } + + // TODO(kate): it'd be nice if we could avoid creating a time series if it does not exist, + // so that tests can confirm that certain label sets do not exist within the family. 
+} + +impl Default for RequestMetrics +where + StatL: EncodeLabelSet + Clone + Eq + std::fmt::Debug + std::hash::Hash + Send + Sync + 'static, + DurL: EncodeLabelSet + Clone + Eq + std::fmt::Debug + std::hash::Hash + Send + Sync + 'static, +{ + fn default() -> Self { + Self { + duration: DurationFamily::new_with_constructor(MkDurationHistogram(Arc::new([]))), + statuses: Default::default(), + } + } +} + +impl Clone for RequestMetrics { + fn clone(&self) -> Self { + Self { + duration: self.duration.clone(), + statuses: self.statuses.clone(), + } + } +} + +impl svc::Service> for RecordRequestDuration +where + L: MkStreamLabel, + S: svc::Service, Response = http::Response, Error = Error>, +{ + type Response = http::Response; + type Error = S::Error; + type Future = super::ResponseFuture; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: http::Request) -> Self::Future { + let state = self.labeler.mk_stream_labeler(&req).map(|labeler| { + let (tx, start) = oneshot::channel(); + tx.send(time::Instant::now()).unwrap(); + let RequestMetrics { statuses, duration } = self.metric.clone(); + super::ResponseState { + labeler, + start, + duration, + statuses, + } + }); + + let inner = self.inner.call(req); + super::ResponseFuture { state, inner } + } +} diff --git a/linkerd/http/prom/src/record_response/response.rs b/linkerd/http/prom/src/record_response/response.rs new file mode 100644 index 0000000000..ff506191d7 --- /dev/null +++ b/linkerd/http/prom/src/record_response/response.rs @@ -0,0 +1,181 @@ +use http_body::Frame; +use linkerd_error::Error; +use linkerd_http_box::BoxBody; +use linkerd_metrics::prom::Counter; +use linkerd_stack as svc; +use prometheus_client::{ + encoding::EncodeLabelSet, + metrics::family::Family, + registry::{Registry, Unit}, +}; +use std::{ + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; +use tokio::{sync::oneshot, time}; + +use super::{DurationFamily, 
MkDurationHistogram, MkStreamLabel}; + +#[derive(Debug)] +pub struct ResponseMetrics { + duration: DurationFamily, + statuses: Family, +} + +pub type NewResponseDuration = super::NewRecordResponse< + L, + X, + ResponseMetrics<::DurationLabels, ::StatusLabels>, + N, +>; + +pub type RecordResponseDuration = super::RecordResponse< + L, + ResponseMetrics<::DurationLabels, ::StatusLabels>, + S, +>; + +/// Notifies the response body when the request body is flushed. +#[pin_project::pin_project(PinnedDrop)] +struct RequestBody { + #[pin] + inner: B, + flushed: Option>, +} + +// === impl ResponseMetrics === + +impl ResponseMetrics +where + DurL: EncodeLabelSet + Clone + Eq + std::fmt::Debug + std::hash::Hash + Send + Sync + 'static, + StatL: EncodeLabelSet + Clone + Eq + std::fmt::Debug + std::hash::Hash + Send + Sync + 'static, +{ + pub fn register(reg: &mut Registry, histo: impl IntoIterator) -> Self { + let duration = + DurationFamily::new_with_constructor(MkDurationHistogram(histo.into_iter().collect())); + reg.register_with_unit( + "response_duration", + "The time between request completion and response completion", + Unit::Seconds, + duration.clone(), + ); + + let statuses = Family::default(); + reg.register("response_statuses", "Completed responses", statuses.clone()); + + Self { duration, statuses } + } +} + +#[cfg(feature = "test-util")] +impl ResponseMetrics +where + StatL: EncodeLabelSet + Clone + Eq + std::fmt::Debug + std::hash::Hash + Send + Sync + 'static, + DurL: EncodeLabelSet + Clone + Eq + std::fmt::Debug + std::hash::Hash + Send + Sync + 'static, +{ + pub fn get_statuses(&self, labels: &StatL) -> Counter { + (*self.statuses.get_or_create(labels)).clone() + } +} + +impl Default for ResponseMetrics +where + StatL: EncodeLabelSet + Clone + Eq + std::fmt::Debug + std::hash::Hash + Send + Sync + 'static, + DurL: EncodeLabelSet + Clone + Eq + std::fmt::Debug + std::hash::Hash + Send + Sync + 'static, +{ + fn default() -> Self { + Self { + duration: 
DurationFamily::new_with_constructor(MkDurationHistogram(Arc::new([]))), + statuses: Default::default(), + } + } +} + +impl Clone for ResponseMetrics { + fn clone(&self) -> Self { + Self { + duration: self.duration.clone(), + statuses: self.statuses.clone(), + } + } +} + +impl svc::Service> for RecordResponseDuration +where + M: MkStreamLabel, + S: svc::Service, Response = http::Response, Error = Error>, +{ + type Response = http::Response; + type Error = Error; + type Future = super::ResponseFuture; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, mut req: http::Request) -> Self::Future { + // If there's a labeler, wrap the request body to record the time that + // the respond flushes. + let state = if let Some(labeler) = self.labeler.mk_stream_labeler(&req) { + let (tx, start) = oneshot::channel(); + req = req.map(|inner| { + BoxBody::new(RequestBody { + inner, + flushed: Some(tx), + }) + }); + let ResponseMetrics { duration, statuses } = self.metric.clone(); + Some(super::ResponseState { + labeler, + start, + duration, + statuses, + }) + } else { + None + }; + + let inner = self.inner.call(req); + super::ResponseFuture { state, inner } + } +} + +// === impl ResponseBody === + +impl http_body::Body for RequestBody +where + B: http_body::Body, +{ + type Data = B::Data; + type Error = B::Error; + + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, B::Error>>> { + let mut this = self.project(); + let res = futures::ready!(this.inner.as_mut().poll_frame(cx)); + if (*this.inner).is_end_stream() { + if let Some(tx) = this.flushed.take() { + let _ = tx.send(time::Instant::now()); + } + } + Poll::Ready(res) + } + + fn is_end_stream(&self) -> bool { + self.inner.is_end_stream() + } +} + +#[pin_project::pinned_drop] +impl PinnedDrop for RequestBody { + fn drop(self: Pin<&mut Self>) { + let this = self.project(); + if let Some(tx) = this.flushed.take() { + let _ = 
tx.send(time::Instant::now()); + } + } +} diff --git a/linkerd/http/retain/Cargo.toml b/linkerd/http/retain/Cargo.toml new file mode 100644 index 0000000000..5e1124cc0b --- /dev/null +++ b/linkerd/http/retain/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "linkerd-http-retain" +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } +description = """ +Tower middleware to manage service lifecycles. + +This is mostly intended to support cache eviction. +""" + +[dependencies] +http = { workspace = true } +http-body = { workspace = true } +pin-project = "1" +tower = { workspace = true, default-features = false } + +linkerd-stack = { path = "../../stack" } diff --git a/linkerd/proxy/http/src/retain.rs b/linkerd/http/retain/src/lib.rs similarity index 86% rename from linkerd/proxy/http/src/retain.rs rename to linkerd/http/retain/src/lib.rs index e5ed0f9deb..327ed50555 100644 --- a/linkerd/proxy/http/src/retain.rs +++ b/linkerd/http/retain/src/lib.rs @@ -1,5 +1,7 @@ -//! Provides a middleware that holds an inner service as long as responses are -//! being processed. +//! Tower middleware to manage service lifecycles. +//! +//! Provides a [`Retain`] middleware that holds an inner service as long as responses are +//! being processed. This is mostly intended to support cache eviction. 
use linkerd_stack::layer; use pin_project::pin_project; @@ -106,19 +108,11 @@ impl http_body::Body for RetainBody { } #[inline] - fn poll_data( + fn poll_frame( self: Pin<&mut Self>, cx: &mut Context<'_>, - ) -> Poll>> { - self.project().inner.poll_data(cx) - } - - #[inline] - fn poll_trailers( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>, B::Error>> { - self.project().inner.poll_trailers(cx) + ) -> Poll, Self::Error>>> { + self.project().inner.poll_frame(cx) } #[inline] diff --git a/linkerd/http/retry/Cargo.toml b/linkerd/http/retry/Cargo.toml index ba2f001d06..c2ef5d51e8 100644 --- a/linkerd/http/retry/Cargo.toml +++ b/linkerd/http/retry/Cargo.toml @@ -1,24 +1,32 @@ [package] name = "linkerd-http-retry" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [dependencies] -bytes = "1" +bytes = { workspace = true } futures = { version = "0.3", default-features = false } -http-body = "0.4" -http = "0.2" +http-body = { workspace = true } +http-body-util = { workspace = true } +http = { workspace = true } parking_lot = "0.12" +pin-project = "1" +tokio = { version = "1", features = ["macros", "rt"] } +tower = { workspace = true, features = ["retry"] } tracing = "0.1" -thiserror = "1" +thiserror = "2" +linkerd-http-box = { path = "../box" } linkerd-error = { path = "../../error" } +linkerd-exp-backoff = { path = "../../exp-backoff" } +linkerd-metrics = { path = "../../metrics" } linkerd-stack = { path = "../../stack" } [dev-dependencies] -hyper = "0.14" +hyper = { workspace = true } linkerd-tracing = { path = "../../tracing", features = ["ansi"] } +linkerd-mock-http-body = { path = "../../mock/http-body" } tokio = { version = "1", features = ["macros", "rt"] } diff --git a/linkerd/http/retry/src/lib.rs b/linkerd/http/retry/src/lib.rs index 
838b49a5b6..01375b65b1 100644 --- a/linkerd/http/retry/src/lib.rs +++ b/linkerd/http/retry/src/lib.rs @@ -1,7 +1,360 @@ #![deny(rust_2018_idioms, clippy::disallowed_methods, clippy::disallowed_types)] #![forbid(unsafe_code)] +pub mod peek_trailers; pub mod replay; -pub mod with_trailers; -pub use self::{replay::ReplayBody, with_trailers::WithTrailers}; +pub use self::{peek_trailers::PeekTrailersBody, replay::ReplayBody}; +pub use tower::retry::budget::Budget; + +use futures::{future, prelude::*}; +use linkerd_error::{Error, Result}; +use linkerd_exp_backoff::ExponentialBackoff; +use linkerd_http_box::BoxBody; +use linkerd_metrics::prom; +use linkerd_stack::{layer, ExtractParam, NewService, Param, Service}; +use std::{ + future::Future, + hash::Hash, + marker::PhantomData, + pin::Pin, + task::{Context, Poll}, +}; +use tower::ServiceExt; +use tracing::{debug, trace}; + +/// A HTTP retry strategy. +pub trait Policy: Clone + Sized { + type Future: Future + Send + 'static; + + /// Determines if a response should be retried. + fn is_retryable(&self, result: Result<&http::Response, &Error>) -> bool; + + /// Prepare headers for the next request. + fn set_headers(&self, dst: &mut http::HeaderMap, orig: &http::HeaderMap) { + *dst = orig.clone(); + } + + /// Prepare extensions for the next request. + fn set_extensions(&self, _dst: &mut http::Extensions, _orig: &http::Extensions) {} +} + +#[derive(Clone, Debug)] +pub struct Params { + pub max_retries: usize, + pub max_request_bytes: usize, + pub backoff: Option, +} + +#[derive(Clone, Debug)] +pub struct NewHttpRetry { + inner: N, + metrics: MetricFamilies, + extract: X, + _marker: PhantomData (ReqX, P)>, +} + +/// A Retry middleware that attempts to extract a `P` typed request extension to +/// instrument retries. When the request extension is not set, requests are not +/// retried. 
+#[derive(Clone, Debug)] +pub struct HttpRetry { + inner: S, + metrics: MetricFamilies, + extract: ReqX, + _marker: PhantomData P>, +} + +#[derive(Clone, Debug)] +pub struct MetricFamilies { + limit_exceeded: prom::Family, + overflow: prom::Family, + requests: prom::Family, + successes: prom::Family, +} + +#[derive(Clone, Debug, Default)] +struct Metrics { + requests: prom::Counter, + successes: prom::Counter, + limit_exceeded: prom::Counter, + overflow: prom::Counter, +} + +// === impl NewHttpRetry === + +impl NewHttpRetry { + pub fn layer( + metrics: MetricFamilies, + ) -> impl tower::layer::Layer + Clone { + Self::layer_via_mk((), metrics) + } +} + +impl NewHttpRetry { + pub fn layer_via_mk( + extract: X, + metrics: MetricFamilies, + ) -> impl tower::layer::Layer + Clone { + layer::mk(move |inner| Self { + inner, + extract: extract.clone(), + metrics: metrics.clone(), + _marker: PhantomData, + }) + } +} + +impl NewService for NewHttpRetry +where + P: Policy, + L: Clone + std::fmt::Debug + Hash + Eq + Send + Sync + prom::encoding::EncodeLabelSet + 'static, + X: Clone + ExtractParam, + N: NewService, +{ + type Service = HttpRetry; + + fn new_service(&self, target: T) -> Self::Service { + let Self { + inner, + metrics, + extract, + _marker, + } = self; + + let metrics = metrics.clone(); + let extract = extract.extract_param(&target); + let svc = inner.new_service(target); + + HttpRetry { + inner: svc, + metrics, + extract, + _marker: PhantomData, + } + } +} + +// === impl MetricFamilies === + +impl Default for MetricFamilies +where + L: Clone + std::fmt::Debug + Hash + Eq + Send + Sync + prom::encoding::EncodeLabelSet + 'static, +{ + fn default() -> Self { + Self { + limit_exceeded: prom::Family::default(), + overflow: prom::Family::default(), + requests: prom::Family::default(), + successes: prom::Family::default(), + } + } +} + +impl MetricFamilies +where + L: Clone + std::fmt::Debug + Hash + Eq + Send + Sync + prom::encoding::EncodeLabelSet + 'static, +{ + pub 
fn register(registry: &mut prom::Registry) -> Self { + let limit_exceeded = prom::Family::default(); + registry.register( + "limit_exceeded", + "Retryable requests not sent due to retry limits", + limit_exceeded.clone(), + ); + + let overflow = prom::Family::default(); + registry.register( + "overflow", + "Retryable requests not sent due to circuit breakers", + overflow.clone(), + ); + + let requests = prom::Family::default(); + registry.register("requests", "Retry requests emitted", requests.clone()); + + let successes = prom::Family::default(); + registry.register( + "successes", + "Successful responses to retry requests", + successes.clone(), + ); + Self { + limit_exceeded, + overflow, + requests, + successes, + } + } + + fn metrics(&self, labels: &L) -> Metrics { + let requests = (*self.requests.get_or_create(labels)).clone(); + let successes = (*self.successes.get_or_create(labels)).clone(); + let limit_exceeded = (*self.limit_exceeded.get_or_create(labels)).clone(); + let overflow = (*self.overflow.get_or_create(labels)).clone(); + Metrics { + requests, + successes, + limit_exceeded, + overflow, + } + } +} + +// === impl HttpRetry === + +impl Service> for HttpRetry +where + P: Policy, + P: Param, + P: Clone + Send + Sync + std::fmt::Debug + 'static, + L: Clone + std::fmt::Debug + Hash + Eq + Send + Sync + prom::encoding::EncodeLabelSet + 'static, + ReqX: ExtractParam>, + S: Service, Response = http::Response, Error = Error> + + Clone + + Send + + 'static, + S::Future: Send + 'static, +{ + type Response = http::Response; + type Error = Error; + type Future = future::Either< + >>::Future, + Pin>> + Send + 'static>>, + >; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, mut req: http::Request) -> Self::Future { + // Retries are configured from request extensions so that they can be + // configured from both policy and request headers. + let Some(policy) = req.extensions_mut().remove::

() else { + // If there is no policy, there is no need to retry. This avoids + // buffering logic in the default case. + trace!(retryable = false, "Request lacks a retry policy"); + return future::Either::Left(self.inner.call(req)); + }; + + // TODO(kate): extract the params, metrics, and labels. in the future, we would like to + // avoid this middleware needing to know about Prometheus labels. + let params = policy.param(); + let labels = self.extract.extract_param(&req); + let metrics = self.metrics.metrics(&labels); + + // Since this request is retryable, we need to setup the request body to + // be buffered/cloneable. If the request body is too large to be cloned, + // the retry policy is ignored. + let req = { + let (head, body) = req.into_parts(); + match ReplayBody::try_new(body, params.max_request_bytes) { + Ok(body) => http::Request::from_parts(head, body), + Err(body) => { + debug!(retryable = false, "Request body is too large to be retried"); + return future::Either::Left( + self.inner.call(http::Request::from_parts(head, body)), + ); + } + } + }; + debug!(retryable = true, policy = ?policy); + + // Take the inner service, replacing it with a clone. This allows the + // readiness from poll_ready to be preserved. + // + // Retry::poll_ready is just a pass-through to the inner service, so we + // can rely on the fact that we've taken the ready inner service handle. + let pending = self.inner.clone(); + let svc = std::mem::replace(&mut self.inner, pending); + let call = send_req_with_retries(svc, req, policy, metrics, params); + future::Either::Right(Box::pin(call)) + } +} + +async fn send_req_with_retries( + // `svc` must be made ready before calling this function. + mut svc: impl Service, Response = http::Response, Error = Error>, + request: http::Request, + policy: impl Policy, + metrics: Metrics, + params: Params, +) -> Result> { + // Initial request. 
+ let mut backup = mk_backup(&request, &policy); + let mut result = send_req(&mut svc, request).await; + if !policy.is_retryable(result.as_ref()) { + tracing::trace!("Success on first attempt"); + return result.map(|rsp| rsp.map(BoxBody::new)); + } + if matches!(backup.body().is_capped(), None | Some(true)) { + // The body was either too large, or we received an early response + // before the request body was completed read. We cannot safely + // attempt to send this request again. + return result.map(|rsp| rsp.map(BoxBody::new)); + } + + // The response was retryable, so continue trying to dispatch backup + // requests. + let mut backoff = params.backoff.map(|b| b.stream()); + for n in 1..=params.max_retries { + if let Some(backoff) = backoff.as_mut() { + backoff.next().await; + } + + // The service must be buffered to be cloneable; so if it's not ready, + // then a circuit breaker is active and requests will be load shed. + let Some(svc) = svc.ready().now_or_never().transpose()? else { + tracing::debug!("Retry overflow; service is not ready"); + metrics.overflow.inc(); + return result.map(|rsp| rsp.map(BoxBody::new)); + }; + + tracing::debug!(retry.attempt = n); + let request = backup; + backup = mk_backup(&request, &policy); + metrics.requests.inc(); + result = send_req(svc, request).await; + if !policy.is_retryable(result.as_ref()) { + if result.is_ok() { + metrics.successes.inc(); + } + tracing::debug!("Retry success"); + return result.map(|rsp| rsp.map(BoxBody::new)); + } + if matches!(backup.body().is_capped(), None | Some(true)) { + return result.map(|rsp| rsp.map(BoxBody::new)); + } + } + + // The result is retryable but we've run out of attempts. + tracing::debug!("Retry limit exceeded"); + metrics.limit_exceeded.inc(); + result.map(|rsp| rsp.map(BoxBody::new)) +} + +// Make the request and wait for the response. 
We proactively poll the +// response body for its next frame to convert the response into a +async fn send_req( + svc: &mut impl Service, Response = http::Response, Error = Error>, + req: http::Request, +) -> Result> { + svc.call(req.map(BoxBody::new)) + .and_then(|rsp| async move { + tracing::debug!("Peeking at the response trailers"); + let rsp = PeekTrailersBody::map_response(rsp).await; + Ok(rsp) + }) + .await +} + +fn mk_backup(orig: &http::Request, policy: &impl Policy) -> http::Request { + let mut dst = http::Request::new(orig.body().clone()); + *dst.method_mut() = orig.method().clone(); + *dst.uri_mut() = orig.uri().clone(); + *dst.version_mut() = orig.version(); + policy.set_headers(dst.headers_mut(), orig.headers()); + policy.set_extensions(dst.extensions_mut(), orig.extensions()); + dst +} diff --git a/linkerd/http/retry/src/peek_trailers.rs b/linkerd/http/retry/src/peek_trailers.rs new file mode 100644 index 0000000000..f38f42f368 --- /dev/null +++ b/linkerd/http/retry/src/peek_trailers.rs @@ -0,0 +1,421 @@ +use futures::{ + future::{self, Either}, + FutureExt, +}; +use http::HeaderMap; +use http_body::{Body, Frame}; +use linkerd_http_box::BoxBody; +use pin_project::pin_project; +use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, +}; + +/// An HTTP body that allows inspecting the body's trailers, if a `TRAILERS` +/// frame was the first frame after the initial headers frame. +/// +/// If the first frame of the body stream was *not* a `TRAILERS` frame, this +/// behaves identically to a normal body. +#[pin_project] +pub struct PeekTrailersBody(#[pin] Inner); + +#[pin_project(project = Projection)] +enum Inner { + /// An empty body. + Empty, + /// A body that contains zero or one DATA frame. + /// + /// This variant MAY have trailers that can be peeked. + Unary { + data: Option>, + trailers: Option>, + }, + /// A body that (potentially) contains more than one DATA frame. 
+ /// + /// This variant indicates that the inner body's trailers could not be observed, with some + /// frames that were buffered. + Buffered { + first: Option>, + second: Option>, + /// The inner [`Body`]. + #[pin] + inner: B, + }, + /// A transparent, inert body. + /// + /// This variant will not attempt to peek the inner body's trailers. + Passthru { + /// The inner [`Body`]. + #[pin] + inner: B, + }, +} + +/// A future that yields a response instrumented with [`PeekTrailersBody`]. +pub type WithPeekTrailersBody = Either, ReadingResponse>; +/// A future that immediately yields a response. +type ReadyResponse = future::Ready>>; +/// A boxed future that must poll a body before yielding a response. +type ReadingResponse = + Pin>> + Send + 'static>>; + +// === impl WithTrailers === + +impl PeekTrailersBody { + /// Returns a reference to the body's trailers, if available. + /// + /// This function will return `None` if the body's trailers could not be peeked, or if there + /// were no trailers included. + pub fn peek_trailers(&self) -> Option<&http::HeaderMap> { + let Self(inner) = self; + match inner { + Inner::Unary { + trailers: Some(Ok(trailers)), + .. + } => Some(trailers), + Inner::Unary { + trailers: None | Some(Err(_)), + .. + } + | Inner::Empty + | Inner::Buffered { .. } + | Inner::Passthru { .. } => None, + } + } + + pub fn map_response(rsp: http::Response) -> WithPeekTrailersBody + where + B: Send + Unpin + 'static, + B::Data: Send + Unpin + 'static, + B::Error: Send, + { + use http::Version; + + // If the response isn't an HTTP version that has trailers, skip trying + // to read a trailers frame. + if let Version::HTTP_09 | Version::HTTP_10 | Version::HTTP_11 = rsp.version() { + return Either::Left(future::ready( + rsp.map(|inner| Self(Inner::Passthru { inner })), + )); + } + + // If the response doesn't have a body stream, also skip trying to read + // a trailers frame. 
+ if rsp.is_end_stream() { + tracing::debug!("Skipping trailers for empty body"); + return Either::Left(future::ready(rsp.map(|_| Self(Inner::Empty)))); + } + + // Otherwise, return a future that tries to read the next frame. + Either::Right(Box::pin(async move { + let (parts, body) = rsp.into_parts(); + let body = Self::read_body(body).await; + http::Response::from_parts(parts, body) + })) + } + + async fn read_body(mut body: B) -> Self + where + B: Send + Unpin, + B::Data: Send + Unpin, + B::Error: Send, + { + use http_body_util::BodyExt; + + // First, poll the body for its first frame. + tracing::debug!("Buffering first data frame"); + let first_frame = body + .frame() + .map(|f| f.map(|r| r.map(Self::split_frame))) + .await; + + let body = Self(match first_frame { + // The body has no frames. It is empty. + None => Inner::Empty, + // The body yielded an error. We are done. + Some(Err(error)) => Inner::Unary { + data: Some(Err(error)), + trailers: None, + }, + // The body yielded a TRAILERS frame. We are done. + Some(Ok(Some(Either::Right(trailers)))) => Inner::Unary { + data: None, + trailers: Some(Ok(trailers)), + }, + // The body yielded an unknown kind of frame. + Some(Ok(None)) => Inner::Buffered { + first: None, + second: None, + inner: body, + }, + // The body yielded a DATA frame. Check for a second frame, without yielding again. + Some(Ok(Some(Either::Left(first)))) => { + if let Some(second) = body + .frame() + .map(|f| f.map(|r| r.map(Self::split_frame))) + .now_or_never() + { + // The second frame is available. Let's inspect it and determine what to do. + match second { + // The body is finished. There is not a TRAILERS frame. + None => Inner::Unary { + data: Some(Ok(first)), + trailers: None, + }, + // We immediately yielded a result, but it was an error. Alas! + Some(Err(error)) => Inner::Unary { + data: Some(Ok(first)), + trailers: Some(Err(error)), + }, + // We immediately yielded another frame, but it was a second DATA frame. 
+ // We hold on to each frame, but we cannot wait for the TRAILERS. + Some(Ok(Some(Either::Left(second)))) => Inner::Buffered { + first: Some(Ok(first)), + second: Some(Ok(second)), + inner: body, + }, + // The body immediately yielded a second TRAILERS frame. Nice! + Some(Ok(Some(Either::Right(trailers)))) => Inner::Unary { + data: Some(Ok(first)), + trailers: Some(Ok(trailers)), + }, + // The body yielded an unknown kind of frame. + Some(Ok(None)) => Inner::Buffered { + first: None, + second: None, + inner: body, + }, + } + } else { + // If we are here, the second frame is not yet available. We cannot be sure + // that a second DATA frame is on the way, and we are no longer willing to + // await additional frames. There are no trailers to peek. + Inner::Buffered { + first: None, + second: None, + inner: body, + } + } + } + }); + + if body.peek_trailers().is_some() { + tracing::debug!("Buffered trailers frame"); + } + + body + } + + /// Splits a `Frame` into a chunk of data or a header map. + /// + /// Frames do not expose their inner enums, and instead expose `into_data()` and + /// `into_trailers()` methods. This function breaks the frame into either `Some(Left(data))` + /// if it is given a DATA frame, and `Some(Right(trailers))` if it is given a TRAILERS frame. + /// + /// This returns `None` if an unknown frame is provided, that is neither. + /// + /// This is an internal helper to facilitate pattern matching in `read_body(..)`, above. + fn split_frame( + frame: http_body::Frame, + ) -> Option> { + use {futures::future::Either, http_body::Frame}; + match frame.into_data().map_err(Frame::into_trailers) { + Ok(data) => Some(Either::Left(data)), + Err(Ok(trailers)) => Some(Either::Right(trailers)), + Err(Err(_unknown)) => { + // It's possible that some sort of unknown frame could be encountered. 
+ tracing::warn!("an unknown body frame has been buffered"); + None + } + } + } +} + +impl Body for PeekTrailersBody +where + B: Body + Unpin, + B::Data: Unpin, + B::Error: Unpin, +{ + type Data = B::Data; + type Error = B::Error; + + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + let this = self.project().0.project(); + match this { + Projection::Empty => Poll::Ready(None), + Projection::Passthru { inner } => inner.poll_frame(cx), + Projection::Unary { data, trailers } => { + let mut take_data = || data.take().map(|r| r.map(Frame::data)); + let take_trailers = || trailers.take().map(|r| r.map(Frame::trailers)); + let frame = take_data().or_else(take_trailers); + Poll::Ready(frame) + } + Projection::Buffered { + first, + second, + inner, + } => { + if let Some(data) = first.take().or_else(|| second.take()) { + let frame = data.map(Frame::data); + Poll::Ready(Some(frame)) + } else { + inner.poll_frame(cx) + } + } + } + } + + #[inline] + fn is_end_stream(&self) -> bool { + let Self(inner) = self; + match inner { + Inner::Empty => true, + Inner::Passthru { inner } => inner.is_end_stream(), + Inner::Unary { + data: None, + trailers: None, + } => true, + Inner::Unary { .. } => false, + Inner::Buffered { + inner, + first: None, + second: None, + } => inner.is_end_stream(), + Inner::Buffered { .. } => false, + } + } + + #[inline] + fn size_hint(&self) -> http_body::SizeHint { + use bytes::Buf; + let Self(inner) = self; + match inner { + Inner::Empty => http_body::SizeHint::new(), + Inner::Passthru { inner } => inner.size_hint(), + Inner::Unary { + data: Some(Ok(data)), + .. + } => { + let size = data.remaining() as u64; + http_body::SizeHint::with_exact(size) + } + Inner::Unary { + data: None | Some(Err(_)), + .. + } => http_body::SizeHint::new(), + Inner::Buffered { + first, + second, + inner, + } => { + // Add any frames we've buffered to the inner `Body`'s size hint. 
+ let mut hint = inner.size_hint(); + let mut add_to_hint = |frame: &Option>| { + if let Some(Ok(buf)) = frame { + let size = buf.remaining() as u64; + if let Some(upper) = hint.upper() { + hint.set_upper(upper + size); + } + hint.set_lower(hint.lower() + size); + } + }; + add_to_hint(first); + add_to_hint(second); + hint + } + } + } +} + +#[cfg(test)] +mod tests { + use super::PeekTrailersBody; + use bytes::Bytes; + use http::{HeaderMap, HeaderValue}; + use http_body::Body; + use linkerd_error::Error; + use linkerd_mock_http_body::MockBody; + use std::{ops::Not, task::Poll}; + + fn data() -> Option> { + let bytes = Bytes::from_static(b"hello"); + Some(Ok(bytes)) + } + + fn trailers() -> Option> { + let mut trls = HeaderMap::with_capacity(1); + let value = HeaderValue::from_static("shiny"); + trls.insert("trailer", value); + Some(Ok(trls)) + } + + #[tokio::test] + async fn cannot_peek_empty() { + let (_guard, _handle) = linkerd_tracing::test::trace_init(); + let empty = MockBody::default(); + let peek = PeekTrailersBody::read_body(empty).await; + assert!(peek.peek_trailers().is_none()); + assert!(peek.is_end_stream()); + } + + #[tokio::test] + async fn peeks_only_trailers() { + let (_guard, _handle) = linkerd_tracing::test::trace_init(); + let only_trailers = MockBody::default().then_yield_trailer(Poll::Ready(trailers())); + let peek = PeekTrailersBody::read_body(only_trailers).await; + assert!(peek.peek_trailers().is_some()); + assert!(peek.is_end_stream().not()); + } + + #[tokio::test] + async fn peeks_one_frame_with_immediate_trailers() { + let (_guard, _handle) = linkerd_tracing::test::trace_init(); + let body = MockBody::default() + .then_yield_data(Poll::Ready(data())) + .then_yield_trailer(Poll::Ready(trailers())); + let peek = PeekTrailersBody::read_body(body).await; + assert!(peek.peek_trailers().is_some()); + assert!(peek.is_end_stream().not()); + } + + #[tokio::test] + async fn cannot_peek_one_frame_with_eventual_trailers() { + let (_guard, _handle) = 
linkerd_tracing::test::trace_init(); + let body = MockBody::default() + .then_yield_data(Poll::Ready(data())) + .then_yield_trailer(Poll::Pending) + .then_yield_trailer(Poll::Ready(trailers())); + let peek = PeekTrailersBody::read_body(body).await; + assert!(peek.peek_trailers().is_none()); + assert!(peek.is_end_stream().not()); + } + + #[tokio::test] + async fn peeks_one_eventual_frame_with_immediate_trailers() { + let (_guard, _handle) = linkerd_tracing::test::trace_init(); + let body = MockBody::default() + .then_yield_data(Poll::Pending) + .then_yield_data(Poll::Ready(data())) + .then_yield_trailer(Poll::Ready(trailers())); + let peek = PeekTrailersBody::read_body(body).await; + assert!(peek.peek_trailers().is_some()); + assert!(peek.is_end_stream().not()); + } + + #[tokio::test] + async fn cannot_peek_two_frames_with_immediate_trailers() { + let (_guard, _handle) = linkerd_tracing::test::trace_init(); + let body = MockBody::default() + .then_yield_data(Poll::Ready(data())) + .then_yield_data(Poll::Ready(data())) + .then_yield_trailer(Poll::Ready(trailers())); + let peek = PeekTrailersBody::read_body(body).await; + assert!(peek.peek_trailers().is_none()); + assert!(peek.is_end_stream().not()); + } +} diff --git a/linkerd/http/retry/src/replay.rs b/linkerd/http/retry/src/replay.rs index 662ec68429..66ee9625e2 100644 --- a/linkerd/http/retry/src/replay.rs +++ b/linkerd/http/retry/src/replay.rs @@ -1,11 +1,20 @@ -use bytes::{Buf, BufMut, Bytes, BytesMut}; +use bytes::Buf; use http::HeaderMap; -use http_body::{Body, SizeHint}; +use http_body::{Body, Frame, SizeHint}; use linkerd_error::Error; +use linkerd_http_box::BoxBody; use parking_lot::Mutex; -use std::{collections::VecDeque, io::IoSlice, pin::Pin, sync::Arc, task::Context, task::Poll}; +use std::{pin::Pin, sync::Arc, task::Context, task::Poll}; use thiserror::Error; +pub use self::buffer::{Data, Replay}; + +mod buffer; + +/// Unit tests for [`ReplayBody`]. 
+#[cfg(test)] +mod tests; + /// Wraps an HTTP body type and lazily buffers data as it is read from the inner /// body. /// @@ -18,7 +27,7 @@ use thiserror::Error; /// The buffered data can then be used to retry the request if the original /// request fails. #[derive(Debug)] -pub struct ReplayBody { +pub struct ReplayBody { /// Buffered state owned by this body if it is actively being polled. If /// this body has been polled and no other body owned the state, this will /// be `Some`. @@ -41,21 +50,6 @@ pub struct ReplayBody { #[error("replay body discarded after reaching maximum buffered bytes limit")] pub struct Capped; -/// Data returned by `ReplayBody`'s `http_body::Body` implementation is either -/// `Bytes` returned by the initial body, or a list of all `Bytes` chunks -/// returned by the initial body (when replaying it). -#[derive(Debug)] -pub enum Data { - Initial(Bytes), - Replay(BufList), -} - -/// Body data composed of multiple `Bytes` chunks. -#[derive(Clone, Debug, Default)] -pub struct BufList { - bufs: VecDeque, -} - #[derive(Debug)] struct SharedState { body: Mutex>>, @@ -72,9 +66,9 @@ struct SharedState { #[derive(Debug)] struct BodyState { - buf: BufList, + replay: Replay, trailers: Option, - rest: Option, + rest: B, is_completed: bool, /// Maximum number of bytes to buffer. @@ -108,13 +102,13 @@ impl ReplayBody { was_empty: body.is_end_stream(), }), state: Some(BodyState { - buf: Default::default(), + replay: Default::default(), trailers: None, - rest: Some(body), + rest: body, is_completed: false, max_bytes: max_bytes + 1, }), - // The initial `ReplayBody` has nothing to replay + // The initial `ReplayBody` has no data to replay. 
replay_body: false, replay_trailers: false, }) @@ -135,23 +129,19 @@ impl ReplayBody { state.get_or_insert_with(|| shared.lock().take().expect("missing body state")) } - /// Returns `true` if the body previously exceeded the configured maximum + /// Returns `Some(true)` if the body previously exceeded the configured maximum /// length limit. /// /// If this is true, the body is now empty, and the request should *not* be /// retried with this body. - pub fn is_capped(&self) -> bool { + /// + /// If this is `None`, another clone has currently acquired the state, and is + /// still being polled. + pub fn is_capped(&self) -> Option { self.state .as_ref() .map(BodyState::is_capped) - .unwrap_or_else(|| { - self.shared - .body - .lock() - .as_ref() - .expect("if our `state` was `None`, the shared state must be `Some`") - .is_capped() - }) + .or_else(|| self.shared.body.lock().as_ref().map(BodyState::is_capped)) } } @@ -163,17 +153,23 @@ where type Data = Data; type Error = Error; - fn poll_data( + /// Polls for the next frame in this stream. + /// + /// # Panics + /// + /// This panics if another clone has currently acquired the state. A [`ReplayBody`] MUST + /// NOT be polled until the previous body has been dropped. + fn poll_frame( self: Pin<&mut Self>, cx: &mut Context<'_>, - ) -> Poll>> { + ) -> Poll, Self::Error>>> { let this = self.get_mut(); let state = Self::acquire_state(&mut this.state, &this.shared.body); // Move these out to avoid mutable borrow issues in the `map` closure // when polling the inner body. tracing::trace!( replay_body = this.replay_body, - buf.has_remaining = state.buf.has_remaining(), + buf.has_remaining = state.replay.has_remaining(), body.is_completed = state.is_completed, body.max_bytes_remaining = state.max_bytes, "ReplayBody::poll_data" @@ -182,11 +178,11 @@ where // If we haven't replayed the buffer yet, and its not empty, return the // buffered data first. 
if this.replay_body { - if state.buf.has_remaining() { + if state.replay.has_remaining() { tracing::trace!("Replaying body"); // Don't return the buffered data again on the next poll. this.replay_body = false; - return Poll::Ready(Some(Ok(Data::Replay(state.buf.clone())))); + return Poll::Ready(Some(Ok(Frame::data(Data::Replay(state.replay.clone()))))); } if state.is_capped() { @@ -194,6 +190,13 @@ where return Poll::Ready(Some(Err(Capped.into()))); } } + if this.replay_trailers { + this.replay_trailers = false; + if let Some(ref trailers) = state.trailers { + tracing::trace!("Replaying trailers"); + return Poll::Ready(Some(Ok(Frame::trailers(trailers.clone())))); + } + } // If the inner body has previously ended, don't poll it again. // @@ -208,103 +211,75 @@ where // Poll the inner body for more data. If the body has ended, remember // that so that future clones will not try polling it again (as // described above). - let mut data = { - // Get access to the initial body. If we don't have access to the - // inner body, there's no more work to do. - let rest = match state.rest.as_mut() { - Some(rest) => rest, - None => return Poll::Ready(None), - }; - + let frame: Frame = { + use futures::{future::Either, ready}; + // Poll the inner body for the next frame. tracing::trace!("Polling initial body"); - match futures::ready!(Pin::new(rest).poll_data(cx)) { - Some(Ok(data)) => data, - Some(Err(e)) => return Poll::Ready(Some(Err(e.into()))), + let poll = Pin::new(&mut state.rest).poll_frame(cx).map_err(Into::into); + let frame = match ready!(poll) { + // The body yielded a new frame. + Some(Ok(frame)) => frame, + // The body yielded an error. + Some(Err(error)) => return Poll::Ready(Some(Err(error))), + // The body has reached the end of the stream. None => { tracing::trace!("Initial body completed"); state.is_completed = true; return Poll::Ready(None); } + }; + // Now, inspect the frame: was it a chunk of data, or a trailers frame? 
+ match Self::split_frame(frame) { + Some(Either::Left(data)) => { + // If we have buffered the maximum number of bytes, allow *this* body to + // continue, but don't buffer any more. + let chunk = state.record_bytes(data); + Frame::data(chunk) + } + Some(Either::Right(trailers)) => { + tracing::trace!("Initial body completed"); + state.trailers = Some(trailers.clone()); + state.is_completed = true; + return Poll::Ready(Some(Ok(Frame::trailers(trailers)))); + } + None => return Poll::Ready(None), } }; - // If we have buffered the maximum number of bytes, allow *this* body to - // continue, but don't buffer any more. - let length = data.remaining(); - state.max_bytes = state.max_bytes.saturating_sub(length); - let chunk = if state.is_capped() { - // If there's data in the buffer, discard it now, since we won't - // allow any clones to have a complete body. - if state.buf.has_remaining() { - tracing::debug!( - buf.size = state.buf.remaining(), - "Buffered maximum capacity, discarding buffer" - ); - state.buf = Default::default(); - } - data.copy_to_bytes(length) - } else { - // Buffer and return the bytes. - state.buf.push_chunk(data) - }; - - Poll::Ready(Some(Ok(Data::Initial(chunk)))) - } - - fn poll_trailers( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll, Self::Error>> { - let this = self.get_mut(); - let state = Self::acquire_state(&mut this.state, &this.shared.body); - tracing::trace!( - replay_trailers = this.replay_trailers, - "Replay::poll_trailers" - ); - - if this.replay_trailers { - this.replay_trailers = false; - if let Some(ref trailers) = state.trailers { - tracing::trace!("Replaying trailers"); - return Poll::Ready(Ok(Some(trailers.clone()))); - } - } - - if let Some(rest) = state.rest.as_mut() { - // If the inner body has previously ended, don't poll it again. 
- if !rest.is_end_stream() { - let res = futures::ready!(Pin::new(rest).poll_trailers(cx)).map(|tlrs| { - if state.trailers.is_none() { - state.trailers.clone_from(&tlrs); - } - tlrs - }); - return Poll::Ready(res.map_err(Into::into)); - } - } - - Poll::Ready(Ok(None)) + Poll::Ready(Some(Ok(frame))) } + #[tracing::instrument( + skip_all, + level = "trace", + fields( + state.is_some = %self.state.is_some(), + replay_trailers = %self.replay_trailers, + replay_body = %self.replay_body, + is_completed = ?self.state.as_ref().map(|s| s.is_completed), + ) + )] fn is_end_stream(&self) -> bool { - // if the initial body was EOS as soon as it was wrapped, then we are - // empty. + // If the initial body was empty as soon as it was wrapped, then we are finished. if self.shared.was_empty { + tracing::trace!("Initial body was empty, stream has ended"); return true; } - let is_inner_eos = self - .state - .as_ref() - .and_then(|state| state.rest.as_ref().map(Body::is_end_stream)) - .unwrap_or(false); + let Some(state) = self.state.as_ref() else { + // This body is not currently the "active" replay being polled. + tracing::trace!("Inactive replay body is not complete"); + return false; + }; // if this body has data or trailers remaining to play back, it // is not EOS - !self.replay_body && !self.replay_trailers + let eos = !self.replay_body && !self.replay_trailers // if we have replayed everything, the initial body may // still have data remaining, so ask it - && is_inner_eos + && state.rest.is_end_stream(); + tracing::trace!(%eos, "Checked replay body end-of-stream"); + eos } #[inline] @@ -317,11 +292,8 @@ where // Otherwise, if we're holding the state but have dropped the inner // body, the entire body is buffered so we know the exact size hint. 
- let buffered = state.buf.remaining() as u64; - let rest_hint = match state.rest.as_ref() { - Some(rest) => rest.size_hint(), - None => return SizeHint::with_exact(buffered), - }; + let buffered = state.replay.remaining() as u64; + let rest_hint = state.rest.size_hint(); // Otherwise, add the inner body's size hint to the amount of buffered // data. An upper limit is only set if the inner body has an upper @@ -357,135 +329,27 @@ impl Drop for ReplayBody { } } -// === impl Data === - -impl Buf for Data { - #[inline] - fn remaining(&self) -> usize { - match self { - Data::Initial(buf) => buf.remaining(), - Data::Replay(bufs) => bufs.remaining(), - } - } - - #[inline] - fn chunk(&self) -> &[u8] { - match self { - Data::Initial(buf) => buf.chunk(), - Data::Replay(bufs) => bufs.chunk(), - } - } - - #[inline] - fn chunks_vectored<'iovs>(&'iovs self, iovs: &mut [IoSlice<'iovs>]) -> usize { - match self { - Data::Initial(buf) => buf.chunks_vectored(iovs), - Data::Replay(bufs) => bufs.chunks_vectored(iovs), - } - } - - #[inline] - fn advance(&mut self, amt: usize) { - match self { - Data::Initial(buf) => buf.advance(amt), - Data::Replay(bufs) => bufs.advance(amt), - } - } - - #[inline] - fn copy_to_bytes(&mut self, len: usize) -> Bytes { - match self { - Data::Initial(buf) => buf.copy_to_bytes(len), - Data::Replay(bufs) => bufs.copy_to_bytes(len), - } - } -} - -// === impl BufList === - -impl BufList { - fn push_chunk(&mut self, mut data: impl Buf) -> Bytes { - let len = data.remaining(); - // `data` is (almost) certainly a `Bytes`, so `copy_to_bytes` should - // internally be a cheap refcount bump almost all of the time. - // But, if it isn't, this will copy it to a `Bytes` that we can - // now clone. - let bytes = data.copy_to_bytes(len); - // Buffer a clone of the bytes read on this poll. 
- self.bufs.push_back(bytes.clone()); - // Return the bytes - bytes - } -} - -impl Buf for BufList { - fn remaining(&self) -> usize { - self.bufs.iter().map(Buf::remaining).sum() - } - - fn chunk(&self) -> &[u8] { - self.bufs.front().map(Buf::chunk).unwrap_or(&[]) - } - - fn chunks_vectored<'iovs>(&'iovs self, iovs: &mut [IoSlice<'iovs>]) -> usize { - // Are there more than zero iovecs to write to? - if iovs.is_empty() { - return 0; - } - - // Loop over the buffers in the replay buffer list, and try to fill as - // many iovecs as we can from each buffer. - let mut filled = 0; - for buf in &self.bufs { - filled += buf.chunks_vectored(&mut iovs[filled..]); - if filled == iovs.len() { - return filled; - } - } - - filled - } - - fn advance(&mut self, mut amt: usize) { - while amt > 0 { - let rem = self.bufs[0].remaining(); - // If the amount to advance by is less than the first buffer in - // the buffer list, advance that buffer's cursor by `amt`, - // and we're done. - if rem > amt { - self.bufs[0].advance(amt); - return; - } - - // Otherwise, advance the first buffer to its end, and - // continue. - self.bufs[0].advance(rem); - amt -= rem; - - self.bufs.pop_front(); - } - } - - fn copy_to_bytes(&mut self, len: usize) -> Bytes { - // If the length of the requested `Bytes` is <= the length of the front - // buffer, we can just use its `copy_to_bytes` implementation (which is - // just a reference count bump). - match self.bufs.front_mut() { - Some(first) if len <= first.remaining() => { - let buf = first.copy_to_bytes(len); - // If we consumed the first buffer, also advance our "cursor" by - // popping it. - if first.remaining() == 0 { - self.bufs.pop_front(); - } - - buf - } - _ => { - assert!(len <= self.remaining(), "`len` greater than remaining"); - let mut buf = BytesMut::with_capacity(len); - buf.put(self.take(len)); - buf.freeze() +impl ReplayBody { + /// Splits a `Frame` into a chunk of data or a header map. 
+ /// + /// Frames do not expose their inner enums, and instead expose `into_data()` and + /// `into_trailers()` methods. This function breaks the frame into either `Some(Left(data))` + /// if it is given a DATA frame, and `Some(Right(trailers))` if it is given a TRAILERS frame. + /// + /// This returns `None` if an unknown frame is provided, that is neither. + /// + /// This is an internal helper to facilitate pattern matching in `read_body(..)`, above. + fn split_frame( + frame: http_body::Frame, + ) -> Option> { + use {futures::future::Either, http_body::Frame}; + match frame.into_data().map_err(Frame::into_trailers) { + Ok(data) => Some(Either::Left(data)), + Err(Ok(trailers)) => Some(Either::Right(trailers)), + Err(Err(_unknown)) => { + // It's possible that some sort of unknown frame could be encountered. + tracing::warn!("An unknown body frame has been buffered"); + None } } } @@ -499,496 +363,3 @@ impl BodyState { self.max_bytes == 0 } } - -#[cfg(test)] -mod tests { - use super::*; - use http::HeaderValue; - - #[tokio::test] - async fn replays_one_chunk() { - let Test { - mut tx, - initial, - replay, - _trace, - } = Test::new(); - tx.send_data("hello world").await; - drop(tx); - - let initial = body_to_string(initial).await; - assert_eq!(initial, "hello world"); - - let replay = body_to_string(replay).await; - assert_eq!(replay, "hello world"); - } - - #[tokio::test] - async fn replays_several_chunks() { - let Test { - mut tx, - initial, - replay, - _trace, - } = Test::new(); - - tokio::spawn(async move { - tx.send_data("hello").await; - tx.send_data(" world").await; - tx.send_data(", have lots").await; - tx.send_data(" of fun!").await; - }); - - let initial = body_to_string(initial).await; - assert_eq!(initial, "hello world, have lots of fun!"); - - let replay = body_to_string(replay).await; - assert_eq!(replay, "hello world, have lots of fun!"); - } - - #[tokio::test] - async fn replays_trailers() { - let Test { - mut tx, - mut initial, - mut replay, - 
_trace, - } = Test::new(); - - let mut tlrs = HeaderMap::new(); - tlrs.insert("x-hello", HeaderValue::from_str("world").unwrap()); - tlrs.insert("x-foo", HeaderValue::from_str("bar").unwrap()); - - tx.send_data("hello world").await; - tx.send_trailers(tlrs.clone()).await; - drop(tx); - - while initial.data().await.is_some() { - // do nothing - } - let initial_tlrs = initial.trailers().await.expect("trailers should not error"); - assert_eq!(initial_tlrs.as_ref(), Some(&tlrs)); - - // drop the initial body to send the data to the replay - drop(initial); - - while replay.data().await.is_some() { - // do nothing - } - let replay_tlrs = replay.trailers().await.expect("trailers should not error"); - assert_eq!(replay_tlrs.as_ref(), Some(&tlrs)); - } - - #[tokio::test] - async fn trailers_only() { - let Test { - mut tx, - mut initial, - mut replay, - _trace, - } = Test::new(); - - let mut tlrs = HeaderMap::new(); - tlrs.insert("x-hello", HeaderValue::from_str("world").unwrap()); - tlrs.insert("x-foo", HeaderValue::from_str("bar").unwrap()); - - tx.send_trailers(tlrs.clone()).await; - - drop(tx); - - assert!(dbg!(initial.data().await).is_none(), "no data in body"); - let initial_tlrs = initial.trailers().await.expect("trailers should not error"); - assert_eq!(initial_tlrs.as_ref(), Some(&tlrs)); - - // drop the initial body to send the data to the replay - drop(initial); - - assert!(dbg!(replay.data().await).is_none(), "no data in body"); - let replay_tlrs = replay.trailers().await.expect("trailers should not error"); - assert_eq!(replay_tlrs.as_ref(), Some(&tlrs)); - } - - #[tokio::test(flavor = "current_thread")] - async fn switches_with_body_remaining() { - // This simulates a case where the server returns an error _before_ the - // entire body has been read. 
- let Test { - mut tx, - mut initial, - mut replay, - _trace, - } = Test::new(); - - tx.send_data("hello").await; - assert_eq!(chunk(&mut initial).await.unwrap(), "hello"); - - tx.send_data(" world").await; - assert_eq!(chunk(&mut initial).await.unwrap(), " world"); - - // drop the initial body to send the data to the replay - drop(initial); - tracing::info!("dropped initial body"); - - tokio::spawn(async move { - tx.send_data(", have lots of fun").await; - tx.send_trailers(HeaderMap::new()).await; - }); - - assert_eq!( - body_to_string(&mut replay).await, - "hello world, have lots of fun" - ); - } - - #[tokio::test(flavor = "current_thread")] - async fn multiple_replays() { - let Test { - mut tx, - mut initial, - mut replay, - _trace, - } = Test::new(); - - let mut tlrs = HeaderMap::new(); - tlrs.insert("x-hello", HeaderValue::from_str("world").unwrap()); - tlrs.insert("x-foo", HeaderValue::from_str("bar").unwrap()); - - let tlrs2 = tlrs.clone(); - tokio::spawn(async move { - tx.send_data("hello").await; - tx.send_data(" world").await; - tx.send_trailers(tlrs2).await; - }); - - assert_eq!(body_to_string(&mut initial).await, "hello world"); - - let initial_tlrs = initial.trailers().await.expect("trailers should not error"); - assert_eq!(initial_tlrs.as_ref(), Some(&tlrs)); - - // drop the initial body to send the data to the replay - drop(initial); - - let mut replay2 = replay.clone(); - assert_eq!(body_to_string(&mut replay).await, "hello world"); - - let replay_tlrs = replay.trailers().await.expect("trailers should not error"); - assert_eq!(replay_tlrs.as_ref(), Some(&tlrs)); - - // drop the initial body to send the data to the replay - drop(replay); - - assert_eq!(body_to_string(&mut replay2).await, "hello world"); - - let replay2_tlrs = replay2.trailers().await.expect("trailers should not error"); - assert_eq!(replay2_tlrs.as_ref(), Some(&tlrs)); - } - - #[tokio::test(flavor = "current_thread")] - async fn multiple_incomplete_replays() { - let Test { - mut tx, 
- mut initial, - mut replay, - _trace, - } = Test::new(); - - let mut tlrs = HeaderMap::new(); - tlrs.insert("x-hello", HeaderValue::from_str("world").unwrap()); - tlrs.insert("x-foo", HeaderValue::from_str("bar").unwrap()); - - tx.send_data("hello").await; - assert_eq!(chunk(&mut initial).await.unwrap(), "hello"); - - // drop the initial body to send the data to the replay - drop(initial); - tracing::info!("dropped initial body"); - - let mut replay2 = replay.clone(); - - tx.send_data(" world").await; - assert_eq!(chunk(&mut replay).await.unwrap(), "hello"); - assert_eq!(chunk(&mut replay).await.unwrap(), " world"); - - // drop the replay body to send the data to the second replay - drop(replay); - tracing::info!("dropped first replay body"); - - let tlrs2 = tlrs.clone(); - tokio::spawn(async move { - tx.send_data(", have lots").await; - tx.send_data(" of fun!").await; - tx.send_trailers(tlrs2).await; - }); - - assert_eq!( - body_to_string(&mut replay2).await, - "hello world, have lots of fun!" 
- ); - - let replay2_tlrs = replay2.trailers().await.expect("trailers should not error"); - assert_eq!(replay2_tlrs.as_ref(), Some(&tlrs)); - } - - #[tokio::test(flavor = "current_thread")] - async fn drop_clone_early() { - let Test { - mut tx, - mut initial, - mut replay, - _trace, - } = Test::new(); - - let mut tlrs = HeaderMap::new(); - tlrs.insert("x-hello", HeaderValue::from_str("world").unwrap()); - tlrs.insert("x-foo", HeaderValue::from_str("bar").unwrap()); - - let tlrs2 = tlrs.clone(); - tokio::spawn(async move { - tx.send_data("hello").await; - tx.send_data(" world").await; - tx.send_trailers(tlrs2).await; - }); - - assert_eq!(body_to_string(&mut initial).await, "hello world"); - - let initial_tlrs = initial.trailers().await.expect("trailers should not error"); - assert_eq!(initial_tlrs.as_ref(), Some(&tlrs)); - - // drop the initial body to send the data to the replay - drop(initial); - - // clone the body again and then drop it - let replay2 = replay.clone(); - drop(replay2); - - assert_eq!(body_to_string(&mut replay).await, "hello world"); - let replay_tlrs = replay.trailers().await.expect("trailers should not error"); - assert_eq!(replay_tlrs.as_ref(), Some(&tlrs)); - } - - // This test is specifically for behavior across clones, so the clippy lint - // is wrong here. - #[allow(clippy::redundant_clone)] - #[test] - fn empty_body_is_always_eos() { - // If the initial body was empty, every clone should always return - // `true` from `is_end_stream`. - let initial = ReplayBody::try_new(hyper::Body::empty(), 64 * 1024) - .expect("empty body can't be too large"); - assert!(initial.is_end_stream()); - - let replay = initial.clone(); - assert!(replay.is_end_stream()); - - let replay2 = replay.clone(); - assert!(replay2.is_end_stream()); - } - - #[tokio::test(flavor = "current_thread")] - async fn eos_only_when_fully_replayed() { - // Test that each clone of a body is not EOS until the data has been - // fully replayed. 
- let mut initial = ReplayBody::try_new(hyper::Body::from("hello world"), 64 * 1024) - .expect("body must not be too large"); - let mut replay = initial.clone(); - - body_to_string(&mut initial).await; - assert!(!replay.is_end_stream()); - - initial.trailers().await.expect("trailers should not error"); - assert!(initial.is_end_stream()); - assert!(!replay.is_end_stream()); - - // drop the initial body to send the data to the replay - drop(initial); - - assert!(!replay.is_end_stream()); - - body_to_string(&mut replay).await; - assert!(!replay.is_end_stream()); - - replay.trailers().await.expect("trailers should not error"); - assert!(replay.is_end_stream()); - - // Even if we clone a body _after_ it has been driven to EOS, the clone - // must not be EOS. - let mut replay2 = replay.clone(); - assert!(!replay2.is_end_stream()); - - // drop the initial body to send the data to the replay - drop(replay); - - body_to_string(&mut replay2).await; - assert!(!replay2.is_end_stream()); - - replay2.trailers().await.expect("trailers should not error"); - assert!(replay2.is_end_stream()); - } - - #[tokio::test(flavor = "current_thread")] - async fn caps_buffer() { - // Test that, when the initial body is longer than the preconfigured - // cap, we allow the request to continue, but stop buffering. The - // initial body will complete, but the replay will immediately fail. 
- let _trace = linkerd_tracing::test::with_default_filter("linkerd_http_retry=trace"); - - let (mut tx, body) = hyper::Body::channel(); - let mut initial = ReplayBody::try_new(body, 8).expect("channel body must not be too large"); - let mut replay = initial.clone(); - - // Send enough data to reach the cap - tx.send_data(Bytes::from("aaaaaaaa")).await.unwrap(); - assert_eq!(chunk(&mut initial).await, Some("aaaaaaaa".to_string())); - - // Further chunks are still forwarded on the initial body - tx.send_data(Bytes::from("bbbbbbbb")).await.unwrap(); - assert_eq!(chunk(&mut initial).await, Some("bbbbbbbb".to_string())); - - drop(initial); - - // The request's replay should error, since we discarded the buffer when - // we hit the cap. - let err = replay - .data() - .await - .expect("replay must yield Some(Err(..)) when capped") - .expect_err("replay must error when cappped"); - assert!(err.is::()) - } - - #[tokio::test(flavor = "current_thread")] - async fn caps_across_replays() { - // Test that, when the initial body is longer than the preconfigured - // cap, we allow the request to continue, but stop buffering. - let _trace = linkerd_tracing::test::with_default_filter("linkerd_http_retry=debug"); - - let (mut tx, body) = hyper::Body::channel(); - let mut initial = ReplayBody::try_new(body, 8).expect("channel body must not be too large"); - let mut replay = initial.clone(); - - // Send enough data to reach the cap - tx.send_data(Bytes::from("aaaaaaaa")).await.unwrap(); - assert_eq!(chunk(&mut initial).await, Some("aaaaaaaa".to_string())); - drop(initial); - - let mut replay2 = replay.clone(); - - // The replay will reach the cap, but it should still return data from - // the original body. 
- tx.send_data(Bytes::from("bbbbbbbb")).await.unwrap(); - assert_eq!(chunk(&mut replay).await, Some("aaaaaaaa".to_string())); - assert_eq!(chunk(&mut replay).await, Some("bbbbbbbb".to_string())); - drop(replay); - - // The second replay will fail, though, because the buffer was discarded. - let err = replay2 - .data() - .await - .expect("replay must yield Some(Err(..)) when capped") - .expect_err("replay must error when cappped"); - assert!(err.is::()) - } - - #[test] - fn body_too_big() { - let max_size = 8; - let mk_body = - |sz: usize| -> hyper::Body { (0..sz).map(|_| "x").collect::().into() }; - - assert!( - ReplayBody::try_new(hyper::Body::empty(), max_size).is_ok(), - "empty body is not too big" - ); - - assert!( - ReplayBody::try_new(mk_body(max_size), max_size).is_ok(), - "body at maximum capacity is not too big" - ); - - assert!( - ReplayBody::try_new(mk_body(max_size + 1), max_size).is_err(), - "over-sized body is too big" - ); - - let (_sender, body) = hyper::Body::channel(); - assert!( - ReplayBody::try_new(body, max_size).is_ok(), - "body without size hint is not too big" - ); - } - - struct Test { - tx: Tx, - initial: ReplayBody, - replay: ReplayBody, - _trace: tracing::subscriber::DefaultGuard, - } - - struct Tx(hyper::body::Sender); - - impl Test { - fn new() -> Self { - let (tx, body) = hyper::Body::channel(); - let initial = ReplayBody::try_new(body, 64 * 1024).expect("body too large"); - let replay = initial.clone(); - Self { - tx: Tx(tx), - initial, - replay, - _trace: linkerd_tracing::test::with_default_filter("linkerd_http_retry=debug").0, - } - } - } - - impl Tx { - #[tracing::instrument(skip(self))] - async fn send_data(&mut self, data: impl Into + std::fmt::Debug) { - let data = data.into(); - tracing::trace!("sending data..."); - self.0.send_data(data).await.expect("rx is not dropped"); - tracing::info!("sent data"); - } - - #[tracing::instrument(skip(self))] - async fn send_trailers(&mut self, trailers: HeaderMap) { - 
tracing::trace!("sending trailers..."); - self.0 - .send_trailers(trailers) - .await - .expect("rx is not dropped"); - tracing::info!("sent trailers"); - } - } - - async fn chunk(body: &mut T) -> Option - where - T: http_body::Body + Unpin, - { - tracing::trace!("waiting for a body chunk..."); - let chunk = body - .data() - .await - .map(|res| res.map_err(|_| ()).unwrap()) - .map(string); - tracing::info!(?chunk); - chunk - } - - async fn body_to_string(mut body: T) -> String - where - T: http_body::Body + Unpin, - T::Error: std::fmt::Debug, - { - let mut s = String::new(); - while let Some(chunk) = chunk(&mut body).await { - s.push_str(&chunk[..]); - } - tracing::info!(body = ?s, "no more data"); - s - } - - fn string(mut data: impl Buf) -> String { - let bytes = data.copy_to_bytes(data.remaining()); - String::from_utf8(bytes.to_vec()).unwrap() - } -} diff --git a/linkerd/http/retry/src/replay/buffer.rs b/linkerd/http/retry/src/replay/buffer.rs new file mode 100644 index 0000000000..e962e64313 --- /dev/null +++ b/linkerd/http/retry/src/replay/buffer.rs @@ -0,0 +1,177 @@ +use super::BodyState; +use bytes::{Buf, BufMut, Bytes, BytesMut}; +use http_body::Body; +use std::{collections::VecDeque, io::IoSlice}; + +/// Data returned by `ReplayBody`'s `http_body::Body` implementation is either +/// `Bytes` returned by the initial body, or a list of all `Bytes` chunks +/// returned by the initial body (when replaying it). +#[derive(Debug)] +pub enum Data { + Initial(Bytes), + Replay(Replay), +} + +/// A replayable [`Buf`] of body data. +/// +/// This storage is backed by cheaply cloneable [`Bytes`]. +#[derive(Clone, Debug, Default)] +pub struct Replay { + bufs: VecDeque, +} + +// === impl BodyState === + +impl BodyState { + /// Records a chunk of data yielded by the inner `B`-typed [`Body`]. + /// + /// This returns the next chunk of data as a chunk of [`Bytes`]. + /// + /// This records the chunk in the replay buffer, unless the maximum capacity has been exceeded. 
+ /// If the buffer's capacity has been exceeded, the buffer will be emptied. The initial body + /// will be permitted to continue, but cloned replays will fail with a + /// [`Capped`][super::Capped] error when polled. + pub(super) fn record_bytes(&mut self, mut data: B::Data) -> Data { + let length = data.remaining(); + self.max_bytes = self.max_bytes.saturating_sub(length); + + let bytes = if self.is_capped() { + // If there's data in the buffer, discard it now, since we won't + // allow any clones to have a complete body. + if self.replay.has_remaining() { + tracing::debug!( + buf.size = self.replay.remaining(), + "Buffered maximum capacity, discarding buffer" + ); + self.replay = Default::default(); + } + data.copy_to_bytes(length) + } else { + // Buffer a clone of the bytes read on this poll. + let bytes = data.copy_to_bytes(length); + self.replay.bufs.push_back(bytes.clone()); + bytes + }; + + Data::Initial(bytes) + } +} + +// === impl Data === + +impl Buf for Data { + #[inline] + fn remaining(&self) -> usize { + match self { + Data::Initial(buf) => buf.remaining(), + Data::Replay(replay) => replay.remaining(), + } + } + + #[inline] + fn chunk(&self) -> &[u8] { + match self { + Data::Initial(buf) => buf.chunk(), + Data::Replay(replay) => replay.chunk(), + } + } + + #[inline] + fn chunks_vectored<'iovs>(&'iovs self, iovs: &mut [IoSlice<'iovs>]) -> usize { + match self { + Data::Initial(buf) => buf.chunks_vectored(iovs), + Data::Replay(replay) => replay.chunks_vectored(iovs), + } + } + + #[inline] + fn advance(&mut self, amt: usize) { + match self { + Data::Initial(buf) => buf.advance(amt), + Data::Replay(replay) => replay.advance(amt), + } + } + + #[inline] + fn copy_to_bytes(&mut self, len: usize) -> Bytes { + match self { + Data::Initial(buf) => buf.copy_to_bytes(len), + Data::Replay(replay) => replay.copy_to_bytes(len), + } + } +} + +// === impl Replay === + +impl Buf for Replay { + fn remaining(&self) -> usize { + self.bufs.iter().map(Buf::remaining).sum() 
+ } + + fn chunk(&self) -> &[u8] { + self.bufs.front().map(Buf::chunk).unwrap_or(&[]) + } + + fn chunks_vectored<'iovs>(&'iovs self, iovs: &mut [IoSlice<'iovs>]) -> usize { + // Are there more than zero iovecs to write to? + if iovs.is_empty() { + return 0; + } + + // Loop over the buffers in the replay buffer list, and try to fill as + // many iovecs as we can from each buffer. + let mut filled = 0; + for buf in &self.bufs { + filled += buf.chunks_vectored(&mut iovs[filled..]); + if filled == iovs.len() { + return filled; + } + } + + filled + } + + fn advance(&mut self, mut amt: usize) { + while amt > 0 { + let rem = self.bufs[0].remaining(); + // If the amount to advance by is less than the first buffer in + // the buffer list, advance that buffer's cursor by `amt`, + // and we're done. + if rem > amt { + self.bufs[0].advance(amt); + return; + } + + // Otherwise, advance the first buffer to its end, and + // continue. + self.bufs[0].advance(rem); + amt -= rem; + + self.bufs.pop_front(); + } + } + + fn copy_to_bytes(&mut self, len: usize) -> Bytes { + // If the length of the requested `Bytes` is <= the length of the front + // buffer, we can just use its `copy_to_bytes` implementation (which is + // just a reference count bump). + match self.bufs.front_mut() { + Some(first) if len <= first.remaining() => { + let buf = first.copy_to_bytes(len); + // If we consumed the first buffer, also advance our "cursor" by + // popping it. 
+ if first.remaining() == 0 { + self.bufs.pop_front(); + } + + buf + } + _ => { + assert!(len <= self.remaining(), "`len` greater than remaining"); + let mut buf = BytesMut::with_capacity(len); + buf.put(self.take(len)); + buf.freeze() + } + } + } +} diff --git a/linkerd/http/retry/src/replay/tests.rs b/linkerd/http/retry/src/replay/tests.rs new file mode 100644 index 0000000000..93e83fe756 --- /dev/null +++ b/linkerd/http/retry/src/replay/tests.rs @@ -0,0 +1,782 @@ +use super::*; +use bytes::Bytes; +use http::{HeaderName, HeaderValue}; +use http_body_util::BodyExt; +use std::collections::VecDeque; + +struct Test { + // Sends body data. + tx: Tx, + /// The "initial" body. + initial: ReplayBody, + /// Replays the initial body. + replay: ReplayBody, + /// An RAII guard for the tracing subscriber. + _trace: tracing::subscriber::DefaultGuard, +} + +#[derive(Debug, Default)] +struct TestBody { + data: VecDeque<&'static str>, + trailers: Option, +} + +struct Tx(http_body_util::channel::Sender); + +#[tokio::test] +async fn replays_one_chunk() { + let Test { + mut tx, + initial, + replay, + _trace, + } = Test::new(); + tx.send_data("hello world").await; + drop(tx); + + { + let (data, trailers) = body_to_string(initial).await; + assert_eq!(data, "hello world"); + assert_eq!(trailers, None); + } + { + let (data, trailers) = body_to_string(replay).await; + assert_eq!(data, "hello world"); + assert_eq!(trailers, None); + } +} + +#[tokio::test] +async fn replays_several_chunks() { + let Test { + mut tx, + initial, + replay, + _trace, + } = Test::new(); + + tokio::spawn(async move { + tx.send_data("hello").await; + tx.send_data(" world").await; + tx.send_data(", have lots").await; + tx.send_data(" of fun!").await; + }); + + let (initial, trailers) = body_to_string(initial).await; + assert_eq!(initial, "hello world, have lots of fun!"); + assert!(trailers.is_none()); + + let (replay, trailers) = body_to_string(replay).await; + assert_eq!(replay, "hello world, have lots of fun!"); 
+ assert!(trailers.is_none()); +} + +#[tokio::test] +async fn replays_trailers() { + let Test { + mut tx, + initial, + replay, + _trace, + } = Test::new(); + let replay2 = replay.clone(); + + let mut tlrs = HeaderMap::new(); + tlrs.insert("x-hello", HeaderValue::from_str("world").unwrap()); + tlrs.insert("x-foo", HeaderValue::from_str("bar").unwrap()); + + tx.send_data("hello world").await; + tx.send_trailers(tlrs.clone()).await; + drop(tx); + + let read_trailers = |mut body: ReplayBody<_>| async move { + let _ = body + .frame() + .await + .expect("should yield a result") + .expect("should yield a frame") + .into_data() + .expect("should yield data"); + let trls = body + .frame() + .await + .expect("should yield a result") + .expect("should yield a frame") + .into_trailers() + .expect("should yield trailers"); + assert!(body.frame().await.is_none()); + trls + }; + + let initial_tlrs = read_trailers(initial).await; + assert_eq!(&initial_tlrs, &tlrs); + + let replay_tlrs = read_trailers(replay).await; + assert_eq!(&replay_tlrs, &tlrs); + + let replay_tlrs = read_trailers(replay2).await; + assert_eq!(&replay_tlrs, &tlrs); +} + +#[tokio::test] +async fn replays_trailers_only() { + let Test { + mut tx, + mut initial, + mut replay, + _trace, + } = Test::new(); + + let mut tlrs = HeaderMap::new(); + tlrs.insert("x-hello", HeaderValue::from_str("world").unwrap()); + tlrs.insert("x-foo", HeaderValue::from_str("bar").unwrap()); + + tx.send_trailers(tlrs.clone()).await; + + drop(tx); + + let initial_tlrs = initial + .frame() + .await + .expect("should yield a result") + .expect("should yield a frame") + .into_trailers() + .expect("should yield trailers"); + assert_eq!(&initial_tlrs, &tlrs); + + // drop the initial body to send the data to the replay + drop(initial); + + let replay_tlrs = replay + .frame() + .await + .expect("should yield a result") + .expect("should yield a frame") + .into_trailers() + .expect("should yield trailers"); + assert_eq!(&replay_tlrs, &tlrs); +} + 
+#[tokio::test(flavor = "current_thread")] +async fn switches_with_body_remaining() { + // This simulates a case where the server returns an error _before_ the + // entire body has been read. + let Test { + mut tx, + mut initial, + mut replay, + _trace, + } = Test::new(); + + tx.send_data("hello").await; + assert_eq!(chunk(&mut initial).await.unwrap(), "hello"); + + tx.send_data(" world").await; + assert_eq!(chunk(&mut initial).await.unwrap(), " world"); + + // drop the initial body to send the data to the replay + drop(initial); + tracing::info!("dropped initial body"); + + tokio::spawn(async move { + tx.send_data(", have lots of fun").await; + tx.send_trailers(HeaderMap::new()).await; + }); + + let (data, trailers) = body_to_string(&mut replay).await; + assert_eq!(data, "hello world, have lots of fun"); + assert!(trailers.is_some()); +} + +#[tokio::test(flavor = "current_thread")] +async fn multiple_replays() { + let Test { + mut tx, + initial, + replay, + _trace, + } = Test::new(); + + let mut tlrs = HeaderMap::new(); + tlrs.insert("x-hello", HeaderValue::from_str("world").unwrap()); + tlrs.insert("x-foo", HeaderValue::from_str("bar").unwrap()); + + let tlrs2 = tlrs.clone(); + tokio::spawn(async move { + tx.send_data("hello").await; + tx.send_data(" world").await; + tx.send_trailers(tlrs2).await; + }); + + let read = |body| async { + let (data, trailers) = body_to_string(body).await; + assert_eq!(data, "hello world"); + assert_eq!(trailers.as_ref(), Some(&tlrs)); + }; + + read(initial).await; + + // Replay the body twice. 
+ let replay2 = replay.clone(); + read(replay).await; + read(replay2).await; +} + +#[tokio::test(flavor = "current_thread")] +async fn multiple_incomplete_replays() { + let Test { + mut tx, + mut initial, + mut replay, + _trace, + } = Test::new(); + + let mut tlrs = HeaderMap::new(); + tlrs.insert("x-hello", HeaderValue::from_str("world").unwrap()); + tlrs.insert("x-foo", HeaderValue::from_str("bar").unwrap()); + + tx.send_data("hello").await; + assert_eq!(chunk(&mut initial).await.unwrap(), "hello"); + + // drop the initial body to send the data to the replay + drop(initial); + tracing::info!("dropped initial body"); + + let replay2 = replay.clone(); + + tx.send_data(" world").await; + assert_eq!(chunk(&mut replay).await.unwrap(), "hello"); + assert_eq!(chunk(&mut replay).await.unwrap(), " world"); + + // drop the replay body to send the data to the second replay + drop(replay); + tracing::info!("dropped first replay body"); + + let tlrs2 = tlrs.clone(); + tokio::spawn(async move { + tx.send_data(", have lots").await; + tx.send_data(" of fun!").await; + tx.send_trailers(tlrs2).await; + }); + + let (data, replay2_trailers) = body_to_string(replay2).await; + assert_eq!(data, "hello world, have lots of fun!"); + assert_eq!(replay2_trailers.as_ref(), Some(&tlrs)); +} + +#[tokio::test(flavor = "current_thread")] +async fn drop_clone_early() { + let Test { + mut tx, + initial, + replay, + _trace, + } = Test::new(); + + let mut tlrs = HeaderMap::new(); + tlrs.insert("x-hello", HeaderValue::from_str("world").unwrap()); + tlrs.insert("x-foo", HeaderValue::from_str("bar").unwrap()); + + let tlrs2 = tlrs.clone(); + tokio::spawn(async move { + tx.send_data("hello").await; + tx.send_data(" world").await; + tx.send_trailers(tlrs2).await; + }); + + { + let body = initial; + let (data, trailers) = body_to_string(body).await; + assert_eq!(data, "hello world"); + assert_eq!(trailers.as_ref(), Some(&tlrs)); + } + + // Clone the body, and then drop it before polling it. 
+ let replay2 = replay.clone(); + drop(replay2); + + { + let body = replay; + let (data, trailers) = body_to_string(body).await; + assert_eq!(data, "hello world"); + assert_eq!(trailers.as_ref(), Some(&tlrs)); + } +} + +// This test is specifically for behavior across clones, so the clippy lint +// is wrong here. +#[allow(clippy::redundant_clone)] +#[test] +fn empty_body_is_always_eos() { + // If the initial body was empty, every clone should always return + // `true` from `is_end_stream`. + let initial = + ReplayBody::try_new(BoxBody::empty(), 64 * 1024).expect("empty body can't be too large"); + assert!(initial.is_end_stream()); + + let replay = initial.clone(); + assert!(replay.is_end_stream()); + + let replay2 = replay.clone(); + assert!(replay2.is_end_stream()); +} + +#[tokio::test(flavor = "current_thread")] +async fn eos_only_when_fully_replayed() { + // Test that each clone of a body is not EOS until the data has been + // fully replayed. + let _trace = linkerd_tracing::test::with_default_filter("linkerd_http_retry=trace"); + let mut initial = ReplayBody::try_new(TestBody::one_data_frame(), 64 * 1024) + .expect("body must not be too large"); + let mut replay = initial.clone(); + + // Read the initial body, show that the replay does not consider itself to have reached the + // end-of-stream. Then drop the initial body, show that the replay is still not done. + assert!(!initial.is_end_stream()); + initial + .frame() + .await + .expect("yields a result") + .expect("yields a frame") + .into_data() + .expect("yields a data frame"); + // TODO(kate): the initial body doesn't report ending until it has (not) yielded trailers. + assert!(initial.frame().await.is_none()); + assert!(initial.is_end_stream()); + assert!(!replay.is_end_stream()); + drop(initial); + assert!(!replay.is_end_stream()); + + // Read the replay body. 
+ assert!(!replay.is_end_stream()); + replay + .frame() + .await + .expect("yields a result") + .expect("yields a frame") + .into_data() + .expect("yields a data frame"); + // TODO(kate): the replay doesn't report ending until it has (not) yielded trailers. + assert!(replay.frame().await.is_none()); + assert!(replay.is_end_stream()); + + // Even if we clone a body _after_ it has been driven to EOS, the clone must not be EOS. + let mut replay2 = replay.clone(); + assert!(!replay2.is_end_stream()); + + // Drop the first replay body to send the data to the second replay. + drop(replay); + + // Read the second replay body. + replay2 + .frame() + .await + .expect("yields a result") + .expect("yields a frame") + .into_data() + .expect("yields a data frame"); + // TODO(kate): the replay doesn't report ending until it has (not) yielded trailers. + assert!(replay2.frame().await.is_none()); + assert!(replay2.is_end_stream()); +} + +#[tokio::test(flavor = "current_thread")] +async fn eos_only_when_fully_replayed_with_trailers() { + // Test that each clone of a body is not EOS until the data has been + // fully replayed. + let _trace = linkerd_tracing::test::with_default_filter("linkerd_http_retry=trace"); + let mut initial = ReplayBody::try_new(TestBody::one_data_frame().with_trailers(), 64 * 1024) + .expect("body must not be too large"); + let mut replay = initial.clone(); + + // Read the initial body, show that the replay does not consider itself to have reached the + // end-of-stream. Then drop the initial body, show that the replay is still not done. 
+ assert!(!initial.is_end_stream()); + initial + .frame() + .await + .expect("yields a result") + .expect("yields a frame") + .into_data() + .expect("yields a data frame"); + initial + .frame() + .await + .expect("yields a result") + .expect("yields a frame") + .into_trailers() + .map_err(drop) + .expect("yields a trailers frame"); + assert!(initial.is_end_stream()); + assert!(!replay.is_end_stream()); + drop(initial); + assert!(!replay.is_end_stream()); + + // Read the replay body. + assert!(!replay.is_end_stream()); + replay + .frame() + .await + .expect("yields a result") + .expect("yields a frame") + .into_data() + .expect("yields a data frame"); + replay + .frame() + .await + .expect("yields a result") + .expect("yields a frame") + .into_trailers() + .map_err(drop) + .expect("yields a trailers frame"); + assert!(replay.is_end_stream()); + + // Even if we clone a body _after_ it has been driven to EOS, the clone must not be EOS. + let mut replay2 = replay.clone(); + assert!(!replay2.is_end_stream()); + + // Drop the first replay body to send the data to the second replay. + drop(replay); + + // Read the second replay body. + replay2 + .frame() + .await + .expect("yields a result") + .expect("yields a frame") + .into_data() + .expect("yields a data frame"); + replay2 + .frame() + .await + .expect("yields a result") + .expect("yields a frame") + .into_trailers() + .map_err(drop) + .expect("yields a trailers frame"); + assert!(replay2.is_end_stream()); +} + +#[tokio::test(flavor = "current_thread")] +async fn caps_buffer() { + const CAPACITY: usize = 8; + const FILL: Bytes = Bytes::from_static(b"abcdefgh"); + const OVERFLOW: Bytes = Bytes::from_static(b"i"); + debug_assert!(FILL.len() == CAPACITY, "fills the body's capacity"); + + // Test that, when the initial body is longer than the preconfigured + // cap, we allow the request to continue, but stop buffering. The + // initial body will complete, but the replay will immediately fail. 
+ let _trace = linkerd_tracing::test::with_default_filter("linkerd_http_retry=trace"); + let (mut tx, body) = http_body_util::channel::Channel::::new(512); + let mut initial = + ReplayBody::try_new(body, CAPACITY).expect("channel body must not be too large"); + let mut replay = initial.clone(); + + // The initial body isn't capped yet, and the replay body is waiting. + assert_eq!(initial.is_capped(), Some(false)); + assert_eq!(replay.is_capped(), None); + + // Send enough data to reach the cap, but do not exceed it. + tx.send_data(FILL).await.unwrap(); + assert_eq!(chunk(&mut initial).await, Some("abcdefgh".to_string())); + assert_eq!(initial.is_capped(), Some(false)); + + // Any more bytes sent to the initial body exceeds its capacity. + tx.send_data(OVERFLOW).await.unwrap(); + assert_eq!(chunk(&mut initial).await, Some("i".to_string())); + assert_eq!(initial.is_capped(), Some(true)); + assert_eq!(replay.is_capped(), None); + + drop(initial); + + // The request's replay should error, since we discarded the buffer when + // we hit the cap. + let err = replay + .frame() + .await + .expect("yields a result") + .expect_err("yields an error when capped"); + assert!(err.is::()); + assert_eq!(replay.is_capped(), Some(true)); +} + +#[tokio::test(flavor = "current_thread")] +async fn caps_across_replays() { + const CAPACITY: usize = 8; + const FILL: Bytes = Bytes::from_static(b"abcdefgh"); + const OVERFLOW: Bytes = Bytes::from_static(b"i"); + debug_assert!(FILL.len() == CAPACITY, "fills the body's capacity"); + + // Test that, when the initial body is longer than the preconfigured + // cap, we allow the request to continue, but stop buffering. 
+ let _trace = linkerd_tracing::test::with_default_filter("linkerd_http_retry=debug"); + let (mut tx, body) = http_body_util::channel::Channel::::new(512); + let mut initial = + ReplayBody::try_new(body, CAPACITY).expect("channel body must not be too large"); + let mut replay = initial.clone(); + + // Send enough data to reach the cap, but do not exceed it. + tx.send_data(FILL).await.unwrap(); + assert_eq!(chunk(&mut initial).await, Some("abcdefgh".to_string())); + drop(initial); + + let mut replay2 = replay.clone(); + + // The replay will reach the cap, but it should still return data from + // the original body. + tx.send_data(OVERFLOW).await.unwrap(); + assert_eq!(chunk(&mut replay).await, Some("abcdefgh".to_string())); + assert_eq!(replay.is_capped(), Some(false)); + assert_eq!(chunk(&mut replay).await, Some("i".to_string())); + assert_eq!(replay.is_capped(), Some(true)); + drop(replay); + + // The second replay will fail, though, because the buffer was discarded. + let err = replay2 + .frame() + .await + .expect("yields a result") + .expect_err("yields an error when capped"); + assert!(err.is::()) +} + +#[test] +fn body_too_big() { + let _trace = linkerd_tracing::test::with_default_filter("linkerd_http_retry=trace"); + let max_size = 8; + let mk_body = |sz: usize| -> BoxBody { + let s = (0..sz).map(|_| "x").collect::(); + BoxBody::new(s) + }; + + assert!( + ReplayBody::try_new(BoxBody::empty(), max_size).is_ok(), + "empty body is not too big" + ); + + assert!( + ReplayBody::try_new(mk_body(max_size), max_size).is_ok(), + "body at maximum capacity is not too big" + ); + + assert!( + ReplayBody::try_new(mk_body(max_size + 1), max_size).is_err(), + "over-sized body is too big" + ); + + let (mut _tx, body) = http_body_util::channel::Channel::::new(512); + assert!( + ReplayBody::try_new(body, max_size).is_ok(), + "body without size hint is not too big" + ); +} + +// This test is specifically for behavior across clones, so the clippy lint +// is wrong here. 
+#[allow(clippy::redundant_clone)] +#[test] +fn size_hint_is_correct_for_empty_body() { + let _trace = linkerd_tracing::test::with_default_filter("linkerd_http_retry=trace"); + let initial = + ReplayBody::try_new(BoxBody::empty(), 64 * 1024).expect("empty body can't be too large"); + let size = initial.size_hint(); + assert_eq!(size.lower(), 0); + assert_eq!(size.upper(), Some(0)); + + let replay = initial.clone(); + let size = replay.size_hint(); + assert_eq!(size.lower(), 0); + assert_eq!(size.upper(), Some(0)); +} + +#[tokio::test(flavor = "current_thread")] +async fn size_hint_is_correct_across_replays() { + const CAPACITY: usize = 8; + const BODY: &str = "contents"; + const SIZE: u64 = BODY.len() as u64; + debug_assert!(SIZE as usize <= CAPACITY); + + // Create the initial body, and a replay. + let _trace = linkerd_tracing::test::with_default_filter("linkerd_http_retry=trace"); + let mut initial = ReplayBody::try_new(BoxBody::from_static(BODY), CAPACITY) + .expect("empty body can't be too large"); + let mut replay = initial.clone(); + + // Show that the body reports a proper size hint. + let initial_size = initial.size_hint(); + assert_eq!(initial_size.lower(), SIZE); + assert_eq!(initial_size.exact(), Some(SIZE)); + assert_eq!(initial_size.upper(), Some(SIZE)); + + // Read the body, check the size hint again. + assert_eq!(chunk(&mut initial).await.as_deref(), Some(BODY)); + // TODO(kate): the initial body doesn't report ending until it has (not) yielded trailers. + assert!(initial.frame().await.is_none()); + debug_assert!(initial.is_end_stream()); + // TODO(kate): this currently misreports the *remaining* size of the body. + // let size = initial.size_hint(); + // assert_eq!(size.lower(), 0); + // assert_eq!(size.upper(), Some(0)); + + // The replay reports the initial size hint, before and after dropping the initial body. 
+ let size = replay.size_hint(); + assert_eq!(size.lower(), initial_size.lower()); + assert_eq!(size.upper(), initial_size.upper()); + drop(initial); + let size = replay.size_hint(); + assert_eq!(size.lower(), initial_size.lower()); + assert_eq!(size.upper(), initial_size.upper()); + + // Drop the initial body, read the replay and check its size hint. + assert_eq!(chunk(&mut replay).await.as_deref(), Some(BODY)); + // let replay = { + // // TODO(kate): the replay doesn't report ending until it has (not) yielded trailers. + // let mut body = linkerd_http_body_compat::ForwardCompatibleBody::new(replay); + // assert!(body.frame().await.is_none()); + // body.into_inner() + // }; + // let size = replay.size_hint(); + // debug_assert!(replay.is_end_stream()); + // assert_eq!(size.lower(), 0); + // assert_eq!(size.upper(), Some(0)); +} + +// === impl Test === + +impl Test { + fn new() -> Self { + let (tx, rx) = http_body_util::channel::Channel::::new(512); + let initial = ReplayBody::try_new(BoxBody::new(rx), 64 * 1024).expect("body too large"); + let replay = initial.clone(); + Self { + tx: Tx(tx), + initial, + replay, + _trace: linkerd_tracing::test::with_default_filter("linkerd_http_retry=debug").0, + } + } +} + +// === impl Tx === + +impl Tx { + #[tracing::instrument(skip(self))] + async fn send_data(&mut self, data: impl Into + std::fmt::Debug) { + let data = data.into(); + tracing::trace!("sending data..."); + self.0.send_data(data).await.expect("rx is not dropped"); + tracing::info!("sent data"); + } + + #[tracing::instrument(skip(self))] + async fn send_trailers(&mut self, trailers: HeaderMap) { + tracing::trace!("sending trailers..."); + self.0 + .send_trailers(trailers) + .await + .expect("rx is not dropped"); + tracing::info!("sent trailers"); + } +} + +// === impl TestBody === + +impl TestBody { + /// A body that yields a single DATA frame. 
+ fn one_data_frame() -> Self { + Self { + data: ["one"].into(), + trailers: None, + } + } + + /// Adds a TRAILERS frame to the body. + fn with_trailers(self) -> Self { + let name = HeaderName::from_static("name"); + let value = HeaderValue::from_static("value"); + let trailers = [(name, value)].into_iter().collect(); + + Self { + trailers: Some(trailers), + ..self + } + } +} + +impl Body for TestBody { + type Data = ::Data; + type Error = std::convert::Infallible; + + fn poll_frame( + self: Pin<&mut Self>, + _: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + let Self { data, trailers } = self.get_mut(); + let mut next_data = || data.pop_front().map(Bytes::from).map(Frame::data).map(Ok); + let take_trailers = || trailers.take().map(Frame::trailers).map(Ok); + + Poll::Ready(next_data().or_else(take_trailers)) + } + + fn is_end_stream(&self) -> bool { + let Self { data, trailers } = self; + data.is_empty() && trailers.is_none() + } +} + +// === helper functions === + +async fn chunk(body: &mut T) -> Option +where + T: Body + Unpin, +{ + tracing::trace!("waiting for a body chunk..."); + let chunk = body + .frame() + .await + .expect("yields a result") + .ok() + .expect("yields a frame") + .into_data() + .ok() + .map(string); + tracing::info!(?chunk); + chunk +} + +async fn body_to_string(mut body: B) -> (String, Option) +where + B: Body + Unpin, + B::Error: std::fmt::Debug, +{ + let mut data = String::new(); + let mut trailers = None; + + // Continue reading frames from the body until it is finished. 
+ while let Some(frame) = body + .frame() + .await + .transpose() + .expect("reading a frame succeeds") + { + match frame.into_data().map(string) { + Ok(ref s) => data.push_str(s), + Err(frame) => { + let trls = frame + .into_trailers() + .map_err(drop) + .expect("test frame is either data or trailers"); + trailers = Some(trls); + } + } + } + + tracing::info!(?data, ?trailers, "finished reading body"); + (data, trailers) +} + +fn string(mut data: impl Buf) -> String { + let bytes = data.copy_to_bytes(data.remaining()); + String::from_utf8(bytes.to_vec()).unwrap() +} diff --git a/linkerd/http/retry/src/with_trailers.rs b/linkerd/http/retry/src/with_trailers.rs deleted file mode 100644 index eb92c855f1..0000000000 --- a/linkerd/http/retry/src/with_trailers.rs +++ /dev/null @@ -1,177 +0,0 @@ -use futures::{ - future::{self, Either}, - FutureExt, -}; -use http_body::Body; -use std::{ - future::Future, - pin::Pin, - task::{Context, Poll}, -}; - -/// An HTTP body that allows inspecting the body's trailers, if a `TRAILERS` -/// frame was the first frame after the initial headers frame. -/// -/// If the first frame of the body stream was *not* a `TRAILERS` frame, this -/// behaves identically to a normal body. -pub struct WithTrailers { - inner: B, - - /// The first DATA frame received from the inner body, or an error that - /// occurred while polling for data. - /// - /// If this is `None`, then the body has completed without any DATA frames. - first_data: Option>, - - /// The inner body's trailers, if it was terminated by a `TRAILERS` frame - /// after 0-1 DATA frames, or an error if polling for trailers failed. - /// - /// Yes, this is a bit of a complex type, so let's break it down: - /// - the outer `Option` indicates whether any trailers were received by - /// `WithTrailers`; if it's `None`, then we don't *know* if the response - /// had trailers, as it is not yet complete. 
- /// - the inner `Result` and `Option` are the `Result` and `Option` returned - /// by `HttpBody::trailers` on the inner body. If this is `Ok(None)`, then - /// the body has terminated without trailers --- it is *known* to not have - /// trailers. - trailers: Option, B::Error>>, -} - -pub type WithTrailersFuture = Either< - futures::future::Ready>>, - Pin>> + Send + 'static>>, ->; - -// === impl WithTrailers === - -impl WithTrailers { - pub fn trailers(&self) -> Option<&http::HeaderMap> { - self.trailers - .as_ref() - .and_then(|trls| trls.as_ref().ok()?.as_ref()) - } - - pub fn map_response(rsp: http::Response) -> WithTrailersFuture - where - B: Send + Unpin + 'static, - B::Data: Send + Unpin + 'static, - B::Error: Send, - { - use http::Version; - - // If the response isn't an HTTP version that has trailers, skip trying - // to read a trailers frame. - if let Version::HTTP_09 | Version::HTTP_10 | Version::HTTP_11 = rsp.version() { - return Either::Left(future::ready(Self::no_trailers(rsp))); - } - - // If the response doesn't have a body stream, also skip trying to read - // a trailers frame. - if rsp.is_end_stream() { - return Either::Left(future::ready(Self::no_trailers(rsp))); - } - - // Otherwise, return a future that tries to read the next frame. - Either::Right(Box::pin(Self::read_response(rsp))) - } - - async fn read_response(rsp: http::Response) -> http::Response - where - B: Send + Unpin, - B::Data: Send + Unpin, - B::Error: Send, - { - let (parts, body) = rsp.into_parts(); - let mut body = Self { - inner: body, - first_data: None, - trailers: None, - }; - - if let Some(data) = body.inner.data().await { - // The body has data; stop waiting for trailers. - body.first_data = Some(data); - - // Peek to see if there's immediately a trailers frame, and grab - // it if so. Otherwise, bail. 
- // XXX(eliza): the documentation for the `http::Body` trait says - // that `poll_trailers` should only be called after `poll_data` - // returns `None`...but, in practice, I'm fairly sure that this just - // means that it *will not return `Ready`* until there are no data - // frames left, which is fine for us here, because we `now_or_never` - // it. - body.trailers = body.inner.trailers().now_or_never(); - } else { - // Okay, `poll_data` has returned `None`, so there are no data - // frames left. Let's see if there's trailers... - body.trailers = Some(body.inner.trailers().await); - } - - http::Response::from_parts(parts, body) - } - - fn no_trailers(rsp: http::Response) -> http::Response { - rsp.map(|inner| Self { - inner, - first_data: None, - trailers: None, - }) - } -} - -impl Body for WithTrailers -where - B: Body + Unpin, - B::Data: Unpin, - B::Error: Unpin, -{ - type Data = B::Data; - type Error = B::Error; - - fn poll_data( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>> { - let this = self.get_mut(); - if let Some(first_data) = this.first_data.take() { - return Poll::Ready(Some(first_data)); - } - - Pin::new(&mut this.inner).poll_data(cx) - } - - fn poll_trailers( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll, Self::Error>> { - let this = self.get_mut(); - if let Some(trailers) = this.trailers.take() { - return Poll::Ready(trailers); - } - - Pin::new(&mut this.inner).poll_trailers(cx) - } - - #[inline] - fn is_end_stream(&self) -> bool { - self.first_data.is_none() && self.trailers.is_none() && self.inner.is_end_stream() - } - - #[inline] - fn size_hint(&self) -> http_body::SizeHint { - use bytes::Buf; - - let mut hint = self.inner.size_hint(); - // If we're holding onto a chunk of data, add its length to the inner - // `Body`'s size hint. 
- if let Some(Ok(chunk)) = self.first_data.as_ref() { - let buffered = chunk.remaining() as u64; - if let Some(upper) = hint.upper() { - hint.set_upper(upper + buffered); - } - hint.set_lower(hint.lower() + buffered); - } - - hint - } -} diff --git a/linkerd/http/route/Cargo.toml b/linkerd/http/route/Cargo.toml index 43bb594925..e4ddace378 100644 --- a/linkerd/http/route/Cargo.toml +++ b/linkerd/http/route/Cargo.toml @@ -1,23 +1,24 @@ [package] name = "linkerd-http-route" -version = "0.1.0" -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [features] proto = ["linkerd2-proxy-api"] [dependencies] -http = "0.2" +http = { workspace = true } regex = "1" -rand = "0.8" -thiserror = "1" +rand = "0.9" +thiserror = "2" tracing = "0.1" url = "2" [dependencies.linkerd2-proxy-api] -version = "0.13" +workspace = true features = ["http-route", "grpc-route"] optional = true diff --git a/linkerd/http/route/src/grpc/tests.rs b/linkerd/http/route/src/grpc/tests.rs index 402bcb8ed0..4360ff1290 100644 --- a/linkerd/http/route/src/grpc/tests.rs +++ b/linkerd/http/route/src/grpc/tests.rs @@ -13,6 +13,25 @@ impl Default for Policy { } } +#[test] +fn default() { + let rts = vec![Route { + hosts: vec![], + rules: vec![Rule { + matches: vec![MatchRoute::default()], + policy: Policy::Expected, + }], + }]; + + let req = http::Request::builder() + .method(http::Method::POST) + .uri("http://foo.example.com/foo/bar") + .body(()) + .unwrap(); + let (_, policy) = find(&rts, &req).expect("must match"); + assert_eq!(*policy, Policy::Expected, "incorrect rule matched"); +} + /// Given two equivalent routes, choose the explicit hostname match and not /// the wildcard. 
#[test] diff --git a/linkerd/http/route/src/http/filter/inject_failure.rs b/linkerd/http/route/src/http/filter/inject_failure.rs index 47fe440dc4..6df7579847 100644 --- a/linkerd/http/route/src/http/filter/inject_failure.rs +++ b/linkerd/http/route/src/http/filter/inject_failure.rs @@ -19,16 +19,16 @@ pub struct FailureResponse { pub struct Distribution { numerator: u32, denominator: u32, - inner: rand::distributions::Bernoulli, + inner: rand::distr::Bernoulli, } // === impl InjectFailure === impl InjectFailure { pub fn apply(&self) -> Option { - use rand::distributions::Distribution; + use rand::distr::Distribution; - if self.distribution.sample(&mut rand::thread_rng()) { + if self.distribution.sample(&mut rand::rng()) { return Some(self.response.clone()); } @@ -43,8 +43,8 @@ impl Distribution { pub fn from_ratio( numerator: u32, denominator: u32, - ) -> Result { - let inner = rand::distributions::Bernoulli::from_ratio(numerator, denominator)?; + ) -> Result { + let inner = rand::distr::Bernoulli::from_ratio(numerator, denominator)?; Ok(Self { numerator, denominator, @@ -59,7 +59,7 @@ impl Default for Distribution { } } -impl rand::distributions::Distribution for Distribution { +impl rand::distr::Distribution for Distribution { #[inline] fn sample(&self, rng: &mut R) -> bool { self.inner.sample(rng) @@ -100,7 +100,7 @@ pub mod proto { #[derive(Debug, thiserror::Error)] #[error("invalid request distribution: {0}")] - pub struct InvalidDistribution(#[from] rand::distributions::BernoulliError); + pub struct InvalidDistribution(#[from] rand::distr::BernoulliError); // === impl InjectFailure === diff --git a/linkerd/http/stream-timeouts/Cargo.toml b/linkerd/http/stream-timeouts/Cargo.toml new file mode 100644 index 0000000000..8d34a8cfe4 --- /dev/null +++ b/linkerd/http/stream-timeouts/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "linkerd-http-stream-timeouts" +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { 
workspace = true } +publish = { workspace = true } +description = """ +Tower middleware to express deadlines on streams. +""" + +[dependencies] +futures = { version = "0.3", default-features = false } +http = { workspace = true } +http-body = { workspace = true } +parking_lot = "0.12" +pin-project = "1" +thiserror = "2" +tokio = { version = "1", default-features = false } +tracing = "0.1" + +linkerd-error = { path = "../../error" } +linkerd-stack = { path = "../../stack" } diff --git a/linkerd/http/stream-timeouts/src/lib.rs b/linkerd/http/stream-timeouts/src/lib.rs new file mode 100644 index 0000000000..7d618bb5f1 --- /dev/null +++ b/linkerd/http/stream-timeouts/src/lib.rs @@ -0,0 +1,481 @@ +//! Tower middleware to express deadlines on streams. +//! +//! See [`EnforceTimeouts`]. + +use futures::FutureExt; +use http_body::Frame; +use linkerd_error::{Error, Result}; +use linkerd_stack as svc; +use parking_lot::RwLock; +use pin_project::pin_project; +use std::{ + future::Future, + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; +use thiserror::Error; +use tokio::{sync::oneshot, time}; + +/// A request extension set on HTTP requests that expresses deadlines to be +/// enforced by the proxy. +#[derive(Clone, Debug, Default)] +pub struct StreamTimeouts { + /// The maximum amount of time between the body of the request being fully + /// flushed and the response headers being received. + pub response_headers: Option, + + /// The maximum amount of time between the body of the request being fully + /// flushed (or the response headers being received, if that occurs first) + /// and the response being fully received. + pub response_end: Option, + + /// The maximum amount of time the stream may be idle. + pub idle: Option, + + /// Limits the total time the stream may be active in the proxy. + pub limit: Option, +} + +#[derive(Clone, Copy, Debug)] +pub struct StreamLifetime { + /// The deadline for the stream. 
+ pub deadline: time::Instant, + /// The maximum amount of time the stream may be active, used for error reporting. + pub lifetime: time::Duration, +} + +#[derive(Clone, Debug)] +pub struct EnforceTimeouts { + inner: S, +} + +#[derive(Clone, Copy, Debug, Error)] +#[error("response header timeout: {0:?}")] +pub struct ResponseHeadersTimeoutError(time::Duration); + +#[derive(Clone, Copy, Debug, Error)] +#[error("response stream timeout: {0:?}")] +pub struct ResponseStreamTimeoutError(time::Duration); + +#[derive(Clone, Copy, Debug, Error)] +#[error("request timeout: {0:?}")] +pub struct StreamDeadlineError(time::Duration); + +#[derive(Clone, Copy, Debug, Error)] +#[error("idle timeout: {0:?}")] +pub struct StreamIdleError(time::Duration); + +#[derive(Clone, Copy, Debug, Error)] +pub enum ResponseTimeoutError { + #[error("timed out waiting for response headers: {0}")] + Headers(#[from] ResponseHeadersTimeoutError), + + #[error("timed out waiting for response headers: {0}")] + Response(#[from] ResponseStreamTimeoutError), + + #[error("timed out waiting for response headers: {0}")] + Lifetime(#[from] StreamDeadlineError), +} + +#[derive(Clone, Copy, Debug, Error)] +pub enum BodyTimeoutError { + #[error("timed out processing response stream: {0}")] + Response(#[from] ResponseStreamTimeoutError), + + #[error("timed out processing response stream: {0}")] + Lifetime(#[from] StreamDeadlineError), + + #[error("timed out processing response stream: {0}")] + Idle(#[from] StreamIdleError), +} + +#[derive(Debug)] +#[pin_project] +pub struct ResponseFuture { + #[pin] + inner: F, + + #[pin] + deadline: Option>, + + #[pin] + request_flushed: Option>, + request_flushed_at: Option, + + idle: Option<(IdleTimestamp, time::Duration)>, + + timeouts: StreamTimeouts, +} + +#[derive(Debug, Default)] +#[pin_project] +pub struct RequestBody { + #[pin] + inner: B, + + #[pin] + deadline: Option>, + idle: Option, + + request_flushed: Option>, +} + +#[derive(Debug, Default)] +#[pin_project] +pub 
struct ResponseBody { + #[pin] + inner: B, + + #[pin] + deadline: Option>, + idle: Option, + + timeouts: StreamTimeouts, +} + +#[derive(Debug)] +#[pin_project] +struct Deadline { + #[pin] + sleep: time::Sleep, + error: E, +} + +type IdleTimestamp = Arc>; + +#[derive(Debug)] +struct Idle { + sleep: Pin>, + timestamp: IdleTimestamp, + timeout: time::Duration, +} + +// === impl StreamLifetime === + +impl From for StreamLifetime { + fn from(lifetime: time::Duration) -> Self { + Self { + deadline: time::Instant::now() + lifetime, + lifetime, + } + } +} + +// === impl EnforceTimeouts === + +impl EnforceTimeouts { + pub fn layer() -> impl svc::layer::Layer + Clone { + svc::layer::mk(|inner| Self { inner }) + } +} + +impl svc::Service> for EnforceTimeouts +where + S: svc::Service>, Response = http::Response>, + S::Error: Into, +{ + type Response = http::Response>; + type Error = Error; + type Future = ResponseFuture; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx).map_err(Into::into) + } + + fn call(&mut self, req: http::Request) -> Self::Future { + let timeouts = req + .extensions() + .get::() + .cloned() + .unwrap_or_default(); + tracing::trace!(?timeouts, "Enforcing timeouts on stream"); + + let (req_idle, rsp_idle) = if let Some(timeout) = timeouts.idle { + let last_update = Arc::new(RwLock::new(time::Instant::now())); + let req = Idle { + sleep: Box::pin(time::sleep(timeout)), + timestamp: last_update.clone(), + timeout, + }; + (Some(req), Some((last_update, timeout))) + } else { + (None, None) + }; + + let (tx, rx) = oneshot::channel(); + let inner = self.inner.call(req.map(move |inner| RequestBody { + inner, + request_flushed: Some(tx), + deadline: timeouts.limit.map(|l| Deadline { + sleep: time::sleep_until(l.deadline), + error: StreamDeadlineError(l.lifetime).into(), + }), + idle: req_idle, + })); + ResponseFuture { + inner, + deadline: timeouts.limit.map(|l| Deadline { + sleep: time::sleep_until(l.deadline), + 
error: StreamDeadlineError(l.lifetime).into(), + }), + request_flushed: Some(rx), + request_flushed_at: None, + timeouts, + idle: rsp_idle, + } + } +} + +// === impl ResponseFuture === + +impl Future for ResponseFuture +where + F: Future, E>>, + E: Into, +{ + type Output = Result>>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let mut this = self.project(); + + // Mark the time at which the request body was fully flushed and adjust + // the response deadline as necessary. + if let Some(flushed) = this.request_flushed.as_mut().as_pin_mut() { + if let Poll::Ready(res) = flushed.poll(cx) { + tracing::trace!("Request body fully flushed"); + let start = res.unwrap_or_else(|_| time::Instant::now()); + *this.request_flushed = None; + *this.request_flushed_at = Some(start); + + let timeout = match (this.timeouts.response_headers, this.timeouts.response_end) { + (Some(eoh), Some(eos)) if eoh < eos => Some(( + eoh, + ResponseTimeoutError::from(ResponseHeadersTimeoutError(eoh)), + )), + (Some(eoh), _) => Some(( + eoh, + ResponseTimeoutError::from(ResponseHeadersTimeoutError(eoh)), + )), + (_, Some(eos)) => Some((eos, ResponseStreamTimeoutError(eos).into())), + _ => None, + }; + if let Some((timeout, error)) = timeout { + tracing::debug!(?timeout); + let headers_by = start + timeout; + if let Some(deadline) = this.deadline.as_mut().as_pin_mut() { + if headers_by < deadline.sleep.deadline() { + tracing::trace!(?timeout, "Updating response headers deadline"); + let dl = deadline.project(); + *dl.error = error; + dl.sleep.reset(headers_by); + } else { + tracing::trace!("Using original stream deadline"); + } + } else { + tracing::trace!(?timeout, "Setting response headers deadline"); + this.deadline.set(Some(Deadline { + sleep: time::sleep_until(headers_by), + error, + })); + } + } + } + } + + // Poll for the response headers. 
+ let rsp = match this.inner.poll(cx) { + Poll::Ready(Ok(rsp)) => rsp, + Poll::Ready(Err(e)) => return Poll::Ready(Err(e.into())), + Poll::Pending => { + // If the response headers are not ready, check the deadline and + // return an error if it is exceeded. + if let Some(deadline) = this.deadline.as_pin_mut() { + let dl = deadline.project(); + if dl.sleep.poll(cx).is_ready() { + // TODO telemetry + return Poll::Ready(Err((*dl.error).into())); + } + } + return Poll::Pending; + } + }; + // We've received response headers, so we prepare the response body to + // timeout. + + // Share the idle state across request and response bodies. Update the + // state to reflect that we've accepted headers. + let idle = this.idle.take().map(|(timestamp, timeout)| { + let now = time::Instant::now(); + *timestamp.write() = now; + Idle { + timestamp, + timeout, + sleep: Box::pin(time::sleep_until(now + timeout)), + } + }); + + // We use the more restrictive of the response-end timeout (as + // measured since the request body was fully flushed) and the stream + // lifetime limit. 
+ let start = this.request_flushed_at.unwrap_or_else(time::Instant::now); + let timeout = match (this.timeouts.response_end, this.timeouts.limit) { + (Some(eos), Some(lim)) if start + eos < lim.deadline => { + tracing::debug!(?eos, "Setting response stream timeout"); + Some((start + eos, ResponseStreamTimeoutError(eos).into())) + } + (Some(eos), None) => { + tracing::debug!(?eos, "Setting response stream timeout"); + Some((start + eos, ResponseStreamTimeoutError(eos).into())) + } + (_, Some(lim)) => { + tracing::debug!("Using stream deadline"); + Some((lim.deadline, StreamDeadlineError(lim.lifetime).into())) + } + (None, None) => None, + }; + + Poll::Ready(Ok(rsp.map(move |inner| ResponseBody { + inner, + deadline: timeout.map(|(t, error)| Deadline { + sleep: time::sleep_until(t), + error, + }), + idle, + timeouts: this.timeouts.clone(), + }))) + } +} + +// === impl RequestBody === + +impl http_body::Body for RequestBody +where + B: http_body::Body, +{ + type Data = B::Data; + type Error = Error; + + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + let mut this = self.project(); + + // Poll for the next frame. + if let Poll::Ready(res) = this.inner.as_mut().poll_frame(cx) { + let now = time::Instant::now(); + if let Some(idle) = this.idle { + idle.reset(now); + } + // Send a timestamp when the end of the stream is reached. + if this.inner.as_ref().is_end_stream() { + if let Some(tx) = this.request_flushed.take() { + let _ = tx.send(now); + } + } + return Poll::Ready(res); + } + + // Poll for a timeout error. 
+ if let Poll::Ready(e) = poll_body_timeout(this.deadline, this.idle, cx) { + // TODO telemetry + return Poll::Ready(Some(Err(Error::from(e)))); + } + + Poll::Pending + } + + fn is_end_stream(&self) -> bool { + self.inner.is_end_stream() + } +} + +// === impl ResponseBody === + +impl http_body::Body for ResponseBody +where + B: http_body::Body, +{ + type Data = B::Data; + type Error = Error; + + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + let mut this = self.project(); + + // Poll for the next frame. + if let Poll::Ready(res) = this.inner.as_mut().poll_frame(cx) { + let now = time::Instant::now(); + if let Some(idle) = this.idle { + idle.reset(now); + } + return Poll::Ready(res); + } + + // Poll for a timeout error. + if let Poll::Ready(e) = poll_body_timeout(this.deadline, this.idle, cx) { + // TODO telemetry + return Poll::Ready(Some(Err(Error::from(e)))); + } + + Poll::Pending + } + + fn is_end_stream(&self) -> bool { + self.inner.is_end_stream() + } +} + +fn poll_body_timeout( + mut deadline: Pin<&mut Option>>, + idle: &mut Option, + cx: &mut Context<'_>, +) -> Poll { + if let Some(dl) = deadline.as_mut().as_pin_mut() { + let d = dl.project(); + if d.sleep.poll(cx).is_ready() { + let error = *d.error; + deadline.set(None); // Prevent polling again. + return Poll::Ready(error); + } + } + + if let Some(idle) = idle { + if let Poll::Ready(e) = idle.poll_idle(cx) { + return Poll::Ready(e.into()); + } + } + + Poll::Pending +} + +// === impl Idle === + +impl Idle { + fn reset(&mut self, now: time::Instant) { + self.sleep.as_mut().reset(now + self.timeout); + *self.timestamp.write() = now; + } + + fn poll_idle(&mut self, cx: &mut Context<'_>) -> Poll { + loop { + if self.sleep.poll_unpin(cx).is_pending() { + return Poll::Pending; + } + + // If the idle timeout has expired, we first need to ensure that the + // other half of the stream hasn't updated the timestamp. 
If it has, + // reset the timer with the expected idle timeout. + let now = time::Instant::now(); + let expiry = *self.timestamp.read() + self.timeout; + if expiry <= now { + return Poll::Ready(StreamIdleError(self.timeout)); + } + self.sleep.as_mut().reset(expiry); + } + } +} diff --git a/linkerd/http/upgrade/Cargo.toml b/linkerd/http/upgrade/Cargo.toml new file mode 100644 index 0000000000..0756483e41 --- /dev/null +++ b/linkerd/http/upgrade/Cargo.toml @@ -0,0 +1,35 @@ +[package] +name = "linkerd-http-upgrade" +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } +description = """ +Facilities for HTTP/1 upgrades. +""" + +[dependencies] +bytes = { workspace = true } +drain = { workspace = true } +futures = { version = "0.3", default-features = false } +http = { workspace = true } +http-body = { workspace = true } +hyper = { workspace = true, default-features = false, features = ["client"] } +hyper-util = { workspace = true, default-features = false, features = [ + "client", + "client-legacy", +] } +pin-project = "1" +thiserror = "2" +tokio = { version = "1", default-features = false } +tower = { workspace = true, default-features = false } +tracing = "0.1" +try-lock = "0.2" + +linkerd-duplex = { path = "../../duplex" } +linkerd-error = { path = "../../error" } +linkerd-http-box = { path = "../box" } +linkerd-http-variant = { path = "../variant" } +linkerd-io = { path = "../../io" } +linkerd-stack = { path = "../../stack" } diff --git a/linkerd/proxy/http/src/glue.rs b/linkerd/http/upgrade/src/glue.rs similarity index 56% rename from linkerd/proxy/http/src/glue.rs rename to linkerd/http/upgrade/src/glue.rs index 16c4dde464..771afa38f8 100644 --- a/linkerd/proxy/http/src/glue.rs +++ b/linkerd/http/upgrade/src/glue.rs @@ -1,9 +1,8 @@ use crate::upgrade::Http11Upgrade; -use bytes::Bytes; -use futures::TryFuture; -use hyper::body::HttpBody; -use 
hyper::client::connect as hyper_connect; +use futures::{ready, TryFuture}; +use http_body::{Body, Frame}; use linkerd_error::{Error, Result}; +use linkerd_http_box::BoxBody; use linkerd_io::{self as io, AsyncRead, AsyncWrite}; use linkerd_stack::{MakeConnection, Service}; use pin_project::{pin_project, pinned_drop}; @@ -17,11 +16,11 @@ use tracing::debug; /// Provides optional HTTP/1.1 upgrade support on the body. #[pin_project(PinnedDrop)] #[derive(Debug)] -pub struct UpgradeBody { - /// In UpgradeBody::drop, if this was an HTTP upgrade, the body is taken - /// to be inserted into the Http11Upgrade half. - body: hyper::Body, - pub(super) upgrade: Option<(Http11Upgrade, hyper::upgrade::OnUpgrade)>, +pub struct UpgradeBody { + /// The inner [`Body`] being wrapped. + #[pin] + body: B, + upgrade: Option<(Http11Upgrade, hyper::upgrade::OnUpgrade)>, } /// Glue for any `tokio_connect::Connect` to implement `hyper::client::Connect`. @@ -47,42 +46,36 @@ pub struct HyperConnectFuture { inner: F, absolute_form: bool, } + // === impl UpgradeBody === -impl HttpBody for UpgradeBody { - type Data = Bytes; - type Error = hyper::Error; +impl Body for UpgradeBody +where + B: Body, + B::Error: std::fmt::Display, +{ + type Data = B::Data; + type Error = B::Error; fn is_end_stream(&self) -> bool { self.body.is_end_stream() } - fn poll_data( + fn poll_frame( self: Pin<&mut Self>, cx: &mut Context<'_>, - ) -> Poll>> { - let body = self.project().body; - let poll = futures::ready!(Pin::new(body) // `hyper::Body` is Unpin - .poll_data(cx)); - Poll::Ready(poll.map(|x| { - x.map_err(|e| { - debug!("http body error: {}", e); - e - }) - })) - } - - fn poll_trailers( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll, Self::Error>> { - let body = self.project().body; - Pin::new(body) // `hyper::Body` is Unpin - .poll_trailers(cx) - .map_err(|e| { - debug!("http trailers error: {}", e); - e - }) + ) -> Poll, Self::Error>>> { + // Poll the next frame from the body. 
+ let this = self.project(); + let body = this.body; + let frame = ready!(body.poll_frame(cx)); + + // Log errors. + if let Some(Err(e)) = &frame { + debug!("http body error: {}", e); + } + + Poll::Ready(frame) } #[inline] @@ -91,24 +84,18 @@ impl HttpBody for UpgradeBody { } } -impl Default for UpgradeBody { +impl Default for UpgradeBody { fn default() -> Self { - hyper::Body::empty().into() - } -} - -impl From for UpgradeBody { - fn from(body: hyper::Body) -> Self { Self { - body, + body: B::default(), upgrade: None, } } } -impl UpgradeBody { +impl UpgradeBody { pub(crate) fn new( - body: hyper::Body, + body: B, upgrade: Option<(Http11Upgrade, hyper::upgrade::OnUpgrade)>, ) -> Self { Self { body, upgrade } @@ -116,12 +103,17 @@ impl UpgradeBody { } #[pinned_drop] -impl PinnedDrop for UpgradeBody { +impl PinnedDrop for UpgradeBody { fn drop(self: Pin<&mut Self>) { let this = self.project(); // If an HTTP/1 upgrade was wanted, send the upgrade future. if let Some((upgrade, on_upgrade)) = this.upgrade.take() { - upgrade.insert_half(on_upgrade); + if let Err(error) = upgrade.insert_half(on_upgrade) { + tracing::warn!( + ?error, + "upgrade body could not send upgrade future upon completion" + ); + } } } } @@ -129,7 +121,7 @@ impl PinnedDrop for UpgradeBody { // === impl HyperConnect === impl HyperConnect { - pub(super) fn new(connect: C, target: T, absolute_form: bool) -> Self { + pub fn new(connect: C, target: T, absolute_form: bool) -> Self { HyperConnect { connect, target, @@ -140,12 +132,12 @@ impl HyperConnect { impl Service for HyperConnect where - C: MakeConnection<(crate::Version, T)> + Clone + Send + Sync, + C: MakeConnection<(linkerd_http_variant::Variant, T)> + Clone + Send + Sync, C::Connection: Unpin + Send, C::Future: Unpin + Send + 'static, T: Clone + Send + Sync, { - type Response = Connection; + type Response = hyper_util::rt::TokioIo>; type Error = Error; type Future = HyperConnectFuture; @@ -157,30 +149,32 @@ where HyperConnectFuture { inner: self 
.connect - .connect((crate::Version::Http1, self.target.clone())), + .connect((linkerd_http_variant::Variant::Http1, self.target.clone())), absolute_form: self.absolute_form, } } } +// === impl HyperConnectFuture === + impl Future for HyperConnectFuture where F: TryFuture + 'static, F::Error: Into, { - type Output = Result>; + type Output = Result>>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.project(); let (transport, _) = futures::ready!(this.inner.try_poll(cx)).map_err(Into::into)?; - Poll::Ready(Ok(Connection { + Poll::Ready(Ok(hyper_util::rt::TokioIo::new(Connection { transport, absolute_form: *this.absolute_form, - })) + }))) } } -// === impl Connected === +// === impl Connection === impl AsyncRead for Connection where @@ -228,8 +222,54 @@ where } } -impl hyper_connect::Connection for Connection { - fn connected(&self) -> hyper_connect::Connected { - hyper_connect::Connected::new().proxy(self.absolute_form) +impl hyper::rt::Read for Connection { + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: hyper::rt::ReadBufCursor<'_>, + ) -> Poll> { + self.project().transport.poll_read(cx, buf) + } +} + +impl hyper::rt::Write for Connection { + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + self.project().transport.poll_write(cx, buf) + } + + fn poll_flush( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + self.project().transport.poll_flush(cx) + } + + fn poll_shutdown( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + self.project().transport.poll_shutdown(cx) + } + + fn is_write_vectored(&self) -> bool { + self.transport.is_write_vectored() + } + + fn poll_write_vectored( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + bufs: &[std::io::IoSlice<'_>], + ) -> Poll> { + self.project().transport.poll_write_vectored(cx, bufs) + } +} + +impl hyper_util::client::legacy::connect::Connection for Connection { + fn connected(&self) -> 
hyper_util::client::legacy::connect::Connected { + hyper_util::client::legacy::connect::Connected::new().proxy(self.absolute_form) } } diff --git a/linkerd/http/upgrade/src/lib.rs b/linkerd/http/upgrade/src/lib.rs new file mode 100644 index 0000000000..d3167d7518 --- /dev/null +++ b/linkerd/http/upgrade/src/lib.rs @@ -0,0 +1,75 @@ +//! Facilities for HTTP/1.1 upgrades. +//! +//! HTTP/1.1 specifies an `Upgrade` header field that may be used in tandem with the `Connection` +//! header field as a simple mechanism to transition from HTTP/1.1 to another protocol. This crate +//! provides [`tower`] middleware that enable upgrades to HTTP/2 for services running within a +//! [`tokio`] runtime. +//! +//! Use [`Service::new()`] to add upgrade support to a [`tower::Service`]. +//! +//! [RFC 9110 § 7.6.1][rfc9110-connection] for more information about the `Connection` header +//! field, [RFC 9110 § 7.8][rfc9110-upgrade] for more information about HTTP/1.1's `Upgrade` +//! header field, and [RFC 9110 § 15.2.2][rfc9110-101] for more information about the +//! `101 (Switching Protocols)` response status code. +//! +//! Note that HTTP/2 does *NOT* provide support for the `Upgrade` header field, per +//! [RFC 9113 § 8.6][rfc9113]. HTTP/2 is a multiplexed protocol, and connection upgrades are +//! thus inapplicable. +//! +//! [rfc9110-connection]: https://www.rfc-editor.org/rfc/rfc9110#name-connection +//! [rfc9110-upgrade]: https://www.rfc-editor.org/rfc/rfc9110#field.upgrade +//! [rfc9110-101]: https://www.rfc-editor.org/rfc/rfc9110#name-101-switching-protocols +//! [rfc9113]: https://www.rfc-editor.org/rfc/rfc9113.html#name-the-upgrade-header-field + +pub use self::upgrade::Service; + +pub mod glue; +pub mod upgrade; + +/// Removes connection headers from the given [`HeaderMap`][http::HeaderMap]. 
+/// +/// An HTTP proxy is required to do this, according to [RFC 9110 § 7.6.1 ¶ 5][rfc9110-761-5]: +/// +/// > Intermediaries MUST parse a received Connection header field before a message is forwarded +/// > and, for each connection-option in this field, remove any header or trailer field(s) from the +/// > message with the same name as the connection-option, and then remove the Connection header +/// > field itself (or replace it with the intermediary's own control options for the forwarded +/// > message). +/// +/// This function additionally removes some headers mentioned in +/// [RFC 9110 § 7.6.1 ¶ 7-8.5][rfc9110-761-7] +/// +/// > Furthermore, intermediaries SHOULD remove or replace fields that are known to require removal +/// > before forwarding, whether or not they appear as a connection-option, after applying those +/// > fields' semantics. This includes but is not limited to: +/// > +/// > - `Proxy-Connection` (Appendix C.2.2 of [HTTP/1.1]) +/// > - `Keep-Alive` (Section 19.7.1 of \[RFC2068\]) +/// > - `TE` (Section 10.1.4) +/// > - `Transfer-Encoding` (Section 6.1 of [HTTP/1.1]) +/// > - `Upgrade` (Section 7.8) +/// +/// [rfc9110-761-5]: https://www.rfc-editor.org/rfc/rfc9110#section-7.6.1-5 +/// [rfc9110-761-7]: https://www.rfc-editor.org/rfc/rfc9110#section-7.6.1-7 +pub fn strip_connection_headers(headers: &mut http::HeaderMap) { + use http::header; + if let Some(val) = headers.remove(header::CONNECTION) { + if let Ok(conn_header) = val.to_str() { + // A `Connection` header may have a comma-separated list of + // names of other headers that are meant for only this specific + // connection. + // + // Iterate these names and remove them as headers. + for name in conn_header.split(',') { + let name = name.trim(); + headers.remove(name); + } + } + } + + // Additionally, strip these "connection-level" headers always, since + // they are otherwise illegal if upgraded to HTTP2. 
+ headers.remove(header::UPGRADE); + headers.remove("proxy-connection"); + headers.remove("keep-alive"); +} diff --git a/linkerd/proxy/http/src/upgrade.rs b/linkerd/http/upgrade/src/upgrade.rs similarity index 62% rename from linkerd/proxy/http/src/upgrade.rs rename to linkerd/http/upgrade/src/upgrade.rs index 088a3b7c4f..82a2a590f9 100644 --- a/linkerd/proxy/http/src/upgrade.rs +++ b/linkerd/http/upgrade/src/upgrade.rs @@ -1,6 +1,6 @@ //! HTTP/1.1 Upgrades -use crate::{glue::UpgradeBody, h1}; +use crate::glue::UpgradeBody; use futures::{ future::{self, Either}, TryFutureExt, }; @@ -22,35 +22,28 @@ use try_lock::TryLock; /// inserted into the `Request::extensions()`. If the HTTP1 client service /// also detects an upgrade, the two `OnUpgrade` futures will be joined /// together with the glue in this type. -// Note: this relies on their only having been 2 Inner clones, so don't -// implement `Clone` for this type. pub struct Http11Upgrade { half: Half, - inner: Arc, + inner: Option>, } -/// A named "tuple" returned by `Http11Upgade::new()` of the two halves of +/// A named "tuple" returned by [`Http11Upgrade::halves()`] of the two halves of /// an upgrade. #[derive(Debug)] -pub struct Http11UpgradeHalves { +struct Http11UpgradeHalves { /// The "server" half. - pub server: Http11Upgrade, + server: Http11Upgrade, /// The "client" half. - pub client: Http11Upgrade, + client: Http11Upgrade, } -/// A marker type inserted into Extensions to signal it was an HTTP CONNECT -/// request.
-#[derive(Debug)] -pub struct HttpConnect; - struct Inner { server: TryLock>, client: TryLock>, upgrade_drain_signal: Option, } -#[derive(Debug)] +#[derive(Clone, Copy, Debug)] enum Half { Server, Client, @@ -63,6 +56,13 @@ pub struct Service { upgrade_drain_signal: drain::Watch, } +#[derive(Debug, thiserror::Error)] +#[error("OnUpgrade future has already been inserted: half={half:?}")] +pub struct AlreadyInserted { + half: Half, + pub upgrade: OnUpgrade, +} + // === impl Http11Upgrade === impl Http11Upgrade { @@ -70,7 +70,7 @@ impl Http11Upgrade { /// /// Each handle is used to insert 1 half of the upgrade. When both handles /// have inserted, the upgrade future will be spawned onto the executor. - pub fn halves(upgrade_drain_signal: drain::Watch) -> Http11UpgradeHalves { + fn halves(upgrade_drain_signal: drain::Watch) -> Http11UpgradeHalves { let inner = Arc::new(Inner { server: TryLock::new(None), client: TryLock::new(None), @@ -80,35 +80,42 @@ impl Http11Upgrade { Http11UpgradeHalves { server: Http11Upgrade { half: Half::Server, - inner: inner.clone(), + inner: Some(inner.clone()), }, client: Http11Upgrade { half: Half::Client, - inner, + inner: Some(inner.clone()), }, } } - pub fn insert_half(self, upgrade: OnUpgrade) { - match self.half { - Half::Server => { - let mut lock = self - .inner + pub fn insert_half(self, upgrade: OnUpgrade) -> Result<(), AlreadyInserted> { + match self { + Self { + inner: Some(inner), + half: Half::Server, + } => { + let mut lock = inner .server .try_lock() .expect("only Half::Server touches server TryLock"); debug_assert!(lock.is_none()); *lock = Some(upgrade); + Ok(()) } - Half::Client => { - let mut lock = self - .inner + Self { + inner: Some(inner), + half: Half::Client, + } => { + let mut lock = inner .client .try_lock() .expect("only Half::Client touches client TryLock"); debug_assert!(lock.is_none()); *lock = Some(upgrade); + Ok(()) } + Self { inner: None, half } => Err(AlreadyInserted { half, upgrade }), } } } @@ -121,6 
+128,25 @@ impl fmt::Debug for Http11Upgrade { } } +/// An [`Http11Upgrade`] can be cloned. +/// +/// NB: Only the original copy of this extension may insert an [`OnUpgrade`] future into its half +/// of the channel. Calling [`insert_half()`][Http11Upgrade::insert_half] on any clones of an +/// upgrade extension will result in an error. +// See the [`Drop`] implementation provided by `Inner` for more information. +impl Clone for Http11Upgrade { + fn clone(&self) -> Self { + Self { + half: self.half, + // We do *NOT* deeply clone our reference to `Inner`. + // + // `Http11Upgrade::insert_half()` and the `Inner` type's `Drop` glue rely on there only + // being one copy of the client and sender halves of the upgrade channel. + inner: None, + } + } +} + /// When both halves have dropped, check if both sides are inserted, /// and if so, spawn the upgrade task. impl Drop for Inner { @@ -139,6 +165,9 @@ impl Drop for Inner { let both_upgrades = async move { let (server_conn, client_conn) = tokio::try_join!(server_upgrade, client_upgrade)?; trace!("HTTP upgrade successful"); + use hyper_util::rt::TokioIo; + let client_conn = TokioIo::new(client_conn); + let server_conn = TokioIo::new(server_conn); if let Err(e) = Duplex::new(client_conn, server_conn).await { info!("tcp duplex error: {}", e) } @@ -162,6 +191,7 @@ impl Drop for Inner { } // === impl Service === + impl Service { pub fn new(service: S, upgrade_drain_signal: drain::Watch) -> Self { Self { @@ -171,29 +201,38 @@ impl Service { } } +impl Clone for Service { + fn clone(&self) -> Self { + Self { + service: self.service.clone(), + upgrade_drain_signal: self.upgrade_drain_signal.clone(), + } + } +} + type ResponseFuture = Either, E>>>; -impl tower::Service> for Service +impl tower::Service> for Service where - S: tower::Service, Response = http::Response>, - B: Default, + S: tower::Service>, Response = http::Response>, + RespB: Default, { type Response = S::Response; type Error = S::Error; - type Future = 
ResponseFuture; + type Future = ResponseFuture; fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { self.service.poll_ready(cx) } - fn call(&mut self, mut req: http::Request) -> Self::Future { + fn call(&mut self, mut req: http::Request) -> Self::Future { // Should this rejection happen later in the Service stack? // // Rejecting here means telemetry doesn't record anything about it... // // At the same time, this stuff is specifically HTTP1, so it feels // proper to not have the HTTP2 requests going through it... - if h1::is_bad_request(&req) { + if is_bad_request(&req) { let mut res = http::Response::default(); *res.status_mut() = http::StatusCode::BAD_REQUEST; return Either::Right(future::ok(res)); @@ -221,9 +260,46 @@ where } } -/// Checks requests to determine if they want to perform an HTTP upgrade. +/// Returns if the received request is definitely bad. +/// +/// Just because a request parses doesn't mean it's correct. For example: +/// +/// - `GET example.com` +/// - `CONNECT /just-a-path` +pub(crate) fn is_bad_request(req: &http::Request) -> bool { + if req.method() == http::Method::CONNECT { + // CONNECT is only valid over HTTP/1.1 + if req.version() != http::Version::HTTP_11 { + debug!("CONNECT request not valid for HTTP/1.0: {:?}", req.uri()); + return true; + } + + // CONNECT requests are only valid in authority-form. + if !is_origin_form(req.uri()) { + debug!("CONNECT request with illegal URI: {:?}", req.uri()); + return true; + } + } else if is_origin_form(req.uri()) { + // If not CONNECT, refuse any origin-form URIs + debug!("{} request with illegal URI: {:?}", req.method(), req.uri()); + return true; + } + + false +} + +/// Returns if the request target is in `origin-form`. +/// +/// This is `origin-form`: `example.com` +fn is_origin_form(uri: &http::uri::Uri) -> bool { + uri.scheme().is_none() && uri.path_and_query().is_none() +} + +/// Returns true if the given [Request][http::Request] is attempting an HTTP/1.1 upgrade.
fn wants_upgrade(req: &http::Request) -> bool { - // HTTP upgrades were added in 1.1, not 1.0. + // Upgrades are specific to HTTP/1.1. They are not included in HTTP/1.0, nor are they supported + // in HTTP/2. If this request is associated with any protocol version besides HTTP/1.1, we can + // dismiss it immediately as not being applicable to an upgrade. if req.version() != http::Version::HTTP_11 { return false; } diff --git a/linkerd/http/variant/Cargo.toml b/linkerd/http/variant/Cargo.toml new file mode 100644 index 0000000000..8463c41a3d --- /dev/null +++ b/linkerd/http/variant/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "linkerd-http-variant" +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } +description = """ +HTTP version types. +""" + +[dependencies] +http = { workspace = true } +thiserror = "2" diff --git a/linkerd/proxy/http/src/version.rs b/linkerd/http/variant/src/lib.rs similarity index 73% rename from linkerd/proxy/http/src/version.rs rename to linkerd/http/variant/src/lib.rs index c8a71b37d5..0bcbb65ca2 100644 --- a/linkerd/proxy/http/src/version.rs +++ b/linkerd/http/variant/src/lib.rs @@ -1,16 +1,24 @@ +//! HTTP version variants. +//! +//! See [`Variant`]. + use thiserror::Error; +/// HTTP protocol version. #[derive(Copy, Clone, PartialEq, Eq, Hash)] -pub enum Version { +pub enum Variant { + /// HTTP/1 Http1, + /// HTTP/2 H2, } +/// An unsupported HTTP version error. 
#[derive(Debug, Error)] #[error("unsupported HTTP version {:?}", self.0)] pub struct Unsupported(http::Version); -impl std::convert::TryFrom for Version { +impl std::convert::TryFrom for Variant { type Error = Unsupported; fn try_from(v: http::Version) -> Result { match v { @@ -21,7 +29,7 @@ impl std::convert::TryFrom for Version { } } -impl std::fmt::Debug for Version { +impl std::fmt::Debug for Variant { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::Http1 => write!(f, "HTTP/1"), diff --git a/linkerd/identity/Cargo.toml b/linkerd/identity/Cargo.toml index c053377b1b..5c3542bc63 100644 --- a/linkerd/identity/Cargo.toml +++ b/linkerd/identity/Cargo.toml @@ -1,16 +1,16 @@ [package] name = "linkerd-identity" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [dependencies] -prometheus-client = "0.22" -thiserror = "1" +prometheus-client = { workspace = true } +thiserror = "2" tracing = "0.1" -url = "2.5.0" +url = "2.5.4" linkerd-dns-name = { path = "../dns/name" } linkerd-metrics = { path = "../metrics" } diff --git a/linkerd/idle-cache/Cargo.toml b/linkerd/idle-cache/Cargo.toml index 9b014c610d..90fe48e494 100644 --- a/linkerd/idle-cache/Cargo.toml +++ b/linkerd/idle-cache/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "linkerd-idle-cache" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [features] test-util = [] @@ -20,7 +20,7 @@ tokio = { version = "1", default-features = false, features = [ "sync", "time", ] } -tower = { version = "0.4", default-features = false, features = ["util"] } 
+tower = { workspace = true, default-features = false, features = ["util"] } tracing = "0.1" [dev-dependencies] diff --git a/linkerd/io/Cargo.toml b/linkerd/io/Cargo.toml index b255f40780..88f5c349d8 100644 --- a/linkerd/io/Cargo.toml +++ b/linkerd/io/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "linkerd-io" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } description = """ General I/O primitives. """ @@ -15,7 +15,7 @@ default = [] [dependencies] async-trait = "0.1" futures = { version = "0.3", default-features = false } -bytes = "1" +bytes = { workspace = true } linkerd-errno = { path = "../errno" } tokio = { version = "1", features = ["io-util", "net"] } tokio-test = { version = "0.4", optional = true } diff --git a/linkerd/io/src/sensor.rs b/linkerd/io/src/sensor.rs index 7721bb6ac1..9175b2ab35 100644 --- a/linkerd/io/src/sensor.rs +++ b/linkerd/io/src/sensor.rs @@ -1,4 +1,4 @@ -use crate::{IoSlice, PeerAddr, Poll}; +use crate::{IoSlice, Peek, PeerAddr, Poll}; use futures::ready; use linkerd_errno::Errno; use pin_project::pin_project; @@ -82,3 +82,10 @@ impl PeerAddr for SensorIo { self.io.peer_addr() } } + +#[async_trait::async_trait] +impl Peek for SensorIo { + async fn peek(&self, buf: &mut [u8]) -> Result { + self.io.peek(buf).await + } +} diff --git a/linkerd/meshtls/Cargo.toml b/linkerd/meshtls/Cargo.toml index 7e67a36ac3..77582f4e1a 100644 --- a/linkerd/meshtls/Cargo.toml +++ b/linkerd/meshtls/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "linkerd-meshtls" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } edition = "2018" -publish = false +publish = { workspace = true } [features] rustls = 
["linkerd-meshtls-rustls", "__has_any_tls_impls"] @@ -29,7 +29,7 @@ linkerd-tls = { path = "../tls" } [dev-dependencies] tokio = { version = "1", features = ["macros", "net", "rt-multi-thread"] } tracing = "0.1" -rcgen = "0.12.0" +rcgen = "0.13.2" linkerd-conditional = { path = "../conditional" } linkerd-proxy-transport = { path = "../proxy/transport" } diff --git a/linkerd/meshtls/boring/Cargo.toml b/linkerd/meshtls/boring/Cargo.toml index ae928c0e90..71ee19c77c 100644 --- a/linkerd/meshtls/boring/Cargo.toml +++ b/linkerd/meshtls/boring/Cargo.toml @@ -1,15 +1,15 @@ [package] name = "linkerd-meshtls-boring" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } edition = "2018" -publish = false +publish = { workspace = true } [dependencies] -boring = "3" +boring = "4" futures = { version = "0.3", default-features = false } -hex = "0.4" # used for debug logging +hex = "0.4" # used for debug logging linkerd-error = { path = "../../error" } linkerd-dns-name = { path = "../../dns/name" } linkerd-identity = { path = "../../identity" } @@ -19,7 +19,7 @@ linkerd-tls = { path = "../../tls" } linkerd-meshtls-verifier = { path = "../verifier" } tokio = { version = "1", features = ["macros", "sync"] } -tokio-boring = "3" +tokio-boring = "4" tracing = "0.1" [features] @@ -28,4 +28,3 @@ fips = ["boring/fips"] [dev-dependencies] linkerd-tls-test-util = { path = "../../tls/test-util" } linkerd-meshtls = { path = "../../meshtls" } - diff --git a/linkerd/meshtls/rustls/Cargo.toml b/linkerd/meshtls/rustls/Cargo.toml index 5a09ea31e4..2fbd673796 100644 --- a/linkerd/meshtls/rustls/Cargo.toml +++ b/linkerd/meshtls/rustls/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "linkerd-meshtls-rustls" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } 
edition = "2018" -publish = false +publish = { workspace = true } [features] test-util = ["linkerd-tls-test-util"] @@ -12,11 +12,11 @@ test-util = ["linkerd-tls-test-util"] [dependencies] futures = { version = "0.3", default-features = false } ring = { version = "0.17", features = ["std"] } -rustls-pemfile = "1.0" -rustls-webpki = { version = "0.101.5", features = ["std"] } -thiserror = "1" +rustls-pemfile = "2.2" +rustls-webpki = { version = "0.103.1", features = ["std"] } +thiserror = "2" tokio = { version = "1", features = ["macros", "rt", "sync"] } -tokio-rustls = { version = "0.24", features = ["dangerous_configuration"] } +tokio-rustls = { workspace = true } tracing = "0.1" linkerd-dns-name = { path = "../../dns/name" } diff --git a/linkerd/meshtls/rustls/src/client.rs b/linkerd/meshtls/rustls/src/client.rs index 962135d42d..9856d38998 100644 --- a/linkerd/meshtls/rustls/src/client.rs +++ b/linkerd/meshtls/rustls/src/client.rs @@ -6,7 +6,7 @@ use linkerd_stack::{NewService, Service}; use linkerd_tls::{client::AlpnProtocols, ClientTls, NegotiatedProtocolRef}; use std::{convert::TryFrom, pin::Pin, sync::Arc, task::Context}; use tokio::sync::watch; -use tokio_rustls::rustls::{self, ClientConfig}; +use tokio_rustls::rustls::{self, pki_types::CertificateDer, ClientConfig}; /// A `NewService` that produces `Connect` services from a dynamic TLS configuration. 
#[derive(Clone)] @@ -18,7 +18,7 @@ pub struct NewClient { #[derive(Clone)] pub struct Connect { server_id: id::Id, - server_name: rustls::ServerName, + server_name: rustls::pki_types::ServerName<'static>, config: Arc, } @@ -68,8 +68,9 @@ impl Connect { } }; - let server_name = rustls::ServerName::try_from(client_tls.server_name.as_str()) - .expect("identity must be a valid DNS name"); + let server_name = + rustls::pki_types::ServerName::try_from(client_tls.server_name.to_string()) + .expect("identity must be a valid DNS name"); Self { server_id: client_tls.server_id.into(), @@ -79,7 +80,7 @@ impl Connect { } } -fn extract_cert(c: &rustls::ClientConnection) -> io::Result<&rustls::Certificate> { +fn extract_cert(c: &rustls::ClientConnection) -> io::Result<&CertificateDer<'_>> { match c.peer_certificates().and_then(|certs| certs.first()) { Some(leaf_cert) => io::Result::Ok(leaf_cert), None => Err(io::Error::new(io::ErrorKind::Other, "missing tls end cert")), @@ -113,7 +114,7 @@ where let s = s?; let (_, conn) = s.get_ref(); let end_cert = extract_cert(conn)?; - verifier::verify_id(&end_cert.0, &server_id)?; + verifier::verify_id(end_cert, &server_id)?; Ok(ClientIo(s)) }), ) diff --git a/linkerd/meshtls/rustls/src/creds.rs b/linkerd/meshtls/rustls/src/creds.rs index cd8332534e..a61c3e0179 100644 --- a/linkerd/meshtls/rustls/src/creds.rs +++ b/linkerd/meshtls/rustls/src/creds.rs @@ -10,7 +10,7 @@ use ring::error::KeyRejected; use std::sync::Arc; use thiserror::Error; use tokio::sync::watch; -use tokio_rustls::rustls; +use tokio_rustls::rustls::{self, crypto::CryptoProvider}; use tracing::warn; #[derive(Debug, Error)] @@ -27,7 +27,9 @@ pub fn watch( roots_pem: &str, ) -> Result<(Store, Receiver)> { let mut roots = rustls::RootCertStore::empty(); - let certs = match rustls_pemfile::certs(&mut std::io::Cursor::new(roots_pem)) { + let certs = match rustls_pemfile::certs(&mut std::io::Cursor::new(roots_pem)) + .collect::, _>>() + { Err(error) => { warn!(%error, "invalid 
trust anchors file"); return Err(error.into()); @@ -39,7 +41,7 @@ pub fn watch( Ok(certs) => certs, }; - let (added, skipped) = roots.add_parsable_certificates(&certs[..]); + let (added, skipped) = roots.add_parsable_certificates(certs); if skipped != 0 { warn!("Skipped {} invalid trust anchors", skipped); } @@ -88,6 +90,12 @@ pub fn watch( Ok((store, rx)) } +fn default_provider() -> CryptoProvider { + let mut provider = rustls::crypto::ring::default_provider(); + provider.cipher_suites = params::TLS_SUPPORTED_CIPHERSUITES.to_vec(); + provider +} + #[cfg(feature = "test-util")] pub fn for_test(ent: &linkerd_tls_test_util::Entity) -> (Store, Receiver) { watch( @@ -104,7 +112,7 @@ pub fn default_for_test() -> (Store, Receiver) { } mod params { - use tokio_rustls::rustls; + use tokio_rustls::rustls::{self, crypto::WebPkiSupportedAlgorithms}; // These must be kept in sync: pub static SIGNATURE_ALG_RING_SIGNING: &ring::signature::EcdsaSigningAlgorithm = @@ -113,7 +121,51 @@ mod params { rustls::SignatureScheme::ECDSA_NISTP256_SHA256; pub const SIGNATURE_ALG_RUSTLS_ALGORITHM: rustls::SignatureAlgorithm = rustls::SignatureAlgorithm::ECDSA; + // A subset of the algorithms supported by rustls+ring, imported from + // https://github.com/rustls/rustls/blob/v/0.23.21/rustls/src/crypto/ring/mod.rs#L107 + pub static SUPPORTED_SIG_ALGS: &WebPkiSupportedAlgorithms = &WebPkiSupportedAlgorithms { + all: &[ + webpki::ring::ECDSA_P256_SHA256, + webpki::ring::ECDSA_P256_SHA384, + webpki::ring::ECDSA_P384_SHA256, + webpki::ring::ECDSA_P384_SHA384, + webpki::ring::ED25519, + webpki::ring::RSA_PKCS1_2048_8192_SHA256, + webpki::ring::RSA_PKCS1_2048_8192_SHA384, + webpki::ring::RSA_PKCS1_2048_8192_SHA512, + webpki::ring::RSA_PKCS1_3072_8192_SHA384, + ], + mapping: &[ + ( + rustls::SignatureScheme::ECDSA_NISTP384_SHA384, + &[ + webpki::ring::ECDSA_P384_SHA384, + webpki::ring::ECDSA_P256_SHA384, + ], + ), + ( + rustls::SignatureScheme::ECDSA_NISTP256_SHA256, + &[ + 
webpki::ring::ECDSA_P256_SHA256, + webpki::ring::ECDSA_P384_SHA256, + ], + ), + (rustls::SignatureScheme::ED25519, &[webpki::ring::ED25519]), + ( + rustls::SignatureScheme::RSA_PKCS1_SHA512, + &[webpki::ring::RSA_PKCS1_2048_8192_SHA512], + ), + ( + rustls::SignatureScheme::RSA_PKCS1_SHA384, + &[webpki::ring::RSA_PKCS1_2048_8192_SHA384], + ), + ( + rustls::SignatureScheme::RSA_PKCS1_SHA256, + &[webpki::ring::RSA_PKCS1_2048_8192_SHA256], + ), + ], + }; pub static TLS_VERSIONS: &[&rustls::SupportedProtocolVersion] = &[&rustls::version::TLS13]; pub static TLS_SUPPORTED_CIPHERSUITES: &[rustls::SupportedCipherSuite] = - &[rustls::cipher_suite::TLS13_CHACHA20_POLY1305_SHA256]; + &[rustls::crypto::ring::cipher_suite::TLS13_CHACHA20_POLY1305_SHA256]; } diff --git a/linkerd/meshtls/rustls/src/creds/receiver.rs b/linkerd/meshtls/rustls/src/creds/receiver.rs index 1c06f87c1c..fd451fbef8 100644 --- a/linkerd/meshtls/rustls/src/creds/receiver.rs +++ b/linkerd/meshtls/rustls/src/creds/receiver.rs @@ -70,10 +70,13 @@ mod tests { /// incoming handshakes, but that doesn't matter for these tests, where we /// don't actually do any TLS. fn empty_server_config() -> rustls::ServerConfig { - rustls::ServerConfig::builder() - .with_safe_defaults() - .with_client_cert_verifier(Arc::new(rustls::server::NoClientAuth)) - .with_cert_resolver(Arc::new(rustls::server::ResolvesServerCertUsingSni::new())) + rustls::ServerConfig::builder_with_provider(Arc::new( + rustls::crypto::ring::default_provider(), + )) + .with_protocol_versions(rustls::ALL_VERSIONS) + .expect("client config must be valid") + .with_client_cert_verifier(Arc::new(rustls::server::NoClientAuth)) + .with_cert_resolver(Arc::new(rustls::server::ResolvesServerCertUsingSni::new())) } /// Returns the simplest default rustls client config. @@ -82,10 +85,13 @@ mod tests { /// it doesn't trust any root certificates. However, that doesn't actually /// matter for these tests, which don't actually do TLS. 
fn empty_client_config() -> rustls::ClientConfig { - rustls::ClientConfig::builder() - .with_safe_defaults() - .with_root_certificates(rustls::RootCertStore::empty()) - .with_no_client_auth() + rustls::ClientConfig::builder_with_provider(Arc::new( + rustls::crypto::ring::default_provider(), + )) + .with_protocol_versions(rustls::ALL_VERSIONS) + .expect("client config must be valid") + .with_root_certificates(rustls::RootCertStore::empty()) + .with_no_client_auth() } #[tokio::test] diff --git a/linkerd/meshtls/rustls/src/creds/store.rs b/linkerd/meshtls/rustls/src/creds/store.rs index b0a692856e..ee1f686a4c 100644 --- a/linkerd/meshtls/rustls/src/creds/store.rs +++ b/linkerd/meshtls/rustls/src/creds/store.rs @@ -1,5 +1,4 @@ -use super::params::*; -use super::InvalidKey; +use super::{default_provider, params::*, InvalidKey}; use linkerd_dns_name as dns; use linkerd_error::Result; use linkerd_identity as id; @@ -7,12 +6,12 @@ use linkerd_meshtls_verifier as verifier; use ring::{rand, signature::EcdsaKeyPair}; use std::{convert::TryFrom, sync::Arc}; use tokio::sync::watch; -use tokio_rustls::rustls; +use tokio_rustls::rustls::{self, pki_types::UnixTime, server::WebPkiClientVerifier}; use tracing::debug; pub struct Store { roots: rustls::RootCertStore, - server_cert_verifier: Arc, + server_cert_verifier: Arc, server_id: id::Id, server_name: dns::Name, client_tx: watch::Sender>, @@ -20,18 +19,16 @@ pub struct Store { random: ring::rand::SystemRandom, } -#[derive(Clone)] +#[derive(Clone, Debug)] struct Key(Arc); -#[derive(Clone)] +#[derive(Clone, Debug)] struct CertResolver(Arc); pub(super) fn client_config_builder( - cert_verifier: Arc, + cert_verifier: Arc, ) -> rustls::ConfigBuilder { - rustls::ClientConfig::builder() - .with_cipher_suites(TLS_SUPPORTED_CIPHERSUITES) - .with_safe_default_kx_groups() + rustls::ClientConfig::builder_with_provider(Arc::new(default_provider())) .with_protocol_versions(TLS_VERSIONS) .expect("client config must be valid") // XXX: Rustls's 
built-in verifiers don't let us tweak things as fully @@ -44,6 +41,7 @@ pub(super) fn client_config_builder( // builder API does internally. However, we want to share the verifier // with the `Store` so that it can be used in `Store::validate` which // requires using this API. + .dangerous() .with_custom_certificate_verifier(cert_verifier) } @@ -57,12 +55,15 @@ pub(super) fn server_config( // controlling the set of trusted signature algorithms), but they provide good enough // defaults for now. // TODO: lock down the verification further. - let client_cert_verifier = Arc::new( - rustls::server::AllowAnyAnonymousOrAuthenticatedClient::new(roots), - ); - rustls::ServerConfig::builder() - .with_cipher_suites(TLS_SUPPORTED_CIPHERSUITES) - .with_safe_default_kx_groups() + let provider = Arc::new(default_provider()); + + let client_cert_verifier = + WebPkiClientVerifier::builder_with_provider(Arc::new(roots), provider.clone()) + .allow_unauthenticated() + .build() + .expect("server verifier must be valid"); + + rustls::ServerConfig::builder_with_provider(provider) .with_protocol_versions(TLS_VERSIONS) .expect("server config must be valid") .with_client_cert_verifier(client_cert_verifier) @@ -76,7 +77,7 @@ impl Store { #[allow(clippy::too_many_arguments)] pub(super) fn new( roots: rustls::RootCertStore, - server_cert_verifier: Arc, + server_cert_verifier: Arc, server_id: id::Id, server_name: dns::Name, client_tx: watch::Sender>, @@ -107,25 +108,23 @@ impl Store { /// Ensures the certificate is valid for the services we terminate for TLS. This assumes that /// server cert validation does the same or more validation than client cert validation. 
- fn validate(&self, certs: &[rustls::Certificate]) -> Result<()> { - let name = rustls::ServerName::try_from(self.server_name.as_str()) + fn validate(&self, certs: &[rustls::pki_types::CertificateDer<'_>]) -> Result<()> { + let name = rustls::pki_types::ServerName::try_from(self.server_name.as_str()) .expect("server name must be a valid DNS name"); static NO_OCSP: &[u8] = &[]; let end_entity = &certs[0]; let intermediates = &certs[1..]; - let no_scts = &mut std::iter::empty(); - let now = std::time::SystemTime::now(); + let now = UnixTime::now(); self.server_cert_verifier.verify_server_cert( end_entity, intermediates, &name, - no_scts, NO_OCSP, now, )?; // verify the id as the cert verifier does not do that (on purpose) - verifier::verify_id(&end_entity.0, &self.server_id).map_err(Into::into) + verifier::verify_id(end_entity, &self.server_id).map_err(Into::into) } } impl id::Credentials for Store { @@ -138,11 +137,11 @@ impl id::Credentials for Store { _expiry: std::time::SystemTime, ) -> Result<()> { let mut chain = Vec::with_capacity(intermediates.len() + 1); - chain.push(rustls::Certificate(leaf)); + chain.push(rustls::pki_types::CertificateDer::from(leaf)); chain.extend( intermediates .into_iter() - .map(|id::DerX509(der)| rustls::Certificate(der)), + .map(|id::DerX509(der)| rustls::pki_types::CertificateDer::from(der)), ); // Use the client's verifier to validate the certificate for our local name. 
diff --git a/linkerd/meshtls/rustls/src/creds/verify.rs b/linkerd/meshtls/rustls/src/creds/verify.rs index be7058bf57..42adeb75e2 100644 --- a/linkerd/meshtls/rustls/src/creds/verify.rs +++ b/linkerd/meshtls/rustls/src/creds/verify.rs @@ -1,26 +1,34 @@ -use std::convert::TryFrom; -use std::sync::Arc; -use std::time::SystemTime; +use crate::creds::params::SUPPORTED_SIG_ALGS; +use std::{convert::TryFrom, sync::Arc}; use tokio_rustls::rustls::{ self, - client::{self, ServerCertVerified, ServerCertVerifier}, + client::{ + self, + danger::{ServerCertVerified, ServerCertVerifier}, + }, + pki_types::{CertificateDer, ServerName, UnixTime}, server::ParsedCertificate, - Certificate, RootCertStore, ServerName, + RootCertStore, }; use tracing::trace; -pub(crate) struct AnySanVerifier(Arc); +#[derive(Debug)] +pub(crate) struct AnySanVerifier { + roots: Arc, +} impl AnySanVerifier { pub(crate) fn new(roots: impl Into>) -> Self { - Self(roots.into()) + Self { + roots: roots.into(), + } } } -// This is derived from `rustls::client::WebPkiVerifier`. +// This is derived from `rustls::client::WebPkiServerVerifier`. // // Copyright (c) 2016, Joseph Birr-Pixton -// https://github.com/rustls/rustls/blob/ccb79947a4811412ee7dcddcd0f51ea56bccf101/rustls/src/webpki/server_verifier.rs#L239 +// https://github.com/rustls/rustls/blob/v/0.23.15/rustls/src/webpki/server_verifier.rs#L134 // // The only difference is that we omit the step that performs // DNS SAN validation. 
The reason for that stems from the fact that @@ -32,16 +40,21 @@ impl ServerCertVerifier for AnySanVerifier { /// - Not Expired fn verify_server_cert( &self, - end_entity: &Certificate, - intermediates: &[Certificate], - _: &ServerName, - _: &mut dyn Iterator, + end_entity: &CertificateDer<'_>, + intermediates: &[CertificateDer<'_>], + _: &ServerName<'_>, ocsp_response: &[u8], - now: SystemTime, + now: UnixTime, ) -> Result { let cert = ParsedCertificate::try_from(end_entity)?; - client::verify_server_cert_signed_by_trust_anchor(&cert, &self.0, intermediates, now)?; + client::verify_server_cert_signed_by_trust_anchor( + &cert, + &self.roots, + intermediates, + now, + SUPPORTED_SIG_ALGS.all, + )?; if !ocsp_response.is_empty() { trace!("Unvalidated OCSP response: {ocsp_response:?}"); @@ -49,4 +62,26 @@ impl ServerCertVerifier for AnySanVerifier { Ok(ServerCertVerified::assertion()) } + + fn verify_tls12_signature( + &self, + message: &[u8], + cert: &CertificateDer<'_>, + dss: &rustls::DigitallySignedStruct, + ) -> Result { + tokio_rustls::rustls::crypto::verify_tls12_signature(message, cert, dss, SUPPORTED_SIG_ALGS) + } + + fn verify_tls13_signature( + &self, + message: &[u8], + cert: &CertificateDer<'_>, + dss: &rustls::DigitallySignedStruct, + ) -> Result { + tokio_rustls::rustls::crypto::verify_tls13_signature(message, cert, dss, SUPPORTED_SIG_ALGS) + } + + fn supported_verify_schemes(&self) -> Vec { + SUPPORTED_SIG_ALGS.supported_schemes() + } } diff --git a/linkerd/meshtls/rustls/src/server.rs b/linkerd/meshtls/rustls/src/server.rs index cac87589f7..844830e1ee 100644 --- a/linkerd/meshtls/rustls/src/server.rs +++ b/linkerd/meshtls/rustls/src/server.rs @@ -7,7 +7,7 @@ use linkerd_tls::{ClientId, NegotiatedProtocol, NegotiatedProtocolRef, ServerNam use std::{pin::Pin, sync::Arc, task::Context}; use thiserror::Error; use tokio::sync::watch; -use tokio_rustls::rustls::{Certificate, ServerConfig}; +use tokio_rustls::rustls::{pki_types::CertificateDer, ServerConfig}; 
use tracing::debug; /// A Service that terminates TLS connections using a dynamically updated server configuration. @@ -129,7 +129,7 @@ where fn client_identity(tls: &tokio_rustls::server::TlsStream) -> Option { let (_io, session) = tls.get_ref(); let certs = session.peer_certificates()?; - let c = certs.first().map(Certificate::as_ref)?; + let c = certs.first().map(CertificateDer::as_ref)?; verifier::client_identity(c).map(ClientId) } diff --git a/linkerd/meshtls/tests/util.rs b/linkerd/meshtls/tests/util.rs index c294ecfecc..2060cc4a34 100644 --- a/linkerd/meshtls/tests/util.rs +++ b/linkerd/meshtls/tests/util.rs @@ -11,14 +11,14 @@ use linkerd_meshtls as meshtls; use linkerd_proxy_transport::{ addrs::*, listen::{Addrs, Bind, BindTcp}, - ConnectTcp, Keepalive, + ConnectTcp, Keepalive, UserTimeout, }; use linkerd_stack::{ layer::Layer, service_fn, ExtractParam, InsertParam, NewService, Param, ServiceExt, }; use linkerd_tls as tls; use linkerd_tls_test_util as test_util; -use rcgen::{BasicConstraints, Certificate, CertificateParams, IsCa, SanType}; +use rcgen::{BasicConstraints, CertificateParams, IsCa, KeyPair, SanType}; use std::str::FromStr; use std::{ net::SocketAddr, @@ -29,20 +29,25 @@ use tokio::net::TcpStream; use tracing::Instrument; fn generate_cert_with_name(subject_alt_names: Vec) -> (Vec, Vec, String) { + let root_key = KeyPair::generate().unwrap(); let mut root_params = CertificateParams::default(); root_params.is_ca = IsCa::Ca(BasicConstraints::Unconstrained); - let root_cert = Certificate::from_params(root_params).expect("should generate root"); + let root_cert = root_params + .self_signed(&root_key) + .expect("should generate root"); + + let issuer_key = KeyPair::generate().unwrap(); let mut params = CertificateParams::default(); params.subject_alt_names = subject_alt_names; - - let cert = Certificate::from_params(params).expect("should generate cert"); + let cert = params + .signed_by(&issuer_key, &root_cert, &root_key) + .expect("should generate 
cert"); ( - cert.serialize_der_with_signer(&root_cert) - .expect("should serialize"), - cert.serialize_private_key_der(), - root_cert.serialize_pem().expect("should serialize"), + cert.der().to_vec(), + issuer_key.serialize_der(), + root_cert.pem(), ) } @@ -51,7 +56,7 @@ pub fn fails_processing_cert_when_wrong_id_configured(mode: meshtls::Mode) { let id = Id::Dns(server_name.clone()); let (cert, key, roots) = - generate_cert_with_name(vec![SanType::URI("spiffe://system/local".into())]); + generate_cert_with_name(vec![SanType::URI("spiffe://system/local".parse().unwrap())]); let (mut store, _) = mode .watch(id, server_name.clone(), &roots) .expect("should construct"); @@ -283,7 +288,7 @@ where let tls = Some(client_server_id.clone()); let client = async move { let conn = tls::Client::layer(client_tls) - .layer(ConnectTcp::new(Keepalive(None))) + .layer(ConnectTcp::new(Keepalive(None), UserTimeout(None))) .oneshot(Target(server_addr.into(), client_server_id)) .await; match conn { @@ -406,6 +411,11 @@ impl Param for Server { Keepalive(None) } } +impl Param for Server { + fn param(&self) -> UserTimeout { + UserTimeout(None) + } +} // === impl ServerParams === diff --git a/linkerd/meshtls/verifier/Cargo.toml b/linkerd/meshtls/verifier/Cargo.toml index f476eaf0c3..602c66f02a 100644 --- a/linkerd/meshtls/verifier/Cargo.toml +++ b/linkerd/meshtls/verifier/Cargo.toml @@ -1,18 +1,18 @@ [package] name = "linkerd-meshtls-verifier" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [dependencies] tracing = "0.1" -x509-parser = "0.16.0" +x509-parser = "0.17.0" linkerd-error = { path = "../../error" } linkerd-identity = { path = "../../identity" } [dev-dependencies] -rcgen = "0.12.0" +rcgen = "0.13.2" diff --git a/linkerd/meshtls/verifier/src/lib.rs 
b/linkerd/meshtls/verifier/src/lib.rs index e2d55392ab..c7fce564e4 100644 --- a/linkerd/meshtls/verifier/src/lib.rs +++ b/linkerd/meshtls/verifier/src/lib.rs @@ -61,22 +61,24 @@ mod tests { use crate::client_identity; use crate::verify_id; use linkerd_identity::Id; - use rcgen::{Certificate, CertificateParams, SanType}; + use rcgen::{CertificateParams, KeyPair, SanType}; fn generate_cert_with_names(subject_alt_names: Vec) -> Vec { + let key = KeyPair::generate().expect("should generate key"); let mut params = CertificateParams::default(); params.subject_alt_names = subject_alt_names; - Certificate::from_params(params) + params + .self_signed(&key) .expect("should generate cert") - .serialize_der() - .expect("should serialize") + .der() + .to_vec() } #[test] pub fn cert_with_dns_san_matches_dns_id() { let dns_name = "foo.ns1.serviceaccount.identity.linkerd.cluster.local"; - let cert = generate_cert_with_names(vec![SanType::DnsName(dns_name.into())]); + let cert = generate_cert_with_names(vec![SanType::DnsName(dns_name.parse().unwrap())]); let id = Id::parse_dns_name(dns_name).expect("should parse DNS id"); assert!(verify_id(&cert, &id).is_ok()); } @@ -84,7 +86,7 @@ mod tests { #[test] fn cert_with_spiffe_san_matches_spiffe_id() { let spiffe_uri = "spiffe://identity.linkerd.cluster.local/ns/ns1/sa/foo"; - let cert = generate_cert_with_names(vec![SanType::URI(spiffe_uri.into())]); + let cert = generate_cert_with_names(vec![SanType::URI(spiffe_uri.parse().unwrap())]); let id = Id::parse_uri(spiffe_uri).expect("should parse SPIFFE id"); assert!(verify_id(&cert, &id).is_ok()); } @@ -92,7 +94,9 @@ mod tests { #[test] pub fn cert_with_dns_san_does_not_match_dns_id() { let dns_name_cert = vec![SanType::DnsName( - "foo.ns1.serviceaccount.identity.linkerd.cluster.local".into(), + "foo.ns1.serviceaccount.identity.linkerd.cluster.local" + .parse() + .unwrap(), )]; let dns_name = "bar.ns1.serviceaccount.identity.linkerd.cluster.local"; @@ -104,7 +108,9 @@ mod tests { #[test] fn 
cert_with_dns_san_does_not_match_spiffe_id() { let dns_name_cert = vec![SanType::DnsName( - "bar.ns1.serviceaccount.identity.linkerd.cluster.local".into(), + "bar.ns1.serviceaccount.identity.linkerd.cluster.local" + .parse() + .unwrap(), )]; let spiffe_uri = "spiffe://some-trust-comain/some-system/some-component"; @@ -136,9 +142,9 @@ mod tests { let spiffe_id = "spiffe://some-trust-comain/some-system/some-component"; let cert = generate_cert_with_names(vec![ - SanType::DnsName(foo_dns_id.into()), - SanType::DnsName(bar_dns_id.into()), - SanType::URI(spiffe_id.into()), + SanType::DnsName(foo_dns_id.parse().unwrap()), + SanType::DnsName(bar_dns_id.parse().unwrap()), + SanType::URI(spiffe_id.parse().unwrap()), ]); let id = Id::parse_dns_name(foo_dns_id).expect("should parse DNS id"); assert!(verify_id(&cert, &id).is_ok()); @@ -151,9 +157,9 @@ mod tests { let spiffe_id = "spiffe://some-trust-comain/some-system/some-component"; let cert = generate_cert_with_names(vec![ - SanType::DnsName(foo_dns_id.into()), - SanType::DnsName(bar_dns_id.into()), - SanType::URI(spiffe_id.into()), + SanType::DnsName(foo_dns_id.parse().unwrap()), + SanType::DnsName(bar_dns_id.parse().unwrap()), + SanType::URI(spiffe_id.parse().unwrap()), ]); let id = Id::parse_uri(spiffe_id).expect("should parse SPIFFE id"); assert!(verify_id(&cert, &id).is_ok()); @@ -167,9 +173,9 @@ mod tests { let spiffe_id = "spiffe://some-trust-comain/some-system/some-component"; let cert = generate_cert_with_names(vec![ - SanType::DnsName(foo_dns_id.into()), - SanType::DnsName(bar_dns_id.into()), - SanType::URI(spiffe_id.into()), + SanType::DnsName(foo_dns_id.parse().unwrap()), + SanType::DnsName(bar_dns_id.parse().unwrap()), + SanType::URI(spiffe_id.parse().unwrap()), ]); let id = Id::parse_dns_name(nar_dns_id).expect("should parse DNS id"); assert!(verify_id(&cert, &id).is_err()); @@ -183,9 +189,9 @@ mod tests { let spiffe_id = "spiffe://some-trust-comain/some-system/some-component"; let cert = 
generate_cert_with_names(vec![ - SanType::DnsName(foo_dns_id.into()), - SanType::DnsName(bar_dns_id.into()), - SanType::DnsName(nar_dns_id.into()), + SanType::DnsName(foo_dns_id.parse().unwrap()), + SanType::DnsName(bar_dns_id.parse().unwrap()), + SanType::DnsName(nar_dns_id.parse().unwrap()), ]); let id = Id::parse_uri(spiffe_id).expect("should parse SPIFFE id"); assert!(verify_id(&cert, &id).is_err()); @@ -195,7 +201,7 @@ mod tests { fn can_extract_spiffe_client_identity_one_san() { let spiffe_id = "spiffe://some-trust-comain/some-system/some-component"; - let cert = generate_cert_with_names(vec![SanType::URI(spiffe_id.into())]); + let cert = generate_cert_with_names(vec![SanType::URI(spiffe_id.parse().unwrap())]); let id = Id::parse_uri(spiffe_id).expect("should parse SPIFFE id"); let client_id = client_identity(&cert); assert_eq!(client_id, Some(id)); @@ -208,9 +214,9 @@ mod tests { let nar_dns_id = "nar.ns1.serviceaccount.identity.linkerd.cluster.local"; let cert = generate_cert_with_names(vec![ - SanType::URI(spiffe_id.into()), - SanType::DnsName(bar_dns_id.into()), - SanType::DnsName(nar_dns_id.into()), + SanType::URI(spiffe_id.parse().unwrap()), + SanType::DnsName(bar_dns_id.parse().unwrap()), + SanType::DnsName(nar_dns_id.parse().unwrap()), ]); let id = Id::parse_uri(spiffe_id).expect("should parse SPIFFE id"); let client_id = client_identity(&cert); @@ -221,7 +227,7 @@ mod tests { fn can_extract_dns_client_identity_one_san() { let dns_id = "foo.ns1.serviceaccount.identity.linkerd.cluster.local"; - let cert = generate_cert_with_names(vec![SanType::DnsName(dns_id.into())]); + let cert = generate_cert_with_names(vec![SanType::DnsName(dns_id.parse().unwrap())]); let id = Id::parse_dns_name(dns_id).expect("should parse DNS id"); let client_id = client_identity(&cert); assert_eq!(client_id, Some(id)); @@ -235,10 +241,10 @@ mod tests { let spiffe_id = "spiffe://some-trust-comain/some-system/some-component"; let cert = generate_cert_with_names(vec![ - 
SanType::DnsName(dns_id.into()), - SanType::DnsName(bar_dns_id.into()), - SanType::DnsName(nar_dns_id.into()), - SanType::URI(spiffe_id.into()), + SanType::DnsName(dns_id.parse().unwrap()), + SanType::DnsName(bar_dns_id.parse().unwrap()), + SanType::DnsName(nar_dns_id.parse().unwrap()), + SanType::URI(spiffe_id.parse().unwrap()), ]); let id = Id::parse_dns_name(dns_id).expect("should parse DNS id"); let client_id = client_identity(&cert); @@ -252,9 +258,9 @@ mod tests { let email_san_2 = "bar@bar.com"; let cert = generate_cert_with_names(vec![ - SanType::DnsName(dns_id.into()), - SanType::Rfc822Name(email_san_1.into()), - SanType::Rfc822Name(email_san_2.into()), + SanType::DnsName(dns_id.parse().unwrap()), + SanType::Rfc822Name(email_san_1.parse().unwrap()), + SanType::Rfc822Name(email_san_2.parse().unwrap()), ]); let id = Id::parse_dns_name(dns_id).expect("should parse DNS id"); let client_id = client_identity(&cert); @@ -268,9 +274,9 @@ mod tests { let email_san_2 = "bar@bar.com"; let cert = generate_cert_with_names(vec![ - SanType::URI(spiffe_id.into()), - SanType::Rfc822Name(email_san_1.into()), - SanType::Rfc822Name(email_san_2.into()), + SanType::URI(spiffe_id.parse().unwrap()), + SanType::Rfc822Name(email_san_1.parse().unwrap()), + SanType::Rfc822Name(email_san_2.parse().unwrap()), ]); let id = Id::parse_uri(spiffe_id).expect("should parse SPIFFE id"); let client_id = client_identity(&cert); @@ -281,7 +287,7 @@ mod tests { fn skips_dns_san_with_trailing_dot() { let dns_id = "foo.ns1.serviceaccount.identity.linkerd.cluster.local."; - let cert = generate_cert_with_names(vec![SanType::DnsName(dns_id.into())]); + let cert = generate_cert_with_names(vec![SanType::DnsName(dns_id.parse().unwrap())]); let client_id = client_identity(&cert); assert_eq!(client_id, None); } diff --git a/linkerd/metrics/Cargo.toml b/linkerd/metrics/Cargo.toml index 5e9d40ce19..3b3a646ce3 100644 --- a/linkerd/metrics/Cargo.toml +++ b/linkerd/metrics/Cargo.toml @@ -1,28 +1,33 @@ [package] 
name = "linkerd-metrics" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [features] default = [] -process = ["linkerd-system"] -stack = ["linkerd-stack"] +process = ["dep:kubert-prometheus-process"] +stack = ["dep:linkerd-stack"] test_util = [] [dependencies] +bytes = { workspace = true } deflate = { version = "1", features = ["gzip"] } -http = "0.2" -hyper = { version = "0.14", features = ["http1", "http2"] } -linkerd-stack = { path = "../stack", optional = true } -linkerd-system = { path = "../system", optional = true } +http = { workspace = true } +http-body = { workspace = true } +http-body-util = { workspace = true } +hyper = { workspace = true, features = ["http1", "http2"] } +kubert-prometheus-process = { version = "0.2", optional = true } parking_lot = "0.12" -prometheus-client = "0.22" +prometheus-client = { workspace = true } tokio = { version = "1", features = ["time"] } tracing = "0.1" +linkerd-http-box = { path = "../http/box" } +linkerd-stack = { path = "../stack", optional = true } + [dev-dependencies] quickcheck = { version = "1", default-features = false } tokio = { version = "1", features = ["rt", "macros", "test-util", "time"] } diff --git a/linkerd/metrics/src/lib.rs b/linkerd/metrics/src/lib.rs index ce48077dd7..111ae1ee85 100644 --- a/linkerd/metrics/src/lib.rs +++ b/linkerd/metrics/src/lib.rs @@ -8,14 +8,15 @@ mod fmt; mod gauge; mod histogram; pub mod latency; -#[cfg(feature = "linkerd-stack")] +#[cfg(feature = "stack")] mod new_metrics; -#[cfg(feature = "process")] -pub mod process; mod serve; mod store; -#[cfg(feature = "linkerd-stack")] +#[cfg(feature = "process")] +pub use kubert_prometheus_process as process; + +#[cfg(feature = "stack")] pub use self::new_metrics::NewMetrics; pub use self::{ counter::Counter, @@ 
-36,7 +37,7 @@ pub mod prom { metrics::{ counter::{ConstCounter, Counter}, family::Family, - gauge::{ConstGauge, Gauge}, + gauge::{Atomic as GaugeAtomic, ConstGauge, Gauge}, histogram::Histogram, info::Info, }, @@ -85,7 +86,12 @@ pub trait Factor { const MAX_PRECISE_UINT64: u64 = 0x20_0000_0000_0000; impl Factor for () { + #[inline] fn factor(n: u64) -> f64 { - n.wrapping_rem(MAX_PRECISE_UINT64 + 1) as f64 + to_f64(n) } } + +pub fn to_f64(n: u64) -> f64 { + n.wrapping_rem(MAX_PRECISE_UINT64 + 1) as f64 +} diff --git a/linkerd/metrics/src/new_metrics.rs b/linkerd/metrics/src/new_metrics.rs index 4be95dbb83..8f4f4c6c8a 100644 --- a/linkerd/metrics/src/new_metrics.rs +++ b/linkerd/metrics/src/new_metrics.rs @@ -7,67 +7,86 @@ use std::{fmt, hash::Hash, marker::PhantomData, sync::Arc}; /// Wraps an `N`-typed inner `NewService`, extracting `K`-typed label from each target. The label /// scope is used to procure an `M`-typed sensor that is used to actually record metrics. The new /// service uses the inner service and the `M`-typed sensor to construct a new `S`-typed service. 
-pub struct NewMetrics { +pub struct NewMetrics { store: SharedStore, inner: N, + params: X, _svc: PhantomData S>, } -impl NewMetrics +impl NewMetrics where K: Hash + Eq, + X: Clone, { - pub fn layer(store: SharedStore) -> impl svc::layer::Layer + Clone { + pub fn layer_via( + store: SharedStore, + params: X, + ) -> impl svc::layer::Layer + Clone { svc::layer::mk(move |inner| Self { store: store.clone(), inner, + params: params.clone(), _svc: PhantomData, }) } } -impl svc::NewService for NewMetrics +impl NewMetrics +where + K: Hash + Eq, +{ + pub fn layer(store: SharedStore) -> impl svc::layer::Layer + Clone { + Self::layer_via(store, ()) + } +} + +impl svc::NewService for NewMetrics where - T: svc::Param, N: svc::NewService, S: From<(N::Service, Arc)>, M: Default, K: Hash + Eq, + X: svc::ExtractParam, { type Service = S; fn new_service(&self, target: T) -> Self::Service { - let key = target.param(); + let key = self.params.extract_param(&target); let inner = self.inner.new_service(target); let metric = self.store.lock().get_or_default(key).clone(); S::from((inner, metric)) } } -impl Clone for NewMetrics +impl Clone for NewMetrics where N: Clone, K: Hash + Eq, + X: Clone, { fn clone(&self) -> Self { Self { store: self.store.clone(), inner: self.inner.clone(), + params: self.params.clone(), _svc: PhantomData, } } } -impl fmt::Debug for NewMetrics +impl fmt::Debug for NewMetrics where N: fmt::Debug, K: Hash + Eq + fmt::Debug, + X: fmt::Debug, M: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { use std::any::type_name; f.debug_struct(type_name::()) .field("store", &self.store) + .field("params", &self.params) .field("inner", &self.inner) .field("svc", &format_args!("PhantomData<{}>", type_name::())) .finish() diff --git a/linkerd/metrics/src/process.rs b/linkerd/metrics/src/process.rs deleted file mode 100644 index 04fbe3bf36..0000000000 --- a/linkerd/metrics/src/process.rs +++ /dev/null @@ -1,186 +0,0 @@ -use crate::prom::{self, 
encoding::EncodeMetric}; -use std::time::{SystemTime, UNIX_EPOCH}; -use tokio::time::Instant; - -pub fn register(reg: &mut prom::Registry) { - let start_time = Instant::now(); - let start_time_from_epoch = SystemTime::now() - .duration_since(UNIX_EPOCH) - .expect("process start time"); - - reg.register_with_unit( - "start_time", - "Time that the process started (in seconds since the UNIX epoch)", - prom::Unit::Seconds, - prom::ConstGauge::new(start_time_from_epoch.as_secs_f64()), - ); - - reg.register_collector(Box::new(ProcessCollector { - start_time, - #[cfg(target_os = "linux")] - system: linux::System::new(), - })); - - tracing::debug!("Process metrics registered"); - - #[cfg(not(target_os = "linux"))] - tracing::debug!("System-level process metrics are only supported on Linux"); -} - -#[derive(Debug)] -struct ProcessCollector { - start_time: Instant, - #[cfg(target_os = "linux")] - system: linux::System, -} - -impl prom::collector::Collector for ProcessCollector { - fn encode(&self, mut encoder: prom::encoding::DescriptorEncoder<'_>) -> std::fmt::Result { - let uptime = prom::ConstCounter::new( - Instant::now() - .saturating_duration_since(self.start_time) - .as_secs_f64(), - ); - let ue = encoder.encode_descriptor( - "uptime", - "Total time since the process started (in seconds)", - Some(&prom::Unit::Seconds), - prom::metrics::MetricType::Counter, - )?; - uptime.encode(ue)?; - - #[cfg(target_os = "linux")] - self.system.encode(encoder)?; - - Ok(()) - } -} - -#[cfg(target_os = "linux")] -mod linux { - use crate::prom::{self, encoding::EncodeMetric}; - use linkerd_system as sys; - use tokio::time::Duration; - - #[derive(Clone, Debug)] - pub(super) struct System { - page_size: Option, - ms_per_tick: Option, - } - - impl System { - pub fn new() -> Self { - let page_size = match sys::page_size() { - Ok(ps) => Some(ps), - Err(err) => { - tracing::debug!("Failed to load page size: {}", err); - None - } - }; - let ms_per_tick = match sys::ms_per_tick() { - Ok(mpt) => 
Some(mpt), - Err(err) => { - tracing::debug!("Failed to load cpu clock speed: {}", err); - None - } - }; - - Self { - page_size, - ms_per_tick, - } - } - } - - impl prom::collector::Collector for System { - fn encode(&self, mut encoder: prom::encoding::DescriptorEncoder<'_>) -> std::fmt::Result { - let stat = match sys::blocking_stat() { - Ok(stat) => stat, - Err(error) => { - tracing::warn!(%error, "Failed to read process stats"); - return Ok(()); - } - }; - - if let Some(mpt) = self.ms_per_tick { - let clock_ticks = stat.utime + stat.stime; - let cpu = - prom::ConstCounter::new(Duration::from_millis(clock_ticks * mpt).as_secs_f64()); - let cpue = encoder.encode_descriptor( - "cpu", - "Total user and system CPU time spent in seconds", - Some(&prom::Unit::Seconds), - prom::metrics::MetricType::Counter, - )?; - cpu.encode(cpue)?; - } else { - tracing::debug!("Could not determine CPU usage"); - } - - let vm_bytes = prom::ConstGauge::new(stat.vsize as i64); - let vme = encoder.encode_descriptor( - "virtual_memory", - "Virtual memory size in bytes", - Some(&prom::Unit::Bytes), - prom::metrics::MetricType::Gauge, - )?; - vm_bytes.encode(vme)?; - - if let Some(ps) = self.page_size { - let rss_bytes = prom::ConstGauge::new((stat.rss * ps) as i64); - let rsse = encoder.encode_descriptor( - "resident_memory", - "Resident memory size in bytes", - Some(&prom::Unit::Bytes), - prom::metrics::MetricType::Gauge, - )?; - rss_bytes.encode(rsse)?; - } else { - tracing::debug!("Could not determine RSS"); - } - - match sys::open_fds(stat.pid) { - Ok(open_fds) => { - let fds = prom::ConstGauge::new(open_fds as i64); - let fdse = encoder.encode_descriptor( - "open_fds", - "Number of open file descriptors", - None, - prom::metrics::MetricType::Gauge, - )?; - fds.encode(fdse)?; - } - Err(error) => { - tracing::warn!(%error, "Could not determine open fds"); - } - } - - match sys::max_fds() { - Ok(max_fds) => { - let fds = prom::ConstGauge::new(max_fds as i64); - let fdse = 
encoder.encode_descriptor( - "max_fds", - "Maximum number of open file descriptors", - None, - prom::metrics::MetricType::Gauge, - )?; - fds.encode(fdse)?; - } - Err(error) => { - tracing::warn!(%error, "Could not determine max fds"); - } - } - - let threads = prom::ConstGauge::new(stat.num_threads); - let te = encoder.encode_descriptor( - "threads", - "Number of OS threads in the process.", - None, - prom::metrics::MetricType::Gauge, - )?; - threads.encode(te)?; - - Ok(()) - } - } -} diff --git a/linkerd/metrics/src/serve.rs b/linkerd/metrics/src/serve.rs index 96edfb6a03..6993a89e3f 100644 --- a/linkerd/metrics/src/serve.rs +++ b/linkerd/metrics/src/serve.rs @@ -1,5 +1,6 @@ +use bytes::Bytes; use deflate::{write::GzEncoder, CompressionOptions}; -use hyper::Body; +use linkerd_http_box::BoxBody; use std::io::Write; use tracing::trace; @@ -33,7 +34,7 @@ impl Serve { } impl Serve { - pub fn serve(&self, req: http::Request) -> std::io::Result> { + pub fn serve(&self, req: http::Request) -> std::io::Result> { if Self::is_gzip(&req) { trace!("gzipping metrics"); let mut writer = GzEncoder::new(Vec::::new(), CompressionOptions::fast()); @@ -41,14 +42,18 @@ impl Serve { Ok(http::Response::builder() .header(http::header::CONTENT_ENCODING, "gzip") .header(http::header::CONTENT_TYPE, "text/plain") - .body(writer.finish()?.into()) + .body(BoxBody::new(http_body_util::Full::::from( + writer.finish().map(Bytes::from)?, + ))) .expect("Response must be valid")) } else { let mut writer = Vec::::new(); write!(&mut writer, "{}", self.metrics.as_display())?; Ok(http::Response::builder() .header(http::header::CONTENT_TYPE, "text/plain") - .body(Body::from(writer)) + .body(BoxBody::new(http_body_util::Full::::from( + Bytes::from(writer), + ))) .expect("Response must be valid")) } } diff --git a/linkerd/mock/http-body/Cargo.toml b/linkerd/mock/http-body/Cargo.toml new file mode 100644 index 0000000000..c25abd041a --- /dev/null +++ b/linkerd/mock/http-body/Cargo.toml @@ -0,0 +1,17 @@ 
+[package] +name = "linkerd-mock-http-body" +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } +description = """ +Mock `http_body::Body` facilities for use in tests. +""" + +[dependencies] +bytes = { workspace = true } +http = { workspace = true } +http-body = { workspace = true } +linkerd-error = { path = "../../error" } +tokio = { version = "1", default-features = false, features = ["rt"] } diff --git a/linkerd/mock/http-body/src/lib.rs b/linkerd/mock/http-body/src/lib.rs new file mode 100644 index 0000000000..c052641a47 --- /dev/null +++ b/linkerd/mock/http-body/src/lib.rs @@ -0,0 +1,84 @@ +//! Mock [`http_body::Body`] facilities for use in tests. +//! +//! See [`MockBody`] for more information. + +use bytes::Bytes; +use http_body::{Body, Frame}; +use linkerd_error::Error; +use std::{ + collections::VecDeque, + pin::Pin, + task::{Context, Poll}, +}; + +/// A "mock" body. +/// +/// This type contains polling results for [`Body`]. +#[derive(Default)] +pub struct MockBody { + data_polls: VecDeque>>>, + trailer_polls: VecDeque>>>, +} + +// === impl MockBody === + +impl MockBody { + /// Appends a poll outcome for [`Body::poll_frame()`]. + pub fn then_yield_data(mut self, poll: Poll>>) -> Self { + self.data_polls.push_back(poll); + self + } + + /// Appends a [`Poll`] outcome for [`Body::poll_frame()`]. + /// + /// These this will be yielded after data has been polled. + pub fn then_yield_trailer( + mut self, + poll: Poll>>, + ) -> Self { + self.trailer_polls.push_back(poll); + self + } + + /// Schedules a task to be awoken. 
+ fn schedule(cx: &Context<'_>) { + let waker = cx.waker().clone(); + tokio::spawn(async move { + waker.wake(); + }); + } +} + +impl Body for MockBody { + type Data = Bytes; + type Error = Error; + + fn poll_frame( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll, Self::Error>>> { + let Self { + data_polls, + trailer_polls, + } = self.get_mut(); + + let poll = { + let mut next_data = || data_polls.pop_front().map(|p| p.map_ok(Frame::data)); + let next_trailer = || trailer_polls.pop_front().map(|p| p.map_ok(Frame::trailers)); + next_data() + .or_else(next_trailer) + .unwrap_or(Poll::Ready(None)) + }; + + // If we return `Poll::Pending`, we must schedule the task to be awoken. + if poll.is_pending() { + Self::schedule(cx); + } + + poll + } + + fn is_end_stream(&self) -> bool { + self.data_polls.is_empty() && self.trailer_polls.is_empty() + } +} diff --git a/linkerd/opaq-route/Cargo.toml b/linkerd/opaq-route/Cargo.toml new file mode 100644 index 0000000000..918c66d9be --- /dev/null +++ b/linkerd/opaq-route/Cargo.toml @@ -0,0 +1,7 @@ +[package] +name = "linkerd-opaq-route" +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } diff --git a/linkerd/opaq-route/src/lib.rs b/linkerd/opaq-route/src/lib.rs new file mode 100644 index 0000000000..de69767672 --- /dev/null +++ b/linkerd/opaq-route/src/lib.rs @@ -0,0 +1,18 @@ +//! An TCP route matching library for Linkerd to support the TCPRoute +//! Kubernetes Gateway API types. + +#![deny(rust_2018_idioms, clippy::disallowed_methods, clippy::disallowed_types)] +#![forbid(unsafe_code)] + +/// Groups routing rules under a common set of SNIs. +#[derive(Clone, Debug, Hash, PartialEq, Eq)] +pub struct Route

{ + /// Must not be empty. + pub policy: P, +} + +/// Policies for a given set of route matches. +#[derive(Clone, Debug, Default, Hash, PartialEq, Eq)] +pub struct Rule

{ + pub policy: P, +} diff --git a/linkerd/opencensus/Cargo.toml b/linkerd/opencensus/Cargo.toml index 65d1b45eed..5e388dd2b7 100644 --- a/linkerd/opencensus/Cargo.toml +++ b/linkerd/opencensus/Cargo.toml @@ -1,19 +1,19 @@ [package] name = "linkerd-opencensus" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [dependencies] futures = { version = "0.3", default-features = false } -http = "0.2" -http-body = "0.4" +http-body = { workspace = true } linkerd-error = { path = "../error" } linkerd-metrics = { path = "../metrics" } +linkerd-trace-context = { path = "../trace-context" } opencensus-proto = { path = "../../opencensus-proto" } -tonic = { version = "0.10", default-features = false, features = [ +tonic = { workspace = true, default-features = false, features = [ "prost", "codegen", ] } diff --git a/linkerd/opencensus/src/lib.rs b/linkerd/opencensus/src/lib.rs index dd7d0a156c..68ac4b6451 100644 --- a/linkerd/opencensus/src/lib.rs +++ b/linkerd/opencensus/src/lib.rs @@ -3,28 +3,32 @@ pub mod metrics; +use self::metrics::Registry; use futures::stream::{Stream, StreamExt}; -use http_body::Body as HttpBody; +use http_body::Body; use linkerd_error::Error; -use metrics::Registry; +use linkerd_trace_context::export::{ExportSpan, SpanKind}; pub use opencensus_proto as proto; -use opencensus_proto::agent::common::v1::Node; -use opencensus_proto::agent::trace::v1::{ - trace_service_client::TraceServiceClient, ExportTraceServiceRequest, +use opencensus_proto::{ + agent::{ + common::v1::Node, + trace::v1::{trace_service_client::TraceServiceClient, ExportTraceServiceRequest}, + }, + trace::v1::{Span, TruncatableString}, }; -use opencensus_proto::trace::v1::Span; +use std::collections::HashMap; use tokio::{sync::mpsc, time}; use 
tokio_stream::wrappers::ReceiverStream; use tonic::{self as grpc, body::BoxBody, client::GrpcService}; -use tracing::{debug, trace}; +use tracing::{debug, info, trace}; pub async fn export_spans(client: T, node: Node, spans: S, metrics: Registry) where T: GrpcService + Clone, T::Error: Into, - T::ResponseBody: Default + HttpBody + Send + 'static, - ::Error: Into + Send, - S: Stream + Unpin, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + S: Stream + Unpin, { debug!("Span exporter running"); SpanExporter::new(client, node, spans, metrics).run().await @@ -47,9 +51,9 @@ impl SpanExporter where T: GrpcService, T::Error: Into, - T::ResponseBody: Default + HttpBody + Send + 'static, - ::Error: Into + Send, - S: Stream + Unpin, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + S: Stream + Unpin, { const MAX_BATCH_SIZE: usize = 1000; const MAX_BATCH_IDLE: time::Duration = time::Duration::from_secs(10); @@ -175,6 +179,13 @@ where res = spans.next() => match res { Some(span) => { trace!(?span, "Adding to batch"); + let span = match convert_span(span) { + Ok(span) => span, + Err(error) => { + info!(%error, "Span dropped"); + continue; + } + }; accum.push(span); } None => return Err(SpanRxClosed), @@ -192,3 +203,61 @@ where } } } + +fn convert_span(span: ExportSpan) -> Result { + use proto::trace::v1 as oc; + + let ExportSpan { + mut span, + kind, + labels, + } = span; + + let mut attributes = HashMap::::new(); + for (k, v) in labels.iter() { + attributes.insert( + k.clone(), + oc::AttributeValue { + value: Some(oc::attribute_value::Value::StringValue(truncatable( + v.clone(), + ))), + }, + ); + } + for (k, v) in span.labels.drain() { + attributes.insert( + k.to_string(), + oc::AttributeValue { + value: Some(oc::attribute_value::Value::StringValue(truncatable(v))), + }, + ); + } + Ok(Span { + trace_id: span.trace_id.into_bytes::<16>()?.to_vec(), + span_id: span.span_id.into_bytes::<8>()?.to_vec(), + tracestate: None, + parent_span_id: 
span.parent_id.into_bytes::<8>()?.to_vec(), + name: Some(truncatable(span.span_name)), + kind: kind as i32, + start_time: Some(span.start.into()), + end_time: Some(span.end.into()), + attributes: Some(oc::span::Attributes { + attribute_map: attributes, + dropped_attributes_count: 0, + }), + stack_trace: None, + time_events: None, + links: None, + status: None, // TODO: this is gRPC status; we must read response trailers to populate this + resource: None, + same_process_as_parent_span: Some(kind == SpanKind::Client), + child_span_count: None, + }) +} + +fn truncatable(value: String) -> TruncatableString { + TruncatableString { + value, + truncated_byte_count: 0, + } +} diff --git a/linkerd/opentelemetry/Cargo.toml b/linkerd/opentelemetry/Cargo.toml new file mode 100644 index 0000000000..790135fd38 --- /dev/null +++ b/linkerd/opentelemetry/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "linkerd-opentelemetry" +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } + +[dependencies] +futures = { version = "0.3", default-features = false } +http-body = { workspace = true } +linkerd-error = { path = "../error" } +linkerd-metrics = { path = "../metrics" } +linkerd-trace-context = { path = "../trace-context" } +opentelemetry = { version = "0.29", default-features = false, features = ["trace"] } +opentelemetry_sdk = { version = "0.29", default-features = false, features = ["trace"] } +opentelemetry-proto = { path = "../../opentelemetry-proto" } +tonic = { workspace = true, default-features = false, features = [ + "prost", + "codegen", +] } +tokio = { version = "1", features = ["macros", "sync", "time"] } +tracing = "0.1" diff --git a/linkerd/opentelemetry/src/lib.rs b/linkerd/opentelemetry/src/lib.rs new file mode 100644 index 0000000000..86a32bea0b --- /dev/null +++ b/linkerd/opentelemetry/src/lib.rs @@ -0,0 +1,267 @@ +#![deny(rust_2018_idioms, 
clippy::disallowed_methods, clippy::disallowed_types)] +#![forbid(unsafe_code)] + +pub mod metrics; + +use self::metrics::Registry; +use futures::stream::{Stream, StreamExt}; +use http_body::Body; +use linkerd_error::Error; +use linkerd_trace_context::{self as trace_context, export::ExportSpan}; +pub use opentelemetry as otel; +use opentelemetry::{ + trace::{SpanContext, SpanId, SpanKind, Status, TraceFlags, TraceId, TraceState}, + KeyValue, +}; +pub use opentelemetry_proto as proto; +use opentelemetry_proto::{ + proto::{ + collector::trace::v1::{ + trace_service_client::TraceServiceClient, ExportTraceServiceRequest, + }, + trace::v1::ResourceSpans, + }, + transform::{common::ResourceAttributesWithSchema, trace::group_spans_by_resource_and_scope}, +}; +use opentelemetry_sdk::trace::SpanLinks; +pub use opentelemetry_sdk::{self as sdk, trace::SpanData}; +use tokio::{sync::mpsc, time}; +use tonic::{self as grpc, body::BoxBody, client::GrpcService}; +use tracing::{debug, info, trace}; + +pub async fn export_spans( + client: T, + spans: S, + resource: ResourceAttributesWithSchema, + metrics: Registry, +) where + T: GrpcService + Clone, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + S: Stream + Unpin, +{ + debug!("Span exporter running"); + SpanExporter::new(client, spans, resource, metrics) + .run() + .await +} + +/// SpanExporter sends a Stream of spans to the given TraceService gRPC service. 
+struct SpanExporter { + client: T, + spans: S, + resource: ResourceAttributesWithSchema, + metrics: Registry, +} + +#[derive(Debug)] +struct SpanRxClosed; + +// === impl SpanExporter === + +impl SpanExporter +where + T: GrpcService + Clone, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + S: Stream + Unpin, +{ + const MAX_BATCH_SIZE: usize = 1000; + const MAX_BATCH_IDLE: time::Duration = time::Duration::from_secs(10); + + fn new(client: T, spans: S, resource: ResourceAttributesWithSchema, metrics: Registry) -> Self { + Self { + client, + spans, + resource, + metrics, + } + } + + async fn run(self) { + let Self { + client, + mut spans, + resource, + mut metrics, + } = self; + + // Holds the batch of pending spans. Cleared as the spans are flushed. + // Contains no more than MAX_BATCH_SIZE spans. + let mut accum = Vec::new(); + + let mut svc = TraceServiceClient::new(client); + loop { + trace!("Establishing new TraceService::export request"); + metrics.start_stream(); + let (tx, mut rx) = mpsc::channel(1); + + let recv_future = async { + while let Some(req) = rx.recv().await { + match svc.export(grpc::Request::new(req)).await { + Ok(rsp) => { + let Some(partial_success) = rsp.into_inner().partial_success else { + continue; + }; + + if !partial_success.error_message.is_empty() { + debug!( + %partial_success.error_message, + rejected_spans = partial_success.rejected_spans, + "Response partially successful", + ); + } + } + Err(error) => { + debug!(%error, "Response future failed; restarting"); + } + } + } + }; + + // Drive both the response future and the export stream + // simultaneously. + tokio::select! { + _ = recv_future => {} + res = Self::export(&tx, &mut spans, &resource, &mut accum) => match res { + // The export stream closed; reconnect. + Ok(()) => {}, + // No more spans. + Err(SpanRxClosed) => return, + }, + } + } + } + + /// Accumulate spans and send them on the export stream. 
+ /// + /// Returns an error when the proxy has closed the span stream. + async fn export( + tx: &mpsc::Sender, + spans: &mut S, + resource: &ResourceAttributesWithSchema, + accum: &mut Vec, + ) -> Result<(), SpanRxClosed> { + loop { + // Collect spans into a batch. + let collect = Self::collect_batch(spans, resource, accum).await; + + // If we collected spans, flush them. + if !accum.is_empty() { + // Once a batch has been accumulated, ensure that the + // request stream is ready to accept the batch. + match tx.reserve().await { + Ok(tx) => { + let msg = ExportTraceServiceRequest { + resource_spans: std::mem::take(accum), + }; + trace!(spans = msg.resource_spans.len(), "Sending batch"); + tx.send(msg); + } + Err(error) => { + // If the channel isn't open, start a new stream + // and retry sending the batch. + debug!(%error, "Request stream lost; restarting"); + return Ok(()); + } + } + } + + // If the span source was closed, end the task. + if let Err(closed) = collect { + debug!("Span channel lost"); + return Err(closed); + } + } + } + + /// Collects spans from the proxy into `accum`. + /// + /// Returns an error when the span stream has completed. An error may be + /// returned after accumulating spans. + async fn collect_batch( + span_stream: &mut S, + resource: &ResourceAttributesWithSchema, + accum: &mut Vec, + ) -> Result<(), SpanRxClosed> { + let mut input_accum: Vec = vec![]; + + let res = loop { + if input_accum.len() == Self::MAX_BATCH_SIZE { + trace!(capacity = Self::MAX_BATCH_SIZE, "Batch capacity reached"); + break Ok(()); + } + + tokio::select! { + biased; + + res = span_stream.next() => match res { + Some(span) => { + trace!(?span, "Adding to batch"); + let span = match convert_span(span) { + Ok(span) => span, + Err(error) => { + info!(%error, "Span dropped"); + continue; + } + }; + + input_accum.push(span); + } + None => break Err(SpanRxClosed), + }, + + // Don't hold spans indefinitely. 
Return if we hit an idle + // timeout and spans have been collected. + _ = time::sleep(Self::MAX_BATCH_IDLE) => { + if !input_accum.is_empty() { + trace!(spans = input_accum.len(), "Flushing spans due to inactivitiy"); + break Ok(()); + } + } + } + }; + + *accum = group_spans_by_resource_and_scope(input_accum, resource); + + res + } +} + +fn convert_span(span: ExportSpan) -> Result { + let ExportSpan { span, kind, labels } = span; + + let mut attributes = Vec::::new(); + for (k, v) in labels.iter() { + attributes.push(KeyValue::new(k.clone(), v.clone())); + } + for (k, v) in span.labels.iter() { + attributes.push(KeyValue::new(*k, v.clone())); + } + let is_remote = kind != trace_context::export::SpanKind::Client; + Ok(SpanData { + parent_span_id: SpanId::from_bytes(span.parent_id.into_bytes()?), + span_kind: match kind { + trace_context::export::SpanKind::Server => SpanKind::Server, + trace_context::export::SpanKind::Client => SpanKind::Client, + }, + name: span.span_name.into(), + start_time: span.start, + end_time: span.end, + attributes, + dropped_attributes_count: 0, + links: SpanLinks::default(), + status: Status::Unset, // TODO: this is gRPC status; we must read response trailers to populate this + span_context: SpanContext::new( + TraceId::from_bytes(span.trace_id.into_bytes()?), + SpanId::from_bytes(span.span_id.into_bytes()?), + TraceFlags::default(), + is_remote, + TraceState::NONE, + ), + events: Default::default(), + instrumentation_scope: Default::default(), + }) +} diff --git a/linkerd/opentelemetry/src/metrics.rs b/linkerd/opentelemetry/src/metrics.rs new file mode 100644 index 0000000000..5c785de9d4 --- /dev/null +++ b/linkerd/opentelemetry/src/metrics.rs @@ -0,0 +1,58 @@ +use linkerd_metrics::{metrics, Counter, FmtMetrics}; +use std::fmt; +use std::sync::Arc; + +metrics! 
{ + opentelemetry_span_export_streams: Counter { "Total count of opened span export streams" }, + opentelemetry_span_export_requests: Counter { "Total count of span export request messages" }, + opentelemetry_span_exports: Counter { "Total count of spans exported" } +} + +#[derive(Debug)] +struct Metrics { + streams: Counter, + requests: Counter, + spans: Counter, +} + +#[derive(Clone, Debug)] +pub struct Registry(Arc); + +#[derive(Clone, Debug)] +pub struct Report(Arc); + +pub fn new() -> (Registry, Report) { + let metrics = Metrics { + streams: Counter::default(), + requests: Counter::default(), + spans: Counter::default(), + }; + let shared = Arc::new(metrics); + (Registry(shared.clone()), Report(shared)) +} + +impl Registry { + pub fn start_stream(&mut self) { + self.0.streams.incr() + } + + pub fn send(&mut self, spans: u64) { + self.0.requests.incr(); + self.0.spans.add(spans); + } +} + +impl FmtMetrics for Report { + fn fmt_metrics(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + opentelemetry_span_export_streams.fmt_help(f)?; + opentelemetry_span_export_streams.fmt_metric(f, &self.0.streams)?; + + opentelemetry_span_export_requests.fmt_help(f)?; + opentelemetry_span_export_requests.fmt_metric(f, &self.0.requests)?; + + opentelemetry_span_exports.fmt_help(f)?; + opentelemetry_span_exports.fmt_metric(f, &self.0.spans)?; + + Ok(()) + } +} diff --git a/linkerd/pool/Cargo.toml b/linkerd/pool/Cargo.toml index 94309b472d..3be1836928 100644 --- a/linkerd/pool/Cargo.toml +++ b/linkerd/pool/Cargo.toml @@ -1,9 +1,10 @@ [package] name = "linkerd-pool" -version = "0.1.0" -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [dependencies] -tower-service = "0.3" +tower-service = { workspace = true } diff --git a/linkerd/pool/mock/Cargo.toml b/linkerd/pool/mock/Cargo.toml index 50aecace25..5dcbdab361 
100644 --- a/linkerd/pool/mock/Cargo.toml +++ b/linkerd/pool/mock/Cargo.toml @@ -1,15 +1,16 @@ [package] name = "linkerd-pool-mock" -version = "0.1.0" -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [dependencies] parking_lot = "0.12" -thiserror = "1" +thiserror = "2" tokio = { version = "1", features = ["sync", "time"] } -tower-test = "0.4" +tower-test = { workspace = true } tracing = "0.1" linkerd-error = { path = "../../error" } diff --git a/linkerd/pool/p2c/Cargo.toml b/linkerd/pool/p2c/Cargo.toml index a2fda5e4cd..bf8afc1eab 100644 --- a/linkerd/pool/p2c/Cargo.toml +++ b/linkerd/pool/p2c/Cargo.toml @@ -1,17 +1,17 @@ [package] name = "linkerd-pool-p2c" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [dependencies] ahash = "0.8" futures = { version = "0.3", default-features = false } indexmap = "2" -prometheus-client = "0.22" -rand = { version = "0.8", features = ["small_rng"] } +prometheus-client = { workspace = true } +rand = { version = "0.9", features = ["small_rng"] } tokio = { version = "1", features = ["rt", "sync", "time"] } tracing = "0.1" @@ -21,7 +21,7 @@ linkerd-pool = { path = ".." 
} linkerd-stack = { path = "../../stack" } [dependencies.tower] -version = "0.4.13" +workspace = true default-features = false features = ["load", "ready-cache"] @@ -31,4 +31,4 @@ linkerd-tracing = { path = "../../tracing" } parking_lot = "0.12" quickcheck = { version = "1", default-features = false } tokio-test = "0.4" -tower-test = "0.4" +tower-test = { workspace = true } diff --git a/linkerd/pool/p2c/src/lib.rs b/linkerd/pool/p2c/src/lib.rs index 29c61f1cd2..b466058b11 100644 --- a/linkerd/pool/p2c/src/lib.rs +++ b/linkerd/pool/p2c/src/lib.rs @@ -11,7 +11,7 @@ use linkerd_error::Error; use linkerd_metrics::prom; use linkerd_pool::Pool; use linkerd_stack::{NewService, Service}; -use rand::{rngs::SmallRng, thread_rng, Rng, SeedableRng}; +use rand::{rngs::SmallRng, Rng, SeedableRng}; use std::{ collections::hash_map::Entry, net::SocketAddr, @@ -76,7 +76,7 @@ where S::Metric: std::fmt::Debug, { pub fn new(metrics: P2cMetrics, new_endpoint: N) -> Self { - let rng = SmallRng::from_rng(&mut thread_rng()).expect("RNG must be seeded"); + let rng = SmallRng::from_rng(&mut rand::rng()); Self { rng, metrics, @@ -120,8 +120,8 @@ fn gen_pair(rng: &mut SmallRng, len: usize) -> (usize, usize) { debug_assert!(len >= 2, "must have at least two endpoints"); // Get two distinct random indexes (in a random order) and // compare the loads of the service at each index. 
- let aidx = rng.gen_range(0..len); - let mut bidx = rng.gen_range(0..(len - 1)); + let aidx = rng.random_range(0..len); + let mut bidx = rng.random_range(0..(len - 1)); if bidx >= aidx { bidx += 1; } @@ -376,7 +376,7 @@ mod tests { if len < 2 { return quickcheck::TestResult::discard(); } - let mut rng = SmallRng::from_rng(rand::thread_rng()).expect("rng"); + let mut rng = SmallRng::from_rng(&mut rand::rng()); let (aidx, bidx) = gen_pair(&mut rng, len); quickcheck::TestResult::from_bool(aidx != bidx) } diff --git a/linkerd/proxy/api-resolve/Cargo.toml b/linkerd/proxy/api-resolve/Cargo.toml index 449a6b12f1..df860c91b9 100644 --- a/linkerd/proxy/api-resolve/Cargo.toml +++ b/linkerd/proxy/api-resolve/Cargo.toml @@ -1,17 +1,17 @@ [package] name = "linkerd-proxy-api-resolve" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } description = """ Implements the Resolve trait using the proxy's gRPC API """ [dependencies] futures = { version = "0.3", default-features = false } -linkerd2-proxy-api = { version = "0.13", features = ["destination"] } +linkerd2-proxy-api = { workspace = true, features = ["destination"] } linkerd-addr = { path = "../../addr" } linkerd-error = { path = "../../error" } linkerd-proxy-core = { path = "../core" } @@ -20,10 +20,10 @@ linkerd-stack = { path = "../../stack" } linkerd-tonic-stream = { path = "../../tonic-stream" } linkerd-tls = { path = "../../tls" } linkerd-identity = { path = "../../identity" } -http = "0.2" -http-body = "0.4" +http = { workspace = true } +http-body = { workspace = true } pin-project = "1" -prost = "0.12" -tonic = { version = "0.10", default-features = false } -tower = { version = "0.4", default-features = false } +prost = { workspace = true } +tonic = { workspace = true, default-features = false } 
+tower = { workspace = true, default-features = false } tracing = "0.1" diff --git a/linkerd/proxy/api-resolve/src/metadata.rs b/linkerd/proxy/api-resolve/src/metadata.rs index a1cc2b0e3f..d9466f7f38 100644 --- a/linkerd/proxy/api-resolve/src/metadata.rs +++ b/linkerd/proxy/api-resolve/src/metadata.rs @@ -27,6 +27,7 @@ pub struct Metadata { authority_override: Option, http2: HTTP2ClientParams, + is_zone_local: Option, } #[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] @@ -53,11 +54,13 @@ impl Default for Metadata { tagged_transport_port: None, protocol_hint: ProtocolHint::Unknown, http2: HTTP2ClientParams::default(), + is_zone_local: None, } } } impl Metadata { + #[allow(clippy::too_many_arguments)] pub(crate) fn new( labels: impl IntoIterator, protocol_hint: ProtocolHint, @@ -66,6 +69,7 @@ impl Metadata { authority_override: Option, weight: u32, http2: HTTP2ClientParams, + is_zone_local: Option, ) -> Self { Self { labels: labels.into_iter().collect::>().into(), @@ -75,6 +79,7 @@ impl Metadata { authority_override, weight, http2, + is_zone_local, } } @@ -87,6 +92,10 @@ impl Metadata { self.labels.clone() } + pub fn is_zone_local(&self) -> Option { + self.is_zone_local + } + pub fn protocol_hint(&self) -> ProtocolHint { self.protocol_hint } diff --git a/linkerd/proxy/api-resolve/src/pb.rs b/linkerd/proxy/api-resolve/src/pb.rs index 7bef87afe3..de38aba893 100644 --- a/linkerd/proxy/api-resolve/src/pb.rs +++ b/linkerd/proxy/api-resolve/src/pb.rs @@ -24,6 +24,16 @@ pub fn to_addr_meta( .chain(pb.metric_labels.iter()) .map(|(k, v)| (k.clone(), v.clone())); + let zone_locality = pb.metric_labels.get("zone_locality").and_then(|locality| { + if locality.eq_ignore_ascii_case("local") { + Some(true) + } else if locality.eq_ignore_ascii_case("remote") { + Some(false) + } else { + None + } + }); + let mut proto_hint = ProtocolHint::Unknown; let mut tagged_transport_port = None; if let Some(hint) = pb.protocol_hint { @@ -51,6 +61,7 @@ pub fn to_addr_meta( authority_override, 
pb.weight, http2, + zone_locality, ); Some((addr, meta)) } @@ -173,12 +184,8 @@ fn to_http2_client_params(pb: Http2ClientParams) -> linkerd_http_h2::ClientParam }) }), keep_alive: pb.keep_alive.and_then(|pb| { - let Some(interval) = pb.interval.and_then(|pb| Duration::try_from(pb).ok()) else { - return None; - }; - let Some(timeout) = pb.timeout.and_then(|pb| Duration::try_from(pb).ok()) else { - return None; - }; + let interval = pb.interval.and_then(|pb| Duration::try_from(pb).ok())?; + let timeout = pb.timeout.and_then(|pb| Duration::try_from(pb).ok())?; Some(h2::ClientKeepAlive { interval, timeout, @@ -206,8 +213,10 @@ fn to_http2_client_params(pb: Http2ClientParams) -> linkerd_http_h2::ClientParam #[cfg(test)] mod tests { use super::*; - use linkerd2_proxy_api::destination::tls_identity::{ - DnsLikeIdentity, Strategy, UriLikeIdentity, + use linkerd2_proxy_api::{ + destination::tls_identity::{DnsLikeIdentity, Strategy, UriLikeIdentity}, + net::ip_address::Ip, + net::IpAddress, }; use linkerd_identity as id; @@ -393,4 +402,65 @@ mod tests { }), ); } + + #[test] + fn zone_locality() { + let addr = WeightedAddr { + resource_ref: None, + addr: Some(TcpAddress { + ip: Some(IpAddress { + ip: Some(Ip::Ipv4(0)), + }), + port: 0, + }), + weight: 0, + metric_labels: Default::default(), + tls_identity: None, + protocol_hint: None, + authority_override: None, + http2: None, + }; + + let (_, meta) = to_addr_meta(addr.clone(), &HashMap::new()).unwrap(); + assert_eq!(meta.is_zone_local(), None); + + let (_, meta) = to_addr_meta( + WeightedAddr { + metric_labels: HashMap::from_iter([( + "zone_locality".to_string(), + "local".to_string(), + )]), + ..addr.clone() + }, + &HashMap::new(), + ) + .unwrap(); + assert_eq!(meta.is_zone_local(), Some(true)); + + let (_, meta) = to_addr_meta( + WeightedAddr { + metric_labels: HashMap::from_iter([( + "zone_locality".to_string(), + "remote".to_string(), + )]), + ..addr.clone() + }, + &HashMap::new(), + ) + .unwrap(); + 
assert_eq!(meta.is_zone_local(), Some(false)); + + let (_, meta) = to_addr_meta( + WeightedAddr { + metric_labels: HashMap::from_iter([( + "zone_locality".to_string(), + "garbage".to_string(), + )]), + ..addr.clone() + }, + &HashMap::new(), + ) + .unwrap(); + assert_eq!(meta.is_zone_local(), None); + } } diff --git a/linkerd/proxy/api-resolve/src/resolve.rs b/linkerd/proxy/api-resolve/src/resolve.rs index 1a23b8292d..56c6b60793 100644 --- a/linkerd/proxy/api-resolve/src/resolve.rs +++ b/linkerd/proxy/api-resolve/src/resolve.rs @@ -24,7 +24,7 @@ impl Resolve where S: GrpcService + Clone + Send + 'static, S::Error: Into + Send, - S::ResponseBody: Default + Body + Send + 'static, + S::ResponseBody: Body + Send + 'static, ::Error: Into + Send, S::Future: Send, { @@ -48,7 +48,7 @@ where T: Param, S: GrpcService + Clone + Send + 'static, S::Error: Into + Send, - S::ResponseBody: Default + Body + Send + 'static, + S::ResponseBody: Body + Send + 'static, ::Error: Into + Send, S::Future: Send, { diff --git a/linkerd/proxy/balance/Cargo.toml b/linkerd/proxy/balance/Cargo.toml index 86fa263763..1afdba6883 100644 --- a/linkerd/proxy/balance/Cargo.toml +++ b/linkerd/proxy/balance/Cargo.toml @@ -1,13 +1,14 @@ [package] name = "linkerd-proxy-balance" -version = "0.1.0" -edition = "2021" -license = "Apache-2.0" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [dependencies] futures = { version = "0.3", default-features = false } -rand = "0.8" +rand = "0.9" tokio = { version = "1", features = ["time"] } tracing = "0.1" @@ -20,6 +21,6 @@ linkerd-proxy-balance-queue = { path = "queue" } linkerd-stack = { path = "../../stack" } [dependencies.tower] -version = "0.4.13" +workspace = true default-features = false features = ["load"] diff --git a/linkerd/proxy/balance/gauge-endpoints/Cargo.toml b/linkerd/proxy/balance/gauge-endpoints/Cargo.toml index 
784908cea6..64ab2fc2a5 100644 --- a/linkerd/proxy/balance/gauge-endpoints/Cargo.toml +++ b/linkerd/proxy/balance/gauge-endpoints/Cargo.toml @@ -1,11 +1,12 @@ [package] name = "linkerd-proxy-balance-gauge-endpoints" -version = "0.1.0" -edition = "2021" -license = "Apache-2.0" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [dependencies] -prometheus-client = "0.22" +prometheus-client = { workspace = true } linkerd-stack = { path = "../../../stack" } diff --git a/linkerd/proxy/balance/queue/Cargo.toml b/linkerd/proxy/balance/queue/Cargo.toml index c55743a681..6ae63c7283 100644 --- a/linkerd/proxy/balance/queue/Cargo.toml +++ b/linkerd/proxy/balance/queue/Cargo.toml @@ -1,17 +1,17 @@ [package] name = "linkerd-proxy-balance-queue" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [dependencies] futures = { version = "0.3", default-features = false } parking_lot = "0.12" pin-project = "1" -prometheus-client = "0.22" -thiserror = "1" +prometheus-client = { workspace = true } +thiserror = "2" tokio = { version = "1", features = ["rt", "sync", "time"] } tokio-util = "0.7" tracing = "0.1" @@ -25,7 +25,7 @@ linkerd-stack = { path = "../../../stack" } [dev-dependencies] tokio-stream = { version = "0.1", features = ["sync"] } tokio-test = "0.4" -tower-test = "0.4" +tower-test = { workspace = true } linkerd-pool-mock = { path = "../../../pool/mock" } linkerd-tracing = { path = "../../../tracing" } diff --git a/linkerd/proxy/client-policy/Cargo.toml b/linkerd/proxy/client-policy/Cargo.toml index 7877924075..88d4356dc5 100644 --- a/linkerd/proxy/client-policy/Cargo.toml +++ b/linkerd/proxy/client-policy/Cargo.toml @@ -1,14 +1,15 @@ 
[package] name = "linkerd-proxy-client-policy" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [features] proto = [ "linkerd-http-route/proto", + "linkerd-tls-route/proto", "linkerd2-proxy-api", "prost-types", "thiserror", @@ -17,20 +18,22 @@ proto = [ [dependencies] ahash = "0.8" ipnet = "2" -http = "0.2" +http = { workspace = true } once_cell = { version = "1" } -prost-types = { version = "0.12", optional = true } -tonic = { version = "0.10", default-features = false } -thiserror = { version = "1", optional = true } +prost-types = { workspace = true, optional = true } +tonic = { workspace = true, default-features = false } +thiserror = { version = "2", optional = true } linkerd-error = { path = "../../error" } linkerd-exp-backoff = { path = "../../exp-backoff" } linkerd-http-route = { path = "../../http/route" } +linkerd-tls-route = { path = "../../tls/route" } +linkerd-opaq-route = { path = "../../opaq-route" } linkerd-proxy-api-resolve = { path = "../api-resolve" } linkerd-proxy-core = { path = "../core" } [dependencies.linkerd2-proxy-api] -version = "0.13" +workspace = true optional = true features = ["outbound"] diff --git a/linkerd/proxy/client-policy/src/grpc.rs b/linkerd/proxy/client-policy/src/grpc.rs index 7f6a05fc2c..ef159749b9 100644 --- a/linkerd/proxy/client-policy/src/grpc.rs +++ b/linkerd/proxy/client-policy/src/grpc.rs @@ -1,19 +1,29 @@ use crate::FailureAccrual; +use linkerd_exp_backoff::ExponentialBackoff; use linkerd_http_route::{grpc, http}; -use std::sync::Arc; +use std::{sync::Arc, time}; pub use linkerd_http_route::grpc::{filter, find, r#match, RouteMatch}; -pub type Policy = crate::RoutePolicy; +pub type Policy = crate::RoutePolicy; pub type Route = grpc::Route; pub type Rule = grpc::Rule; +#[derive(Clone, Debug, 
Default, PartialEq, Eq, Hash)] +pub struct RouteParams { + pub timeouts: crate::http::Timeouts, + pub retry: Option, + pub allow_l5d_request_headers: bool, + pub export_hostname_labels: bool, +} + // TODO HTTP2 settings #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct Grpc { pub routes: Arc<[Route]>, /// Configures how endpoints accrue observed failures. + // TODO(ver) Move this to backends and scope to endpoints. pub failure_accrual: FailureAccrual, } @@ -24,6 +34,15 @@ pub enum Filter { InternalError(&'static str), } +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub struct Retry { + pub max_retries: usize, + pub max_request_bytes: usize, + pub codes: Codes, + pub timeout: Option, + pub backoff: Option, +} + #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct Codes(pub Arc>); @@ -36,8 +55,7 @@ pub fn default(distribution: crate::RouteDistribution) -> Route { meta: crate::Meta::new_default("default"), filters: Arc::new([]), distribution, - failure_policy: Codes::default(), - request_timeout: None, + params: Default::default(), }, }], } @@ -87,7 +105,7 @@ pub mod proto { proto::{ BackendSet, InvalidBackend, InvalidDistribution, InvalidFailureAccrual, InvalidMeta, }, - Meta, RouteBackend, RouteDistribution, + ClientPolicyOverrides, Meta, RouteBackend, RouteDistribution, }; use linkerd2_proxy_api::outbound::{self, grpc_route}; use linkerd_http_route::{ @@ -102,7 +120,6 @@ pub mod proto { r#match::host::{proto::InvalidHostMatch, MatchHost}, }, }; - use std::time::Duration; #[derive(Debug, thiserror::Error)] pub enum InvalidGrpcRoute { @@ -127,8 +144,29 @@ pub mod proto { #[error("invalid failure accrual policy: {0}")] Breaker(#[from] InvalidFailureAccrual), - #[error("invalid duration: {0}")] - Duration(#[from] prost_types::DurationError), + #[error("{0}")] + Retry(#[from] InvalidRetry), + + #[error("invalid request timeout: {0}")] + RequestTimeout(#[from] prost_types::DurationError), + + #[error("{0}")] + Timeout(#[from] 
crate::http::proto::InvalidTimeouts), + } + + #[derive(Debug, thiserror::Error)] + pub enum InvalidRetry { + #[error("invalid max-retries: {0}")] + MaxRetries(u32), + + #[error("invalid condition")] + Condition, + + #[error("invalid timeout: {0}")] + Timeout(#[from] prost_types::DurationError), + + #[error("invalid backoff: {0}")] + Backoff(#[from] crate::proto::InvalidBackoff), } #[derive(Debug, thiserror::Error)] @@ -146,22 +184,22 @@ pub mod proto { Redirect(#[from] InvalidRequestRedirect), } - impl TryFrom for Grpc { - type Error = InvalidGrpcRoute; - fn try_from(proto: outbound::proxy_protocol::Grpc) -> Result { + impl Grpc { + pub fn try_from( + overrides: ClientPolicyOverrides, + proto: outbound::proxy_protocol::Grpc, + ) -> Result { let routes = proto .routes .into_iter() - .map(try_route) + .map(|p| try_route(overrides, p)) .collect::, _>>()?; Ok(Self { routes, failure_accrual: proto.failure_accrual.try_into()?, }) } - } - impl Grpc { pub fn fill_backends(&self, set: &mut BackendSet) { for Route { ref rules, .. } in &*self.routes { for Rule { ref policy, .. } in rules { @@ -171,7 +209,10 @@ pub mod proto { } } - fn try_route(proto: outbound::GrpcRoute) -> Result { + fn try_route( + overrides: ClientPolicyOverrides, + proto: outbound::GrpcRoute, + ) -> Result { let outbound::GrpcRoute { hosts, rules, @@ -189,7 +230,7 @@ pub mod proto { let rules = rules .into_iter() - .map(|rule| try_rule(&meta, rule)) + .map(|rule| try_rule(&meta, overrides, rule)) .collect::, _>>()?; Ok(Route { hosts, rules }) @@ -197,12 +238,17 @@ pub mod proto { fn try_rule( meta: &Arc, + overrides: ClientPolicyOverrides, proto: outbound::grpc_route::Rule, ) -> Result { + #[allow(deprecated)] let outbound::grpc_route::Rule { matches, backends, filters, + timeouts, + retry, + allow_l5d_request_headers, request_timeout, } = proto; @@ -220,7 +266,10 @@ pub mod proto { .ok_or(InvalidGrpcRoute::Missing("distribution"))? 
.try_into()?; - let request_timeout = request_timeout.map(Duration::try_from).transpose()?; + let mut params = + RouteParams::try_from_proto(timeouts, retry, allow_l5d_request_headers, overrides)?; + let legacy = request_timeout.map(TryInto::try_into).transpose()?; + params.timeouts.request = params.timeouts.request.or(legacy); Ok(Rule { matches, @@ -228,12 +277,60 @@ pub mod proto { meta: meta.clone(), filters, distribution, - failure_policy: Codes::default(), - request_timeout, + params, }, }) } + impl RouteParams { + fn try_from_proto( + timeouts: Option, + retry: Option, + allow_l5d_request_headers: bool, + overrides: ClientPolicyOverrides, + ) -> Result { + Ok(Self { + retry: retry.map(Retry::try_from).transpose()?, + timeouts: timeouts + .map(crate::http::Timeouts::try_from) + .transpose()? + .unwrap_or_default(), + allow_l5d_request_headers, + export_hostname_labels: overrides.export_hostname_labels, + }) + } + } + + impl TryFrom for Retry { + type Error = InvalidRetry; + + fn try_from(retry: outbound::grpc_route::Retry) -> Result { + let cond = retry.conditions.ok_or(InvalidRetry::Condition)?; + let codes = Codes(Arc::new( + [ + cond.cancelled.then_some(tonic::Code::Cancelled as u16), + cond.deadine_exceeded + .then_some(tonic::Code::DeadlineExceeded as u16), + cond.resource_exhausted + .then_some(tonic::Code::ResourceExhausted as u16), + cond.internal.then_some(tonic::Code::Internal as u16), + cond.unavailable.then_some(tonic::Code::Unavailable as u16), + ] + .into_iter() + .flatten() + .collect(), + )); + + Ok(Self { + codes, + max_retries: retry.max_retries as usize, + max_request_bytes: retry.max_request_bytes as _, + backoff: retry.backoff.map(crate::proto::try_backoff).transpose()?, + timeout: retry.timeout.map(time::Duration::try_from).transpose()?, + }) + } + } + impl TryFrom for RouteDistribution { type Error = InvalidDistribution; fn try_from(distribution: grpc_route::Distribution) -> Result { @@ -280,13 +377,11 @@ pub mod proto { type Error = 
InvalidBackend; fn try_from( grpc_route::RouteBackend { - backend, - filters, - request_timeout, + backend, filters, .. }: grpc_route::RouteBackend, ) -> Result, InvalidBackend> { let backend = backend.ok_or(InvalidBackend::Missing("backend"))?; - RouteBackend::try_from_proto(backend, filters, request_timeout) + RouteBackend::try_from_proto(backend, filters) } } diff --git a/linkerd/proxy/client-policy/src/http.rs b/linkerd/proxy/client-policy/src/http.rs index 58664c8e64..8f92b3d44f 100644 --- a/linkerd/proxy/client-policy/src/http.rs +++ b/linkerd/proxy/client-policy/src/http.rs @@ -1,13 +1,22 @@ use crate::FailureAccrual; +use linkerd_exp_backoff::ExponentialBackoff; use linkerd_http_route::http; -use std::{ops::RangeInclusive, sync::Arc}; +use std::{ops::RangeInclusive, sync::Arc, time}; pub use linkerd_http_route::http::{filter, find, r#match, RouteMatch}; -pub type Policy = crate::RoutePolicy; +pub type Policy = crate::RoutePolicy; pub type Route = http::Route; pub type Rule = http::Rule; +#[derive(Clone, Debug, Default, PartialEq, Eq, Hash)] +pub struct RouteParams { + pub timeouts: Timeouts, + pub retry: Option, + pub allow_l5d_request_headers: bool, + pub export_hostname_labels: bool, +} + // TODO: keepalive settings, etc. 
#[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct Http1 { @@ -35,9 +44,25 @@ pub enum Filter { InternalError(&'static str), } +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub struct Retry { + pub max_retries: u16, + pub max_request_bytes: usize, + pub status_ranges: StatusRanges, + pub timeout: Option, + pub backoff: Option, +} + #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct StatusRanges(pub Arc<[RangeInclusive]>); +#[derive(Clone, Debug, Default, PartialEq, Eq, Hash)] +pub struct Timeouts { + pub response: Option, + pub idle: Option, + pub request: Option, +} + pub fn default(distribution: crate::RouteDistribution) -> Route { Route { hosts: vec![], @@ -47,8 +72,7 @@ pub fn default(distribution: crate::RouteDistribution) -> Route { meta: crate::Meta::new_default("default"), filters: Arc::new([]), distribution, - failure_policy: StatusRanges::default(), - request_timeout: None, + params: RouteParams::default(), }, }], } @@ -92,6 +116,8 @@ impl Default for StatusRanges { } } +// === impl Timeouts === + #[cfg(feature = "proto")] pub mod proto { use super::*; @@ -99,7 +125,7 @@ pub mod proto { proto::{ BackendSet, InvalidBackend, InvalidDistribution, InvalidFailureAccrual, InvalidMeta, }, - Meta, RouteBackend, RouteDistribution, + ClientPolicyOverrides, Meta, RouteBackend, RouteDistribution, }; use linkerd2_proxy_api::outbound::{self, http_route}; use linkerd_http_route::http::{ @@ -130,11 +156,42 @@ pub mod proto { #[error("invalid failure accrual policy: {0}")] Breaker(#[from] InvalidFailureAccrual), + #[error("invalid request timeout: {0}")] + RequestTimeout(#[from] prost_types::DurationError), + #[error("missing {0}")] Missing(&'static str), - #[error("invalid request timeout: {0}")] + #[error(transparent)] + Timeout(#[from] InvalidTimeouts), + + #[error(transparent)] + Retry(#[from] InvalidRetry), + } + + #[derive(Debug, thiserror::Error)] + pub enum InvalidRetry { + #[error("invalid max-retries: {0}")] + MaxRetries(u32), + + #[error("invalid 
condition")] + Condition, + + #[error("invalid timeout: {0}")] Timeout(#[from] prost_types::DurationError), + + #[error("invalid backoff: {0}")] + Backoff(#[from] crate::proto::InvalidBackoff), + } + + #[derive(Debug, thiserror::Error)] + pub enum InvalidTimeouts { + #[error("invalid response timeout: {0}")] + Response(prost_types::DurationError), + #[error("invalid idle timeout: {0}")] + Idle(prost_types::DurationError), + #[error("invalid request timeout: {0}")] + Request(prost_types::DurationError), } #[derive(Debug, thiserror::Error)] @@ -160,13 +217,15 @@ pub mod proto { } } - impl TryFrom for Http1 { - type Error = InvalidHttpRoute; - fn try_from(proto: outbound::proxy_protocol::Http1) -> Result { + impl Http1 { + pub fn try_from( + overrides: ClientPolicyOverrides, + proto: outbound::proxy_protocol::Http1, + ) -> Result { let routes = proto .routes .into_iter() - .map(try_route) + .map(|p| try_route(overrides, p)) .collect::, _>>()?; Ok(Self { routes, @@ -175,13 +234,15 @@ pub mod proto { } } - impl TryFrom for Http2 { - type Error = InvalidHttpRoute; - fn try_from(proto: outbound::proxy_protocol::Http2) -> Result { + impl Http2 { + pub fn try_from( + overrides: ClientPolicyOverrides, + proto: outbound::proxy_protocol::Http2, + ) -> Result { let routes = proto .routes .into_iter() - .map(try_route) + .map(|p| try_route(overrides, p)) .collect::, _>>()?; Ok(Self { routes, @@ -190,7 +251,10 @@ pub mod proto { } } - fn try_route(proto: outbound::HttpRoute) -> Result { + fn try_route( + overrides: ClientPolicyOverrides, + proto: outbound::HttpRoute, + ) -> Result { let outbound::HttpRoute { hosts, rules, @@ -208,7 +272,7 @@ pub mod proto { let rules = rules .into_iter() - .map(|rule| try_rule(&meta, rule)) + .map(|rule| try_rule(&meta, overrides, rule)) .collect::, _>>()?; Ok(Route { hosts, rules }) @@ -216,12 +280,17 @@ pub mod proto { fn try_rule( meta: &Arc, + overrides: ClientPolicyOverrides, proto: outbound::http_route::Rule, ) -> Result { + 
#[allow(deprecated)] let outbound::http_route::Rule { matches, backends, filters, + timeouts, + retry, + allow_l5d_request_headers, request_timeout, } = proto; @@ -239,9 +308,10 @@ pub mod proto { .ok_or(InvalidHttpRoute::Missing("distribution"))? .try_into()?; - let request_timeout = request_timeout - .map(std::time::Duration::try_from) - .transpose()?; + let mut params = + RouteParams::try_from_proto(timeouts, retry, allow_l5d_request_headers, overrides)?; + let legacy = request_timeout.map(TryInto::try_into).transpose()?; + params.timeouts.request = params.timeouts.request.or(legacy); Ok(Rule { matches, @@ -249,12 +319,93 @@ pub mod proto { meta: meta.clone(), filters, distribution, - failure_policy: StatusRanges::default(), - request_timeout, + params, }, }) } + impl RouteParams { + fn try_from_proto( + timeouts: Option, + retry: Option, + allow_l5d_request_headers: bool, + overrides: ClientPolicyOverrides, + ) -> Result { + Ok(Self { + retry: retry.map(Retry::try_from).transpose()?, + timeouts: timeouts + .map(Timeouts::try_from) + .transpose()? 
 + .unwrap_or_default(), + allow_l5d_request_headers, + export_hostname_labels: overrides.export_hostname_labels, + }) + } + } + + impl TryFrom for Timeouts { + type Error = InvalidTimeouts; + fn try_from( + timeouts: linkerd2_proxy_api::http_route::Timeouts, + ) -> Result { + Ok(Self { + response: timeouts + .response + .map(time::Duration::try_from) + .transpose() + .map_err(InvalidTimeouts::Response)?, + idle: timeouts + .idle + .map(time::Duration::try_from) + .transpose() + .map_err(InvalidTimeouts::Idle)?, + request: timeouts + .request + .map(time::Duration::try_from) + .transpose() + .map_err(InvalidTimeouts::Request)?, + }) + } + } + + impl TryFrom for Retry { + type Error = InvalidRetry; + fn try_from(retry: outbound::http_route::Retry) -> Result { + fn range( + r: outbound::http_route::retry::conditions::StatusRange, + ) -> Result, InvalidRetry> { + let Ok(start) = u16::try_from(r.start) else { + return Err(InvalidRetry::Condition); + }; + let Ok(end) = u16::try_from(r.end) else { + return Err(InvalidRetry::Condition); + }; + if start == 0 || end == 0 || end > 599 || start > end { + return Err(InvalidRetry::Condition); + } + Ok(start..=end) + } + + let status_ranges = StatusRanges( + retry + .conditions + .ok_or(InvalidRetry::Condition)? + .status_ranges + .into_iter() + .map(range) + .collect::>()?, + ); + Ok(Self { + status_ranges, + max_retries: u16::try_from(retry.max_retries) + .map_err(|_| InvalidRetry::MaxRetries(retry.max_retries))?, + max_request_bytes: retry.max_request_bytes as _, + backoff: retry.backoff.map(crate::proto::try_backoff).transpose()?, + timeout: retry.timeout.map(time::Duration::try_from).transpose()?, + }) + } + } + impl TryFrom for RouteDistribution { + type Error = InvalidDistribution; + fn try_from(distribution: http_route::Distribution) -> Result { @@ -301,13 +452,11 @@ pub mod proto { type Error = InvalidBackend; fn try_from( http_route::RouteBackend { - backend, - filters, - request_timeout, + backend, filters, .. 
}: http_route::RouteBackend, ) -> Result { let backend = backend.ok_or(InvalidBackend::Missing("backend"))?; - RouteBackend::try_from_proto(backend, filters, request_timeout) + RouteBackend::try_from_proto(backend, filters) } } diff --git a/linkerd/proxy/client-policy/src/lib.rs b/linkerd/proxy/client-policy/src/lib.rs index ccbfa27be1..266a628e89 100644 --- a/linkerd/proxy/client-policy/src/lib.rs +++ b/linkerd/proxy/client-policy/src/lib.rs @@ -7,6 +7,7 @@ use std::{borrow::Cow, fmt, hash::Hash, net::SocketAddr, num::NonZeroU16, sync:: pub mod grpc; pub mod http; pub mod opaq; +pub mod tls; pub use linkerd_http_route as route; pub use linkerd_proxy_api_resolve::Metadata as EndpointMetadata; @@ -18,6 +19,11 @@ pub struct ClientPolicy { pub backends: Arc<[Backend]>, } +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub struct ClientPolicyOverrides { + pub export_hostname_labels: bool, +} + // TODO additional server configs (e.g. concurrency limits, window sizes, etc) #[derive(Clone, Debug, Eq, Hash, PartialEq)] pub enum Protocol { @@ -34,8 +40,7 @@ pub enum Protocol { Opaque(opaq::Opaque), - // TODO(ver) TLS-aware type - Tls(opaq::Opaque), + Tls(tls::Tls), } #[derive(Clone, Debug, Eq)] @@ -54,25 +59,12 @@ pub enum Meta { } #[derive(Clone, Debug, Eq, Hash, PartialEq)] -pub struct RoutePolicy { +pub struct RoutePolicy { pub meta: Arc, pub filters: Arc<[T]>, pub distribution: RouteDistribution, - /// Request timeout applied to HTTP and gRPC routes. - /// - /// Opaque routes are proxied as opaque TCP, and therefore, we have no - /// concept of a "request", so this field is ignored by opaque routes. - /// It's somewhat unfortunate that this field is part of the `RoutePolicy` - /// struct, which is used to represent routes for all protocols, rather than - /// as a filter, which are a generic type that depends on the protocol in - /// use. 
However, this can't be easily modeled as a filter using the current - /// design for filters, as filters synchronously modify a request or return - /// an error --- a filter cannot wrap the response future in order to add a - /// timeout. - pub request_timeout: Option, - - /// Configures what responses are classified as failures. - pub failure_policy: F, + + pub params: P, } // TODO(ver) Weighted random WITHOUT availability awareness, as required by @@ -90,7 +82,6 @@ pub enum RouteDistribution { pub struct RouteBackend { pub filters: Arc<[T]>, pub backend: Backend, - pub request_timeout: Option, } // TODO(ver) how does configuration like failure accrual fit in here? What about @@ -167,8 +158,7 @@ impl ClientPolicy { )) .collect(), distribution: RouteDistribution::Empty, - failure_policy: http::StatusRanges::default(), - request_timeout: None, + params: http::RouteParams::default(), }, }], }]) @@ -187,10 +177,19 @@ impl ClientPolicy { routes: HTTP_ROUTES.clone(), failure_accrual: Default::default(), }, + opaque: opaq::Opaque { - // TODO(eliza): eventually, can we configure the opaque - // policy to fail conns? - policy: None, + routes: Some(opaq::Route { + policy: opaq::Policy { + meta: META.clone(), + filters: std::iter::once(opaq::Filter::InternalError( + "invalid client policy configuration", + )) + .collect(), + distribution: RouteDistribution::Empty, + params: (), + }, + }), }, }, backends: BACKENDS.clone(), @@ -218,11 +217,7 @@ impl ClientPolicy { routes: NO_HTTP_ROUTES.clone(), failure_accrual: Default::default(), }, - opaque: opaq::Opaque { - // TODO(eliza): eventually, can we configure the opaque - // policy to fail conns? 
- policy: None, - }, + opaque: opaq::Opaque { routes: None }, }, backends: NO_BACKENDS.clone(), } @@ -348,6 +343,9 @@ pub mod proto { #[error("invalid opaque route: {0}")] OpaqueRoute(#[from] opaq::proto::InvalidOpaqueRoute), + #[error("invalid TLS route: {0}")] + TlsRoute(#[from] tls::proto::InvalidTlsRoute), + #[error("invalid backend: {0}")] Backend(#[from] InvalidBackend), @@ -422,21 +420,26 @@ pub mod proto { #[derive(Debug, thiserror::Error)] pub enum InvalidFailureAccrual { #[error("invalid backoff: {0}")] - Backoff(#[from] linkerd_exp_backoff::InvalidBackoff), - #[error("invalid {field} duration: {error}")] - Duration { - field: &'static str, - #[source] - error: prost_types::DurationError, - }, + Backoff(#[from] InvalidBackoff), #[error("missing {0}")] Missing(&'static str), } - impl TryFrom for ClientPolicy { - type Error = InvalidPolicy; + #[derive(Debug, thiserror::Error)] + pub enum InvalidBackoff { + #[error(transparent)] + Backoff(#[from] linkerd_exp_backoff::InvalidBackoff), + #[error("invalid duration: {0}")] + Duration(#[from] prost_types::DurationError), + #[error("missing {0}")] + Missing(&'static str), + } - fn try_from(policy: outbound::OutboundPolicy) -> Result { + impl ClientPolicy { + pub fn try_from( + overrides: ClientPolicyOverrides, + policy: outbound::OutboundPolicy, + ) -> Result { use outbound::proxy_protocol; let parent = policy @@ -462,16 +465,18 @@ pub mod proto { "Detect missing protocol detection timeout", ))? .try_into()?; - let http1: http::Http1 = http1 - .ok_or(InvalidPolicy::Protocol( + let http1 = http::Http1::try_from( + overrides, + http1.ok_or(InvalidPolicy::Protocol( "Detect missing HTTP/1 configuration", - ))? - .try_into()?; - let http2: http::Http2 = http2 - .ok_or(InvalidPolicy::Protocol( + ))?, + )?; + let http2 = http::Http2::try_from( + overrides, + http2.ok_or(InvalidPolicy::Protocol( "Detect missing HTTP/2 configuration", - ))? 
- .try_into()?; + ))?, + )?; let opaque: opaq::Opaque = opaque .ok_or(InvalidPolicy::Protocol( "Detect missing opaque configuration", @@ -486,10 +491,17 @@ pub mod proto { } } - proxy_protocol::Kind::Http1(http) => Protocol::Http1(http.try_into()?), - proxy_protocol::Kind::Http2(http) => Protocol::Http2(http.try_into()?), + proxy_protocol::Kind::Http1(http) => { + Protocol::Http1(http::Http1::try_from(overrides, http)?) + } + proxy_protocol::Kind::Http2(http) => { + Protocol::Http2(http::Http2::try_from(overrides, http)?) + } proxy_protocol::Kind::Opaque(opaque) => Protocol::Opaque(opaque.try_into()?), - proxy_protocol::Kind::Grpc(grpc) => Protocol::Grpc(grpc.try_into()?), + proxy_protocol::Kind::Grpc(grpc) => { + Protocol::Grpc(grpc::Grpc::try_from(overrides, grpc)?) + } + proxy_protocol::Kind::Tls(tls) => Protocol::Tls(tls.try_into()?), }; let mut backends = BackendSet::default(); @@ -502,13 +514,16 @@ pub mod proto { } => { http::proto::fill_route_backends(&http1.routes, &mut backends); http::proto::fill_route_backends(&http2.routes, &mut backends); - opaque.fill_backends(&mut backends); + opaq::proto::fill_route_backends(opaque.routes.as_ref(), &mut backends); } Protocol::Http1(http::Http1 { ref routes, .. }) | Protocol::Http2(http::Http2 { ref routes, .. 
}) => { http::proto::fill_route_backends(routes, &mut backends); } - Protocol::Opaque(ref p) | Protocol::Tls(ref p) => { + Protocol::Opaque(ref p) => { + opaq::proto::fill_route_backends(p.routes.as_ref(), &mut backends); + } + Protocol::Tls(ref p) => { p.fill_backends(&mut backends); } Protocol::Grpc(ref p) => { @@ -598,7 +613,6 @@ pub mod proto { pub(crate) fn try_from_proto( backend: outbound::Backend, filters: impl IntoIterator, - request_timeout: Option, ) -> Result where T: TryFrom, @@ -610,20 +624,8 @@ pub mod proto { .map(T::try_from) .collect::, _>>() .map_err(|error| InvalidBackend::Filter(error.into()))?; - let request_timeout = - request_timeout - .map(|d| d.try_into()) - .transpose() - .map_err(|error| InvalidBackend::Duration { - field: "backend request timeout", - error, - })?; - - Ok(RouteBackend { - filters, - backend, - request_timeout, - }) + + Ok(RouteBackend { filters, backend }) } } @@ -723,34 +725,12 @@ pub mod proto { failure_accrual::Kind::ConsecutiveFailures(ConsecutiveFailures { max_failures, backoff, - }) => { - // TODO(eliza): if other failure accrual kinds are added - // that also use exponential backoffs, this could be factored out... - let outbound::ExponentialBackoff { - min_backoff, - max_backoff, - jitter_ratio, - } = backoff.ok_or(InvalidFailureAccrual::Missing( - "consecutive failures backoff", - ))?; - - let duration = |dur: Option, field: &'static str| { - dur.ok_or(InvalidFailureAccrual::Missing(field))? 
- .try_into() - .map_err(|error| InvalidFailureAccrual::Duration { field, error }) - }; - let min = duration(min_backoff, "min_backoff")?; - let max = duration(max_backoff, "max_backoff")?; - let backoff = linkerd_exp_backoff::ExponentialBackoff::try_new( - min, - max, - jitter_ratio as f64, - )?; - Ok(FailureAccrual::ConsecutiveFailures { - max_failures: max_failures as usize, - backoff, - }) - } + }) => Ok(FailureAccrual::ConsecutiveFailures { + max_failures: max_failures as usize, + backoff: backoff.map(try_backoff).transpose()?.ok_or( + InvalidFailureAccrual::Missing("consecutive failures backoff"), + )?, + }), } } } @@ -763,4 +743,23 @@ pub mod proto { .unwrap_or(Ok(FailureAccrual::None)) } } + + pub(crate) fn try_backoff( + outbound::ExponentialBackoff { + min_backoff, + max_backoff, + jitter_ratio, + }: outbound::ExponentialBackoff, + ) -> Result { + let min = min_backoff + .map(time::Duration::try_from) + .transpose()? + .ok_or(InvalidBackoff::Missing("min_backoff"))?; + let max = max_backoff + .map(time::Duration::try_from) + .transpose()? 
+ .ok_or(InvalidBackoff::Missing("max_backoff"))?; + linkerd_exp_backoff::ExponentialBackoff::try_new(min, max, jitter_ratio as f64) + .map_err(Into::into) + } } diff --git a/linkerd/proxy/client-policy/src/opaq.rs b/linkerd/proxy/client-policy/src/opaq.rs index 863067eecd..ad90d9d96b 100644 --- a/linkerd/proxy/client-policy/src/opaq.rs +++ b/linkerd/proxy/client-policy/src/opaq.rs @@ -1,17 +1,23 @@ -use crate::RoutePolicy; +use linkerd_opaq_route as opaq; -#[derive(Clone, Debug, Default, PartialEq, Eq, Hash)] +pub type Policy = crate::RoutePolicy; +pub type Route = opaq::Route; +pub type Rule = opaq::Rule; + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct Opaque { - pub policy: Option, + pub routes: Option, } -pub type Policy = RoutePolicy; - #[derive(Clone, Debug, Default, PartialEq, Eq, Hash)] pub struct NonIoErrors; #[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub enum Filter {} +pub enum Filter { + Forbidden, + Invalid(std::sync::Arc), + InternalError(&'static str), +} impl NonIoErrors { pub fn contains(&self, e: &(dyn std::error::Error + 'static)) -> bool { @@ -28,12 +34,8 @@ pub(crate) mod proto { Meta, RouteBackend, RouteDistribution, }; use linkerd2_proxy_api::outbound::{self, opaque_route}; - - use once_cell::sync::Lazy; use std::sync::Arc; - pub(crate) static NO_FILTERS: Lazy> = Lazy::new(|| Arc::new([])); - #[derive(Debug, thiserror::Error)] pub enum InvalidOpaqueRoute { #[error("invalid route metadata: {0}")] @@ -42,6 +44,9 @@ pub(crate) mod proto { #[error("invalid distribution: {0}")] Distribution(#[from] InvalidDistribution), + #[error("invalid filter: {0}")] + Filter(#[from] InvalidFilter), + /// Note: this restriction may be removed in the future, if a way of /// actually matching rules for opaque routes is added. 
#[error("an opaque route must have exactly one rule, but {0} were provided")] @@ -59,76 +64,72 @@ pub(crate) mod proto { Missing(&'static str), } - impl TryFrom for Opaque { - type Error = InvalidOpaqueRoute; - fn try_from(proto: outbound::proxy_protocol::Opaque) -> Result { - if proto.routes.len() != 1 { - return Err(InvalidOpaqueRoute::OnlyOneRoute(proto.routes.len())); - } + #[derive(Debug, thiserror::Error)] + pub enum InvalidFilter { + #[error("invalid route error kind: {0}")] + InvalidRouteErrorKind(i32), - proto - .routes - .into_iter() - .next() - .ok_or(InvalidOpaqueRoute::OnlyOneRoute(0))? - .try_into() + #[error("missing filter kind")] + Missing, + } + + pub(crate) fn fill_route_backends(rts: Option<&Route>, set: &mut BackendSet) { + if let Some(Route { policy, .. }) = rts { + policy.distribution.fill_backends(set); } } - impl TryFrom for Opaque { + impl TryFrom for Opaque { type Error = InvalidOpaqueRoute; - - fn try_from( - outbound::OpaqueRoute { metadata, rules }: outbound::OpaqueRoute, - ) -> Result { - let meta = Arc::new( - metadata - .ok_or(InvalidMeta("missing metadata"))? - .try_into()?, - ); - - // Currently, opaque rules have no match expressions, so if there's - // more than one rule, we have no way of determining which one to - // use. Therefore, require that there's exactly one rule. 
- if rules.len() != 1 { - return Err(InvalidOpaqueRoute::OnlyOneRule(rules.len())); + fn try_from(proto: outbound::proxy_protocol::Opaque) -> Result { + if proto.routes.len() > 1 { + return Err(InvalidOpaqueRoute::OnlyOneRoute(proto.routes.len())); } + let routes = proto.routes.into_iter().next().map(try_route).transpose()?; - let policy = rules - .into_iter() - .map(|rule| try_rule(&meta, rule)) - .next() - .ok_or(InvalidOpaqueRoute::OnlyOneRule(0))??; - - Ok(Self { - policy: Some(policy), - }) + Ok(Self { routes }) } } - impl Opaque { - pub(crate) fn fill_backends(&self, set: &mut BackendSet) { - for p in &self.policy { - p.distribution.fill_backends(set); - } + fn try_route( + outbound::OpaqueRoute { metadata, rules }: outbound::OpaqueRoute, + ) -> Result { + let meta = Arc::new( + metadata + .ok_or(InvalidMeta("missing metadata"))? + .try_into()?, + ); + + // Currently, opaque rules have no match expressions, so if there's + // more than one rule, we have no way of determining which one to + // use. Therefore, require that there's exactly one rule. + if rules.len() != 1 { + return Err(InvalidOpaqueRoute::OnlyOneRule(rules.len())); } + + let rule = rules.first().cloned().expect("already checked"); + let policy = try_rule(&meta, rule)?; + Ok(Route { policy }) } fn try_rule( meta: &Arc, - opaque_route::Rule { backends }: opaque_route::Rule, + opaque_route::Rule { backends, filters }: opaque_route::Rule, ) -> Result { let distribution = backends .ok_or(InvalidOpaqueRoute::Missing("distribution"))? .try_into()?; + let filters = filters + .into_iter() + .map(Filter::try_from) + .collect::, _>>()?; + Ok(Policy { meta: meta.clone(), - filters: NO_FILTERS.clone(), - failure_policy: NonIoErrors, + filters, + params: (), distribution, - // Request timeouts are ignored on opaque routes. 
- request_timeout: None, }) } @@ -177,19 +178,24 @@ pub(crate) mod proto { impl TryFrom for RouteBackend { type Error = InvalidBackend; fn try_from( - opaque_route::RouteBackend { backend }: opaque_route::RouteBackend, - ) -> Result { + opaque_route::RouteBackend { backend, filters }: opaque_route::RouteBackend, + ) -> Result, InvalidBackend> { let backend = backend.ok_or(InvalidBackend::Missing("backend"))?; - RouteBackend::try_from_proto(backend, std::iter::empty::<()>(), None) + RouteBackend::try_from_proto(backend, filters) } } - // Necessary to satisfy `RouteBackend::try_from_proto` type constraints. - // TODO(eliza): if filters are added to opaque routes, change this to a - // proper `TryFrom` impl... - impl From<()> for Filter { - fn from(_: ()) -> Self { - unreachable!("no filters can be configured on opaque routes yet") + impl TryFrom for Filter { + type Error = InvalidFilter; + + fn try_from(filter: opaque_route::Filter) -> Result { + use linkerd2_proxy_api::opaque_route::Invalid; + use opaque_route::filter::Kind; + + match filter.kind.ok_or(InvalidFilter::Missing)? 
{ + Kind::Invalid(Invalid { message }) => Ok(Filter::Invalid(message.into())), + Kind::Forbidden(_) => Ok(Filter::Forbidden), + } } } } diff --git a/linkerd/proxy/client-policy/src/tls.rs b/linkerd/proxy/client-policy/src/tls.rs new file mode 100644 index 0000000000..4f20f4cfe1 --- /dev/null +++ b/linkerd/proxy/client-policy/src/tls.rs @@ -0,0 +1,231 @@ +use linkerd_tls_route as tls; +pub use linkerd_tls_route::{find, sni, RouteMatch}; +use std::sync::Arc; + +pub type Policy = crate::RoutePolicy; +pub type Route = tls::Route; + +#[derive(Clone, Debug, Default, PartialEq, Eq, Hash)] +pub struct RouteParams { + pub export_hostname_labels: bool, +} + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub struct Tls { + pub routes: Arc<[Route]>, +} + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub enum Filter { + Forbidden, + Invalid(Arc), + InternalError(&'static str), +} + +pub fn default(distribution: crate::RouteDistribution) -> Route { + Route { + snis: vec![], + policy: Policy { + meta: crate::Meta::new_default("default"), + filters: Arc::new([]), + params: Default::default(), + distribution, + }, + } +} + +impl Default for Tls { + fn default() -> Self { + Self { + routes: Arc::new([]), + } + } +} + +#[cfg(feature = "proto")] +pub(crate) mod proto { + use super::*; + use crate::{ + proto::{BackendSet, InvalidBackend, InvalidDistribution, InvalidMeta}, + Meta, RouteBackend, RouteDistribution, + }; + use linkerd2_proxy_api::outbound::{self, tls_route}; + use linkerd_tls_route::sni::proto::InvalidSniMatch; + use std::sync::Arc; + + #[derive(Debug, thiserror::Error)] + pub enum InvalidTlsRoute { + #[error("invalid sni match: {0}")] + SniMatch(#[from] InvalidSniMatch), + + #[error("invalid route metadata: {0}")] + Meta(#[from] InvalidMeta), + + #[error("invalid distribution: {0}")] + Distribution(#[from] InvalidDistribution), + + #[error("invalid filter: {0}")] + Filter(#[from] InvalidFilter), + + /// Note: this restriction may be removed in the future, if a way of + 
/// actually matching rules for TLS routes is added. + #[error("a TLS route must have exactly one rule, but {0} were provided")] + OnlyOneRule(usize), + + #[error("no filters can be configured on opaque routes yet")] + NoFilters, + + #[error("missing {0}")] + Missing(&'static str), + } + + #[derive(Debug, thiserror::Error)] + pub enum InvalidFilter { + #[error("invalid route error kind: {0}")] + InvalidRouteErrorKind(i32), + + #[error("missing filter kind")] + Missing, + } + + impl TryFrom for Tls { + type Error = InvalidTlsRoute; + fn try_from(proto: outbound::proxy_protocol::Tls) -> Result { + let routes = proto + .routes + .into_iter() + .map(try_route) + .collect::, _>>()?; + + Ok(Self { routes }) + } + } + + impl Tls { + pub fn fill_backends(&self, set: &mut BackendSet) { + for Route { ref policy, .. } in &*self.routes { + policy.distribution.fill_backends(set); + } + } + } + + fn try_route(proto: outbound::TlsRoute) -> Result { + let outbound::TlsRoute { + rules, + snis, + metadata, + } = proto; + let meta = Arc::new( + metadata + .ok_or(InvalidMeta("missing metadata"))? + .try_into()?, + ); + + let snis = snis + .into_iter() + .map(sni::MatchSni::try_from) + .collect::, _>>()?; + + if rules.len() != 1 { + // Currently, TLS rules have no match expressions, so if there's + // more than one rule, we have no way of determining which one to + // use. Therefore, require that there's exactly one rule. + return Err(InvalidTlsRoute::OnlyOneRule(rules.len())); + } + + let policy = rules + .into_iter() + .map(|rule| try_rule(&meta, rule)) + .next() + .ok_or(InvalidTlsRoute::OnlyOneRule(0))??; + + Ok(Route { snis, policy }) + } + + fn try_rule( + meta: &Arc, + tls_route::Rule { backends, filters }: tls_route::Rule, + ) -> Result { + let distribution = backends + .ok_or(InvalidTlsRoute::Missing("distribution"))? 
+ .try_into()?; + + let filters = filters + .into_iter() + .map(Filter::try_from) + .collect::, _>>()?; + + Ok(Policy { + meta: meta.clone(), + filters, + params: Default::default(), + distribution, + }) + } + + impl TryFrom for RouteDistribution { + type Error = InvalidDistribution; + fn try_from(distribution: tls_route::Distribution) -> Result { + use tls_route::{distribution, WeightedRouteBackend}; + + Ok( + match distribution.kind.ok_or(InvalidDistribution::Missing)? { + distribution::Kind::Empty(_) => RouteDistribution::Empty, + distribution::Kind::RandomAvailable(distribution::RandomAvailable { + backends, + }) => { + let backends = backends + .into_iter() + .map(|WeightedRouteBackend { weight, backend }| { + let backend = backend + .ok_or(InvalidDistribution::MissingBackend)? + .try_into()?; + Ok((backend, weight)) + }) + .collect::, InvalidDistribution>>()?; + if backends.is_empty() { + return Err(InvalidDistribution::Empty("RandomAvailable")); + } + RouteDistribution::RandomAvailable(backends) + } + distribution::Kind::FirstAvailable(distribution::FirstAvailable { + backends, + }) => { + let backends = backends + .into_iter() + .map(RouteBackend::try_from) + .collect::, InvalidBackend>>()?; + if backends.is_empty() { + return Err(InvalidDistribution::Empty("FirstAvailable")); + } + RouteDistribution::FirstAvailable(backends) + } + }, + ) + } + } + + impl TryFrom for RouteBackend { + type Error = InvalidBackend; + fn try_from( + tls_route::RouteBackend { backend, filters }: tls_route::RouteBackend, + ) -> Result, InvalidBackend> { + let backend = backend.ok_or(InvalidBackend::Missing("backend"))?; + RouteBackend::try_from_proto(backend, filters) + } + } + + impl TryFrom for Filter { + type Error = InvalidFilter; + + fn try_from(filter: tls_route::Filter) -> Result { + use linkerd2_proxy_api::opaque_route::Invalid; + use tls_route::filter::Kind; + + match filter.kind.ok_or(InvalidFilter::Missing)? 
{ + Kind::Invalid(Invalid { message }) => Ok(Filter::Invalid(message.into())), + Kind::Forbidden(_) => Ok(Filter::Forbidden), + } + } + } +} diff --git a/linkerd/proxy/core/Cargo.toml b/linkerd/proxy/core/Cargo.toml index 8dd04d1551..176810c42c 100644 --- a/linkerd/proxy/core/Cargo.toml +++ b/linkerd/proxy/core/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "linkerd-proxy-core" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } description = """ Core interfaces needed to implement proxy components """ @@ -14,6 +14,6 @@ futures = { version = "0.3", default-features = false } linkerd-error = { path = "../../error" } [dependencies.tower] -version = "0.4" +workspace = true default-features = false features = ["util"] diff --git a/linkerd/proxy/dns-resolve/Cargo.toml b/linkerd/proxy/dns-resolve/Cargo.toml index 180361ef2d..89a6743789 100644 --- a/linkerd/proxy/dns-resolve/Cargo.toml +++ b/linkerd/proxy/dns-resolve/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "linkerd-proxy-dns-resolve" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } description = """ Service Dns Resolutions for the proxy """ @@ -18,5 +18,5 @@ linkerd-proxy-core = { path = "../core" } linkerd-stack = { path = "../../stack" } tokio = { version = "1", features = ["sync"] } tokio-stream = { version = "0.1", features = ["sync"] } -tower = "0.4" +tower = { workspace = true } tracing = "0.1" diff --git a/linkerd/proxy/dns-resolve/src/lib.rs b/linkerd/proxy/dns-resolve/src/lib.rs index c69bec4376..f8de9c6098 100644 --- a/linkerd/proxy/dns-resolve/src/lib.rs +++ 
b/linkerd/proxy/dns-resolve/src/lib.rs @@ -80,7 +80,7 @@ async fn resolution(dns: dns::Resolver, na: NameAddr) -> Result Result { debug!(%error); @@ -107,3 +107,26 @@ async fn resolution(dns: dns::Resolver, na: NameAddr) -> Result= minimum { + valid_until + } else { + debug!(ttl.min = ?MINIMUM_TTL, "Given TTL too short, using a minimum TTL"); + minimum + }; + + sleep_until(deadline).await; +} diff --git a/linkerd/proxy/http/Cargo.toml b/linkerd/proxy/http/Cargo.toml index ea24295365..cb04df260e 100644 --- a/linkerd/proxy/http/Cargo.toml +++ b/linkerd/proxy/http/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "linkerd-proxy-http" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } description = """ HTTP-specific implementations that rely on other proxy infrastructure @@ -13,44 +13,61 @@ This should probably be decomposed into smaller, decoupled crates. 
[dependencies] async-trait = "0.1" -bytes = "1" -drain = "0.1" +bytes = { workspace = true } +drain = { workspace = true } futures = { version = "0.3", default-features = false } -h2 = "0.3" -http = "0.2" -http-body = "0.4" +h2 = { workspace = true } +http = { workspace = true } +http-body = { workspace = true } httparse = "1" -hyper = { version = "0.14", features = [ +hyper = { workspace = true, features = [ "client", "http1", "http2", "server", - "stream", - "runtime", ] } hyper-balance = { path = "../../../hyper-balance" } +hyper-util = { workspace = true, default-features = false, features = [ + "client", + "client-legacy", + "http1", + "service", + "tokio", + "tracing", +] } +parking_lot = "0.12" pin-project = "1" -rand = "0.8" -thiserror = "1" +rand = "0.9" +thiserror = "2" tokio = { version = "1", features = ["rt", "sync", "time"] } -tower = { version = "0.4", default-features = false } +tower = { workspace = true, default-features = false } tracing = "0.1" try-lock = "0.2" -linkerd-detect = { path = "../../detect" } linkerd-duplex = { path = "../../duplex" } linkerd-error = { path = "../../error" } linkerd-http-box = { path = "../../http/box" } -linkerd-http-h2 = { path = "../../http/h2" } linkerd-http-classify = { path = "../../http/classify" } +linkerd-http-detect = { path = "../../http/detect" } +linkerd-http-h2 = { path = "../../http/h2" } +linkerd-http-insert = { path = "../../http/insert" } +linkerd-http-override-authority = { path = "../../http/override-authority" } +linkerd-http-retain = { path = "../../http/retain" } +linkerd-http-stream-timeouts = { path = "../../http/stream-timeouts" } +linkerd-http-upgrade = { path = "../../http/upgrade" } +linkerd-http-variant = { path = "../../http/variant" } linkerd-io = { path = "../../io" } linkerd-proxy-balance = { path = "../balance" } linkerd-stack = { path = "../../stack" } -[target.'cfg(fuzzing)'.dependencies] -tokio-test = "0.4" - [dev-dependencies] +http-body-util = { workspace = true, features = 
["channel"] } tokio-test = "0.4" -tower-test = "0.4" +tower-test = { workspace = true } linkerd-tracing = { path = "../../tracing", features = ["ansi"] } + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ['cfg(fuzzing)'] } + +[target.'cfg(fuzzing)'.dependencies] +tokio-test = "0.4" diff --git a/linkerd/proxy/http/fuzz/Cargo.toml b/linkerd/proxy/http/fuzz/Cargo.toml index d7970588e8..63c0fd6fe4 100644 --- a/linkerd/proxy/http/fuzz/Cargo.toml +++ b/linkerd/proxy/http/fuzz/Cargo.toml @@ -1,10 +1,10 @@ - [package] name = "linkerd-proxy-http-fuzz" -version = "0.0.0" -authors = ["Linkerd Developers "] -publish = false -edition = "2021" +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [package.metadata] cargo-fuzz = true diff --git a/linkerd/proxy/http/src/classify.rs b/linkerd/proxy/http/src/classify.rs deleted file mode 100644 index 487e047981..0000000000 --- a/linkerd/proxy/http/src/classify.rs +++ /dev/null @@ -1,10 +0,0 @@ -pub mod channel; -pub mod gate; -mod insert; - -pub use self::{ - channel::{BroadcastClassification, NewBroadcastClassification, Tx}, - gate::{NewClassifyGate, NewClassifyGateSet}, - insert::{InsertClassifyResponse, NewInsertClassifyResponse}, -}; -pub use linkerd_http_classify::*; diff --git a/linkerd/proxy/http/src/client.rs b/linkerd/proxy/http/src/client.rs index 1770f56920..88f413c6a4 100644 --- a/linkerd/proxy/http/src/client.rs +++ b/linkerd/proxy/http/src/client.rs @@ -59,11 +59,11 @@ impl tower::Service for MakeClient where T: Clone + Send + Sync + 'static, X: ExtractParam, - C: MakeConnection<(crate::Version, T)> + Clone + Unpin + Send + Sync + 'static, + C: MakeConnection<(crate::Variant, T)> + Clone + Unpin + Send + Sync + 'static, C::Connection: Unpin + Send, C::Metadata: Send, C::Future: Unpin + Send + 'static, - B: hyper::body::HttpBody + Send + 'static, + B: crate::Body + Send + Unpin + 'static, B::Data: 
Send, B::Error: Into + Send + Sync, { @@ -119,11 +119,11 @@ type RspFuture = Pin>> + impl Service> for Client where T: Clone + Send + Sync + 'static, - C: MakeConnection<(crate::Version, T)> + Clone + Send + Sync + 'static, + C: MakeConnection<(crate::Variant, T)> + Clone + Send + Sync + 'static, C::Connection: Unpin + Send, C::Future: Unpin + Send + 'static, C::Error: Into, - B: hyper::body::HttpBody + Send + 'static, + B: crate::Body + Send + Unpin + 'static, B::Data: Send, B::Error: Into + Send + Sync, { diff --git a/linkerd/proxy/http/src/detect.rs b/linkerd/proxy/http/src/detect.rs deleted file mode 100644 index 1eb27d1c4f..0000000000 --- a/linkerd/proxy/http/src/detect.rs +++ /dev/null @@ -1,171 +0,0 @@ -use crate::Version; -use bytes::BytesMut; -use linkerd_detect::Detect; -use linkerd_error::Error; -use linkerd_io::{self as io, AsyncReadExt}; -use tracing::{debug, trace}; - -// Coincidentally, both our abbreviated H2 preface and our smallest possible -// HTTP/1 message are 14 bytes. -const H2_PREFACE: &[u8] = b"PRI * HTTP/2.0"; -const SMALLEST_POSSIBLE_HTTP1_REQ: &str = "GET / HTTP/1.1"; - -/// Attempts to detect the HTTP version of a stream. -/// -/// This module biases towards availability instead of correctness. I.e. instead -/// of buffering until we can be sure that we're dealing with an HTTP stream, we -/// instead perform only a single read and use that data to inform protocol -/// hinting. If a single read doesn't provide enough data to make a decision, we -/// treat the protocol as unknown. -/// -/// This allows us to interoperate with protocols that send very small initial -/// messages. In rare situations, we may fail to properly detect that a stream is -/// HTTP. 
-#[derive(Clone, Debug, Default)] -pub struct DetectHttp(()); - -#[async_trait::async_trait] -impl Detect for DetectHttp { - type Protocol = Version; - - async fn detect(&self, io: &mut I, buf: &mut BytesMut) -> Result, Error> { - trace!(capacity = buf.capacity(), "Reading"); - let sz = io.read_buf(buf).await?; - trace!(sz, "Read"); - if sz == 0 { - // No data was read because the socket closed or the - // buffer capacity was exhausted. - debug!(read = buf.len(), "Could not detect protocol"); - return Ok(None); - } - - // HTTP/2 checking is faster because it's a simple string match. If we - // have enough data, check it first. We don't bother matching on the - // entire H2 preface because the first part is enough to get a clear - // signal. - if buf.len() >= H2_PREFACE.len() { - trace!("Checking H2 preface"); - if &buf[..H2_PREFACE.len()] == H2_PREFACE { - trace!("Matched HTTP/2 prefix"); - return Ok(Some(Version::H2)); - } - } - - // Otherwise, we try to parse the data as an HTTP/1 message. 
- if buf.len() >= SMALLEST_POSSIBLE_HTTP1_REQ.len() { - trace!("Parsing HTTP/1 message"); - if let Ok(_) | Err(httparse::Error::TooManyHeaders) = - httparse::Request::new(&mut [httparse::EMPTY_HEADER; 0]).parse(&buf[..]) - { - trace!("Matched HTTP/1"); - return Ok(Some(Version::Http1)); - } - } - - trace!("Not HTTP"); - Ok(None) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use tokio_test::io; - - const HTTP11_LINE: &[u8] = b"GET / HTTP/1.1\r\n"; - const H2_AND_GARBAGE: &[u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\ngarbage"; - const GARBAGE: &[u8] = - b"garbage garbage garbage garbage garbage garbage garbage garbage garbage garbage garbage garbage garbage garbage garbage garbage garbage"; - - #[tokio::test] - async fn h2() { - let _trace = linkerd_tracing::test::trace_init(); - - for read in &[H2_PREFACE, H2_AND_GARBAGE] { - debug!(read = ?std::str::from_utf8(read).unwrap()); - let mut buf = BytesMut::with_capacity(1024); - let mut io = io::Builder::new().read(read).build(); - let kind = DetectHttp(()).detect(&mut io, &mut buf).await.unwrap(); - assert_eq!(kind, Some(Version::H2)); - } - } - - #[tokio::test] - async fn http1() { - let _trace = linkerd_tracing::test::trace_init(); - - for i in 1..SMALLEST_POSSIBLE_HTTP1_REQ.len() { - debug!(read = ?std::str::from_utf8(&HTTP11_LINE[..i]).unwrap()); - let mut buf = BytesMut::with_capacity(1024); - let mut io = io::Builder::new().read(&HTTP11_LINE[..i]).build(); - let kind = DetectHttp(()).detect(&mut io, &mut buf).await.unwrap(); - assert_eq!(kind, None); - } - - debug!(read = ?std::str::from_utf8(HTTP11_LINE).unwrap()); - let mut buf = BytesMut::with_capacity(1024); - let mut io = io::Builder::new().read(HTTP11_LINE).build(); - let kind = DetectHttp(()).detect(&mut io, &mut buf).await.unwrap(); - assert_eq!(kind, Some(Version::Http1)); - - const REQ: &[u8] = b"GET /foo/bar/bar/blah HTTP/1.1\r\nHost: foob.example.com\r\n\r\n"; - for i in SMALLEST_POSSIBLE_HTTP1_REQ.len()..REQ.len() { - debug!(read = 
?std::str::from_utf8(&REQ[..i]).unwrap()); - let mut buf = BytesMut::with_capacity(1024); - let mut io = io::Builder::new().read(&REQ[..i]).build(); - let kind = DetectHttp(()).detect(&mut io, &mut buf).await.unwrap(); - assert_eq!(kind, Some(Version::Http1)); - assert_eq!(buf[..], REQ[..i]); - } - - // Starts with a P, like the h2 preface. - const POST: &[u8] = b"POST /foo HTTP/1.1\r\n"; - for i in SMALLEST_POSSIBLE_HTTP1_REQ.len()..POST.len() { - let mut buf = BytesMut::with_capacity(1024); - let mut io = io::Builder::new().read(&POST[..i]).build(); - debug!(read = ?std::str::from_utf8(&POST[..i]).unwrap()); - let kind = DetectHttp(()).detect(&mut io, &mut buf).await.unwrap(); - assert_eq!(kind, Some(Version::Http1)); - assert_eq!(buf[..], POST[..i]); - } - } - - #[tokio::test(flavor = "current_thread")] - async fn unknown() { - let _trace = linkerd_tracing::test::trace_init(); - - let mut buf = BytesMut::with_capacity(1024); - let mut io = io::Builder::new().read(b"foo.bar.blah\r\nbobo").build(); - let kind = DetectHttp(()).detect(&mut io, &mut buf).await.unwrap(); - assert_eq!(kind, None); - assert_eq!(&buf[..], b"foo.bar.blah\r\nbobo"); - - let mut buf = BytesMut::with_capacity(1024); - let mut io = io::Builder::new().read(GARBAGE).build(); - let kind = DetectHttp(()).detect(&mut io, &mut buf).await.unwrap(); - assert_eq!(kind, None); - assert_eq!(&buf[..], GARBAGE); - } -} - -#[cfg(fuzzing)] -pub mod fuzz_logic { - use super::*; - - pub async fn fuzz_entry(input: &[u8]) { - use tokio::io::AsyncWriteExt; - - let (mut client, mut server) = tokio::io::duplex(input.len()); - - let mut buf = bytes::Bytes::copy_from_slice(input); - let write = tokio::spawn(async move { client.write_buf(&mut buf).await }); - - let mut buf = BytesMut::with_capacity(1024); - let _kind = DetectHttp(()).detect(&mut server, &mut buf).await.unwrap(); - - write - .await - .expect("Spawn must succeed") - .expect("Write must succeed"); - } -} diff --git a/linkerd/proxy/http/src/executor.rs 
b/linkerd/proxy/http/src/executor.rs deleted file mode 100644 index b97fc4e65c..0000000000 --- a/linkerd/proxy/http/src/executor.rs +++ /dev/null @@ -1,16 +0,0 @@ -use std::future::Future; -use tracing::instrument::Instrument; - -#[derive(Clone, Debug, Default)] -pub struct TracingExecutor; - -impl hyper::rt::Executor for TracingExecutor -where - F: Future + Send + 'static, - F::Output: Send + 'static, -{ - #[inline] - fn execute(&self, f: F) { - tokio::spawn(f.in_current_span()); - } -} diff --git a/linkerd/proxy/http/src/h1.rs b/linkerd/proxy/http/src/h1.rs index 3401458426..9f51ac9f17 100644 --- a/linkerd/proxy/http/src/h1.rs +++ b/linkerd/proxy/http/src/h1.rs @@ -1,7 +1,4 @@ -use crate::{ - glue::HyperConnect, - upgrade::{Http11Upgrade, HttpConnect}, -}; +use crate::TokioExecutor; use futures::prelude::*; use http::{ header::{CONTENT_LENGTH, TRANSFER_ENCODING}, @@ -9,6 +6,7 @@ use http::{ }; use linkerd_error::{Error, Result}; use linkerd_http_box::BoxBody; +use linkerd_http_upgrade::{glue::HyperConnect, upgrade::Http11Upgrade}; use linkerd_stack::MakeConnection; use std::{pin::Pin, time::Duration}; use tracing::{debug, trace}; @@ -32,8 +30,8 @@ pub struct PoolSettings { pub struct Client { connect: C, target: T, - absolute_form: Option, B>>, - origin_form: Option, B>>, + absolute_form: Option, B>>, + origin_form: Option, B>>, pool: PoolSettings, } @@ -66,10 +64,10 @@ type RspFuture = Pin>> + impl Client where T: Clone + Send + Sync + 'static, - C: MakeConnection<(crate::Version, T)> + Clone + Send + Sync + 'static, + C: MakeConnection<(crate::Variant, T)> + Clone + Send + Sync + 'static, C::Connection: Unpin + Send, C::Future: Unpin + Send + 'static, - B: hyper::body::HttpBody + Send + 'static, + B: crate::Body + Send + Unpin + 'static, B::Data: Send, B::Error: Into + Send + Sync, { @@ -93,7 +91,7 @@ where // ish, so we just build a one-off client for the connection. // There's no real reason to hold the client for re-use. 
debug!(use_absolute_form, is_missing_host, "Using one-off client"); - hyper::Client::builder() + hyper_util::client::legacy::Client::builder(TokioExecutor::new()) .pool_max_idle_per_host(0) .set_host(use_absolute_form) .build(HyperConnect::new( @@ -118,7 +116,7 @@ where if client.is_none() { debug!(use_absolute_form, "Caching new client"); *client = Some( - hyper::Client::builder() + hyper_util::client::legacy::Client::builder(TokioExecutor::new()) .pool_max_idle_per_host(self.pool.max_idle) .pool_idle_timeout(self.pool.idle_timeout) .set_host(use_absolute_form) @@ -133,70 +131,64 @@ where client.as_ref().unwrap().request(req) }; - Box::pin(rsp_fut.err_into().map_ok(move |mut rsp| { + Box::pin(async move { + let mut rsp = rsp_fut.await?; if is_http_connect { - debug_assert!( - upgrade.is_some(), - "Upgrade extension must be set on CONNECT requests" - ); - rsp.extensions_mut().insert(HttpConnect); - - // Strip headers that may not be transmitted to the server, per - // https://tools.ietf.org/html/rfc7231#section-4.3.6: + // Strip headers that may not be transmitted to the server, per RFC 9110: + // + // > A server MUST NOT send any `Transfer-Encoding` or `Content-Length` header + // > fields in a 2xx (Successful) response to `CONNECT`. A client MUST ignore any + // > `Content-Length` or `Transfer-Encoding` header fields received in a successful + // > response to `CONNECT`. // - // A client MUST ignore any Content-Length or Transfer-Encoding - // header fields received in a successful response to CONNECT. 
+ // see: https://www.rfc-editor.org/rfc/rfc9110#section-9.3.6-12 if rsp.status().is_success() { rsp.headers_mut().remove(CONTENT_LENGTH); rsp.headers_mut().remove(TRANSFER_ENCODING); } } - if is_upgrade(&rsp) { + if is_upgrade(&rsp, is_http_connect) { trace!("Client response is HTTP/1.1 upgrade"); if let Some(upgrade) = upgrade { - upgrade.insert_half(hyper::upgrade::on(&mut rsp)); + upgrade.insert_half(hyper::upgrade::on(&mut rsp))?; } } else { - crate::strip_connection_headers(rsp.headers_mut()); + linkerd_http_upgrade::strip_connection_headers(rsp.headers_mut()); } - rsp.map(BoxBody::new) - })) + Ok(rsp.map(BoxBody::new)) + }) } } /// Checks responses to determine if they are successful HTTP upgrades. -pub(crate) fn is_upgrade(res: &http::Response) -> bool { - #[inline] - fn is_connect_success(res: &http::Response) -> bool { - res.extensions().get::().is_some() && res.status().is_success() - } - - // Upgrades were introduced in HTTP/1.1 - if res.version() != http::Version::HTTP_11 { - if is_connect_success(res) { - tracing::warn!( - "A successful response to a CONNECT request had an incorrect HTTP version \ - (expected HTTP/1.1, got {:?})", - res.version() - ); +fn is_upgrade(rsp: &http::Response, is_http_connect: bool) -> bool { + use http::Version; + + match rsp.version() { + Version::HTTP_11 => match rsp.status() { + // `101 Switching Protocols` indicates an upgrade. + http::StatusCode::SWITCHING_PROTOCOLS => true, + // CONNECT requests are complete if status code is 2xx. + status if is_http_connect && status.is_success() => true, + // Just a regular HTTP response... + _ => false, + }, + version => { + // Upgrades are specific to HTTP/1.1. They are not included in HTTP/1.0, nor are they + // supported in HTTP/2. If this response is associated with any protocol version + // besides HTTP/1.1, it is not applicable to an upgrade. 
+ if is_http_connect && rsp.status().is_success() { + tracing::warn!( + "A successful response to a CONNECT request had an incorrect HTTP version \ + (expected HTTP/1.1, got {:?})", + version + ); + } + false } - return false; - } - - // 101 Switching Protocols - if res.status() == http::StatusCode::SWITCHING_PROTOCOLS { - return true; } - - // CONNECT requests are complete if status code is 2xx. - if is_connect_success(res) { - return true; - } - - // Just a regular HTTP response... - false } /// Returns if the request target is in `absolute-form`. @@ -219,38 +211,3 @@ pub(crate) fn is_absolute_form(uri: &Uri) -> bool { uri.scheme().is_some() } - -/// Returns if the request target is in `origin-form`. -/// -/// This is `origin-form`: `example.com` -fn is_origin_form(uri: &Uri) -> bool { - uri.scheme().is_none() && uri.path_and_query().is_none() -} - -/// Returns if the received request is definitely bad. -/// -/// Just because a request parses doesn't mean it's correct. For examples: -/// -/// - `GET example.com` -/// - `CONNECT /just-a-path -pub(crate) fn is_bad_request(req: &http::Request) -> bool { - if req.method() == http::Method::CONNECT { - // CONNECT is only valid over HTTP/1.1 - if req.version() != http::Version::HTTP_11 { - debug!("CONNECT request not valid for HTTP/1.0: {:?}", req.uri()); - return true; - } - - // CONNECT requests are only valid in authority-form. 
- if !is_origin_form(req.uri()) { - debug!("CONNECT request with illegal URI: {:?}", req.uri()); - return true; - } - } else if is_origin_form(req.uri()) { - // If not CONNECT, refuse any origin-form URIs - debug!("{} request with illegal URI: {:?}", req.method(), req.uri()); - return true; - } - - false -} diff --git a/linkerd/proxy/http/src/h2.rs b/linkerd/proxy/http/src/h2.rs index 3488558bcc..62c0ed58db 100644 --- a/linkerd/proxy/http/src/h2.rs +++ b/linkerd/proxy/http/src/h2.rs @@ -1,9 +1,5 @@ -use crate::executor::TracingExecutor; +use crate::{Body, TokioExecutor}; use futures::prelude::*; -use hyper::{ - body::HttpBody, - client::conn::{self, SendRequest}, -}; use linkerd_error::{Error, Result}; use linkerd_stack::{MakeConnection, Service}; use std::{ @@ -26,7 +22,7 @@ pub struct Connect { #[derive(Debug)] pub struct Connection { - tx: SendRequest, + tx: hyper::client::conn::http2::SendRequest, } // === impl Connect === @@ -55,11 +51,11 @@ type ConnectFuture = Pin>> + Sen impl Service for Connect where - C: MakeConnection<(crate::Version, T)>, + C: MakeConnection<(crate::Variant, T)>, C::Connection: Send + Unpin + 'static, C::Metadata: Send, C::Future: Send + 'static, - B: HttpBody + Send + 'static, + B: Body + Send + Unpin + 'static, B::Data: Send, B::Error: Into + Send + Sync, { @@ -83,26 +79,26 @@ where let connect = self .connect - .connect((crate::Version::H2, target)) + .connect((crate::Variant::H2, target)) .instrument(trace_span!("connect").or_current()); Box::pin( async move { let (io, _meta) = connect.err_into::().await?; - let mut builder = conn::Builder::new(); - builder.executor(TracingExecutor).http2_only(true); + let mut builder = hyper::client::conn::http2::Builder::new(TokioExecutor::new()); + builder.timer(hyper_util::rt::TokioTimer::new()); match flow_control { None => {} Some(FlowControl::Adaptive) => { - builder.http2_adaptive_window(true); + builder.adaptive_window(true); } Some(FlowControl::Fixed { initial_stream_window_size, 
initial_connection_window_size, }) => { builder - .http2_initial_stream_window_size(initial_stream_window_size) - .http2_initial_connection_window_size(initial_connection_window_size); + .initial_stream_window_size(initial_stream_window_size) + .initial_connection_window_size(initial_connection_window_size); } } @@ -114,21 +110,21 @@ where }) = keep_alive { builder - .http2_keep_alive_timeout(timeout) - .http2_keep_alive_interval(interval) - .http2_keep_alive_while_idle(while_idle); + .keep_alive_timeout(timeout) + .keep_alive_interval(interval) + .keep_alive_while_idle(while_idle); } - builder.http2_max_frame_size(max_frame_size); + builder.max_frame_size(max_frame_size); if let Some(max) = max_concurrent_reset_streams { - builder.http2_max_concurrent_reset_streams(max); + builder.max_concurrent_reset_streams(max); } if let Some(sz) = max_send_buf_size { - builder.http2_max_send_buf_size(sz); + builder.max_send_buf_size(sz); } let (tx, conn) = builder - .handshake(io) + .handshake(hyper_util::rt::TokioIo::new(io)) .instrument(trace_span!("handshake").or_current()) .await?; @@ -148,13 +144,13 @@ where impl tower::Service> for Connection where - B: HttpBody + Send + 'static, + B: Body + Send + 'static, B::Data: Send, B::Error: Into + Send + Sync, { - type Response = http::Response; + type Response = http::Response; type Error = hyper::Error; - type Future = conn::ResponseFuture; + type Future = Pin>>>; #[inline] fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { @@ -176,6 +172,6 @@ where *req.version_mut() = http::Version::HTTP_11; } - self.tx.send_request(req) + self.tx.send_request(req).boxed() } } diff --git a/linkerd/proxy/http/src/lib.rs b/linkerd/proxy/http/src/lib.rs index eb5a928526..fc24fe5ef2 100644 --- a/linkerd/proxy/http/src/lib.rs +++ b/linkerd/proxy/http/src/lib.rs @@ -5,25 +5,16 @@ use http::{header::AsHeaderName, uri::Authority}; use linkerd_error::Error; pub mod balance; -pub mod classify; pub mod client; pub mod client_handle; -pub mod 
detect; -mod executor; -mod glue; pub mod h1; pub mod h2; mod header_from_target; -pub mod insert; pub mod normalize_uri; pub mod orig_proto; -mod override_authority; -mod retain; mod server; pub mod strip_header; pub mod timeout; -pub mod upgrade; -pub mod version; pub use self::{ balance::NewBalance, @@ -32,23 +23,29 @@ pub use self::{ NewInsertClassifyResponse, }, client_handle::{ClientHandle, SetClientHandle}, - detect::DetectHttp, - executor::TracingExecutor, header_from_target::NewHeaderFromTarget, normalize_uri::{MarkAbsoluteForm, NewNormalizeUri}, - override_authority::{AuthorityOverride, NewOverrideAuthority}, - retain::Retain, server::{NewServeHttp, Params as ServerParams, ServeHttp}, strip_header::StripHeader, timeout::{NewTimeout, ResponseTimeout, ResponseTimeoutError}, - version::Version, }; pub use http::{ - header::{self, HeaderName, HeaderValue}, + header::{self, HeaderMap, HeaderName, HeaderValue}, uri, Method, Request, Response, StatusCode, }; -pub use hyper::body::HttpBody; +pub use http_body::Body; +pub use hyper_util::rt::tokio::TokioExecutor; pub use linkerd_http_box::{BoxBody, BoxRequest, BoxResponse, EraseResponse}; +pub use linkerd_http_classify as classify; +pub use linkerd_http_detect::{ + DetectMetrics, DetectMetricsFamilies, DetectParams, Detection, NewDetect, +}; +pub use linkerd_http_insert as insert; +pub use linkerd_http_override_authority::{AuthorityOverride, NewOverrideAuthority}; +pub use linkerd_http_retain::{self as retain, Retain}; +pub use linkerd_http_stream_timeouts::{self as stream_timeouts, EnforceTimeouts, StreamTimeouts}; +pub use linkerd_http_upgrade as upgrade; +pub use linkerd_http_variant::{Unsupported as UnsupportedVariant, Variant}; #[derive(Clone, Debug)] pub struct HeaderPair(pub HeaderName, pub HeaderValue); @@ -57,7 +54,7 @@ pub trait HasH2Reason { fn h2_reason(&self) -> Option<::h2::Reason>; } -impl<'a> HasH2Reason for &'a (dyn std::error::Error + 'static) { +impl HasH2Reason for &(dyn std::error::Error + 
'static) { fn h2_reason(&self) -> Option<::h2::Reason> { if let Some(err) = self.downcast_ref::<::h2::Error>() { return err.h2_reason(); @@ -93,45 +90,3 @@ where let v = req.headers().get(header)?; v.to_str().ok()?.parse().ok() } - -fn set_authority(uri: &mut uri::Uri, auth: uri::Authority) { - let mut parts = uri::Parts::from(std::mem::take(uri)); - - parts.authority = Some(auth); - - // If this was an origin-form target (path only), - // then we can't *only* set the authority, as that's - // an illegal target (such as `example.com/docs`). - // - // But don't set a scheme if this was authority-form (CONNECT), - // since that would change its meaning (like `https://example.com`). - if parts.path_and_query.is_some() { - parts.scheme = Some(http::uri::Scheme::HTTP); - } - - let new = http::uri::Uri::from_parts(parts).expect("absolute uri"); - - *uri = new; -} - -fn strip_connection_headers(headers: &mut http::HeaderMap) { - if let Some(val) = headers.remove(header::CONNECTION) { - if let Ok(conn_header) = val.to_str() { - // A `Connection` header may have a comma-separated list of - // names of other headers that are meant for only this specific - // connection. - // - // Iterate these names and remove them as headers. - for name in conn_header.split(',') { - let name = name.trim(); - headers.remove(name); - } - } - } - - // Additionally, strip these "connection-level" headers always, since - // they are otherwise illegal if upgraded to HTTP2. 
- headers.remove(header::UPGRADE); - headers.remove("proxy-connection"); - headers.remove("keep-alive"); -} diff --git a/linkerd/proxy/http/src/normalize_uri.rs b/linkerd/proxy/http/src/normalize_uri.rs index 830b004d23..818adcd48d 100644 --- a/linkerd/proxy/http/src/normalize_uri.rs +++ b/linkerd/proxy/http/src/normalize_uri.rs @@ -123,7 +123,7 @@ where }; trace!(%authority, "Normalizing URI"); - crate::set_authority(req.uri_mut(), authority); + linkerd_http_override_authority::set_authority(req.uri_mut(), authority); } } diff --git a/linkerd/proxy/http/src/orig_proto.rs b/linkerd/proxy/http/src/orig_proto.rs index 858b7a8431..c9c31c7861 100644 --- a/linkerd/proxy/http/src/orig_proto.rs +++ b/linkerd/proxy/http/src/orig_proto.rs @@ -1,7 +1,7 @@ -use super::{h1, h2, upgrade}; +use super::{h1, h2, Body}; use futures::prelude::*; use http::header::{HeaderValue, TRANSFER_ENCODING}; -use hyper::body::HttpBody; +use http_body::Frame; use linkerd_error::{Error, Result}; use linkerd_http_box::BoxBody; use linkerd_stack::{layer, MakeConnection, Service}; @@ -27,8 +27,9 @@ pub struct DowngradedH2Error(h2::Reason); #[pin_project::pin_project] #[derive(Debug, Default)] -pub struct UpgradeResponseBody { - inner: hyper::Body, +pub struct UpgradeResponseBody { + #[pin] + inner: B, } /// Downgrades HTTP2 requests that were previousl upgraded to their original @@ -53,10 +54,10 @@ impl Upgrade { impl Service> for Upgrade where T: Clone + Send + Sync + 'static, - C: MakeConnection<(crate::Version, T)> + Clone + Send + Sync + 'static, + C: MakeConnection<(crate::Variant, T)> + Clone + Send + Sync + 'static, C::Connection: Unpin + Send, C::Future: Unpin + Send + 'static, - B: hyper::body::HttpBody + Send + 'static, + B: crate::Body + Send + Unpin + 'static, B::Data: Send, B::Error: Into + Send + Sync, { @@ -71,7 +72,11 @@ where fn call(&mut self, mut req: http::Request) -> Self::Future { debug_assert!(req.version() != http::Version::HTTP_2); - if req.extensions().get::().is_some() { + 
if req + .extensions() + .get::() + .is_some() + { debug!("Skipping orig-proto upgrade due to HTTP/1.1 upgrade"); return Box::pin(self.http1.request(req).map_ok(|rsp| rsp.map(BoxBody::new))); } @@ -194,8 +199,12 @@ fn test_downgrade_h2_error() { // === impl UpgradeResponseBody === -impl HttpBody for UpgradeResponseBody { - type Data = bytes::Bytes; +impl Body for UpgradeResponseBody +where + B: Body + Unpin, + B::Error: std::error::Error + Send + Sync + 'static, +{ + type Data = B::Data; type Error = Error; #[inline] @@ -203,27 +212,19 @@ impl HttpBody for UpgradeResponseBody { self.inner.is_end_stream() } - fn poll_data( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>> { - Pin::new(self.project().inner) - .poll_data(cx) - .map_err(downgrade_h2_error) - } - - fn poll_trailers( + fn poll_frame( self: Pin<&mut Self>, cx: &mut Context<'_>, - ) -> Poll, Self::Error>> { - Pin::new(self.project().inner) - .poll_trailers(cx) + ) -> Poll, Self::Error>>> { + self.project() + .inner + .poll_frame(cx) .map_err(downgrade_h2_error) } #[inline] fn size_hint(&self) -> http_body::SizeHint { - HttpBody::size_hint(&self.inner) + Body::size_hint(&self.inner) } } diff --git a/linkerd/proxy/http/src/server.rs b/linkerd/proxy/http/src/server.rs index 168ae61c7e..32c1e98c30 100644 --- a/linkerd/proxy/http/src/server.rs +++ b/linkerd/proxy/http/src/server.rs @@ -1,8 +1,7 @@ -use crate::{ - client_handle::SetClientHandle, h2, upgrade, BoxBody, BoxRequest, ClientHandle, - TracingExecutor, Version, -}; +use crate::{client_handle::SetClientHandle, h2, BoxBody, ClientHandle, Variant}; +use hyper_util::rt::tokio::TokioExecutor; use linkerd_error::Error; +use linkerd_http_box::BoxRequest; use linkerd_io::{self as io, PeerAddr}; use linkerd_stack::{layer, ExtractParam, NewService}; use std::{ @@ -19,7 +18,7 @@ mod tests; /// Configures HTTP server behavior. 
#[derive(Clone, Debug)] pub struct Params { - pub version: Version, + pub version: Variant, pub http2: h2::ServerParams, pub drain: drain::Watch, } @@ -31,11 +30,12 @@ pub struct NewServeHttp { params: X, } -/// Serves HTTP connectionswith an inner service. +/// Serves HTTP connections with an inner service. #[derive(Clone, Debug)] pub struct ServeHttp { - version: Version, - server: hyper::server::conn::Http, + version: Variant, + http1: hyper::server::conn::http1::Builder, + http2: hyper::server::conn::http2::Builder, inner: N, drain: drain::Watch, } @@ -76,44 +76,52 @@ where max_pending_accept_reset_streams, } = h2; - let mut srv = hyper::server::conn::Http::new().with_executor(TracingExecutor); + let mut http2 = hyper::server::conn::http2::Builder::new(TokioExecutor::new()); + http2.timer(hyper_util::rt::TokioTimer::new()); match flow_control { None => {} Some(h2::FlowControl::Adaptive) => { - srv.http2_adaptive_window(true); + http2.adaptive_window(true); } Some(h2::FlowControl::Fixed { initial_stream_window_size, initial_connection_window_size, }) => { - srv.http2_initial_stream_window_size(initial_stream_window_size) - .http2_initial_connection_window_size(initial_connection_window_size); + http2 + .initial_stream_window_size(initial_stream_window_size) + .initial_connection_window_size(initial_connection_window_size); } } // Configure HTTP/2 PING frames if let Some(h2::KeepAlive { timeout, interval }) = keep_alive { - srv.http2_keep_alive_timeout(timeout) - .http2_keep_alive_interval(interval); + http2 + .keep_alive_timeout(timeout) + .keep_alive_interval(interval); } - srv.http2_max_concurrent_streams(max_concurrent_streams) - .http2_max_frame_size(max_frame_size) - .http2_max_pending_accept_reset_streams(max_pending_accept_reset_streams); + http2 + .max_concurrent_streams(max_concurrent_streams) + .max_frame_size(max_frame_size) + .max_pending_accept_reset_streams(max_pending_accept_reset_streams); if let Some(sz) = max_header_list_size { - 
srv.http2_max_header_list_size(sz); + http2.max_header_list_size(sz); } if let Some(sz) = max_send_buf_size { - srv.http2_max_send_buf_size(sz); + http2.max_send_buf_size(sz); } + let mut http1 = hyper::server::conn::http1::Builder::new(); + http1.timer(hyper_util::rt::TokioTimer::new()); + debug!(?version, "Creating HTTP service"); let inner = self.inner.new_service(target); ServeHttp { inner, version, drain, - server: srv, + http1, + http2, } } } @@ -126,6 +134,7 @@ where N: NewService + Send + 'static, S: Service, Response = http::Response, Error = Error> + Unpin + + Clone + Send + 'static, S::Future: Send + 'static, @@ -141,7 +150,8 @@ where fn call(&mut self, io: I) -> Self::Future { let version = self.version; let drain = self.drain.clone(); - let mut server = self.server.clone(); + let http1 = self.http1.clone(); + let http2 = self.http2.clone(); let res = io.peer_addr().map(|pa| { let (handle, closed) = ClientHandle::new(pa); @@ -155,13 +165,15 @@ where let (svc, closed) = res?; debug!(?version, "Handling as HTTP"); match version { - Version::Http1 => { + Variant::Http1 => { // Enable support for HTTP upgrades (CONNECT and websockets). - let svc = upgrade::Service::new(BoxRequest::new(svc), drain.clone()); - let mut conn = server - .http1_only(true) - .serve_connection(io, svc) - .with_upgrades(); + let svc = linkerd_http_upgrade::upgrade::Service::new( + BoxRequest::new(svc), + drain.clone(), + ); + let svc = hyper_util::service::TowerToHyperService::new(svc); + let io = hyper_util::rt::TokioIo::new(io); + let mut conn = http1.serve_connection(io, svc).with_upgrades(); tokio::select! { res = &mut conn => { @@ -181,10 +193,11 @@ where } } - Version::H2 => { - let mut conn = server - .http2_only(true) - .serve_connection(io, BoxRequest::new(svc)); + Variant::H2 => { + let svc = + hyper_util::service::TowerToHyperService::new(BoxRequest::new(svc)); + let io = hyper_util::rt::TokioIo::new(io); + let mut conn = http2.serve_connection(io, svc); tokio::select! 
{ res = &mut conn => { diff --git a/linkerd/proxy/http/src/server/tests.rs b/linkerd/proxy/http/src/server/tests.rs index 741144c025..a7b806144b 100644 --- a/linkerd/proxy/http/src/server/tests.rs +++ b/linkerd/proxy/http/src/server/tests.rs @@ -1,9 +1,10 @@ -use std::vec; - use super::*; use bytes::Bytes; -use http_body::Body; +use futures::FutureExt; +use http_body_util::BodyExt; +use linkerd_io as io; use linkerd_stack::CloneParam; +use std::vec; use tokio::time; use tower::ServiceExt; use tower_test::mock; @@ -26,9 +27,10 @@ async fn h2_connection_window_exhaustion() { h2::ServerParams::default(), // An HTTP/2 client with constrained connection and stream windows to // force window exhaustion. - hyper::client::conn::Builder::new() - .http2_initial_connection_window_size(CLIENT_CONN_WINDOW) - .http2_initial_stream_window_size(CLIENT_STREAM_WINDOW), + hyper::client::conn::http2::Builder::new(TokioExecutor::new()) + .initial_connection_window_size(CLIENT_CONN_WINDOW) + .initial_stream_window_size(CLIENT_STREAM_WINDOW) + .timer(hyper_util::rt::TokioTimer::new()), ) .await; @@ -74,7 +76,7 @@ async fn h2_connection_window_exhaustion() { .expect("timed out"); tokio::select! { _ = time::sleep(time::Duration::from_secs(2)) => {} - _ = rx.data() => panic!("unexpected data"), + _ = rx.frame() => panic!("unexpected data"), } tracing::info!("Dropping one of the retained response bodies frees capacity so that the data can be received"); @@ -99,7 +101,9 @@ async fn h2_stream_window_exhaustion() { // A basic HTTP/2 server configuration with no overrides. h2::ServerParams::default(), // An HTTP/2 client with stream windows to force window exhaustion. 
- hyper::client::conn::Builder::new().http2_initial_stream_window_size(CLIENT_STREAM_WINDOW), + hyper::client::conn::http2::Builder::new(TokioExecutor::new()) + .initial_stream_window_size(CLIENT_STREAM_WINDOW) + .timer(hyper_util::rt::TokioTimer::new()), ) .await; @@ -107,34 +111,52 @@ async fn h2_stream_window_exhaustion() { let chunk = (0..CLIENT_STREAM_WINDOW).map(|_| b'a').collect::(); tracing::info!(sz = chunk.len(), "Sending chunk"); - tx.try_send_data(chunk.clone()).expect("send data"); + tx.send_data(chunk.clone()).await.expect("can send data"); tokio::task::yield_now().await; tracing::info!(sz = chunk.len(), "Buffering chunk in channel"); - tx.try_send_data(chunk.clone()).expect("send data"); + tx.send_data(chunk.clone()).await.expect("can send data"); tokio::task::yield_now().await; tracing::info!(sz = chunk.len(), "Confirming stream window exhaustion"); + /* + * XXX(kate): this can be reinstate when we have a `poll_ready(cx)` method on the new sender. assert!( timeout(futures::future::poll_fn(|cx| tx.poll_ready(cx))) .await .is_err(), "stream window should be exhausted" ); + */ tracing::info!("Once the pending data is read, the stream window should be replenished"); - let data = body.data().await.expect("data").expect("data"); + let data = body + .frame() + .await + .expect("yields a result") + .expect("yields a frame") + .into_data() + .expect("yields data"); assert_eq!(data, chunk); - let data = body.data().await.expect("data").expect("data"); + let data = body + .frame() + .await + .expect("yields a result") + .expect("yields a frame") + .into_data() + .expect("yields data"); assert_eq!(data, chunk); - timeout(body.data()).await.expect_err("no more chunks"); + timeout(body.frame()).await.expect_err("no more chunks"); tracing::info!(sz = chunk.len(), "Confirming stream window availability"); + /* + * XXX(kate): this can be reinstated when we have a `poll_ready(cx)` method on the new sender. 
timeout(futures::future::poll_fn(|cx| tx.poll_ready(cx))) .await .expect("timed out") .expect("ready"); + */ } // === Utilities === @@ -142,7 +164,7 @@ async fn h2_stream_window_exhaustion() { const LOG_LEVEL: &str = "h2::proto=trace,hyper=trace,linkerd=trace,info"; struct TestServer { - client: hyper::client::conn::SendRequest, + client: hyper::client::conn::http2::SendRequest, server: Handle, } @@ -181,19 +203,30 @@ async fn timeout(inner: F) -> Result impl TestServer { #[tracing::instrument(skip_all)] - async fn connect(params: Params, client: &mut hyper::client::conn::Builder) -> Self { + async fn connect_h2( + h2: h2::ServerParams, + client: &mut hyper::client::conn::http2::Builder, + ) -> Self { + let params = Params { + drain: drain(), + version: Variant::H2, + http2: h2, + }; + + let (sio, cio) = io::duplex(20 * 1024 * 1024); // 20 MB + // Build the HTTP server with a mocked inner service so that we can handle // requests. - let (mock, server) = mock::pair(); + let (mock, server) = mock::pair::, http::Response>(); let svc = NewServeHttp::new(CloneParam::from(params), NewMock(mock)).new_service(()); - - let (sio, cio) = io::duplex(20 * 1024 * 1024); // 20 MB - tokio::spawn(svc.oneshot(sio).instrument(info_span!("server"))); + fn bound, I>(_: &S) {} + bound::, linkerd_io::DuplexStream>(&svc); + let fut = svc.oneshot(sio).instrument(info_span!("server")); + tokio::spawn(fut); // Build a real HTTP/2 client using the mocked socket. let (client, task) = client - .executor(crate::executor::TracingExecutor) - .handshake::<_, BoxBody>(cio) + .handshake::<_, BoxBody>(hyper_util::rt::tokio::TokioIo::new(cio)) .await .expect("client connect"); tokio::spawn(task.instrument(info_span!("client"))); @@ -201,41 +234,33 @@ impl TestServer { Self { client, server } } - async fn connect_h2(h2: h2::ServerParams, client: &mut hyper::client::conn::Builder) -> Self { - Self::connect( - // A basic HTTP/2 server configuration with no overrides. 
- Params { - drain: drain(), - version: Version::H2, - http2: h2, - }, - // An HTTP/2 client with constrained connection and stream windows to accomodate - client.http2_only(true), - ) - .await - } - /// Issues a request through the client to the mocked server and processes the /// response. The mocked response body sender and the readable response body are /// returned. #[tracing::instrument(skip(self))] - async fn get(&mut self) -> (hyper::body::Sender, hyper::Body) { + async fn get( + &mut self, + ) -> ( + http_body_util::channel::Sender, + hyper::body::Incoming, + ) { self.server.allow(1); let mut call0 = self .client - .send_request(http::Request::new(BoxBody::default())); + .send_request(http::Request::new(BoxBody::default())) + .boxed(); let (_req, next) = tokio::select! { _ = (&mut call0) => unreachable!("client cannot receive a response"), next = self.server.next_request() => next.expect("server not dropped"), }; - let (tx, rx) = hyper::Body::channel(); + let (tx, rx) = http_body_util::channel::Channel::new(512); next.send_response(http::Response::new(BoxBody::new(rx))); let rsp = call0.await.expect("response"); (tx, rsp.into_body()) } #[tracing::instrument(skip(self))] - async fn respond(&mut self, body: Bytes) -> hyper::Body { + async fn respond(&mut self, body: Bytes) -> hyper::body::Incoming { let (mut tx, rx) = self.get().await; tx.send_data(body.clone()).await.expect("send data"); rx diff --git a/linkerd/proxy/http/src/timeout.rs b/linkerd/proxy/http/src/timeout.rs index bc958029e4..25b10a4af6 100644 --- a/linkerd/proxy/http/src/timeout.rs +++ b/linkerd/proxy/http/src/timeout.rs @@ -3,7 +3,7 @@ use linkerd_stack::{layer, ExtractParam, MapErr, NewService, Timeout, TimeoutErr use std::time::Duration; use thiserror::Error; -/// An HTTP-specific optional timeout layer. +/// DEPRECATED: An HTTP-specific optional timeout layer. 
/// /// The stack target must implement `HasTimeout`, and if a duration is /// specified for the target, a timeout is applied waiting for HTTP responses. diff --git a/linkerd/proxy/identity-client/Cargo.toml b/linkerd/proxy/identity-client/Cargo.toml index 3d199200d9..e954bf1644 100644 --- a/linkerd/proxy/identity-client/Cargo.toml +++ b/linkerd/proxy/identity-client/Cargo.toml @@ -1,14 +1,14 @@ [package] name = "linkerd-proxy-identity-client" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [dependencies] futures = { version = "0.3", default-features = false } -linkerd2-proxy-api = { version = "0.13", features = ["identity"] } +linkerd2-proxy-api = { workspace = true, features = ["identity"] } linkerd-dns-name = { path = "../../dns/name" } linkerd-error = { path = "../../error" } linkerd-identity = { path = "../../identity" } @@ -16,8 +16,8 @@ linkerd-metrics = { path = "../../metrics" } linkerd-stack = { path = "../../stack" } parking_lot = "0.12" pin-project = "1" -thiserror = "1" +thiserror = "2" tokio = { version = "1", features = ["time", "sync"] } -tonic = { version = "0.10", default-features = false } +tonic = { workspace = true, default-features = false } tracing = "0.1" -http-body = "0.4" +http-body = { workspace = true } diff --git a/linkerd/proxy/identity-client/src/certify.rs b/linkerd/proxy/identity-client/src/certify.rs index ce2839b0df..61d313fe5e 100644 --- a/linkerd/proxy/identity-client/src/certify.rs +++ b/linkerd/proxy/identity-client/src/certify.rs @@ -93,7 +93,7 @@ impl Certify { C: Credentials, N: NewService<(), Service = S>, S: GrpcService, - S::ResponseBody: Default + Body + Send + 'static, + S::ResponseBody: Body + Send + 'static, ::Error: Into + Send, { debug!("Identity daemon running"); @@ -155,7 +155,7 @@ async fn 
certify( where C: Credentials, S: GrpcService, - S::ResponseBody: Default + Body + Send + 'static, + S::ResponseBody: Body + Send + 'static, ::Error: Into + Send, { let req = tonic::Request::new(api::CertifyRequest { diff --git a/linkerd/proxy/resolve/Cargo.toml b/linkerd/proxy/resolve/Cargo.toml index b997674cd8..fe6f94f1ed 100644 --- a/linkerd/proxy/resolve/Cargo.toml +++ b/linkerd/proxy/resolve/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "linkerd-proxy-resolve" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } description = """ Utilities for working with `Resolve` implementations """ @@ -13,7 +13,7 @@ Utilities for working with `Resolve` implementations futures = { version = "0.3", default-features = false } linkerd-error = { path = "../../error" } linkerd-proxy-core = { path = "../core" } -thiserror = "1" -tower = "0.4" +thiserror = "2" +tower = { workspace = true } tracing = "0.1" pin-project = "1" diff --git a/linkerd/proxy/resolve/src/recover.rs b/linkerd/proxy/resolve/src/recover.rs index 885c845f67..e5d130afa9 100644 --- a/linkerd/proxy/resolve/src/recover.rs +++ b/linkerd/proxy/resolve/src/recover.rs @@ -147,14 +147,12 @@ where fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.project(); loop { - // XXX(eliza): note that this match was originally an `if let`, - // but that doesn't work with `#[project]` for some kinda reason - #[allow(clippy::single_match)] - match this.inner.state { - State::Connected { - ref mut resolution, - ref mut is_initial, - } => { + if let State::Connected { + ref mut resolution, + ref mut is_initial, + } = this.inner.state + { + { tracing::trace!("polling"); match ready!(resolution.try_poll_next_unpin(cx)) { Some(Ok(Update::Remove(_))) if *is_initial => { @@ -189,7 +187,6 @@ 
where } } } - _ => {} } ready!(this.inner.poll_connected(cx))?; diff --git a/linkerd/proxy/server-policy/Cargo.toml b/linkerd/proxy/server-policy/Cargo.toml index 314286d051..e94ec7b701 100644 --- a/linkerd/proxy/server-policy/Cargo.toml +++ b/linkerd/proxy/server-policy/Cargo.toml @@ -1,27 +1,31 @@ [package] name = "linkerd-proxy-server-policy" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [features] proto = ["linkerd-http-route/proto", "linkerd2-proxy-api", "prost-types"] +test-util = [] [dependencies] +governor = { version = "0.10", default-features = false, features = ["std"] } ipnet = "2" -http = "0.2" -prost-types = { version = "0.12", optional = true } -thiserror = "1" +http = { workspace = true } +prost-types = { workspace = true, optional = true } +thiserror = "2" linkerd-http-route = { path = "../../http/route" } +linkerd-identity = { path = "../../identity" } [dependencies.linkerd2-proxy-api] -version = "0.13" +workspace = true features = ["inbound"] optional = true [dev-dependencies] maplit = "1" quickcheck = { version = "1", default-features = false } +tokio = { version = "1", features = ["full", "macros"] } diff --git a/linkerd/proxy/server-policy/src/lib.rs b/linkerd/proxy/server-policy/src/lib.rs index bee142475b..7a5055948f 100644 --- a/linkerd/proxy/server-policy/src/lib.rs +++ b/linkerd/proxy/server-policy/src/lib.rs @@ -6,18 +6,21 @@ use std::{hash::Hash, sync::Arc, time}; pub mod authz; pub mod grpc; pub mod http; +pub mod local_rate_limit; pub mod meta; pub use self::{ authz::{Authentication, Authorization}, + local_rate_limit::{LocalRateLimit, RateLimitError}, meta::Meta, }; pub use linkerd_http_route as route; -#[derive(Clone, Debug, PartialEq, Eq)] +#[derive(Clone, Debug)] pub struct ServerPolicy { pub protocol: 
Protocol, pub meta: Arc, + pub local_rate_limit: Arc, } #[derive(Clone, Debug, PartialEq, Eq, Hash)] @@ -55,7 +58,7 @@ impl ServerPolicy { rules: vec![http::Rule { matches: vec![http::r#match::MatchRequest::default()], policy: http::Policy { - meta, + meta: meta.clone(), authorizations: Arc::new([]), filters: vec![http::Filter::InternalError( "invalid server configuration", @@ -65,6 +68,7 @@ impl ServerPolicy { }]), tcp_authorizations: Arc::new([]), }, + local_rate_limit: Arc::new(LocalRateLimit::default()), } } } @@ -131,6 +135,27 @@ pub mod proto { server_ips: _, } = proto; + let local_rate_limit = match protocol + .clone() + .and_then(|api::ProxyProtocol { kind }| kind) + .ok_or(InvalidServer::MissingProxyProtocol)? + { + api::proxy_protocol::Kind::Detect(api::proxy_protocol::Detect { + timeout: _, + http_routes: _, + http_local_rate_limit, + }) => http_local_rate_limit.unwrap_or_default().try_into(), + api::proxy_protocol::Kind::Http1(api::proxy_protocol::Http1 { + routes: _, + local_rate_limit, + }) + | api::proxy_protocol::Kind::Http2(api::proxy_protocol::Http2 { + routes: _, + local_rate_limit, + }) => local_rate_limit.unwrap_or_default().try_into(), + _ => Ok(Default::default()), + }?; + let authorizations = { // Always permit traffic from localhost. let localhost = Authorization { @@ -154,6 +179,7 @@ pub mod proto { api::proxy_protocol::Kind::Detect(api::proxy_protocol::Detect { http_routes, timeout, + http_local_rate_limit: _, }) => Protocol::Detect { http: mk_routes!(http, http_routes, authorizations.clone())?, timeout: timeout @@ -162,13 +188,15 @@ pub mod proto { tcp_authorizations: authorizations, }, - api::proxy_protocol::Kind::Http1(api::proxy_protocol::Http1 { routes }) => { - Protocol::Http1(mk_routes!(http, routes, authorizations)?) 
- } + api::proxy_protocol::Kind::Http1(api::proxy_protocol::Http1 { + routes, + local_rate_limit: _, + }) => Protocol::Http1(mk_routes!(http, routes, authorizations)?), - api::proxy_protocol::Kind::Http2(api::proxy_protocol::Http2 { routes }) => { - Protocol::Http2(mk_routes!(http, routes, authorizations)?) - } + api::proxy_protocol::Kind::Http2(api::proxy_protocol::Http2 { + routes, + local_rate_limit: _, + }) => Protocol::Http2(mk_routes!(http, routes, authorizations)?), api::proxy_protocol::Kind::Grpc(api::proxy_protocol::Grpc { routes }) => { Protocol::Grpc(mk_routes!(grpc, routes, authorizations)?) @@ -182,7 +210,11 @@ pub mod proto { // avoid label inference. let meta = Meta::try_new_with_default(labels, "policy.linkerd.io", "server")?; - Ok(ServerPolicy { protocol, meta }) + Ok(ServerPolicy { + protocol, + meta, + local_rate_limit: Arc::new(local_rate_limit), + }) } } } diff --git a/linkerd/proxy/server-policy/src/local_rate_limit.rs b/linkerd/proxy/server-policy/src/local_rate_limit.rs new file mode 100644 index 0000000000..3356582a7d --- /dev/null +++ b/linkerd/proxy/server-policy/src/local_rate_limit.rs @@ -0,0 +1,190 @@ +use crate::Meta; +#[cfg(test)] +use governor::clock::FakeRelativeClock; +use governor::{ + clock::{Clock, DefaultClock}, + middleware::NoOpMiddleware, + state::{keyed::HashMapStateStore, InMemoryState, RateLimiter, StateStore}, +}; +use linkerd_identity::Id; +use std::{collections::HashMap, num::NonZeroU32, sync::Arc}; + +#[cfg(test)] +mod tests; + +type Direct = InMemoryState; +type Keyed = HashMapStateStore>; + +#[derive(Debug, Default)] +pub struct LocalRateLimit { + meta: Option>, + total: Option>, + per_identity: Option>, + overrides: HashMap>>, +} + +#[derive(Debug)] +struct RateLimit +where + S: StateStore, + C: Clock, +{ + rps: NonZeroU32, + limiter: RateLimiter>, +} + +#[derive(Debug, thiserror::Error, PartialEq, Eq)] +pub enum RateLimitError { + #[error("total rate limit exceeded: {0}rps")] + Total(NonZeroU32), + 
#[error("per-identity rate limit exceeded: {0}rps")] + PerIdentity(NonZeroU32), + #[error("override rate limit exceeded: {0}rps")] + Override(NonZeroU32), +} + +// === impl LocalRateLimit === + +#[cfg(any(feature = "proto", feature = "test-util"))] +impl RateLimit { + fn direct(rps: NonZeroU32) -> Self { + let limiter = RateLimiter::direct(governor::Quota::per_second(rps)); + Self { rps, limiter } + } +} + +#[cfg(any(feature = "proto", feature = "test-util"))] +impl RateLimit { + fn keyed(rps: NonZeroU32) -> Self { + let limiter = RateLimiter::hashmap(governor::Quota::per_second(rps)); + Self { rps, limiter } + } +} + +#[cfg(feature = "test-util")] +impl LocalRateLimit { + pub fn new_no_overrides_for_test( + total: Option, + per_identity: Option, + ) -> LocalRateLimit { + LocalRateLimit { + meta: None, + total: total.and_then(NonZeroU32::new).map(RateLimit::direct), + per_identity: per_identity.and_then(NonZeroU32::new).map(RateLimit::keyed), + overrides: HashMap::new(), + } + } +} + +impl LocalRateLimit { + pub fn check(&self, id: Option<&Id>) -> Result<(), RateLimitError> { + if let Some(lim) = &self.total { + if lim.limiter.check().is_err() { + return Err(RateLimitError::Total(lim.rps)); + } + } + + if let Some(id) = id { + if let Some(lim) = self.overrides.get(id) { + if lim.limiter.check().is_err() { + return Err(RateLimitError::Override(lim.rps)); + } + return Ok(()); + } + } + + if let Some(lim) = &self.per_identity { + // Note that clients with no identity share the same rate limit (Id = None) + if lim.limiter.check_key(&id.cloned()).is_err() { + return Err(RateLimitError::PerIdentity(lim.rps)); + } + } + + Ok(()) + } + + pub fn meta(&self) -> Option> { + self.meta.clone() + } +} + +// === impl RateLimit === + +#[cfg(test)] +impl RateLimit { + fn direct_for_test(rps: u32) -> Self { + let rps = NonZeroU32::new(rps).expect("non-zero RPS"); + let quota = governor::Quota::per_second(rps); + let limiter = RateLimiter::direct_with_clock(quota, 
FakeRelativeClock::default()); + + Self { rps, limiter } + } +} + +#[cfg(test)] +impl RateLimit { + fn keyed_for_test(rps: u32) -> Self { + let rps = NonZeroU32::new(rps).expect("non-zero RPS"); + let quota = governor::Quota::per_second(rps); + let limiter = RateLimiter::hashmap_with_clock(quota, FakeRelativeClock::default()); + + Self { rps, limiter } + } +} + +#[cfg(feature = "proto")] +pub mod proto { + use super::*; + use crate::meta::proto::InvalidMeta; + use linkerd2_proxy_api::inbound as api; + + impl TryFrom for LocalRateLimit { + type Error = InvalidMeta; + + fn try_from(proto: api::HttpLocalRateLimit) -> Result { + let meta = proto + .metadata + .map(Meta::try_from) + .transpose()? + .map(Arc::new); + let total = proto + .total + .and_then(|l| NonZeroU32::new(l.requests_per_second)) + .map(RateLimit::direct); + let per_identity = proto + .identity + .and_then(|l| NonZeroU32::new(l.requests_per_second)) + .map(RateLimit::keyed); + let overrides = proto + .overrides + .into_iter() + .flat_map(|ovr| { + let Some(limiter) = ovr + .limit + .and_then(|l| NonZeroU32::new(l.requests_per_second)) + .map(RateLimit::direct) + else { + return vec![]; + }; + let limit = Arc::new(limiter); + ovr.clients + .into_iter() + .flat_map(|cl| { + cl.identities + .into_iter() + .filter_map(|id| id.name.parse::().ok()) + }) + .map(move |id| (id, limit.clone())) + .collect() + }) + .collect(); + + Ok(Self { + meta, + total, + per_identity, + overrides, + }) + } + } +} diff --git a/linkerd/proxy/server-policy/src/local_rate_limit/tests.rs b/linkerd/proxy/server-policy/src/local_rate_limit/tests.rs new file mode 100644 index 0000000000..614083068a --- /dev/null +++ b/linkerd/proxy/server-policy/src/local_rate_limit/tests.rs @@ -0,0 +1,166 @@ +use super::*; +use maplit::hashmap; +use std::time::Duration; + +#[cfg(feature = "proto")] +#[tokio::test(flavor = "current_thread")] +async fn from_proto() { + use linkerd2_proxy_api::{ + inbound::{self, http_local_rate_limit}, + meta, + }; + 
+ let client_1: Id = "client-1".parse().unwrap(); + let client_2: Id = "client-2".parse().unwrap(); + let client_3: Id = "client-3".parse().unwrap(); + let client_4: Id = "client-4".parse().unwrap(); + let rl_proto = inbound::HttpLocalRateLimit { + metadata: Some(meta::Metadata { + kind: Some(meta::metadata::Kind::Default("ratelimit-1".into())), + }), + total: Some(http_local_rate_limit::Limit { + requests_per_second: 100, + }), + identity: Some(http_local_rate_limit::Limit { + requests_per_second: 20, + }), + overrides: vec![ + http_local_rate_limit::Override { + limit: Some(http_local_rate_limit::Limit { + requests_per_second: 50, + }), + clients: Some(http_local_rate_limit::r#override::ClientIdentities { + identities: vec![ + inbound::Identity { + name: client_1.to_string(), + }, + inbound::Identity { + name: client_2.to_string(), + }, + ], + }), + }, + http_local_rate_limit::Override { + limit: Some(http_local_rate_limit::Limit { + requests_per_second: 75, + }), + clients: Some(http_local_rate_limit::r#override::ClientIdentities { + identities: vec![ + inbound::Identity { + name: client_3.to_string(), + }, + inbound::Identity { + name: client_4.to_string(), + }, + ], + }), + }, + ], + }; + + let rl = TryInto::::try_into(rl_proto).unwrap(); + assert_eq!(rl.total.as_ref().unwrap().rps.get(), 100); + assert_eq!(rl.per_identity.as_ref().unwrap().rps.get(), 20); + + assert_eq!(rl.overrides.get(&client_1).unwrap().rps.get(), 50); + assert_eq!(rl.overrides.get(&client_2).unwrap().rps.get(), 50); + assert_eq!(rl.overrides.get(&client_3).unwrap().rps.get(), 75); + assert_eq!(rl.overrides.get(&client_4).unwrap().rps.get(), 75); +} + +#[tokio::test(flavor = "current_thread")] +async fn check_rate_limits() { + let total = RateLimit::direct_for_test(35); + let per_identity = RateLimit::keyed_for_test(5); + let overrides = hashmap! 
{ + "client-3".parse().unwrap() => Arc::new(RateLimit::direct_for_test(10)), + "client-4".parse().unwrap() => Arc::new(RateLimit::direct_for_test(15)), + }; + let rl = LocalRateLimit { + meta: None, + total: Some(total), + per_identity: Some(per_identity), + overrides, + }; + + // These clients will be rate-limited by the per_identity rate-limiter + let client_1: Id = "client-1".parse().unwrap(); + let client_2: Id = "client-2".parse().unwrap(); + + // These clients will be rate-limited by the overrides rate-limiters + let client_3: Id = "client-3".parse().unwrap(); + let client_4: Id = "client-4".parse().unwrap(); + + let total_clock = rl.total.as_ref().unwrap().limiter.clock(); + let per_identity_clock = rl.per_identity.as_ref().unwrap().limiter.clock(); + let client_3_clock = rl.overrides.get(&client_3).unwrap().limiter.clock(); + let client_4_clock = rl.overrides.get(&client_4).unwrap().limiter.clock(); + + // This loop checks that: + // - client_1 gets rate-limited first via the per_identity rate-limiter + // - then client_3 and client_4 via the overrides rate-limiters + // - then client_2 via the total rate-limiter + // - then we advance time to replenish the rate-limiters buckets and repeat the checks + for _ in 1..=5 { + // Requests per-client: 5 + // Total requests: 15 + // All clients should NOT be rate-limited + for _ in 1..=5 { + assert!(rl.check(Some(&client_1)).is_ok()); + assert!(rl.check(Some(&client_3)).is_ok()); + assert!(rl.check(Some(&client_4)).is_ok()); + } + // Reached per_identity limit for client_1 + // Total requests: 16 + assert_eq!( + rl.check(Some(&client_1)), + Err(RateLimitError::PerIdentity(NonZeroU32::new(5).unwrap())) + ); + + // Requests per-client: 10 + // Total requests thus far: 26 + // client_3 and client_4 should NOT be rate-limited + for _ in 1..=5 { + assert!(rl.check(Some(&client_3)).is_ok()); + assert!(rl.check(Some(&client_4)).is_ok()); + } + // Total requests thus far: 27 + // Reached override limit for client_3 + 
 assert_eq!( + rl.check(Some(&client_3)), + Err(RateLimitError::Override(NonZeroU32::new(10).unwrap())) + ); + + // Requests per-client: 5 + // Total requests thus far: 32 + // client_4 should NOT be rate-limited + for _ in 1..=5 { + assert!(rl.check(Some(&client_4)).is_ok()); + } + // Total requests: 33 + // Reached override limit for client_4 + assert_eq!( + rl.check(Some(&client_4)), + Err(RateLimitError::Override(NonZeroU32::new(15).unwrap())) + ); + + // Total requests: 35 + // Only 2 requests for client_2 allowed as we're reaching the total rate-limit + for _ in 1..=2 { + assert!(rl.check(Some(&client_2)).is_ok()); + } + + // Total requests: 36 + // Reached total limit for all clients + assert_eq!( + rl.check(Some(&client_2)), + Err(RateLimitError::Total(NonZeroU32::new(35).unwrap())) + ); + + // Advance time by one second to replenish the rate limiters' buckets + total_clock.advance(Duration::from_secs(1)); + per_identity_clock.advance(Duration::from_secs(1)); + client_3_clock.advance(Duration::from_secs(1)); + client_4_clock.advance(Duration::from_secs(1)); + } +} diff --git a/linkerd/proxy/server-policy/src/meta.rs b/linkerd/proxy/server-policy/src/meta.rs index 6823adc13f..8817262784 100644 --- a/linkerd/proxy/server-policy/src/meta.rs +++ b/linkerd/proxy/server-policy/src/meta.rs @@ -39,6 +39,10 @@ impl Meta { Self::Resource { group, .. 
} => group, } } + + pub fn is_audit(&self) -> bool { + matches!(self, Self::Default { name } if name == "audit") + } } impl std::cmp::PartialEq for Meta { diff --git a/linkerd/proxy/spire-client/Cargo.toml b/linkerd/proxy/spire-client/Cargo.toml index 300d7d6dc0..4e16d03edb 100644 --- a/linkerd/proxy/spire-client/Cargo.toml +++ b/linkerd/proxy/spire-client/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "linkerd-proxy-spire-client" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [dependencies] futures = { version = "0.3", default-features = false } @@ -16,13 +16,13 @@ linkerd-tonic-watch = { path = "../../tonic-watch" } linkerd-exp-backoff = { path = "../../exp-backoff" } linkerd-stack = { path = "../../stack" } tokio = { version = "1", features = ["time", "sync"] } -tonic = "0.10" -tower = "0.4" +tonic = { workspace = true } +tower = { workspace = true } tracing = "0.1" -x509-parser = "0.16.0" +x509-parser = "0.17.0" asn1 = { version = "0.6", package = "simple_asn1" } -thiserror = "1" +thiserror = "2" [dev-dependencies] -rcgen = "0.12.0" +rcgen = "0.13.2" tokio-test = "0.4" diff --git a/linkerd/proxy/spire-client/src/api.rs b/linkerd/proxy/spire-client/src/api.rs index 3c3ec16d4f..143c13705f 100644 --- a/linkerd/proxy/spire-client/src/api.rs +++ b/linkerd/proxy/spire-client/src/api.rs @@ -116,8 +116,8 @@ impl Api where S: tonic::client::GrpcService + Clone, S::Error: Into, - S::ResponseBody: Default + http::HttpBody + Send + 'static, - ::Error: Into + Send, + S::ResponseBody: Default + http::Body + Send + 'static, + ::Error: Into + Send, { pub fn watch(client: S, backoff: ExponentialBackoff) -> Watch { let client = Client::new(client); @@ -129,8 +129,8 @@ impl Service<()> for Api where S: tonic::client::GrpcService + Clone, S: Clone + Send + 
Sync + 'static, - S::ResponseBody: Default + http::HttpBody + Send + 'static, - ::Error: Into + Send, + S::ResponseBody: Default + http::Body + Send + 'static, + ::Error: Into + Send, S::Future: Send + 'static, { type Response = @@ -220,18 +220,19 @@ where #[cfg(test)] mod tests { use crate::api::Svid; - use rcgen::{Certificate, CertificateParams, SanType}; + use rcgen::{CertificateParams, KeyPair, SanType}; use spiffe_proto::client as api; fn gen_svid_pb(id: String, subject_alt_names: Vec) -> api::X509svid { let mut params = CertificateParams::default(); params.subject_alt_names = subject_alt_names; - let cert = Certificate::from_params(params).expect("should generate cert"); + let key = KeyPair::generate().expect("should generate key"); + let cert = params.self_signed(&key).expect("should generate cert"); api::X509svid { spiffe_id: id, - x509_svid: cert.serialize_der().expect("should serialize"), - x509_svid_key: cert.serialize_private_key_der(), + x509_svid: cert.der().to_vec(), + x509_svid_key: key.serialize_der(), bundle: Vec::default(), } } @@ -239,21 +240,21 @@ mod tests { #[test] fn can_parse_valid_proto() { let id = "spiffe://some-domain/some-workload"; - let svid_pb = gen_svid_pb(id.into(), vec![SanType::URI(id.into())]); + let svid_pb = gen_svid_pb(id.into(), vec![SanType::URI(id.parse().unwrap())]); assert!(Svid::try_from(svid_pb).is_ok()); } #[test] fn cannot_parse_non_spiffe_id() { let id = "some-domain.some-workload"; - let svid_pb = gen_svid_pb(id.into(), vec![SanType::DnsName(id.into())]); + let svid_pb = gen_svid_pb(id.into(), vec![SanType::DnsName(id.parse().unwrap())]); assert!(Svid::try_from(svid_pb).is_err()); } #[test] fn cannot_parse_empty_cert() { let id = "spiffe://some-domain/some-workload"; - let mut svid_pb = gen_svid_pb(id.into(), vec![SanType::URI(id.into())]); + let mut svid_pb = gen_svid_pb(id.into(), vec![SanType::URI(id.parse().unwrap())]); svid_pb.x509_svid = Vec::default(); assert!(Svid::try_from(svid_pb).is_err()); } @@ -261,7 
+262,7 @@ mod tests { #[test] fn cannot_parse_empty_key() { let id = "spiffe://some-domain/some-workload"; - let mut svid_pb = gen_svid_pb(id.into(), vec![SanType::URI(id.into())]); + let mut svid_pb = gen_svid_pb(id.into(), vec![SanType::URI(id.parse().unwrap())]); svid_pb.x509_svid_key = Vec::default(); assert!(Svid::try_from(svid_pb).is_err()); } diff --git a/linkerd/proxy/spire-client/src/lib.rs b/linkerd/proxy/spire-client/src/lib.rs index 992e0e1a21..bcc0dab46b 100644 --- a/linkerd/proxy/spire-client/src/lib.rs +++ b/linkerd/proxy/spire-client/src/lib.rs @@ -62,10 +62,11 @@ mod tests { use crate::api::Svid; use linkerd_error::Result; use linkerd_identity::DerX509; - use rcgen::{Certificate, CertificateParams, SanType, SerialNumber}; + use rcgen::{CertificateParams, KeyPair, SanType, SerialNumber}; use std::time::SystemTime; fn gen_svid(id: Id, subject_alt_names: Vec, serial: SerialNumber) -> Svid { + let key = KeyPair::generate().expect("should generate key"); let mut params = CertificateParams::default(); params.subject_alt_names = subject_alt_names; params.serial_number = Some(serial); @@ -73,10 +74,11 @@ mod tests { Svid::new( id, DerX509( - Certificate::from_params(params) + params + .self_signed(&key) .expect("should generate cert") - .serialize_der() - .expect("should serialize"), + .der() + .to_vec(), ), Vec::default(), Vec::default(), @@ -151,7 +153,7 @@ mod tests { let serial_1 = SerialNumber::from_slice("some-serial-1".as_bytes()); let update_1 = SvidUpdate::new(vec![gen_svid( spiffe_id.clone(), - vec![SanType::URI(spiffe_san.into())], + vec![SanType::URI(spiffe_san.parse().unwrap())], serial_1.clone(), )]); @@ -164,7 +166,7 @@ mod tests { let serial_2 = SerialNumber::from_slice("some-serial-2".as_bytes()); let update_2 = SvidUpdate::new(vec![gen_svid( spiffe_id.clone(), - vec![SanType::URI(spiffe_san.into())], + vec![SanType::URI(spiffe_san.parse().unwrap())], serial_2.clone(), )]); @@ -186,7 +188,7 @@ mod tests { let serial_1 = 
SerialNumber::from_slice("some-serial-1".as_bytes()); let update_1 = SvidUpdate::new(vec![gen_svid( spiffe_id.clone(), - vec![SanType::URI(spiffe_san.into())], + vec![SanType::URI(spiffe_san.parse().unwrap())], serial_1.clone(), )]); @@ -228,7 +230,7 @@ mod tests { let serial_1 = SerialNumber::from_slice("some-serial-1".as_bytes()); let update_1 = SvidUpdate::new(vec![gen_svid( spiffe_id.clone(), - vec![SanType::URI(spiffe_san.into())], + vec![SanType::URI(spiffe_san.parse().unwrap())], serial_1.clone(), )]); @@ -242,7 +244,7 @@ mod tests { let mut update_sent = svid_tx.subscribe(); let update_2 = SvidUpdate::new(vec![gen_svid( spiffe_id_wrong, - vec![SanType::URI(spiffe_san_wrong.into())], + vec![SanType::URI(spiffe_san_wrong.parse().unwrap())], serial_2.clone(), )]); diff --git a/linkerd/proxy/tap/Cargo.toml b/linkerd/proxy/tap/Cargo.toml index ac437fb270..6c45368b89 100644 --- a/linkerd/proxy/tap/Cargo.toml +++ b/linkerd/proxy/tap/Cargo.toml @@ -1,17 +1,20 @@ [package] name = "linkerd-proxy-tap" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [dependencies] -http = "0.2" -hyper = { version = "0.14", features = ["http1", "http2"] } +bytes = { workspace = true } +http = { workspace = true } +http-body = { workspace = true } +hyper = { workspace = true, features = ["http1", "http2"] } +hyper-util = { workspace = true, features = ["service", "tokio", "tracing"] } futures = { version = "0.3", default-features = false } -ipnet = "2.7" -linkerd2-proxy-api = { version = "0.13", features = ["tap"] } +ipnet = "2.11" +linkerd2-proxy-api = { workspace = true, features = ["tap"] } linkerd-conditional = { path = "../../conditional" } linkerd-error = { path = "../../error" } linkerd-meshtls = { path = "../../meshtls" } @@ -20,15 +23,15 @@ 
linkerd-proxy-http = { path = "../http" } linkerd-stack = { path = "../../stack" } linkerd-tls = { path = "../../tls" } parking_lot = "0.12" -prost-types = "0.12" -rand = { version = "0.8" } -thiserror = "1" +prost-types = { workspace = true } +rand = { version = "0.9" } +thiserror = "2" tokio = { version = "1", features = ["time"] } -tower = { version = "0.4", default-features = false } -tonic = { version = "0.10", default-features = false } +tower = { workspace = true, default-features = false } +tonic = { workspace = true, default-features = false } tracing = "0.1" pin-project = "1" [dev-dependencies] -linkerd2-proxy-api = { version = "0.13", features = ["arbitrary"] } +linkerd2-proxy-api = { workspace = true, features = ["arbitrary"] } quickcheck = { version = "1", default-features = false } diff --git a/linkerd/proxy/tap/src/accept.rs b/linkerd/proxy/tap/src/accept.rs index 57f9411958..38e61b4d13 100644 --- a/linkerd/proxy/tap/src/accept.rs +++ b/linkerd/proxy/tap/src/accept.rs @@ -1,11 +1,11 @@ use crate::grpc::Server; use futures::future; +use hyper_util::rt::tokio::TokioExecutor; use linkerd2_proxy_api::tap::tap_server::{Tap, TapServer}; use linkerd_conditional::Conditional; use linkerd_error::Error; use linkerd_io as io; use linkerd_meshtls as meshtls; -use linkerd_proxy_http::TracingExecutor; use linkerd_tls as tls; use std::{ collections::HashSet, @@ -43,12 +43,12 @@ impl AcceptPermittedClients { T: Tap + Send + 'static, T::ObserveStream: Send + 'static, { + use hyper_util::{rt::TokioIo, service::TowerToHyperService}; let svc = TapServer::new(tap); Box::pin(async move { - hyper::server::conn::Http::new() - .with_executor(TracingExecutor) - .http2_only(true) - .serve_connection(io, svc) + hyper::server::conn::http2::Builder::new(TokioExecutor::new()) + .timer(hyper_util::rt::TokioTimer::new()) + .serve_connection(TokioIo::new(io), TowerToHyperService::new(svc)) .await .map_err(Into::into) }) diff --git a/linkerd/proxy/tap/src/grpc/match_.rs 
b/linkerd/proxy/tap/src/grpc/match_.rs index 3f5f50492f..eecca8bc58 100644 --- a/linkerd/proxy/tap/src/grpc/match_.rs +++ b/linkerd/proxy/tap/src/grpc/match_.rs @@ -281,10 +281,10 @@ impl TryFrom for HttpMatch { m.r#match.ok_or(InvalidMatch::Empty).and_then(|m| match m { Pb::Scheme(s) => s.r#type.ok_or(InvalidMatch::Empty).and_then(|s| match s { - Type::Registered(reg) if reg == Registered::Http.into() => { + Type::Registered(reg) if reg == i32::from(Registered::Http) => { Ok(HttpMatch::Scheme(http::uri::Scheme::HTTP)) } - Type::Registered(reg) if reg == Registered::Https.into() => { + Type::Registered(reg) if reg == i32::from(Registered::Https) => { Ok(HttpMatch::Scheme(http::uri::Scheme::HTTPS)) } Type::Registered(_) => Err(InvalidMatch::InvalidScheme), diff --git a/linkerd/proxy/tap/src/grpc/server.rs b/linkerd/proxy/tap/src/grpc/server.rs index 77e6a64c49..3f5838e37f 100644 --- a/linkerd/proxy/tap/src/grpc/server.rs +++ b/linkerd/proxy/tap/src/grpc/server.rs @@ -1,8 +1,9 @@ use super::match_::Match; use crate::{iface, Inspect, Registry}; +use bytes::Buf; use futures::ready; use futures::stream::Stream; -use hyper::body::{Buf, HttpBody}; +use http_body::Body; use linkerd2_proxy_api::{http_types, tap as api}; use linkerd_conditional::Conditional; use linkerd_proxy_http::HasH2Reason; @@ -236,7 +237,7 @@ impl iface::Tap for Tap { inspect: &I, ) -> Option<(TapRequestPayload, TapResponse)> where - B: HttpBody, + B: Body, I: Inspect, { let shared = self.shared.upgrade()?; @@ -310,7 +311,7 @@ impl iface::Tap for Tap { }; let init = api::tap_event::http::RequestInit { - id: Some(id.clone()), + id: Some(id), method: Some(req.method().clone().into()), scheme: req.uri().scheme().map(http_types::Scheme::from), authority, @@ -346,7 +347,7 @@ impl iface::Tap for Tap { impl iface::TapResponse for TapResponse { type TapPayload = TapResponsePayload; - fn tap(self, rsp: &http::Response) -> TapResponsePayload { + fn tap(self, rsp: &http::Response) -> TapResponsePayload { let 
response_init_at = Instant::now(); let headers = if self.extract_headers { @@ -365,7 +366,7 @@ impl iface::TapResponse for TapResponse { }; let since_request_init = response_init_at.saturating_duration_since(self.request_init_at); let init = api::tap_event::http::Event::ResponseInit(api::tap_event::http::ResponseInit { - id: Some(self.tap.id.clone()), + id: Some(self.tap.id), since_request_init: pb_duration(since_request_init), http_status: rsp.status().as_u16().into(), headers, @@ -399,7 +400,7 @@ impl iface::TapResponse for TapResponse { let reason = err.h2_reason(); let since_request_init = response_end_at.saturating_duration_since(self.request_init_at); let end = api::tap_event::http::Event::ResponseEnd(api::tap_event::http::ResponseEnd { - id: Some(self.tap.id.clone()), + id: Some(self.tap.id), since_request_init: pb_duration(since_request_init), since_response_init: None, response_bytes: 0, diff --git a/linkerd/proxy/tap/src/lib.rs b/linkerd/proxy/tap/src/lib.rs index 3f1f286bff..56b33c7436 100644 --- a/linkerd/proxy/tap/src/lib.rs +++ b/linkerd/proxy/tap/src/lib.rs @@ -72,7 +72,7 @@ pub trait Inspect { /// for Registry/Layer/grpc, but need not be implemented outside of the `tap` /// module. mod iface { - use hyper::body::{Buf, HttpBody}; + use bytes::Buf; use linkerd_proxy_http::HasH2Reason; pub trait Tap: Clone { @@ -87,7 +87,7 @@ mod iface { /// /// If the tap cannot be initialized, for instance because the tap has /// completed or been canceled, then `None` is returned. - fn tap( + fn tap( &mut self, req: &http::Request, inspect: &I, @@ -106,7 +106,7 @@ mod iface { type TapPayload: TapPayload; /// Record a response and obtain a handle to tap its body. - fn tap(self, rsp: &http::Response) -> Self::TapPayload; + fn tap(self, rsp: &http::Response) -> Self::TapPayload; /// Record a service failure. 
fn fail(self, error: &E); diff --git a/linkerd/proxy/tap/src/service.rs b/linkerd/proxy/tap/src/service.rs index fc70d5eee1..dc2b4ee8a6 100644 --- a/linkerd/proxy/tap/src/service.rs +++ b/linkerd/proxy/tap/src/service.rs @@ -2,7 +2,6 @@ use super::iface::{Tap, TapPayload, TapResponse}; use super::registry::Registry; use super::Inspect; use futures::ready; -use hyper::body::HttpBody; use linkerd_proxy_http::HasH2Reason; use linkerd_stack::{layer, NewService}; use pin_project::{pin_project, pinned_drop}; @@ -30,7 +29,7 @@ pub struct TapHttp { #[derive(Debug)] pub struct Body where - B: HttpBody, + B: linkerd_proxy_http::Body, B::Error: HasH2Reason, T: TapPayload, { @@ -79,9 +78,9 @@ where T::TapResponse: Send + 'static, T::TapRequestPayload: Send + 'static, T::TapResponsePayload: Send + 'static, - A: HttpBody, + A: linkerd_proxy_http::Body, A::Error: HasH2Reason, - B: HttpBody, + B: linkerd_proxy_http::Body, B::Error: HasH2Reason, { type Response = http::Response>; @@ -117,14 +116,8 @@ where Ok(rsp) => { // Tap the response headers and use the response // body taps to decorate the response body. - let taps = rsp_taps.drain(..).map(|t| t.tap(&rsp)).collect(); - let rsp = rsp.map(move |inner| { - let mut body = Body { inner, taps }; - if body.is_end_stream() { - eos(&mut body.taps, None); - } - body - }); + let taps = rsp_taps.drain(..).map(|t| t.tap(&rsp)).collect::>(); + let rsp = rsp.map(|inner| Body::new(inner, taps)); Ok(rsp) } Err(e) => { @@ -140,10 +133,26 @@ where // === Body === +impl Body +where + B: linkerd_proxy_http::Body, + B::Error: HasH2Reason, + T: TapPayload, +{ + fn new(inner: B, mut taps: Vec) -> Self { + // If the body is already finished, record the end of the stream. + if inner.is_end_stream() { + taps.drain(..).for_each(|t| t.eos(None)); + } + + Self { inner, taps } + } +} + // `T` need not implement Default. 
impl Default for Body where - B: HttpBody + Default, + B: linkerd_proxy_http::Body + Default, B::Error: HasH2Reason, T: TapPayload, { @@ -155,9 +164,9 @@ where } } -impl HttpBody for Body +impl linkerd_proxy_http::Body for Body where - B: HttpBody, + B: linkerd_proxy_http::Body, B::Error: HasH2Reason, T: TapPayload + Send + 'static, { @@ -169,88 +178,56 @@ where self.inner.is_end_stream() } - fn poll_data( - mut self: Pin<&mut Self>, + fn poll_frame( + self: Pin<&mut Self>, cx: &mut Context<'_>, - ) -> Poll>> { - let frame = ready!(self.as_mut().project().inner.poll_data(cx)); - match frame { - Some(Err(e)) => { - let e = self.as_mut().project().err(e); - Poll::Ready(Some(Err(e))) - } - Some(Ok(body)) => { - self.as_mut().project().data(Some(&body)); - Poll::Ready(Some(Ok(body))) + ) -> Poll, Self::Error>>> { + let BodyProj { mut inner, taps } = self.project(); + + // Poll the inner body for the next frame. + let frame = match ready!(inner.as_mut().poll_frame(cx)) { + Some(Ok(frame)) => frame, + Some(Err(error)) => { + // If an error occurred, we have reached the end of the stream. + taps.drain(..).for_each(|t| t.fail(&error)); + return Poll::Ready(Some(Err(error))); } None => { - self.as_mut().project().data(None); - Poll::Ready(None) + // If there is not another frame, we have reached the end of the stream. 
+ taps.drain(..).for_each(|t| t.eos(None)); + return Poll::Ready(None); } - } - } + }; - fn poll_trailers( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll, B::Error>> { - let trailers = ready!(self.as_mut().project().inner.poll_trailers(cx)) - .map_err(|e| self.as_mut().project().err(e))?; - self.as_mut().project().eos(trailers.as_ref()); - Poll::Ready(Ok(trailers)) - } - - #[inline] - fn size_hint(&self) -> hyper::body::SizeHint { - self.inner.size_hint() - } -} - -impl BodyProj<'_, B, T> -where - B: HttpBody, - B::Error: HasH2Reason, - T: TapPayload, -{ - fn data(&mut self, frame: Option<&B::Data>) { - if let Some(f) = frame { - for ref mut tap in self.taps.iter_mut() { - tap.data(f); - } + // If we received a trailers frame, we have reached the end of the stream. + if let trailers @ Some(_) = frame.trailers_ref() { + taps.drain(..).for_each(|t| t.eos(trailers)); + return Poll::Ready(Some(Ok(frame))); } - if self.inner.is_end_stream() { - self.eos(None); + // Otherwise, we *may* reached the end of the stream. If so, there are no trailers. + if inner.is_end_stream() { + taps.drain(..).for_each(|t| t.eos(None)); } - } - fn eos(&mut self, trailers: Option<&http::HeaderMap>) { - eos(self.taps, trailers) + Poll::Ready(Some(Ok(frame))) } - fn err(&mut self, error: B::Error) -> B::Error { - for tap in self.taps.drain(..) { - tap.fail(&error); - } - - error + #[inline] + fn size_hint(&self) -> hyper::body::SizeHint { + self.inner.size_hint() } } #[pinned_drop] impl PinnedDrop for Body where - B: HttpBody, + B: linkerd_proxy_http::Body, B::Error: HasH2Reason, T: TapPayload, { fn drop(self: Pin<&mut Self>) { - self.project().eos(None); - } -} - -fn eos(taps: &mut Vec, trailers: Option<&http::HeaderMap>) { - for tap in taps.drain(..) 
{ - tap.eos(trailers); + let BodyProj { inner: _, taps } = self.project(); + taps.drain(..).for_each(|t| t.eos(None)); } } diff --git a/linkerd/proxy/tcp/Cargo.toml b/linkerd/proxy/tcp/Cargo.toml index 3d7107eee8..dbe9f17373 100644 --- a/linkerd/proxy/tcp/Cargo.toml +++ b/linkerd/proxy/tcp/Cargo.toml @@ -1,11 +1,10 @@ [package] name = "linkerd-proxy-tcp" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false - +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [dependencies] futures = { version = "0.3", default-features = false } @@ -13,7 +12,7 @@ linkerd-duplex = { path = "../../duplex" } linkerd-error = { path = "../../error" } linkerd-proxy-balance = { path = "../../proxy/balance" } linkerd-stack = { path = "../../stack" } -rand = "0.8" +rand = "0.9" tokio = { version = "1" } -tower = { version = "0.4.13", default-features = false } +tower = { workspace = true, default-features = false } pin-project = "1" diff --git a/linkerd/proxy/transport/Cargo.toml b/linkerd/proxy/transport/Cargo.toml index 5d8c7e2952..4baf0caa1e 100644 --- a/linkerd/proxy/transport/Cargo.toml +++ b/linkerd/proxy/transport/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "linkerd-proxy-transport" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } description = """ Transport-level implementations that rely on core proxy infrastructure """ @@ -15,7 +15,7 @@ linkerd-error = { path = "../../error" } linkerd-io = { path = "../../io" } linkerd-stack = { path = "../../stack" } socket2 = "0.5" -thiserror = "1" +thiserror = "2" tokio = { version = "1", features = ["macros", "net"] } tokio-stream = { version = "0.1", 
features = ["net"] } tracing = "0.1" diff --git a/linkerd/proxy/transport/src/addrs.rs b/linkerd/proxy/transport/src/addrs.rs index d4f4160993..1f07f46687 100644 --- a/linkerd/proxy/transport/src/addrs.rs +++ b/linkerd/proxy/transport/src/addrs.rs @@ -133,6 +133,12 @@ impl fmt::Display for ServerAddr { } } +impl From for ServerAddr { + fn from(OrigDstAddr(addr): OrigDstAddr) -> ServerAddr { + ServerAddr(addr) + } +} + impl ServerAddr { pub fn ip(&self) -> IpAddr { self.0.ip() diff --git a/linkerd/proxy/transport/src/connect.rs b/linkerd/proxy/transport/src/connect.rs index 4989ac2129..d11b3753c6 100644 --- a/linkerd/proxy/transport/src/connect.rs +++ b/linkerd/proxy/transport/src/connect.rs @@ -1,4 +1,4 @@ -use crate::{ClientAddr, Keepalive, Local, Remote, ServerAddr}; +use crate::{ClientAddr, Keepalive, Local, Remote, ServerAddr, UserTimeout}; use linkerd_io as io; use linkerd_stack::{Param, Service}; use std::{ @@ -12,11 +12,15 @@ use tracing::debug; #[derive(Copy, Clone, Debug)] pub struct ConnectTcp { keepalive: Keepalive, + user_timeout: UserTimeout, } impl ConnectTcp { - pub fn new(keepalive: Keepalive) -> Self { - Self { keepalive } + pub fn new(keepalive: Keepalive, user_timeout: UserTimeout) -> Self { + Self { + keepalive, + user_timeout, + } } } @@ -31,12 +35,14 @@ impl>> Service for ConnectTcp { fn call(&mut self, t: T) -> Self::Future { let Keepalive(keepalive) = self.keepalive; + let UserTimeout(user_timeout) = self.user_timeout; let Remote(ServerAddr(addr)) = t.param(); debug!(server.addr = %addr, "Connecting"); Box::pin(async move { let io = TcpStream::connect(&addr).await?; super::set_nodelay_or_warn(&io); let io = super::set_keepalive_or_warn(io, keepalive)?; + let io = super::set_user_timeout_or_warn(io, user_timeout)?; let local_addr = io.local_addr()?; debug!( local.addr = %local_addr, diff --git a/linkerd/proxy/transport/src/lib.rs b/linkerd/proxy/transport/src/lib.rs index 655c554ccc..6a8078bae2 100644 --- a/linkerd/proxy/transport/src/lib.rs 
+++ b/linkerd/proxy/transport/src/lib.rs @@ -36,6 +36,15 @@ impl From for Option { } } +#[derive(Copy, Clone, Debug, Default)] +pub struct UserTimeout(pub Option); + +impl From for Option { + fn from(UserTimeout(duration): UserTimeout) -> Option { + duration + } +} + // Misc. fn set_nodelay_or_warn(socket: &TcpStream) { @@ -61,3 +70,30 @@ fn set_keepalive_or_warn( let stream: std::net::TcpStream = socket2::Socket::into(sock); tokio::net::TcpStream::from_std(stream) } + +#[cfg(target_os = "linux")] +fn set_user_timeout_or_warn( + tcp: TcpStream, + user_timeout: Option, +) -> io::Result { + let sock = { + let stream = tokio::net::TcpStream::into_std(tcp)?; + socket2::Socket::from(stream) + }; + if let Err(e) = sock.set_tcp_user_timeout(user_timeout) { + tracing::warn!("failed to set user timeout: {}", e); + } + let stream: std::net::TcpStream = socket2::Socket::into(sock); + tokio::net::TcpStream::from_std(stream) +} + +#[cfg(not(target_os = "linux"))] +fn set_user_timeout_or_warn( + tcp: TcpStream, + user_timeout: Option, +) -> io::Result { + if user_timeout.is_some() { + tracing::debug!("TCP_USER_TIMEOUT is supported on Linux only."); + } + Ok(tcp) +} diff --git a/linkerd/proxy/transport/src/listen.rs b/linkerd/proxy/transport/src/listen.rs index 1109a7f913..fe2031a38b 100644 --- a/linkerd/proxy/transport/src/listen.rs +++ b/linkerd/proxy/transport/src/listen.rs @@ -1,6 +1,6 @@ mod dual_bind; -use crate::{addrs::*, Keepalive}; +use crate::{addrs::*, Keepalive, UserTimeout}; use dual_bind::DualBind; use futures::prelude::*; use linkerd_error::Result; @@ -49,6 +49,10 @@ struct AcceptError(#[source] io::Error); #[error("failed to set TCP keepalive: {0}")] struct KeepaliveError(#[source] io::Error); +#[derive(Debug, Error)] +#[error("failed to set TCP User Timeout: {0}")] +struct UserTimeoutError(#[source] io::Error); + #[derive(Debug, Error)] #[error("failed to obtain peer address: {0}")] struct PeerAddrError(#[source] io::Error); @@ -67,7 +71,7 @@ impl BindTcp { impl 
Bind for BindTcp where - T: Param + Param, + T: Param + Param + Param, { type Addrs = Addrs; type BoundAddrs = Local; @@ -84,10 +88,13 @@ where }; let server = Local(ServerAddr(listen.local_addr()?)); let Keepalive(keepalive) = params.param(); + let UserTimeout(user_timeout) = params.param(); let accept = TcpListenerStream::new(listen).map(move |res| { let tcp = res.map_err(AcceptError)?; super::set_nodelay_or_warn(&tcp); let tcp = super::set_keepalive_or_warn(tcp, keepalive).map_err(KeepaliveError)?; + let tcp = + super::set_user_timeout_or_warn(tcp, user_timeout).map_err(UserTimeoutError)?; fn ipv4_mapped(orig: SocketAddr) -> SocketAddr { if let SocketAddr::V6(v6) = orig { diff --git a/linkerd/proxy/transport/src/listen/dual_bind.rs b/linkerd/proxy/transport/src/listen/dual_bind.rs index 6f7d238e28..f9dde45625 100644 --- a/linkerd/proxy/transport/src/listen/dual_bind.rs +++ b/linkerd/proxy/transport/src/listen/dual_bind.rs @@ -1,4 +1,4 @@ -use crate::{addrs::DualListenAddr, listen::Bind, Keepalive, ListenAddr}; +use crate::{addrs::DualListenAddr, listen::Bind, Keepalive, ListenAddr, UserTimeout}; use futures::Stream; use linkerd_error::Result; use linkerd_stack::Param; @@ -26,7 +26,7 @@ impl From for DualBind { impl Bind for DualBind where - T: Param + Param + Clone, + T: Param + Param + Param + Clone, B: Bind, Io = TcpStream> + Clone + 'static, { type Addrs = B::Addrs; @@ -62,6 +62,12 @@ impl> Param for Listen { } } +impl> Param for Listen { + fn param(&self) -> UserTimeout { + self.parent.param() + } +} + impl Param for Listen { fn param(&self) -> ListenAddr { ListenAddr(self.addr) diff --git a/linkerd/proxy/transport/src/orig_dst.rs b/linkerd/proxy/transport/src/orig_dst.rs index 10caa7ef99..a3aa5c3009 100644 --- a/linkerd/proxy/transport/src/orig_dst.rs +++ b/linkerd/proxy/transport/src/orig_dst.rs @@ -83,14 +83,8 @@ where let incoming = incoming.map(|res| { let (inner, tcp) = res?; - let orig_dst = match inner.param() { - // IPv4-mapped IPv6 addresses are 
unwrapped by BindTcp::bind() and received here as - // SocketAddr::V4. We must call getsockopt with IPv4 constants (via - // orig_dst_addr_v4) even if it originally was an IPv6 - Remote(ClientAddr(SocketAddr::V4(_))) => orig_dst_addr_v4(&tcp)?, - Remote(ClientAddr(SocketAddr::V6(_))) => orig_dst_addr_v6(&tcp)?, - }; - let orig_dst = OrigDstAddr(orig_dst); + let Remote(client_addr) = inner.param(); + let (orig_dst, tcp) = orig_dst(tcp, client_addr)?; let addrs = Addrs { inner, orig_dst }; Ok((addrs, tcp)) }); @@ -99,139 +93,26 @@ where } } -#[cfg(target_os = "linux")] -#[allow(unsafe_code)] -fn orig_dst_addr_v4(sock: &TcpStream) -> io::Result { - use std::os::unix::io::AsRawFd; - - let fd = sock.as_raw_fd(); - unsafe { linux::so_original_dst_v4(fd) } -} - -#[cfg(target_os = "linux")] -#[allow(unsafe_code)] -fn orig_dst_addr_v6(sock: &TcpStream) -> io::Result { - use std::os::unix::io::AsRawFd; - - let fd = sock.as_raw_fd(); - unsafe { linux::so_original_dst_v6(fd) } -} - -#[cfg(not(target_os = "linux"))] -fn orig_dst_addr_v4(_: &TcpStream) -> io::Result { - Err(io::Error::new( - io::ErrorKind::Other, - "SO_ORIGINAL_DST not supported on this operating system", - )) -} - -#[cfg(not(target_os = "linux"))] -fn orig_dst_addr_v6(_: &TcpStream) -> io::Result { - Err(io::Error::new( - io::ErrorKind::Other, - "SO_ORIGINAL_DST not supported on this operating system", - )) -} - -#[cfg(target_os = "linux")] -#[allow(unsafe_code)] -mod linux { - use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; - use std::os::unix::io::RawFd; - use std::{io, mem}; - - pub unsafe fn so_original_dst(fd: RawFd, level: i32, optname: i32) -> io::Result { - let mut sockaddr: libc::sockaddr_storage = mem::zeroed(); - let mut sockaddr_len: libc::socklen_t = mem::size_of::() as u32; - - let ret = libc::getsockopt( - fd, - level, - optname, - &mut sockaddr as *mut _ as *mut _, - &mut sockaddr_len as *mut _ as *mut _, - ); - if ret != 0 { - return Err(io::Error::last_os_error()); 
- } - - mk_addr(&sockaddr, sockaddr_len) - } - - pub unsafe fn so_original_dst_v4(fd: RawFd) -> io::Result { - so_original_dst(fd, libc::SOL_IP, libc::SO_ORIGINAL_DST) - } - - pub unsafe fn so_original_dst_v6(fd: RawFd) -> io::Result { - so_original_dst(fd, libc::SOL_IPV6, libc::IP6T_SO_ORIGINAL_DST) - } - - // Borrowed with love from net2-rs - // https://github.com/rust-lang-nursery/net2-rs/blob/1b4cb4fb05fbad750b271f38221eab583b666e5e/src/socket.rs#L103 - // - // Copyright (c) 2014 The Rust Project Developers - fn mk_addr(storage: &libc::sockaddr_storage, len: libc::socklen_t) -> io::Result { - match storage.ss_family as libc::c_int { - libc::AF_INET => { - assert!(len as usize >= mem::size_of::()); - - let sa = { - let sa = storage as *const _ as *const libc::sockaddr_in; - unsafe { *sa } - }; - - let bits = ntoh32(sa.sin_addr.s_addr); - let ip = Ipv4Addr::new( - (bits >> 24) as u8, - (bits >> 16) as u8, - (bits >> 8) as u8, - bits as u8, - ); - let port = sa.sin_port; - Ok(SocketAddr::V4(SocketAddrV4::new(ip, ntoh16(port)))) - } - libc::AF_INET6 => { - assert!(len as usize >= mem::size_of::()); - - let sa = { - let sa = storage as *const _ as *const libc::sockaddr_in6; - unsafe { *sa } - }; - - let arr = sa.sin6_addr.s6_addr; - let ip = Ipv6Addr::new( - (arr[0] as u16) << 8 | (arr[1] as u16), - (arr[2] as u16) << 8 | (arr[3] as u16), - (arr[4] as u16) << 8 | (arr[5] as u16), - (arr[6] as u16) << 8 | (arr[7] as u16), - (arr[8] as u16) << 8 | (arr[9] as u16), - (arr[10] as u16) << 8 | (arr[11] as u16), - (arr[12] as u16) << 8 | (arr[13] as u16), - (arr[14] as u16) << 8 | (arr[15] as u16), - ); - - let port = sa.sin6_port; - let flowinfo = sa.sin6_flowinfo; - let scope_id = sa.sin6_scope_id; - Ok(SocketAddr::V6(SocketAddrV6::new( - ip, - ntoh16(port), - flowinfo, - scope_id, - ))) - } - _ => Err(io::Error::new( - io::ErrorKind::InvalidInput, - "invalid argument", - )), - } - } - - fn ntoh16(i: u16) -> u16 { - ::from_be(i) - } - - fn ntoh32(i: u32) -> u32 { - 
::from_be(i) - } +fn orig_dst(sock: TcpStream, client_addr: ClientAddr) -> io::Result<(OrigDstAddr, TcpStream)> { + let sock = { + let stream = tokio::net::TcpStream::into_std(sock)?; + socket2::Socket::from(stream) + }; + + let orig_dst = match client_addr { + // IPv4-mapped IPv6 addresses are unwrapped by BindTcp::bind() and received here as + // SocketAddr::V4. We must call getsockopt with IPv4 constants (via + // orig_dst_addr_v4) even if it originally was an IPv6 + ClientAddr(SocketAddr::V4(_)) => sock.original_dst()?, + ClientAddr(SocketAddr::V6(_)) => sock.original_dst_ipv6()?, + }; + + let orig_dst = orig_dst.as_socket().ok_or(io::Error::new( + io::ErrorKind::InvalidInput, + "Invalid address format", + ))?; + + let stream: std::net::TcpStream = socket2::Socket::into(sock); + let stream = tokio::net::TcpStream::from_std(stream)?; + Ok((OrigDstAddr(orig_dst), stream)) } diff --git a/linkerd/reconnect/Cargo.toml b/linkerd/reconnect/Cargo.toml index 6b280cfc15..9f75484d97 100644 --- a/linkerd/reconnect/Cargo.toml +++ b/linkerd/reconnect/Cargo.toml @@ -1,16 +1,16 @@ [package] name = "linkerd-reconnect" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [dependencies] linkerd-error = { path = "../error" } linkerd-stack = { path = "../stack" } futures = { version = "0.3", default-features = false } -tower = { version = "0.4", default-features = false } +tower = { workspace = true, default-features = false } tracing = "0.1" pin-project = "1" @@ -19,4 +19,4 @@ linkerd-tracing = { path = "../tracing" } tokio = { version = "1", features = ["macros", "rt", "time"] } tokio-stream = { version = "0.1", features = ["time"] } tokio-test = "0.4" -tower-test = "0.4" +tower-test = { workspace = true } diff --git a/linkerd/retry/Cargo.toml 
b/linkerd/retry/Cargo.toml index 9f20307191..abd52c5920 100644 --- a/linkerd/retry/Cargo.toml +++ b/linkerd/retry/Cargo.toml @@ -1,14 +1,14 @@ [package] name = "linkerd-retry" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [dependencies] futures = { version = "0.3", default-features = false } linkerd-error = { path = "../error" } linkerd-stack = { path = "../stack" } -tower = { version = "0.4", default-features = false, features = ["retry"] } +tower = { workspace = true, default-features = false, features = ["retry"] } tracing = "0.1" diff --git a/linkerd/retry/src/lib.rs b/linkerd/retry/src/lib.rs index ad580f37bb..1b4f27f180 100644 --- a/linkerd/retry/src/lib.rs +++ b/linkerd/retry/src/lib.rs @@ -2,20 +2,24 @@ #![forbid(unsafe_code)] use futures::future; -use linkerd_error::Error; +use linkerd_error::{Error, Result}; use linkerd_stack::{ layer::{self, Layer}, - proxy::{Proxy, ProxyService}, + proxy::Proxy, util::AndThen, - Either, NewService, Oneshot, Service, ServiceExt, + Either, NewService, Service, }; use std::{ future::Future, task::{Context, Poll}, }; -pub use tower::retry::{budget::Budget, Policy}; use tracing::trace; +pub use tower::retry::{ + budget::{Budget, TpsBudget}, + Policy, +}; + /// A strategy for obtaining per-target retry polices. pub trait NewPolicy { type Policy; @@ -25,8 +29,8 @@ pub trait NewPolicy { /// An extension to [`tower::retry::Policy`] that adds a method to prepare a /// request to be retried, possibly changing its type. -pub trait PrepareRetry: - tower::retry::Policy +pub trait PrepareRetry: + Sized + tower::retry::Policy { /// A request type that can be retried. 
/// @@ -44,18 +48,18 @@ pub trait PrepareRetry: /// /// If this retry policy doesn't need to asynchronously modify the response /// type, this can be `futures::future::Ready`; - type ResponseFuture: Future>; + type ResponseFuture: Future>; /// Prepare an initial request for a potential retry. /// - /// If the request is retryable, this should return `Either::A`. Otherwise, - /// if this returns `Either::B`, the request will not be retried if it + /// If the request is retryable, this should return `Either::Left`. Otherwise, + /// if this returns `Either::Right`, the request will not be retried if it /// fails. /// /// If retrying requires a specific request type other than the input type /// to this policy, this function may transform the request into a request /// of that type. - fn prepare_request(&self, req: Req) -> Either; + fn prepare_request(self, req: Req) -> Either<(Self, Self::RetryRequest), Req>; /// Prepare a response for a potential retry. /// @@ -82,48 +86,6 @@ pub struct Retry { proxy: O, } -#[derive(Clone, Debug)] -pub struct NewRetryLayer { - new_policy: P, - proxy: O, -} - -// === impl NewRetryLayer === -pub fn layer

(new_policy: P) -> NewRetryLayer

{ - NewRetryLayer { - new_policy, - proxy: (), - } -} - -impl Layer for NewRetryLayer -where - P: Clone, - O: Clone, -{ - type Service = NewRetry; - fn layer(&self, inner: N) -> Self::Service { - NewRetry { - inner, - new_policy: self.new_policy.clone(), - proxy: self.proxy.clone(), - } - } -} - -impl

NewRetryLayer { - /// Adds a [`Proxy`] that will be applied to both the inner service and the - /// retry service. - /// - /// By default, this is the identity proxy, and does nothing. - pub fn with_proxy(self, proxy: O) -> NewRetryLayer { - NewRetryLayer { - new_policy: self.new_policy, - proxy, - } - } -} - // === impl NewRetry === impl NewRetry { @@ -164,7 +126,7 @@ type RetrySvc = tower::retry::Retry F>>; impl Service for Retry where - P: PrepareRetry + Clone, + P: PrepareRetry + Clone + std::fmt::Debug, S: Service, S: Service, S: Clone, @@ -183,7 +145,7 @@ where type Error = Error; type Future = future::Either< >::Future, - Oneshot>, P::RetryRequest>, + >>::Future, >; #[inline] @@ -192,24 +154,30 @@ where } fn call(&mut self, req: Req) -> Self::Future { - trace!(retryable = %self.policy.is_some()); - - let policy = match self.policy.as_ref() { + let (policy, req) = match self.policy.clone() { + Some(p) => match p.prepare_request(req) { + Either::Left(req) => req, + Either::Right(req) => { + return future::Either::Left(self.proxy.proxy(&mut self.inner, req)) + } + }, None => return future::Either::Left(self.proxy.proxy(&mut self.inner, req)), - Some(p) => p, }; + trace!(retryable = true, ?policy); - let retry_req = match policy.prepare_request(req) { - Either::A(retry_req) => retry_req, - Either::B(req) => return future::Either::Left(self.proxy.proxy(&mut self.inner, req)), - }; + // Take the inner service, replacing it with a clone. This allows the + // readiness from poll_ready + let pending = self.inner.clone(); + let ready = std::mem::replace(&mut self.inner, pending); + + // Wrap response bodies (e.g. with WithTrailers) so the retry policy can + // interact with it. + let inner = AndThen::new(ready, P::prepare_response as fn(Rsp) -> P::ResponseFuture); + + // Retry::poll_ready is just a pass-through to the inner service, so we + // can rely on the fact that we've taken the ready inner service handle. 
+ let mut inner = tower::retry::Retry::new(policy, inner); - let inner = AndThen::new( - self.inner.clone(), - P::prepare_response as fn(Rsp) -> P::ResponseFuture, - ); - let retry = tower::retry::Retry::new(policy.clone(), inner); - let retry = self.proxy.clone().into_service(retry); - future::Either::Right(retry.oneshot(retry_req)) + future::Either::Right(self.proxy.proxy(&mut inner, req)) } } diff --git a/linkerd/router/Cargo.toml b/linkerd/router/Cargo.toml index 2208766780..b797c93c2d 100644 --- a/linkerd/router/Cargo.toml +++ b/linkerd/router/Cargo.toml @@ -1,15 +1,16 @@ [package] name = "linkerd-router" -version = "0.1.0" -edition = "2021" -license = "Apache-2.0" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [dependencies] ahash = "0.8" futures = { version = "0.3", default-features = false } parking_lot = "0.12" -thiserror = "1" +thiserror = "2" tracing = "0.1" linkerd-error = { path = "../error" } linkerd-stack = { path = "../stack" } diff --git a/linkerd/service-profiles/Cargo.toml b/linkerd/service-profiles/Cargo.toml index ccc9af5e70..33c02a960d 100644 --- a/linkerd/service-profiles/Cargo.toml +++ b/linkerd/service-profiles/Cargo.toml @@ -1,28 +1,28 @@ [package] name = "linkerd-service-profiles" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } description = """ Implements client layers for Linkerd ServiceProfiles. 
""" [dependencies] -bytes = "1" +bytes = { workspace = true } futures = { version = "0.3", default-features = false } -http = "0.2" -http-body = "0.4" -linkerd2-proxy-api = { version = "0.13", features = ["destination"] } -once_cell = "1.17" -prost-types = "0.12" +http = { workspace = true } +http-body = { workspace = true } +linkerd2-proxy-api = { workspace = true, features = ["destination"] } +once_cell = "1.21" +prost-types = { workspace = true } regex = "1" tokio = { version = "1", features = ["macros", "rt", "sync", "time"] } tokio-stream = { version = "0.1", features = ["sync"] } -tonic = { version = "0.10", default-features = false } -tower = { version = "0.4.13", features = ["retry", "util"] } -thiserror = "1" +tonic = { workspace = true, default-features = false } +tower = { workspace = true, features = ["retry", "util"] } +thiserror = "2" tracing = "0.1" linkerd-addr = { path = "../addr" } @@ -35,5 +35,5 @@ linkerd-tonic-stream = { path = "../tonic-stream" } linkerd-tonic-watch = { path = "../tonic-watch" } [dev-dependencies] -linkerd2-proxy-api = { version = "0.13", features = ["arbitrary"] } +linkerd2-proxy-api = { workspace = true, features = ["arbitrary"] } quickcheck = { version = "1", default-features = false } diff --git a/linkerd/service-profiles/src/client.rs b/linkerd/service-profiles/src/client.rs index b360890b16..552b7584f2 100644 --- a/linkerd/service-profiles/src/client.rs +++ b/linkerd/service-profiles/src/client.rs @@ -33,7 +33,7 @@ impl Client where S: GrpcService + Clone + Send + 'static, S::ResponseBody: Send + Sync, - S::ResponseBody: Default + Body + Send + 'static, + S::ResponseBody: Body + Send + 'static, ::Error: Into> + Send, S::Future: Send, @@ -65,7 +65,7 @@ impl Service for Client where T: Param, S: GrpcService + Clone + Send + 'static, - S::ResponseBody: Default + Body + Send + 'static, + S::ResponseBody: Body + Send + 'static, ::Error: Into> + Send, S::Future: Send, @@ -112,7 +112,7 @@ type InnerFuture = impl Inner where S: 
GrpcService + Clone + Send + 'static, - S::ResponseBody: Default + Body + Send + 'static, + S::ResponseBody: Body + Send + 'static, ::Error: Into> + Send, S::Future: Send, @@ -129,7 +129,7 @@ where impl Service for Inner where S: GrpcService + Clone + Send + 'static, - S::ResponseBody: Default + Body + Send + 'static, + S::ResponseBody: Body + Send + 'static, ::Error: Into> + Send, S::Future: Send, diff --git a/linkerd/service-profiles/src/http.rs b/linkerd/service-profiles/src/http.rs index 74fa9c1952..a778ad5bd9 100644 --- a/linkerd/service-profiles/src/http.rs +++ b/linkerd/service-profiles/src/http.rs @@ -7,7 +7,7 @@ use std::{ sync::Arc, time::Duration, }; -use tower::retry::budget::Budget; +use tower::retry::budget::TpsBudget; pub use self::proxy::NewProxyRouter; @@ -56,7 +56,7 @@ pub enum ResponseMatch { #[derive(Clone, Debug)] pub struct Retries { - budget: Arc, + budget: Arc, } #[derive(Clone, Default)] @@ -107,7 +107,7 @@ impl Route { self.timeout } - pub fn set_retries(&mut self, budget: Arc) { + pub fn set_retries(&mut self, budget: Arc) { self.retries = Some(Retries { budget }); } @@ -201,7 +201,7 @@ impl ResponseMatch { // === impl Retries === impl Retries { - pub fn budget(&self) -> &Arc { + pub fn budget(&self) -> &Arc { &self.budget } } diff --git a/linkerd/service-profiles/src/http/proxy.rs b/linkerd/service-profiles/src/http/proxy.rs index 09ed44db7c..5f30c32894 100644 --- a/linkerd/service-profiles/src/http/proxy.rs +++ b/linkerd/service-profiles/src/http/proxy.rs @@ -94,6 +94,7 @@ where // If the routes have been updated, update the cache. if let Poll::Ready(Some(Profile { http_routes, .. 
})) = self.rx.poll_next_unpin(cx) { debug!(routes = %http_routes.len(), "Updating HTTP routes"); + #[allow(clippy::mutable_key_type)] let routes = http_routes .iter() .map(|(_, r)| r.clone()) diff --git a/linkerd/service-profiles/src/lib.rs b/linkerd/service-profiles/src/lib.rs index 7dff551522..38fd511295 100644 --- a/linkerd/service-profiles/src/lib.rs +++ b/linkerd/service-profiles/src/lib.rs @@ -193,6 +193,12 @@ impl Profile { pub fn has_routes_or_targets(&self) -> bool { !self.http_routes.is_empty() || !self.targets.is_empty() } + + /// Returns `true` if this profile provides configuration that should + /// override opaque client policy configuration. + pub fn has_targets(&self) -> bool { + !self.targets.is_empty() + } } // === impl LookupAddr === diff --git a/linkerd/service-profiles/src/proto.rs b/linkerd/service-profiles/src/proto.rs index 951cdc37ba..762f382578 100644 --- a/linkerd/service-profiles/src/proto.rs +++ b/linkerd/service-profiles/src/proto.rs @@ -5,7 +5,7 @@ use linkerd_dns_name::Name; use linkerd_proxy_api_resolve::pb as resolve; use regex::Regex; use std::{str::FromStr, sync::Arc, time::Duration}; -use tower::retry::budget::Budget; +use tower::retry::budget::TpsBudget; use tracing::warn; pub(super) fn convert_profile(proto: api::DestinationProfile, port: u16) -> Profile { @@ -36,7 +36,7 @@ pub(super) fn convert_profile(proto: api::DestinationProfile, port: u16) -> Prof fn convert_route( orig: api::Route, - retry_budget: Option<&Arc>, + retry_budget: Option<&Arc>, ) -> Option<(http::RequestMatch, http::Route)> { let req_match = orig.condition.and_then(convert_req_match)?; let rsp_classes = orig @@ -65,7 +65,7 @@ fn convert_dst_override(orig: api::WeightedDst) -> Option { }) } -fn set_route_retry(route: &mut http::Route, retry_budget: Option<&Arc>) { +fn set_route_retry(route: &mut http::Route, retry_budget: Option<&Arc>) { let budget = match retry_budget { Some(budget) => budget.clone(), None => { @@ -170,7 +170,7 @@ fn convert_rsp_match(orig: 
api::ResponseMatch) -> Option { Some(m) } -fn convert_retry_budget(orig: api::RetryBudget) -> Option> { +fn convert_retry_budget(orig: api::RetryBudget) -> Option> { let min_retries = if orig.min_retries_per_second <= i32::MAX as u32 { orig.min_retries_per_second } else { @@ -217,7 +217,7 @@ fn convert_retry_budget(orig: api::RetryBudget) -> Option> { } }; - Some(Arc::new(Budget::new(ttl, min_retries, retry_ratio))) + Some(Arc::new(TpsBudget::new(ttl, min_retries, retry_ratio))) } #[cfg(test)] diff --git a/linkerd/signal/Cargo.toml b/linkerd/signal/Cargo.toml index 3f87915d0e..04ca9ebf03 100644 --- a/linkerd/signal/Cargo.toml +++ b/linkerd/signal/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "linkerd-signal" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [dependencies] tokio = { version = "1", features = ["macros", "signal"] } diff --git a/linkerd/stack/Cargo.toml b/linkerd/stack/Cargo.toml index 62c2c0c007..581c08d9ed 100644 --- a/linkerd/stack/Cargo.toml +++ b/linkerd/stack/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "linkerd-stack" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } description = """ Utilities for composing Tower services. 
""" @@ -17,15 +17,15 @@ futures = { version = "0.3", default-features = false } linkerd-error = { path = "../error" } parking_lot = "0.12" pin-project = "1" -thiserror = "1" +thiserror = "2" tokio = { version = "1", features = ["macros", "rt", "sync", "time"] } tokio-util = { version = "0.7" } -tower = { version = "0.4", features = ["buffer", "filter", "spawn-ready", "util"] } +tower = { workspace = true, features = ["buffer", "filter", "spawn-ready", "util"] } tracing = "0.1" [dev-dependencies] linkerd-tracing = { path = "../tracing", features = ["ansi"] } -tower-test = "0.4" +tower-test = { workspace = true } tokio-test = "0.4" tokio = { version = "1", features = ["rt-multi-thread", "time", "macros"] } -tower = { version = "0.4", features = ["buffer", "filter", "util"] } +tower = { workspace = true, features = ["buffer", "filter", "util"] } diff --git a/linkerd/stack/metrics/Cargo.toml b/linkerd/stack/metrics/Cargo.toml index 7854005d13..6b30c7023b 100644 --- a/linkerd/stack/metrics/Cargo.toml +++ b/linkerd/stack/metrics/Cargo.toml @@ -1,18 +1,18 @@ [package] name = "linkerd-stack-metrics" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [dependencies] linkerd-metrics = { path = "../../metrics" } parking_lot = "0.12" -tower = { version = "0.4", default-features = false } +tower = { workspace = true, default-features = false } tokio = { version = "1", features = ["time"] } [dev-dependencies] tokio = { version = "1", features = ["macros"] } tokio-test = "0.4" -tower-test = "0.4" +tower-test = { workspace = true } diff --git a/linkerd/stack/src/either.rs b/linkerd/stack/src/either.rs index 4905dc2522..9dea29bc87 100644 --- a/linkerd/stack/src/either.rs +++ b/linkerd/stack/src/either.rs @@ -1,5 +1,8 @@ use crate::{layer, NewService}; -pub use 
tower::util::Either; + +// pub use tower::util::Either; +pub use self::vendor::Either; +mod vendor; #[derive(Clone, Debug)] pub struct NewEither { @@ -30,8 +33,8 @@ where fn new_service(&self, target: Either) -> Self::Service { match target { - Either::A(t) => Either::A(self.left.new_service(t)), - Either::B(t) => Either::B(self.right.new_service(t)), + Either::Left(t) => Either::Left(self.left.new_service(t)), + Either::Right(t) => Either::Right(self.right.new_service(t)), } } } @@ -47,8 +50,8 @@ where fn new_service(&self, target: T) -> Self::Service { match self { - Either::A(n) => Either::A(n.new_service(target)), - Either::B(n) => Either::B(n.new_service(target)), + Either::Left(n) => Either::Left(n.new_service(target)), + Either::Right(n) => Either::Right(n.new_service(target)), } } } diff --git a/linkerd/stack/src/either/vendor.rs b/linkerd/stack/src/either/vendor.rs new file mode 100644 index 0000000000..603a20c174 --- /dev/null +++ b/linkerd/stack/src/either/vendor.rs @@ -0,0 +1,101 @@ +//! Contains [`Either`] and related types and functions. +//! +//! See [`Either`] documentation for more details. +//! +//! TODO(kate): this is a lightly modified variant of `tower`'s `Either` service. +//! +//! This is pulled in-tree to punt on addressing breaking changes to the trait bounds of +//! `Either`'s `Service` implementation, related to how it no longer boxes the errors +//! returned by its inner services. see #3744. +//! +//! This is vendored from . +//! +//! The variants `A` and `B` have been renamed to `Left` and `Right` to match the names of the v0.5 +//! interface. + +use futures::ready; +use linkerd_error::Error; +use pin_project::pin_project; +use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, +}; +use tower::Layer; +use tower::Service; + +/// Combine two different service types into a single type. +/// +/// Both services must be of the same request, response, and error types. 
+/// [`Either`] is useful for handling conditional branching in service middleware +/// to different inner service types. +#[pin_project(project = EitherProj)] +#[derive(Clone, Debug)] +pub enum Either { + /// One type of backing [`Service`]. + Left(#[pin] A), + /// The other type of backing [`Service`]. + Right(#[pin] B), +} + +impl Service for Either +where + A: Service, + A::Error: Into, + B: Service, + B::Error: Into, +{ + type Response = A::Response; + type Error = Error; + type Future = Either; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + use self::Either::*; + + match self { + Left(service) => Poll::Ready(Ok(ready!(service.poll_ready(cx)).map_err(Into::into)?)), + Right(service) => Poll::Ready(Ok(ready!(service.poll_ready(cx)).map_err(Into::into)?)), + } + } + + fn call(&mut self, request: Request) -> Self::Future { + use self::Either::*; + + match self { + Left(service) => Left(service.call(request)), + Right(service) => Right(service.call(request)), + } + } +} + +impl Future for Either +where + A: Future>, + AE: Into, + B: Future>, + BE: Into, +{ + type Output = Result; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + match self.project() { + EitherProj::Left(fut) => Poll::Ready(Ok(ready!(fut.poll(cx)).map_err(Into::into)?)), + EitherProj::Right(fut) => Poll::Ready(Ok(ready!(fut.poll(cx)).map_err(Into::into)?)), + } + } +} + +impl Layer for Either +where + A: Layer, + B: Layer, +{ + type Service = Either; + + fn layer(&self, inner: S) -> Self::Service { + match self { + Either::Left(layer) => Either::Left(layer.layer(inner)), + Either::Right(layer) => Either::Right(layer.layer(inner)), + } + } +} diff --git a/linkerd/stack/src/loadshed.rs b/linkerd/stack/src/loadshed.rs index e512d34863..c9a9163d32 100644 --- a/linkerd/stack/src/loadshed.rs +++ b/linkerd/stack/src/loadshed.rs @@ -138,20 +138,29 @@ mod tests { let (service, mut handle) = mock::pair::<(), ()>(); let service = LoadShed::new(Buffer::new(service, 3)); + let 
spawn_svc = |id: &'static str| { + use tracing::{error_span, Instrument}; + task::spawn( + service + .clone() + .oneshot(()) + .instrument(error_span!("worker", %id)), + ) + }; // The inner starts unavailable... handle.allow(0); // ...but the buffer will accept requests while it has capacity. - let mut oneshot1 = task::spawn(service.clone().oneshot(())); + let mut oneshot1 = spawn_svc("oneshot1"); assert_pending!(oneshot1.poll()); - let mut oneshot2 = task::spawn(service.clone().oneshot(())); + let mut oneshot2 = spawn_svc("oneshot2"); assert_pending!(oneshot2.poll()); - let mut oneshot3 = task::spawn(service.clone().oneshot(())); + let mut oneshot3 = spawn_svc("oneshot3"); assert_pending!(oneshot3.poll()); // The buffer is now full, so the loadshed service should fail this // request. - let mut oneshot4 = task::spawn(service.clone().oneshot(())); + let mut oneshot4 = spawn_svc("oneshot4"); assert_ready_err!(oneshot4.poll()); // Complete one request. @@ -166,15 +175,17 @@ mod tests { // Now that there's space in the buffer, the service should no longer be // shedding load. - let mut oneshot5 = task::spawn(service.clone().oneshot(())); + let mut oneshot5 = spawn_svc("oneshot5"); assert_pending!(oneshot5.poll()); + let mut oneshot6 = spawn_svc("oneshot6"); + assert_pending!(oneshot6.poll()); // The buffer is now full, so the loadshed service should fail any // additional requests. 
- let mut oneshot6 = task::spawn(service.clone().oneshot(())); - let mut oneshot7 = task::spawn(service.clone().oneshot(())); - assert_ready_err!(oneshot6.poll()); + let mut oneshot7 = spawn_svc("oneshot7"); + let mut oneshot8 = spawn_svc("oneshot8"); assert_ready_err!(oneshot7.poll()); + assert_ready_err!(oneshot8.poll()); // Complete all remaining requests handle.allow(3); diff --git a/linkerd/stack/src/queue.rs b/linkerd/stack/src/queue.rs index 2e9368f8d0..1fc2af9e17 100644 --- a/linkerd/stack/src/queue.rs +++ b/linkerd/stack/src/queue.rs @@ -34,9 +34,11 @@ pub struct NewQueueWithoutTimeout { _req: PhantomData, } -pub type Queue = gate::Gate, Req>>; +pub type Queue = + gate::Gate as tower::Service>::Future>>; -pub type QueueWithoutTimeout = Buffer, Req>; +pub type QueueWithoutTimeout = + Buffer as tower::Service>::Future>; // === impl NewQueue === diff --git a/linkerd/stack/src/switch_ready.rs b/linkerd/stack/src/switch_ready.rs index 959cfd479c..d70a8684a3 100644 --- a/linkerd/stack/src/switch_ready.rs +++ b/linkerd/stack/src/switch_ready.rs @@ -1,4 +1,5 @@ use super::NewService; +use futures::future::Either; use linkerd_error::Error; use std::{ future::Future, @@ -6,7 +7,7 @@ use std::{ task::{Context, Poll}, }; use tokio::time; -use tower::{spawn_ready::SpawnReady, util::Either}; +use tower::spawn_ready::SpawnReady; use tracing::{debug, trace}; /// A service which falls back to a secondary service if the primary service @@ -100,12 +101,13 @@ where Req: 'static, A: tower::Service + Send + 'static, A::Error: Into, - B: tower::Service, + B: tower::Service, B::Error: Into, { type Response = A::Response; type Error = Error; - type Future = Either< as tower::Service>::Future, B::Future>; + type Future = + Either< as tower::Service>::Future, >::Future>; fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { loop { @@ -165,8 +167,8 @@ where fn call(&mut self, req: Req) -> Self::Future { trace!(state = ?self.state, "SwitchReady::call"); match self.state { - 
State::Primary => Either::A(self.primary.call(req)), - State::Secondary => Either::B(self.secondary.call(req)), + State::Primary => Either::Left(self.primary.call(req)), + State::Secondary => Either::Right(self.secondary.call(req)), State::Waiting => panic!("called before ready!"), } } diff --git a/linkerd/stack/src/unwrap_or.rs b/linkerd/stack/src/unwrap_or.rs index a9c313fc87..636c54228b 100644 --- a/linkerd/stack/src/unwrap_or.rs +++ b/linkerd/stack/src/unwrap_or.rs @@ -26,8 +26,8 @@ impl Predicate<(Option, U)> for UnwrapOr { fn check(&mut self, (t, u): (Option, U)) -> Result, Error> { match t { - Some(t) => Ok(Either::A((t, u))), - None => Ok(Either::B(u)), + Some(t) => Ok(Either::Left((t, u))), + None => Ok(Either::Right(u)), } } } @@ -36,7 +36,8 @@ impl Predicate> for UnwrapOr { type Request = Either; fn check(&mut self, t: Option) -> Result, Error> { - Ok(t.map(Either::A).unwrap_or_else(|| Either::B(U::default()))) + Ok(t.map(Either::Left) + .unwrap_or_else(|| Either::Right(U::default()))) } } diff --git a/linkerd/stack/tracing/Cargo.toml b/linkerd/stack/tracing/Cargo.toml index 1e9b4f5cb4..3a20c0c754 100644 --- a/linkerd/stack/tracing/Cargo.toml +++ b/linkerd/stack/tracing/Cargo.toml @@ -1,14 +1,14 @@ [package] name = "linkerd-stack-tracing" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [dependencies] futures = { version = "0.3", default-features = false } linkerd-error = { path = "../../error" } linkerd-stack = { path = ".." 
} -tower = "0.4" +tower = { workspace = true } tracing = "0.1" diff --git a/linkerd/system/Cargo.toml b/linkerd/system/Cargo.toml deleted file mode 100644 index e36892a32e..0000000000 --- a/linkerd/system/Cargo.toml +++ /dev/null @@ -1,17 +0,0 @@ -[package] -name = "linkerd-system" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false -description = """ -Unsafe code for accessing system-level counters for memory & CPU usage. -""" - -[dependencies] -tracing = "0.1" - -[target.'cfg(target_os = "linux")'.dependencies] -libc = "0.2" -procfs = { version = "0.16.0", default-features = false } diff --git a/linkerd/system/src/lib.rs b/linkerd/system/src/lib.rs deleted file mode 100644 index 8f1a26908c..0000000000 --- a/linkerd/system/src/lib.rs +++ /dev/null @@ -1,17 +0,0 @@ -//! Unsafe code for accessing system-level counters for memory & CPU usage. - -#![deny( - rust_2018_idioms, - rust_2018_idioms, - clippy::disallowed_methods, - unsafe_code -)] - -#[cfg(target_os = "linux")] -mod linux; - -#[cfg(target_os = "linux")] -pub use self::linux::{blocking_stat, max_fds, ms_per_tick, open_fds, page_size, Stat}; - -#[cfg(not(target_os = "linux"))] -compile_error!("The system crate requires Linux"); diff --git a/linkerd/system/src/linux.rs b/linkerd/system/src/linux.rs deleted file mode 100644 index 80f5e1060c..0000000000 --- a/linkerd/system/src/linux.rs +++ /dev/null @@ -1,65 +0,0 @@ -use libc::pid_t; -use procfs::{ - process::{self, LimitValue, Process}, - ProcResult, -}; -use std::{fs, io}; -use tracing::{error, warn}; - -pub use process::Stat; - -pub fn page_size() -> io::Result { - sysconf(libc::_SC_PAGESIZE, "page size") -} - -pub fn ms_per_tick() -> io::Result { - // On Linux, CLK_TCK is ~always `100`, so pure integer division - // works. This is probably not suitable if we encounter other - // values. 
- let clock_ticks_per_sec = sysconf(libc::_SC_CLK_TCK, "clock ticks per second")?; - let ms_per_tick = 1_000 / clock_ticks_per_sec; - if clock_ticks_per_sec != 100 { - warn!( - clock_ticks_per_sec, - ms_per_tick, "Unexpected value; process_cpu_seconds_total may be inaccurate." - ); - } - Ok(ms_per_tick) -} - -pub fn blocking_stat() -> ProcResult { - Process::myself()?.stat() -} - -pub fn open_fds(pid: pid_t) -> io::Result { - let mut open = 0; - for f in fs::read_dir(format!("/proc/{}/fd", pid))? { - if !f?.file_type()?.is_dir() { - open += 1; - } - } - Ok(open) -} - -pub fn max_fds() -> ProcResult { - let limits = Process::myself()?.limits()?.max_open_files; - match limits.soft_limit { - LimitValue::Unlimited => match limits.hard_limit { - LimitValue::Unlimited => Ok(0), - LimitValue::Value(hard) => Ok(hard), - }, - LimitValue::Value(soft) => Ok(soft), - } -} - -#[allow(unsafe_code)] -fn sysconf(num: libc::c_int, name: &'static str) -> Result { - match unsafe { libc::sysconf(num) } { - e if e <= 0 => { - let error = io::Error::last_os_error(); - error!("error getting {}: {:?}", name, error); - Err(error) - } - val => Ok(val as u64), - } -} diff --git a/linkerd/tls/Cargo.toml b/linkerd/tls/Cargo.toml index 3600ef2c69..ad59fb9b8c 100644 --- a/linkerd/tls/Cargo.toml +++ b/linkerd/tls/Cargo.toml @@ -1,14 +1,14 @@ [package] name = "linkerd-tls" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [dependencies] async-trait = "0.1" -bytes = "1" +bytes = { workspace = true } futures = { version = "0.3", default-features = false } linkerd-conditional = { path = "../conditional" } linkerd-dns-name = { path = "../dns/name" } @@ -17,12 +17,15 @@ linkerd-identity = { path = "../identity" } linkerd-io = { path = "../io" } linkerd-stack = { path = "../stack" } 
pin-project = "1" -thiserror = "1" +thiserror = "2" tokio = { version = "1", features = ["macros", "time"] } -tower = "0.4" +tower = { workspace = true } tracing = "0.1" untrusted = "0.9" [dev-dependencies] linkerd-tracing = { path = "../tracing", features = ["ansi"] } tokio = { version = "1", features = ["rt-multi-thread"] } + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ['cfg(fuzzing)'] } diff --git a/linkerd/tls/fuzz/Cargo.toml b/linkerd/tls/fuzz/Cargo.toml index 2ad7dad215..1ea7722796 100644 --- a/linkerd/tls/fuzz/Cargo.toml +++ b/linkerd/tls/fuzz/Cargo.toml @@ -1,10 +1,10 @@ - [package] name = "linkerd-tls-fuzz" -version = "0.0.0" -authors = ["Linkerd Developers "] -publish = false -edition = "2021" +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [package.metadata] cargo-fuzz = true diff --git a/linkerd/tls/route/Cargo.toml b/linkerd/tls/route/Cargo.toml new file mode 100644 index 0000000000..143ae5f952 --- /dev/null +++ b/linkerd/tls/route/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "linkerd-tls-route" +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } + +[features] +proto = ["linkerd2-proxy-api"] + +[dependencies] +regex = "1" +rand = "0.9" +thiserror = "2" +tracing = "0.1" +linkerd-tls = { path = "../" } +linkerd-dns = { path = "../../dns" } + +[dependencies.linkerd2-proxy-api] +workspace = true +optional = true +features = ["outbound"] diff --git a/linkerd/tls/route/src/lib.rs b/linkerd/tls/route/src/lib.rs new file mode 100644 index 0000000000..6b9f1e17c8 --- /dev/null +++ b/linkerd/tls/route/src/lib.rs @@ -0,0 +1,70 @@ +//! An TLS route matching library for Linkerd to support the TLSRoute +//! Kubernetes Gateway API types. 
+ +#![deny(rust_2018_idioms, clippy::disallowed_methods, clippy::disallowed_types)] +#![forbid(unsafe_code)] + +use linkerd_tls::ServerName; +use tracing::trace; + +pub mod sni; +#[cfg(test)] +mod tests; + +pub use self::sni::{InvalidSni, MatchSni, SniMatch}; + +/// Groups routing rules under a common set of SNIs. +#[derive(Clone, Debug, Hash, PartialEq, Eq)] +pub struct Route

{ + /// A list of SNIs that this route applies to, to be matched against, + /// + /// If at least one match is specified, any match may apply for rules to applied. + /// When no SNI matches are present, all SNIs match. + pub snis: Vec, + + pub policy: P, +} + +/// Summarizes a matched route so that route matches may be compared/ordered. A +/// greater match is preferred over a lesser match. +#[derive(Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord, Default)] +pub struct RouteMatch { + sni: Option, +} + +/// Provides metadata information about a TLS session. For now this contains +/// only the SNI value but further down the line, we could add more metadata +/// if want to support more advanced routing scenarios. +#[derive(Clone, Debug, Hash, PartialEq, Eq)] +pub struct SessionInfo { + pub sni: ServerName, +} + +pub fn find

(routes: &[Route

], session_info: SessionInfo) -> Option<(RouteMatch, &P)> { + trace!(routes = ?routes.len(), "Finding matching route"); + + best(routes.iter().filter_map(|rt| { + trace!(snis = ?rt.snis); + let sni = if rt.snis.is_empty() { + None + } else { + let session_sni = &session_info.sni; + trace!(%session_sni, "matching sni"); + let sni_match = rt + .snis + .iter() + .filter_map(|a| a.summarize_match(session_sni)) + .max()?; + Some(sni_match) + }; + + Some((RouteMatch { sni }, &rt.policy)) + })) +} + +#[inline] +fn best(matches: impl Iterator) -> Option<(M, P)> { + // This is roughly equivalent to `max_by(...)` but we want to ensure + // that the first match wins. + matches.reduce(|(m0, p0), (m1, p1)| if m0 >= m1 { (m0, p0) } else { (m1, p1) }) +} diff --git a/linkerd/tls/route/src/sni.rs b/linkerd/tls/route/src/sni.rs new file mode 100644 index 0000000000..fceda5c3bd --- /dev/null +++ b/linkerd/tls/route/src/sni.rs @@ -0,0 +1,212 @@ +use linkerd_dns as dns; +use linkerd_tls::ServerName; + +/// Defines a way to match against SNI attributes of the TLS ClientHello +/// message in a TLS handshake. The SNI value being matched is the equivalent +/// of a hostname (as defined in RFC 1123) with 2 notable exceptions: +/// +/// 1. IPs are not allowed in SNI names per RFC 6066. +/// 2. A hostname may be prefixed with a wildcard label (`*.`). The wildcard +/// label must appear by itself as the first label. +#[derive(Clone, Debug, Hash, PartialEq, Eq)] +pub enum MatchSni { + Exact(String), + + /// Tokenized reverse list of DNS name suffix labels. + /// + /// For example: the match `*.example.com` is stored as `["com", + /// "example"]`. 
+ Suffix(Vec), +} + +#[derive(Clone, Debug, Hash, PartialEq, Eq)] +pub enum SniMatch { + Exact(usize), + Suffix(usize), +} + +#[derive(Clone, Copy, Debug, PartialEq, Eq, thiserror::Error)] +pub enum InvalidSni { + #[error("invalid sni: {0}")] + Invalid(#[from] dns::InvalidName), +} + +// === impl MatchSni === + +impl std::str::FromStr for MatchSni { + type Err = InvalidSni; + + fn from_str(sni: &str) -> Result { + if let Some(sni) = sni.strip_prefix("*.") { + return Ok(Self::Suffix( + sni.split('.').map(|s| s.to_string()).rev().collect(), + )); + } + + Ok(Self::Exact(sni.to_string())) + } +} + +impl MatchSni { + pub fn summarize_match(&self, sni: &ServerName) -> Option { + let mut sni = sni.as_str(); + + match self { + Self::Exact(h) => { + if !h.ends_with('.') { + sni = sni.strip_suffix('.').unwrap_or(sni); + } + if h == sni { + Some(SniMatch::Exact(h.len())) + } else { + None + } + } + + Self::Suffix(suffix) => { + if suffix.first().map(|s| &**s) != Some("") { + sni = sni.strip_suffix('.').unwrap_or(sni); + } + let mut length = 0; + for sfx in suffix.iter() { + sni = sni.strip_suffix(sfx)?; + sni = sni.strip_suffix('.')?; + length += sfx.len() + 1; + } + + Some(SniMatch::Suffix(length)) + } + } + } +} + +// === impl SniMatch === + +impl std::cmp::PartialOrd for SniMatch { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl std::cmp::Ord for SniMatch { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + use std::cmp::Ordering; + match (self, other) { + (Self::Exact(l), Self::Exact(r)) => l.cmp(r), + (Self::Suffix(l), Self::Suffix(r)) => l.cmp(r), + (Self::Exact(_), Self::Suffix(_)) => Ordering::Greater, + (Self::Suffix(_), Self::Exact(_)) => Ordering::Less, + } + } +} + +#[cfg(feature = "proto")] +pub mod proto { + use super::*; + use linkerd2_proxy_api::tls_route as api; + #[derive(Debug, thiserror::Error)] + pub enum InvalidSniMatch { + #[error("sni match must contain a match")] + Missing, + } + // === impl MatchSni === + 
impl TryFrom for MatchSni { + type Error = InvalidSniMatch; + fn try_from(hm: api::SniMatch) -> Result { + match hm.r#match.ok_or(InvalidSniMatch::Missing)? { + api::sni_match::Match::Exact(h) => Ok(MatchSni::Exact(h)), + api::sni_match::Match::Suffix(sfx) => Ok(MatchSni::Suffix(sfx.reverse_labels)), + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn exact() { + let m = "example.com" + .parse::() + .expect("example.com parses"); + assert_eq!(m, MatchSni::Exact("example.com".to_string())); + assert_eq!( + m.summarize_match(&"example.com".parse().unwrap()), + Some(SniMatch::Exact("example.com".len())) + ); + assert_eq!( + m.summarize_match(&"example.com.".parse().unwrap()), + Some(SniMatch::Exact("example.com".len())) + ); + assert_eq!(m.summarize_match(&"foo.example.com".parse().unwrap()), None); + + let m = "example.com." + .parse::() + .expect("example.com parses"); + assert_eq!(m, MatchSni::Exact("example.com.".to_string())); + assert_eq!(m.summarize_match(&"example.com".parse().unwrap()), None,); + assert_eq!( + m.summarize_match(&"example.com.".parse().unwrap()), + Some(SniMatch::Exact("example.com.".len())) + ); + } + + #[test] + fn suffix() { + let m = "*.example.com" + .parse::() + .expect("*.example.com parses"); + assert_eq!( + m, + MatchSni::Suffix(vec!["com".to_string(), "example".to_string()]) + ); + + assert_eq!(m.summarize_match(&"example.com".parse().unwrap()), None); + assert_eq!( + m.summarize_match(&"foo.example.com".parse().unwrap()), + Some(SniMatch::Suffix(".example.com".len())) + ); + assert_eq!( + m.summarize_match(&"foo.example.com".parse().unwrap()), + Some(SniMatch::Suffix(".example.com".len())) + ); + assert_eq!( + m.summarize_match(&"bar.foo.example.com".parse().unwrap()), + Some(SniMatch::Suffix(".example.com".len())) + ); + + let m = "*.example.com." + .parse::() + .expect("*.example.com. 
parses"); + assert_eq!( + m, + MatchSni::Suffix(vec![ + "".to_string(), + "com".to_string(), + "example".to_string() + ]) + ); + assert_eq!( + m.summarize_match(&"bar.foo.example.com".parse().unwrap()), + None + ); + assert_eq!( + m.summarize_match(&"bar.foo.example.com.".parse().unwrap()), + Some(SniMatch::Suffix(".example.com.".len())) + ); + } + + #[test] + fn cmp() { + assert!(SniMatch::Exact("example.com".len()) > SniMatch::Suffix(".example.com".len())); + assert!(SniMatch::Exact("foo.example.com".len()) > SniMatch::Exact("example.com".len())); + assert!( + SniMatch::Suffix(".foo.example.com".len()) > SniMatch::Suffix(".example.com".len()) + ); + assert_eq!( + SniMatch::Suffix(".foo.example.com".len()), + SniMatch::Suffix(".bar.example.com".len()) + ); + } +} diff --git a/linkerd/tls/route/src/tests.rs b/linkerd/tls/route/src/tests.rs new file mode 100644 index 0000000000..f23148f7e0 --- /dev/null +++ b/linkerd/tls/route/src/tests.rs @@ -0,0 +1,96 @@ +use super::*; + +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum Policy { + Expected, + Unexpected, +} + +impl Default for Policy { + fn default() -> Self { + Self::Unexpected + } +} + +/// Given two equivalent routes, choose the explicit sni match and not +/// the wildcard. +#[test] +fn sni_precedence() { + let rts = vec![ + Route { + snis: vec!["*.example.com".parse().unwrap()], + policy: Policy::Unexpected, + }, + Route { + snis: vec!["foo.example.com".parse().unwrap()], + policy: Policy::Expected, + }, + ]; + + let si = SessionInfo { + sni: "foo.example.com".parse().expect("must parse"), + }; + + let (_, policy) = find(&rts, si).expect("must match"); + assert_eq!(*policy, Policy::Expected, "incorrect rule matched"); +} + +#[test] +fn first_identical_wins() { + let rts = vec![ + Route { + policy: Policy::Expected, + snis: vec![], + }, + // Redundant route. 
+ Route { + policy: Policy::Unexpected, + snis: vec![], + }, + ]; + + let si = SessionInfo { + sni: "api.github.io".parse().expect("must parse"), + }; + + let (_, policy) = find(&rts, si).expect("must match"); + assert_eq!(*policy, Policy::Expected, "incorrect rule matched"); +} + +#[test] +fn no_match_suffix() { + let rts = vec![Route { + snis: vec!["*.test.example.com".parse().unwrap()], + policy: Policy::Unexpected, + }]; + + let si = SessionInfo { + sni: "test.example.com".parse().expect("must parse"), + }; + + assert!(find(&rts, si).is_none(), "should have no matches"); +} + +#[test] +fn no_match_exact() { + let rts = vec![Route { + snis: vec!["test.example.com".parse().unwrap()], + policy: Policy::Unexpected, + }]; + + let si = SessionInfo { + sni: "fest.example.com".parse().expect("must parse"), + }; + + assert!(find(&rts, si).is_none(), "should have no matches"); +} + +#[test] +fn no_routes_no_match() { + let rts: Vec> = Vec::default(); + let si = SessionInfo { + sni: "fest.example.com".parse().expect("must parse"), + }; + + assert!(find(&rts, si).is_none(), "should have no matches"); +} diff --git a/linkerd/tls/src/client.rs b/linkerd/tls/src/client.rs index ca07551208..642db8a618 100644 --- a/linkerd/tls/src/client.rs +++ b/linkerd/tls/src/client.rs @@ -102,7 +102,7 @@ where T: Param, L: NewService, C: MakeConnection, - C::Connection: io::AsyncRead + io::AsyncWrite + Send + Unpin, + C::Connection: Send + Unpin, C::Metadata: Send + Unpin, C::Future: Send + 'static, H: Service), Error = io::Error> diff --git a/linkerd/tls/src/lib.rs b/linkerd/tls/src/lib.rs index 0e54d86442..4d6b0f6136 100755 --- a/linkerd/tls/src/lib.rs +++ b/linkerd/tls/src/lib.rs @@ -6,7 +6,10 @@ pub mod server; pub use self::{ client::{Client, ClientTls, ConditionalClientTls, ConnectMeta, NoClientTls, ServerId}, - server::{ClientId, ConditionalServerTls, NewDetectTls, NoServerTls, ServerTls}, + server::{ + ClientId, ConditionalServerTls, NewDetectRequiredSni, NewDetectTls, NoServerTls, 
+ NoSniFoundError, ServerTls, SniDetectionTimeoutError, + }, }; use linkerd_dns_name as dns; diff --git a/linkerd/tls/src/server.rs b/linkerd/tls/src/server.rs index 04862401f9..dfe2b4fb04 100644 --- a/linkerd/tls/src/server.rs +++ b/linkerd/tls/src/server.rs @@ -1,4 +1,5 @@ mod client_hello; +mod required_sni; use crate::{NegotiatedProtocol, ServerName}; use bytes::BytesMut; @@ -18,6 +19,8 @@ use thiserror::Error; use tokio::time::{self, Duration}; use tracing::{debug, trace, warn}; +pub use self::required_sni::{NewDetectRequiredSni, NoSniFoundError, SniDetectionTimeoutError}; + /// Describes the authenticated identity of a remote client. #[derive(Clone, Debug, Eq, PartialEq, Hash)] pub struct ClientId(pub id::Id); @@ -65,6 +68,7 @@ pub struct NewDetectTls { _local_identity: std::marker::PhantomData L>, } +/// A param type used to indicate the timeout after which detection should fail. #[derive(Copy, Clone, Debug)] pub struct Timeout(pub Duration); @@ -192,7 +196,7 @@ where } /// Peek or buffer the provided stream to determine an SNI value. -async fn detect_sni(mut io: I) -> io::Result<(Option, DetectIo)> +pub(crate) async fn detect_sni(mut io: I) -> io::Result<(Option, DetectIo)> where I: io::Peek + io::AsyncRead + io::AsyncWrite + Send + Sync + Unpin, { @@ -207,12 +211,8 @@ where debug!(sz, "Peeked bytes from TCP stream"); // Peek may return 0 bytes if the socket is not peekable. 
if sz > 0 { - match client_hello::parse_sni(buf.as_ref()) { - Ok(sni) => { - return Ok((sni, EitherIo::Left(io))); - } - - Err(client_hello::Incomplete) => {} + if let Ok(sni) = client_hello::parse_sni(buf.as_ref()) { + return Ok((sni, EitherIo::Left(io))); } } diff --git a/linkerd/tls/src/server/required_sni.rs b/linkerd/tls/src/server/required_sni.rs new file mode 100644 index 0000000000..1daf6d39bb --- /dev/null +++ b/linkerd/tls/src/server/required_sni.rs @@ -0,0 +1,118 @@ +use crate::{ + server::{detect_sni, DetectIo}, + ServerName, +}; +use linkerd_error::Error; +use linkerd_io as io; +use linkerd_stack::{layer, NewService, Service, ServiceExt}; +use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, +}; +use thiserror::Error; +use tokio::time; +use tracing::debug; + +#[derive(Clone, Debug, Error)] +#[error("SNI detection timed out")] +pub struct SniDetectionTimeoutError; + +#[derive(Clone, Debug, Error)] +#[error("Could not find SNI")] +pub struct NoSniFoundError; + +/// A NewService that instruments an inner stack with knowledge of the +/// connection's TLS ServerName (i.e. from an SNI header). +/// +/// This differs from the parent module's NewDetectTls in a a few ways: +/// +/// - It requires that all connections have an SNI. +/// - It assumes that these connections may not be terminated locally, so there +/// is no concept of a local server name. +/// - There are no special affordances for mutually authenticated TLS, so we +/// make no attempt to detect the client's identity. +/// - The detection timeout is fixed and cannot vary per target (for +/// convenience, to reduce needless boilerplate). 
+#[derive(Clone, Debug)] +pub struct NewDetectRequiredSni { + inner: N, + timeout: time::Duration, +} + +#[derive(Clone, Debug)] +pub struct DetectRequiredSni { + target: T, + inner: N, + timeout: time::Duration, +} + +// === impl NewDetectRequiredSni === + +impl NewDetectRequiredSni { + fn new(timeout: time::Duration, inner: N) -> Self { + Self { inner, timeout } + } + + pub fn layer(timeout: time::Duration) -> impl layer::Layer + Clone { + layer::mk(move |inner| Self::new(timeout, inner)) + } +} + +impl NewService for NewDetectRequiredSni +where + N: Clone, +{ + type Service = DetectRequiredSni; + + fn new_service(&self, target: T) -> Self::Service { + DetectRequiredSni::new(self.timeout, target, self.inner.clone()) + } +} + +// === impl DetectRequiredSni === + +impl DetectRequiredSni { + fn new(timeout: time::Duration, target: T, inner: N) -> Self { + Self { + target, + inner, + timeout, + } + } +} + +impl Service for DetectRequiredSni +where + T: Clone + Send + Sync + 'static, + I: io::AsyncRead + io::Peek + io::AsyncWrite + Send + Sync + Unpin + 'static, + N: NewService<(ServerName, T), Service = S> + Clone + Send + 'static, + S: Service> + Send, + S::Error: Into, + S::Future: Send, +{ + type Response = S::Response; + type Error = Error; + type Future = Pin> + Send + 'static>>; + + #[inline] + fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, io: I) -> Self::Future { + let target = self.target.clone(); + let new_accept = self.inner.clone(); + + // Detect the SNI from a ClientHello (or timeout). 
+ let detect = time::timeout(self.timeout, detect_sni(io)); + Box::pin(async move { + let (res, io) = detect.await.map_err(|_| SniDetectionTimeoutError)??; + let sni = res.ok_or(NoSniFoundError)?; + debug!(?sni, "Detected TLS"); + + let svc = new_accept.new_service((sni, target)); + svc.oneshot(io).await.map_err(Into::into) + }) + } +} diff --git a/linkerd/tls/test-util/Cargo.toml b/linkerd/tls/test-util/Cargo.toml index 0cc98c09a4..fd7cb6fb90 100644 --- a/linkerd/tls/test-util/Cargo.toml +++ b/linkerd/tls/test-util/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "linkerd-tls-test-util" -version = "0.1.0" -license = "Apache-2.0" +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } edition = "2018" -publish = false +publish = { workspace = true } diff --git a/linkerd/tonic-stream/Cargo.toml b/linkerd/tonic-stream/Cargo.toml index 1a0ffc7655..2eaccc26c3 100644 --- a/linkerd/tonic-stream/Cargo.toml +++ b/linkerd/tonic-stream/Cargo.toml @@ -1,16 +1,16 @@ [package] name = "linkerd-tonic-stream" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [dependencies] futures = { version = "0.3", default-features = false } linkerd-stack = { path = "../stack" } pin-project = "1" -tonic = { version = "0.10", default-features = false } +tonic = { workspace = true, default-features = false } tokio = { version = "1", features = ["time"] } tracing = "0.1" diff --git a/linkerd/tonic-watch/Cargo.toml b/linkerd/tonic-watch/Cargo.toml index 1f3ecfe527..9b3610797c 100644 --- a/linkerd/tonic-watch/Cargo.toml +++ b/linkerd/tonic-watch/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "linkerd-tonic-watch" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace 
= true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } description = """ Provides a utility for creating robust watches from a service that returns a stream. """ @@ -13,7 +13,7 @@ Provides a utility for creating robust watches from a service that returns a str futures = { version = "0.3", default-features = false } linkerd-error = { path = "../error" } linkerd-stack = { path = "../stack" } -tonic = { version = "0.10", default-features = false } +tonic = { workspace = true, default-features = false } tokio = { version = "1", features = ["macros", "rt", "sync", "time"] } tracing = "0.1" @@ -22,4 +22,4 @@ linkerd-tracing = { path = "../tracing" } tokio = { version = "1", features = ["macros"] } tokio-stream = { version = "0.1", features = ["sync", "time"] } tokio-test = "0.4" -tower-test = "0.4" +tower-test = { workspace = true } diff --git a/linkerd/trace-context/Cargo.toml b/linkerd/trace-context/Cargo.toml index 0c4f284d3e..f4dbce5caa 100644 --- a/linkerd/trace-context/Cargo.toml +++ b/linkerd/trace-context/Cargo.toml @@ -1,20 +1,20 @@ [package] name = "linkerd-trace-context" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [dependencies] base64 = "0.13" -bytes = "1" +bytes = { workspace = true } futures = { version = "0.3", default-features = false } hex = "0.4" -http = "0.2" +http = { workspace = true } linkerd-error = { path = "../error" } linkerd-stack = { path = "../stack" } rand = "0.8" thiserror = "1" -tower = { version = "0.4", default-features = false, features = ["util"] } +tower = { workspace = true, default-features = false, features = ["util"] } tracing = "0.1" diff --git a/linkerd/trace-context/src/export.rs b/linkerd/trace-context/src/export.rs new 
file mode 100644 index 0000000000..262c98ee7f --- /dev/null +++ b/linkerd/trace-context/src/export.rs @@ -0,0 +1,18 @@ +use crate::Span; +use std::collections::HashMap; +use std::sync::Arc; + +#[derive(Copy, Clone, Debug, PartialEq)] +pub enum SpanKind { + Server = 1, + Client = 2, +} + +pub type SpanLabels = Arc>; + +#[derive(Debug)] +pub struct ExportSpan { + pub span: Span, + pub kind: SpanKind, + pub labels: SpanLabels, +} diff --git a/linkerd/trace-context/src/lib.rs b/linkerd/trace-context/src/lib.rs index ed8c36017c..f88b04bca4 100644 --- a/linkerd/trace-context/src/lib.rs +++ b/linkerd/trace-context/src/lib.rs @@ -1,6 +1,7 @@ #![deny(rust_2018_idioms, clippy::disallowed_methods, clippy::disallowed_types)] #![forbid(unsafe_code)] +pub mod export; mod propagation; mod service; @@ -18,6 +19,27 @@ const SPAN_ID_LEN: usize = 8; #[derive(Debug, Default)] pub struct Id(Vec); +#[derive(Debug, Error)] +#[error("ID '{:?}' should have {} bytes, but it has {}", self.id, self.expected_size, self.actual_size)] +pub struct IdLengthError { + id: Vec, + expected_size: usize, + actual_size: usize, +} + +impl Id { + pub fn into_bytes(self) -> Result<[u8; N], IdLengthError> { + self.as_ref().try_into().map_err(|_| { + let bytes: Vec = self.into(); + IdLengthError { + expected_size: N, + actual_size: bytes.len(), + id: bytes, + } + }) + } +} + +#[derive(Debug, Default)] +pub struct Flags(u8); diff --git a/linkerd/trace-context/src/service.rs b/linkerd/trace-context/src/service.rs index 310509b303..129d1ff361 100644 --- a/linkerd/trace-context/src/service.rs +++ b/linkerd/trace-context/src/service.rs @@ -1,5 +1,6 @@ use crate::{propagation, Span, SpanSink}; use futures::{future::Either, prelude::*}; +use http::Uri; use linkerd_stack::layer; use std::{ collections::HashMap, @@ -34,21 +35,41 @@ impl TraceContext { }) } + /// Returns labels for the provided request. 
+ /// + /// The OpenTelemetry spec defines the semantic conventions that HTTP + /// services should use for the labels included in traces: + /// https://opentelemetry.io/docs/specs/semconv/http/http-spans/ fn request_labels(req: &http::Request) -> HashMap<&'static str, String> { - let mut labels = HashMap::with_capacity(5); - labels.insert("http.method", format!("{}", req.method())); - let path = req - .uri() - .path_and_query() - .map(|pq| pq.as_str().to_owned()) - .unwrap_or_default(); - labels.insert("http.path", path); - if let Some(authority) = req.uri().authority() { - labels.insert("http.authority", authority.as_str().to_string()); + let mut labels = HashMap::with_capacity(7); + labels.insert("http.request.method", format!("{}", req.method())); + let url = req.uri(); + if let Some(scheme) = url.scheme_str() { + labels.insert("url.scheme", scheme.to_string()); } - if let Some(host) = req.headers().get("host") { + labels.insert("url.path", url.path().to_string()); + if let Some(query) = url.query() { + labels.insert("url.query", query.to_string()); + } + + // This is the order of precedence for host headers, + // see https://opentelemetry.io/docs/specs/semconv/http/http-spans/ + let host_header = req + .headers() + .get("X-Forwarded-Host") + .or_else(|| req.headers().get(":authority")) + .or_else(|| req.headers().get("host")); + + if let Some(host) = host_header { if let Ok(host) = host.to_str() { - labels.insert("http.host", host.to_string()); + if let Ok(uri) = host.parse::() { + if let Some(host) = uri.host() { + labels.insert("server.address", host.to_string()); + } + if let Some(port) = uri.port() { + labels.insert("server.port", port.to_string()); + } + } } } labels @@ -58,7 +79,10 @@ impl TraceContext { mut labels: HashMap<&'static str, String>, rsp: &http::Response, ) -> HashMap<&'static str, String> { - labels.insert("http.status_code", rsp.status().as_str().to_string()); + labels.insert( + "http.response.status_code", + 
rsp.status().as_str().to_string(), + ); labels } } diff --git a/linkerd/tracing/Cargo.toml b/linkerd/tracing/Cargo.toml index 1e6ee794e1..af6acff0d2 100644 --- a/linkerd/tracing/Cargo.toml +++ b/linkerd/tracing/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "linkerd-tracing" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [features] default = [] @@ -20,7 +20,7 @@ tracing = "0.1" tracing-log = "0.2" [dependencies.tracing-subscriber] -version = "0.3.16" +version = "0.3.19" default-features = false features = [ "env-filter", diff --git a/linkerd/tracing/src/access_log.rs b/linkerd/tracing/src/access_log.rs index 6a115857c7..59436ffe39 100644 --- a/linkerd/tracing/src/access_log.rs +++ b/linkerd/tracing/src/access_log.rs @@ -114,7 +114,7 @@ impl ApacheCommon { ]; } -impl<'writer> FormatFields<'writer> for ApacheCommon { +impl FormatFields<'_> for ApacheCommon { fn format_fields(&self, writer: format::Writer<'_>, fields: R) -> fmt::Result { let mut visitor = ApacheCommonVisitor { writer, diff --git a/linkerd/tracing/src/lib.rs b/linkerd/tracing/src/lib.rs index bbbeacf19a..f1c02b92d6 100644 --- a/linkerd/tracing/src/lib.rs +++ b/linkerd/tracing/src/lib.rs @@ -26,7 +26,7 @@ const ENV_LOG_LEVEL: &str = "LINKERD2_PROXY_LOG"; const ENV_LOG_FORMAT: &str = "LINKERD2_PROXY_LOG_FORMAT"; const ENV_ACCESS_LOG: &str = "LINKERD2_PROXY_ACCESS_LOG"; -const DEFAULT_LOG_LEVEL: &str = "warn,linkerd=info,hickory_dns=error"; +const DEFAULT_LOG_LEVEL: &str = "warn,linkerd=info,hickory=error"; const DEFAULT_LOG_FORMAT: &str = "PLAIN"; #[derive(Debug, Default)] @@ -110,7 +110,8 @@ impl Settings { S: Send + Sync, { let fmt = tracing_subscriber::fmt::format() - .with_timer(self.timer()) + // Always use absolute system time instead of uptime in JSON. 
+ .with_timer(tracing_subscriber::fmt::time::SystemTime) .with_thread_ids(!self.is_test) // Configure the formatter to output JSON logs. .json() diff --git a/linkerd/tracing/src/stream.rs b/linkerd/tracing/src/stream.rs index dd8fa7ec8c..d43b00d927 100644 --- a/linkerd/tracing/src/stream.rs +++ b/linkerd/tracing/src/stream.rs @@ -251,7 +251,7 @@ where // === impl Line === -impl<'a> io::Write for Line<'a> { +impl io::Write for Line<'_> { fn write(&mut self, buf: &[u8]) -> io::Result { if let Some(ref mut line) = self.0 { line.extend_from_slice(buf) diff --git a/linkerd/transport-header/Cargo.toml b/linkerd/transport-header/Cargo.toml index 35e221f90a..ead088abdc 100644 --- a/linkerd/transport-header/Cargo.toml +++ b/linkerd/transport-header/Cargo.toml @@ -1,20 +1,20 @@ [package] name = "linkerd-transport-header" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [dependencies] async-trait = "0.1" -bytes = "1" +bytes = { workspace = true } futures = { version = "0.3", default-features = false } linkerd-dns-name = { path = "../dns/name" } linkerd-error = { path = "../error" } linkerd-io = { path = "../io" } linkerd-stack = { path = "../stack" } -prost = "0.12" +prost = { workspace = true } tokio = { version = "1", features = ["time"] } tracing = "0.1" @@ -23,6 +23,9 @@ arbitrary = { version = "1", features = ["derive"] } libfuzzer-sys = { version = "0.4", features = ["arbitrary-derive"] } [dev-dependencies] -prost-build = { version = "0.12", default-features = false } +prost-build = { workspace = true } tokio = { version = "1", features = ["macros"] } tokio-test = "0.4" + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ['cfg(fuzzing)'] } diff --git a/linkerd/transport-header/fuzz/Cargo.toml b/linkerd/transport-header/fuzz/Cargo.toml 
index c8fd55ba0b..d2b3e81d22 100644 --- a/linkerd/transport-header/fuzz/Cargo.toml +++ b/linkerd/transport-header/fuzz/Cargo.toml @@ -1,9 +1,10 @@ [package] name = "linkerd-transport-header-fuzz" -version = "0.0.0" -authors = ["Linkerd Developers "] -publish = false -edition = "2021" +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [package.metadata] cargo-fuzz = true diff --git a/linkerd/transport-header/src/gen/transport.l5d.io.rs b/linkerd/transport-header/src/gen/transport.l5d.io.rs index 32d0d4947b..6e6c89f79a 100644 --- a/linkerd/transport-header/src/gen/transport.l5d.io.rs +++ b/linkerd/transport-header/src/gen/transport.l5d.io.rs @@ -1,5 +1,4 @@ // This file is @generated by prost-build. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Header { /// The target port. @@ -13,22 +12,18 @@ pub struct Header { #[prost(message, optional, tag = "3")] pub session_protocol: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct SessionProtocol { #[prost(oneof = "session_protocol::Kind", tags = "1, 2")] pub kind: ::core::option::Option, } /// Nested message and enum types in `SessionProtocol`. 
pub mod session_protocol { - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Message)] + #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct Http1 {} - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Message)] + #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct Http2 {} - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] + #[derive(Clone, Copy, PartialEq, ::prost::Oneof)] pub enum Kind { #[prost(message, tag = "1")] Http1(Http1), diff --git a/linkerd/transport-metrics/Cargo.toml b/linkerd/transport-metrics/Cargo.toml index 4811695c68..eb020bb295 100644 --- a/linkerd/transport-metrics/Cargo.toml +++ b/linkerd/transport-metrics/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "linkerd-transport-metrics" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } description = """Transport-level metrics""" [dependencies] diff --git a/linkerd/transport-metrics/src/lib.rs b/linkerd/transport-metrics/src/lib.rs index 44a8df0459..f84e019e64 100644 --- a/linkerd/transport-metrics/src/lib.rs +++ b/linkerd/transport-metrics/src/lib.rs @@ -5,6 +5,7 @@ mod client; mod report; mod sensor; mod server; +pub mod zone; pub use self::{ client::Client, diff --git a/linkerd/transport-metrics/src/zone.rs b/linkerd/transport-metrics/src/zone.rs new file mode 100644 index 0000000000..48181a9b84 --- /dev/null +++ b/linkerd/transport-metrics/src/zone.rs @@ -0,0 +1,80 @@ +use linkerd_metrics::prom; +use linkerd_stack::{ExtractParam, Param}; +use std::{fmt::Debug, hash::Hash}; + +pub use self::sensor::ZoneSensorIo; + +pub mod client; +mod sensor; + +#[derive(Clone, Debug)] +pub struct TcpZoneMetrics { + pub recv_bytes: prom::Counter, + pub 
send_bytes: prom::Counter, +} + +#[derive(Clone, Debug)] +pub struct TcpZoneMetricsParams { + transfer_cost: prom::Family, +} + +impl Default for TcpZoneMetricsParams { + fn default() -> Self { + Self { + transfer_cost: prom::Family::default(), + } + } +} + +#[derive(Debug, Copy, Clone, Hash, Eq, PartialEq)] +pub struct TcpZoneLabels { + pub recv_labels: L, + pub send_labels: L, +} + +impl TcpZoneMetricsParams +where + L: Clone + Hash + Eq + prom::encoding::EncodeLabelSet + Debug + Send + Sync + 'static, +{ + pub fn register(registry: &mut prom::Registry) -> Self { + let transfer_cost = prom::Family::default(); + registry.register_with_unit( + "transfer_cost", + "The total amount of data transferred in and out of this proxy, by cost zone", + prom::Unit::Bytes, + transfer_cost.clone(), + ); + + TcpZoneMetricsParams { transfer_cost } + } +} + +impl TcpZoneMetricsParams +where + L: Clone + Hash + Eq, +{ + pub fn metrics(&self, labels: TcpZoneLabels) -> TcpZoneMetrics { + let recv_bytes = self + .transfer_cost + .get_or_create(&labels.recv_labels) + .clone(); + let send_bytes = self + .transfer_cost + .get_or_create(&labels.send_labels) + .clone(); + TcpZoneMetrics { + recv_bytes, + send_bytes, + } + } +} + +impl ExtractParam for TcpZoneMetricsParams +where + T: Param>, + L: Clone + Hash + Eq, +{ + fn extract_param(&self, t: &T) -> TcpZoneMetrics { + self.metrics(t.param()) + } +} diff --git a/linkerd/transport-metrics/src/zone/client.rs b/linkerd/transport-metrics/src/zone/client.rs new file mode 100644 index 0000000000..c7b29d2367 --- /dev/null +++ b/linkerd/transport-metrics/src/zone/client.rs @@ -0,0 +1,77 @@ +use crate::zone::{ + sensor::{ZoneMetricsSensor, ZoneSensorIo}, + TcpZoneMetrics, +}; +use futures::{ready, TryFuture}; +use linkerd_stack::{layer, ExtractParam, MakeConnection, Service}; +use pin_project::pin_project; +use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, +}; + +#[derive(Clone, Debug)] +pub struct ZoneMetricsClient { + inner: S, 
+ params: P, +} + +#[pin_project] +pub struct ConnectFuture { + #[pin] + inner: F, + metrics: Option, +} + +// === impl Client === + +impl ZoneMetricsClient { + pub fn layer(params: P) -> impl layer::Layer + Clone { + layer::mk(move |inner| Self { + inner, + params: params.clone(), + }) + } +} + +impl Service for ZoneMetricsClient +where + P: ExtractParam, + S: MakeConnection, +{ + type Response = (ZoneSensorIo, S::Metadata); + type Error = S::Error; + type Future = ConnectFuture; + + #[inline] + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, target: T) -> Self::Future { + let metrics = self.params.extract_param(&target); + let inner = self.inner.connect(target); + ConnectFuture { + metrics: Some(metrics), + inner, + } + } +} + +// === impl ConnectFuture === + +impl> Future for ConnectFuture { + type Output = Result<(ZoneSensorIo, M), F::Error>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + let (io, meta) = ready!(this.inner.try_poll(cx))?; + let metrics = this + .metrics + .take() + .expect("future must not be polled after ready"); + let io = ZoneSensorIo::new(io, ZoneMetricsSensor { metrics }); + Poll::Ready(Ok((io, meta))) + } +} diff --git a/linkerd/transport-metrics/src/zone/sensor.rs b/linkerd/transport-metrics/src/zone/sensor.rs new file mode 100644 index 0000000000..51a9187a26 --- /dev/null +++ b/linkerd/transport-metrics/src/zone/sensor.rs @@ -0,0 +1,24 @@ +use crate::zone::TcpZoneMetrics; + +#[derive(Clone, Debug)] +pub struct ZoneMetricsSensor { + pub metrics: TcpZoneMetrics, +} + +pub type ZoneSensorIo = linkerd_io::SensorIo; + +impl linkerd_io::Sensor for ZoneMetricsSensor { + fn record_read(&mut self, sz: usize) { + self.metrics.recv_bytes.inc_by(sz as u64); + } + + fn record_write(&mut self, sz: usize) { + self.metrics.send_bytes.inc_by(sz as u64); + } + + fn record_close(&mut self, _eos: Option) {} + + fn record_error(&mut self, op: 
linkerd_io::Poll) -> linkerd_io::Poll { + op + } +} diff --git a/linkerd/workers/Cargo.toml b/linkerd/workers/Cargo.toml new file mode 100644 index 0000000000..3bec66affe --- /dev/null +++ b/linkerd/workers/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "linkerd-workers" +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } +description = "CPU core allocation logic for Linkerd" + +[dependencies] diff --git a/linkerd/workers/src/lib.rs b/linkerd/workers/src/lib.rs new file mode 100644 index 0000000000..f8ebc482b3 --- /dev/null +++ b/linkerd/workers/src/lib.rs @@ -0,0 +1,163 @@ +//! Core allocation logic for Linkerd's worker threads. + +use std::num::NonZeroUsize; + +/// Determines the number of worker threads to use in a runtime. +#[derive(Copy, Clone, Debug)] +pub struct Workers { + pub available: NonZeroUsize, + pub max_ratio: Option, + pub max_cores: Option, + pub min_cores: NonZeroUsize, +} + +impl Workers { + /// Calculate the number of cores to use based on the constraints. + /// + /// The algorithm uses the following precedence: + /// 1. The explicitly configured maximum cores, if present + /// 2. The ratio-based calculation, if present + /// 3. Default to 1 core + /// + /// The result is constrained by both the minimum cores and the available cores. 
+ pub fn cores(&self) -> NonZeroUsize { + let Self { + available, + max_ratio, + max_cores, + min_cores, + } = *self; + + max_cores + .or_else(|| { + max_ratio.and_then(|ratio| { + let max = (available.get() as f64 * ratio).round() as usize; + max.try_into().ok() + }) + }) + .unwrap_or_else(|| 1.try_into().unwrap()) + .max(min_cores) + .min(available) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn min_cores_exceeds_max_cores() { + let workers = Workers { + available: NonZeroUsize::new(8).unwrap(), + max_cores: NonZeroUsize::new(2), + min_cores: NonZeroUsize::new(4).unwrap(), + max_ratio: None, + }; + assert_eq!(workers.cores().get(), 4); + } + + #[test] + fn available_limits_max_cores() { + let workers = Workers { + available: NonZeroUsize::new(2).unwrap(), + max_cores: NonZeroUsize::new(4), + min_cores: NonZeroUsize::new(1).unwrap(), + max_ratio: None, + }; + assert_eq!(workers.cores().get(), 2); + } + + #[test] + fn max_ratio_calculates_cores() { + let workers = Workers { + available: NonZeroUsize::new(10).unwrap(), + max_cores: None, + min_cores: NonZeroUsize::new(1).unwrap(), + max_ratio: Some(0.5), + }; + assert_eq!(workers.cores().get(), 5); // 10 * 0.5 = 5 + } + + #[test] + fn max_cores_overrides_ratio() { + let workers = Workers { + available: NonZeroUsize::new(10).unwrap(), + max_cores: NonZeroUsize::new(3), + min_cores: NonZeroUsize::new(1).unwrap(), + max_ratio: Some(0.5), + }; + assert_eq!(workers.cores().get(), 3); + } + + #[test] + fn min_cores_exceeds_ratio_calculation() { + let workers = Workers { + available: NonZeroUsize::new(10).unwrap(), + max_cores: None, + min_cores: NonZeroUsize::new(6).unwrap(), + max_ratio: Some(0.5), + }; + assert_eq!(workers.cores().get(), 6); // min_cores > max_cores from ratio (5) + } + + #[test] + fn fallback_to_min_cores_when_no_max() { + let workers = Workers { + available: NonZeroUsize::new(8).unwrap(), + max_cores: None, + min_cores: NonZeroUsize::new(2).unwrap(), + max_ratio: None, + }; + 
assert_eq!(workers.cores().get(), 2); + } + + #[test] + fn single_cpu_environment() { + let workers = Workers { + available: NonZeroUsize::new(1).unwrap(), + max_cores: NonZeroUsize::new(4), + min_cores: NonZeroUsize::new(2).unwrap(), + max_ratio: None, + }; + assert_eq!(workers.cores().get(), 1); + } + + #[test] + fn ratio() { + // For 10 CPUs with 0.31 ratio, we get 3.1 cores, which rounds to 3 + let workers = Workers { + available: NonZeroUsize::new(10).unwrap(), + max_cores: None, + min_cores: NonZeroUsize::new(1).unwrap(), + max_ratio: Some(0.31), + }; + assert_eq!(workers.cores().get(), 3); + + // For 10 CPUs with 0.35 ratio, we get 3.5 cores, which rounds to 4 + let workers = Workers { + available: NonZeroUsize::new(10).unwrap(), + max_cores: None, + min_cores: NonZeroUsize::new(1).unwrap(), + max_ratio: Some(0.35), + }; + assert_eq!(workers.cores().get(), 4); + + // For 8 CPUs with 0.25 ratio, we get exactly 2 cores + let workers = Workers { + available: NonZeroUsize::new(8).unwrap(), + max_cores: None, + min_cores: NonZeroUsize::new(1).unwrap(), + max_ratio: Some(0.25), + }; + assert_eq!(workers.cores().get(), 2); + + // For 96 CPUs with 1.0 ratio, we get all 96 cores + let workers = Workers { + available: NonZeroUsize::new(96).unwrap(), + max_cores: None, + min_cores: NonZeroUsize::new(1).unwrap(), + max_ratio: Some(1.0), + }; + assert_eq!(workers.cores().get(), 96); + } +} diff --git a/linkerd2-proxy/Cargo.toml b/linkerd2-proxy/Cargo.toml index 91f87c9b81..e9affe6080 100644 --- a/linkerd2-proxy/Cargo.toml +++ b/linkerd2-proxy/Cargo.toml @@ -1,15 +1,14 @@ [package] name = "linkerd2-proxy" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } description = "The main proxy executable" [features] -default = ["multicore", "meshtls-rustls"] 
-multicore = ["tokio/rt-multi-thread", "num_cpus"] +default = ["meshtls-rustls"] meshtls-boring = ["linkerd-meshtls/boring"] meshtls-boring-fips = ["linkerd-meshtls/boring-fips"] meshtls-rustls = ["linkerd-meshtls/rustls"] @@ -18,15 +17,19 @@ pprof = ["linkerd-app/pprof"] [dependencies] futures = { version = "0.3", default-features = false } -num_cpus = { version = "1", optional = true } +kubert-prometheus-tokio = { version = "0.2.0", features = ["rt"] } linkerd-app = { path = "../linkerd/app" } linkerd-metrics = { path = "../linkerd/metrics" } # We don't actually use code from this crate in `main`; it's here only so we can # control its feature flags. linkerd-meshtls = { path = "../linkerd/meshtls" } linkerd-signal = { path = "../linkerd/signal" } -tokio = { version = "1", features = ["rt", "time", "net"] } +num_cpus = "1" +tokio = { version = "1", features = ["rt", "rt-multi-thread", "time", "net"] } tracing = "0.1" [target.x86_64-unknown-linux-gnu.dependencies] jemallocator = { version = "0.5" } + +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] } diff --git a/linkerd2-proxy/src/main.rs b/linkerd2-proxy/src/main.rs index b3a6370336..f198d135e2 100644 --- a/linkerd2-proxy/src/main.rs +++ b/linkerd2-proxy/src/main.rs @@ -42,7 +42,7 @@ fn main() { vendor = BUILD_INFO.vendor, ); - let metrics = linkerd_metrics::prom::Registry::default(); + let mut metrics = linkerd_metrics::prom::Registry::default(); // Load configuration from the environment without binding ports. let config = match Config::try_from_env() { @@ -57,6 +57,9 @@ fn main() { // `LINKERD2_PROXY_CORES` env or the number of available CPUs (as provided // by cgroups, when possible). rt::build().block_on(async move { + // Spawn a task to run in the background, exporting runtime metrics at a regular interval. 
+ rt::spawn_metrics_exporter(&mut metrics); + let (shutdown_tx, mut shutdown_rx) = mpsc::unbounded_channel(); let shutdown_grace_period = config.shutdown_grace_period; @@ -105,14 +108,11 @@ fn main() { ), } - if let Some(oc) = app.opencensus_addr() { - match oc.identity.value() { - None => info!("OpenCensus tracing collector at {}", oc.addr), + if let Some(tracing) = app.tracing_addr() { + match tracing.identity.value() { + None => info!("Tracing collector at {}", tracing.addr), Some(tls) => { - info!( - "OpenCensus tracing collector at {} ({})", - oc.addr, tls.server_id - ) + info!("Tracing collector at {} ({})", tracing.addr, tls.server_id) } } } diff --git a/linkerd2-proxy/src/rt.rs b/linkerd2-proxy/src/rt.rs index b88eb17cce..e374c6481e 100644 --- a/linkerd2-proxy/src/rt.rs +++ b/linkerd2-proxy/src/rt.rs @@ -1,7 +1,9 @@ +use std::num::NonZeroUsize; + +use linkerd_app::Workers; use tokio::runtime::{Builder, Runtime}; -use tracing::{info, warn}; +use tracing::{debug, info, warn}; -#[cfg(feature = "multicore")] pub(crate) fn build() -> Runtime { // The proxy creates an additional admin thread, but it would be wasteful to // allocate a whole core to it; so we let the main runtime consume all @@ -10,31 +12,60 @@ pub(crate) fn build() -> Runtime { // // The basic scheduler is used when the threaded scheduler would provide no // benefit. 
- let mut cores = std::env::var("LINKERD2_PROXY_CORES") + + let min_cores = std::env::var("LINKERD2_PROXY_CORES_MIN") .ok() .and_then(|v| { - let opt = v.parse::().ok().filter(|n| *n > 0); + let opt = v.parse::().ok().and_then(NonZeroUsize::new); if opt.is_none() { - warn!(LINKERD2_PROXY_CORES = %v, "Ignoring invalid configuration"); + warn!(LINKERD2_PROXY_CORES_MIN = %v, "Ignoring invalid configuration"); } opt }) - .unwrap_or(0); + .or_else(|| { + std::env::var("LINKERD2_PROXY_CORES").ok().and_then(|v| { + let opt = v.parse::().ok().and_then(NonZeroUsize::new); + if opt.is_none() { + warn!(LINKERD2_PROXY_CORES = %v, "Ignoring invalid configuration"); + } + opt + }) + }) + .unwrap_or_else(|| NonZeroUsize::new(1).unwrap()); - let cpus = num_cpus::get(); - debug_assert!(cpus > 0, "At least one CPU must be available"); - if cores > cpus { - warn!( - cpus, - LINKERD2_PROXY_CORES = cores, - "Ignoring configuration due to insufficient resources" - ); - cores = cpus; - } + let max_cores = std::env::var("LINKERD2_PROXY_CORES_MAX") + .ok() + .and_then(|v| { + let opt = v.parse::().ok().and_then(NonZeroUsize::new); + if opt.is_none() { + warn!(LINKERD2_PROXY_CORES_MAX = %v, "Ignoring invalid configuration"); + } + opt + }); - match cores { - // `0` is unexpected, but it's a wild world out there. 
- 0 | 1 => { + let cores_ratio = std::env::var("LINKERD2_PROXY_CORES_MAX_RATIO") + .ok() + .and_then(|v| { + let opt = v.parse::().ok().filter(|n| *n > 0.0 && *n <= 1.0); + if opt.is_none() { + warn!(LINKERD2_PROXY_CORES_MAX_RATIO = %v, "Ignoring invalid configuration"); + } + opt + }); + + let available_cpus = num_cpus::get(); + debug_assert!(available_cpus > 0, "At least one CPU must be available"); + let workers = Workers { + available: NonZeroUsize::new(available_cpus) + .unwrap_or_else(|| NonZeroUsize::new(1).unwrap()), + max_ratio: cores_ratio, + min_cores, + max_cores, + }; + debug!(?workers); + + match workers.cores().get() { + 1 => { info!("Using single-threaded proxy runtime"); Builder::new_current_thread() .enable_all() @@ -42,25 +73,49 @@ pub(crate) fn build() -> Runtime { .build() .expect("failed to build basic runtime!") } - num_cpus => { + cores => { info!(%cores, "Using multi-threaded proxy runtime"); Builder::new_multi_thread() .enable_all() .thread_name("proxy") - .worker_threads(num_cpus) - .max_blocking_threads(num_cpus) + .worker_threads(cores) + .max_blocking_threads(cores) .build() .expect("failed to build threaded runtime!") } } } -#[cfg(not(feature = "multicore"))] -pub(crate) fn build() -> Runtime { - Builder::new() - .enable_all() - .thread_name("proxy") - .basic_scheduler() - .build() - .expect("failed to build basic runtime!") +/// Spawns a task to scrape metrics for the given runtime at a regular interval. +/// +/// Note that this module requires unstable tokio functionality that must be +/// enabled via the `tokio_unstable` feature. When it is not enabled, no metrics +/// will be registered. +/// +/// `RUSTFLAGS="--cfg tokio_unstable"` must be set at build-time to use this feature. +pub fn spawn_metrics_exporter(registry: &mut linkerd_metrics::prom::Registry) { + #[cfg(tokio_unstable)] + { + use {std::time::Duration, tracing::Instrument}; + + /// The fixed interval at which tokio runtime metrics are updated. 
+ // + // TODO(kate): perhaps this could be configurable eventually. for now, it's hard-coded. + const INTERVAL: Duration = Duration::from_secs(1); + + let mut interval = tokio::time::interval(INTERVAL); + + let registry = registry.sub_registry_with_prefix("tokio_rt"); + let runtime = tokio::runtime::Handle::current(); + let metrics = kubert_prometheus_tokio::Runtime::register(registry, runtime); + + tokio::spawn( + async move { metrics.updated(&mut interval).await } + .instrument(tracing::info_span!("kubert-prom-tokio-rt")), + ); + } + #[cfg(not(tokio_unstable))] + { + tracing::debug!("Tokio runtime metrics cannot be monitored without the tokio_unstable cfg"); + } } diff --git a/opencensus-proto/Cargo.toml b/opencensus-proto/Cargo.toml index c339c66335..7b6ac6a1e3 100644 --- a/opencensus-proto/Cargo.toml +++ b/opencensus-proto/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "opencensus-proto" -version = "0.1.0" +version = { workspace = true } authors = ["The OpenCensus Authors"] -license = "Apache-2.0" -edition = "2021" -publish = false +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } description = """ gRPC bindings for OpenCensus. @@ -12,17 +12,17 @@ Vendored from https://github.com/census-instrumentation/opencensus-proto/. 
""" [dependencies] -bytes = "1" -prost = "0.12" -prost-types = "0.12" +bytes = { workspace = true } +prost = { workspace = true } +prost-types = { workspace = true } [dependencies.tonic] -version = "0.10" +workspace = true default-features = false features = ["prost", "codegen"] [dev-dependencies.tonic-build] -version = "0.10" +workspace = true default-features = false features = ["prost"] diff --git a/opencensus-proto/src/gen/opencensus.proto.agent.common.v1.rs b/opencensus-proto/src/gen/opencensus.proto.agent.common.v1.rs index dbe89ba88b..879a28bc1b 100644 --- a/opencensus-proto/src/gen/opencensus.proto.agent.common.v1.rs +++ b/opencensus-proto/src/gen/opencensus.proto.agent.common.v1.rs @@ -3,7 +3,6 @@ /// Note, this is not the metadata about the Node or service that is described by associated spans. /// In the future we plan to extend the identifier proto definition to support /// additional information (e.g cloud id, etc.) -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Node { /// Identifier that uniquely identifies a process within a VM/container. @@ -23,7 +22,6 @@ pub struct Node { >, } /// Identifier that uniquely identifies a process within a VM/container. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ProcessIdentifier { /// The host name. Usually refers to the machine/container name. @@ -38,7 +36,6 @@ pub struct ProcessIdentifier { pub start_timestamp: ::core::option::Option<::prost_types::Timestamp>, } /// Information on OpenCensus Library. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct LibraryInfo { /// Language of OpenCensus Library. @@ -85,17 +82,17 @@ pub mod library_info { /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
pub fn as_str_name(&self) -> &'static str { match self { - Language::Unspecified => "LANGUAGE_UNSPECIFIED", - Language::Cpp => "CPP", - Language::CSharp => "C_SHARP", - Language::Erlang => "ERLANG", - Language::GoLang => "GO_LANG", - Language::Java => "JAVA", - Language::NodeJs => "NODE_JS", - Language::Php => "PHP", - Language::Python => "PYTHON", - Language::Ruby => "RUBY", - Language::WebJs => "WEB_JS", + Self::Unspecified => "LANGUAGE_UNSPECIFIED", + Self::Cpp => "CPP", + Self::CSharp => "C_SHARP", + Self::Erlang => "ERLANG", + Self::GoLang => "GO_LANG", + Self::Java => "JAVA", + Self::NodeJs => "NODE_JS", + Self::Php => "PHP", + Self::Python => "PYTHON", + Self::Ruby => "RUBY", + Self::WebJs => "WEB_JS", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -118,7 +115,6 @@ pub mod library_info { } } /// Additional service information. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ServiceInfo { /// Name of the service. diff --git a/opencensus-proto/src/gen/opencensus.proto.agent.trace.v1.rs b/opencensus-proto/src/gen/opencensus.proto.agent.trace.v1.rs index afe20f6d0e..9884c60e08 100644 --- a/opencensus-proto/src/gen/opencensus.proto.agent.trace.v1.rs +++ b/opencensus-proto/src/gen/opencensus.proto.agent.trace.v1.rs @@ -1,5 +1,4 @@ // This file is @generated by prost-build. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct CurrentLibraryConfig { /// This is required only in the first message on the stream or if the @@ -11,7 +10,6 @@ pub struct CurrentLibraryConfig { #[prost(message, optional, tag = "2")] pub config: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct UpdatedLibraryConfig { /// This field is ignored when the RPC is used to configure only one Application. 
@@ -23,7 +21,6 @@ pub struct UpdatedLibraryConfig { #[prost(message, optional, tag = "2")] pub config: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct ExportTraceServiceRequest { /// This is required only in the first message on the stream or if the @@ -41,12 +38,17 @@ pub struct ExportTraceServiceRequest { #[prost(message, optional, tag = "3")] pub resource: ::core::option::Option, } -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct ExportTraceServiceResponse {} /// Generated client implementations. pub mod trace_service_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; use tonic::codegen::http::Uri; /// Service that can be used to push spans and configs between one Application @@ -61,8 +63,8 @@ pub mod trace_service_client { where T: tonic::client::GrpcService, T::Error: Into, - T::ResponseBody: Body + Send + 'static, - ::Error: Into + Send, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); @@ -87,7 +89,7 @@ pub mod trace_service_client { >, , - >>::Error: Into + Send + Sync, + >>::Error: Into + std::marker::Send + std::marker::Sync, { TraceServiceClient::new(InterceptedService::new(inner, interceptor)) } @@ -138,8 +140,7 @@ pub mod trace_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; @@ -172,8 +173,7 @@ pub mod trace_service_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service 
was not ready: {}", e.into()), ) })?; diff --git a/opencensus-proto/src/gen/opencensus.proto.resource.v1.rs b/opencensus-proto/src/gen/opencensus.proto.resource.v1.rs index 43dccb1072..fb57bed51e 100644 --- a/opencensus-proto/src/gen/opencensus.proto.resource.v1.rs +++ b/opencensus-proto/src/gen/opencensus.proto.resource.v1.rs @@ -1,6 +1,5 @@ // This file is @generated by prost-build. /// Resource information. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Resource { /// Type identifier for the resource. diff --git a/opencensus-proto/src/gen/opencensus.proto.trace.v1.rs b/opencensus-proto/src/gen/opencensus.proto.trace.v1.rs index a8643afa48..e52c2e86cc 100644 --- a/opencensus-proto/src/gen/opencensus.proto.trace.v1.rs +++ b/opencensus-proto/src/gen/opencensus.proto.trace.v1.rs @@ -9,7 +9,6 @@ /// /// The next id is 17. /// TODO(bdrutu): Add an example. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Span { /// A unique identifier for a trace. All spans from the same trace share @@ -122,7 +121,6 @@ pub mod span { /// It is a list of Tracestate.Entry with a maximum of 32 members in the list. /// /// See the for more details about this field. - #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Tracestate { /// A list of entries that represent the Tracestate. @@ -131,7 +129,6 @@ pub mod span { } /// Nested message and enum types in `Tracestate`. pub mod tracestate { - #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Entry { /// The key must begin with a lowercase letter, and can only contain @@ -147,7 +144,6 @@ pub mod span { } } /// A set of attributes, each with a key and a value. - #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Attributes { /// The set of attributes. 
The value can be a string, an integer, a double @@ -170,7 +166,6 @@ pub mod span { pub dropped_attributes_count: i32, } /// A time-stamped annotation or message event in the Span. - #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct TimeEvent { /// The time the event occurred. @@ -184,7 +179,6 @@ pub mod span { /// Nested message and enum types in `TimeEvent`. pub mod time_event { /// A text annotation with a set of attributes. - #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Annotation { /// A user-supplied message describing the event. @@ -195,8 +189,7 @@ pub mod span { pub attributes: ::core::option::Option, } /// An event describing a message sent/received between Spans. - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Message)] + #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct MessageEvent { /// The type of MessageEvent. Indicates whether the message was sent or /// received. @@ -246,9 +239,9 @@ pub mod span { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - Type::Unspecified => "TYPE_UNSPECIFIED", - Type::Sent => "SENT", - Type::Received => "RECEIVED", + Self::Unspecified => "TYPE_UNSPECIFIED", + Self::Sent => "SENT", + Self::Received => "RECEIVED", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -264,7 +257,6 @@ pub mod span { } /// A `TimeEvent` can contain either an `Annotation` object or a /// `MessageEvent` object, but not both. - #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Value { /// A text annotation with a set of attributes. @@ -278,7 +270,6 @@ pub mod span { /// A collection of `TimeEvent`s. 
A `TimeEvent` is a time-stamped annotation /// on the span, consisting of either user-supplied key-value pairs, or /// details of a message sent/received between Spans. - #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct TimeEvents { /// A collection of `TimeEvent`s. @@ -297,7 +288,6 @@ pub mod span { /// different trace. For example, this can be used in batching operations, /// where a single batch handler processes multiple requests from different /// traces or when the handler receives a request from a different project. - #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Link { /// A unique identifier of a trace that this linked span is part of. The ID is a @@ -349,9 +339,9 @@ pub mod span { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - Type::Unspecified => "TYPE_UNSPECIFIED", - Type::ChildLinkedSpan => "CHILD_LINKED_SPAN", - Type::ParentLinkedSpan => "PARENT_LINKED_SPAN", + Self::Unspecified => "TYPE_UNSPECIFIED", + Self::ChildLinkedSpan => "CHILD_LINKED_SPAN", + Self::ParentLinkedSpan => "PARENT_LINKED_SPAN", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -367,7 +357,6 @@ pub mod span { } /// A collection of links, which are references from this span to a span /// in the same or different trace. - #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Links { /// A collection of links. @@ -409,9 +398,9 @@ pub mod span { /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
pub fn as_str_name(&self) -> &'static str { match self { - SpanKind::Unspecified => "SPAN_KIND_UNSPECIFIED", - SpanKind::Server => "SERVER", - SpanKind::Client => "CLIENT", + Self::Unspecified => "SPAN_KIND_UNSPECIFIED", + Self::Server => "SERVER", + Self::Client => "CLIENT", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -430,7 +419,6 @@ pub mod span { /// are a subset of those of /// [google.rpc.Status](), /// which is used by [gRPC](). -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Status { /// The status code. This is optional field. It is safe to assume 0 (OK) @@ -442,7 +430,6 @@ pub struct Status { pub message: ::prost::alloc::string::String, } /// The value of an Attribute. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct AttributeValue { /// The type of the value. @@ -452,7 +439,6 @@ pub struct AttributeValue { /// Nested message and enum types in `AttributeValue`. pub mod attribute_value { /// The type of the value. - #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum Value { /// A string up to 256 bytes long. @@ -470,7 +456,6 @@ pub mod attribute_value { } } /// The call stack which originated this span. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct StackTrace { /// Stack frames in this stack trace. @@ -494,7 +479,6 @@ pub struct StackTrace { /// Nested message and enum types in `StackTrace`. pub mod stack_trace { /// A single stack frame in a stack trace. - #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct StackFrame { /// The fully-qualified name that uniquely identifies the function or @@ -524,7 +508,6 @@ pub mod stack_trace { pub source_version: ::core::option::Option, } /// A collection of stack frames, which can be truncated. 
- #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct StackFrames { /// Stack frames in this call stack. @@ -538,7 +521,6 @@ pub mod stack_trace { } } /// A description of a binary module. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Module { /// TODO: document the meaning of this field. @@ -552,7 +534,6 @@ pub struct Module { pub build_id: ::core::option::Option, } /// A string that might be shortened to a specified length. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct TruncatableString { /// The shortened string. For example, if the original string was 500 bytes long and @@ -570,8 +551,7 @@ pub struct TruncatableString { } /// Global configuration of the trace service. All fields must be specified, or /// the default (zero) values will be used for each type. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct TraceConfig { /// The global default max number of attributes per span. #[prost(int64, tag = "4")] @@ -592,8 +572,7 @@ pub struct TraceConfig { /// Nested message and enum types in `TraceConfig`. pub mod trace_config { /// The global default sampler used to make decisions on span sampling. - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] + #[derive(Clone, Copy, PartialEq, ::prost::Oneof)] pub enum Sampler { #[prost(message, tag = "1")] ProbabilitySampler(super::ProbabilitySampler), @@ -605,16 +584,14 @@ pub mod trace_config { } /// Sampler that tries to uniformly sample traces with a given probability. /// The probability of sampling a trace is equal to that of the specified probability. 
-#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct ProbabilitySampler { /// The desired probability of sampling. Must be within \[0.0, 1.0\]. #[prost(double, tag = "1")] pub sampling_probability: f64, } /// Sampler that always makes a constant decision on span sampling. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct ConstantSampler { #[prost(enumeration = "constant_sampler::ConstantDecision", tag = "1")] pub decision: i32, @@ -649,9 +626,9 @@ pub mod constant_sampler { /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { - ConstantDecision::AlwaysOff => "ALWAYS_OFF", - ConstantDecision::AlwaysOn => "ALWAYS_ON", - ConstantDecision::AlwaysParent => "ALWAYS_PARENT", + Self::AlwaysOff => "ALWAYS_OFF", + Self::AlwaysOn => "ALWAYS_ON", + Self::AlwaysParent => "ALWAYS_PARENT", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -666,8 +643,7 @@ pub mod constant_sampler { } } /// Sampler that tries to sample with a rate per time window. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct RateLimitingSampler { /// Rate per second. 
#[prost(int64, tag = "1")] diff --git a/opencensus-proto/tests/bootstrap.rs b/opencensus-proto/tests/bootstrap.rs index ddb181dec9..7646236132 100644 --- a/opencensus-proto/tests/bootstrap.rs +++ b/opencensus-proto/tests/bootstrap.rs @@ -33,7 +33,7 @@ fn generate(out_dir: &std::path::Path) { .build_server(false) .emit_rerun_if_changed(false) .out_dir(out_dir) - .compile(iface_files, &["."]) + .compile_protos(iface_files, &["."]) { panic!("failed to compile protobuf: {error}") } } diff --git a/opentelemetry-proto/Cargo.toml b/opentelemetry-proto/Cargo.toml new file mode 100644 index 0000000000..07256eb222 --- /dev/null +++ b/opentelemetry-proto/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "opentelemetry-proto" +version = { workspace = true } +authors = ["The OpenTelemetry Authors"] +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } +description = """ +gRPC bindings for OpenTelemetry. + +Vendored from https://github.com/open-telemetry/opentelemetry-rust/. +""" + +[dependencies] +tonic = { workspace = true, features = ["codegen", "prost", "transport"] } +prost = { workspace = true } +opentelemetry = { version = "0.29", default-features = false, features = ["trace"] } +opentelemetry_sdk = { version = "0.29", default-features = false, features = ["trace"] } +[dev-dependencies] +opentelemetry = { version = "0.29", default-features = false, features = ["trace", "testing"] } +tonic-build = { workspace = true, default-features = false, features = ["prost"] } + +[lib] +doctest = false diff --git a/opentelemetry-proto/README.md b/opentelemetry-proto/README.md new file mode 100644 index 0000000000..c802805818 --- /dev/null +++ b/opentelemetry-proto/README.md @@ -0,0 +1,21 @@ +# opentelemetry-proto + +This library mirrors parts of the +[`opentelemetry-proto`](https://github.com/open-telemetry/opentelemetry-proto/) +repo, with the non-tracing and build-related components removed.
+ +## License + +Copyright 2024, OpenTelemetry Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/opentelemetry-proto/opentelemetry/proto/collector/trace/v1/trace_service.proto b/opentelemetry-proto/opentelemetry/proto/collector/trace/v1/trace_service.proto new file mode 100644 index 0000000000..d6fe67f9e5 --- /dev/null +++ b/opentelemetry-proto/opentelemetry/proto/collector/trace/v1/trace_service.proto @@ -0,0 +1,79 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package opentelemetry.proto.collector.trace.v1; + +import "opentelemetry/proto/trace/v1/trace.proto"; + +option csharp_namespace = "OpenTelemetry.Proto.Collector.Trace.V1"; +option java_multiple_files = true; +option java_package = "io.opentelemetry.proto.collector.trace.v1"; +option java_outer_classname = "TraceServiceProto"; +option go_package = "go.opentelemetry.io/proto/otlp/collector/trace/v1"; + +// Service that can be used to push spans between one Application instrumented with +// OpenTelemetry and a collector, or between a collector and a central collector (in this +// case spans are sent/received to/from multiple Applications). +service TraceService { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + rpc Export(ExportTraceServiceRequest) returns (ExportTraceServiceResponse) {} +} + +message ExportTraceServiceRequest { + // An array of ResourceSpans. + // For data coming from a single resource this array will typically contain one + // element. Intermediary nodes (such as OpenTelemetry Collector) that receive + // data from multiple origins typically batch the data before forwarding further and + // in that case this array will contain multiple elements. + repeated opentelemetry.proto.trace.v1.ResourceSpans resource_spans = 1; +} + +message ExportTraceServiceResponse { + // The details of a partially successful export request. + // + // If the request is only partially accepted + // (i.e. when the server accepts only parts of the data and rejects the rest) + // the server MUST initialize the `partial_success` field and MUST + // set the `rejected_` with the number of items it rejected. + // + // Servers MAY also make use of the `partial_success` field to convey + // warnings/suggestions to senders even when the request was fully accepted. + // In such cases, the `rejected_` MUST have a value of `0` and + // the `error_message` MUST be non-empty. 
+ // + // A `partial_success` message with an empty value (rejected_ = 0 and + // `error_message` = "") is equivalent to it not being set/present. Senders + // SHOULD interpret it the same way as in the full success case. + ExportTracePartialSuccess partial_success = 1; +} + +message ExportTracePartialSuccess { + // The number of rejected spans. + // + // A `rejected_` field holding a `0` value indicates that the + // request was fully accepted. + int64 rejected_spans = 1; + + // A developer-facing human-readable message in English. It should be used + // either to explain why the server rejected parts of the data during a partial + // success or to convey warnings/suggestions during a full success. The message + // should offer guidance on how users can address such issues. + // + // error_message is an optional field. An error_message with an empty value + // is equivalent to it not being set. + string error_message = 2; +} diff --git a/opentelemetry-proto/opentelemetry/proto/common/v1/common.proto b/opentelemetry-proto/opentelemetry/proto/common/v1/common.proto new file mode 100644 index 0000000000..ff8a21a1fa --- /dev/null +++ b/opentelemetry-proto/opentelemetry/proto/common/v1/common.proto @@ -0,0 +1,81 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package opentelemetry.proto.common.v1; + +option csharp_namespace = "OpenTelemetry.Proto.Common.V1"; +option java_multiple_files = true; +option java_package = "io.opentelemetry.proto.common.v1"; +option java_outer_classname = "CommonProto"; +option go_package = "go.opentelemetry.io/proto/otlp/common/v1"; + +// AnyValue is used to represent any type of attribute value. AnyValue may contain a +// primitive value such as a string or integer or it may contain an arbitrary nested +// object containing arrays, key-value lists and primitives. +message AnyValue { + // The value is one of the listed fields. It is valid for all values to be unspecified + // in which case this AnyValue is considered to be "empty". + oneof value { + string string_value = 1; + bool bool_value = 2; + int64 int_value = 3; + double double_value = 4; + ArrayValue array_value = 5; + KeyValueList kvlist_value = 6; + bytes bytes_value = 7; + } +} + +// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message +// since oneof in AnyValue does not allow repeated fields. +message ArrayValue { + // Array of values. The array may be empty (contain 0 elements). + repeated AnyValue values = 1; +} + +// KeyValueList is a list of KeyValue messages. We need KeyValueList as a message +// since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need +// a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to +// avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches +// are semantically equivalent. +message KeyValueList { + // A collection of key/value pairs of key-value pairs. The list may be empty (may + // contain 0 elements). + // The keys MUST be unique (it is not allowed to have more than one + // value with the same key). + repeated KeyValue values = 1; +} + +// KeyValue is a key-value pair that is used to store Span attributes, Link +// attributes, etc. 
+message KeyValue { + string key = 1; + AnyValue value = 2; +} + +// InstrumentationScope is a message representing the instrumentation scope information +// such as the fully qualified name and version. +message InstrumentationScope { + // An empty instrumentation scope name means the name is unknown. + string name = 1; + string version = 2; + + // Additional attributes that describe the scope. [Optional]. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated KeyValue attributes = 3; + uint32 dropped_attributes_count = 4; +} diff --git a/opentelemetry-proto/opentelemetry/proto/resource/v1/resource.proto b/opentelemetry-proto/opentelemetry/proto/resource/v1/resource.proto new file mode 100644 index 0000000000..6637560bc3 --- /dev/null +++ b/opentelemetry-proto/opentelemetry/proto/resource/v1/resource.proto @@ -0,0 +1,37 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package opentelemetry.proto.resource.v1; + +import "opentelemetry/proto/common/v1/common.proto"; + +option csharp_namespace = "OpenTelemetry.Proto.Resource.V1"; +option java_multiple_files = true; +option java_package = "io.opentelemetry.proto.resource.v1"; +option java_outer_classname = "ResourceProto"; +option go_package = "go.opentelemetry.io/proto/otlp/resource/v1"; + +// Resource information. 
+message Resource { + // Set of attributes that describe the resource. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated opentelemetry.proto.common.v1.KeyValue attributes = 1; + + // dropped_attributes_count is the number of dropped attributes. If the value is 0, then + // no attributes were dropped. + uint32 dropped_attributes_count = 2; +} diff --git a/opentelemetry-proto/opentelemetry/proto/trace/v1/trace.proto b/opentelemetry-proto/opentelemetry/proto/trace/v1/trace.proto new file mode 100644 index 0000000000..5cb2f3ce1c --- /dev/null +++ b/opentelemetry-proto/opentelemetry/proto/trace/v1/trace.proto @@ -0,0 +1,355 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package opentelemetry.proto.trace.v1; + +import "opentelemetry/proto/common/v1/common.proto"; +import "opentelemetry/proto/resource/v1/resource.proto"; + +option csharp_namespace = "OpenTelemetry.Proto.Trace.V1"; +option java_multiple_files = true; +option java_package = "io.opentelemetry.proto.trace.v1"; +option java_outer_classname = "TraceProto"; +option go_package = "go.opentelemetry.io/proto/otlp/trace/v1"; + +// TracesData represents the traces data that can be stored in a persistent storage, +// OR can be embedded by other protocols that transfer OTLP traces data but do +// not implement the OTLP protocol. 
+// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +message TracesData { + // An array of ResourceSpans. + // For data coming from a single resource this array will typically contain + // one element. Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. + repeated ResourceSpans resource_spans = 1; +} + +// A collection of ScopeSpans from a Resource. +message ResourceSpans { + reserved 1000; + + // The resource for the spans in this message. + // If this field is not set then no resource info is known. + opentelemetry.proto.resource.v1.Resource resource = 1; + + // A list of ScopeSpans that originate from a resource. + repeated ScopeSpans scope_spans = 2; + + // The Schema URL, if known. This is the identifier of the Schema that the resource data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "scope_spans" field which have their own schema_url field. + string schema_url = 3; +} + +// A collection of Spans produced by an InstrumentationScope. +message ScopeSpans { + // The instrumentation scope information for the spans in this message. + // Semantically when InstrumentationScope isn't set, it is equivalent with + // an empty instrumentation scope name (unknown). + opentelemetry.proto.common.v1.InstrumentationScope scope = 1; + + // A list of Spans that originate from an instrumentation scope. + repeated Span spans = 2; + + // The Schema URL, if known. This is the identifier of the Schema that the span data + // is recorded in. 
To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to all spans and span events in the "spans" field. + string schema_url = 3; +} + +// A Span represents a single operation performed by a single component of the system. +// +// The next available field id is 17. +message Span { + // A unique identifier for a trace. All spans from the same trace share + // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR + // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. + bytes trace_id = 1; + + // A unique identifier for a span within a trace, assigned when the span + // is created. The ID is an 8-byte array. An ID with all zeroes OR of length + // other than 8 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. + bytes span_id = 2; + + // trace_state conveys information about request position in multiple distributed tracing graphs. + // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header + // See also https://github.com/w3c/distributed-tracing for more details about this field. + string trace_state = 3; + + // The `span_id` of this span's parent span. If this is a root span, then this + // field must be empty. The ID is an 8-byte array. + bytes parent_span_id = 4; + + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether a span's parent + // is remote. The states are (unknown, is not remote, is remote). 
+ // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // When creating span messages, if the message is logically forwarded from another source + // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD + // be copied as-is. If creating from a source that does not have an equivalent flags field + // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST + // be set to zero. + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // + // [Optional]. + fixed32 flags = 16; + + // A description of the span's operation. + // + // For example, the name can be a qualified method name or a file name + // and a line number where the operation is called. A best practice is to use + // the same display name at the same call point in an application. + // This makes it easier to correlate spans in different traces. + // + // This field is semantically required to be set to non-empty string. + // Empty value is equivalent to an unknown span name. + // + // This field is required. + string name = 5; + + // SpanKind is the type of span. Can be used to specify additional relationships between spans + // in addition to a parent/child relationship. + enum SpanKind { + // Unspecified. Do NOT use as default. + // Implementations MAY assume SpanKind to be INTERNAL when receiving UNSPECIFIED. + SPAN_KIND_UNSPECIFIED = 0; + + // Indicates that the span represents an internal operation within an application, + // as opposed to an operation happening at the boundaries. Default value. + SPAN_KIND_INTERNAL = 1; + + // Indicates that the span covers server-side handling of an RPC or other + // remote network request. + SPAN_KIND_SERVER = 2; + + // Indicates that the span describes a request to some remote service. 
+ SPAN_KIND_CLIENT = 3; + + // Indicates that the span describes a producer sending a message to a broker. + // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship + // between producer and consumer spans. A PRODUCER span ends when the message was accepted + // by the broker while the logical processing of the message might span a much longer time. + SPAN_KIND_PRODUCER = 4; + + // Indicates that the span describes consumer receiving a message from a broker. + // Like the PRODUCER kind, there is often no direct critical path latency relationship + // between producer and consumer spans. + SPAN_KIND_CONSUMER = 5; + } + + // Distinguishes between spans generated in a particular context. For example, + // two spans with the same name may be distinguished using `CLIENT` (caller) + // and `SERVER` (callee) to identify queueing latency associated with the span. + SpanKind kind = 6; + + // start_time_unix_nano is the start time of the span. On the client side, this is the time + // kept by the local machine where the span execution starts. On the server side, this + // is the time when the server's application handler starts running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + fixed64 start_time_unix_nano = 7; + + // end_time_unix_nano is the end time of the span. On the client side, this is the time + // kept by the local machine where the span execution ends. On the server side, this + // is the time when the server application handler stops running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + fixed64 end_time_unix_nano = 8; + + // attributes is a collection of key/value pairs. Note, global attributes + // like server name can be set using the resource API. 
Examples of attributes: + // + // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + // "/http/server_latency": 300 + // "example.com/myattribute": true + // "example.com/score": 10.239 + // + // The OpenTelemetry API specification further restricts the allowed value types: + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated opentelemetry.proto.common.v1.KeyValue attributes = 9; + + // dropped_attributes_count is the number of attributes that were discarded. Attributes + // can be discarded because their keys are too long or because there are too many + // attributes. If this value is 0, then no attributes were dropped. + uint32 dropped_attributes_count = 10; + + // Event is a time-stamped annotation of the span, consisting of user-supplied + // text description and key-value pairs. + message Event { + // time_unix_nano is the time the event occurred. + fixed64 time_unix_nano = 1; + + // name of the event. + // This field is semantically required to be set to non-empty string. + string name = 2; + + // attributes is a collection of attribute key/value pairs on the event. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated opentelemetry.proto.common.v1.KeyValue attributes = 3; + + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + uint32 dropped_attributes_count = 4; + } + + // events is a collection of Event items. + repeated Event events = 11; + + // dropped_events_count is the number of dropped events. If the value is 0, then no + // events were dropped. 
+ uint32 dropped_events_count = 12; + + // A pointer from the current span to another span in the same trace or in a + // different trace. For example, this can be used in batching operations, + // where a single batch handler processes multiple requests from different + // traces or when the handler receives a request from a different project. + message Link { + // A unique identifier of a trace that this linked span is part of. The ID is a + // 16-byte array. + bytes trace_id = 1; + + // A unique identifier for the linked span. The ID is an 8-byte array. + bytes span_id = 2; + + // The trace_state associated with the link. + string trace_state = 3; + + // attributes is a collection of attribute key/value pairs on the link. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated opentelemetry.proto.common.v1.KeyValue attributes = 4; + + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + uint32 dropped_attributes_count = 5; + + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether the link is remote. + // The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero. + // + // [Optional]. 
+ fixed32 flags = 6; + } + + // links is a collection of Links, which are references from this span to a span + // in the same or different trace. + repeated Link links = 13; + + // dropped_links_count is the number of dropped links after the maximum size was + // enforced. If this value is 0, then no links were dropped. + uint32 dropped_links_count = 14; + + // An optional final status for this span. Semantically when Status isn't set, it means + // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). + Status status = 15; +} + +// The Status type defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. +message Status { + reserved 1; + + // A developer-facing human readable error message. + string message = 2; + + // For the semantics of status codes see + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status + enum StatusCode { + // The default status. + STATUS_CODE_UNSET = 0; + // The Span has been validated by an Application developer or Operator to + // have completed successfully. + STATUS_CODE_OK = 1; + // The Span contains an error. + STATUS_CODE_ERROR = 2; + }; + + // The status code. + StatusCode code = 3; +} + +// SpanFlags represents constants used to interpret the +// Span.flags field, which is protobuf 'fixed32' type and is to +// be used as bit-fields. Each non-zero value defined in this enum is +// a bit-mask. To extract the bit-field, for example, use an +// expression like: +// +// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK) +// +// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. +// +// Note that Span flags were introduced in version 1.1 of the +// OpenTelemetry protocol. Older Span producers do not set this +// field, consequently consumers should not rely on the absence of a +// particular flag bit to indicate the presence of a particular feature. 
+enum SpanFlags { + // The zero value for the enum. Should not be used for comparisons. + // Instead use bitwise "and" with the appropriate mask as shown above. + SPAN_FLAGS_DO_NOT_USE = 0; + + // Bits 0-7 are used for trace flags. + SPAN_FLAGS_TRACE_FLAGS_MASK = 0x000000FF; + + // Bits 8 and 9 are used to indicate that the parent span or link span is remote. + // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. + // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote. + SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK = 0x00000100; + SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK = 0x00000200; + + // Bits 10-31 are reserved for future use. +} diff --git a/opentelemetry-proto/src/gen/opentelemetry.proto.collector.trace.v1.rs b/opentelemetry-proto/src/gen/opentelemetry.proto.collector.trace.v1.rs new file mode 100644 index 0000000000..9fc9bf3850 --- /dev/null +++ b/opentelemetry-proto/src/gen/opentelemetry.proto.collector.trace.v1.rs @@ -0,0 +1,167 @@ +// This file is @generated by prost-build. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ExportTraceServiceRequest { + /// An array of ResourceSpans. + /// For data coming from a single resource this array will typically contain one + /// element. Intermediary nodes (such as OpenTelemetry Collector) that receive + /// data from multiple origins typically batch the data before forwarding further and + /// in that case this array will contain multiple elements. + #[prost(message, repeated, tag = "1")] + pub resource_spans: ::prost::alloc::vec::Vec< + super::super::super::trace::v1::ResourceSpans, + >, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ExportTraceServiceResponse { + /// The details of a partially successful export request. + /// + /// If the request is only partially accepted + /// (i.e. 
when the server accepts only parts of the data and rejects the rest) + /// the server MUST initialize the `partial_success` field and MUST + /// set the `rejected_` with the number of items it rejected. + /// + /// Servers MAY also make use of the `partial_success` field to convey + /// warnings/suggestions to senders even when the request was fully accepted. + /// In such cases, the `rejected_` MUST have a value of `0` and + /// the `error_message` MUST be non-empty. + /// + /// A `partial_success` message with an empty value (rejected_ = 0 and + /// `error_message` = "") is equivalent to it not being set/present. Senders + /// SHOULD interpret it the same way as in the full success case. + #[prost(message, optional, tag = "1")] + pub partial_success: ::core::option::Option, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ExportTracePartialSuccess { + /// The number of rejected spans. + /// + /// A `rejected_` field holding a `0` value indicates that the + /// request was fully accepted. + #[prost(int64, tag = "1")] + pub rejected_spans: i64, + /// A developer-facing human-readable message in English. It should be used + /// either to explain why the server rejected parts of the data during a partial + /// success or to convey warnings/suggestions during a full success. The message + /// should offer guidance on how users can address such issues. + /// + /// error_message is an optional field. An error_message with an empty value + /// is equivalent to it not being set. + #[prost(string, tag = "2")] + pub error_message: ::prost::alloc::string::String, +} +/// Generated client implementations. 
+pub mod trace_service_client { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// Service that can be used to push spans between one Application instrumented with + /// OpenTelemetry and a collector, or between a collector and a central collector (in this + /// case spans are sent/received to/from multiple Applications). + #[derive(Debug, Clone)] + pub struct TraceServiceClient { + inner: tonic::client::Grpc, + } + impl TraceServiceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> TraceServiceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, + { + TraceServiceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. 
+ /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// For performance reasons, it is recommended to keep this RPC + /// alive for the entire life of the application. + pub async fn export( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/opentelemetry.proto.collector.trace.v1.TraceService/Export", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "opentelemetry.proto.collector.trace.v1.TraceService", + "Export", + ), + ); + self.inner.unary(req, path, codec).await + } + } +} diff --git a/opentelemetry-proto/src/gen/opentelemetry.proto.common.v1.rs b/opentelemetry-proto/src/gen/opentelemetry.proto.common.v1.rs new file mode 100644 index 0000000000..e9c1a57675 --- /dev/null +++ b/opentelemetry-proto/src/gen/opentelemetry.proto.common.v1.rs @@ -0,0 +1,81 @@ +// This file is @generated by prost-build. +/// AnyValue is used to represent any type of attribute value. AnyValue may contain a +/// primitive value such as a string or integer or it may contain an arbitrary nested +/// object containing arrays, key-value lists and primitives. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AnyValue { + /// The value is one of the listed fields. 
It is valid for all values to be unspecified + /// in which case this AnyValue is considered to be "empty". + #[prost(oneof = "any_value::Value", tags = "1, 2, 3, 4, 5, 6, 7")] + pub value: ::core::option::Option, +} +/// Nested message and enum types in `AnyValue`. +pub mod any_value { + /// The value is one of the listed fields. It is valid for all values to be unspecified + /// in which case this AnyValue is considered to be "empty". + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Value { + #[prost(string, tag = "1")] + StringValue(::prost::alloc::string::String), + #[prost(bool, tag = "2")] + BoolValue(bool), + #[prost(int64, tag = "3")] + IntValue(i64), + #[prost(double, tag = "4")] + DoubleValue(f64), + #[prost(message, tag = "5")] + ArrayValue(super::ArrayValue), + #[prost(message, tag = "6")] + KvlistValue(super::KeyValueList), + #[prost(bytes, tag = "7")] + BytesValue(::prost::alloc::vec::Vec), + } +} +/// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message +/// since oneof in AnyValue does not allow repeated fields. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ArrayValue { + /// Array of values. The array may be empty (contain 0 elements). + #[prost(message, repeated, tag = "1")] + pub values: ::prost::alloc::vec::Vec, +} +/// KeyValueList is a list of KeyValue messages. We need KeyValueList as a message +/// since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need +/// a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to +/// avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches +/// are semantically equivalent. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct KeyValueList { + /// A collection of key/value pairs of key-value pairs. The list may be empty (may + /// contain 0 elements). + /// The keys MUST be unique (it is not allowed to have more than one + /// value with the same key). 
+ #[prost(message, repeated, tag = "1")] + pub values: ::prost::alloc::vec::Vec, +} +/// KeyValue is a key-value pair that is used to store Span attributes, Link +/// attributes, etc. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct KeyValue { + #[prost(string, tag = "1")] + pub key: ::prost::alloc::string::String, + #[prost(message, optional, tag = "2")] + pub value: ::core::option::Option, +} +/// InstrumentationScope is a message representing the instrumentation scope information +/// such as the fully qualified name and version. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct InstrumentationScope { + /// An empty instrumentation scope name means the name is unknown. + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub version: ::prost::alloc::string::String, + /// Additional attributes that describe the scope. \[Optional\]. + /// Attribute keys MUST be unique (it is not allowed to have more than one + /// attribute with the same key). + #[prost(message, repeated, tag = "3")] + pub attributes: ::prost::alloc::vec::Vec, + #[prost(uint32, tag = "4")] + pub dropped_attributes_count: u32, +} diff --git a/opentelemetry-proto/src/gen/opentelemetry.proto.resource.v1.rs b/opentelemetry-proto/src/gen/opentelemetry.proto.resource.v1.rs new file mode 100644 index 0000000000..0daa6740b1 --- /dev/null +++ b/opentelemetry-proto/src/gen/opentelemetry.proto.resource.v1.rs @@ -0,0 +1,14 @@ +// This file is @generated by prost-build. +/// Resource information. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Resource { + /// Set of attributes that describe the resource. + /// Attribute keys MUST be unique (it is not allowed to have more than one + /// attribute with the same key). + #[prost(message, repeated, tag = "1")] + pub attributes: ::prost::alloc::vec::Vec, + /// dropped_attributes_count is the number of dropped attributes. If the value is 0, then + /// no attributes were dropped. 
+ #[prost(uint32, tag = "2")] + pub dropped_attributes_count: u32, +} diff --git a/opentelemetry-proto/src/gen/opentelemetry.proto.trace.v1.rs b/opentelemetry-proto/src/gen/opentelemetry.proto.trace.v1.rs new file mode 100644 index 0000000000..0c6a8a368c --- /dev/null +++ b/opentelemetry-proto/src/gen/opentelemetry.proto.trace.v1.rs @@ -0,0 +1,431 @@ +// This file is @generated by prost-build. +/// TracesData represents the traces data that can be stored in a persistent storage, +/// OR can be embedded by other protocols that transfer OTLP traces data but do +/// not implement the OTLP protocol. +/// +/// The main difference between this message and collector protocol is that +/// in this message there will not be any "control" or "metadata" specific to +/// OTLP protocol. +/// +/// When new fields are added into this message, the OTLP request MUST be updated +/// as well. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TracesData { + /// An array of ResourceSpans. + /// For data coming from a single resource this array will typically contain + /// one element. Intermediary nodes that receive data from multiple origins + /// typically batch the data before forwarding further and in that case this + /// array will contain multiple elements. + #[prost(message, repeated, tag = "1")] + pub resource_spans: ::prost::alloc::vec::Vec, +} +/// A collection of ScopeSpans from a Resource. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ResourceSpans { + /// The resource for the spans in this message. + /// If this field is not set then no resource info is known. + #[prost(message, optional, tag = "1")] + pub resource: ::core::option::Option, + /// A list of ScopeSpans that originate from a resource. + #[prost(message, repeated, tag = "2")] + pub scope_spans: ::prost::alloc::vec::Vec, + /// The Schema URL, if known. This is the identifier of the Schema that the resource data + /// is recorded in. 
To learn more about Schema URL see + /// + /// This schema_url applies to the data in the "resource" field. It does not apply + /// to the data in the "scope_spans" field which have their own schema_url field. + #[prost(string, tag = "3")] + pub schema_url: ::prost::alloc::string::String, +} +/// A collection of Spans produced by an InstrumentationScope. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ScopeSpans { + /// The instrumentation scope information for the spans in this message. + /// Semantically when InstrumentationScope isn't set, it is equivalent with + /// an empty instrumentation scope name (unknown). + #[prost(message, optional, tag = "1")] + pub scope: ::core::option::Option, + /// A list of Spans that originate from an instrumentation scope. + #[prost(message, repeated, tag = "2")] + pub spans: ::prost::alloc::vec::Vec, + /// The Schema URL, if known. This is the identifier of the Schema that the span data + /// is recorded in. To learn more about Schema URL see + /// + /// This schema_url applies to all spans and span events in the "spans" field. + #[prost(string, tag = "3")] + pub schema_url: ::prost::alloc::string::String, +} +/// A Span represents a single operation performed by a single component of the system. +/// +/// The next available field id is 17. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Span { + /// A unique identifier for a trace. All spans from the same trace share + /// the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR + /// of length other than 16 bytes is considered invalid (empty string in OTLP/JSON + /// is zero-length and thus is also invalid). + /// + /// This field is required. + #[prost(bytes = "vec", tag = "1")] + pub trace_id: ::prost::alloc::vec::Vec, + /// A unique identifier for a span within a trace, assigned when the span + /// is created. The ID is an 8-byte array. 
An ID with all zeroes OR of length + /// other than 8 bytes is considered invalid (empty string in OTLP/JSON + /// is zero-length and thus is also invalid). + /// + /// This field is required. + #[prost(bytes = "vec", tag = "2")] + pub span_id: ::prost::alloc::vec::Vec, + /// trace_state conveys information about request position in multiple distributed tracing graphs. + /// It is a trace_state in w3c-trace-context format: + /// See also for more details about this field. + #[prost(string, tag = "3")] + pub trace_state: ::prost::alloc::string::String, + /// The `span_id` of this span's parent span. If this is a root span, then this + /// field must be empty. The ID is an 8-byte array. + #[prost(bytes = "vec", tag = "4")] + pub parent_span_id: ::prost::alloc::vec::Vec, + /// Flags, a bit field. + /// + /// Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + /// Context specification. To read the 8-bit W3C trace flag, use + /// `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + /// + /// See for the flag definitions. + /// + /// Bits 8 and 9 represent the 3 states of whether a span's parent + /// is remote. The states are (unknown, is not remote, is remote). + /// To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + /// To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + /// + /// When creating span messages, if the message is logically forwarded from another source + /// with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD + /// be copied as-is. If creating from a source that does not have an equivalent flags field + /// (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST + /// be set to zero. + /// Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + /// + /// \[Optional\]. 
+ #[prost(fixed32, tag = "16")] + pub flags: u32, + /// A description of the span's operation. + /// + /// For example, the name can be a qualified method name or a file name + /// and a line number where the operation is called. A best practice is to use + /// the same display name at the same call point in an application. + /// This makes it easier to correlate spans in different traces. + /// + /// This field is semantically required to be set to non-empty string. + /// Empty value is equivalent to an unknown span name. + /// + /// This field is required. + #[prost(string, tag = "5")] + pub name: ::prost::alloc::string::String, + /// Distinguishes between spans generated in a particular context. For example, + /// two spans with the same name may be distinguished using `CLIENT` (caller) + /// and `SERVER` (callee) to identify queueing latency associated with the span. + #[prost(enumeration = "span::SpanKind", tag = "6")] + pub kind: i32, + /// start_time_unix_nano is the start time of the span. On the client side, this is the time + /// kept by the local machine where the span execution starts. On the server side, this + /// is the time when the server's application handler starts running. + /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + /// + /// This field is semantically required and it is expected that end_time >= start_time. + #[prost(fixed64, tag = "7")] + pub start_time_unix_nano: u64, + /// end_time_unix_nano is the end time of the span. On the client side, this is the time + /// kept by the local machine where the span execution ends. On the server side, this + /// is the time when the server application handler stops running. + /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + /// + /// This field is semantically required and it is expected that end_time >= start_time. + #[prost(fixed64, tag = "8")] + pub end_time_unix_nano: u64, + /// attributes is a collection of key/value pairs. 
Note, global attributes + /// like server name can be set using the resource API. Examples of attributes: + /// + /// "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + /// "/http/server_latency": 300 + /// "example.com/myattribute": true + /// "example.com/score": 10.239 + /// + /// The OpenTelemetry API specification further restricts the allowed value types: + /// + /// Attribute keys MUST be unique (it is not allowed to have more than one + /// attribute with the same key). + #[prost(message, repeated, tag = "9")] + pub attributes: ::prost::alloc::vec::Vec, + /// dropped_attributes_count is the number of attributes that were discarded. Attributes + /// can be discarded because their keys are too long or because there are too many + /// attributes. If this value is 0, then no attributes were dropped. + #[prost(uint32, tag = "10")] + pub dropped_attributes_count: u32, + /// events is a collection of Event items. + #[prost(message, repeated, tag = "11")] + pub events: ::prost::alloc::vec::Vec, + /// dropped_events_count is the number of dropped events. If the value is 0, then no + /// events were dropped. + #[prost(uint32, tag = "12")] + pub dropped_events_count: u32, + /// links is a collection of Links, which are references from this span to a span + /// in the same or different trace. + #[prost(message, repeated, tag = "13")] + pub links: ::prost::alloc::vec::Vec, + /// dropped_links_count is the number of dropped links after the maximum size was + /// enforced. If this value is 0, then no links were dropped. + #[prost(uint32, tag = "14")] + pub dropped_links_count: u32, + /// An optional final status for this span. Semantically when Status isn't set, it means + /// span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). + #[prost(message, optional, tag = "15")] + pub status: ::core::option::Option, +} +/// Nested message and enum types in `Span`. 
+pub mod span { + /// Event is a time-stamped annotation of the span, consisting of user-supplied + /// text description and key-value pairs. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Event { + /// time_unix_nano is the time the event occurred. + #[prost(fixed64, tag = "1")] + pub time_unix_nano: u64, + /// name of the event. + /// This field is semantically required to be set to non-empty string. + #[prost(string, tag = "2")] + pub name: ::prost::alloc::string::String, + /// attributes is a collection of attribute key/value pairs on the event. + /// Attribute keys MUST be unique (it is not allowed to have more than one + /// attribute with the same key). + #[prost(message, repeated, tag = "3")] + pub attributes: ::prost::alloc::vec::Vec< + super::super::super::common::v1::KeyValue, + >, + /// dropped_attributes_count is the number of dropped attributes. If the value is 0, + /// then no attributes were dropped. + #[prost(uint32, tag = "4")] + pub dropped_attributes_count: u32, + } + /// A pointer from the current span to another span in the same trace or in a + /// different trace. For example, this can be used in batching operations, + /// where a single batch handler processes multiple requests from different + /// traces or when the handler receives a request from a different project. + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Link { + /// A unique identifier of a trace that this linked span is part of. The ID is a + /// 16-byte array. + #[prost(bytes = "vec", tag = "1")] + pub trace_id: ::prost::alloc::vec::Vec, + /// A unique identifier for the linked span. The ID is an 8-byte array. + #[prost(bytes = "vec", tag = "2")] + pub span_id: ::prost::alloc::vec::Vec, + /// The trace_state associated with the link. + #[prost(string, tag = "3")] + pub trace_state: ::prost::alloc::string::String, + /// attributes is a collection of attribute key/value pairs on the link. 
+ /// Attribute keys MUST be unique (it is not allowed to have more than one + /// attribute with the same key). + #[prost(message, repeated, tag = "4")] + pub attributes: ::prost::alloc::vec::Vec< + super::super::super::common::v1::KeyValue, + >, + /// dropped_attributes_count is the number of dropped attributes. If the value is 0, + /// then no attributes were dropped. + #[prost(uint32, tag = "5")] + pub dropped_attributes_count: u32, + /// Flags, a bit field. + /// + /// Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + /// Context specification. To read the 8-bit W3C trace flag, use + /// `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + /// + /// See for the flag definitions. + /// + /// Bits 8 and 9 represent the 3 states of whether the link is remote. + /// The states are (unknown, is not remote, is remote). + /// To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + /// To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + /// + /// Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + /// When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero. + /// + /// \[Optional\]. + #[prost(fixed32, tag = "6")] + pub flags: u32, + } + /// SpanKind is the type of span. Can be used to specify additional relationships between spans + /// in addition to a parent/child relationship. + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum SpanKind { + /// Unspecified. Do NOT use as default. + /// Implementations MAY assume SpanKind to be INTERNAL when receiving UNSPECIFIED. + Unspecified = 0, + /// Indicates that the span represents an internal operation within an application, + /// as opposed to an operation happening at the boundaries. Default value. 
+ Internal = 1, + /// Indicates that the span covers server-side handling of an RPC or other + /// remote network request. + Server = 2, + /// Indicates that the span describes a request to some remote service. + Client = 3, + /// Indicates that the span describes a producer sending a message to a broker. + /// Unlike CLIENT and SERVER, there is often no direct critical path latency relationship + /// between producer and consumer spans. A PRODUCER span ends when the message was accepted + /// by the broker while the logical processing of the message might span a much longer time. + Producer = 4, + /// Indicates that the span describes consumer receiving a message from a broker. + /// Like the PRODUCER kind, there is often no direct critical path latency relationship + /// between producer and consumer spans. + Consumer = 5, + } + impl SpanKind { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Unspecified => "SPAN_KIND_UNSPECIFIED", + Self::Internal => "SPAN_KIND_INTERNAL", + Self::Server => "SPAN_KIND_SERVER", + Self::Client => "SPAN_KIND_CLIENT", + Self::Producer => "SPAN_KIND_PRODUCER", + Self::Consumer => "SPAN_KIND_CONSUMER", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "SPAN_KIND_UNSPECIFIED" => Some(Self::Unspecified), + "SPAN_KIND_INTERNAL" => Some(Self::Internal), + "SPAN_KIND_SERVER" => Some(Self::Server), + "SPAN_KIND_CLIENT" => Some(Self::Client), + "SPAN_KIND_PRODUCER" => Some(Self::Producer), + "SPAN_KIND_CONSUMER" => Some(Self::Consumer), + _ => None, + } + } + } +} +/// The Status type defines a logical error model that is suitable for different +/// programming environments, including REST APIs and RPC APIs. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Status { + /// A developer-facing human readable error message. + #[prost(string, tag = "2")] + pub message: ::prost::alloc::string::String, + /// The status code. + #[prost(enumeration = "status::StatusCode", tag = "3")] + pub code: i32, +} +/// Nested message and enum types in `Status`. +pub mod status { + /// For the semantics of status codes see + /// + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum StatusCode { + /// The default status. + Unset = 0, + /// The Span has been validated by an Application developer or Operator to + /// have completed successfully. + Ok = 1, + /// The Span contains an error. + Error = 2, + } + impl StatusCode { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Self::Unset => "STATUS_CODE_UNSET", + Self::Ok => "STATUS_CODE_OK", + Self::Error => "STATUS_CODE_ERROR", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "STATUS_CODE_UNSET" => Some(Self::Unset), + "STATUS_CODE_OK" => Some(Self::Ok), + "STATUS_CODE_ERROR" => Some(Self::Error), + _ => None, + } + } + } +} +/// SpanFlags represents constants used to interpret the +/// Span.flags field, which is protobuf 'fixed32' type and is to +/// be used as bit-fields. Each non-zero value defined in this enum is +/// a bit-mask. To extract the bit-field, for example, use an +/// expression like: +/// +/// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK) +/// +/// See for the flag definitions. +/// +/// Note that Span flags were introduced in version 1.1 of the +/// OpenTelemetry protocol. Older Span producers do not set this +/// field, consequently consumers should not rely on the absence of a +/// particular flag bit to indicate the presence of a particular feature. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum SpanFlags { + /// The zero value for the enum. Should not be used for comparisons. + /// Instead use bitwise "and" with the appropriate mask as shown above. + DoNotUse = 0, + /// Bits 0-7 are used for trace flags. + TraceFlagsMask = 255, + /// Bits 8 and 9 are used to indicate that the parent span or link span is remote. + /// Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. + /// Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote. + ContextHasIsRemoteMask = 256, + ContextIsRemoteMask = 512, +} +impl SpanFlags { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
+ pub fn as_str_name(&self) -> &'static str { + match self { + Self::DoNotUse => "SPAN_FLAGS_DO_NOT_USE", + Self::TraceFlagsMask => "SPAN_FLAGS_TRACE_FLAGS_MASK", + Self::ContextHasIsRemoteMask => "SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK", + Self::ContextIsRemoteMask => "SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "SPAN_FLAGS_DO_NOT_USE" => Some(Self::DoNotUse), + "SPAN_FLAGS_TRACE_FLAGS_MASK" => Some(Self::TraceFlagsMask), + "SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK" => Some(Self::ContextHasIsRemoteMask), + "SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK" => Some(Self::ContextIsRemoteMask), + _ => None, + } + } +} diff --git a/opentelemetry-proto/src/lib.rs b/opentelemetry-proto/src/lib.rs new file mode 100644 index 0000000000..d9ed84a41f --- /dev/null +++ b/opentelemetry-proto/src/lib.rs @@ -0,0 +1,11 @@ +//! gRPC bindings for OpenTelemetry. +//! +//! Vendored from . + +// proto mod contains file generated by protobuf or other build tools. +// we shouldn't manually change it. Thus skip format and lint check. 
+#[rustfmt::skip] +#[allow(warnings)] +pub mod proto; + +pub mod transform; diff --git a/opentelemetry-proto/src/proto.rs b/opentelemetry-proto/src/proto.rs new file mode 100644 index 0000000000..46bf90c367 --- /dev/null +++ b/opentelemetry-proto/src/proto.rs @@ -0,0 +1,25 @@ +pub mod collector { + pub mod trace { + pub mod v1 { + include!("gen/opentelemetry.proto.collector.trace.v1.rs"); + } + } +} + +pub mod common { + pub mod v1 { + include!("gen/opentelemetry.proto.common.v1.rs"); + } +} + +pub mod trace { + pub mod v1 { + include!("gen/opentelemetry.proto.trace.v1.rs"); + } +} + +pub mod resource { + pub mod v1 { + include!("gen/opentelemetry.proto.resource.v1.rs"); + } +} \ No newline at end of file diff --git a/opentelemetry-proto/src/transform/common.rs b/opentelemetry-proto/src/transform/common.rs new file mode 100644 index 0000000000..b6f1be7573 --- /dev/null +++ b/opentelemetry-proto/src/transform/common.rs @@ -0,0 +1,150 @@ +use std::time::{Duration, SystemTime, UNIX_EPOCH}; + +pub(crate) fn to_nanos(time: SystemTime) -> u64 { + time.duration_since(UNIX_EPOCH) + .unwrap_or_else(|_| Duration::from_secs(0)) + .as_nanos() as u64 +} + +use crate::proto::common::v1::{any_value, AnyValue, ArrayValue, InstrumentationScope, KeyValue}; +use opentelemetry::{Array, Value}; +use std::borrow::Cow; + +#[derive(Debug, Default)] +pub struct ResourceAttributesWithSchema { + pub attributes: Attributes, + pub schema_url: Option, +} + +impl From<&opentelemetry_sdk::Resource> for ResourceAttributesWithSchema { + fn from(resource: &opentelemetry_sdk::Resource) -> Self { + ResourceAttributesWithSchema { + attributes: resource_attributes(resource), + schema_url: resource.schema_url().map(ToString::to_string), + } + } +} + +impl + From<( + opentelemetry::InstrumentationScope, + Option>, + )> for InstrumentationScope +{ + fn from( + data: ( + opentelemetry::InstrumentationScope, + Option>, + ), + ) -> Self { + let (scope, target) = data; + if let Some(t) = target { + 
InstrumentationScope { + name: t.to_string(), + version: String::new(), + attributes: vec![], + ..Default::default() + } + } else { + let Attributes(attributes) = + Attributes::from(scope.attributes().cloned().collect::>()); + InstrumentationScope { + name: scope.name().to_string(), + version: scope.version().map(ToString::to_string).unwrap_or_default(), + attributes, + ..Default::default() + } + } + } +} + +impl + From<( + &opentelemetry::InstrumentationScope, + Option>, + )> for InstrumentationScope +{ + fn from( + data: ( + &opentelemetry::InstrumentationScope, + Option>, + ), + ) -> Self { + let (scope, target) = data; + if let Some(t) = target { + InstrumentationScope { + name: t.to_string(), + version: String::new(), + attributes: vec![], + ..Default::default() + } + } else { + let Attributes(attributes) = + Attributes::from(scope.attributes().cloned().collect::>()); + InstrumentationScope { + name: scope.name().to_string(), + version: scope.version().map(ToString::to_string).unwrap_or_default(), + attributes, + ..Default::default() + } + } + } +} + +/// Wrapper type for Vec<`KeyValue`> +#[derive(Default, Debug)] +pub struct Attributes(pub ::std::vec::Vec); + +impl From> for Attributes { + fn from(kvs: Vec) -> Self { + Attributes( + kvs.into_iter() + .map(|api_kv| KeyValue { + key: api_kv.key.as_str().to_string(), + value: Some(api_kv.value.into()), + }) + .collect(), + ) + } +} + +impl From for AnyValue { + fn from(value: Value) -> Self { + AnyValue { + value: match value { + Value::Bool(val) => Some(any_value::Value::BoolValue(val)), + Value::I64(val) => Some(any_value::Value::IntValue(val)), + Value::F64(val) => Some(any_value::Value::DoubleValue(val)), + Value::String(val) => Some(any_value::Value::StringValue(val.to_string())), + Value::Array(array) => Some(any_value::Value::ArrayValue(match array { + Array::Bool(vals) => array_into_proto(vals), + Array::I64(vals) => array_into_proto(vals), + Array::F64(vals) => array_into_proto(vals), + 
Array::String(vals) => array_into_proto(vals), + _ => ArrayValue::default(), + })), + _ => None, + }, + } + } +} + +fn array_into_proto(vals: Vec) -> ArrayValue +where + Value: From, +{ + let values = vals + .into_iter() + .map(|val| AnyValue::from(Value::from(val))) + .collect(); + + ArrayValue { values } +} + +pub(crate) fn resource_attributes(resource: &opentelemetry_sdk::Resource) -> Attributes { + resource + .iter() + .map(|(k, v)| opentelemetry::KeyValue::new(k.clone(), v.clone())) + .collect::>() + .into() +} diff --git a/opentelemetry-proto/src/transform/mod.rs b/opentelemetry-proto/src/transform/mod.rs new file mode 100644 index 0000000000..c764fb5e07 --- /dev/null +++ b/opentelemetry-proto/src/transform/mod.rs @@ -0,0 +1,2 @@ +pub mod common; +pub mod trace; diff --git a/opentelemetry-proto/src/transform/trace.rs b/opentelemetry-proto/src/transform/trace.rs new file mode 100644 index 0000000000..4c90ba0737 --- /dev/null +++ b/opentelemetry-proto/src/transform/trace.rs @@ -0,0 +1,355 @@ +use crate::{ + proto::{ + resource::v1::Resource, + trace::v1::{span, status, ResourceSpans, ScopeSpans, Span, Status}, + }, + transform::common::{to_nanos, Attributes, ResourceAttributesWithSchema}, +}; +use opentelemetry::{ + trace, + trace::{Link, SpanId, SpanKind}, +}; +use opentelemetry_sdk::trace::SpanData; +use std::collections::HashMap; + +impl From for span::SpanKind { + fn from(span_kind: SpanKind) -> Self { + match span_kind { + SpanKind::Client => span::SpanKind::Client, + SpanKind::Consumer => span::SpanKind::Consumer, + SpanKind::Internal => span::SpanKind::Internal, + SpanKind::Producer => span::SpanKind::Producer, + SpanKind::Server => span::SpanKind::Server, + } + } +} + +impl From<&trace::Status> for status::StatusCode { + fn from(status: &trace::Status) -> Self { + match status { + trace::Status::Ok => status::StatusCode::Ok, + trace::Status::Unset => status::StatusCode::Unset, + trace::Status::Error { .. 
} => status::StatusCode::Error, + } + } +} + +impl From for span::Link { + fn from(link: Link) -> Self { + span::Link { + trace_id: link.span_context.trace_id().to_bytes().to_vec(), + span_id: link.span_context.span_id().to_bytes().to_vec(), + trace_state: link.span_context.trace_state().header(), + attributes: Attributes::from(link.attributes).0, + dropped_attributes_count: link.dropped_attributes_count, + flags: link.span_context.trace_flags().to_u8() as u32, + } + } +} +impl From for Span { + fn from(source_span: opentelemetry_sdk::trace::SpanData) -> Self { + let span_kind: span::SpanKind = source_span.span_kind.into(); + Span { + trace_id: source_span.span_context.trace_id().to_bytes().to_vec(), + span_id: source_span.span_context.span_id().to_bytes().to_vec(), + trace_state: source_span.span_context.trace_state().header(), + parent_span_id: { + if source_span.parent_span_id != SpanId::INVALID { + source_span.parent_span_id.to_bytes().to_vec() + } else { + vec![] + } + }, + flags: source_span.span_context.trace_flags().to_u8() as u32, + name: source_span.name.into_owned(), + kind: span_kind as i32, + start_time_unix_nano: to_nanos(source_span.start_time), + end_time_unix_nano: to_nanos(source_span.end_time), + dropped_attributes_count: source_span.dropped_attributes_count, + attributes: Attributes::from(source_span.attributes).0, + dropped_events_count: source_span.events.dropped_count, + events: source_span + .events + .into_iter() + .map(|event| span::Event { + time_unix_nano: to_nanos(event.timestamp), + name: event.name.into(), + attributes: Attributes::from(event.attributes).0, + dropped_attributes_count: event.dropped_attributes_count, + }) + .collect(), + dropped_links_count: source_span.links.dropped_count, + links: source_span.links.into_iter().map(Into::into).collect(), + status: Some(Status { + code: status::StatusCode::from(&source_span.status).into(), + message: match source_span.status { + trace::Status::Error { description } => 
description.to_string(), + _ => Default::default(), + }, + }), + } + } +} + +impl ResourceSpans { + pub fn new(source_span: SpanData, resource: &ResourceAttributesWithSchema) -> Self { + let span_kind: span::SpanKind = source_span.span_kind.into(); + ResourceSpans { + resource: Some(Resource { + attributes: resource.attributes.0.clone(), + dropped_attributes_count: 0, + }), + schema_url: resource.schema_url.clone().unwrap_or_default(), + scope_spans: vec![ScopeSpans { + schema_url: source_span + .instrumentation_scope + .schema_url() + .map(ToString::to_string) + .unwrap_or_default(), + scope: Some((source_span.instrumentation_scope, None).into()), + spans: vec![Span { + trace_id: source_span.span_context.trace_id().to_bytes().to_vec(), + span_id: source_span.span_context.span_id().to_bytes().to_vec(), + trace_state: source_span.span_context.trace_state().header(), + parent_span_id: { + if source_span.parent_span_id != SpanId::INVALID { + source_span.parent_span_id.to_bytes().to_vec() + } else { + vec![] + } + }, + flags: source_span.span_context.trace_flags().to_u8() as u32, + name: source_span.name.into_owned(), + kind: span_kind as i32, + start_time_unix_nano: to_nanos(source_span.start_time), + end_time_unix_nano: to_nanos(source_span.end_time), + dropped_attributes_count: source_span.dropped_attributes_count, + attributes: Attributes::from(source_span.attributes).0, + dropped_events_count: source_span.events.dropped_count, + events: source_span + .events + .into_iter() + .map(|event| span::Event { + time_unix_nano: to_nanos(event.timestamp), + name: event.name.into(), + attributes: Attributes::from(event.attributes).0, + dropped_attributes_count: event.dropped_attributes_count, + }) + .collect(), + dropped_links_count: source_span.links.dropped_count, + links: source_span.links.into_iter().map(Into::into).collect(), + status: Some(Status { + code: status::StatusCode::from(&source_span.status).into(), + message: match source_span.status { + trace::Status::Error 
{ description } => description.to_string(), + _ => Default::default(), + }, + }), + }], + }], + } + } +} + +pub fn group_spans_by_resource_and_scope( + spans: Vec, + resource: &ResourceAttributesWithSchema, +) -> Vec { + // Group spans by their instrumentation library + let scope_map = spans.iter().fold( + HashMap::new(), + |mut scope_map: HashMap<&opentelemetry::InstrumentationScope, Vec<&SpanData>>, span| { + let instrumentation = &span.instrumentation_scope; + scope_map.entry(instrumentation).or_default().push(span); + scope_map + }, + ); + + // Convert the grouped spans into ScopeSpans + let scope_spans = scope_map + .into_iter() + .map(|(instrumentation, span_records)| ScopeSpans { + scope: Some((instrumentation, None).into()), + schema_url: resource.schema_url.clone().unwrap_or_default(), + spans: span_records + .into_iter() + .map(|span_data| span_data.clone().into()) + .collect(), + }) + .collect(); + + // Wrap ScopeSpans into a single ResourceSpans + vec![ResourceSpans { + resource: Some(Resource { + attributes: resource.attributes.0.clone(), + dropped_attributes_count: 0, + }), + scope_spans, + schema_url: resource.schema_url.clone().unwrap_or_default(), + }] +} + +#[cfg(test)] +mod tests { + use crate::{ + proto::common::v1::any_value::Value, transform::common::ResourceAttributesWithSchema, + }; + use opentelemetry::{ + trace::{SpanContext, SpanId, SpanKind, Status, TraceFlags, TraceId, TraceState}, + InstrumentationScope, KeyValue, + }; + use opentelemetry_sdk::{ + resource::Resource, + trace::{SpanData, SpanEvents, SpanLinks}, + }; + use std::{ + borrow::Cow, + time::{Duration, SystemTime}, + }; + + fn create_test_span_data(instrumentation_name: &'static str) -> SpanData { + let span_context = SpanContext::new( + TraceId::from_u128(123), + SpanId::from_u64(456), + TraceFlags::default(), + false, + TraceState::default(), + ); + + SpanData { + span_context, + parent_span_id: SpanId::from_u64(0), + span_kind: SpanKind::Internal, + name: 
Cow::Borrowed("test_span"), + start_time: SystemTime::now(), + end_time: SystemTime::now() + Duration::from_secs(1), + attributes: vec![KeyValue::new("key", "value")], + dropped_attributes_count: 0, + events: SpanEvents::default(), + links: SpanLinks::default(), + status: Status::Unset, + instrumentation_scope: InstrumentationScope::builder(instrumentation_name).build(), + } + } + + #[test] + fn test_group_spans_by_resource_and_scope_single_scope() { + let resource = Resource::builder_empty() + .with_attribute(KeyValue::new("resource_key", "resource_value")) + .build(); + let span_data = create_test_span_data("lib1"); + + let spans = vec![span_data.clone()]; + let resource: ResourceAttributesWithSchema = (&resource).into(); // Convert Resource to ResourceAttributesWithSchema + + let grouped_spans = + crate::transform::trace::group_spans_by_resource_and_scope(spans, &resource); + + assert_eq!(grouped_spans.len(), 1); + + let resource_spans = &grouped_spans[0]; + assert_eq!( + resource_spans.resource.as_ref().unwrap().attributes.len(), + 1 + ); + assert_eq!( + resource_spans.resource.as_ref().unwrap().attributes[0].key, + "resource_key" + ); + assert_eq!( + resource_spans.resource.as_ref().unwrap().attributes[0] + .value + .clone() + .unwrap() + .value + .unwrap(), + Value::StringValue("resource_value".to_string()) + ); + + let scope_spans = &resource_spans.scope_spans; + assert_eq!(scope_spans.len(), 1); + + let scope_span = &scope_spans[0]; + assert_eq!(scope_span.scope.as_ref().unwrap().name, "lib1"); + assert_eq!(scope_span.spans.len(), 1); + + assert_eq!( + scope_span.spans[0].trace_id, + span_data.span_context.trace_id().to_bytes().to_vec() + ); + } + + #[test] + fn test_group_spans_by_resource_and_scope_multiple_scopes() { + let resource = Resource::builder_empty() + .with_attribute(KeyValue::new("resource_key", "resource_value")) + .build(); + let span_data1 = create_test_span_data("lib1"); + let span_data2 = create_test_span_data("lib1"); + let span_data3 = 
create_test_span_data("lib2"); + + let spans = vec![span_data1.clone(), span_data2.clone(), span_data3.clone()]; + let resource: ResourceAttributesWithSchema = (&resource).into(); // Convert Resource to ResourceAttributesWithSchema + + let grouped_spans = + crate::transform::trace::group_spans_by_resource_and_scope(spans, &resource); + + assert_eq!(grouped_spans.len(), 1); + + let resource_spans = &grouped_spans[0]; + assert_eq!( + resource_spans.resource.as_ref().unwrap().attributes.len(), + 1 + ); + assert_eq!( + resource_spans.resource.as_ref().unwrap().attributes[0].key, + "resource_key" + ); + assert_eq!( + resource_spans.resource.as_ref().unwrap().attributes[0] + .value + .clone() + .unwrap() + .value + .unwrap(), + Value::StringValue("resource_value".to_string()) + ); + + let scope_spans = &resource_spans.scope_spans; + assert_eq!(scope_spans.len(), 2); + + // Check the scope spans for both lib1 and lib2 + let mut lib1_scope_span = None; + let mut lib2_scope_span = None; + + for scope_span in scope_spans { + match scope_span.scope.as_ref().unwrap().name.as_str() { + "lib1" => lib1_scope_span = Some(scope_span), + "lib2" => lib2_scope_span = Some(scope_span), + _ => {} + } + } + + let lib1_scope_span = lib1_scope_span.expect("lib1 scope span not found"); + let lib2_scope_span = lib2_scope_span.expect("lib2 scope span not found"); + + assert_eq!(lib1_scope_span.scope.as_ref().unwrap().name, "lib1"); + assert_eq!(lib2_scope_span.scope.as_ref().unwrap().name, "lib2"); + + assert_eq!(lib1_scope_span.spans.len(), 2); + assert_eq!(lib2_scope_span.spans.len(), 1); + + assert_eq!( + lib1_scope_span.spans[0].trace_id, + span_data1.span_context.trace_id().to_bytes().to_vec() + ); + assert_eq!( + lib1_scope_span.spans[1].trace_id, + span_data2.span_context.trace_id().to_bytes().to_vec() + ); + assert_eq!( + lib2_scope_span.spans[0].trace_id, + span_data3.span_context.trace_id().to_bytes().to_vec() + ); + } +} diff --git a/opentelemetry-proto/tests/bootstrap.rs 
b/opentelemetry-proto/tests/bootstrap.rs new file mode 100644 index 0000000000..0b0bcd6fa4 --- /dev/null +++ b/opentelemetry-proto/tests/bootstrap.rs @@ -0,0 +1,52 @@ +//! A test that regenerates the Rust protobuf bindings. +//! +//! It can be run via: +//! +//! ```no_run +//! cargo test -p opentelmetry-proto --test=bootstrap +//! ``` + +/// Generates protobuf bindings into src/gen and fails if the generated files do +/// not match those that are already checked into git +#[test] +fn bootstrap() { + let out_dir = std::path::PathBuf::from(std::env!("CARGO_MANIFEST_DIR")) + .join("src") + .join("gen"); + generate(&out_dir); + if changed(&out_dir) { + panic!("protobuf interfaces do not match generated sources"); + } +} + +/// Generates protobuf bindings into the given directory +fn generate(out_dir: &std::path::Path) { + let iface_files = &[ + "opentelemetry/proto/collector/trace/v1/trace_service.proto", + "opentelemetry/proto/common/v1/common.proto", + "opentelemetry/proto/resource/v1/resource.proto", + "opentelemetry/proto/trace/v1/trace.proto", + ]; + if let Err(error) = tonic_build::configure() + .build_client(true) + .build_server(false) + .emit_rerun_if_changed(false) + .out_dir(out_dir) + .compile_protos(iface_files, &["."]) + { + panic!("failed to compile protobuf: {error}") + } +} + +/// Returns true if the given path contains files that have changed since the +/// last Git commit +fn changed(path: &std::path::Path) -> bool { + let status = std::process::Command::new("git") + .arg("diff") + .arg("--exit-code") + .arg("--") + .arg(path) + .status() + .expect("failed to run git"); + !status.success() +} diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 624eb0ea63..0193dee360 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,2 +1,2 @@ [toolchain] -channel = "1.76.0" +channel = "1.83.0" diff --git a/spiffe-proto/Cargo.toml b/spiffe-proto/Cargo.toml index 9e1790e63c..28ce208ba7 100644 --- a/spiffe-proto/Cargo.toml +++ 
b/spiffe-proto/Cargo.toml @@ -1,23 +1,23 @@ [package] name = "spiffe-proto" -version = "0.1.0" -authors = ["Linkerd Developers "] -license = "Apache-2.0" -edition = "2021" -publish = false +version = { workspace = true } +authors = { workspace = true } +license = { workspace = true } +edition = { workspace = true } +publish = { workspace = true } [dependencies] -bytes = "1" -prost = "0.12" -prost-types = "0.12" +bytes = { workspace = true } +prost = { workspace = true } +prost-types = { workspace = true } [dependencies.tonic] -version = "0.10" +workspace = true default-features = false features = ["prost", "codegen"] [dev-dependencies.tonic-build] -version = "0.10" +workspace = true default-features = false features = ["prost"] diff --git a/spiffe-proto/src/gen/spiffe.workloadapi.rs b/spiffe-proto/src/gen/spiffe.workloadapi.rs index be7b05abc1..3d07d61c5d 100644 --- a/spiffe-proto/src/gen/spiffe.workloadapi.rs +++ b/spiffe-proto/src/gen/spiffe.workloadapi.rs @@ -1,13 +1,11 @@ // This file is @generated by prost-build. /// The X509SVIDRequest message conveys parameters for requesting an X.509-SVID. /// There are currently no request parameters. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct X509svidRequest {} /// The X509SVIDResponse message carries X.509-SVIDs and related information, /// including a set of global CRLs and a list of bundles the workload may use /// for federating with foreign trust domains. -#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct X509svidResponse { /// Required. A list of X509SVID messages, each of which includes a single @@ -28,7 +26,6 @@ pub struct X509svidResponse { } /// The X509SVID message carries a single SVID and all associated information, /// including the X.509 bundle for the trust domain. 
-#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct X509svid { /// Required. The SPIFFE ID of the SVID in this entry @@ -47,7 +44,13 @@ pub struct X509svid { } /// Generated client implementations. pub mod spiffe_workload_api_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] use tonic::codegen::*; use tonic::codegen::http::Uri; #[derive(Debug, Clone)] @@ -58,8 +61,8 @@ pub mod spiffe_workload_api_client { where T: tonic::client::GrpcService, T::Error: Into, - T::ResponseBody: Body + Send + 'static, - ::Error: Into + Send, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); @@ -84,7 +87,7 @@ pub mod spiffe_workload_api_client { >, , - >>::Error: Into + Send + Sync, + >>::Error: Into + std::marker::Send + std::marker::Sync, { SpiffeWorkloadApiClient::new(InterceptedService::new(inner, interceptor)) } @@ -134,8 +137,7 @@ pub mod spiffe_workload_api_client { .ready() .await .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, + tonic::Status::unknown( format!("Service was not ready: {}", e.into()), ) })?; diff --git a/spiffe-proto/tests/bootstrap.rs b/spiffe-proto/tests/bootstrap.rs index 3aa90b3d3c..277606c757 100644 --- a/spiffe-proto/tests/bootstrap.rs +++ b/spiffe-proto/tests/bootstrap.rs @@ -28,7 +28,7 @@ fn generate(out_dir: &std::path::Path) { .emit_rerun_if_changed(false) .disable_package_emission() .out_dir(out_dir) - .compile(iface_files, &["."]) + .compile_protos(iface_files, &["."]) { panic!("failed to compile protobuf: {error}") } diff --git a/tools/Cargo.toml b/tools/Cargo.toml index e32cb390ca..7fc2d22e7b 100644 --- a/tools/Cargo.toml +++ b/tools/Cargo.toml @@ -6,6 +6,6 @@ license = "Apache-2.0" publish = false 
[dependencies.tonic-build] -version = "0.10" +workspace = true default-features = false features = ["prost"] diff --git a/tools/src/bin/gen-protos.rs b/tools/src/bin/gen-protos.rs index 4e85f3bffe..f25efe458c 100644 --- a/tools/src/bin/gen-protos.rs +++ b/tools/src/bin/gen-protos.rs @@ -1,8 +1,12 @@ +use std::path::{Path, PathBuf}; + fn main() { - let opencensus_dir = { - let manifest_dir = std::path::PathBuf::from(std::env!("CARGO_MANIFEST_DIR")); - manifest_dir.parent().unwrap().join("opencensus-proto") - }; + generate_opentelemetry_protos(); + generate_opencensus_protos(); +} + +fn generate_opencensus_protos() { + let opencensus_dir = get_proto_dir("opencensus-proto"); let out_dir = opencensus_dir.join("src").join("gen"); @@ -17,12 +21,39 @@ fn main() { ] }; + generate_protos(&out_dir, iface_files, &opencensus_dir); +} + +fn generate_opentelemetry_protos() { + let opentelemetry_dir = get_proto_dir("opentelemetry-proto"); + + let out_dir = opentelemetry_dir.join("src").join("gen"); + + let iface_files = { + let proto_dir = opentelemetry_dir.join("opentelemetry").join("proto"); + &[ + proto_dir.join("collector/trace/v1/trace_service.proto"), + proto_dir.join("common/v1/common.proto"), + proto_dir.join("resource/v1/resource.proto"), + proto_dir.join("trace/v1/trace.proto"), + ] + }; + + generate_protos(&out_dir, iface_files, &opentelemetry_dir); +} + +fn get_proto_dir(name: &str) -> PathBuf { + let manifest_dir = std::path::PathBuf::from(std::env!("CARGO_MANIFEST_DIR")); + manifest_dir.parent().unwrap().join(name) +} + +fn generate_protos(out_dir: &Path, iface_files: &[PathBuf], includes: &Path) { if let Err(error) = tonic_build::configure() .build_client(true) .build_server(false) .emit_rerun_if_changed(false) .out_dir(out_dir) - .compile(iface_files, &[opencensus_dir]) + .compile_protos(iface_files, &[includes]) { eprintln!("\nfailed to compile protos: {}", error); }