diff --git a/.github/containers/Dockerfile b/.github/containers/Dockerfile index 207332f3c0..3f370a4a45 100644 --- a/.github/containers/Dockerfile +++ b/.github/containers/Dockerfile @@ -115,7 +115,7 @@ RUN mv "${HOME}/.local/bin/python3.11" "${HOME}/.local/bin/pypy3.11" && \ mv "${HOME}/.local/bin/python3.10" "${HOME}/.local/bin/pypy3.10" # Install CPython versions -RUN uv python install -f cp3.14 cp3.13 cp3.12 cp3.11 cp3.10 cp3.9 cp3.8 +RUN uv python install -f cp3.14 cp3.14t cp3.13 cp3.12 cp3.11 cp3.10 cp3.9 cp3.8 # Set default Python version to CPython 3.13 RUN uv python install -f --default cp3.13 diff --git a/.github/workflows/build-ci-image.yml b/.github/workflows/build-ci-image.yml index 8d56ad35c9..ab183f48a2 100644 --- a/.github/workflows/build-ci-image.yml +++ b/.github/workflows/build-ci-image.yml @@ -97,7 +97,7 @@ jobs: touch "${{ runner.temp }}/digests/${digest#sha256:}" - name: Upload Digest - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 with: name: digests-${{ matrix.cache_tag }} path: ${{ runner.temp }}/digests/* @@ -114,7 +114,7 @@ jobs: steps: - name: Download Digests - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # 5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # 6.0.0 with: path: ${{ runner.temp }}/digests pattern: digests-* diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml index 2337ee8d40..8b469eaacb 100644 --- a/.github/workflows/deploy.yml +++ b/.github/workflows/deploy.yml @@ -81,7 +81,7 @@ jobs: platforms: arm64 - name: Build Wheels - uses: pypa/cibuildwheel@faf86a6ed7efa889faf6996aa23820831055001a # 2.23.3 + uses: pypa/cibuildwheel@9c00cb4f6b517705a3794b22395aedc36257242c # 3.2.1 env: CIBW_PLATFORM: auto CIBW_BUILD: "${{ matrix.wheel }}*" @@ -97,7 +97,7 @@ jobs: CIBW_TEST_SKIP: "*-win_arm64" - name: Upload Artifacts - uses: 
actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 with: name: ${{ github.job }}-${{ matrix.wheel }} path: ./wheelhouse/*.whl @@ -134,7 +134,7 @@ jobs: openssl md5 -binary "dist/${tarball}" | xxd -p | tr -d '\n' > "dist/${md5_file}" - name: Upload Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 with: name: ${{ github.job }}-sdist path: | @@ -166,7 +166,7 @@ jobs: environment: ${{ matrix.pypi-instance }} steps: - - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # 5.0.0 + - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # 6.0.0 with: path: ./dist/ merge-multiple: true diff --git a/.github/workflows/mega-linter.yml b/.github/workflows/mega-linter.yml index 17e65ed47a..8f74866d43 100644 --- a/.github/workflows/mega-linter.yml +++ b/.github/workflows/mega-linter.yml @@ -68,7 +68,7 @@ jobs: # Upload MegaLinter artifacts - name: Archive production artifacts if: success() || failure() - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 with: name: MegaLinter reports include-hidden-files: "true" diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index fc51fcb08c..e9ef7b2d4e 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -100,7 +100,7 @@ jobs: architecture: x64 - name: Download Coverage Artifacts - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # 5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # 6.0.0 with: pattern: coverage-* path: ./ @@ -134,7 +134,7 @@ jobs: architecture: x64 - name: Download Results Artifacts - uses: 
actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # 5.0.0 + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # 6.0.0 with: pattern: results-* path: ./ @@ -196,7 +196,7 @@ jobs: FORCE_COLOR: "true" - name: Upload Coverage Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: coverage-${{ github.job }}-${{ strategy.job-index }} @@ -206,7 +206,7 @@ jobs: retention-days: 1 - name: Upload Results Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: results-${{ github.job }}-${{ strategy.job-index }} @@ -261,7 +261,7 @@ jobs: FORCE_COLOR: "true" - name: Upload Coverage Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: coverage-${{ github.job }}-${{ strategy.job-index }} @@ -271,7 +271,7 @@ jobs: retention-days: 1 - name: Upload Results Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: results-${{ github.job }}-${{ strategy.job-index }} @@ -283,9 +283,8 @@ jobs: windows: env: TOTAL_GROUPS: 1 - UV_PYTHON: "3.13" - UV_PYTHON_DOWNLOADS: "never" - UV_PYTHON_PREFERENCE: "only-system" + UV_PYTHON_DOWNLOADS: "manual" + UV_PYTHON_PREFERENCE: "only-managed" strategy: fail-fast: false @@ -301,15 +300,13 @@ jobs: run: | git fetch --tags origin - - name: Install Python - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # 6.0.0 - with: - python-version: | - 3.13 - 3.14 - - name: Install uv - uses: astral-sh/setup-uv@2ddd2b9cb38ad8efd50337e8ab201519a34c9f24 
# 7.1.1 + uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # 7.1.2 + + - name: Install Python + run: | + uv python install -f 3.13 3.14 3.14t + uv python install -f --default 3.13 - name: Install Dependencies run: | @@ -333,7 +330,7 @@ jobs: FORCE_COLOR: "true" - name: Upload Coverage Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: coverage-${{ github.job }}-${{ strategy.job-index }} @@ -343,7 +340,7 @@ jobs: retention-days: 1 - name: Upload Results Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: results-${{ github.job }}-${{ strategy.job-index }} @@ -355,9 +352,8 @@ jobs: windows_arm64: env: TOTAL_GROUPS: 1 - UV_PYTHON: "3.13" - UV_PYTHON_DOWNLOADS: "never" - UV_PYTHON_PREFERENCE: "only-system" + UV_PYTHON_DOWNLOADS: "manual" + UV_PYTHON_PREFERENCE: "only-managed" strategy: fail-fast: false @@ -373,15 +369,17 @@ jobs: run: | git fetch --tags origin - - name: Install Python - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # 6.0.0 - with: - python-version: | - 3.13 - 3.14 - - name: Install uv - uses: astral-sh/setup-uv@2ddd2b9cb38ad8efd50337e8ab201519a34c9f24 # 7.1.1 + uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # 7.1.2 + + - name: Install Python + run: | + uv python install -f \ + cpython-3.13-windows-aarch64-none \ + cpython-3.14-windows-aarch64-none \ + cpython-3.14t-windows-aarch64-none + uv python install -f --default \ + cpython-3.13-windows-aarch64-none - name: Install Dependencies run: | @@ -405,7 +403,7 @@ jobs: FORCE_COLOR: "true" - name: Upload Coverage Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: 
actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: coverage-${{ github.job }}-${{ strategy.job-index }} @@ -415,7 +413,7 @@ jobs: retention-days: 1 - name: Upload Results Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: results-${{ github.job }}-${{ strategy.job-index }} @@ -475,7 +473,7 @@ jobs: FORCE_COLOR: "true" - name: Upload Coverage Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: coverage-${{ github.job }}-${{ strategy.job-index }} @@ -485,7 +483,7 @@ jobs: retention-days: 1 - name: Upload Results Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: results-${{ github.job }}-${{ strategy.job-index }} @@ -558,7 +556,7 @@ jobs: FORCE_COLOR: "true" - name: Upload Coverage Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: coverage-${{ github.job }}-${{ strategy.job-index }} @@ -568,7 +566,7 @@ jobs: retention-days: 1 - name: Upload Results Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: results-${{ github.job }}-${{ strategy.job-index }} @@ -638,7 +636,7 @@ jobs: FORCE_COLOR: "true" - name: Upload Coverage Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: 
name: coverage-${{ github.job }}-${{ strategy.job-index }} @@ -648,7 +646,7 @@ jobs: retention-days: 1 - name: Upload Results Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: results-${{ github.job }}-${{ strategy.job-index }} @@ -719,7 +717,7 @@ jobs: FORCE_COLOR: "true" - name: Upload Coverage Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: coverage-${{ github.job }}-${{ strategy.job-index }} @@ -729,7 +727,7 @@ jobs: retention-days: 1 - name: Upload Results Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: results-${{ github.job }}-${{ strategy.job-index }} @@ -804,7 +802,7 @@ jobs: FORCE_COLOR: "true" - name: Upload Coverage Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: coverage-${{ github.job }}-${{ strategy.job-index }} @@ -814,7 +812,7 @@ jobs: retention-days: 1 - name: Upload Results Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: results-${{ github.job }}-${{ strategy.job-index }} @@ -869,7 +867,7 @@ jobs: FORCE_COLOR: "true" - name: Upload Coverage Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: coverage-${{ github.job }}-${{ strategy.job-index }} @@ -879,7 +877,7 @@ jobs: 
retention-days: 1 - name: Upload Results Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: results-${{ github.job }}-${{ strategy.job-index }} @@ -959,7 +957,7 @@ jobs: FORCE_COLOR: "true" - name: Upload Coverage Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: coverage-${{ github.job }}-${{ strategy.job-index }} @@ -969,7 +967,7 @@ jobs: retention-days: 1 - name: Upload Results Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: results-${{ github.job }}-${{ strategy.job-index }} @@ -1037,7 +1035,7 @@ jobs: FORCE_COLOR: "true" - name: Upload Coverage Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: coverage-${{ github.job }}-${{ strategy.job-index }} @@ -1047,7 +1045,7 @@ jobs: retention-days: 1 - name: Upload Results Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: results-${{ github.job }}-${{ strategy.job-index }} @@ -1115,7 +1113,7 @@ jobs: FORCE_COLOR: "true" - name: Upload Coverage Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: coverage-${{ github.job }}-${{ strategy.job-index }} @@ -1125,7 +1123,7 @@ jobs: retention-days: 1 - name: Upload Results Artifacts - uses: 
actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: results-${{ github.job }}-${{ strategy.job-index }} @@ -1193,7 +1191,7 @@ jobs: FORCE_COLOR: "true" - name: Upload Coverage Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: coverage-${{ github.job }}-${{ strategy.job-index }} @@ -1203,7 +1201,7 @@ jobs: retention-days: 1 - name: Upload Results Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: results-${{ github.job }}-${{ strategy.job-index }} @@ -1276,7 +1274,7 @@ jobs: FORCE_COLOR: "true" - name: Upload Coverage Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: coverage-${{ github.job }}-${{ strategy.job-index }} @@ -1286,7 +1284,7 @@ jobs: retention-days: 1 - name: Upload Results Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: results-${{ github.job }}-${{ strategy.job-index }} @@ -1359,7 +1357,7 @@ jobs: FORCE_COLOR: "true" - name: Upload Coverage Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: coverage-${{ github.job }}-${{ strategy.job-index }} @@ -1369,7 +1367,7 @@ jobs: retention-days: 1 - name: Upload Results Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + 
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: results-${{ github.job }}-${{ strategy.job-index }} @@ -1438,7 +1436,7 @@ jobs: FORCE_COLOR: "true" - name: Upload Coverage Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: coverage-${{ github.job }}-${{ strategy.job-index }} @@ -1448,7 +1446,7 @@ jobs: retention-days: 1 - name: Upload Results Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: results-${{ github.job }}-${{ strategy.job-index }} @@ -1519,7 +1517,7 @@ jobs: FORCE_COLOR: "true" - name: Upload Coverage Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: coverage-${{ github.job }}-${{ strategy.job-index }} @@ -1529,7 +1527,7 @@ jobs: retention-days: 1 - name: Upload Results Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: results-${{ github.job }}-${{ strategy.job-index }} @@ -1599,7 +1597,7 @@ jobs: FORCE_COLOR: "true" - name: Upload Coverage Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: coverage-${{ github.job }}-${{ strategy.job-index }} @@ -1609,7 +1607,7 @@ jobs: retention-days: 1 - name: Upload Results Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 
if: always() with: name: results-${{ github.job }}-${{ strategy.job-index }} @@ -1679,7 +1677,7 @@ jobs: FORCE_COLOR: "true" - name: Upload Coverage Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: coverage-${{ github.job }}-${{ strategy.job-index }} @@ -1689,7 +1687,7 @@ jobs: retention-days: 1 - name: Upload Results Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: results-${{ github.job }}-${{ strategy.job-index }} @@ -1758,7 +1756,7 @@ jobs: FORCE_COLOR: "true" - name: Upload Coverage Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: coverage-${{ github.job }}-${{ strategy.job-index }} @@ -1768,7 +1766,7 @@ jobs: retention-days: 1 - name: Upload Results Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: results-${{ github.job }}-${{ strategy.job-index }} @@ -1836,7 +1834,7 @@ jobs: FORCE_COLOR: "true" - name: Upload Coverage Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: coverage-${{ github.job }}-${{ strategy.job-index }} @@ -1846,7 +1844,7 @@ jobs: retention-days: 1 - name: Upload Results Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: results-${{ github.job }}-${{ strategy.job-index }} @@ 
-1955,7 +1953,7 @@ jobs: FORCE_COLOR: "true" - name: Upload Coverage Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: coverage-${{ github.job }}-${{ strategy.job-index }} @@ -1965,7 +1963,7 @@ jobs: retention-days: 1 - name: Upload Results Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: results-${{ github.job }}-${{ strategy.job-index }} @@ -2035,7 +2033,7 @@ jobs: FORCE_COLOR: "true" - name: Upload Coverage Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: coverage-${{ github.job }}-${{ strategy.job-index }} @@ -2045,7 +2043,7 @@ jobs: retention-days: 1 - name: Upload Results Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: results-${{ github.job }}-${{ strategy.job-index }} @@ -2113,7 +2111,7 @@ jobs: FORCE_COLOR: "true" - name: Upload Coverage Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: coverage-${{ github.job }}-${{ strategy.job-index }} @@ -2123,7 +2121,7 @@ jobs: retention-days: 1 - name: Upload Results Artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # 4.6.2 + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # 5.0.0 if: always() with: name: results-${{ github.job }}-${{ strategy.job-index }} diff --git a/.github/workflows/trivy.yml b/.github/workflows/trivy.yml index 
2016fe2121..c373a38bb1 100644 --- a/.github/workflows/trivy.yml +++ b/.github/workflows/trivy.yml @@ -61,6 +61,6 @@ jobs: - name: Upload Trivy scan results to GitHub Security tab if: ${{ github.event_name == 'schedule' }} - uses: github/codeql-action/upload-sarif@16140ae1a102900babc80a33c44059580f687047 # 4.30.9 + uses: github/codeql-action/upload-sarif@0499de31b99561a6d14a36a5f662c2a54f91beee # 4.31.2 with: sarif_file: "trivy-results.sarif" diff --git a/newrelic/api/transaction.py b/newrelic/api/transaction.py index f90bbde143..f8a9f329f5 100644 --- a/newrelic/api/transaction.py +++ b/newrelic/api/transaction.py @@ -637,6 +637,7 @@ def __exit__(self, exc, value, tb): trace_id=self.trace_id, loop_time=self._loop_time, root=root_node, + partial_granularity_sampled=hasattr(self, "partial_granularity_sampled"), ) # Clear settings as we are all done and don't need it @@ -1073,23 +1074,52 @@ def _make_sampling_decision(self): return priority = self._priority sampled = self._sampled - _logger.debug( - "Full granularity tracing is enabled. Asking if full granularity wants to sample. priority=%s, sampled=%s", - priority, - sampled, - ) - computed_priority, computed_sampled = self._compute_sampled_and_priority( - priority, - sampled, - remote_parent_sampled_path="distributed_tracing.sampler.remote_parent_sampled", - remote_parent_sampled_setting=self.settings.distributed_tracing.sampler.remote_parent_sampled, - remote_parent_not_sampled_path="distributed_tracing.sampler.remote_parent_not_sampled", - remote_parent_not_sampled_setting=self.settings.distributed_tracing.sampler.remote_parent_not_sampled, - ) - _logger.debug("Full granularity sampling decision was %s with priority=%s.", sampled, priority) - self._priority = computed_priority - self._sampled = computed_sampled - self._sampling_decision_made = True + # Compute sampling decision for full granularity. 
+ if self.settings.distributed_tracing.sampler.full_granularity.enabled: + _logger.debug( + "Full granularity tracing is enabled. Asking if full granularity wants to sample. priority=%s, sampled=%s", + priority, + sampled, + ) + computed_priority, computed_sampled = self._compute_sampled_and_priority( + priority, + sampled, + remote_parent_sampled_path="distributed_tracing.sampler.full_granularity.remote_parent_sampled", + remote_parent_sampled_setting=self.settings.distributed_tracing.sampler.full_granularity.remote_parent_sampled, + remote_parent_not_sampled_path="distributed_tracing.sampler.full_granularity.remote_parent_not_sampled", + remote_parent_not_sampled_setting=self.settings.distributed_tracing.sampler.full_granularity.remote_parent_not_sampled, + ) + _logger.debug("Full granularity sampling decision was %s with priority=%s.", sampled, priority) + if computed_sampled or not self.settings.distributed_tracing.sampler.partial_granularity.enabled: + self._priority = computed_priority + self._sampled = computed_sampled + self._sampling_decision_made = True + return + + # If full granularity is not going to sample, let partial granularity decide. + if self.settings.distributed_tracing.sampler.partial_granularity.enabled: + _logger.debug("Partial granularity tracing is enabled. 
Asking if partial granularity wants to sample.") + self._priority, self._sampled = self._compute_sampled_and_priority( + priority, + sampled, + remote_parent_sampled_path="distributed_tracing.sampler.partial_granularity.remote_parent_sampled", + remote_parent_sampled_setting=self.settings.distributed_tracing.sampler.partial_granularity.remote_parent_sampled, + remote_parent_not_sampled_path="distributed_tracing.sampler.partial_granularity.remote_parent_not_sampled", + remote_parent_not_sampled_setting=self.settings.distributed_tracing.sampler.partial_granularity.remote_parent_not_sampled, + ) + _logger.debug( + "Partial granularity sampling decision was %s with priority=%s.", self._sampled, self._priority + ) + self._sampling_decision_made = True + if self._sampled: + self.partial_granularity_sampled = True + return + + # This is only reachable if both full and partial granularity tracing are off. + # Set priority=0 and do not sample. This enables DT headers to still be sent + # even if the trace is never sampled. + self._priority = 0 + self._sampled = False def _freeze_path(self): if self._frozen_path is None: diff --git a/newrelic/config.py b/newrelic/config.py index 5367538695..41d118961f 100644 --- a/newrelic/config.py +++ b/newrelic/config.py @@ -319,6 +319,47 @@ def _process_setting(section, option, getter, mapper): _raise_configuration_error(section, option) +def _process_dt_setting(section, option_p1, option_p2, getter): + try: + # The type of a value is dictated by the getter + # function supplied. + + value1 = getattr(_config_object, getter)(section, option_p1) + value2 = getattr(_config_object, getter)(section, option_p2) + + # Now need to apply the option from the + # configuration file to the internal settings + # object. Walk the object path and assign it. 
+ + target = _settings + fields = option_p1.split(".", 1) + + while True: + if len(fields) == 1: + value = value1 or value2 or "default" + setattr(target, fields[0], value) + break + target = getattr(target, fields[0]) + fields = fields[1].split(".", 1) + + # Cache the configuration so can be dumped out to + # log file when whole main configuration has been + # processed. This ensures that the log file and log + # level entries have been set. + + _cache_object.append((option_p1, value1)) + _cache_object.append((option_p2, value2)) + + except configparser.NoSectionError: + pass + + except configparser.NoOptionError: + pass + + except Exception: + _raise_configuration_error(section, option_p1) + + # Processing of all the settings for specified section except # for log file and log level which are applied separately to # ensure they are set as soon as possible. @@ -405,8 +446,23 @@ def _process_configuration(section): _process_setting(section, "distributed_tracing.enabled", "getboolean", None) _process_setting(section, "distributed_tracing.exclude_newrelic_header", "getboolean", None) _process_setting(section, "distributed_tracing.sampler.adaptive_sampling_target", "getint", None) - _process_setting(section, "distributed_tracing.sampler.remote_parent_sampled", "get", None) - _process_setting(section, "distributed_tracing.sampler.remote_parent_not_sampled", "get", None) + _process_dt_setting( + section, + "distributed_tracing.sampler.full_granularity.remote_parent_sampled", + "distributed_tracing.sampler.remote_parent_sampled", + "get", + ) + _process_dt_setting( + section, + "distributed_tracing.sampler.full_granularity.remote_parent_not_sampled", + "distributed_tracing.sampler.remote_parent_not_sampled", + "get", + ) + _process_setting(section, "distributed_tracing.sampler.full_granularity.enabled", "getboolean", None) + _process_setting(section, "distributed_tracing.sampler.partial_granularity.enabled", "getboolean", None) + _process_setting(section, 
"distributed_tracing.sampler.partial_granularity.type", "get", None) + _process_setting(section, "distributed_tracing.sampler.partial_granularity.remote_parent_sampled", "get", None) + _process_setting(section, "distributed_tracing.sampler.partial_granularity.remote_parent_not_sampled", "get", None) _process_setting(section, "span_events.enabled", "getboolean", None) _process_setting(section, "span_events.max_samples_stored", "getint", None) _process_setting(section, "span_events.attributes.enabled", "getboolean", None) diff --git a/newrelic/core/agent.py b/newrelic/core/agent.py index fbfc06b260..90690a573d 100644 --- a/newrelic/core/agent.py +++ b/newrelic/core/agent.py @@ -746,7 +746,12 @@ def shutdown_agent(self, timeout=None): self._harvest_thread.start() if self._harvest_thread.is_alive(): - self._harvest_thread.join(timeout) + try: + self._harvest_thread.join(timeout) + except RuntimeError: + # This can occur if the application is killed while in the harvest thread, + # causing shutdown_agent to be called from within the harvest thread. 
+ pass def agent_instance(): diff --git a/newrelic/core/attribute.py b/newrelic/core/attribute.py index 785a2fa0ec..49bc890a80 100644 --- a/newrelic/core/attribute.py +++ b/newrelic/core/attribute.py @@ -109,6 +109,23 @@ "zeebe.client.resourceFile", } +SPAN_ENTITY_RELATIONSHIP_ATTRIBUTES = { + "cloud.account.id", + "cloud.platform", + "cloud.region", + "cloud.resource_id", + "db.instance", + "db.system", + "http.url", + "messaging.destination.name", + "messaging.system", + "peer.hostname", + "server.address", + "server.port", + "span.kind", +} + + MAX_NUM_USER_ATTRIBUTES = 128 MAX_ATTRIBUTE_LENGTH = 255 MAX_NUM_ML_USER_ATTRIBUTES = 64 diff --git a/newrelic/core/config.py b/newrelic/core/config.py index 4f813e4370..fe5c9b5872 100644 --- a/newrelic/core/config.py +++ b/newrelic/core/config.py @@ -337,6 +337,14 @@ class DistributedTracingSamplerSettings(Settings): pass +class DistributedTracingSamplerFullGranularitySettings(Settings): + pass + + +class DistributedTracingSamplerPartialGranularitySettings(Settings): + pass + + class ServerlessModeSettings(Settings): pass @@ -507,6 +515,8 @@ class EventHarvestConfigHarvestLimitSettings(Settings): _settings.debug = DebugSettings() _settings.distributed_tracing = DistributedTracingSettings() _settings.distributed_tracing.sampler = DistributedTracingSamplerSettings() +_settings.distributed_tracing.sampler.full_granularity = DistributedTracingSamplerFullGranularitySettings() +_settings.distributed_tracing.sampler.partial_granularity = DistributedTracingSamplerPartialGranularitySettings() _settings.error_collector = ErrorCollectorSettings() _settings.error_collector.attributes = ErrorCollectorAttributesSettings() _settings.event_harvest_config = EventHarvestConfigSettings() @@ -803,9 +813,9 @@ def default_otlp_host(host): _settings.compressed_content_encoding = "gzip" _settings.max_payload_size_in_bytes = 1000000 -_settings.attributes.enabled = True -_settings.attributes.exclude = [] -_settings.attributes.include = [] 
+_settings.attributes.enabled = _environ_as_bool("NEW_RELIC_ATTRIBUTES_ENABLED", default=True) +_settings.attributes.exclude = _environ_as_set(os.environ.get("NEW_RELIC_ATTRIBUTES_EXCLUDE", "")) +_settings.attributes.include = _environ_as_set(os.environ.get("NEW_RELIC_ATTRIBUTES_INCLUDE", "")) _settings.thread_profiler.enabled = True _settings.cross_application_tracer.enabled = False @@ -821,9 +831,15 @@ def default_otlp_host(host): _settings.event_harvest_config.harvest_limits.analytic_event_data = _environ_as_int( "NEW_RELIC_ANALYTICS_EVENTS_MAX_SAMPLES_STORED", default=DEFAULT_RESERVOIR_SIZE ) -_settings.transaction_events.attributes.enabled = True -_settings.transaction_events.attributes.exclude = [] -_settings.transaction_events.attributes.include = [] +_settings.transaction_events.attributes.enabled = _environ_as_bool( + "NEW_RELIC_TRANSACTION_EVENTS_ATTRIBUTES_ENABLED", default=True +) +_settings.transaction_events.attributes.exclude = _environ_as_set( + os.environ.get("NEW_RELIC_TRANSACTION_EVENTS_ATTRIBUTES_EXCLUDE", "") +) +_settings.transaction_events.attributes.include = _environ_as_set( + os.environ.get("NEW_RELIC_TRANSACTION_EVENTS_ATTRIBUTES_INCLUDE", "") +) _settings.custom_insights_events.enabled = True _settings.event_harvest_config.harvest_limits.custom_event_data = _environ_as_int( @@ -839,24 +855,49 @@ def default_otlp_host(host): _settings.distributed_tracing.sampler.adaptive_sampling_target = _environ_as_int( "NEW_RELIC_DISTRIBUTED_TRACING_SAMPLER_ADAPTIVE_SAMPLING_TARGET", default=10 ) -_settings.distributed_tracing.sampler.remote_parent_sampled = os.environ.get( - "NEW_RELIC_DISTRIBUTED_TRACING_SAMPLER_REMOTE_PARENT_SAMPLED", "default" +_settings.distributed_tracing.sampler.full_granularity.enabled = _environ_as_bool( + "NEW_RELIC_DISTRIBUTED_TRACING_SAMPLER_FULL_GRANULARITY_ENABLED", default=True +) +_settings.distributed_tracing.sampler.full_granularity.remote_parent_sampled = os.environ.get( + 
"NEW_RELIC_DISTRIBUTED_TRACING_SAMPLER_FULL_GRANULARITY_REMOTE_PARENT_SAMPLED", None +) or os.environ.get("NEW_RELIC_DISTRIBUTED_TRACING_SAMPLER_REMOTE_PARENT_SAMPLED", "default") +_settings.distributed_tracing.sampler.full_granularity.remote_parent_not_sampled = os.environ.get( + "NEW_RELIC_DISTRIBUTED_TRACING_SAMPLER_FULL_GRANULARITY_REMOTE_PARENT_NOT_SAMPLED", None +) or os.environ.get("NEW_RELIC_DISTRIBUTED_TRACING_SAMPLER_REMOTE_PARENT_NOT_SAMPLED", "default") +_settings.distributed_tracing.sampler.partial_granularity.enabled = _environ_as_bool( + "NEW_RELIC_DISTRIBUTED_TRACING_SAMPLER_PARTIAL_GRANULARITY_ENABLED", default=False +) +_settings.distributed_tracing.sampler.partial_granularity.type = os.environ.get( + "NEW_RELIC_DISTRIBUTED_TRACING_SAMPLER_PARTIAL_GRANULARITY_TYPE", "essential" ) -_settings.distributed_tracing.sampler.remote_parent_not_sampled = os.environ.get( - "NEW_RELIC_DISTRIBUTED_TRACING_SAMPLER_REMOTE_PARENT_NOT_SAMPLED", "default" +_settings.distributed_tracing.sampler.partial_granularity.remote_parent_sampled = os.environ.get( + "NEW_RELIC_DISTRIBUTED_TRACING_SAMPLER_PARTIAL_GRANULARITY_REMOTE_PARENT_SAMPLED", "default" +) +_settings.distributed_tracing.sampler.partial_granularity.remote_parent_not_sampled = os.environ.get( + "NEW_RELIC_DISTRIBUTED_TRACING_SAMPLER_PARTIAL_GRANULARITY_REMOTE_PARENT_NOT_SAMPLED", "default" ) _settings.distributed_tracing.exclude_newrelic_header = False _settings.span_events.enabled = _environ_as_bool("NEW_RELIC_SPAN_EVENTS_ENABLED", default=True) _settings.event_harvest_config.harvest_limits.span_event_data = _environ_as_int( "NEW_RELIC_SPAN_EVENTS_MAX_SAMPLES_STORED", default=SPAN_EVENT_RESERVOIR_SIZE ) -_settings.span_events.attributes.enabled = True -_settings.span_events.attributes.exclude = [] -_settings.span_events.attributes.include = [] +_settings.span_events.attributes.enabled = _environ_as_bool("NEW_RELIC_SPAN_EVENTS_ATTRIBUTES_ENABLED", default=True) +_settings.span_events.attributes.exclude = 
_environ_as_set( + os.environ.get("NEW_RELIC_SPAN_EVENTS_ATTRIBUTES_EXCLUDE", "") +) +_settings.span_events.attributes.include = _environ_as_set( + os.environ.get("NEW_RELIC_SPAN_EVENTS_ATTRIBUTES_INCLUDE", "") +) -_settings.transaction_segments.attributes.enabled = True -_settings.transaction_segments.attributes.exclude = [] -_settings.transaction_segments.attributes.include = [] +_settings.transaction_segments.attributes.enabled = _environ_as_bool( + "NEW_RELIC_TRANSACTION_SEGMENTS_ATTRIBUTES_ENABLED", default=True +) +_settings.transaction_segments.attributes.exclude = _environ_as_set( + os.environ.get("NEW_RELIC_TRANSACTION_SEGMENTS_ATTRIBUTES_EXCLUDE", "") +) +_settings.transaction_segments.attributes.include = _environ_as_set( + os.environ.get("NEW_RELIC_TRANSACTION_SEGMENTS_ATTRIBUTES_INCLUDE", "") +) _settings.transaction_tracer.enabled = True _settings.transaction_tracer.transaction_threshold = None @@ -867,9 +908,15 @@ def default_otlp_host(host): _settings.transaction_tracer.function_trace = [] _settings.transaction_tracer.generator_trace = [] _settings.transaction_tracer.top_n = 20 -_settings.transaction_tracer.attributes.enabled = True -_settings.transaction_tracer.attributes.exclude = [] -_settings.transaction_tracer.attributes.include = [] +_settings.transaction_tracer.attributes.enabled = _environ_as_bool( + "NEW_RELIC_TRANSACTION_TRACER_ATTRIBUTES_ENABLED", default=True +) +_settings.transaction_tracer.attributes.exclude = _environ_as_set( + os.environ.get("NEW_RELIC_TRANSACTION_TRACER_ATTRIBUTES_EXCLUDE", "") +) +_settings.transaction_tracer.attributes.include = _environ_as_set( + os.environ.get("NEW_RELIC_TRANSACTION_TRACER_ATTRIBUTES_INCLUDE", "") +) _settings.error_collector.enabled = True _settings.error_collector.capture_events = True @@ -882,9 +929,15 @@ def default_otlp_host(host): ) _settings.error_collector.expected_status_codes = set() _settings.error_collector._error_group_callback = None -_settings.error_collector.attributes.enabled = 
True -_settings.error_collector.attributes.exclude = [] -_settings.error_collector.attributes.include = [] +_settings.error_collector.attributes.enabled = _environ_as_bool( + "NEW_RELIC_ERROR_COLLECTOR_ATTRIBUTES_ENABLED", default=True +) +_settings.error_collector.attributes.exclude = _environ_as_set( + os.environ.get("NEW_RELIC_ERROR_COLLECTOR_ATTRIBUTES_EXCLUDE", "") +) +_settings.error_collector.attributes.include = _environ_as_set( + os.environ.get("NEW_RELIC_ERROR_COLLECTOR_ATTRIBUTES_INCLUDE", "") +) _settings.browser_monitoring.enabled = True _settings.browser_monitoring.auto_instrument = True @@ -893,9 +946,15 @@ def default_otlp_host(host): _settings.browser_monitoring.debug = False _settings.browser_monitoring.ssl_for_http = None _settings.browser_monitoring.content_type = ["text/html"] -_settings.browser_monitoring.attributes.enabled = False -_settings.browser_monitoring.attributes.exclude = [] -_settings.browser_monitoring.attributes.include = [] +_settings.browser_monitoring.attributes.enabled = _environ_as_bool( + "NEW_RELIC_BROWSER_MONITORING_ATTRIBUTES_ENABLED", default=False +) +_settings.browser_monitoring.attributes.exclude = _environ_as_set( + os.environ.get("NEW_RELIC_BROWSER_MONITORING_ATTRIBUTES_EXCLUDE", "") +) +_settings.browser_monitoring.attributes.include = _environ_as_set( + os.environ.get("NEW_RELIC_BROWSER_MONITORING_ATTRIBUTES_INCLUDE", "") +) _settings.transaction_name.limit = None _settings.transaction_name.naming_scheme = os.environ.get("NEW_RELIC_TRANSACTION_NAMING_SCHEME") @@ -1332,6 +1391,16 @@ def apply_server_side_settings(server_side_config=None, settings=_settings): min(settings_snapshot.custom_insights_events.max_attribute_value, 4095), ) + # Partial granularity tracing is not available in infinite tracing mode. + if ( + settings_snapshot.infinite_tracing.enabled + and settings_snapshot.distributed_tracing.sampler.partial_granularity.enabled + ): + _logger.warning( + "Improper configuration. 
Infinite tracing cannot be enabled at the same time as partial granularity tracing. Setting distributed_tracing.sampler.partial_granularity.enabled=False." + ) + apply_config_setting(settings_snapshot, "distributed_tracing.sampler.partial_granularity.enabled", False) + # This will be removed at some future point # Special case for account_id which will be sent instead of # cross_process_id in the future diff --git a/newrelic/core/data_collector.py b/newrelic/core/data_collector.py index e481f1d6e7..c303fad90b 100644 --- a/newrelic/core/data_collector.py +++ b/newrelic/core/data_collector.py @@ -117,7 +117,14 @@ def send_ml_events(self, sampling_info, custom_event_data): def send_span_events(self, sampling_info, span_event_data): """Called to submit sample set for span events.""" - + # TODO: remove this later after list types are supported. + for span_event in span_event_data: + try: + ids = span_event[1].get("nr.ids") + if ids: + span_event[1]["nr.ids"] = ",".join(ids) + except: + pass payload = (self.agent_run_id, sampling_info, span_event_data) return self._protocol.send("span_event_data", payload) diff --git a/newrelic/core/database_node.py b/newrelic/core/database_node.py index 1f60add195..8e30e3fecf 100644 --- a/newrelic/core/database_node.py +++ b/newrelic/core/database_node.py @@ -279,7 +279,15 @@ def trace_node(self, stats, root, connections): start_time=start_time, end_time=end_time, name=name, params=params, children=children, label=None ) - def span_event(self, settings, base_attrs=None, parent_guid=None, attr_class=dict): + def span_event( + self, + settings, + base_attrs=None, + parent_guid=None, + attr_class=dict, + partial_granularity_sampled=False, + ct_exit_spans=None, + ): sql = self.formatted if sql: @@ -288,4 +296,11 @@ def span_event(self, settings, base_attrs=None, parent_guid=None, attr_class=dic self.agent_attributes["db.statement"] = sql - return super().span_event(settings, base_attrs=base_attrs, parent_guid=parent_guid, 
attr_class=attr_class) + return super().span_event( + settings, + base_attrs=base_attrs, + parent_guid=parent_guid, + attr_class=attr_class, + partial_granularity_sampled=partial_granularity_sampled, + ct_exit_spans=ct_exit_spans, + ) diff --git a/newrelic/core/external_node.py b/newrelic/core/external_node.py index f47d634b3d..7251504bb1 100644 --- a/newrelic/core/external_node.py +++ b/newrelic/core/external_node.py @@ -169,7 +169,15 @@ def trace_node(self, stats, root, connections): start_time=start_time, end_time=end_time, name=name, params=params, children=children, label=None ) - def span_event(self, settings, base_attrs=None, parent_guid=None, attr_class=dict): + def span_event( + self, + settings, + base_attrs=None, + parent_guid=None, + attr_class=dict, + partial_granularity_sampled=False, + ct_exit_spans=None, + ): self.agent_attributes["http.url"] = self.http_url i_attrs = (base_attrs and base_attrs.copy()) or attr_class() @@ -180,4 +188,11 @@ def span_event(self, settings, base_attrs=None, parent_guid=None, attr_class=dic if self.method: _, i_attrs["http.method"] = attribute.process_user_attribute("http.method", self.method) - return super().span_event(settings, base_attrs=i_attrs, parent_guid=parent_guid, attr_class=attr_class) + return super().span_event( + settings, + base_attrs=i_attrs, + parent_guid=parent_guid, + attr_class=attr_class, + partial_granularity_sampled=partial_granularity_sampled, + ct_exit_spans=ct_exit_spans, + ) diff --git a/newrelic/core/function_node.py b/newrelic/core/function_node.py index 588f675f31..2eab783ecc 100644 --- a/newrelic/core/function_node.py +++ b/newrelic/core/function_node.py @@ -114,8 +114,23 @@ def trace_node(self, stats, root, connections): start_time=start_time, end_time=end_time, name=name, params=params, children=children, label=self.label ) - def span_event(self, settings, base_attrs=None, parent_guid=None, attr_class=dict): + def span_event( + self, + settings, + base_attrs=None, + parent_guid=None, + 
attr_class=dict, + partial_granularity_sampled=False, + ct_exit_spans=None, + ): i_attrs = (base_attrs and base_attrs.copy()) or attr_class() i_attrs["name"] = f"{self.group}/{self.name}" - return super().span_event(settings, base_attrs=i_attrs, parent_guid=parent_guid, attr_class=attr_class) + return super().span_event( + settings, + base_attrs=i_attrs, + parent_guid=parent_guid, + attr_class=attr_class, + partial_granularity_sampled=partial_granularity_sampled, + ct_exit_spans=ct_exit_spans, + ) diff --git a/newrelic/core/loop_node.py b/newrelic/core/loop_node.py index 58d1b3a746..b562720a85 100644 --- a/newrelic/core/loop_node.py +++ b/newrelic/core/loop_node.py @@ -79,8 +79,23 @@ def trace_node(self, stats, root, connections): start_time=start_time, end_time=end_time, name=name, params=params, children=children, label=None ) - def span_event(self, settings, base_attrs=None, parent_guid=None, attr_class=dict): + def span_event( + self, + settings, + base_attrs=None, + parent_guid=None, + attr_class=dict, + partial_granularity_sampled=False, + ct_exit_spans=None, + ): i_attrs = (base_attrs and base_attrs.copy()) or attr_class() i_attrs["name"] = f"EventLoop/Wait/{self.name}" - return super().span_event(settings, base_attrs=i_attrs, parent_guid=parent_guid, attr_class=attr_class) + return super().span_event( + settings, + base_attrs=i_attrs, + parent_guid=parent_guid, + attr_class=attr_class, + partial_granularity_sampled=partial_granularity_sampled, + ct_exit_spans=ct_exit_spans, + ) diff --git a/newrelic/core/node_mixin.py b/newrelic/core/node_mixin.py index 9154cc8765..29f5bedbc1 100644 --- a/newrelic/core/node_mixin.py +++ b/newrelic/core/node_mixin.py @@ -49,7 +49,15 @@ def get_trace_segment_params(self, settings, params=None): _params["exclusive_duration_millis"] = 1000.0 * self.exclusive return _params - def span_event(self, settings, base_attrs=None, parent_guid=None, attr_class=dict): + def span_event( + self, + settings, + base_attrs=None, + 
parent_guid=None, + attr_class=dict, + partial_granularity_sampled=False, + ct_exit_spans=None, + ): i_attrs = (base_attrs and base_attrs.copy()) or attr_class() i_attrs["type"] = "Span" i_attrs["name"] = i_attrs.get("name") or self.name @@ -68,18 +76,111 @@ def span_event(self, settings, base_attrs=None, parent_guid=None, attr_class=dic u_attrs = attribute.resolve_user_attributes( self.processed_user_attributes, settings.attribute_filter, DST_SPAN_EVENTS, attr_class=attr_class ) + if not partial_granularity_sampled: + # intrinsics, user attrs, agent attrs + return [i_attrs, u_attrs, a_attrs] + else: + if ct_exit_spans is None: + ct_exit_spans = {} - # intrinsics, user attrs, agent attrs - return [i_attrs, u_attrs, a_attrs] - - def span_events(self, settings, base_attrs=None, parent_guid=None, attr_class=dict): - yield self.span_event(settings, base_attrs=base_attrs, parent_guid=parent_guid, attr_class=attr_class) + partial_granularity_type = settings.distributed_tracing.sampler.partial_granularity.type + exit_span_attrs_present = attribute.SPAN_ENTITY_RELATIONSHIP_ATTRIBUTES & set(a_attrs) + # If this is the entry node or an LLM span always return it. + if i_attrs.get("nr.entryPoint") or i_attrs["name"].startswith("Llm/"): + if partial_granularity_type == "reduced": + return [i_attrs, u_attrs, a_attrs] + else: + return [i_attrs, {}, {}] + # If the span is not an exit span, skip it by returning None. + if not exit_span_attrs_present: + return None + # If the span is an exit span and we are in reduced mode (meaning no attribute dropping), + # just return the exit span as is. + if partial_granularity_type == "reduced": + return [i_attrs, u_attrs, a_attrs] + else: + a_minimized_attrs = attr_class({key: a_attrs[key] for key in exit_span_attrs_present}) + # If we are in essential mode return the span with minimized attributes. 
+ if partial_granularity_type == "essential": + return [i_attrs, {}, a_minimized_attrs] + # If the span is an exit span but span compression (compact) is enabled, + # we need to check for uniqueness before returning it. + # Combine all the entity relationship attr values into a string to be + # used as the hash to check for uniqueness. + span_attrs = "".join([str(a_minimized_attrs[key]) for key in exit_span_attrs_present]) + new_exit_span = span_attrs not in ct_exit_spans + # If this is a new exit span, add it to the known ct_exit_spans and + # return it. + if new_exit_span: + # nr.ids is the list of span guids that share this unique exit span. + a_minimized_attrs["nr.ids"] = [] + a_minimized_attrs["nr.durations"] = self.duration + ct_exit_spans[span_attrs] = [i_attrs, a_minimized_attrs] + return [i_attrs, {}, a_minimized_attrs] + # If this is an exit span we've already seen, add its guid to the list + # of ids on the seen span, compute the new duration & start time, and + # return None. + ct_exit_spans[span_attrs][1]["nr.ids"].append(self.guid) + # Max size for `nr.ids` = 1024. Max length = 63 (each span id is 16 bytes + 8 bytes for list type). + ct_exit_spans[span_attrs][1]["nr.ids"] = ct_exit_spans[span_attrs][1]["nr.ids"][:63] + # Compute the new start and end time for all compressed spans and use + # that to set the duration for all compressed spans. + current_start_time = ct_exit_spans[span_attrs][0]["timestamp"] + current_end_time = ( + ct_exit_spans[span_attrs][0]["timestamp"] / 1000 + ct_exit_spans[span_attrs][1]["nr.durations"] + ) + new_start_time = i_attrs["timestamp"] + new_end_time = i_attrs["timestamp"] / 1000 + i_attrs["duration"] + set_start_time = min(new_start_time, current_start_time) + # If the new span starts after the old span's end time or the new span + # ends before the current span starts, add the durations. 
+ if current_end_time < new_start_time / 1000 or new_end_time < current_start_time / 1000: + set_duration = ct_exit_spans[span_attrs][1]["nr.durations"] + i_attrs["duration"] + # Otherwise, if the new and old span's overlap in time, use the newest + # end time and subtract the start time from it to calculate the new + # duration. + else: + set_duration = max(current_end_time, new_end_time) - set_start_time / 1000 + ct_exit_spans[span_attrs][0]["timestamp"] = set_start_time + ct_exit_spans[span_attrs][1]["nr.durations"] = set_duration + return None + def span_events( + self, + settings, + base_attrs=None, + parent_guid=None, + attr_class=dict, + partial_granularity_sampled=False, + ct_exit_spans=None, + ): + span = self.span_event( + settings, + base_attrs=base_attrs, + parent_guid=parent_guid, + attr_class=attr_class, + partial_granularity_sampled=partial_granularity_sampled, + ct_exit_spans=ct_exit_spans, + ) + parent_id = parent_guid + if span: # span will be None if the span is an inprocess span or repeated exit span. + yield span + # Compressed spans are always reparented onto the entry span. + if not settings.distributed_tracing.sampler.partial_granularity.type == "compact" or span[0].get( + "nr.entryPoint" + ): + parent_id = self.guid for child in self.children: - for event in child.span_events( # noqa: UP028 - settings, base_attrs=base_attrs, parent_guid=self.guid, attr_class=attr_class + for event in child.span_events( + settings, + base_attrs=base_attrs, + parent_guid=parent_id, + attr_class=attr_class, + partial_granularity_sampled=partial_granularity_sampled, + ct_exit_spans=ct_exit_spans, ): - yield event + if event: # event will be None if the span is an inprocess span or repeated exit span. 
+ yield event class DatastoreNodeMixin(GenericNodeMixin): @@ -108,7 +209,15 @@ def db_instance(self): self._db_instance = db_instance_attr return db_instance_attr - def span_event(self, settings, base_attrs=None, parent_guid=None, attr_class=dict): + def span_event( + self, + settings, + base_attrs=None, + parent_guid=None, + attr_class=dict, + partial_granularity_sampled=False, + ct_exit_spans=None, + ): a_attrs = self.agent_attributes a_attrs["db.instance"] = self.db_instance i_attrs = (base_attrs and base_attrs.copy()) or attr_class() @@ -140,4 +249,11 @@ def span_event(self, settings, base_attrs=None, parent_guid=None, attr_class=dic except Exception: pass - return super().span_event(settings, base_attrs=i_attrs, parent_guid=parent_guid, attr_class=attr_class) + return super().span_event( + settings, + base_attrs=i_attrs, + parent_guid=parent_guid, + attr_class=attr_class, + partial_granularity_sampled=partial_granularity_sampled, + ct_exit_spans=ct_exit_spans, + ) diff --git a/newrelic/core/root_node.py b/newrelic/core/root_node.py index fa8b3de82b..72f1d392d7 100644 --- a/newrelic/core/root_node.py +++ b/newrelic/core/root_node.py @@ -37,7 +37,15 @@ class RootNode(_RootNode, GenericNodeMixin): - def span_event(self, settings, base_attrs=None, parent_guid=None, attr_class=dict): + def span_event( + self, + settings, + base_attrs=None, + parent_guid=None, + attr_class=dict, + partial_granularity_sampled=False, + ct_exit_spans=None, + ): i_attrs = (base_attrs and base_attrs.copy()) or attr_class() i_attrs["transaction.name"] = self.path i_attrs["nr.entryPoint"] = True @@ -46,7 +54,14 @@ def span_event(self, settings, base_attrs=None, parent_guid=None, attr_class=dic if self.tracing_vendors: i_attrs["tracingVendors"] = self.tracing_vendors - return super().span_event(settings, base_attrs=i_attrs, parent_guid=parent_guid, attr_class=attr_class) + return super().span_event( + settings, + base_attrs=i_attrs, + parent_guid=parent_guid, + attr_class=attr_class, + 
partial_granularity_sampled=partial_granularity_sampled, + ct_exit_spans=ct_exit_spans, + ) def trace_node(self, stats, root, connections): name = self.path diff --git a/newrelic/core/transaction_node.py b/newrelic/core/transaction_node.py index 34871d8b21..f1c9f1ea7a 100644 --- a/newrelic/core/transaction_node.py +++ b/newrelic/core/transaction_node.py @@ -98,6 +98,7 @@ "root_span_guid", "trace_id", "loop_time", + "partial_granularity_sampled", ], ) @@ -633,5 +634,12 @@ def span_events(self, settings, attr_class=dict): ("priority", self.priority), ) ) - - yield from self.root.span_events(settings, base_attrs, parent_guid=self.parent_span, attr_class=attr_class) + ct_exit_spans = {} + yield from self.root.span_events( + settings, + base_attrs, + parent_guid=self.parent_span, + attr_class=attr_class, + partial_granularity_sampled=self.partial_granularity_sampled, + ct_exit_spans=ct_exit_spans, + ) diff --git a/newrelic/hooks/database_aiomysql.py b/newrelic/hooks/database_aiomysql.py index 9a2f3d1d18..2cedcb40f9 100644 --- a/newrelic/hooks/database_aiomysql.py +++ b/newrelic/hooks/database_aiomysql.py @@ -78,6 +78,10 @@ async def _wrap_pool__acquire(wrapped, instance, args, kwargs): with FunctionTrace(name=callable_name(wrapped), terminal=True, rollup=rollup, source=wrapped): connection = await wrapped(*args, **kwargs) connection_kwargs = getattr(instance, "_conn_kwargs", {}) + + if hasattr(connection, "__wrapped__"): + return connection + return AsyncConnectionWrapper(connection, dbapi2_module, (((), connection_kwargs))) return _wrap_pool__acquire diff --git a/newrelic/hooks/external_botocore.py b/newrelic/hooks/external_botocore.py index d8c18b49db..39317ea752 100644 --- a/newrelic/hooks/external_botocore.py +++ b/newrelic/hooks/external_botocore.py @@ -394,7 +394,7 @@ def extract_bedrock_claude_model_request(request_body, bedrock_attrs): ] else: input_message_list = [{"role": "user", "content": request_body.get("prompt")}] - bedrock_attrs["request.max_tokens"] = 
request_body.get("max_tokens_to_sample") + bedrock_attrs["request.max_tokens"] = request_body.get("max_tokens_to_sample") or request_body.get("max_tokens") bedrock_attrs["request.temperature"] = request_body.get("temperature") bedrock_attrs["input_message_list"] = input_message_list @@ -406,7 +406,13 @@ def extract_bedrock_claude_model_response(response_body, bedrock_attrs): response_body = json.loads(response_body) role = response_body.get("role", "assistant") content = response_body.get("content") or response_body.get("completion") - output_message_list = [{"role": role, "content": content}] + + # For Claude Sonnet 3+ models, the content key holds a list with the type and text of the output + if isinstance(content, list): + output_message_list = [{"role": "assistant", "content": result.get("text")} for result in content] + else: + output_message_list = [{"role": role, "content": content}] + bedrock_attrs["response.choices.finish_reason"] = response_body.get("stop_reason") bedrock_attrs["output_message_list"] = output_message_list @@ -420,6 +426,7 @@ def extract_bedrock_claude_model_streaming_response(response_body, bedrock_attrs bedrock_attrs["output_message_list"] = [{"role": "assistant", "content": ""}] bedrock_attrs["output_message_list"][0]["content"] += content bedrock_attrs["response.choices.finish_reason"] = response_body.get("stop_reason") + return bedrock_attrs @@ -639,7 +646,7 @@ def _wrap_bedrock_runtime_invoke_model(wrapped, instance, args, kwargs): # Determine extractor by model type for extractor_name, request_extractor, response_extractor, stream_extractor in MODEL_EXTRACTORS: # noqa: B007 - if model.startswith(extractor_name): + if extractor_name in model: break else: # Model was not found in extractor list @@ -1057,6 +1064,13 @@ def handle_chat_completion_event(transaction, bedrock_attrs): input_message_list = bedrock_attrs.get("input_message_list", []) output_message_list = bedrock_attrs.get("output_message_list", []) + + no_output_content = 
len(output_message_list) == 1 and not output_message_list[0].get("content", "") + + # This check handles Sonnet 3+ models which report an additional empty input and empty output in streaming cases after the main content has been generated + if not input_message_list and no_output_content: + return + number_of_messages = ( len(input_message_list) + len(output_message_list) ) or None # If 0, attribute will be set to None and removed @@ -1374,6 +1388,7 @@ def wrap_serialize_to_request(wrapped, instance, args, kwargs): extract_agent_attrs=extract_kinesis_agent_attrs, library="Kinesis", ), + ("kinesis", "describe_account_settings"): aws_function_trace("describe_account_settings", library="Kinesis"), ("kinesis", "describe_limits"): aws_function_trace("describe_limits", library="Kinesis"), ("kinesis", "describe_stream"): aws_function_trace( "describe_stream", extract_kinesis, extract_agent_attrs=extract_kinesis_agent_attrs, library="Kinesis" @@ -1451,12 +1466,22 @@ def wrap_serialize_to_request(wrapped, instance, args, kwargs): ("kinesis", "untag_resource"): aws_function_trace( "untag_resource", extract_kinesis, extract_agent_attrs=extract_kinesis_agent_attrs, library="Kinesis" ),
library="Kinesis", + ), ("kinesis", "put_record"): aws_message_trace( "Produce", "Stream", extract_kinesis, extract_agent_attrs=extract_kinesis_agent_attrs, library="Kinesis" ), diff --git a/tests/agent_features/test_distributed_tracing.py b/tests/agent_features/test_distributed_tracing.py index 6548b17cf8..f11375a00b 100644 --- a/tests/agent_features/test_distributed_tracing.py +++ b/tests/agent_features/test_distributed_tracing.py @@ -12,8 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. +import asyncio import copy import json +import time import pytest import webtest @@ -24,6 +26,18 @@ from testing_support.validators.validate_transaction_event_attributes import validate_transaction_event_attributes from testing_support.validators.validate_transaction_metrics import validate_transaction_metrics +from newrelic.api.function_trace import function_trace +from newrelic.common.object_wrapper import function_wrapper, transient_function_wrapper + +try: + from newrelic.core.infinite_tracing_pb2 import AttributeValue, Span +except: + AttributeValue = None + Span = None + +from testing_support.mock_external_http_server import MockExternalHTTPHResponseHeadersServer +from testing_support.validators.validate_span_events import check_value_equals, validate_span_events + from newrelic.api.application import application_instance from newrelic.api.background_task import BackgroundTask, background_task from newrelic.api.external_trace import ExternalTrace @@ -72,6 +86,110 @@ } +def validate_compact_span_event( + name, compressed_span_count, expected_nr_durations_low_bound, expected_nr_durations_high_bound +): + @function_wrapper + def _validate_wrapper(wrapped, instance, args, kwargs): + record_transaction_called = [] + recorded_span_events = [] + + @transient_function_wrapper("newrelic.core.stats_engine", "StatsEngine.record_transaction") + def capture_span_events(wrapped, instance, args, kwargs): + events = [] + + 
@transient_function_wrapper("newrelic.common.streaming_utils", "StreamBuffer.put") + def stream_capture(wrapped, instance, args, kwargs): + event = args[0] + events.append(event) + return wrapped(*args, **kwargs) + + record_transaction_called.append(True) + try: + result = stream_capture(wrapped)(*args, **kwargs) + except: + raise + else: + if not instance.settings.infinite_tracing.enabled: + events = [event for priority, seen_at, event in instance.span_events.pq] + + recorded_span_events.append(events) + + return result + + _new_wrapper = capture_span_events(wrapped) + val = _new_wrapper(*args, **kwargs) + assert record_transaction_called + captured_events = recorded_span_events.pop(-1) + + mismatches = [] + matching_span_events = 0 + + def _span_details(): + details = [ + f"matching_span_events={matching_span_events}", + f"mismatches={mismatches}", + f"captured_events={captured_events}", + ] + return "\n".join(details) + + for captured_event in captured_events: + if Span and isinstance(captured_event, Span): + intrinsics = captured_event.intrinsics + user_attrs = captured_event.user_attributes + agent_attrs = captured_event.agent_attributes + else: + intrinsics, _, agent_attrs = captured_event + + # Find the span by name. 
+ if not check_value_equals(intrinsics, "name", name): + continue + assert check_value_length(agent_attrs, "nr.ids", compressed_span_count - 1, mismatches), _span_details() + assert check_value_between( + agent_attrs, + "nr.durations", + expected_nr_durations_low_bound, + expected_nr_durations_high_bound, + mismatches, + ), _span_details() + matching_span_events += 1 + + assert matching_span_events == 1, _span_details() + return val + + return _validate_wrapper + + +def check_value_between(dictionary, key, expected_min, expected_max, mismatches): + value = dictionary.get(key) + if AttributeValue and isinstance(value, AttributeValue): + for _, val in value.ListFields(): + if not (expected_min < val < expected_max): + mismatches.append(f"key: {key}, not {expected_min} < {val} < {expected_max}") + return False + return True + else: + if not (expected_min < value < expected_max): + mismatches.append(f"key: {key}, not {expected_min} < {value} < {expected_max}") + return False + return True + + +def check_value_length(dictionary, key, expected_length, mismatches): + value = dictionary.get(key) + if AttributeValue and isinstance(value, AttributeValue): + for _, val in value.ListFields(): + if len(val) != expected_length: + mismatches.append(f"key: {key}, not len({val}) == {expected_length}") + return False + return True + else: + if len(value) != expected_length: + mismatches.append(f"key: {key}, not len({value}) == {expected_length}") + return False + return True + + @wsgi_application() def target_wsgi_application(environ, start_response): status = "200 OK" @@ -468,8 +586,99 @@ def test_distributed_trace_remote_parent_sampling_decision_full_granularity( test_settings = _override_settings.copy() test_settings.update( { - "distributed_tracing.sampler.remote_parent_sampled": remote_parent_sampled_setting, - "distributed_tracing.sampler.remote_parent_not_sampled": remote_parent_not_sampled_setting, + "distributed_tracing.sampler.full_granularity.remote_parent_sampled": 
remote_parent_sampled_setting, + "distributed_tracing.sampler.full_granularity.remote_parent_not_sampled": remote_parent_not_sampled_setting, + "span_events.enabled": True, + } + ) + if expected_adaptive_sampling_algo_called: + function_called_decorator = validate_function_called( + "newrelic.core.adaptive_sampler", "AdaptiveSampler.compute_sampled" + ) + else: + function_called_decorator = validate_function_not_called( + "newrelic.core.adaptive_sampler", "AdaptiveSampler.compute_sampled" + ) + + @function_called_decorator + @override_application_settings(test_settings) + @validate_attributes_complete("intrinsic", required_intrinsics) + @background_task(name="test_distributed_trace_attributes") + def _test(): + txn = current_transaction() + + if traceparent_sampled is not None: + headers = { + "traceparent": f"00-0af7651916cd43dd8448eb211c80319c-00f067aa0ba902b7-{int(traceparent_sampled):02x}", + "newrelic": '{"v":[0,1],"d":{"ty":"Mobile","ac":"123","ap":"51424","id":"5f474d64b9cc9b2a","tr":"6e2fea0b173fdad0","pr":0.1234,"sa":true,"ti":1482959525577,"tx":"27856f70d3d314b7"}}', # This header should be ignored. + } + if newrelic_sampled is not None: + headers["tracestate"] = ( + f"1@nr=0-0-1-2827902-0af7651916cd43dd-00f067aa0ba902b7-{int(newrelic_sampled)}-1.23456-1518469636035" + ) + else: + headers = { + "newrelic": '{"v":[0,1],"d":{"ty":"Mobile","ac":"1","ap":"51424","id":"00f067aa0ba902b7","tr":"0af7651916cd43dd8448eb211c80319c","pr":0.1234,"sa":%s,"ti":1482959525577,"tx":"0af7651916cd43dd"}}' + % (str(newrelic_sampled).lower()) + } + accept_distributed_trace_headers(headers) + + _test() + + +@pytest.mark.parametrize( + "traceparent_sampled,newrelic_sampled,remote_parent_sampled_setting,remote_parent_not_sampled_setting,expected_sampled,expected_priority,expected_adaptive_sampling_algo_called", + ( + (True, None, "default", "default", None, None, True), # Uses adaptive sampling algo. + (True, None, "always_on", "default", True, 2, False), # Always sampled. 
+ (True, None, "always_off", "default", False, 0, False), # Never sampled. + (False, None, "default", "default", None, None, True), # Uses adaptive sampling algo. + (False, None, "always_on", "default", None, None, True), # Uses adaptive sampling alog. + (False, None, "always_off", "default", None, None, True), # Uses adaptive sampling algo. + (True, None, "default", "always_on", None, None, True), # Uses adaptive sampling algo. + (True, None, "default", "always_off", None, None, True), # Uses adaptive sampling algo. + (False, None, "default", "always_on", True, 2, False), # Always sampled. + (False, None, "default", "always_off", False, 0, False), # Never sampled. + (True, True, "default", "default", True, 1.23456, False), # Uses sampling decision in W3C TraceState header. + (True, False, "default", "default", False, 1.23456, False), # Uses sampling decision in W3C TraceState header. + (False, False, "default", "default", False, 1.23456, False), # Uses sampling decision in W3C TraceState header. + (True, False, "always_on", "default", True, 2, False), # Always sampled. + (True, True, "always_off", "default", False, 0, False), # Never sampled. + (False, False, "default", "always_on", True, 2, False), # Always sampled. + (False, True, "default", "always_off", False, 0, False), # Never sampled. + (None, True, "default", "default", True, 0.1234, False), # Uses sampling and priority from newrelic header. + (None, True, "always_on", "default", True, 2, False), # Always sampled. + (None, True, "always_off", "default", False, 0, False), # Never sampled. + (None, False, "default", "default", False, 0.1234, False), # Uses sampling and priority from newrelic header. + (None, False, "always_on", "default", False, 0.1234, False), # Uses sampling and priority from newrelic header. + (None, True, "default", "always_on", True, 0.1234, False), # Uses sampling and priority from newrelic header. + (None, False, "default", "always_on", True, 2, False), # Always sampled. 
+ (None, False, "default", "always_off", False, 0, False), # Never sampled. + (None, None, "default", "default", None, None, True), # Uses adaptive sampling algo. + ), +) +def test_distributed_trace_remote_parent_sampling_decision_partial_granularity( + traceparent_sampled, + newrelic_sampled, + remote_parent_sampled_setting, + remote_parent_not_sampled_setting, + expected_sampled, + expected_priority, + expected_adaptive_sampling_algo_called, +): + required_intrinsics = [] + if expected_sampled is not None: + required_intrinsics.append(Attribute(name="sampled", value=expected_sampled, destinations=0b110)) + if expected_priority is not None: + required_intrinsics.append(Attribute(name="priority", value=expected_priority, destinations=0b110)) + + test_settings = _override_settings.copy() + test_settings.update( + { + "distributed_tracing.sampler.full_granularity.enabled": False, + "distributed_tracing.sampler.partial_granularity.enabled": True, + "distributed_tracing.sampler.partial_granularity.remote_parent_sampled": remote_parent_sampled_setting, + "distributed_tracing.sampler.partial_granularity.remote_parent_not_sampled": remote_parent_not_sampled_setting, "span_events.enabled": True, } ) @@ -506,3 +715,320 @@ def _test(): accept_distributed_trace_headers(headers) _test() + + +@pytest.mark.parametrize( + "full_granularity_enabled,full_granularity_remote_parent_sampled_setting,partial_granularity_enabled,partial_granularity_remote_parent_sampled_setting,expected_sampled,expected_priority,expected_adaptive_sampling_algo_called", + ( + (True, "always_off", True, "adaptive", None, None, True), # Uses adaptive sampling algo. + (True, "always_on", True, "adaptive", True, 2, False), # Uses adaptive sampling algo. + (False, "always_on", False, "adaptive", False, 0, False), # Uses adaptive sampling algo. 
+ ), +) +def test_distributed_trace_remote_parent_sampling_decision_between_full_and_partial_granularity( + full_granularity_enabled, + full_granularity_remote_parent_sampled_setting, + partial_granularity_enabled, + partial_granularity_remote_parent_sampled_setting, + expected_sampled, + expected_priority, + expected_adaptive_sampling_algo_called, +): + required_intrinsics = [] + if expected_sampled is not None: + required_intrinsics.append(Attribute(name="sampled", value=expected_sampled, destinations=0b110)) + if expected_priority is not None: + required_intrinsics.append(Attribute(name="priority", value=expected_priority, destinations=0b110)) + + test_settings = _override_settings.copy() + test_settings.update( + { + "distributed_tracing.sampler.full_granularity.enabled": full_granularity_enabled, + "distributed_tracing.sampler.partial_granularity.enabled": partial_granularity_enabled, + "distributed_tracing.sampler.full_granularity.remote_parent_sampled": full_granularity_remote_parent_sampled_setting, + "distributed_tracing.sampler.partial_granularity.remote_parent_sampled": partial_granularity_remote_parent_sampled_setting, + "span_events.enabled": True, + } + ) + if expected_adaptive_sampling_algo_called: + function_called_decorator = validate_function_called( + "newrelic.core.adaptive_sampler", "AdaptiveSampler.compute_sampled" + ) + else: + function_called_decorator = validate_function_not_called( + "newrelic.core.adaptive_sampler", "AdaptiveSampler.compute_sampled" + ) + + @function_called_decorator + @override_application_settings(test_settings) + @validate_attributes_complete("intrinsic", required_intrinsics) + @background_task(name="test_distributed_trace_attributes") + def _test(): + headers = {"traceparent": "00-0af7651916cd43dd8448eb211c80319c-00f067aa0ba902b7-01"} + accept_distributed_trace_headers(headers) + + _test() + + +def test_partial_granularity_max_compressed_spans(): + """ + Tests `nr.ids` does not exceed 1024 byte limit. 
+ """ + + async def test(index): + with ExternalTrace("requests", "http://localhost:3000/", method="GET") as trace: + time.sleep(0.1) + + @function_trace() + async def call_tests(): + tasks = [test(i) for i in range(65)] + await asyncio.gather(*tasks) + + @validate_span_events( + count=1, # Entry span. + exact_intrinsics={ + "name": "Function/test_distributed_tracing:test_partial_granularity_max_compressed_spans.._test" + }, + expected_intrinsics=["duration", "timestamp"], + ) + @validate_span_events( + count=1, # 1 external compressed span. + exact_intrinsics={"name": "External/localhost:3000/requests/GET"}, + exact_agents={"http.url": "http://localhost:3000/"}, + expected_agents=["nr.durations", "nr.ids"], + ) + @validate_compact_span_event( + name="External/localhost:3000/requests/GET", + # `nr.ids` can only hold 63 ids but duration reflects all compressed spans. + compressed_span_count=64, + expected_nr_durations_low_bound=6.5, + expected_nr_durations_high_bound=6.8, # 64 of these adds > .2 overhead. + ) + @background_task() + def _test(): + headers = {"traceparent": "00-0af7651916cd43dd8448eb211c80319c-00f067aa0ba902b7-01"} + accept_distributed_trace_headers(headers) + asyncio.run(call_tests()) + + _test = override_application_settings( + { + "distributed_tracing.sampler.full_granularity.enabled": False, + "distributed_tracing.sampler.partial_granularity.enabled": True, + "distributed_tracing.sampler.partial_granularity.type": "compact", + "distributed_tracing.sampler.partial_granularity.remote_parent_sampled": "always_on", + "span_events.enabled": True, + } + )(_test) + + _test() + + +def test_partial_granularity_compressed_span_attributes_in_series(): + """ + Tests compressed span attributes when compressed span times are serial. + Aka: each span ends before the next compressed span begins. 
+ """ + + async def test(index): + with ExternalTrace("requests", "http://localhost:3000/", method="GET") as trace: + time.sleep(0.1) + + @function_trace() + async def call_tests(): + tasks = [test(i) for i in range(3)] + await asyncio.gather(*tasks) + + @validate_span_events( + count=1, # Entry span. + exact_intrinsics={ + "name": "Function/test_distributed_tracing:test_partial_granularity_compressed_span_attributes_in_series.._test" + }, + expected_intrinsics=["duration", "timestamp"], + ) + @validate_span_events( + count=1, # 1 external compressed span. + exact_intrinsics={"name": "External/localhost:3000/requests/GET"}, + exact_agents={"http.url": "http://localhost:3000/"}, + expected_agents=["nr.durations", "nr.ids"], + ) + @validate_compact_span_event( + name="External/localhost:3000/requests/GET", + compressed_span_count=3, + expected_nr_durations_low_bound=0.3, + expected_nr_durations_high_bound=0.4, + ) + @background_task() + def _test(): + headers = {"traceparent": "00-0af7651916cd43dd8448eb211c80319c-00f067aa0ba902b7-01"} + accept_distributed_trace_headers(headers) + asyncio.run(call_tests()) + + _test = override_application_settings( + { + "distributed_tracing.sampler.full_granularity.enabled": False, + "distributed_tracing.sampler.partial_granularity.enabled": True, + "distributed_tracing.sampler.partial_granularity.type": "compact", + "distributed_tracing.sampler.partial_granularity.remote_parent_sampled": "always_on", + "span_events.enabled": True, + } + )(_test) + + _test() + + +def test_partial_granularity_compressed_span_attributes_overlapping(): + """ + Tests compressed span attributes when compressed span times overlap. + Aka: the next span begins in the middle of the first span. + """ + + @validate_span_events( + count=1, # Entry span. 
+ exact_intrinsics={ + "name": "Function/test_distributed_tracing:test_partial_granularity_compressed_span_attributes_overlapping.._test" + }, + expected_intrinsics=["duration", "timestamp"], + ) + @validate_span_events( + count=1, # 1 external compressed span. + exact_intrinsics={"name": "External/localhost:3000/requests/GET"}, + exact_agents={"http.url": "http://localhost:3000/"}, + expected_agents=["nr.durations", "nr.ids"], + ) + @validate_compact_span_event( + name="External/localhost:3000/requests/GET", + compressed_span_count=2, + expected_nr_durations_low_bound=0.1, + expected_nr_durations_high_bound=0.2, + ) + @background_task() + def _test(): + headers = {"traceparent": "00-0af7651916cd43dd8448eb211c80319c-00f067aa0ba902b7-01"} + accept_distributed_trace_headers(headers) + with ExternalTrace("requests", "http://localhost:3000/", method="GET") as trace1: + # Override terminal_node so we can create a nested exit span. + trace1.terminal_node = lambda: False + trace2 = ExternalTrace("requests", "http://localhost:3000/", method="GET") + trace2.__enter__() + time.sleep(0.1) + trace2.__exit__(None, None, None) + + _test = override_application_settings( + { + "distributed_tracing.sampler.full_granularity.enabled": False, + "distributed_tracing.sampler.partial_granularity.enabled": True, + "distributed_tracing.sampler.partial_granularity.type": "compact", + "distributed_tracing.sampler.partial_granularity.remote_parent_sampled": "always_on", + "span_events.enabled": True, + } + )(_test) + + _test() + + +def test_partial_granularity_reduced_span_attributes(): + """ + In reduced mode, only inprocess spans are dropped. + """ + + @function_trace() + def foo(): + with ExternalTrace("requests", "http://localhost:3000/", method="GET") as trace: + trace.add_custom_attribute("custom", "bar") + + @validate_span_events( + count=1, # Entry span. 
+ exact_intrinsics={ + "name": "Function/test_distributed_tracing:test_partial_granularity_reduced_span_attributes.._test" + }, + expected_intrinsics=["duration", "timestamp"], + expected_agents=["code.function", "code.lineno", "code.namespace"], + ) + @validate_span_events( + count=0, # Function foo span should not be present. + exact_intrinsics={ + "name": "Function/test_distributed_tracing:test_partial_granularity_reduced_span_attributes..foo" + }, + expected_intrinsics=["duration", "timestamp"], + ) + @validate_span_events( + count=2, # 2 external spans. + exact_intrinsics={"name": "External/localhost:3000/requests/GET"}, + exact_agents={"http.url": "http://localhost:3000/"}, + exact_users={"custom": "bar"}, + ) + @background_task() + def _test(): + headers = {"traceparent": "00-0af7651916cd43dd8448eb211c80319c-00f067aa0ba902b7-01"} + accept_distributed_trace_headers(headers) + with ExternalTrace("requests", "http://localhost:3000/", method="GET") as trace: + # Override terminal_node so we can create a nested exit span. + trace.terminal_node = lambda: False + trace.add_custom_attribute("custom", "bar") + foo() + + _test = override_application_settings( + { + "distributed_tracing.sampler.full_granularity.enabled": False, + "distributed_tracing.sampler.partial_granularity.enabled": True, + "distributed_tracing.sampler.partial_granularity.type": "reduced", + "distributed_tracing.sampler.partial_granularity.remote_parent_sampled": "always_on", + "span_events.enabled": True, + } + )(_test) + + _test() + + +def test_partial_granularity_essential_span_attributes(): + """ + In essential mode, inprocess spans are dropped and non-entity synthesis attributes. + """ + + @function_trace() + def foo(): + with ExternalTrace("requests", "http://localhost:3000/", method="GET") as trace: + trace.add_custom_attribute("custom", "bar") + + @validate_span_events( + count=1, # Entry span. 
+ exact_intrinsics={ + "name": "Function/test_distributed_tracing:test_partial_granularity_essential_span_attributes.._test" + }, + expected_intrinsics=["duration", "timestamp"], + unexpected_agents=["code.function", "code.lineno", "code.namespace"], + ) + @validate_span_events( + count=0, # Function foo span should not be present. + exact_intrinsics={ + "name": "Function/test_distributed_tracing:test_partial_granularity_essential_span_attributes..foo" + }, + expected_intrinsics=["duration", "timestamp"], + ) + @validate_span_events( + count=2, # 2 external spans. + exact_intrinsics={"name": "External/localhost:3000/requests/GET"}, + exact_agents={"http.url": "http://localhost:3000/"}, + unexpected_users=["custom"], + ) + @background_task() + def _test(): + headers = {"traceparent": "00-0af7651916cd43dd8448eb211c80319c-00f067aa0ba902b7-01"} + accept_distributed_trace_headers(headers) + with ExternalTrace("requests", "http://localhost:3000/", method="GET") as trace: + # Override terminal_node so we can create a nested exit span. 
+ trace.terminal_node = lambda: False + trace.add_custom_attribute("custom", "bar") + foo() + + _test = override_application_settings( + { + "distributed_tracing.sampler.full_granularity.enabled": False, + "distributed_tracing.sampler.partial_granularity.enabled": True, + "distributed_tracing.sampler.partial_granularity.type": "essential", + "distributed_tracing.sampler.partial_granularity.remote_parent_sampled": "always_on", + "span_events.enabled": True, + } + )(_test) + + _test() diff --git a/tests/agent_unittests/test_agent_protocol.py b/tests/agent_unittests/test_agent_protocol.py index 7a2011d00c..8d9e353978 100644 --- a/tests/agent_unittests/test_agent_protocol.py +++ b/tests/agent_unittests/test_agent_protocol.py @@ -467,7 +467,7 @@ def test_connect( # Verify that agent settings sent have converted null, containers, and # unserializable types to string assert agent_settings_payload["proxy_host"] == "None" - assert agent_settings_payload["attributes.include"] == "[]" + assert agent_settings_payload["attributes.include"] == str(set()) assert agent_settings_payload["feature_flag"] == str(set()) assert isinstance(agent_settings_payload["attribute_filter"], str) diff --git a/tests/agent_unittests/test_distributed_tracing_settings.py b/tests/agent_unittests/test_distributed_tracing_settings.py index a1c99da58d..3668cfbe32 100644 --- a/tests/agent_unittests/test_distributed_tracing_settings.py +++ b/tests/agent_unittests/test_distributed_tracing_settings.py @@ -14,6 +14,8 @@ import pytest +from newrelic.core.config import finalize_application_settings + INI_FILE_EMPTY = b""" [newrelic] """ @@ -30,3 +32,35 @@ def test_distributed_trace_setings(ini, env, expected_format, global_settings): settings = global_settings() assert settings.distributed_tracing.exclude_newrelic_header == expected_format + + +@pytest.mark.parametrize( + "ini,env", + ( + ( + INI_FILE_EMPTY, + { + "NEW_RELIC_ENABLED": "true", + "NEW_RELIC_DISTRIBUTED_TRACING_SAMPLER_REMOTE_PARENT_SAMPLED": 
"default", + "NEW_RELIC_DISTRIBUTED_TRACING_SAMPLER_REMOTE_PARENT_NOT_SAMPLED": "default", + "NEW_RELIC_DISTRIBUTED_TRACING_SAMPLER_FULL_GRANULARITY_REMOTE_PARENT_SAMPLED": "always_on", + "NEW_RELIC_DISTRIBUTED_TRACING_SAMPLER_FULL_GRANULARITY_REMOTE_PARENT_NOT_SAMPLED": "always_off", + }, + ), + ( + INI_FILE_EMPTY, + { + "NEW_RELIC_ENABLED": "true", + "NEW_RELIC_DISTRIBUTED_TRACING_SAMPLER_REMOTE_PARENT_SAMPLED": "always_on", + "NEW_RELIC_DISTRIBUTED_TRACING_SAMPLER_REMOTE_PARENT_NOT_SAMPLED": "always_off", + }, + ), + ), +) +def test_full_granularity_precedence(ini, env, global_settings): + settings = global_settings() + + app_settings = finalize_application_settings(settings=settings) + + assert app_settings.distributed_tracing.sampler.full_granularity.remote_parent_sampled == "always_on" + assert app_settings.distributed_tracing.sampler.full_granularity.remote_parent_not_sampled == "always_off" diff --git a/tests/agent_unittests/test_harvest_loop.py b/tests/agent_unittests/test_harvest_loop.py index 9717e956ba..8447b18eb5 100644 --- a/tests/agent_unittests/test_harvest_loop.py +++ b/tests/agent_unittests/test_harvest_loop.py @@ -166,6 +166,7 @@ def transaction_node(request): root_span_guid=None, trace_id="4485b89db608aece", loop_time=0.0, + partial_granularity_sampled=False, ) return node diff --git a/tests/agent_unittests/test_infinite_trace_settings.py b/tests/agent_unittests/test_infinite_trace_settings.py index 4b47a72398..31c8e6819e 100644 --- a/tests/agent_unittests/test_infinite_trace_settings.py +++ b/tests/agent_unittests/test_infinite_trace_settings.py @@ -14,6 +14,8 @@ import pytest +from newrelic.core.config import finalize_application_settings + INI_FILE_EMPTY = b""" [newrelic] """ @@ -77,3 +79,18 @@ def test_infinite_tracing_port(ini, env, expected_port, global_settings): def test_infinite_tracing_span_queue_size(ini, env, expected_size, global_settings): settings = global_settings() assert settings.infinite_tracing.span_queue_size == 
expected_size + + +@pytest.mark.parametrize( + "ini,env", + ((INI_FILE_INFINITE_TRACING, {"NEW_RELIC_DISTRIBUTED_TRACING_SAMPLER_PARTIAL_GRANULARITY_ENABLED": "true"}),), +) +def test_partial_granularity_dissabled_when_infinite_tracing_enabled(ini, env, global_settings): + settings = global_settings() + assert settings.distributed_tracing.sampler.partial_granularity.enabled + assert settings.infinite_tracing.enabled + + app_settings = finalize_application_settings(settings=settings) + + assert not app_settings.distributed_tracing.sampler.partial_granularity.enabled + assert app_settings.infinite_tracing.enabled diff --git a/tests/datastore_aiomysql/test_database.py b/tests/datastore_aiomysql/test_database.py index 20d1a48586..8cc386cfe1 100644 --- a/tests/datastore_aiomysql/test_database.py +++ b/tests/datastore_aiomysql/test_database.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +import inspect + import aiomysql from testing_support.db_settings import mysql_settings from testing_support.util import instance_hostname @@ -150,3 +152,35 @@ async def _test(): await pool.wait_closed() loop.run_until_complete(_test()) + + +@background_task() +def test_connection_pool_no_double_wrap(loop): + async def _test(): + pool = await aiomysql.create_pool( + db=DB_SETTINGS["name"], + user=DB_SETTINGS["user"], + password=DB_SETTINGS["password"], + host=DB_SETTINGS["host"], + port=DB_SETTINGS["port"], + loop=loop, + ) + + # Retrieve the same connection from the pool twice to see if it gets double wrapped + async with pool.acquire() as first_connection: + first_connection_unwrapped = inspect.unwrap(first_connection) + async with pool.acquire() as second_connection: + second_connection_unwrapped = inspect.unwrap(second_connection) + + # Ensure we actually retrieved the same underlying connection object from the pool twice + assert first_connection_unwrapped is second_connection_unwrapped, "Did not get same 
connection from pool" + + # Check that wrapping occurred only once + assert hasattr(first_connection, "__wrapped__"), "first_connection object was not wrapped" + assert hasattr(second_connection, "__wrapped__"), "second_connection object was not wrapped" + assert not hasattr(second_connection.__wrapped__, "__wrapped__"), "second_connection was double wrapped" + + pool.close() + await pool.wait_closed() + + loop.run_until_complete(_test()) diff --git a/tests/external_aiobotocore/test_bedrock_chat_completion_invoke_model.py b/tests/external_aiobotocore/test_bedrock_chat_completion_invoke_model.py index 65cb276c77..e02cc5b543 100644 --- a/tests/external_aiobotocore/test_bedrock_chat_completion_invoke_model.py +++ b/tests/external_aiobotocore/test_bedrock_chat_completion_invoke_model.py @@ -73,6 +73,7 @@ def request_streaming(request): "amazon.titan-text-express-v1", "ai21.j2-mid-v1", "anthropic.claude-instant-v1", + "anthropic.claude-3-sonnet-20240229-v1:0", "meta.llama2-13b-chat-v1", "mistral.mistral-7b-instruct-v0:2", ], diff --git a/tests/external_botocore/_mock_external_bedrock_server_invoke_model.py b/tests/external_botocore/_mock_external_bedrock_server_invoke_model.py index 5114a251fd..6dd1fbaac0 100644 --- a/tests/external_botocore/_mock_external_bedrock_server_invoke_model.py +++ b/tests/external_botocore/_mock_external_bedrock_server_invoke_model.py @@ -184,6 +184,27 @@ "0000009b0000004b22fa51700b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131397a64473977496977696157356b5a5867694f6a4239227dc0567ebe", ], ], + "anthropic.claude-3-sonnet-20240229-v1%3A0::The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. 
If the AI does not know the answer to a question, it truthfully says it does not know.": [ + { + "Content-Type": "application/vnd.amazon.eventstream", + "x-amzn-RequestId": "e8fc1dd7-3d1e-42c6-9c58-535cae563bff", + }, + 200, + [ + "000002280000004b385582bf0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f696257567a6332466e5a56397a6447467964434973496d316c63334e685a3255694f6e7369615751694f694a7463326466596d5279613138774d544e765a4552425157646a61316c4d6548677a4d545a6e56484e68615651694c434a306558426c496a6f696257567a6332466e5a534973496e4a76624755694f694a6863334e7063335268626e51694c434a746232526c62434936496d4e735958566b5a53307a4c54637463323975626d56304c5449774d6a55774d6a45354969776959323975644756756443493657313073496e4e3062334266636d566863323975496a7075645778734c434a7a6447397758334e6c6358566c626d4e6c496a7075645778734c434a316332466e5a53493665794a70626e4231644639306232746c626e4d694f6a55334c434a6a59574e6f5a56396a636d566864476c76626c3970626e4231644639306232746c626e4d694f6a4173496d4e685932686c58334a6c595752666157357764585266644739725a57357a496a6f774c434a766458527764585266644739725a57357a496a6f7866583139222c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a41424344227d61e93a7f000001090000004b4260f6a50b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131397a6447467964434973496d6c755a475634496a6f774c434a6a623235305a57353058324a7362324e72496a7037496e5235634755694f694a305a58683049697769644756346443493649694a3966513d3d222c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d4e4f5051227d389cc5b50000010b0000004b38a0a5c50b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a
6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131396b5a57783059534973496d6c755a475634496a6f774c434a6b5a5778305953493665794a306558426c496a6f69644756346446396b5a57783059534973496e526c654851694f694a4a496e3139222c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d4e4f5051525354555657227d44a91993", + "0000012f0000004b0ce12c010b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131396b5a57783059534973496d6c755a475634496a6f774c434a6b5a5778305953493665794a306558426c496a6f69644756346446396b5a57783059534973496e526c654851694f69496e625342795a57466b655342306279426f59585a6c494745696658303d222c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d4e4f505152535455565758595a303132333435363738227d800aab0f", + "000001250000004b465134a00b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131396b5a57783059534973496d6c755a475634496a6f774c434a6b5a5778305953493665794a306558426c496a6f69644756346446396b5a57783059534973496e526c654851694f6949675a6e4a705a57356b62486b6759323975646d56796332463061573975494746755a434277636d39326157526c496e3139222c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445227d802cf867", + 
"0000012e0000004b318105b10b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131396b5a57783059534973496d6c755a475634496a6f774c434a6b5a5778305953493665794a306558426c496a6f69644756346446396b5a57783059534973496e526c654851694f6949675a47563059576c735a575167636d567a634739756332567a49474a686332566b494739754947313549477475623364735a57526e5a534a3966513d3d222c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142227da34e7683", + "000001040000004bbaf032140b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131396b5a57783059534973496d6c755a475634496a6f774c434a6b5a5778305953493665794a306558426c496a6f69644756346446396b5a57783059534973496e526c654851694f69497549456c6d49486c7664534268633273696658303d222c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a227d6c7c28fc", + "0000011e0000004b90a0bd370b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131396b5a57783059534973496d6c755a475634496a6f774c434a6b5a5778305953493665794a306558426c496a6f69644756346446396b5a57783059534973496e526c654851694f69496762575567633239745a58526f6157356e49456b675a4739754a33516761323576647977696658303d222c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a414243444546227d3d05ad95", + 
"000001010000004b7210bd640b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131396b5a57783059534973496d6c755a475634496a6f774c434a6b5a5778305953493665794a306558426c496a6f69644756346446396b5a57783059534973496e526c654851694f69496753536473624342695a53423163475a79623235304947466962335630496e3139222c2270223a226162636465666768696a6b227d54598ee9", + "000001240000004b7b311d100b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131396b5a57783059534973496d6c755a475634496a6f774c434a6b5a5778305953493665794a306558426c496a6f69644756346446396b5a57783059534973496e526c654851694f69496764476868644334675632686864434233623356735a4342356233556762476c725a53423062794a3966513d3d222c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748227d2ed8f3e1", + "000001110000004b12f02ae60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131396b5a57783059534973496d6c755a475634496a6f774c434a6b5a5778305953493665794a306558426c496a6f69644756346446396b5a57783059534973496e526c654851694f6949675a476c7a5933567a6379423062325268655438696658303d222c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a414243444546474849227d3a079dac", + 
"000000da0000004b476920890b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131397a64473977496977696157356b5a5867694f6a4239222c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d4e4f505152535455565758595a30313233227dfb6c29f4", + "000001150000004be7708c260b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f696257567a6332466e5a56396b5a57783059534973496d526c62485268496a7037496e4e3062334266636d566863323975496a6f695a57356b58335231636d34694c434a7a6447397758334e6c6358566c626d4e6c496a7075645778736653776964584e685a3255694f6e736962335630634856305833527661325675637949364e445a3966513d3d222c2270223a226162636465666768696a6b6c6d6e6f70717273227d4edf2495", + "000001750000004b7e42fb6b0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f696257567a6332466e5a56397a644739774969776959573168656d39754c574a6c5a484a76593273746157353262324e6864476c76626b316c64484a7059334d694f6e736961573577645852556232746c626b4e7664573530496a6f314e7977696233563063485630564739725a57354462335675644349364e445973496d6c75646d396a595852706232354d5958526c626d4e35496a6f784e546b774c434a6d61584a7a64454a356447564d5958526c626d4e35496a6f324d444a3966513d3d222c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d4e4f505152535455565758595a30313233343536227d62c2f995", + ], + ], "meta.llama2-13b-chat-v1::[INST] The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. 
If the AI does not know the answer to a question, it truthfully says it does not know.": [ { "Content-Type": "application/vnd.amazon.eventstream", @@ -385,6 +406,23 @@ "0000016b0000004ba192d2880b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a6a623231776247563061573975496a6f694969776963335276634639795a57467a623234694f694a7a6447397758334e6c6358566c626d4e6c496977696333527663434936496c78755847354964573168626a6f694c434a686257463662323474596d566b636d396a61793170626e5a76593246306157397554575630636d6c6a6379493665794a70626e4231644652766132567551323931626e51694f6a45354c434a7664585277645852556232746c626b4e7664573530496a6f354f5377696157353262324e6864476c76626b78686447567559336b694f6a45314e7a4173496d5a70636e4e30516e6c305a5578686447567559336b694f6a51784d583139227d9a4fc171", ], ], + "anthropic.claude-3-sonnet-20240229-v1%3A0::What is 212 degrees Fahrenheit converted to Celsius?": [ + { + "Content-Type": "application/vnd.amazon.eventstream", + "x-amzn-RequestId": "1efe6197-80f9-43a6-89a5-bb536c1b822f", + }, + 200, + [ + 
"000002180000004b99743a390b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f696257567a6332466e5a56397a6447467964434973496d316c63334e685a3255694f6e7369615751694f694a7463326466596d5279613138774d5574335a6c68345a4759345a474e52656b7734616c5a584e30704961554d694c434a306558426c496a6f696257567a6332466e5a534973496e4a76624755694f694a6863334e7063335268626e51694c434a746232526c62434936496d4e735958566b5a53307a4c54637463323975626d56304c5449774d6a55774d6a45354969776959323975644756756443493657313073496e4e3062334266636d566863323975496a7075645778734c434a7a6447397758334e6c6358566c626d4e6c496a7075645778734c434a316332466e5a53493665794a70626e4231644639306232746c626e4d694f6a49784c434a6a59574e6f5a56396a636d566864476c76626c3970626e4231644639306232746c626e4d694f6a4173496d4e685932686c58334a6c595752666157357764585266644739725a57357a496a6f774c434a766458527764585266644739725a57357a496a6f3066583139222c2270223a226162636465666768696a6b6c6d6e227d3d1a346f000000e80000004b9c88cb6f0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131397a6447467964434973496d6c755a475634496a6f774c434a6a623235305a57353058324a7362324e72496a7037496e5235634755694f694a305a58683049697769644756346443493649694a3966513d3d222c2270223a226162636465666768696a227d9956467b000001110000004b12f02ae60b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131396b5a57783059534973496d6c755a475634496a6f774c434a6b5a5778305953493665794a306558426c496a6f69644756346446396b5a57783059534973496e526c654851694f6949794d5449675a47566e636d566c63794a3966513d3d222c2270223a226162636
465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d227de0817863", + "0000013f0000004b6c01bb830b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131396b5a57783059534973496d6c755a475634496a6f774c434a6b5a5778305953493665794a306558426c496a6f69644756346446396b5a57783059534973496e526c654851694f694967526d466f636d567561475670644342706379426c63585670646d46735a57353049485276494445774d434a3966513d3d222c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d4e4f505152535455565758595a3031323334227d4d984565", + "000001300000004bee512c520b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131396b5a57783059534973496d6c755a475634496a6f774c434a6b5a5778305953493665794a306558426c496a6f69644756346446396b5a57783059534973496e526c654851694f6949675a47566e636d566c637942445a57787a6158567a4c6c78755847355561476c7a496e3139222c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d4e4f505152535455565758595a3031227dcde98f72", + "000001380000004bde2167930b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131396b5a57783059534973496d6c755a475634496a6f774c434a6b5a5778305953493665794a306558426c496a6f69644756346446396b5a57783059534973496e526c654851694f69496761584d676447686c49474a7661577870626d636763473970626e516762325967643246305a5849696658303d222c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d4e4f505152535455565758595a3031227d704e3e29", + 
"000001200000004b8eb1bbd00b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131396b5a57783059534973496d6c755a475634496a6f774c434a6b5a5778305953493665794a306558426c496a6f69644756346446396b5a57783059534973496e526c654851694f6949675958516763335268626d5268636d51675958527462334e776147567961574d6763484a6c63334e31636d5575496e3139222c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a227d878c94d4", + "000000b40000004b616be9a50b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f695932397564475675644639696247396a6131397a64473977496977696157356b5a5867694f6a4239222c2270223a226162636465666768696a6b6c6d6e6f707172227d819037fa", + "000001350000004b26b1a3220b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f696257567a6332466e5a56396b5a57783059534973496d526c62485268496a7037496e4e3062334266636d566863323975496a6f695a57356b58335231636d34694c434a7a6447397758334e6c6358566c626d4e6c496a7075645778736653776964584e685a3255694f6e736962335630634856305833527661325675637949364d7a523966513d3d222c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d4e4f50515253545556575859227d97f2c35e", + 
"000001730000004bf1020ecb0b3a6576656e742d747970650700056368756e6b0d3a636f6e74656e742d747970650700106170706c69636174696f6e2f6a736f6e0d3a6d6573736167652d747970650700056576656e747b226279746573223a2265794a306558426c496a6f696257567a6332466e5a56397a644739774969776959573168656d39754c574a6c5a484a76593273746157353262324e6864476c76626b316c64484a7059334d694f6e736961573577645852556232746c626b4e7664573530496a6f794d5377696233563063485630564739725a57354462335675644349364d7a5173496d6c75646d396a595852706232354d5958526c626d4e35496a6f784d5459334c434a6d61584a7a64454a356447564d5958526c626d4e35496a6f314e446c3966513d3d222c2270223a226162636465666768696a6b6c6d6e6f707172737475767778797a4142434445464748494a4b4c4d4e4f505152535455565758595a3031323334227dce4b9fd5", + ], + ], "cohere.command-text-v14::What is 212 degrees Fahrenheit converted to Celsius?": [ { "Content-Type": "application/vnd.amazon.eventstream", @@ -616,6 +654,24 @@ "usage": {"input_tokens": 73, "output_tokens": 13}, }, ], + "anthropic.claude-3-sonnet-20240229-v1%3A0::The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.": [ + {"Content-Type": "application/json", "x-amzn-RequestId": "96c7306d-2d60-4629-83e9-dbd6befb0e4e"}, + 200, + { + "id": "msg_bdrk_01QyLcwkWBVCzcNv97J8oC3Q", + "model": "claude-3-7-sonnet-20250219", + "type": "message", + "role": "assistant", + "content": [ + { + "type": "text", + "text": "I'm ready for a friendly conversation! I'll share specific details when I can, and if I don't know something, I'll be straightforward about that. What would you like to talk about today?", + } + ], + "stop_reason": "end_turn", + "usage": {"input_tokens": 57, "output_tokens": 45}, + }, + ], "meta.llama2-13b-chat-v1::[INST] The following is a friendly conversation between a human and an AI. 
The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.": [ {"Content-Type": "application/json", "x-amzn-RequestId": "cce6b34c-812c-4f97-8885-515829aa9639"}, 200, @@ -824,6 +880,24 @@ "stop": None, }, ], + "anthropic.claude-3-sonnet-20240229-v1%3A0::What is 212 degrees Fahrenheit converted to Celsius?": [ + {"Content-Type": "application/json", "x-amzn-RequestId": "ab38295d-df9c-4141-8173-38221651bf46"}, + 200, + { + "id": "msg_bdrk_018mZM1sfTFG8NdbP2mZKZAy", + "model": "claude-3-7-sonnet-20250219", + "type": "message", + "role": "assistant", + "content": [ + { + "type": "text", + "text": "212 degrees Fahrenheit equals 100 degrees Celsius. This is the boiling point of water at standard atmospheric pressure.", + } + ], + "stop_reason": "end_turn", + "usage": {"input_tokens": 21, "output_tokens": 31}, + }, + ], "cohere.command-text-v14::What is 212 degrees Fahrenheit converted to Celsius?": [ {"Content-Type": "application/json", "x-amzn-RequestId": "12912a17-aa13-45f3-914c-cc82166f3601"}, 200, @@ -5048,6 +5122,15 @@ 403, {"message": "The security token included in the request is invalid."}, ], + "anthropic.claude-3-sonnet-20240229-v1%3A0::Invalid Token": [ + { + "Content-Type": "application/json", + "x-amzn-RequestId": "282ba076-576f-46aa-a2e6-680392132e87", + "x-amzn-ErrorType": "UnrecognizedClientException:http://internal.amazon.com/coral/com.amazon.coral.service/", + }, + 403, + {"message": "The security token included in the request is invalid."}, + ], "cohere.command-text-v14::Invalid Token": [ { "Content-Type": "application/json", diff --git a/tests/external_botocore/_test_bedrock_chat_completion.py b/tests/external_botocore/_test_bedrock_chat_completion.py index 155b6c993c..fd970b0603 100644 --- a/tests/external_botocore/_test_bedrock_chat_completion.py +++ b/tests/external_botocore/_test_bedrock_chat_completion.py @@ -19,6 +19,7 @@ 
"amazon.titan-text-express-v1": '{ "inputText": "%s", "textGenerationConfig": {"temperature": %f, "maxTokenCount": %d }}', "ai21.j2-mid-v1": '{"prompt": "%s", "temperature": %f, "maxTokens": %d}', "anthropic.claude-instant-v1": '{"prompt": "Human: %s Assistant:", "temperature": %f, "max_tokens_to_sample": %d}', + "anthropic.claude-3-sonnet-20240229-v1:0": '{"anthropic_version": "bedrock-2023-05-31", "messages": [{"role": "user", "content": "%s"}], "temperature": %f, "max_tokens": %d}', "cohere.command-text-v14": '{"prompt": "%s", "temperature": %f, "max_tokens": %d}', "meta.llama2-13b-chat-v1": '{"prompt": "%s", "temperature": %f, "max_gen_len": %d}', "mistral.mistral-7b-instruct-v0:2": '{"prompt": "[INST] %s [/INST]", "temperature": %f, "max_tokens": %d}', @@ -262,6 +263,65 @@ }, ), ], + "anthropic.claude-3-sonnet-20240229-v1:0": [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "ab38295d-df9c-4141-8173-38221651bf46", + "duration": None, # Response time varies each test run + "request.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "request.temperature": 0.7, + "request.max_tokens": 100, + "response.choices.finish_reason": "end_turn", + "vendor": "bedrock", + "ingest_source": "Python", + "response.number_of_messages": 2, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "ab38295d-df9c-4141-8173-38221651bf46", + "span_id": None, + "trace_id": "trace-id", + "content": "What is 212 degrees Fahrenheit converted to Celsius?", + "role": "user", + "completion_id": None, + "sequence": 0, + "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "vendor": "bedrock", + "ingest_source": "Python", + }, + 
), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "ab38295d-df9c-4141-8173-38221651bf46", + "span_id": None, + "trace_id": "trace-id", + "content": "212 degrees Fahrenheit equals 100 degrees Celsius. This is the boiling point of water at standard atmospheric pressure.", + "role": "assistant", + "completion_id": None, + "sequence": 1, + "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "vendor": "bedrock", + "ingest_source": "Python", + "is_response": True, + }, + ), + ], "cohere.command-text-v14": [ ( {"type": "LlmChatCompletionSummary"}, @@ -555,6 +615,62 @@ }, ), ], + "anthropic.claude-3-sonnet-20240229-v1:0": [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "e8fc1dd7-3d1e-42c6-9c58-535cae563bff", + "duration": None, # Response time varies each test run + "request.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "vendor": "bedrock", + "ingest_source": "Python", + "response.number_of_messages": 2, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "e8fc1dd7-3d1e-42c6-9c58-535cae563bff", + "span_id": None, + "trace_id": "trace-id", + "content": "The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. 
If the AI does not know the answer to a question, it truthfully says it does not know.\n\nCurrent conversation:\n\nHuman: Hi there!\nAI:", + "role": "user", + "completion_id": None, + "sequence": 0, + "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "e8fc1dd7-3d1e-42c6-9c58-535cae563bff", + "span_id": None, + "trace_id": "trace-id", + "content": "I'm ready for a friendly conversation! I'll share specific details when I can, and if I don't know something, I'll be straightforward about that. What would you like to talk about today?", + "role": "assistant", + "completion_id": None, + "sequence": 1, + "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "vendor": "bedrock", + "ingest_source": "Python", + "is_response": True, + }, + ), + ], "meta.llama2-13b-chat-v1": [ ( {"type": "LlmChatCompletionSummary"}, @@ -787,6 +903,63 @@ }, ), ], + "anthropic.claude-3-sonnet-20240229-v1:0": [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "96c7306d-2d60-4629-83e9-dbd6befb0e4e", + "duration": None, # Response time varies each test run + "request.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "response.choices.finish_reason": "end_turn", + "vendor": "bedrock", + "ingest_source": "Python", + "response.number_of_messages": 2, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "96c7306d-2d60-4629-83e9-dbd6befb0e4e", + "span_id": None, + "trace_id": "trace-id", + 
"content": "The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n\nCurrent conversation:\n\nHuman: Hi there!\nAI:", + "role": "user", + "completion_id": None, + "sequence": 0, + "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "96c7306d-2d60-4629-83e9-dbd6befb0e4e", + "span_id": None, + "trace_id": "trace-id", + "content": "I'm ready for a friendly conversation! I'll share specific details when I can, and if I don't know something, I'll be straightforward about that. What would you like to talk about today?", + "role": "assistant", + "completion_id": None, + "sequence": 1, + "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "vendor": "bedrock", + "ingest_source": "Python", + "is_response": True, + }, + ), + ], "meta.llama2-13b-chat-v1": [ ( {"type": "LlmChatCompletionSummary"}, @@ -1024,6 +1197,64 @@ }, ), ], + "anthropic.claude-3-sonnet-20240229-v1:0": [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "1efe6197-80f9-43a6-89a5-bb536c1b822f", + "duration": None, # Response time varies each test run + "request.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "request.temperature": 0.7, + "request.max_tokens": 100, + "vendor": "bedrock", + "ingest_source": "Python", + "response.number_of_messages": 2, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run 
+ "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "1efe6197-80f9-43a6-89a5-bb536c1b822f", + "span_id": None, + "trace_id": "trace-id", + "content": "What is 212 degrees Fahrenheit converted to Celsius?", + "role": "user", + "completion_id": None, + "sequence": 0, + "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": "1efe6197-80f9-43a6-89a5-bb536c1b822f", + "span_id": None, + "trace_id": "trace-id", + "content": "212 degrees Fahrenheit is equivalent to 100 degrees Celsius.\n\nThis is the boiling point of water at standard atmospheric pressure.", + "role": "assistant", + "completion_id": None, + "sequence": 1, + "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "vendor": "bedrock", + "ingest_source": "Python", + "is_response": True, + }, + ), + ], "cohere.command-text-v14": [ ( {"type": "LlmChatCompletionSummary"}, @@ -1326,6 +1557,46 @@ }, ), ], + "anthropic.claude-3-sonnet-20240229-v1:0": [ + ( + {"type": "LlmChatCompletionSummary"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "span_id": None, + "trace_id": "trace-id", + "request_id": "282ba076-576f-46aa-a2e6-680392132e87", + "duration": None, # Response time varies each test run + "request.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "request.temperature": 0.7, + "request.max_tokens": 100, + "vendor": "bedrock", + "ingest_source": "Python", + "response.number_of_messages": 1, + "error": True, + }, + ), + ( + {"type": "LlmChatCompletionMessage"}, + { + "id": None, # UUID that varies with each run + "llm.conversation_id": "my-awesome-id", + "llm.foo": "bar", + "request_id": 
"282ba076-576f-46aa-a2e6-680392132e87", + "span_id": None, + "trace_id": "trace-id", + "content": "Invalid Token", + "role": "user", + "completion_id": None, + "sequence": 0, + "response.model": "anthropic.claude-3-sonnet-20240229-v1:0", + "vendor": "bedrock", + "ingest_source": "Python", + }, + ), + ], "cohere.command-text-v14": [ ( {"type": "LlmChatCompletionSummary"}, diff --git a/tests/external_botocore/test_bedrock_chat_completion_invoke_model.py b/tests/external_botocore/test_bedrock_chat_completion_invoke_model.py index 94a88e7a56..4422685b9f 100644 --- a/tests/external_botocore/test_bedrock_chat_completion_invoke_model.py +++ b/tests/external_botocore/test_bedrock_chat_completion_invoke_model.py @@ -73,6 +73,7 @@ def request_streaming(request): "amazon.titan-text-express-v1", "ai21.j2-mid-v1", "anthropic.claude-instant-v1", + "anthropic.claude-3-sonnet-20240229-v1:0", "cohere.command-text-v14", "meta.llama2-13b-chat-v1", "mistral.mistral-7b-instruct-v0:2", @@ -107,7 +108,6 @@ def _exercise_streaming_model(prompt, temperature=0.7, max_tokens=100): body = (payload_template % (prompt, temperature, max_tokens)).encode("utf-8") if request_streaming: body = BytesIO(body) - response = bedrock_server.invoke_model_with_response_stream( body=body, modelId=model_id, accept="application/json", contentType="application/json" ) diff --git a/tests/external_botocore/test_bedrock_chat_completion_via_langchain.py b/tests/external_botocore/test_bedrock_chat_completion_via_langchain.py index d68b636df2..82537cd10a 100644 --- a/tests/external_botocore/test_bedrock_chat_completion_via_langchain.py +++ b/tests/external_botocore/test_bedrock_chat_completion_via_langchain.py @@ -37,6 +37,7 @@ "amazon.titan-text-express-v1", "ai21.j2-mid-v1", "anthropic.claude-instant-v1", + "anthropic.claude-3-sonnet-20240229-v1:0", "cohere.command-text-v14", "meta.llama2-13b-chat-v1", ], diff --git a/tests/external_botocore/test_boto3_kinesis.py b/tests/external_botocore/test_boto3_kinesis.py 
index 9c03fa154a..9c92c669aa 100644 --- a/tests/external_botocore/test_boto3_kinesis.py +++ b/tests/external_botocore/test_boto3_kinesis.py @@ -46,6 +46,8 @@ } } +UNINSTRUMENTED_KINESIS_METHODS = ("generate_presigned_url", "close", "get_waiter", "can_paginate", "get_paginator") + _kinesis_scoped_metrics = [ (f"MessageBroker/Kinesis/Stream/Produce/Named/{TEST_STREAM}", 2), (f"MessageBroker/Kinesis/Stream/Consume/Named/{TEST_STREAM}", 1), @@ -117,10 +119,7 @@ def test_instrumented_kinesis_methods(): region_name=AWS_REGION, ) - ignored_methods = { - ("kinesis", method) - for method in ("generate_presigned_url", "close", "get_waiter", "can_paginate", "get_paginator") - } + ignored_methods = {("kinesis", method) for method in UNINSTRUMENTED_KINESIS_METHODS} client_methods = inspect.getmembers(client, predicate=inspect.ismethod) methods = {("kinesis", name) for (name, method) in client_methods if not name.startswith("_")} diff --git a/tests/logger_structlog/test_attributes.py b/tests/logger_structlog/test_attributes.py index c41591f192..f76821cd4a 100644 --- a/tests/logger_structlog/test_attributes.py +++ b/tests/logger_structlog/test_attributes.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +import sys + import pytest from testing_support.validators.validate_log_event_count import validate_log_event_count from testing_support.validators.validate_log_events import validate_log_events @@ -23,12 +25,18 @@ def logger(structlog_caplog): import structlog + # For Python < 3.11 co_qualname does not exist and causes errors. + # Remove it from the CallsiteParameterAdder input list. 
+ _callsite_params = set(structlog.processors.CallsiteParameter) + if sys.version_info < (3, 11) and hasattr(structlog.processors.CallsiteParameter, "QUAL_NAME"): + _callsite_params.remove(structlog.processors.CallsiteParameter.QUAL_NAME) + structlog.configure( processors=[ structlog.contextvars.merge_contextvars, structlog.processors.format_exc_info, structlog.processors.StackInfoRenderer(), - structlog.processors.CallsiteParameterAdder(), + structlog.processors.CallsiteParameterAdder(parameters=_callsite_params), ], logger_factory=lambda *args, **kwargs: structlog_caplog, ) diff --git a/tests/mlmodel_openai/test_chat_completion_v1.py b/tests/mlmodel_openai/test_chat_completion_v1.py index 817db35d8e..969e4233bf 100644 --- a/tests/mlmodel_openai/test_chat_completion_v1.py +++ b/tests/mlmodel_openai/test_chat_completion_v1.py @@ -11,9 +11,14 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- import openai -from testing_support.fixtures import override_llm_token_callback_settings, reset_core_stats_engine, validate_attributes +import pytest +from testing_support.fixtures import ( + override_application_settings, + override_llm_token_callback_settings, + reset_core_stats_engine, + validate_attributes, +) from testing_support.ml_testing_utils import ( add_token_count_to_events, disabled_ai_monitoring_record_content_settings, @@ -31,7 +36,7 @@ from newrelic.api.background_task import background_task from newrelic.api.llm_custom_attributes import WithLlmCustomAttributes -from newrelic.api.transaction import add_custom_attribute +from newrelic.api.transaction import accept_distributed_trace_headers, add_custom_attribute _test_openai_chat_completion_messages = ( {"role": "system", "content": "You are a scientist."}, @@ -387,6 +392,46 @@ def test_openai_chat_completion_async_with_llm_metadata_no_content(loop, set_tra ) +@pytest.mark.parametrize("partial_granularity_type", ("reduced", "essential", "compact")) +def test_openai_chat_completion_async_in_txn_with_token_count_partial_granularity_dt( + partial_granularity_type, set_trace_info, loop, async_openai_client +): + @reset_core_stats_engine() + @disabled_ai_monitoring_record_content_settings + @validate_custom_events(events_sans_content(chat_completion_recorded_events)) + @validate_custom_event_count(count=4) + @validate_transaction_metrics( + "test_chat_completion_v1:test_openai_chat_completion_async_in_txn_with_token_count_partial_granularity_dt.._test", + scoped_metrics=[("Llm/completion/OpenAI/create", 1)], + rollup_metrics=[("Llm/completion/OpenAI/create", 1)], + custom_metrics=[(f"Supportability/Python/ML/OpenAI/{openai.__version__}", 1)], + background_task=True, + ) + @validate_attributes("agent", ["llm"]) + @override_application_settings( + { + "distributed_tracing.sampler.full_granularity.enabled": False, + "distributed_tracing.sampler.partial_granularity.enabled": True, + 
"distributed_tracing.sampler.partial_granularity.type": partial_granularity_type, + "distributed_tracing.sampler.partial_granularity.remote_parent_sampled": "always_on", + "span_events.enabled": True, + } + ) + @background_task() + def _test(): + add_custom_attribute("llm.conversation_id", "my-awesome-id") + add_custom_attribute("llm.foo", "bar") + accept_distributed_trace_headers({"traceparent": "00-0af7651916cd43dd8448eb211c80319c-00f067aa0ba902b7-01"}) + set_trace_info() + loop.run_until_complete( + async_openai_client.chat.completions.create( + model="gpt-3.5-turbo", messages=_test_openai_chat_completion_messages, temperature=0.7, max_tokens=100 + ) + ) + + _test() + + @reset_core_stats_engine() @override_llm_token_callback_settings(llm_token_count_callback) @validate_custom_events(add_token_count_to_events(chat_completion_recorded_events))