From 08f2f8021ee96e3f3e9fec9d1aa0f69a4eba98a0 Mon Sep 17 00:00:00 2001 From: Prashant Srivastava <50466688+srprash@users.noreply.github.com> Date: Fri, 10 Jan 2025 10:40:50 -0800 Subject: [PATCH 1/9] decouple apigw-lambda sample app from adot project build (#992) The sample app doesn't need ADOT Javaagent instrumentation, rather it depends on instrumentation via lambda layer. So it makes sense to not depend on adot javaagent build for building this sample app. By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license. --- .../apigateway-lambda/settings.gradle.kts | 16 ++++++++++++++++ settings.gradle.kts | 1 - 2 files changed, 16 insertions(+), 1 deletion(-) create mode 100644 sample-apps/apigateway-lambda/settings.gradle.kts diff --git a/sample-apps/apigateway-lambda/settings.gradle.kts b/sample-apps/apigateway-lambda/settings.gradle.kts new file mode 100644 index 0000000000..0f30e4fa33 --- /dev/null +++ b/sample-apps/apigateway-lambda/settings.gradle.kts @@ -0,0 +1,16 @@ +pluginManagement { + plugins { + id("com.diffplug.spotless") version "6.13.0" + id("com.github.ben-manes.versions") version "0.50.0" + id("com.github.johnrengelman.shadow") version "8.1.1" + } +} + +dependencyResolutionManagement { + repositories { + mavenCentral() + mavenLocal() + } +} + +rootProject.name = "sample-app-apigw-lambda" diff --git a/settings.gradle.kts b/settings.gradle.kts index 7edda895ac..f6b5033352 100644 --- a/settings.gradle.kts +++ b/settings.gradle.kts @@ -51,7 +51,6 @@ include(":smoke-tests:spring-boot") include(":sample-apps:springboot") include(":sample-apps:spark") include(":sample-apps:spark-awssdkv1") -include(":sample-apps:apigateway-lambda") // Used for contract tests include("appsignals-tests:images:mock-collector") From e7870fb7c00e746c10f922f46a74ed9b6c43b618 Mon Sep 17 00:00:00 2001 From: Prashant Srivastava <50466688+srprash@users.noreply.github.com> Date: Mon, 13 Jan 2025 18:16:03 -0800 Subject: [PATCH 
2/9] add lambda release workflow - main branch (#994) Adding a manually triggered workflow to release the java lambda layers to multiple regions. Tested by running the workflow in my fork, and confirmed that the layer was successfully published in `us-east-1`. GH run: https://github.com/srprash/aws-otel-java-instrumentation/actions/runs/12738121465 By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license. --- .github/workflows/release-lambda.yml | 218 +++++++++++++++++++++++++++ 1 file changed, 218 insertions(+) create mode 100644 .github/workflows/release-lambda.yml diff --git a/.github/workflows/release-lambda.yml b/.github/workflows/release-lambda.yml new file mode 100644 index 0000000000..4fca802f6b --- /dev/null +++ b/.github/workflows/release-lambda.yml @@ -0,0 +1,218 @@ +name: Release Java Lambda layer + +on: + workflow_dispatch: + inputs: + version: + description: The version to tag the lambda release with, e.g., 1.2.0 + required: true + aws_region: + description: 'Deploy to aws regions' + required: true + default: 'us-east-1, us-east-2, us-west-1, us-west-2, ap-south-1, ap-northeast-3, ap-northeast-2, ap-southeast-1, ap-southeast-2, ap-northeast-1, ca-central-1, eu-central-1, eu-west-1, eu-west-2, eu-west-3, eu-north-1, sa-east-1, af-south-1, ap-east-1, ap-south-2, ap-southeast-3, ap-southeast-4, eu-central-2, eu-south-1, eu-south-2, il-central-1, me-central-1, me-south-1' + +env: + COMMERCIAL_REGIONS: us-east-1, us-east-2, us-west-1, us-west-2, ap-south-1, ap-northeast-3, ap-northeast-2, ap-southeast-1, ap-southeast-2, ap-northeast-1, ca-central-1, eu-central-1, eu-west-1, eu-west-2, eu-west-3, eu-north-1, sa-east-1 + LAYER_NAME: AWSOpenTelemetryDistroJava + +permissions: + id-token: write + contents: write + +jobs: + build-layer: + runs-on: ubuntu-latest + outputs: + aws_regions_json: ${{ steps.set-matrix.outputs.aws_regions_json }} + steps: + - name: Set up regions matrix + id: set-matrix + run: | 
+ IFS=',' read -ra REGIONS <<< "${{ github.event.inputs.aws_region }}" + MATRIX="[" + for region in "${REGIONS[@]}"; do + trimmed_region=$(echo "$region" | xargs) + MATRIX+="\"$trimmed_region\"," + done + MATRIX="${MATRIX%,}]" + echo ${MATRIX} + echo "aws_regions_json=${MATRIX}" >> $GITHUB_OUTPUT + + - name: Checkout Repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - uses: actions/setup-java@v4 + with: + java-version: 17 + distribution: 'temurin' + + - name: Build layers + working-directory: lambda-layer + run: | + ./build-layer.sh + + - name: Upload layer + uses: actions/upload-artifact@v3 + with: + name: aws-opentelemetry-java-layer.zip + path: lambda-layer/build/distributions/aws-opentelemetry-java-layer.zip + + publish-prod: + runs-on: ubuntu-latest + needs: build-layer + strategy: + matrix: + aws_region: ${{ fromJson(needs.build-layer.outputs.aws_regions_json) }} + steps: + - name: role arn + env: + COMMERCIAL_REGIONS: ${{ env.COMMERCIAL_REGIONS }} + run: | + COMMERCIAL_REGIONS_ARRAY=(${COMMERCIAL_REGIONS//,/ }) + FOUND=false + for REGION in "${COMMERCIAL_REGIONS_ARRAY[@]}"; do + if [[ "$REGION" == "${{ matrix.aws_region }}" ]]; then + FOUND=true + break + fi + done + if [ "$FOUND" = true ]; then + echo "Found ${{ matrix.aws_region }} in COMMERCIAL_REGIONS" + SECRET_KEY="LAMBDA_LAYER_RELEASE" + else + echo "Not found ${{ matrix.aws_region }} in COMMERCIAL_REGIONS" + SECRET_KEY="${{ matrix.aws_region }}_LAMBDA_LAYER_RELEASE" + fi + SECRET_KEY=${SECRET_KEY//-/_} + echo "SECRET_KEY=${SECRET_KEY}" >> $GITHUB_ENV + + - uses: aws-actions/configure-aws-credentials@v4.0.2 + with: + role-to-assume: ${{ secrets[env.SECRET_KEY] }} + role-duration-seconds: 1200 + aws-region: ${{ matrix.aws_region }} + + - name: Get s3 bucket name for release + run: | + echo BUCKET_NAME=java-lambda-layer-${{ github.run_id }}-${{ matrix.aws_region }} | tee --append $GITHUB_ENV + + - name: download layer.zip + uses: actions/download-artifact@v3 + with: + name: 
aws-opentelemetry-java-layer.zip + + - name: publish + run: | + aws s3 mb s3://${{ env.BUCKET_NAME }} + aws s3 cp aws-opentelemetry-java-layer.zip s3://${{ env.BUCKET_NAME }} + layerARN=$( + aws lambda publish-layer-version \ + --layer-name ${{ env.LAYER_NAME }} \ + --content S3Bucket=${{ env.BUCKET_NAME }},S3Key=aws-opentelemetry-java-layer.zip \ + --compatible-runtimes java17 java21 \ + --compatible-architectures "arm64" "x86_64" \ + --license-info "Apache-2.0" \ + --description "AWS Distro of OpenTelemetry Lambda Layer for Java Runtime" \ + --query 'LayerVersionArn' \ + --output text + ) + echo $layerARN + echo "LAYER_ARN=${layerARN}" >> $GITHUB_ENV + mkdir ${{ env.LAYER_NAME }} + echo $layerARN > ${{ env.LAYER_NAME }}/${{ matrix.aws_region }} + cat ${{ env.LAYER_NAME }}/${{ matrix.aws_region }} + + - name: public layer + run: | + layerVersion=$( + aws lambda list-layer-versions \ + --layer-name ${{ env.LAYER_NAME }} \ + --query 'max_by(LayerVersions, &Version).Version' + ) + aws lambda add-layer-version-permission \ + --layer-name ${{ env.LAYER_NAME }} \ + --version-number $layerVersion \ + --principal "*" \ + --statement-id publish \ + --action lambda:GetLayerVersion + + - name: upload layer arn artifact + if: ${{ success() }} + uses: actions/upload-artifact@v3 + with: + name: ${{ env.LAYER_NAME }} + path: ${{ env.LAYER_NAME }}/${{ matrix.aws_region }} + + - name: clean s3 + if: always() + run: | + aws s3 rb --force s3://${{ env.BUCKET_NAME }} + + generate-release-note: + runs-on: ubuntu-latest + needs: publish-prod + steps: + - name: Checkout Repo @ SHA - ${{ github.sha }} + uses: actions/checkout@v4 + + - uses: hashicorp/setup-terraform@v2 + + - name: download layerARNs + uses: actions/download-artifact@v3 + with: + name: ${{ env.LAYER_NAME }} + path: ${{ env.LAYER_NAME }} + + - name: show layerARNs + run: | + for file in ${{ env.LAYER_NAME }}/* + do + echo $file + cat $file + done + + - name: generate layer-note + working-directory: ${{ env.LAYER_NAME }} + 
run: | + echo "| Region | Layer ARN |" >> ../layer-note + echo "| ---- | ---- |" >> ../layer-note + for file in * + do + read arn < $file + echo "| " $file " | " $arn " |" >> ../layer-note + done + cd .. + cat layer-note + + - name: generate tf layer + working-directory: ${{ env.LAYER_NAME }} + run: | + echo "locals {" >> ../layer.tf + echo " sdk_layer_arns = {" >> ../layer.tf + for file in * + do + read arn < $file + echo " \""$file"\" = \""$arn"\"" >> ../layer.tf + done + cd .. + echo " }" >> layer.tf + echo "}" >> layer.tf + terraform fmt layer.tf + cat layer.tf + + - name: upload layer tf file + uses: actions/upload-artifact@v3 + with: + name: layer.tf + path: layer.tf + + - name: Create GH release + id: create_release + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # This token is provided by Actions, you do not need to create your own token + run: | + gh release create --target "$GITHUB_REF_NAME" \ + --title "Release lambda-v${{ github.event.inputs.version }}" \ + --draft \ + "lambda-v${{ github.event.inputs.version }}" \ + layer.tf From 180e294c1bc7d33d65adb04fc084d4677670da35 Mon Sep 17 00:00:00 2001 From: Prashant Srivastava <50466688+srprash@users.noreply.github.com> Date: Wed, 15 Jan 2025 15:15:38 -0800 Subject: [PATCH 3/9] Update Lambda sample app - switch to native http client +switch to ListBuckets API call (#1003) Related to https://github.com/aws-observability/aws-otel-java-instrumentation/pull/1002, we want to use native Java Http Client instead of OkHttp in the sample app. Also, I noticed that the other language sample apps ([Python](https://github.com/aws-observability/aws-otel-python-instrumentation/blob/main/lambda-layer/sample-apps/function/lambda_function.py#L15)) does a `ListBuckets` call whereas this sample app is doing `ListBucket` call. So updating that to be consistent with others. By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license. 
--- .../apigateway-lambda/build.gradle.kts | 1 - .../com/amazon/sampleapp/LambdaHandler.java | 51 +++++++++---------- .../apigateway-lambda/terraform/main.tf | 6 +-- 3 files changed, 27 insertions(+), 31 deletions(-) diff --git a/sample-apps/apigateway-lambda/build.gradle.kts b/sample-apps/apigateway-lambda/build.gradle.kts index 66992540ab..25b47e9e44 100644 --- a/sample-apps/apigateway-lambda/build.gradle.kts +++ b/sample-apps/apigateway-lambda/build.gradle.kts @@ -15,7 +15,6 @@ java { dependencies { implementation("com.amazonaws:aws-lambda-java-core:1.2.2") - implementation("com.squareup.okhttp3:okhttp:4.11.0") implementation("software.amazon.awssdk:s3:2.29.23") implementation("org.json:json:20240303") implementation("org.slf4j:jcl-over-slf4j:2.0.16") diff --git a/sample-apps/apigateway-lambda/src/main/java/com/amazon/sampleapp/LambdaHandler.java b/sample-apps/apigateway-lambda/src/main/java/com/amazon/sampleapp/LambdaHandler.java index f3e11bc38d..bc8a7543ac 100644 --- a/sample-apps/apigateway-lambda/src/main/java/com/amazon/sampleapp/LambdaHandler.java +++ b/sample-apps/apigateway-lambda/src/main/java/com/amazon/sampleapp/LambdaHandler.java @@ -3,18 +3,20 @@ import com.amazonaws.services.lambda.runtime.Context; import com.amazonaws.services.lambda.runtime.RequestHandler; import java.io.IOException; +import java.net.URI; +import java.net.http.HttpClient; +import java.net.http.HttpRequest; +import java.net.http.HttpResponse; +import java.util.HashMap; import java.util.Map; -import okhttp3.OkHttpClient; -import okhttp3.Request; -import okhttp3.Response; import org.json.JSONObject; import software.amazon.awssdk.services.s3.S3Client; -import software.amazon.awssdk.services.s3.model.HeadBucketRequest; +import software.amazon.awssdk.services.s3.model.ListBucketsResponse; import software.amazon.awssdk.services.s3.model.S3Exception; public class LambdaHandler implements RequestHandler> { - private final OkHttpClient client = new OkHttpClient(); + HttpClient client = 
HttpClient.newHttpClient(); private final S3Client s3Client = S3Client.create(); @Override @@ -36,35 +38,30 @@ public Map handleRequest(Object input, Context context) { responseBody.put("traceId", traceId); // Make a remote call using OkHttp - System.out.println("Making a remote call using OkHttp"); - String url = "https://www.amazon.com"; - Request request = new Request.Builder().url(url).build(); - - try (Response response = client.newCall(request).execute()) { + System.out.println("Making a remote call using Java HttpClient"); + String url = "https://aws.amazon.com/"; + HttpRequest request = HttpRequest.newBuilder() + .uri(URI.create(url)) + .GET() + .build(); + try { + HttpResponse response = client.send(request, HttpResponse.BodyHandlers.ofString()); + System.out.println("Response status code: " + response.statusCode()); responseBody.put("httpRequest", "Request successful"); - } catch (IOException e) { - context.getLogger().log("Error: " + e.getMessage()); + } catch (Exception e) { + System.err.println("Error: " + e.getMessage()); responseBody.put("httpRequest", "Request failed"); } System.out.println("Remote call done"); - // Make a S3 HeadBucket call to check whether the bucket exists - System.out.println("Making a S3 HeadBucket call"); - String bucketName = "SomeDummyBucket"; + // Make a S3 ListBuckets call to list the S3 buckets in the account + System.out.println("Making a S3 ListBuckets call"); try { - HeadBucketRequest headBucketRequest = HeadBucketRequest.builder().bucket(bucketName).build(); - s3Client.headBucket(headBucketRequest); - responseBody.put("s3Request", "Bucket exists and is accessible: " + bucketName); + ListBucketsResponse listBucketsResponse = s3Client.listBuckets(); + responseBody.put("s3Request", "ListBuckets successful"); } catch (S3Exception e) { - if (e.statusCode() == 403) { - responseBody.put("s3Request", "Access denied to bucket: " + bucketName); - } else if (e.statusCode() == 404) { - responseBody.put("s3Request", "Bucket does 
not exist: " + bucketName); - } else { - System.err.println("Error checking bucket: " + e.awsErrorDetails().errorMessage()); - responseBody.put( - "s3Request", "Error checking bucket: " + e.awsErrorDetails().errorMessage()); - } + System.err.println("Error listing buckets: " + e.awsErrorDetails().errorMessage()); + responseBody.put("s3Request", "Error listing buckets: " + e.awsErrorDetails().errorMessage()); } System.out.println("S3 HeadBucket call done"); diff --git a/sample-apps/apigateway-lambda/terraform/main.tf b/sample-apps/apigateway-lambda/terraform/main.tf index 6881f0e1ce..0e37647ed0 100644 --- a/sample-apps/apigateway-lambda/terraform/main.tf +++ b/sample-apps/apigateway-lambda/terraform/main.tf @@ -16,13 +16,13 @@ resource "aws_iam_role" "lambda_role" { } resource "aws_iam_policy" "s3_access" { - name = "S3ListBucketPolicy" - description = "Allow Lambda to check a given S3 bucket exists" + name = "S3ListBucketsPolicy" + description = "Allow Lambda to list buckets" policy = jsonencode({ Version = "2012-10-17", Statement = [{ Effect = "Allow", - Action = ["s3:ListBucket"], + Action = ["s3:ListAllMyBuckets"], Resource = "*" }] }) From 3893a5ea435924fdeca1b13d9b5730702e081bba Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 24 Jan 2025 10:48:48 -0800 Subject: [PATCH 4/9] Bump io.grpc:protoc-gen-grpc-java from 1.56.1 to 1.69.1 (#1006) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [io.grpc:protoc-gen-grpc-java](https://github.com/grpc/grpc-java) from 1.56.1 to 1.69.1.
Release notes

Sourced from io.grpc:protoc-gen-grpc-java's releases.

v1.69.1

Bug Fixes

  • okhttp: Improve certificate handling by rejecting non-ASCII subject alternative names and hostnames as seen in CVE-2021-0341 (#11749) (a0982ca0a). Hostnames are considered trusted and CAs are required to use punycode for non-ASCII hostnames, so this is expected to provide defense-in-depth. See also the related GoSecure blog post and the AOSP fix
  • xds: Preserve nonce when unsubscribing last watcher of a particular type so that new discovery requests of that type are handled correctly (1cf1927d1). This (along with 1cf1927d1) fixes a nonce-handling regression introduced in 1.66.0 that could cause resources to appear to not exist until re-creating the ADS stream. Triggering the behavior required specific config changes. It is easiest to trigger when clusters use EDS and routes are changed from one cluster to another. The error “found 0 leaf (logical DNS or EDS) clusters for root cluster” might then be seen
  • xds: Remember nonces for unknown types (6c12c2bd2)
  • xds: Unexpected types in the bootstrap’s server_features should be ignored (e8ff6da2c). They were previously required to be strings
  • xds: Fixed unsupported unsigned 32 bits issue for circuit breaker (#11735) (f8f613984). This fixes clients treating large max_requests as “no requests” and failing all requests
  • xds: Remove xds authority label from metric registration (#11760) (6516c7387). This fixes the error “Incorrect number of required labels provided. Expected: 4” introduced in 1.69.0

v1.69.0

v1.69.0

New Features

  • api: Allow LoadBalancers to specify an authority per-RPC.(#11631) (c167ead85) CallOptions.withAuthority() has higher precedence.
  • netty: Add soft Metadata size limit enforcement. (#11603) (735b3f3fe) The soft limit is a lower size limit that fails an increasing percentage of RPCs as the Metadata size approaches the upper limit. This can be used as an “early warning” that the Metadata size is growing too large
  • alts: support altsCallCredentials in GoogleDefaultChannelCredentials (#11634) (ba8ab796e)
  • xds: Add grpc.xds_client metrics, as documented by OpenTelemetry Metrics (#11661) (20d09cee5). grpc.xds.authority is not yet available

Bug Fixes

  • api: When forwarding from Listener onAddresses to Listener2 continue to use onResult (#11666) (dae078c0a). This fixes a 1.68.1 "IllegalStateException: Not called from the SynchronizationContext" regression (#11662) that could be seen in certain custom NameResolvers
  • okhttp: If the frame handler thread is null do not schedule it on the executor (ef1fe8737). This fixes a 1.68.1 NullPointerException regression when a custom transportExecutor was provided to the channel and it did not have enough threads to run new tasks

Improvements

  • api: Add java.time.Duration overloads to CallOptions, AbstractStub methods that take TimeUnit and a time value (#11562) (766b92379)
  • core: Make timestamp usage in Channelz use nanos from Java.time.Instant when available (#11604) (9176b5528). This increases the timestamp precision from milliseconds
  • okhttp: Fix for ipv6 link local with scope (#11725) (e98e7445b)
  • binder: Let AndroidComponentAddress specify a target UserHandle (#11670) (e58c998a4)
  • servlet: Deframe failures should be logged on the server as warnings (#11645) (a5db67d0c)
  • s2a: Rename the Bazel target s2av2_credentials to s2a (29dd9bad3). The target s2a had been referenced by IO_GRPC_GRPC_JAVA_OVERRIDE_TARGETS but didn’t previously exist
  • services: Make channelz work with proto lite (#11685) (b1703345f). This compatibility is on the source level. There is not a pre-built binary on Maven Central that supports proto lite
  • services: Deprecate ProtoReflectionService (#11681) (921f88ae3). The class implements the deprecated v1alpha of the reflection protocol. Prefer ProtoReflectionServiceV1, which implements the v1 version of the reflection protocol

Dependencies

  • Upgrade proto-google-common-protos to 2.48.0 (1993e68b0)
  • Upgrade google-auth-library to 1.24.1 (1993e68b0)
  • Upgrade error_prone_annotations to 2.30.0 (1993e68b0)
  • Upgrade Guava to 33.3.1-android (1993e68b0)
  • Upgrade opentelemetry-api to 1.43.0 (1993e68b0)
  • xds: Remove Bazel dependency on xds v2 (664f1fcf8). This had been done for the Maven Central binaries in 1.63.0, but had been missed for Bazel builds

Documentation

  • binder: Update error codes doc for new "Safer Intent" rules. (#11639) (fe350cfd5)
  • examples: Use xds-enabled server and xds credentials in example-gcp-csm-observability (#11706) (a79982c7f)

... (truncated)

Commits
  • 5e14f99 Bump version to 1.69.1
  • d13bc25 Update README etc to reference 1.69.1
  • 05d9628 fix security issue with okhttp (#11749)
  • 112dccb xds: fixed unsupported unsigned 32 bits issue for circuit breaker (#11735)
  • 0de7bfe xds: Remove xds authority label from metric registration
  • e3e343d xds: Remember nonces for unknown types
  • aba8a0c xds: Preserve nonce when unsubscribing type
  • ded82e2 xds: Unexpected types in server_features should be ignored
  • eccb137 Bump version to 1.69.1-SNAPSHOT
  • 34a7cff Bump version to 1.69.0
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=io.grpc:protoc-gen-grpc-java&package-manager=gradle&previous-version=1.56.1&new-version=1.69.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- appsignals-tests/images/grpc/grpc-base/build.gradle.kts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/appsignals-tests/images/grpc/grpc-base/build.gradle.kts b/appsignals-tests/images/grpc/grpc-base/build.gradle.kts index b945d29474..580d561592 100644 --- a/appsignals-tests/images/grpc/grpc-base/build.gradle.kts +++ b/appsignals-tests/images/grpc/grpc-base/build.gradle.kts @@ -36,7 +36,7 @@ protobuf { } plugins { create("grpc") { - artifact = "io.grpc:protoc-gen-grpc-java:1.56.1" + artifact = "io.grpc:protoc-gen-grpc-java:1.69.1" } } generateProtoTasks { From 3645a8aa5b6573641692f83e54b486c6fb801c0b Mon Sep 17 00:00:00 2001 From: Jeel-mehta <72543735+Jeel-mehta@users.noreply.github.com> Date: Tue, 28 Jan 2025 11:16:48 -0800 Subject: [PATCH 5/9] [Java] Add ubuntu test (#1009) *Issue #, if available:* The current instrumentation was lacking an Ubuntu test *Description of changes:* Added the Ubuntu test to the instrumentation This is the workflow link: https://github.com/Jeel-mehta/aws-application-signals-test-framework/actions/runs/13001021774 By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license. 
Co-authored-by: Jeel Mehta --- .github/workflows/application-signals-e2e-test.yml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/.github/workflows/application-signals-e2e-test.yml b/.github/workflows/application-signals-e2e-test.yml index dcfe5d1adb..823f492101 100644 --- a/.github/workflows/application-signals-e2e-test.yml +++ b/.github/workflows/application-signals-e2e-test.yml @@ -205,6 +205,19 @@ jobs: java-version: '11' cpu-architecture: 'arm64' + # + # UBUNTU COVERAGE + # DEFAULT SETTING: Java 11, EC2, AMD64, Ubuntu + # + + v11-amd64-ubuntu: + needs: [ upload-main-build ] + uses: aws-observability/aws-application-signals-test-framework/.github/workflows/java-ec2-ubuntu-test.yml@main + secrets: inherit + with: + aws-region: us-east-1 + caller-workflow-name: 'main-build' + # # Other Functional Test Case # From 2740c561cff00cfe7f6c7473de2022c13370ccc1 Mon Sep 17 00:00:00 2001 From: Prashant Srivastava <50466688+srprash@users.noreply.github.com> Date: Tue, 4 Feb 2025 11:13:51 -0800 Subject: [PATCH 6/9] bump up the upload-download artifact action to v4 (#1012) *Issue #, if available:* The `upload-artifacts` and the `download-artifacts` v3 are deprecated and causing the workflows to be stopped. https://github.blog/changelog/2024-04-16-deprecation-notice-v3-of-the-artifact-actions/ By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license. 
--- .github/workflows/application-signals-e2e-test.yml | 2 +- .github/workflows/main-build.yml | 2 +- .github/workflows/nightly-upstream-snapshot-build.yml | 2 +- .github/workflows/release-lambda.yml | 10 +++++----- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/application-signals-e2e-test.yml b/.github/workflows/application-signals-e2e-test.yml index 823f492101..20854a0b85 100644 --- a/.github/workflows/application-signals-e2e-test.yml +++ b/.github/workflows/application-signals-e2e-test.yml @@ -31,7 +31,7 @@ jobs: role-to-assume: arn:aws:iam::${{ secrets.APPLICATION_SIGNALS_E2E_TEST_ACCOUNT_ID }}:role/${{ secrets.APPLICATION_SIGNALS_E2E_TEST_ROLE_NAME }} aws-region: us-east-1 - - uses: actions/download-artifact@v3 + - uses: actions/download-artifact@v4 with: name: aws-opentelemetry-agent.jar diff --git a/.github/workflows/main-build.yml b/.github/workflows/main-build.yml index 89870e82be..12563cf2e6 100644 --- a/.github/workflows/main-build.yml +++ b/.github/workflows/main-build.yml @@ -128,7 +128,7 @@ jobs: snapshot-ecr-role: ${{ secrets.JAVA_INSTRUMENTATION_SNAPSHOT_ECR }} - name: Upload to GitHub Actions - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: aws-opentelemetry-agent.jar path: otelagent/build/libs/aws-opentelemetry-agent-*.jar diff --git a/.github/workflows/nightly-upstream-snapshot-build.yml b/.github/workflows/nightly-upstream-snapshot-build.yml index dde8608042..c97db6704c 100644 --- a/.github/workflows/nightly-upstream-snapshot-build.yml +++ b/.github/workflows/nightly-upstream-snapshot-build.yml @@ -95,7 +95,7 @@ jobs: snapshot-ecr-role: ${{ secrets.JAVA_INSTRUMENTATION_SNAPSHOT_ECR }} - name: Upload to GitHub Actions - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: aws-opentelemetry-agent.jar path: otelagent/build/libs/aws-opentelemetry-agent-*.jar diff --git a/.github/workflows/release-lambda.yml b/.github/workflows/release-lambda.yml index 
4fca802f6b..851ae877cd 100644 --- a/.github/workflows/release-lambda.yml +++ b/.github/workflows/release-lambda.yml @@ -52,7 +52,7 @@ jobs: ./build-layer.sh - name: Upload layer - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: aws-opentelemetry-java-layer.zip path: lambda-layer/build/distributions/aws-opentelemetry-java-layer.zip @@ -97,7 +97,7 @@ jobs: echo BUCKET_NAME=java-lambda-layer-${{ github.run_id }}-${{ matrix.aws_region }} | tee --append $GITHUB_ENV - name: download layer.zip - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: aws-opentelemetry-java-layer.zip @@ -138,7 +138,7 @@ jobs: - name: upload layer arn artifact if: ${{ success() }} - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: ${{ env.LAYER_NAME }} path: ${{ env.LAYER_NAME }}/${{ matrix.aws_region }} @@ -158,7 +158,7 @@ jobs: - uses: hashicorp/setup-terraform@v2 - name: download layerARNs - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: ${{ env.LAYER_NAME }} path: ${{ env.LAYER_NAME }} @@ -201,7 +201,7 @@ jobs: cat layer.tf - name: upload layer tf file - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: layer.tf path: layer.tf From aafbabeebf9b270ed5347fd8b49d402c20b3257c Mon Sep 17 00:00:00 2001 From: Harry Date: Thu, 13 Feb 2025 13:22:43 -0800 Subject: [PATCH 7/9] Remove spans with ec2 metadata ip address from metrics (#1015) *Issue #, if available:* ADOT SDK resource detectors by default have enabled a few AWS resource detector which will call EC2 metadata API endpoints. These activities have been captured by auto-instrumentation and generated AppSignals metrics. These calls should be present in traces, but not in metrics. 
*Description of changes:* Suppress AwsSpanMetricsProcessor from generating metrics when the RemoteService points to `169.254.169.254` *Testing* Deployed two EC2 instance with Java sample application and ADOT Java - `sample-application-2025-02-06-test`: ADOT Java with changes in this PR - `sample-application-2025-02-06`: Public latest ADOT Java *Traces:* Present from both EC2 instance ![image](https://github.com/user-attachments/assets/9c10a81d-0336-473f-81f3-9d4e61069401) ![image](https://github.com/user-attachments/assets/8be02cc9-dddb-4461-834e-98ad65d5a23f) *Metrics*: Present only for `sample-application-2025-02-06`: ![image](https://github.com/user-attachments/assets/97d29884-7133-47b5-aa0e-c5ac7a09cc3e) *Logs*: Present only for `sample-application-2025-02-06`: By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license. --- .../providers/AwsSpanMetricsProcessor.java | 16 ++++++++++++- .../AwsSpanMetricsProcessorTest.java | 24 +++++++++++++++++++ 2 files changed, 39 insertions(+), 1 deletion(-) diff --git a/awsagentprovider/src/main/java/software/amazon/opentelemetry/javaagent/providers/AwsSpanMetricsProcessor.java b/awsagentprovider/src/main/java/software/amazon/opentelemetry/javaagent/providers/AwsSpanMetricsProcessor.java index b8479dbedd..c2f133a48d 100644 --- a/awsagentprovider/src/main/java/software/amazon/opentelemetry/javaagent/providers/AwsSpanMetricsProcessor.java +++ b/awsagentprovider/src/main/java/software/amazon/opentelemetry/javaagent/providers/AwsSpanMetricsProcessor.java @@ -17,6 +17,7 @@ import static io.opentelemetry.semconv.SemanticAttributes.HTTP_RESPONSE_STATUS_CODE; import static io.opentelemetry.semconv.SemanticAttributes.HTTP_STATUS_CODE; +import static software.amazon.opentelemetry.javaagent.providers.AwsAttributeKeys.AWS_REMOTE_SERVICE; import static software.amazon.opentelemetry.javaagent.providers.AwsSpanProcessingUtil.isKeyPresent; import io.opentelemetry.api.common.Attributes; 
@@ -61,6 +62,10 @@ public final class AwsSpanMetricsProcessor implements SpanProcessor { private static final int FAULT_CODE_LOWER_BOUND = 500; private static final int FAULT_CODE_UPPER_BOUND = 599; + // EC2 Metadata API IP Address + // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html#instancedata-inside-access + private final String EC2_METADATA_API_IP = "169.254.169.254"; + // Metric instruments private final LongHistogram errorHistogram; private final LongHistogram faultHistogram; @@ -172,9 +177,18 @@ private void recordLatency(ReadableSpan span, Attributes attributes) { private void recordMetrics(ReadableSpan span, SpanData spanData, Attributes attributes) { // Only record metrics if non-empty attributes are returned. - if (!attributes.isEmpty()) { + if (!attributes.isEmpty() && !isEc2MetadataSpan((attributes))) { recordErrorOrFault(spanData, attributes); recordLatency(span, attributes); } } + + private boolean isEc2MetadataSpan(Attributes attributes) { + if (attributes.get(AWS_REMOTE_SERVICE) != null + && attributes.get(AWS_REMOTE_SERVICE).equals(EC2_METADATA_API_IP)) { + return true; + } + + return false; + } } diff --git a/awsagentprovider/src/test/java/software/amazon/opentelemetry/javaagent/providers/AwsSpanMetricsProcessorTest.java b/awsagentprovider/src/test/java/software/amazon/opentelemetry/javaagent/providers/AwsSpanMetricsProcessorTest.java index 16fc889cec..65bba3a513 100644 --- a/awsagentprovider/src/test/java/software/amazon/opentelemetry/javaagent/providers/AwsSpanMetricsProcessorTest.java +++ b/awsagentprovider/src/test/java/software/amazon/opentelemetry/javaagent/providers/AwsSpanMetricsProcessorTest.java @@ -25,6 +25,7 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.verifyNoInteractions; import static org.mockito.Mockito.when; +import static software.amazon.opentelemetry.javaagent.providers.AwsAttributeKeys.AWS_REMOTE_SERVICE; import static 
software.amazon.opentelemetry.javaagent.providers.MetricAttributeGenerator.DEPENDENCY_METRIC; import static software.amazon.opentelemetry.javaagent.providers.MetricAttributeGenerator.SERVICE_METRIC; @@ -378,6 +379,21 @@ public void testOnEndMetricsGenerationWithStatusDataOk() { validateMetricsGeneratedForStatusDataOk(600L, ExpectedStatusMetric.NEITHER); } + @Test + public void testOnEndMetricsGenerationFromEc2MetadataApi() { + Attributes spanAttributes = Attributes.of(AWS_REMOTE_SERVICE, "169.254.169.254"); + ReadableSpan readableSpanMock = + buildReadableSpanMock( + spanAttributes, SpanKind.CLIENT, SpanContext.getInvalid(), StatusData.unset()); + Map metricAttributesMap = buildEc2MetadataApiMetricAttributes(); + configureMocksForOnEnd(readableSpanMock, metricAttributesMap); + + awsSpanMetricsProcessor.onEnd(readableSpanMock); + verifyNoInteractions(errorHistogramMock); + verifyNoInteractions(faultHistogramMock); + verifyNoInteractions(latencyHistogramMock); + } + private static Attributes buildSpanAttributes(boolean containsAttribute) { if (containsAttribute) { return Attributes.of(AttributeKey.stringKey("original key"), "original value"); @@ -404,6 +420,14 @@ private static Map buildMetricAttributes( return attributesMap; } + private static Map buildEc2MetadataApiMetricAttributes() { + Map attributesMap = new HashMap<>(); + Attributes attributes = + Attributes.of(AttributeKey.stringKey(AWS_REMOTE_SERVICE.toString()), "169.254.169.254"); + attributesMap.put(MetricAttributeGenerator.DEPENDENCY_METRIC, attributes); + return attributesMap; + } + private static ReadableSpan buildReadableSpanMock(Attributes spanAttributes) { return buildReadableSpanMock(spanAttributes, SpanKind.SERVER, null, StatusData.unset()); } From 51bec573ff2994c314127b8724c25111fbd1ce7c Mon Sep 17 00:00:00 2001 From: Steve Liu Date: Mon, 17 Feb 2025 14:07:12 -0800 Subject: [PATCH 8/9] Enable baggage propagator (#1020) *Description of changes:* Enabling baggage propagator to support Lineage 
propagation: https://github.com/open-telemetry/opentelemetry-java-contrib/pull/1671 By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license. --- .../providers/AwsAgentPropertiesCustomizerProvider.java | 2 +- lambda-layer/otel-instrument | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/awsagentprovider/src/main/java/software/amazon/opentelemetry/javaagent/providers/AwsAgentPropertiesCustomizerProvider.java b/awsagentprovider/src/main/java/software/amazon/opentelemetry/javaagent/providers/AwsAgentPropertiesCustomizerProvider.java index 8d35951fb1..073e345de0 100644 --- a/awsagentprovider/src/main/java/software/amazon/opentelemetry/javaagent/providers/AwsAgentPropertiesCustomizerProvider.java +++ b/awsagentprovider/src/main/java/software/amazon/opentelemetry/javaagent/providers/AwsAgentPropertiesCustomizerProvider.java @@ -26,7 +26,7 @@ public void customize(AutoConfigurationCustomizer autoConfiguration) { () -> new HashMap() { { - put("otel.propagators", "xray,tracecontext,b3,b3multi"); + put("otel.propagators", "baggage,xray,tracecontext,b3,b3multi"); put("otel.instrumentation.aws-sdk.experimental-span-attributes", "true"); put( "otel.instrumentation.aws-sdk.experimental-record-individual-http-error", diff --git a/lambda-layer/otel-instrument b/lambda-layer/otel-instrument index 450eb925a5..f59a163d32 100644 --- a/lambda-layer/otel-instrument +++ b/lambda-layer/otel-instrument @@ -2,7 +2,7 @@ export OTEL_INSTRUMENTATION_AWS_SDK_EXPERIMENTAL_SPAN_ATTRIBUTES=true -export OTEL_PROPAGATORS="${OTEL_PROPAGATORS:-xray,tracecontext,b3,b3multi}" +export OTEL_PROPAGATORS="${OTEL_PROPAGATORS:-baggage,xray,tracecontext,b3,b3multi}" export OTEL_SERVICE_NAME=${OTEL_SERVICE_NAME:-${AWS_LAMBDA_FUNCTION_NAME}} From 83e5adee3ba93630d66e7a87baf03fd863c33ed3 Mon Sep 17 00:00:00 2001 From: Steve Liu Date: Thu, 20 Feb 2025 13:52:52 -0800 Subject: [PATCH 9/9] SigV4 Authentication support for http exporter 
(#1019) Issue #, if available: Adding SigV4 Authentication extension for Exporting traces to OTLP CloudWatch endpoint without needing to explictily install the collector. Description of changes: Added a new class that extends upstream's OTLP http span exporter. Overrides the export method so that if the endpoint is CW, we add an extra step of injecting SigV4 authentication to the headers. By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license. --------- Co-authored-by: Mahad Janjua --- awsagentprovider/build.gradle.kts | 5 +- ...sApplicationSignalsCustomizerProvider.java | 25 +++ .../providers/OtlpAwsSpanExporter.java | 159 +++++++++++++ .../providers/OtlpAwsSpanExporterTest.java | 211 ++++++++++++++++++ 4 files changed, 399 insertions(+), 1 deletion(-) create mode 100644 awsagentprovider/src/main/java/software/amazon/opentelemetry/javaagent/providers/OtlpAwsSpanExporter.java create mode 100644 awsagentprovider/src/test/java/software/amazon/opentelemetry/javaagent/providers/OtlpAwsSpanExporterTest.java diff --git a/awsagentprovider/build.gradle.kts b/awsagentprovider/build.gradle.kts index 7e9211052e..6b9b75e3d5 100644 --- a/awsagentprovider/build.gradle.kts +++ b/awsagentprovider/build.gradle.kts @@ -41,9 +41,12 @@ dependencies { // Import AWS SDK v1 core for ARN parsing utilities implementation("com.amazonaws:aws-java-sdk-core:1.12.773") // Export configuration - compileOnly("io.opentelemetry:opentelemetry-exporter-otlp") + implementation("io.opentelemetry:opentelemetry-exporter-otlp") // For Udp emitter compileOnly("io.opentelemetry:opentelemetry-exporter-otlp-common") + // For HTTP SigV4 emitter + implementation("software.amazon.awssdk:auth:2.30.14") + implementation("software.amazon.awssdk:http-auth-aws:2.30.14") testImplementation("io.opentelemetry:opentelemetry-sdk-extension-autoconfigure") testImplementation("io.opentelemetry:opentelemetry-sdk-testing") diff --git 
a/awsagentprovider/src/main/java/software/amazon/opentelemetry/javaagent/providers/AwsApplicationSignalsCustomizerProvider.java b/awsagentprovider/src/main/java/software/amazon/opentelemetry/javaagent/providers/AwsApplicationSignalsCustomizerProvider.java index b3d04a7a8c..9f023c119f 100644 --- a/awsagentprovider/src/main/java/software/amazon/opentelemetry/javaagent/providers/AwsApplicationSignalsCustomizerProvider.java +++ b/awsagentprovider/src/main/java/software/amazon/opentelemetry/javaagent/providers/AwsApplicationSignalsCustomizerProvider.java @@ -51,6 +51,7 @@ import java.util.Set; import java.util.logging.Level; import java.util.logging.Logger; +import java.util.regex.Pattern; /** * This customizer performs the following customizations: @@ -70,6 +71,8 @@ public class AwsApplicationSignalsCustomizerProvider implements AutoConfigurationCustomizerProvider { static final String AWS_LAMBDA_FUNCTION_NAME_CONFIG = "AWS_LAMBDA_FUNCTION_NAME"; + private static final String XRAY_OTLP_ENDPOINT_PATTERN = + "^https://xray\\.([a-z0-9-]+)\\.amazonaws\\.com/v1/traces$"; private static final Duration DEFAULT_METRIC_EXPORT_INTERVAL = Duration.ofMinutes(1); private static final Logger logger = @@ -121,6 +124,16 @@ static boolean isLambdaEnvironment() { return System.getenv(AWS_LAMBDA_FUNCTION_NAME_CONFIG) != null; } + static boolean isXrayOtlpEndpoint(String otlpEndpoint) { + if (otlpEndpoint == null) { + return false; + } + + return Pattern.compile(XRAY_OTLP_ENDPOINT_PATTERN) + .matcher(otlpEndpoint.toLowerCase()) + .matches(); + } + private boolean isApplicationSignalsEnabled(ConfigProperties configProps) { return configProps.getBoolean( APPLICATION_SIGNALS_ENABLED_CONFIG, @@ -221,6 +234,10 @@ private SdkTracerProviderBuilder customizeTracerProviderBuilder( return tracerProviderBuilder; } + if (isXrayOtlpEndpoint(System.getenv(OTEL_EXPORTER_OTLP_TRACES_ENDPOINT_CONFIG))) { + return tracerProviderBuilder; + } + // Construct meterProvider MetricExporter metricsExporter = 
ApplicationSignalsExporterProvider.INSTANCE.createExporter(configProps); @@ -286,6 +303,14 @@ private SpanExporter customizeSpanExporter( .build(); } } + // When running OTLP endpoint for X-Ray backend, use custom exporter for SigV4 authentication + else if (spanExporter instanceof OtlpHttpSpanExporter + && isXrayOtlpEndpoint(System.getenv(OTEL_EXPORTER_OTLP_TRACES_ENDPOINT_CONFIG))) { + spanExporter = + new OtlpAwsSpanExporter( + (OtlpHttpSpanExporter) spanExporter, + System.getenv(OTEL_EXPORTER_OTLP_TRACES_ENDPOINT_CONFIG)); + } if (isApplicationSignalsEnabled(configProps)) { return AwsMetricAttributesSpanExporterBuilder.create( diff --git a/awsagentprovider/src/main/java/software/amazon/opentelemetry/javaagent/providers/OtlpAwsSpanExporter.java b/awsagentprovider/src/main/java/software/amazon/opentelemetry/javaagent/providers/OtlpAwsSpanExporter.java new file mode 100644 index 0000000000..c4a777dfe5 --- /dev/null +++ b/awsagentprovider/src/main/java/software/amazon/opentelemetry/javaagent/providers/OtlpAwsSpanExporter.java @@ -0,0 +1,159 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package software.amazon.opentelemetry.javaagent.providers; + +import io.opentelemetry.exporter.internal.otlp.traces.TraceRequestMarshaler; +import io.opentelemetry.exporter.otlp.http.trace.OtlpHttpSpanExporter; +import io.opentelemetry.sdk.common.CompletableResultCode; +import io.opentelemetry.sdk.trace.data.SpanData; +import io.opentelemetry.sdk.trace.export.SpanExporter; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.net.URI; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.Supplier; +import javax.annotation.concurrent.Immutable; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import software.amazon.awssdk.auth.credentials.AwsCredentials; +import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.http.SdkHttpRequest; +import software.amazon.awssdk.http.auth.aws.signer.AwsV4HttpSigner; +import software.amazon.awssdk.http.auth.spi.signer.SignedRequest; + +/** + * This exporter extends the functionality of the OtlpHttpSpanExporter to allow spans to be exported + * to the XRay OTLP endpoint https://xray.[AWSRegion].amazonaws.com/v1/traces. Utilizes the AWSSDK + * library to sign and directly inject SigV4 Authentication to the exported request's headers. ... 
+ */ +@Immutable +public class OtlpAwsSpanExporter implements SpanExporter { + private static final String SERVICE_NAME = "xray"; + private static final Logger logger = LoggerFactory.getLogger(OtlpAwsSpanExporter.class); + + private final OtlpHttpSpanExporter parentExporter; + private final String awsRegion; + private final String endpoint; + private Collection spanData; + + public OtlpAwsSpanExporter(String endpoint) { + this.parentExporter = + OtlpHttpSpanExporter.builder() + .setEndpoint(endpoint) + .setHeaders(new SigV4AuthHeaderSupplier()) + .build(); + + this.awsRegion = endpoint.split("\\.")[1]; + this.endpoint = endpoint; + this.spanData = new ArrayList<>(); + } + + public OtlpAwsSpanExporter(OtlpHttpSpanExporter parentExporter, String endpoint) { + this.parentExporter = + parentExporter.toBuilder() + .setEndpoint(endpoint) + .setHeaders(new SigV4AuthHeaderSupplier()) + .build(); + + this.awsRegion = endpoint.split("\\.")[1]; + this.endpoint = endpoint; + this.spanData = new ArrayList<>(); + } + + /** + * Overrides the upstream implementation of export. All behaviors are the same except if the + * endpoint is an XRay OTLP endpoint, we will sign the request with SigV4 in headers before + * sending it to the endpoint. Otherwise, we will skip signing. 
+ */ + @Override + public CompletableResultCode export(Collection spans) { + this.spanData = spans; + return this.parentExporter.export(spans); + } + + @Override + public CompletableResultCode flush() { + return this.parentExporter.flush(); + } + + @Override + public CompletableResultCode shutdown() { + return this.parentExporter.shutdown(); + } + + @Override + public String toString() { + return this.parentExporter.toString(); + } + + private final class SigV4AuthHeaderSupplier implements Supplier> { + + @Override + public Map get() { + try { + ByteArrayOutputStream encodedSpans = new ByteArrayOutputStream(); + TraceRequestMarshaler.create(OtlpAwsSpanExporter.this.spanData).writeBinaryTo(encodedSpans); + + SdkHttpRequest httpRequest = + SdkHttpFullRequest.builder() + .uri(URI.create(OtlpAwsSpanExporter.this.endpoint)) + .method(SdkHttpMethod.POST) + .putHeader("Content-Type", "application/x-protobuf") + .contentStreamProvider(() -> new ByteArrayInputStream(encodedSpans.toByteArray())) + .build(); + + AwsCredentials credentials = DefaultCredentialsProvider.create().resolveCredentials(); + + SignedRequest signedRequest = + AwsV4HttpSigner.create() + .sign( + b -> + b.identity(credentials) + .request(httpRequest) + .putProperty(AwsV4HttpSigner.SERVICE_SIGNING_NAME, SERVICE_NAME) + .putProperty( + AwsV4HttpSigner.REGION_NAME, OtlpAwsSpanExporter.this.awsRegion) + .payload(() -> new ByteArrayInputStream(encodedSpans.toByteArray()))); + + Map result = new HashMap<>(); + + Map> headers = signedRequest.request().headers(); + headers.forEach( + (key, values) -> { + if (!values.isEmpty()) { + result.put(key, values.get(0)); + } + }); + + return result; + + } catch (Exception e) { + logger.error( + "Failed to sign/authenticate the given exported Span request to OTLP CloudWatch endpoint with error: {}", + e.getMessage()); + + return new HashMap<>(); + } + } + } +} diff --git 
a/awsagentprovider/src/test/java/software/amazon/opentelemetry/javaagent/providers/OtlpAwsSpanExporterTest.java b/awsagentprovider/src/test/java/software/amazon/opentelemetry/javaagent/providers/OtlpAwsSpanExporterTest.java new file mode 100644 index 0000000000..252ae3e900 --- /dev/null +++ b/awsagentprovider/src/test/java/software/amazon/opentelemetry/javaagent/providers/OtlpAwsSpanExporterTest.java @@ -0,0 +1,211 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package software.amazon.opentelemetry.javaagent.providers; + +import static org.junit.jupiter.api.Assertions.*; +import static org.mockito.Mockito.*; +import static org.mockito.Mockito.when; + +import io.opentelemetry.exporter.otlp.http.trace.OtlpHttpSpanExporter; +import io.opentelemetry.exporter.otlp.http.trace.OtlpHttpSpanExporterBuilder; +import io.opentelemetry.sdk.common.CompletableResultCode; +import io.opentelemetry.sdk.trace.export.SpanExporter; +import java.net.URI; +import java.util.List; +import java.util.Map; +import java.util.function.Consumer; +import java.util.function.Supplier; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.ArgumentCaptor; +import org.mockito.Mock; +import org.mockito.MockedStatic; +import org.mockito.junit.jupiter.MockitoExtension; +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import 
software.amazon.awssdk.auth.credentials.AwsCredentials; +import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider; +import software.amazon.awssdk.core.exception.SdkClientException; +import software.amazon.awssdk.http.SdkHttpFullRequest; +import software.amazon.awssdk.http.SdkHttpMethod; +import software.amazon.awssdk.http.auth.aws.signer.AwsV4HttpSigner; +import software.amazon.awssdk.http.auth.spi.signer.SignRequest.Builder; +import software.amazon.awssdk.http.auth.spi.signer.SignedRequest; +import software.amazon.awssdk.identity.spi.AwsCredentialsIdentity; + +@ExtendWith(MockitoExtension.class) +public class OtlpAwsSpanExporterTest { + private static final String XRAY_OTLP_ENDPOINT = "https://xray.us-east-1.amazonaws.com/v1/traces"; + private static final String AUTHORIZATION_HEADER = "Authorization"; + private static final String X_AMZ_DATE_HEADER = "X-Amz-Date"; + private static final String X_AMZ_SECURITY_TOKEN_HEADER = "X-Amz-Security-Token"; + + private static final String EXPECTED_AUTH_HEADER = + "AWS4-HMAC-SHA256 Credential=test_key/some_date/us-east-1/xray/aws4_request"; + private static final String EXPECTED_AUTH_X_AMZ_DATE = "some_date"; + private static final String EXPECTED_AUTH_SECURITY_TOKEN = "test_token"; + + AwsCredentials credentials = AwsBasicCredentials.create("test_access_key", "test_secret_key"); + SignedRequest signedRequest = + SignedRequest.builder() + .request( + SdkHttpFullRequest.builder() + .method(SdkHttpMethod.POST) + .uri(URI.create(XRAY_OTLP_ENDPOINT)) + .putHeader(AUTHORIZATION_HEADER, EXPECTED_AUTH_HEADER) + .putHeader(X_AMZ_DATE_HEADER, EXPECTED_AUTH_X_AMZ_DATE) + .putHeader(X_AMZ_SECURITY_TOKEN_HEADER, EXPECTED_AUTH_SECURITY_TOKEN) + .build()) + .build(); + + private MockedStatic mockDefaultCredentialsProvider; + private MockedStatic mockAwsV4HttpSigner; + private MockedStatic otlpSpanExporterMock; + + @Mock private DefaultCredentialsProvider credentialsProvider; + @Mock private AwsV4HttpSigner signer; + @Mock 
private OtlpHttpSpanExporterBuilder mockBuilder; + @Mock private OtlpHttpSpanExporter mockExporter; + + private ArgumentCaptor>> headersCaptor; + + @BeforeEach + void setup() { + this.mockDefaultCredentialsProvider = mockStatic(DefaultCredentialsProvider.class); + this.mockDefaultCredentialsProvider + .when(DefaultCredentialsProvider::create) + .thenReturn(credentialsProvider); + + this.mockAwsV4HttpSigner = mockStatic(AwsV4HttpSigner.class); + this.mockAwsV4HttpSigner.when(AwsV4HttpSigner::create).thenReturn(this.signer); + + this.otlpSpanExporterMock = mockStatic(OtlpHttpSpanExporter.class); + + this.headersCaptor = ArgumentCaptor.forClass(Supplier.class); + + when(OtlpHttpSpanExporter.builder()).thenReturn(mockBuilder); + when(this.mockBuilder.setEndpoint(any())).thenReturn(mockBuilder); + when(this.mockBuilder.setHeaders(headersCaptor.capture())).thenReturn(mockBuilder); + when(this.mockBuilder.build()).thenReturn(mockExporter); + when(this.mockExporter.export(any())).thenReturn(CompletableResultCode.ofSuccess()); + } + + @AfterEach + void afterEach() { + reset(this.signer, this.credentialsProvider); + this.mockDefaultCredentialsProvider.close(); + this.mockAwsV4HttpSigner.close(); + this.otlpSpanExporterMock.close(); + } + + @Test + void testAwsSpanExporterAddsSigV4Headers() { + + SpanExporter exporter = new OtlpAwsSpanExporter(XRAY_OTLP_ENDPOINT); + when(this.credentialsProvider.resolveCredentials()).thenReturn(this.credentials); + when(this.signer.sign((Consumer>) any())) + .thenReturn(this.signedRequest); + + exporter.export(List.of()); + + Map headers = this.headersCaptor.getValue().get(); + + assertTrue(headers.containsKey(X_AMZ_DATE_HEADER)); + assertTrue(headers.containsKey(AUTHORIZATION_HEADER)); + assertTrue(headers.containsKey(X_AMZ_SECURITY_TOKEN_HEADER)); + + assertEquals(EXPECTED_AUTH_HEADER, headers.get(AUTHORIZATION_HEADER)); + assertEquals(EXPECTED_AUTH_X_AMZ_DATE, headers.get(X_AMZ_DATE_HEADER)); + assertEquals(EXPECTED_AUTH_SECURITY_TOKEN, 
headers.get(X_AMZ_SECURITY_TOKEN_HEADER)); + } + + @Test + void testAwsSpanExporterExportCorrectlyAddsDifferentSigV4Headers() { + SpanExporter exporter = new OtlpAwsSpanExporter(XRAY_OTLP_ENDPOINT); + + for (int i = 0; i < 10; i += 1) { + String newAuthHeader = EXPECTED_AUTH_HEADER + i; + String newXAmzDate = EXPECTED_AUTH_X_AMZ_DATE + i; + String newXAmzSecurityToken = EXPECTED_AUTH_SECURITY_TOKEN + i; + + SignedRequest newSignedRequest = + SignedRequest.builder() + .request( + SdkHttpFullRequest.builder() + .method(SdkHttpMethod.POST) + .uri(URI.create(XRAY_OTLP_ENDPOINT)) + .putHeader(AUTHORIZATION_HEADER, newAuthHeader) + .putHeader(X_AMZ_DATE_HEADER, newXAmzDate) + .putHeader(X_AMZ_SECURITY_TOKEN_HEADER, newXAmzSecurityToken) + .build()) + .build(); + + when(this.credentialsProvider.resolveCredentials()).thenReturn(this.credentials); + doReturn(newSignedRequest).when(this.signer).sign(any(Consumer.class)); + + exporter.export(List.of()); + + Map headers = this.headersCaptor.getValue().get(); + + assertTrue(headers.containsKey(X_AMZ_DATE_HEADER)); + assertTrue(headers.containsKey(AUTHORIZATION_HEADER)); + assertTrue(headers.containsKey(X_AMZ_SECURITY_TOKEN_HEADER)); + + assertEquals(newAuthHeader, headers.get(AUTHORIZATION_HEADER)); + assertEquals(newXAmzDate, headers.get(X_AMZ_DATE_HEADER)); + assertEquals(newXAmzSecurityToken, headers.get(X_AMZ_SECURITY_TOKEN_HEADER)); + } + } + + @Test + void testAwsSpanExporterDoesNotAddSigV4HeadersIfFailureToRetrieveCredentials() { + + when(this.credentialsProvider.resolveCredentials()) + .thenThrow(SdkClientException.builder().message("bad credentials").build()); + + SpanExporter exporter = new OtlpAwsSpanExporter(XRAY_OTLP_ENDPOINT); + + exporter.export(List.of()); + + Supplier> headersSupplier = headersCaptor.getValue(); + Map headers = headersSupplier.get(); + + assertFalse(headers.containsKey(X_AMZ_DATE_HEADER)); + assertFalse(headers.containsKey(AUTHORIZATION_HEADER)); + 
assertFalse(headers.containsKey(X_AMZ_SECURITY_TOKEN_HEADER)); + + verifyNoInteractions(this.signer); + } + + @Test + void testAwsSpanExporterDoesNotAddSigV4HeadersIfFailureToSignHeaders() { + + when(this.credentialsProvider.resolveCredentials()).thenReturn(this.credentials); + when(this.signer.sign((Consumer>) any())) + .thenThrow(SdkClientException.builder().message("bad signature").build()); + + SpanExporter exporter = new OtlpAwsSpanExporter(XRAY_OTLP_ENDPOINT); + + exporter.export(List.of()); + + Map headers = this.headersCaptor.getValue().get(); + + assertFalse(headers.containsKey(X_AMZ_DATE_HEADER)); + assertFalse(headers.containsKey(AUTHORIZATION_HEADER)); + assertFalse(headers.containsKey(X_AMZ_SECURITY_TOKEN_HEADER)); + } +}