diff --git a/.github/workflows/actions/patch_image_and_check_diff/action.yml b/.github/workflows/actions/patch_image_and_check_diff/action.yml
index 87af843f0..18cea7a90 100644
--- a/.github/workflows/actions/patch_image_and_check_diff/action.yml
+++ b/.github/workflows/actions/patch_image_and_check_diff/action.yml
@@ -122,6 +122,20 @@ runs:
         sleep 10
         kubectl wait --for=condition=Ready pod --all -n ${{ inputs.sample-app-namespace }}
+    - name: Patch the Node ADOT image and restart CloudWatch pods
+      if: ${{ inputs.repository == 'aws-otel-js-instrumentation' }}
+      shell: bash
+      run: |
+        kubectl patch deploy -n amazon-cloudwatch amazon-cloudwatch-observability-controller-manager --type='json' \
+          -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/args/5", "value": "--auto-instrumentation-nodejs-image=${{ inputs.patch-image-arn }}"}]'
+        kubectl delete pods --all -n amazon-cloudwatch
+        sleep 10
+        kubectl wait --for=condition=Ready pod --all -n amazon-cloudwatch
+
+        kubectl delete pods --all -n ${{ inputs.sample-app-namespace }}
+        sleep 10
+        kubectl wait --for=condition=Ready pod --all -n ${{ inputs.sample-app-namespace }}
+
     - name: Patch the CloudWatch Agent image and restart CloudWatch pods
       if: ${{ inputs.repository == 'amazon-cloudwatch-agent' }}
       shell: bash
       run: |
@@ -153,7 +167,7 @@ runs:
         kubectl get pods -n amazon-cloudwatch -l app.kubernetes.io/name=amazon-cloudwatch-observability -o json | \
         jq '.items[0].spec.containers[0].args'
 
-    - name: Log pod Adot image ID and save image to the environment
+    - name: Log pod ADOT image ID and save image to the environment
       id: latest-adot-image
       shell: bash
       run: |
@@ -193,30 +207,39 @@ runs:
         echo "FLUENT_BIT_IMAGE"=$(kubectl get pods -n amazon-cloudwatch -l k8s-app=fluent-bit -o json | \
         jq '.items[0].status.containerStatuses[0].imageID') >> $GITHUB_OUTPUT
 
-    - name: Check if Python Adot image has changed
+    - name: Check if Python ADOT image has changed
       if: ${{ inputs.repository == 'aws-otel-python-instrumentation' }}
       shell: bash
       run: |
         if [ ${{ steps.default-adot-image.outputs.DEFAULT_ADOT_IMAGE }} = ${{ steps.latest-adot-image.outputs.LATEST_ADOT_IMAGE }} ]; then
-          echo "Adot image did not change"
+          echo "ADOT image did not change"
           exit 1
         fi
 
-    - name: Check if Java Adot image has changed
+    - name: Check if Java ADOT image has changed
       if: ${{ inputs.repository == 'aws-otel-java-instrumentation' }}
       shell: bash
       run: |
         if [ ${{ steps.default-adot-image.outputs.DEFAULT_ADOT_IMAGE }} = ${{ steps.latest-adot-image.outputs.LATEST_ADOT_IMAGE }} ]; then
-          echo "Adot image did not change"
+          echo "ADOT image did not change"
           exit 1
         fi
 
-    - name: Check if DotNet Adot image has changed
+    - name: Check if DotNet ADOT image has changed
       if: ${{ inputs.repository == 'aws-otel-dotnet-instrumentation' }}
       shell: bash
       run: |
         if [ ${{ steps.default-adot-image.outputs.DEFAULT_ADOT_IMAGE }} = ${{ steps.latest-adot-image.outputs.LATEST_ADOT_IMAGE }} ]; then
-          echo "Adot image did not change"
+          echo "ADOT image did not change"
+          exit 1
+        fi
+
+    - name: Check if Node ADOT image has changed
+      if: ${{ inputs.repository == 'aws-otel-js-instrumentation' }}
+      shell: bash
+      run: |
+        if [ ${{ steps.default-adot-image.outputs.DEFAULT_ADOT_IMAGE }} = ${{ steps.latest-adot-image.outputs.LATEST_ADOT_IMAGE }} ]; then
+          echo "ADOT image did not change"
           exit 1
         fi
 
diff --git a/.github/workflows/node-ec2-asg-test.yml b/.github/workflows/node-ec2-asg-test.yml
index ad5bf3b5a..ccfb5f69e 100644
--- a/.github/workflows/node-ec2-asg-test.yml
+++ b/.github/workflows/node-ec2-asg-test.yml
@@ -16,7 +16,7 @@ on:
         type: string
       staging-instrumentation-name:
         required: false
-        default: 'aws-opentelemetry-distro'
+        default: '@aws/aws-distro-opentelemetry-node-autoinstrumentation'
         type: string
     outputs:
       job-started:
@@ -92,20 +92,13 @@ jobs:
           role-to-assume: arn:aws:iam::${{ env.ACCOUNT_ID }}:role/${{ env.E2E_TEST_ROLE_NAME }}
           aws-region: ${{ env.E2E_TEST_AWS_REGION }}
 
-      # TODO: Remove the following step once release testing is ready
-      - name: TEMPORARY TEST CONFIGS
-        run: |
-          echo ADOT_INSTRUMENTATION_NAME="aws-aws-distro-opentelemetry-node-autoinstrumentation-0.0.1.tgz" >> $GITHUB_ENV
-
       - name: Set Get ADOT Instrumentation command environment variable
         run: |
-          echo GET_ADOT_INSTRUMENTATION_COMMAND="aws s3 cp s3://adot-autoinstrumentation-node-staging/${{ env.ADOT_INSTRUMENTATION_NAME }} ./${{ env.ADOT_INSTRUMENTATION_NAME }} && npm install ${{ env.ADOT_INSTRUMENTATION_NAME }}" >> $GITHUB_ENV
-          # TODO: Reintroduce release testing logic when artifacts are ready
-          # if [ "${{ github.event.repository.name }}" = "aws-otel-js-instrumentation" ]; then
-          #   echo GET_ADOT_INSTRUMENTATION_COMMAND="aws s3 cp s3://adot-autoinstrumentation-node-staging/${{ env.ADOT_INSTRUMENTATION_NAME }} ./${{ env.ADOT_INSTRUMENTATION_NAME }} && npm install ${{ env.ADOT_INSTRUMENTATION_NAME }}" >> $GITHUB_ENV
-          # else
-          #   echo GET_ADOT_INSTRUMENTATION_COMMAND="npm install ${{ env.ADOT_INSTRUMENTATION_NAME }}" >> $GITHUB_ENV
-          # fi
+          if [ "${{ github.event.repository.name }}" = "aws-otel-js-instrumentation" ]; then
+            echo GET_ADOT_INSTRUMENTATION_COMMAND="aws s3 cp s3://adot-autoinstrumentation-node-staging/${{ env.ADOT_INSTRUMENTATION_NAME }} ./${{ env.ADOT_INSTRUMENTATION_NAME }} && npm install ${{ env.ADOT_INSTRUMENTATION_NAME }}" >> $GITHUB_ENV
+          else
+            echo GET_ADOT_INSTRUMENTATION_COMMAND="npm install ${{ env.ADOT_INSTRUMENTATION_NAME }}" >> $GITHUB_ENV
+          fi
 
       - name: Set Get CW Agent command environment variable
         run: |
diff --git a/terraform/node/k8s/deploy/main.tf b/terraform/node/k8s/deploy/main.tf
index 787f15f35..1843fe135 100644
--- a/terraform/node/k8s/deploy/main.tf
+++ b/terraform/node/k8s/deploy/main.tf
@@ -36,11 +36,15 @@ resource "null_resource" "deploy" {
       [ ! -e remote-service-depl.yaml ] || rm remote-service-depl.yaml
 
       # Clone and install operator onto cluster
-      echo "LOG: Cloning helm-charts repo"
-      # TODO: Update to latest release version of the code
-      git clone https://github.com/aws-observability/helm-charts -q
-      cd helm-charts/charts/amazon-cloudwatch-observability/
-      git reset --hard b407a9545a03e377703ef965a2ee655aa6df7406
+      echo "LOG: Getting the latest Helm chart release URL"
+      latest_version_url=$(curl -s https://api.github.com/repos/aws-observability/helm-charts/releases/latest | grep "tarball_url" | cut -d '"' -f 4)
+      echo "LOG: The latest Helm chart version URL is $latest_version_url"
+
+      echo "LOG: Downloading and unpacking the Helm charts repo"
+      curl -L $latest_version_url -o aws-observability-helm-charts-latest.tar.gz
+      mkdir helm-charts
+      tar -xvzf aws-observability-helm-charts-latest.tar.gz -C helm-charts
+      cd helm-charts/aws-observability-helm-charts*/charts/amazon-cloudwatch-observability
 
       echo "LOG: Installing CloudWatch Agent Operator using Helm"
       helm upgrade --install --debug --namespace amazon-cloudwatch amazon-cloudwatch-operator ./ --create-namespace --set region=${var.aws_region} --set clusterName=k8s-cluster-${var.test_id}
@@ -88,7 +92,7 @@ resource "null_resource" "deploy" {
         kubectl wait --for=condition=Ready pod --all -n amazon-cloudwatch
       elif [ "${var.repository}" = "aws-otel-js-instrumentation" ]; then
         kubectl patch deploy -n amazon-cloudwatch amazon-cloudwatch-observability-controller-manager --type='json' \
-          -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/args/3", "value": "--auto-instrumentation-nodejs-image=${var.patch_image_arn}"}]'
+          -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/args/5", "value": "--auto-instrumentation-nodejs-image=${var.patch_image_arn}"}]'
         kubectl delete pods --all -n amazon-cloudwatch
         sleep 10
         kubectl wait --for=condition=Ready pod --all -n amazon-cloudwatch
@@ -121,7 +125,7 @@ resource "null_resource" "deploy" {
       aws s3api get-object --bucket aws-appsignals-sample-app-prod-us-east-1 --key remote-service-depl-${var.test_id}.yaml remote-service-depl.yaml
 
       # Patch the staging image if this is running as part of release testing
-      if [ "${var.repository}" = "aws-otel-java-instrumentation" ]; then
+      if [ "${var.repository}" = "aws-otel-js-instrumentation" ]; then
         RELEASE_TESTING_SECRET_NAME=release-testing-ecr-secret
         kubectl delete secret -n sample-app-namespace --ignore-not-found $RELEASE_TESTING_SECRET_NAME
         kubectl create secret -n sample-app-namespace docker-registry $RELEASE_TESTING_SECRET_NAME \