# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

# This is a reusable workflow for running the Python Enablement Canary test for Application Signals.
# It is meant to be called from another workflow.
# Read more about reusable workflows: https://docs.github.com/en/actions/using-workflows/reusing-workflows#overview
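#
# A minimal caller sketch (illustrative; the workflow file name and ref below are assumptions):
#
#   jobs:
#     python-ec2-adot-sigv4:
#       uses: aws-observability/aws-application-signals-test-framework/.github/workflows/python-ec2-adot-sigv4-test.yml@main
#       secrets: inherit
#       with:
#         caller-workflow-name: 'main-build'
#         python-version: '3.10'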
name: Python EC2 ADOT SigV4 (Stand-Alone ADOT) Use Case
on:
  workflow_call:
    inputs:
      caller-workflow-name:
        required: true
        type: string
      python-version:
        description: "Currently supported versions: 3.8, 3.9, 3.10, 3.11, 3.12"
        required: false
        type: string
        default: '3.9'
      cpu-architecture:
        description: "Permitted values: x86_64 or arm64"
        required: false
        type: string
        default: "x86_64"
      staging-wheel-name:
        required: false
        default: 'aws-opentelemetry-distro'
        type: string
    outputs:
      job-started:
        value: ${{ jobs.python-ec2-adot-sigv4.outputs.job-started }}
      validation-result:
        value: ${{ jobs.python-ec2-adot-sigv4.outputs.validation-result }}

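# id-token: write lets the job request an OIDC token, which configure-aws-credentials exchanges for AWS credentials.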
permissions:
  id-token: write
  contents: read

env:
  E2E_TEST_AWS_REGION: 'us-west-2'
  CALLER_WORKFLOW_NAME: ${{ inputs.caller-workflow-name }}
  PYTHON_VERSION: ${{ inputs.python-version }}
  CPU_ARCHITECTURE: ${{ inputs.cpu-architecture }}
  ADOT_WHEEL_NAME: ${{ inputs.staging-wheel-name }}
  E2E_TEST_ACCOUNT_ID: ${{ secrets.APPLICATION_SIGNALS_E2E_TEST_ACCOUNT_ID }}
  E2E_TEST_ROLE_NAME: ${{ secrets.APPLICATION_SIGNALS_E2E_TEST_ROLE_NAME }}
  METRIC_NAMESPACE: ApplicationSignals
  LOG_GROUP_NAME: aws/spans
  TEST_RESOURCES_FOLDER: ${GITHUB_WORKSPACE}

jobs:
  python-ec2-adot-sigv4:
    runs-on: ubuntu-latest
    timeout-minutes: 30
    outputs:
      job-started: ${{ steps.job-started.outputs.job-started }}
      validation-result: ${{ steps.validation-result.outputs.validation-result }}
    steps:
      - name: Check if the job started
        id: job-started
        run: echo "job-started=true" >> $GITHUB_OUTPUT

      - uses: actions/checkout@v4
        with:
          repository: 'aws-observability/aws-application-signals-test-framework'
          ref: ${{ inputs.caller-workflow-name == 'main-build' && 'main' || github.ref }}
          fetch-depth: 0

      # We initialize the Gradle daemon early in the workflow because initialization sometimes
      # fails due to transient issues. If it fails here, we will try again later, before the validators run.
      - name: Initiate Gradlew Daemon
        id: initiate-gradlew
        uses: ./.github/workflows/actions/execute_and_retry
        continue-on-error: true
        with:
          command: "./gradlew :validator:build"
          cleanup: "./gradlew clean"
          max_retry: 3
          sleep_time: 60

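      # Build a unique testing ID from the run id, run number, and a random suffix; it is appended to resource names below.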
      - name: Generate testing id
        run: echo TESTING_ID="${{ github.run_id }}-${{ github.run_number }}-${RANDOM}" >> $GITHUB_ENV

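      # Assume the E2E test role via GitHub's OIDC provider to obtain short-lived AWS credentials.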
      - name: Refresh AWS Credentials
        uses: aws-actions/configure-aws-credentials@v4
        with:
          role-to-assume: arn:aws:iam::${{ env.E2E_TEST_ACCOUNT_ID }}:role/${{ env.E2E_TEST_ROLE_NAME }}
          aws-region: ${{ env.E2E_TEST_AWS_REGION }}

      # if [ "${{ github.event.repository.name }}" = "aws-otel-python-instrumentation" ]; then
      #   # Reusing the adot-main-build-staging-jar bucket to store the python wheel file
      #   echo GET_ADOT_WHEEL_COMMAND="aws s3 cp s3://adot-main-build-staging-jar/${{ env.ADOT_WHEEL_NAME }} ./${{ env.ADOT_WHEEL_NAME }} && sudo python${{ env.PYTHON_VERSION }} -m pip install ${{ env.ADOT_WHEEL_NAME }}" >> $GITHUB_ENV
      # else
      #   latest_release_version=$(curl -sL https://github.com/aws-observability/aws-otel-python-instrumentation/releases/latest | grep -oP '/releases/tag/v\K[0-9]+\.[0-9]+\.[0-9]+' | head -n 1)
      #   echo "The latest version is $latest_release_version"
      #   echo GET_ADOT_WHEEL_COMMAND="wget -O ${{ env.ADOT_WHEEL_NAME }} https://github.com/aws-observability/aws-otel-python-instrumentation/releases/latest/download/aws_opentelemetry_distro-$latest_release_version-py3-none-any.whl \
      #     && sudo python${{ env.PYTHON_VERSION }} -m pip install ${{ env.ADOT_WHEEL_NAME }}" >> $GITHUB_ENV
      # fi
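      # For now the wheel is always copied from the staging S3 bucket and installed with pip on the EC2 host.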
      - name: Set Get ADOT Wheel command environment variable
        run: |
          echo GET_ADOT_WHEEL_COMMAND="aws s3 cp s3://adot-main-build-staging-jar/${{ env.ADOT_WHEEL_NAME }} ./${{ env.ADOT_WHEEL_NAME }} && sudo python${{ env.PYTHON_VERSION }} -m pip install ${{ env.ADOT_WHEEL_NAME }}" >> $GITHUB_ENV

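      # Install Terraform from the official HashiCorp apt repository, adding its signing key first.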
      - name: Set up terraform
        uses: ./.github/workflows/actions/execute_and_retry
        with:
          command: "wget -O- https://apt.releases.hashicorp.com/gpg | sudo gpg --dearmor -o /usr/share/keyrings/hashicorp-archive-keyring.gpg"
          post-command: 'echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/hashicorp.list
            && sudo apt update && sudo apt install terraform'
          sleep_time: 60

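      # Run terraform init and validate with retries, wiping the local .terraform state between attempts.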
      - name: Initiate Terraform
        uses: ./.github/workflows/actions/execute_and_retry
        with:
          command: "cd ${{ env.TEST_RESOURCES_FOLDER }}/terraform/python/ec2/adot-sigv4 && terraform init && terraform validate"
          cleanup: "rm -rf .terraform && rm -rf .terraform.lock.hcl"
          max_retry: 6
          sleep_time: 60

      - name: Deploy sample app via terraform and wait for endpoint to come online
        working-directory: terraform/python/ec2/adot-sigv4
        run: |
          # Attempt to deploy the sample app on an EC2 instance and wait for its endpoint to come online.
          # There may be occasional failures due to transient issues, so try up to 2 times.
          # deployment_failed of 0 means both the terraform deployment and the endpoint are up; any
          # non-zero value means it failed at some point.
          retry_counter=0
          max_retry=2
          while [ $retry_counter -lt $max_retry ]; do
            echo "Attempt $retry_counter"
            deployment_failed=0
            terraform apply -auto-approve \
              -var="aws_region=${{ env.E2E_TEST_AWS_REGION }}" \
              -var="test_id=${{ env.TESTING_ID }}" \
              -var="sample_app_zip=s3://aws-appsignals-sample-app-prod-us-east-1/python-sample-app.zip" \
              -var="get_adot_wheel_command=${{ env.GET_ADOT_WHEEL_COMMAND }}" \
              -var="language_version=${{ env.PYTHON_VERSION }}" \
              -var="cpu_architecture=${{ env.CPU_ARCHITECTURE }}" \
              || deployment_failed=$?

            if [ $deployment_failed -ne 0 ]; then
              # Either the terraform deployment or the endpoint connection failed, so destroy the
              # resources created by terraform and try again.
              echo "Terraform deployment was unsuccessful. Destroying terraform and retrying."
              terraform destroy -auto-approve \
                -var="test_id=${{ env.TESTING_ID }}"

              retry_counter=$(($retry_counter+1))
            else
              # Deployment succeeded, so exit the loop
              break
            fi

            if [ $retry_counter -eq $max_retry ]; then
              echo "Max retry reached; failed to deploy terraform and connect to the endpoint. Exiting."
              exit 1
            fi
          done

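      # Export the Terraform outputs the validators need (instance AMI, remote service IP, instance id).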
      - name: Get the ec2 instance ami id
        run: |
          echo "EC2_INSTANCE_AMI=$(terraform output ec2_instance_ami)" >> $GITHUB_ENV
        working-directory: terraform/python/ec2/adot-sigv4

      - name: Get the sample app endpoint
        run: |
          echo "REMOTE_SERVICE_IP=$(terraform output sample_app_remote_service_private_ip)" >> $GITHUB_ENV
          echo "MAIN_SERVICE_INSTANCE_ID=$(terraform output main_service_instance_id)" >> $GITHUB_ENV
        working-directory: terraform/python/ec2/adot-sigv4

      - name: Initiate Gradlew Daemon
        if: steps.initiate-gradlew.outcome == 'failure'
        uses: ./.github/workflows/actions/execute_and_retry
        continue-on-error: true
        with:
          command: "./gradlew :validator:build"
          cleanup: "./gradlew clean"
          max_retry: 3
          sleep_time: 60

      # Validation for pulse telemetry data
      - name: Validate generated EMF logs
        id: log-validation
        run: ./gradlew validator:run --args='-c python/ec2/adot-sigv4/log-validation.yml
          --testing-id ${{ env.TESTING_ID }}
          --endpoint http://localhost:8000
          --remote-service-deployment-name ${{ env.REMOTE_SERVICE_IP }}:8001
          --region ${{ env.E2E_TEST_AWS_REGION }}
          --metric-namespace ${{ env.METRIC_NAMESPACE }}
          --log-group ${{ env.LOG_GROUP_NAME }}
          --service-name python-sample-application-${{ env.TESTING_ID }}
          --remote-service-name python-sample-remote-application-${{ env.TESTING_ID }}
          --query-string ip=${{ env.REMOTE_SERVICE_IP }}&testingId=${{ env.TESTING_ID }}
          --instance-ami ${{ env.EC2_INSTANCE_AMI }}
          --instance-id ${{ env.MAIN_SERVICE_INSTANCE_ID }}
          --rollup'

      - name: Validate generated metrics
        id: metric-validation
        if: (success() || steps.log-validation.outcome == 'failure') && !cancelled()
        run: ./gradlew validator:run --args='-c python/ec2/adot-sigv4/metric-validation.yml
          --testing-id ${{ env.TESTING_ID }}
          --endpoint http://localhost:8000
          --remote-service-deployment-name ${{ env.REMOTE_SERVICE_IP }}:8001
          --region ${{ env.E2E_TEST_AWS_REGION }}
          --metric-namespace ${{ env.METRIC_NAMESPACE }}
          --log-group ${{ env.LOG_GROUP_NAME }}
          --service-name python-sample-application-${{ env.TESTING_ID }}
          --remote-service-name python-sample-remote-application-${{ env.TESTING_ID }}
          --query-string ip=${{ env.REMOTE_SERVICE_IP }}
          --instance-ami ${{ env.EC2_INSTANCE_AMI }}
          --instance-id ${{ env.MAIN_SERVICE_INSTANCE_ID }}
          --rollup'

      - name: Validate generated traces
        id: trace-validation
        if: (success() || steps.log-validation.outcome == 'failure' || steps.metric-validation.outcome == 'failure') && !cancelled()
        run: ./gradlew validator:run --args='-c python/ec2/adot-sigv4/trace-validation.yml
          --testing-id ${{ env.TESTING_ID }}
          --endpoint http://localhost:8000
          --remote-service-deployment-name ${{ env.REMOTE_SERVICE_IP }}:8001
          --region ${{ env.E2E_TEST_AWS_REGION }}
          --account-id ${{ env.E2E_TEST_ACCOUNT_ID }}
          --metric-namespace ${{ env.METRIC_NAMESPACE }}
          --log-group ${{ env.LOG_GROUP_NAME }}
          --service-name python-sample-application-${{ env.TESTING_ID }}
          --remote-service-name python-sample-remote-application-${{ env.TESTING_ID }}
          --query-string ip=${{ env.REMOTE_SERVICE_IP }}&testingId=${{ env.TESTING_ID }}
          --instance-ami ${{ env.EC2_INSTANCE_AMI }}
          --instance-id ${{ env.MAIN_SERVICE_INSTANCE_ID }}
          --rollup'

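      # Re-assume the test role in case the earlier credentials expired while the validators were running.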
      - name: Refresh AWS Credentials
        uses: aws-actions/configure-aws-credentials@v4
        with:
          role-to-assume: arn:aws:iam::${{ env.E2E_TEST_ACCOUNT_ID }}:role/${{ env.E2E_TEST_ROLE_NAME }}
          aws-region: ${{ env.E2E_TEST_AWS_REGION }}

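      # Expose the overall result to the caller workflow: success only if all three validators passed.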
      - name: Save test results
        if: always()
        id: validation-result
        run: |
          if [ "${{ steps.log-validation.outcome }}" = "success" ] && [ "${{ steps.metric-validation.outcome }}" = "success" ] && [ "${{ steps.trace-validation.outcome }}" = "success" ]; then
            echo "validation-result=success" >> $GITHUB_OUTPUT
          else
            echo "validation-result=failure" >> $GITHUB_OUTPUT
          fi

      # Clean up Procedures
      - name: Terraform destroy
        if: always()
        continue-on-error: true
        working-directory: terraform/python/ec2/adot-sigv4
        run: |
          terraform destroy -auto-approve \
            -var="test_id=${{ env.TESTING_ID }}"