Skip to content

Commit 896c8a2

Browse files
authored
[Genesis] Release Testing - Workflow Changes (#426)
This PR is part 2 of the release testing changes to support the incoming release of the latest Genesis/Agentic AI Observability changes done in ADOT Python which contains all of the changes for adding the release workflow including sample app, terraform files, and GitHub workflow. *Description of changes:* - Added `python-ec2-genai-test.yml` workflow for automated GenAI service testing which contains a step for generating a random X-Ray trace ID and W3C trace ID to validate trace context propagation and Gen AI log event + span linking - Added a simple Python GenAI service `server.py` created with LangChain and instrumented with LangChain instrumentor and ADOT Python. - Added generate_traffic.sh script for generating traffic to hit the `/ai-chat` endpoint to generate traces `server.py` - `Logs`: Validates Gen AI log event appears in CloudWatch - `Traces`: Validates trace propagation and Gen AI log event + span linking - `Metrics`: Validates that OTel metrics emitted by the sample application are converted to EMF metrics and are able to be published to CloudWatch Successful run: https://github.com/aws-observability/aws-application-signals-test-framework/actions/runs/16183070365/job/45683540103 By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license.
1 parent 87a11c0 commit 896c8a2

File tree

5 files changed

+479
-0
lines changed

5 files changed

+479
-0
lines changed
Lines changed: 163 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,163 @@
## Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
## SPDX-License-Identifier: Apache-2.0

# E2E release test for the Python ADOT Gen AI sample: deploys a LangChain
# service to EC2 via Terraform, drives traffic with a pre-generated trace
# context, then validates logs, traces, and metrics in CloudWatch.
name: Python EC2 Gen AI Use Case
on:
  workflow_dispatch: # be able to run the workflow on demand

  workflow_call:
    inputs:
      caller-workflow-name:
        required: true
        type: string
      staging-wheel-name:
        required: false
        default: 'aws-opentelemetry-distro'
        type: string

permissions:
  id-token: write
  contents: read

env:
  E2E_TEST_AWS_REGION: 'us-west-2'
  E2E_TEST_ACCOUNT_ID: ${{ secrets.APPLICATION_SIGNALS_E2E_TEST_ACCOUNT_ID }}
  E2E_TEST_ROLE_NAME: ${{ secrets.APPLICATION_SIGNALS_E2E_TEST_ROLE_NAME }}
  ADOT_WHEEL_NAME: ${{ inputs.staging-wheel-name }}
  METRIC_NAMESPACE: genesis
  LOG_GROUP_NAME: test/genesis
  # NOTE(review): ${GITHUB_WORKSPACE} is a shell variable, not an Actions
  # expression — it is only expanded when this value is used inside a shell
  # command (as it is in the "Initiate Terraform" step below).
  TEST_RESOURCES_FOLDER: ${GITHUB_WORKSPACE}
  SAMPLE_APP_ZIP: s3://aws-appsignals-sample-app-prod-us-east-1/python-genai-sample-app.zip

jobs:
  python-ec2-adot-genai:
    runs-on: ubuntu-latest
    timeout-minutes: 30
    steps:
      - uses: actions/checkout@v4

      # Decide how the ADOT wheel is fetched: the staging S3 bucket (when run
      # from the ADOT Python repo), PyPI, or the latest GitHub release asset.
      # NOTE(review): env.OTEL_SOURCE is referenced below but never defined in
      # this workflow's env block — confirm where it is expected to come from.
      - name: Set Get ADOT Wheel command environment variable
        run: |
          if [ "${{ github.event.repository.name }}" = "aws-otel-python-instrumentation" ]; then
            # Reusing the adot-main-build-staging-jar bucket to store the python wheel file
            echo GET_ADOT_WHEEL_COMMAND="aws s3 cp s3://adot-main-build-staging-jar/${{ env.ADOT_WHEEL_NAME }} ./${{ env.ADOT_WHEEL_NAME }} && python3.12 -m pip install ${{ env.ADOT_WHEEL_NAME }}" >> $GITHUB_ENV
          elif [ "${{ env.OTEL_SOURCE }}" == "pypi" ]; then
            echo GET_ADOT_WHEEL_COMMAND="python3.12 -m pip install ${{ env.ADOT_WHEEL_NAME }}" >> $GITHUB_ENV
          else
            latest_release_version=$(curl -sL https://github.com/aws-observability/aws-otel-python-instrumentation/releases/latest | grep -oP '/releases/tag/v\K[0-9]+\.[0-9]+\.[0-9]+' | head -n 1)
            echo "The latest version is $latest_release_version"
            echo GET_ADOT_WHEEL_COMMAND="wget -O ${{ env.ADOT_WHEEL_NAME }} https://github.com/aws-observability/aws-otel-python-instrumentation/releases/latest/download/aws_opentelemetry_distro-$latest_release_version-py3-none-any.whl \
              && python3.12 -m pip install ${{ env.ADOT_WHEEL_NAME }}" >> $GITHUB_ENV
          fi

      - name: Initiate Gradlew Daemon
        uses: ./.github/workflows/actions/execute_and_retry
        continue-on-error: true
        with:
          command: "./gradlew :validator:build"
          cleanup: "./gradlew clean"
          max_retry: 3
          sleep_time: 60

      - name: Generate testing id
        run: echo TESTING_ID="${{ github.run_id }}-${{ github.run_number }}-${RANDOM}" >> $GITHUB_ENV

      # Pre-generate a trace context so the validators can assert that this
      # exact trace id propagates end to end. The X-Ray and W3C forms share
      # the same ID_1/ID_2 components, differing only in layout.
      - name: Generate XRay and W3C trace ID
        run: |
          ID_1="$(printf '%08x' $(date +%s))"
          ID_2="$(openssl rand -hex 12)"
          W3C_TRACE_ID="${ID_1}${ID_2}"
          XRAY_TRACE_ID="1-${ID_1}-${ID_2}"
          PARENT_ID="$(openssl rand -hex 8)"
          TRACE_ID_HEADER="Root=${XRAY_TRACE_ID};Parent=${PARENT_ID};Sampled=1"
          echo "XRAY_TRACE_ID=${XRAY_TRACE_ID}" >> $GITHUB_ENV
          echo "W3C_TRACE_ID=${W3C_TRACE_ID}" >> $GITHUB_ENV
          echo "TRACE_ID_HEADER=${TRACE_ID_HEADER}" >> $GITHUB_ENV
          echo "Generated XRay Trace ID: ${XRAY_TRACE_ID}"
          echo "Generated W3C Trace ID: ${W3C_TRACE_ID}"
          echo "Generated Trace ID Header: ${TRACE_ID_HEADER}"

      - name: Configure AWS Credentials
        uses: aws-actions/configure-aws-credentials@v4
        with:
          role-to-assume: arn:aws:iam::${{ env.E2E_TEST_ACCOUNT_ID }}:role/${{ env.E2E_TEST_ROLE_NAME }}
          aws-region: ${{ env.E2E_TEST_AWS_REGION }}

      - name: Set up terraform
        uses: ./.github/workflows/actions/execute_and_retry
        with:
          command: "wget -O- https://apt.releases.hashicorp.com/gpg | sudo gpg --dearmor -o /usr/share/keyrings/hashicorp-archive-keyring.gpg"
          post-command: 'echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/hashicorp.list
            && sudo apt update && sudo apt install terraform'

      - name: Initiate Terraform
        uses: ./.github/workflows/actions/execute_and_retry
        with:
          command: "cd ${{ env.TEST_RESOURCES_FOLDER }}/terraform/python/ec2/adot-genai && terraform init && terraform validate"
          cleanup: "rm -rf .terraform && rm -rf .terraform.lock.hcl"
          max_retry: 6

      # FIX: the original ended the last -var line with a dangling "\"
      # line-continuation (continuing into a blank line); the matching
      # destroy step below correctly has none.
      - name: Deploy service via terraform
        working-directory: terraform/python/ec2/adot-genai
        run: |
          terraform apply -auto-approve \
            -var="aws_region=${{ env.E2E_TEST_AWS_REGION }}" \
            -var="test_id=${{ env.TESTING_ID }}" \
            -var="service_zip_url=${{ env.SAMPLE_APP_ZIP }}" \
            -var="trace_id=${{ env.TRACE_ID_HEADER }}" \
            -var="get_adot_wheel_command=${{ env.GET_ADOT_WHEEL_COMMAND }}"

      - name: Get deployment info
        working-directory: terraform/python/ec2/adot-genai
        run: |
          echo "INSTANCE_IP=$(terraform output langchain_service_public_ip)" >> $GITHUB_ENV
          echo "INSTANCE_ID=$(terraform output langchain_service_instance_id)" >> $GITHUB_ENV

      - name: Waiting 5 Minutes for Gen AI service to be ready and emit logs, traces, and metrics
        run: sleep 300

      - name: Validate generated logs
        run: ./gradlew validator:run --args='-c python/ec2/adot-genai/log-validation.yml
          --testing-id ${{ env.TESTING_ID }}
          --endpoint http://${{ env.INSTANCE_IP }}:8000
          --region ${{ env.E2E_TEST_AWS_REGION }}
          --metric-namespace ${{ env.METRIC_NAMESPACE }}
          --log-group ${{ env.LOG_GROUP_NAME }}
          --service-name langchain-traceloop-app
          --instance-id ${{ env.INSTANCE_ID }}
          --trace-id ${{ env.W3C_TRACE_ID }}'

      # Run trace/metric validation even if an earlier validation failed,
      # but not when the run was cancelled.
      - name: Validate generated traces
        if: (success() || failure()) && !cancelled()
        run: ./gradlew validator:run --args='-c python/ec2/adot-genai/trace-validation.yml
          --testing-id ${{ env.TESTING_ID }}
          --endpoint http://${{ env.INSTANCE_IP }}:8000
          --region ${{ env.E2E_TEST_AWS_REGION }}
          --metric-namespace ${{ env.METRIC_NAMESPACE }}
          --service-name langchain-traceloop-app
          --instance-id ${{ env.INSTANCE_ID }}
          --trace-id ${{ env.XRAY_TRACE_ID }}'

      - name: Validate generated metrics
        if: (success() || failure()) && !cancelled()
        run: ./gradlew validator:run --args='-c python/ec2/adot-genai/metric-validation.yml
          --testing-id ${{ env.TESTING_ID }}
          --endpoint http://${{ env.INSTANCE_IP }}:8000
          --region ${{ env.E2E_TEST_AWS_REGION }}
          --metric-namespace ${{ env.METRIC_NAMESPACE }}
          --log-group ${{ env.LOG_GROUP_NAME }}
          --service-name langchain-traceloop-app
          --instance-id ${{ env.INSTANCE_ID }}'

      # Always tear down the EC2 stack, even on failure.
      - name: Cleanup
        if: always()
        continue-on-error: true
        working-directory: terraform/python/ec2/adot-genai
        run: |
          terraform destroy -auto-approve \
            -var="aws_region=${{ env.E2E_TEST_AWS_REGION }}" \
            -var="test_id=${{ env.TESTING_ID }}" \
            -var="service_zip_url=${{ env.SAMPLE_APP_ZIP }}" \
            -var="trace_id=${{ env.TRACE_ID_HEADER }}" \
            -var="get_adot_wheel_command=${{ env.GET_ADOT_WHEEL_COMMAND }}"
Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,15 @@
1+
langchain
2+
langchain-community
3+
langchain_aws
4+
opentelemetry-sdk
5+
openinference-instrumentation-langchain
6+
opentelemetry-api
7+
opentelemetry-semantic-conventions
8+
python-dotenv
9+
openlit
10+
botocore
11+
setuptools
12+
boto3
13+
aws_opentelemetry_distro
14+
fastapi
15+
uvicorn[standard]
Lines changed: 114 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,114 @@
1+
import os
2+
from typing import Dict, List
3+
from dotenv import load_dotenv
4+
from fastapi import FastAPI, HTTPException
5+
from pydantic import BaseModel
6+
from langchain_aws import ChatBedrock
7+
from langchain.prompts import ChatPromptTemplate
8+
from langchain.chains import LLMChain
9+
from opentelemetry import trace
10+
from opentelemetry.sdk.trace import TracerProvider
11+
from opentelemetry.sdk.trace.export import BatchSpanProcessor, ConsoleSpanExporter
12+
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
13+
from openinference.instrumentation.langchain import LangChainInstrumentor
14+
15+
# Load environment variables from a .env file, if one is present.
load_dotenv()

# Configure tracing with two exporters: console (local debugging) and
# OTLP over HTTP (to a local collector on the standard 4318 port).
tracer_provider = TracerProvider()
tracer_provider.add_span_processor(BatchSpanProcessor(ConsoleSpanExporter()))
tracer_provider.add_span_processor(
    BatchSpanProcessor(
        OTLPSpanExporter(endpoint="http://localhost:4318/v1/traces")
    )
)

# Install the provider globally so every tracer in the process uses it.
trace.set_tracer_provider(tracer_provider)

# Auto-instrument LangChain calls via OpenInference.
LangChainInstrumentor().instrument(tracer_provider=tracer_provider)
36+
37+
# FastAPI application exposing the chat endpoints.
app = FastAPI(title="LangChain Bedrock OpenInference API", version="1.0.0")

# Bedrock-backed chat model (Claude 3 Haiku); region comes from the
# environment with us-west-2 as the fallback.
llm = ChatBedrock(
    model_id="anthropic.claude-3-haiku-20240307-v1:0",
    model_kwargs={"temperature": 0.7, "max_tokens": 500},
    region_name=os.getenv("AWS_DEFAULT_REGION", "us-west-2"),
)

# Prompt + chain wiring used by the /ai-chat endpoint.
# NOTE(review): LLMChain is deprecated in newer LangChain releases in favor
# of LCEL (prompt | llm) — confirm the pinned LangChain version before migrating.
prompt = ChatPromptTemplate.from_template(
    "You are a helpful assistant. The user says: {input}. Provide a helpful response."
)
chain = LLMChain(llm=llm, prompt=prompt)
57+
58+
# ---- API request/response schemas ----

class ChatRequest(BaseModel):
    # Single user message to run through the chain.
    message: str


class BatchChatRequest(BaseModel):
    # Multiple user messages for batch processing.
    messages: List[str]


class ChatResponse(BaseModel):
    # Model-generated reply text.
    response: str


class BatchChatResponse(BaseModel):
    # One string-to-string mapping per input message.
    responses: List[Dict[str, str]]


# Canned prompts for testing / traffic generation.
SAMPLE_PROMPTS = [
    "What is the capital of France?",
    "How do I make a cup of coffee?",
    "What are the benefits of exercise?",
    "Explain quantum computing in simple terms",
    "What's the best way to learn programming?",
]
79+
80+
@app.get("/")
81+
async def root():
82+
return {
83+
"message": "LangChain Bedrock OpenInference API is running!",
84+
"endpoints": {
85+
"/ai-chat": "Single message chat endpoint",
86+
"/hello": "Simple hello endpoint"
87+
}
88+
}
89+
90+
@app.post("/ai-chat", response_model=ChatResponse)
91+
async def chat(request: ChatRequest):
92+
"""
93+
Chat endpoint that processes a single user message through AWS Bedrock
94+
"""
95+
try:
96+
# Process the input through the chain
97+
result = await chain.ainvoke({"input": request.message})
98+
return ChatResponse(response=result["text"])
99+
except Exception as e:
100+
raise HTTPException(status_code=500, detail=str(e))
101+
102+
@app.get("/health")
103+
async def health():
104+
"""Health check endpoint"""
105+
return {"status": "healthy", "llm": "AWS Bedrock Claude 3 Haiku"}
106+
107+
if __name__ == "__main__":
    import uvicorn

    # Print a startup banner, then serve on all interfaces at port 8000.
    for banner_line in (
        "Starting FastAPI server with AWS Bedrock and OpenInference instrumentation...",
        "Make sure AWS credentials are configured",
        "Server will run on http://localhost:8000",
        "API docs available at http://localhost:8000/docs",
    ):
        print(banner_line)

    uvicorn.run(app, host="0.0.0.0", port=8000)

0 commit comments

Comments
 (0)