
Commit fea6e35

add node gen ai release test

1 parent 19c187a commit fea6e35

15 files changed: +10023 -1 lines changed
Lines changed: 162 additions & 0 deletions
@@ -0,0 +1,162 @@
## Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
## SPDX-License-Identifier: Apache-2.0

name: Node EC2 Genesis Use Case
on:
  workflow_dispatch: # be able to run the workflow on demand
  push:
    branches:
      - genesis-release-test
  workflow_call:
    inputs:
      caller-workflow-name:
        required: true
        type: string
      staging-wheel-name:
        required: false
        default: '@aws/aws-distro-opentelemetry-node-autoinstrumentation'
        type: string

permissions:
  id-token: write
  contents: read

env:
  E2E_TEST_AWS_REGION: 'us-west-2'
  E2E_TEST_ACCOUNT_ID: ${{ secrets.APPLICATION_SIGNALS_E2E_TEST_ACCOUNT_ID }}
  E2E_TEST_ROLE_NAME: ${{ secrets.APPLICATION_SIGNALS_E2E_TEST_ROLE_NAME }}
  ADOT_WHEEL_NAME: ${{ inputs.staging-wheel-name }}
  METRIC_NAMESPACE: genesis
  LOG_GROUP_NAME: test/genesis
  TEST_RESOURCES_FOLDER: ${GITHUB_WORKSPACE}
  SAMPLE_APP_ZIP: s3://aws-appsignals-sample-app-prod-us-east-1/node-sample-app.zip

jobs:
  python-ec2-adot-genai:
    runs-on: ubuntu-latest
    timeout-minutes: 30
    steps:
      - uses: actions/checkout@v4
        with:
          repository: 'aws-observability/aws-application-signals-test-framework'
          ref: ${{ inputs.caller-workflow-name == 'main-build' && 'main' || github.ref }}
          fetch-depth: 0

      - name: Set Get ADOT Instrumentation command environment variable
        run: echo GET_ADOT_INSTRUMENTATION_COMMAND="aws s3 cp s3://adot-genai-js-test/aws-aws-distro-opentelemetry-node-autoinstrumentation-0.6.0-dev0.tar ./adot-instrumentation.tar --region us-east-1 && npm install ./adot-instrumentation.tar" >> $GITHUB_ENV
        # if [ "${{ github.event.repository.name }}" = "aws-otel-js-instrumentation" ]; then
        # echo GET_ADOT_INSTRUMENTATION_COMMAND="aws s3 cp s3://adot-autoinstrumentation-node-staging/${{ env.ADOT_INSTRUMENTATION_NAME }} ./${{ env.ADOT_INSTRUMENTATION_NAME }} --region us-east-1 && npm install ${{ env.ADOT_INSTRUMENTATION_NAME }}" >> $GITHUB_ENV
        # else
        # echo GET_ADOT_INSTRUMENTATION_COMMAND="npm install ${{ env.ADOT_INSTRUMENTATION_NAME }}" >> $GITHUB_ENV
        # fi

      - name: Initiate Gradlew Daemon
        uses: ./.github/workflows/actions/execute_and_retry
        continue-on-error: true
        with:
          command: "./gradlew :validator:build"
          cleanup: "./gradlew clean"
          max_retry: 3
          sleep_time: 60

      - name: Generate testing id
        run: echo TESTING_ID="${{ github.run_id }}-${{ github.run_number }}-${RANDOM}" >> $GITHUB_ENV

      - name: Generate XRay and W3C trace ID
        run: |
          ID_1="$(printf '%08x' $(date +%s))"
          ID_2="$(openssl rand -hex 12)"
          W3C_TRACE_ID="${ID_1}${ID_2}"
          XRAY_TRACE_ID="1-${ID_1}-${ID_2}"
          PARENT_ID="$(openssl rand -hex 8)"
          TRACE_ID_HEADER="Root=${XRAY_TRACE_ID};Parent=${PARENT_ID};Sampled=1"
          echo "XRAY_TRACE_ID=${XRAY_TRACE_ID}" >> $GITHUB_ENV
          echo "W3C_TRACE_ID=${W3C_TRACE_ID}" >> $GITHUB_ENV
          echo "TRACE_ID_HEADER=${TRACE_ID_HEADER}" >> $GITHUB_ENV
          echo "Generated XRay Trace ID: ${XRAY_TRACE_ID}"
          echo "Generated W3C Trace ID: ${W3C_TRACE_ID}"
          echo "Generated Trace ID Header: ${TRACE_ID_HEADER}"

      - name: Configure AWS Credentials
        uses: aws-actions/configure-aws-credentials@v4
        with:
          role-to-assume: arn:aws:iam::${{ env.E2E_TEST_ACCOUNT_ID }}:role/${{ env.E2E_TEST_ROLE_NAME }}
          aws-region: ${{ env.E2E_TEST_AWS_REGION }}

      - name: Set up terraform
        uses: ./.github/workflows/actions/execute_and_retry
        with:
          command: "wget -O- https://apt.releases.hashicorp.com/gpg | sudo gpg --dearmor -o /usr/share/keyrings/hashicorp-archive-keyring.gpg"
          post-command: 'echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/hashicorp.list
            && sudo apt update && sudo apt install terraform'

      - name: Initiate Terraform
        uses: ./.github/workflows/actions/execute_and_retry
        with:
          command: "cd ${{ env.TEST_RESOURCES_FOLDER }}/terraform/python/ec2/adot-genai && terraform init && terraform validate"
          cleanup: "rm -rf .terraform && rm -rf .terraform.lock.hcl"
          max_retry: 6

      - name: Deploy service via terraform
        working-directory: terraform/python/ec2/adot-genai
        run: |
          terraform apply -auto-approve \
            -var="aws_region=${{ env.E2E_TEST_AWS_REGION }}" \
            -var="test_id=${{ env.TESTING_ID }}" \
            -var="service_zip_url=${{ env.SAMPLE_APP_ZIP }}" \
            -var="trace_id=${{ env.TRACE_ID_HEADER }}" \
            -var="get_adot_wheel_command=${{ env.GET_ADOT_INSTRUMENTATION_COMMAND }}"

      - name: Get deployment info
        working-directory: terraform/python/ec2/adot-genai
        run: |
          echo "INSTANCE_IP=$(terraform output langchain_service_public_ip)" >> $GITHUB_ENV
          echo "INSTANCE_ID=$(terraform output langchain_service_instance_id)" >> $GITHUB_ENV

      - name: Waiting 5 Minutes for Gen AI service to be ready and emit logs, traces, and metrics
        run: sleep 300

      - name: Validate generated logs
        run: ./gradlew validator:run --args='-c python/ec2/adot-genai/log-validation.yml
          --testing-id ${{ env.TESTING_ID }}
          --endpoint http://${{ env.INSTANCE_IP }}:8000
          --region ${{ env.E2E_TEST_AWS_REGION }}
          --metric-namespace ${{ env.METRIC_NAMESPACE }}
          --log-group ${{ env.LOG_GROUP_NAME }}
          --service-name langchain-traceloop-app
          --instance-id ${{ env.INSTANCE_ID }}
          --trace-id ${{ env.W3C_TRACE_ID }}'

      - name: Validate generated traces
        if: (success() || failure()) && !cancelled()
        run: ./gradlew validator:run --args='-c python/ec2/adot-genai/trace-validation.yml
          --testing-id ${{ env.TESTING_ID }}
          --endpoint http://${{ env.INSTANCE_IP }}:8000
          --region ${{ env.E2E_TEST_AWS_REGION }}
          --metric-namespace ${{ env.METRIC_NAMESPACE }}
          --service-name langchain-traceloop-app
          --instance-id ${{ env.INSTANCE_ID }}
          --trace-id ${{ env.XRAY_TRACE_ID }}'

      - name: Validate generated metrics
        if: (success() || failure()) && !cancelled()
        run: ./gradlew validator:run --args='-c python/ec2/adot-genai/metric-validation.yml
          --testing-id ${{ env.TESTING_ID }}
          --endpoint http://${{ env.INSTANCE_IP }}:8000
          --region ${{ env.E2E_TEST_AWS_REGION }}
          --metric-namespace ${{ env.METRIC_NAMESPACE }}
          --log-group ${{ env.LOG_GROUP_NAME }}
          --service-name langchain-traceloop-app
          --instance-id ${{ env.INSTANCE_ID }}'

      - name: Cleanup
        if: always()
        continue-on-error: true
        working-directory: terraform/python/ec2/adot-genai
        run: |
          terraform destroy -auto-approve \
            -var="aws_region=${{ env.E2E_TEST_AWS_REGION }}" \
            -var="test_id=${{ env.TESTING_ID }}" \
            -var="service_zip_url=${{ env.SAMPLE_APP_ZIP }}" \
            -var="trace_id=${{ env.TRACE_ID_HEADER }}" \
            -var="get_adot_wheel_command=${{ env.GET_ADOT_INSTRUMENTATION_COMMAND }}"

.github/workflows/python-ec2-genesis-test.yml

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
 ## Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 ## SPDX-License-Identifier: Apache-2.0

-name: Python EC2 Gen AI Use Case
+name: Python EC2 Genesis Use Case
 on:
   workflow_dispatch: # be able to run the workflow on demand

Lines changed: 28 additions & 0 deletions
@@ -0,0 +1,28 @@
const { trace } = require('@opentelemetry/api');
const { registerInstrumentations } = require('@opentelemetry/instrumentation');
const { HttpInstrumentation } = require('@opentelemetry/instrumentation-http');
const tracerProvider = trace.getTracerProvider();
const {
  LangChainInstrumentation,
} = require("@traceloop/instrumentation-langchain");

const AgentsModule = require('langchain/agents');
const ChainsModule = require('langchain/chains');
const RunnableModule = require('@langchain/core/runnables');
const ToolsModule = require('@langchain/core/tools');
const VectorStoresModule = require('@langchain/core/vectorstores');

const traceloop = require("@traceloop/node-server-sdk");

traceloop.initialize({
  appName: "myTestApp",
  instrumentModules: {
    langchain: {
      runnablesModule: RunnableModule,
      toolsModule: ToolsModule,
      chainsModule: ChainsModule,
      agentsModule: AgentsModule,
      vectorStoreModule: VectorStoresModule,
    },
  },
});
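
Note that the OpenTelemetry pieces imported at the top of this file (registerInstrumentations, HttpInstrumentation, LangChainInstrumentation, and the tracer provider lookup) are never referenced below; instrumentation is wired up entirely through traceloop.initialize() and its instrumentModules map. If manual registration had been the intent, a sketch using those imports might look like the following. This is an assumption for illustration only, not what the commit does.

// Hypothetical manual wiring for the otherwise-unused imports above.
registerInstrumentations({
  tracerProvider,
  instrumentations: [
    new HttpInstrumentation(),      // HTTP server/client spans
    new LangChainInstrumentation(), // LangChain chain/agent/tool spans
  ],
});
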
Lines changed: 64 additions & 0 deletions
@@ -0,0 +1,64 @@
const express = require('express');
const { BedrockChat } = require("@langchain/community/chat_models/bedrock");
const { ChatPromptTemplate } = require("@langchain/core/prompts");
const traceloop = require("@traceloop/node-server-sdk");
const logger = require('pino')();

const app = express();
app.use(express.json());
const PORT = parseInt(process.env.SAMPLE_APP_PORT || '8000', 10);

const llm = new BedrockChat({
  model: "anthropic.claude-3-sonnet-20240229-v1:0",
  region: "us-east-1",
  credentials: {
    accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID,
    secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY,
  },
  temperature: 0.7,
});

const prompt = ChatPromptTemplate.fromMessages([
  [
    "system",
    "You are a helpful assistant. Provide a helpful response to the following user input.",
  ],
  ["human", "{input}"],
]);

const chain = prompt.pipe(llm);

app.get('/health', (req, res) => {
  res.json({ status: 'healthy' });
});

app.post('/ai-chat', async (req, res) => {
  const { message } = req.body;

  if (!message) {
    return res.status(400).json({ error: 'Message is required' });
  }

  try {
    logger.info(`Question asked: ${message}`);

    // Wrap the Bedrock call in Traceloop workflow/task spans.
    const response = await traceloop.withWorkflow({ name: "sample_chat" }, () => {
      return traceloop.withTask({ name: "parent_task" }, () => {
        // Only {input} is referenced by the prompt; the language fields are unused extras.
        return chain.invoke({
          input_language: "English",
          output_language: "English",
          input: message,
        });
      });
    });

    res.json({ response: response.content });
  } catch (error) {
    logger.error(`Error processing request: ${error.message}`);
    res.status(500).json({ error: 'Internal server error' });
  }
});

app.listen(PORT, () => {
  logger.info(`GenAI service listening on port ${PORT}`);
});
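
For a quick local check of the two endpoints above, a minimal smoke test might look like this (assumes Node 18+ for the global fetch and the default port 8000; not part of the commit):

// Hit /health, then /ai-chat, against a locally running copy of the sample service.
const BASE_URL = process.env.SAMPLE_APP_URL || 'http://localhost:8000'; // assumed default

async function smokeTest() {
  const health = await fetch(`${BASE_URL}/health`);
  console.log('health:', await health.json());

  const chat = await fetch(`${BASE_URL}/ai-chat`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ message: 'Say hello in one sentence.' }),
  });
  console.log('ai-chat status:', chat.status);
  console.log('ai-chat body:', await chat.json());
}

smokeTest().catch(console.error);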
