diff --git a/generate-release-notes.js b/generate-release-notes.js
index d4e4eff2f7c..d08a4fab308 100644
--- a/generate-release-notes.js
+++ b/generate-release-notes.js
@@ -6,8 +6,8 @@ const octokit = new Octokit({
auth: process.env.GITHUB_TOKEN,
});
-const owner = 'your-github-username';
-const repo = 'your-repository-name';
+const owner = 'WriteMayur';
+const repo = 'docs-website';
const developBranch = 'develop';
const releaseNotesDir = 'release-notes';
const releaseNotesFile = path.join(releaseNotesDir, 'release-notes.md');
diff --git a/src/content/docs/serverless-function-monitoring/aws-lambda-monitoring/response-stream.mdx b/src/content/docs/serverless-function-monitoring/aws-lambda-monitoring/response-stream.mdx
new file mode 100644
index 00000000000..a74aa4f675c
--- /dev/null
+++ b/src/content/docs/serverless-function-monitoring/aws-lambda-monitoring/response-stream.mdx
@@ -0,0 +1,116 @@
+---
+title: New Relic integration for Lambda response streaming
+metaDescription: "Learn how to monitor AWS Lambda functions that use response streaming in New Relic."
+freshnessValidatedDate: never
+tags:
+ - aws
+ - lambda
+ - response stream monitoring
+ - ai monitoring
+---
+
+
+Integrate AWS [Lambda response streaming](https://aws.amazon.com/blogs/compute/introducing-aws-lambda-response-streaming/) with the New Relic platform to monitor Node.js Lambda functions that use response streaming. This integration gives you real-time insight into the performance and behavior of serverless applications that run AI workloads.
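+
+For context, here's what a response-streaming handler looks like in the Node.js runtime. This sketch uses `awslambda.streamifyResponse`, the wrapper AWS provides as a global in its managed Node.js runtime; the handler body is illustrative only and is not part of the New Relic integration itself:
+
+```javascript
+// The awslambda global is injected by the AWS-managed Node.js runtime.
+// streamifyResponse marks the handler as capable of streaming its response.
+exports.handler = awslambda.streamifyResponse(
+  async (event, responseStream, context) => {
+    // Write chunks as they become available, for example tokens
+    // returned incrementally by an LLM call.
+    responseStream.write('First chunk of the response\n');
+    responseStream.write('Second chunk of the response\n');
+    // End the stream so the invocation can complete.
+    responseStream.end();
+  }
+);
+```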
+
+
+
+## Prerequisites [#prerequisites]
+
+* A New Relic account with either an [admin role](/docs/accounts/accounts-billing/new-relic-one-user-management/introduction-managing-users/) or the Infrastructure manager [add-on role](/docs/accounts/accounts-billing/new-relic-one-user-management/introduction-managing-users/).
+* Check that your Lambda function meets our [compatibility and requirements](/docs/serverless-function-monitoring/aws-lambda-monitoring/instrument-lambda-function/compatibility-requirement-lambda-monitoring).
+* Install and configure [New Relic Lambda monitoring](/docs/serverless-function-monitoring/aws-lambda-monitoring/instrument-lambda-function/compatibility-requirement-lambda-monitoring).
+
+
+## Integrate Lambda response streaming [#integrate-response-streaming]
+
+To enable response streaming monitoring for your Node.js Lambda functions:
+
+1. Add the following [environment variables](/docs/serverless-function-monitoring/aws-lambda-monitoring/instrument-lambda-function/env-variables-lambda) to each Lambda function you want New Relic to monitor for response streaming:
+
+ ```bash
+ NEW_RELIC_MACHINE_LEARNING_ENABLED=true
+ NEW_RELIC_ML_INSIGHTS_EVENTS_ENABLED=true
+ NEW_RELIC_AI_MONITORING_ENABLED=true
+ ```
+2. Save the changes and deploy the function.
+
+    After deployment, you can view the Lambda response streaming data in the New Relic platform. For more information, see [View and use the Lambda response streaming data](#view-response-streaming-data).
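+
+If you manage function configuration in code rather than in the Lambda console, you can also apply the variables programmatically. Below is a minimal sketch using the AWS SDK for JavaScript v3; the function name is a placeholder, and `Environment.Variables` replaces the function's entire variable map, so merge in any existing variables first:
+
+```javascript
+import {
+  LambdaClient,
+  UpdateFunctionConfigurationCommand,
+} from '@aws-sdk/client-lambda';
+
+const client = new LambdaClient({});
+
+// Placeholder function name. Variables replaces the whole map,
+// so include any variables the function already relies on.
+await client.send(
+  new UpdateFunctionConfigurationCommand({
+    FunctionName: 'my-streaming-function',
+    Environment: {
+      Variables: {
+        NEW_RELIC_MACHINE_LEARNING_ENABLED: 'true',
+        NEW_RELIC_ML_INSIGHTS_EVENTS_ENABLED: 'true',
+        NEW_RELIC_AI_MONITORING_ENABLED: 'true',
+      },
+    },
+  })
+);
+```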
+
+
+## View and use the Lambda response streaming data [#view-response-streaming-data]
+
+To view the Lambda response streaming data in the New Relic platform:
+
+1. Log in to your New Relic account.
+2. In the left navigation pane, go to **All Capabilities > Serverless Functions**.
+3. Select the Lambda function that uses response streaming.
+4. Click **AI Responses** in the **More views** section.
+
+
+
+ On the **AI Responses** page, monitor the following metrics related to AI responses:
+
+    | Metric | Description |
+    |---|---|
+    | Total responses | Displays the total number of responses generated by the Lambda function. |
+    | Response time | Displays the time taken for the Lambda function to generate and stream the response. |
+    | Token usage per response | Displays the number of tokens processed per response. |
+    | Errors | Lists any errors encountered during the response streaming process. |
+    | Number of calls to LLMs | Tracks how many times the Lambda function interacts with LLMs to offer insight into usage patterns. |
+    | Average tokens per response | Provides the average number of tokens processed in each response to understand data volume. |
+    | Positive and negative feedback | Displays user feedback sentiment to assess AI response quality. |
+    | Responses with feedback | Counts responses that have received user feedback for qualitative analysis. |
+    | Average token rate | Displays the rate at which tokens are processed per second to offer insight into processing speed. |