Commit 991bfbe

[OpenAI] Publish audio samples (Azure#27202)
1 parent f8048d9 commit 991bfbe

File tree

8 files changed: +305 -26 lines

8 files changed

+305
-26
lines changed

sdk/openai/openai/samples/v1-beta/javascript/README.md

Lines changed: 19 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -13,17 +13,20 @@ urlFragment: openai-javascript-beta
1313

1414
These sample programs show how to use the JavaScript client libraries for Azure OpenAI in some common scenarios.
1515

16-
| **File Name** | **Description** |
17-
| --------------------------------------------- | ------------------------------------------------------------------------ |
18-
| [bringYourOwnData.js][bringyourowndata] | chat completions with your own data. |
19-
| [chatCompletions.js][chatcompletions] | get chat completions. |
20-
| [completions.js][completions] | get completions. |
21-
| [functions.js][functions] | get chat completions with functions. |
22-
| [getImages.js][getimages] | generates images from prompts using Azure OpenAI Batch Image Generation. |
23-
| [listChatCompletions.js][listchatcompletions] | list chat completions. |
24-
| [listCompletions.js][listcompletions] | list completions. |
25-
| [readableStream.js][readablestream] | stream chat completions. |
26-
| [openAi.js][openai] | get completions using the OpenAI API. |
16+
| **File Name** | **Description** |
17+
| ------------------------------------------------------------------------------- | ------------------------------------------------------------------------ |
18+
| [audioTranscription.js][audiotranscription] | audio transcription. |
19+
| [audioTranslation.js][audiotranslation] | audio translation. |
20+
| [bringYourOwnData.js][bringyourowndata] | chat completions with your own data. |
21+
| [chatCompletions.js][chatcompletions] | get chat completions. |
22+
| [completions.js][completions] | get completions. |
23+
| [functions.js][functions] | get chat completions with functions. |
24+
| [getImages.js][getimages] | generates images from prompts using Azure OpenAI Batch Image Generation. |
25+
| [listChatCompletions.js][listchatcompletions] | list chat completions. |
26+
| [listChatCompletionsWithContentFilter.js][listchatcompletionswithcontentfilter] | get completions. |
27+
| [listCompletions.js][listcompletions] | list completions. |
28+
| [readableStream.js][readablestream] | stream chat completions. |
29+
| [openAi.js][openai] | get completions using the OpenAI API. |
2730

2831
## Prerequisites
2932

@@ -52,25 +55,28 @@ npm install
5255
3. Run whichever samples you like (note that some samples may require additional setup, see the table above):
5356

5457
```bash
55-
node bringYourOwnData.js
58+
node audioTranscription.js
5659
```
5760

5861
Alternatively, run a single sample with the correct environment variables set (setting up the `.env` file is not required if you do this), for example (cross-platform):
5962

6063
```bash
61-
npx cross-env ENDPOINT="<endpoint>" AZURE_API_KEY="<azure api key>" AZURE_SEARCH_ENDPOINT="<azure search endpoint>" AZURE_SEARCH_KEY="<azure search key>" AZURE_SEARCH_INDEX="<azure search index>" node bringYourOwnData.js
64+
npx cross-env ENDPOINT="<endpoint>" AZURE_API_KEY="<azure api key>" node audioTranscription.js
6265
```
6366

6467
## Next Steps
6568

6669
Take a look at our [API Documentation][apiref] for more information about the APIs that are available in the clients.
6770

71+
[audiotranscription]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v1-beta/javascript/audioTranscription.js
72+
[audiotranslation]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v1-beta/javascript/audioTranslation.js
6873
[bringyourowndata]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v1-beta/javascript/bringYourOwnData.js
6974
[chatcompletions]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v1-beta/javascript/chatCompletions.js
7075
[completions]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v1-beta/javascript/completions.js
7176
[functions]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v1-beta/javascript/functions.js
7277
[getimages]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v1-beta/javascript/getImages.js
7378
[listchatcompletions]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v1-beta/javascript/listChatCompletions.js
79+
[listchatcompletionswithcontentfilter]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v1-beta/javascript/listChatCompletionsWithContentFilter.js
7480
[listcompletions]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v1-beta/javascript/listCompletions.js
7581
[readablestream]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v1-beta/javascript/readableStream.js
7682
[openai]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v1-beta/javascript/openAi.js
Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,35 @@
1+
// Copyright (c) Microsoft Corporation.
2+
// Licensed under the MIT License.
3+
4+
/**
5+
* Demonstrates how to transcribe the content of an audio file.
6+
*
7+
* @summary audio transcription.
8+
*/
9+
10+
const { OpenAIClient, AzureKeyCredential } = require("@azure/openai");
11+
const { readFile } = require("fs/promises");
12+
13+
// Load the .env file if it exists
14+
require("dotenv").config();
15+
16+
// You will need to set these environment variables or edit the following values
17+
const endpoint = process.env["ENDPOINT"] || "<endpoint>";
18+
const azureApiKey = process.env["AZURE_API_KEY"] || "<api key>";
19+
20+
async function main() {
21+
console.log("== Transcribe Audio Sample ==");
22+
23+
const client = new OpenAIClient(endpoint, new AzureKeyCredential(azureApiKey));
24+
const deploymentName = "whisper-deployment";
25+
const audio = await readFile("./assets/audio/countdown.wav");
26+
const result = await client.getAudioTranscription(deploymentName, audio);
27+
28+
console.log(`Transcription: ${result.text}`);
29+
}
30+
31+
main().catch((err) => {
32+
console.error("The sample encountered an error:", err);
33+
});
34+
35+
module.exports = { main };
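Beyond transcribing a single file, the same client and Whisper deployment can be reused across calls. The following is a minimal sketch, not part of the commit, that batch-transcribes a list of local files; it relies only on the `getAudioTranscription` call shown above, and any file path other than `countdown.wav` is a hypothetical placeholder.

```javascript
// Minimal sketch: transcribe several local files with one client.
// Assumes the same ENDPOINT/AZURE_API_KEY variables and a deployment named
// "whisper-deployment", as in the sample above.
const { OpenAIClient, AzureKeyCredential } = require("@azure/openai");
const { readFile } = require("fs/promises");

const endpoint = process.env["ENDPOINT"] || "<endpoint>";
const azureApiKey = process.env["AZURE_API_KEY"] || "<api key>";

async function transcribeAll(paths) {
  const client = new OpenAIClient(endpoint, new AzureKeyCredential(azureApiKey));
  const deploymentName = "whisper-deployment";
  const results = [];
  for (const path of paths) {
    // Read each file and reuse the client for every transcription request.
    const audio = await readFile(path);
    const { text } = await client.getAudioTranscription(deploymentName, audio);
    results.push({ path, text });
  }
  return results;
}

transcribeAll(["./assets/audio/countdown.wav"]).then((results) => {
  for (const { path, text } of results) {
    console.log(`${path}: ${text}`);
  }
});
```

Creating the client once and looping over files keeps credential handling in one place and avoids rebuilding the client per request.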
Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,35 @@
1+
// Copyright (c) Microsoft Corporation.
2+
// Licensed under the MIT License.
3+
4+
/**
5+
* Demonstrates how to translate the content of an audio file.
6+
*
7+
* @summary audio translation.
8+
*/
9+
10+
const { OpenAIClient, AzureKeyCredential } = require("@azure/openai");
11+
const { readFile } = require("fs/promises");
12+
13+
// Load the .env file if it exists
14+
require("dotenv").config();
15+
16+
// You will need to set these environment variables or edit the following values
17+
const endpoint = process.env["ENDPOINT"] || "<endpoint>";
18+
const azureApiKey = process.env["AZURE_API_KEY"] || "<api key>";
19+
20+
async function main() {
21+
console.log("== Translate Audio Sample ==");
22+
23+
const client = new OpenAIClient(endpoint, new AzureKeyCredential(azureApiKey));
24+
const deploymentName = "whisper-deployment";
25+
const audio = await readFile("./assets/audio/countdown.wav");
26+
const result = await client.getAudioTranslation(deploymentName, audio);
27+
28+
console.log(`Translation: ${result.text}`);
29+
}
30+
31+
main().catch((err) => {
32+
console.error("The sample encountered an error:", err);
33+
});
34+
35+
module.exports = { main };
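Since the commit adds both `getAudioTranscription` and `getAudioTranslation`, a quick way to see the difference is to run both against the same recording: transcription returns the spoken text in the source language, while translation produces English text. The sketch below is illustrative only, not part of the commit, and reuses the deployment name and audio asset from the samples above.

```javascript
// Minimal sketch: compare transcription and translation output for one file.
// Uses only the two client methods added by this commit.
const { OpenAIClient, AzureKeyCredential } = require("@azure/openai");
const { readFile } = require("fs/promises");

const endpoint = process.env["ENDPOINT"] || "<endpoint>";
const azureApiKey = process.env["AZURE_API_KEY"] || "<api key>";

async function compare() {
  const client = new OpenAIClient(endpoint, new AzureKeyCredential(azureApiKey));
  const deploymentName = "whisper-deployment";
  const audio = await readFile("./assets/audio/countdown.wav");

  // Source-language text.
  const transcription = await client.getAudioTranscription(deploymentName, audio);
  // English translation of the same audio.
  const translation = await client.getAudioTranslation(deploymentName, audio);

  console.log(`Transcription: ${transcription.text}`);
  console.log(`Translation:   ${translation.text}`);
}

compare().catch((err) => {
  console.error("The sketch encountered an error:", err);
});
```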
Lines changed: 65 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,65 @@
1+
// Copyright (c) Microsoft Corporation.
2+
// Licensed under the MIT License.
3+
4+
/**
5+
* Demonstrates how to get completions for the provided prompt and parse output for content filter
6+
*
7+
* @summary get completions.
8+
*/
9+
10+
const { OpenAIClient, AzureKeyCredential } = require("@azure/openai");
11+
12+
// Load the .env file if it exists
13+
require("dotenv").config();
14+
15+
// You will need to set these environment variables or edit the following values
16+
const endpoint = process.env["ENDPOINT"] || "<endpoint>";
17+
const azureApiKey = process.env["AZURE_API_KEY"] || "<api key>";
18+
19+
const messages = [
20+
{ role: "system", content: "You are a helpful assistant. You will talk like a pirate." },
21+
{ role: "user", content: "Can you help me?" },
22+
{ role: "assistant", content: "Arrrr! Of course, me hearty! What can I do for ye?" },
23+
{ role: "user", content: "What's the best way to train a parrot?" },
24+
];
25+
26+
async function main() {
27+
console.log("== Get completions Sample ==");
28+
29+
const client = new OpenAIClient(endpoint, new AzureKeyCredential(azureApiKey));
30+
const deploymentId = "text-davinci-003";
31+
const events = await client.listChatCompletions(deploymentId, messages, { maxTokens: 128 });
32+
33+
for await (const event of events) {
34+
for (const choice of event.choices) {
35+
console.log(choice.message);
36+
if (!choice.contentFilterResults) {
37+
console.log("No content filter is found");
38+
return;
39+
}
40+
if (choice.contentFilterResults.error) {
41+
console.log(
42+
`Content filter ran into the error ${choice.contentFilterResults.error.code}: ${choice.contentFilterResults.error.message}`
43+
);
44+
} else {
45+
const { hate, sexual, selfHarm, violence } = choice.contentFilterResults;
46+
console.log(`Hate category is filtered: ${hate?.filtered} with ${hate?.severity} severity`);
47+
console.log(
48+
`Sexual category is filtered: ${sexual?.filtered} with ${sexual?.severity} severity`
49+
);
50+
console.log(
51+
`Self-harm category is filtered: ${selfHarm?.filtered} with ${selfHarm?.severity} severity`
52+
);
53+
console.log(
54+
`Violence category is filtered: ${violence?.filtered} with ${violence?.severity} severity`
55+
);
56+
}
57+
}
58+
}
59+
}
60+
61+
main().catch((err) => {
62+
console.error("The sample encountered an error:", err);
63+
});
64+
65+
module.exports = { main };
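The per-category logging in this sample can be folded into a small helper that iterates the same four categories (`hate`, `sexual`, `selfHarm`, `violence`) and reads only the `filtered` and `severity` fields the sample already uses. This is a minimal sketch, not part of the commit; any categories beyond those four are not assumed here.

```javascript
// Minimal sketch: report the content filter categories used in the sample
// above without repeating a console.log per category.
function reportContentFilter(contentFilterResults) {
  if (!contentFilterResults) {
    console.log("No content filter is found");
    return;
  }
  if (contentFilterResults.error) {
    const { code, message } = contentFilterResults.error;
    console.log(`Content filter ran into the error ${code}: ${message}`);
    return;
  }
  // Only the categories accessed by the sample itself.
  for (const category of ["hate", "sexual", "selfHarm", "violence"]) {
    const result = contentFilterResults[category];
    console.log(
      `${category} category is filtered: ${result?.filtered} with ${result?.severity} severity`
    );
  }
}

// Usage inside the streaming loop shown above:
//   for await (const event of events) {
//     for (const choice of event.choices) {
//       reportContentFilter(choice.contentFilterResults);
//     }
//   }
module.exports = { reportContentFilter };
```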

sdk/openai/openai/samples/v1-beta/typescript/README.md

Lines changed: 19 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -13,17 +13,20 @@ urlFragment: openai-typescript-beta
1313

1414
These sample programs show how to use the TypeScript client libraries for Azure OpenAI in some common scenarios.
1515

16-
| **File Name** | **Description** |
17-
| --------------------------------------------- | ------------------------------------------------------------------------ |
18-
| [bringYourOwnData.ts][bringyourowndata] | chat completions with your own data. |
19-
| [chatCompletions.ts][chatcompletions] | get chat completions. |
20-
| [completions.ts][completions] | get completions. |
21-
| [functions.ts][functions] | get chat completions with functions. |
22-
| [getImages.ts][getimages] | generates images from prompts using Azure OpenAI Batch Image Generation. |
23-
| [listChatCompletions.ts][listchatcompletions] | list chat completions. |
24-
| [listCompletions.ts][listcompletions] | list completions. |
25-
| [readableStream.ts][readablestream] | stream chat completions. |
26-
| [openAi.ts][openai] | get completions using the OpenAI API. |
16+
| **File Name** | **Description** |
17+
| ------------------------------------------------------------------------------- | ------------------------------------------------------------------------ |
18+
| [audioTranscription.ts][audiotranscription] | audio transcription. |
19+
| [audioTranslation.ts][audiotranslation] | audio translation. |
20+
| [bringYourOwnData.ts][bringyourowndata] | chat completions with your own data. |
21+
| [chatCompletions.ts][chatcompletions] | get chat completions. |
22+
| [completions.ts][completions] | get completions. |
23+
| [functions.ts][functions] | get chat completions with functions. |
24+
| [getImages.ts][getimages] | generates images from prompts using Azure OpenAI Batch Image Generation. |
25+
| [listChatCompletions.ts][listchatcompletions] | list chat completions. |
26+
| [listChatCompletionsWithContentFilter.ts][listchatcompletionswithcontentfilter] | get completions. |
27+
| [listCompletions.ts][listcompletions] | list completions. |
28+
| [readableStream.ts][readablestream] | stream chat completions. |
29+
| [openAi.ts][openai] | get completions using the OpenAI API. |
2730

2831
## Prerequisites
2932

@@ -64,25 +67,28 @@ npm run build
6467
4. Run whichever samples you like (note that some samples may require additional setup, see the table above):
6568

6669
```bash
67-
node dist/bringYourOwnData.js
70+
node dist/audioTranscription.js
6871
```
6972

7073
Alternatively, run a single sample with the correct environment variables set (setting up the `.env` file is not required if you do this), for example (cross-platform):
7174

7275
```bash
73-
npx cross-env ENDPOINT="<endpoint>" AZURE_API_KEY="<azure api key>" AZURE_SEARCH_ENDPOINT="<azure search endpoint>" AZURE_SEARCH_KEY="<azure search key>" AZURE_SEARCH_INDEX="<azure search index>" node dist/bringYourOwnData.js
76+
npx cross-env ENDPOINT="<endpoint>" AZURE_API_KEY="<azure api key>" node dist/audioTranscription.js
7477
```
7578

7679
## Next Steps
7780

7881
Take a look at our [API Documentation][apiref] for more information about the APIs that are available in the clients.
7982

83+
[audiotranscription]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v1-beta/typescript/src/audioTranscription.ts
84+
[audiotranslation]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v1-beta/typescript/src/audioTranslation.ts
8085
[bringyourowndata]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v1-beta/typescript/src/bringYourOwnData.ts
8186
[chatcompletions]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v1-beta/typescript/src/chatCompletions.ts
8287
[completions]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v1-beta/typescript/src/completions.ts
8388
[functions]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v1-beta/typescript/src/functions.ts
8489
[getimages]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v1-beta/typescript/src/getImages.ts
8590
[listchatcompletions]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v1-beta/typescript/src/listChatCompletions.ts
91+
[listchatcompletionswithcontentfilter]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v1-beta/typescript/src/listChatCompletionsWithContentFilter.ts
8692
[listcompletions]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v1-beta/typescript/src/listCompletions.ts
8793
[readablestream]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v1-beta/typescript/src/readableStream.ts
8894
[openai]: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/openai/openai/samples/v1-beta/typescript/src/openAi.ts
Lines changed: 34 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,34 @@
1+
// Copyright (c) Microsoft Corporation.
2+
// Licensed under the MIT License.
3+
4+
/**
5+
* Demonstrates how to transcribe the content of an audio file.
6+
*
7+
* @summary audio transcription.
8+
*/
9+
10+
import { OpenAIClient, AzureKeyCredential } from "@azure/openai";
11+
import { readFile } from "fs/promises";
12+
13+
// Load the .env file if it exists
14+
import dotenv from "dotenv";
15+
dotenv.config();
16+
17+
// You will need to set these environment variables or edit the following values
18+
const endpoint = process.env["ENDPOINT"] || "<endpoint>";
19+
const azureApiKey = process.env["AZURE_API_KEY"] || "<api key>";
20+
21+
export async function main() {
22+
console.log("== Transcribe Audio Sample ==");
23+
24+
const client = new OpenAIClient(endpoint, new AzureKeyCredential(azureApiKey));
25+
const deploymentName = "whisper-deployment";
26+
const audio = await readFile("./assets/audio/countdown.wav");
27+
const result = await client.getAudioTranscription(deploymentName, audio);
28+
29+
console.log(`Transcription: ${result.text}`);
30+
}
31+
32+
main().catch((err) => {
33+
console.error("The sample encountered an error:", err);
34+
});
Lines changed: 34 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,34 @@
1+
// Copyright (c) Microsoft Corporation.
2+
// Licensed under the MIT License.
3+
4+
/**
5+
* Demonstrates how to translate the content of an audio file.
6+
*
7+
* @summary audio translation.
8+
*/
9+
10+
import { OpenAIClient, AzureKeyCredential } from "@azure/openai";
11+
import { readFile } from "fs/promises";
12+
13+
// Load the .env file if it exists
14+
import dotenv from "dotenv";
15+
dotenv.config();
16+
17+
// You will need to set these environment variables or edit the following values
18+
const endpoint = process.env["ENDPOINT"] || "<endpoint>";
19+
const azureApiKey = process.env["AZURE_API_KEY"] || "<api key>";
20+
21+
export async function main() {
22+
console.log("== Translate Audio Sample ==");
23+
24+
const client = new OpenAIClient(endpoint, new AzureKeyCredential(azureApiKey));
25+
const deploymentName = "whisper-deployment";
26+
const audio = await readFile("./assets/audio/countdown.wav");
27+
const result = await client.getAudioTranslation(deploymentName, audio);
28+
29+
console.log(`Translation: ${result.text}`);
30+
}
31+
32+
main().catch((err) => {
33+
console.error("The sample encountered an error:", err);
34+
});
