@@ -3,7 +3,7 @@ ms.topic: include
3
3
manager : nitinme
4
4
ms.service : azure-ai-openai
5
5
ms.topic : include
6
- ms.date : 2/1 /2024
6
+ ms.date: 9/10/2024
7
7
ms.reviewer : v-baolianzou
8
8
ms.author : eur
9
9
author : eric-urban
@@ -102,16 +102,6 @@ npm init
102
102
103
103
Install the client libraries with:
104
104
105
- ## [ ** TypeScript** ] ( #tab/typescript )
106
-
107
- ``` console
108
- npm install openai @azure/openai @azure/identity
109
- ```
110
-
111
- The ` @azure/openai ` package provides the types the Azure service objects.
112
-
113
- ## [ ** JavaScript** ] ( #tab/javascript )
114
-
115
105
``` console
116
106
npm install openai @azure/identity
117
107
```
@@ -125,6 +115,43 @@ Create a new file named _Whisper.js_ and open it in your preferred code editor.
125
115
#### [TypeScript](#tab/typescript)
126
116
127
117
``` typescript
118
+ import " dotenv/config" ;
119
+ import { createReadStream } from " fs" ;
120
+ import { AzureOpenAI } from " openai" ;
121
+
122
+ // You will need to set these environment variables or edit the following values
123
+ const audioFilePath = process .env [" AUDIO_FILE_PATH" ] || " <audio file path>" ;
124
+ const endpoint = process .env [" AZURE_OPENAI_ENDPOINT" ] || " <endpoint>" ;
125
+ const apiKey = process .env [" AZURE_OPENAI_API_KEY" ] || " <api key>" ;
126
+
127
+ // Required Azure OpenAI deployment name and API version
128
+ const apiVersion = " 2024-07-01-preview" ;
129
+ const deploymentName = " whisper" ;
130
+
131
+ function getClient(): AzureOpenAI {
132
+ return new AzureOpenAI ({
133
+ endpoint ,
134
+ apiKey ,
135
+ apiVersion ,
136
+ deployment: deploymentName ,
137
+ });
138
+ }
139
+
140
+ export async function main() {
141
+ console .log (" == Transcribe Audio Sample ==" );
142
+
143
+ const client = getClient ();
144
+ const result = await client .audio .transcriptions .create ({
145
+ model: " " ,
146
+ file: createReadStream (audioFilePath ),
147
+ });
148
+
149
+ console .log (` Transcription: ${result .text } ` );
150
+ }
151
+
152
+ main ().catch ((err ) => {
153
+ console .error (" The sample encountered an error:" , err );
154
+ });
128
155
```
129
156
130
157
1. Build the application with the following command:
@@ -143,6 +170,43 @@ Create a new file named _Whisper.js_ and open it in your preferred code editor.
143
170
#### [JavaScript](#tab/javascript)
144
171
145
172
```javascript
173
require("dotenv/config");
const { createReadStream } = require("fs");
const { AzureOpenAI } = require("openai");

// You will need to set these environment variables or edit the following values
const audioFilePath = process.env["AUDIO_FILE_PATH"] || "<audio file path>";
const endpoint = process.env["AZURE_OPENAI_ENDPOINT"] || "<endpoint>";
const apiKey = process.env["AZURE_OPENAI_API_KEY"] || "<api key>";

// Required Azure OpenAI deployment name and API version
const apiVersion = "2024-07-01-preview";
const deploymentName = "whisper";

// Builds an AzureOpenAI client bound to the Whisper deployment.
function getClient() {
  return new AzureOpenAI({
    endpoint,
    apiKey,
    apiVersion,
    deployment: deploymentName,
  });
}

// Transcribes the audio file at `audioFilePath` and prints the resulting text.
// NOTE: `export` removed — this is a CommonJS script (it uses `require`), and
// `export` is ES-module-only syntax that would make `node Whisper.js` fail
// with a SyntaxError. The script invokes main() itself below.
async function main() {
  console.log("== Transcribe Audio Sample ==");

  const client = getClient();
  const result = await client.audio.transcriptions.create({
    // The model is resolved from the client's `deployment` option,
    // so an empty string is passed here.
    model: "",
    file: createReadStream(audioFilePath),
  });

  console.log(`Transcription: ${result.text}`);
}

main().catch((err) => {
  console.error("The sample encountered an error:", err);
});
146
210
```
147
211
148
212
Run the script with the following command:
0 commit comments