@@ -24,12 +24,12 @@ yarn add openai
 ```js
 import OpenAI from 'openai';
 
-const openAI = new OpenAI({
+const openai = new OpenAI({
   apiKey: 'my api key', // defaults to process.env["OPENAI_API_KEY"]
 });
 
 async function main() {
-  const completion = await openAI.completions.create({
+  const completion = await openai.completions.create({
     model: 'text-davinci-002',
     prompt: 'Say this is a test',
     max_tokens: 6,
@@ -71,7 +71,7 @@ If you like, you may reference our types directly:
 ```ts
 import OpenAI from 'openai';
 
-const openAI = new OpenAI({
+const openai = new OpenAI({
   apiKey: 'my api key', // defaults to process.env["OPENAI_API_KEY"]
 });
 
@@ -82,7 +82,7 @@ async function main() {
     max_tokens: 6,
     temperature: 0,
   };
-  const completion: OpenAI.Completion = await openAI.completions.create(params);
+  const completion: OpenAI.Completion = await openai.completions.create(params);
 }
 main().catch(console.error);
 ```
@@ -91,17 +91,38 @@ Documentation for each method, request param, and response field are available i
 
 ## File Uploads
 
-Request parameters that correspond to file uploads can be passed as either a `FormData.Blob` or a `FormData.File` instance.
+Request parameters that correspond to file uploads can be passed in many different forms:
 
-We provide a `fileFromPath` helper function to easily create `FormData.File` instances from a given class.
+- `File` (or an object with the same structure)
+- a `fetch` `Response` (or an object with the same structure)
+- an `fs.ReadStream`
+- the return value of our `toFile` helper
 
 ```ts
-import OpenAI, { fileFromPath } from 'openai';
+import fs from 'fs';
+import fetch from 'node-fetch';
+import OpenAI, { toFile } from 'openai';
 
-const openAI = new OpenAI();
+const openai = new OpenAI();
 
-const file = await fileFromPath('input.jsonl');
-await openAI.files.create({ file: file, purpose: 'fine-tune' });
+// If you have access to Node `fs` we recommend using `fs.createReadStream()`:
+await openai.files.create({ file: fs.createReadStream('input.jsonl'), purpose: 'fine-tune' });
+
+// Or if you have the web `File` API you can pass a `File` instance:
+await openai.files.create({ file: new File(['my bytes'], 'input.jsonl'), purpose: 'fine-tune' });
+
+// You can also pass a `fetch` `Response`:
+await openai.files.create({ file: await fetch('https://somesite/input.jsonl'), purpose: 'fine-tune' });
+
+// Finally, if none of the above are convenient, you can use our `toFile` helper:
+await openai.files.create({
+  file: await toFile(Buffer.from('my bytes'), 'input.jsonl'),
+  purpose: 'fine-tune',
+});
+await openai.files.create({
+  file: await toFile(new Uint8Array([0, 1, 2]), 'input.jsonl'),
+  purpose: 'fine-tune',
+});
 ```
 
 ## Handling errors
@@ -112,7 +133,7 @@ a subclass of `APIError` will be thrown:
 
 ```ts
 async function main() {
-  const fineTune = await openAI.fineTunes
+  const fineTune = await openai.fineTunes
     .create({ training_file: 'file-XGinujblHPwGLSztz8cPS8XY' })
     .catch((err) => {
       if (err instanceof OpenAI.APIError) {
@@ -150,12 +171,12 @@ You can use the `maxRetries` option to configure or disable this:
 <!-- prettier-ignore -->
 ```js
 // Configure the default for all requests:
-const openAI = new OpenAI({
+const openai = new OpenAI({
   maxRetries: 0, // default is 2
 });
 
 // Or, configure per-request:
-openAI.embeddings.create({ model: 'text-similarity-babbage-001', input: 'The food was delicious and the waiter...' }, {
+openai.embeddings.create({ model: 'text-similarity-babbage-001', input: 'The food was delicious and the waiter...' }, {
   maxRetries: 5,
 });
 ```
@@ -167,12 +188,12 @@ Requests time out after 60 seconds by default. You can configure this with a `ti
 <!-- prettier-ignore -->
 ```ts
 // Configure the default for all requests:
-const openAI = new OpenAI({
+const openai = new OpenAI({
   timeout: 20 * 1000, // 20 seconds (default is 60s)
 });
 
 // Override per-request:
-openAI.edits.create({ model: 'text-davinci-edit-001', input: 'What day of the wek is it?', instruction: 'Fix the spelling mistakes' }, {
+openai.edits.create({ model: 'text-davinci-edit-001', input: 'What day of the wek is it?', instruction: 'Fix the spelling mistakes' }, {
   timeout: 5 * 1000,
 });
 ```
@@ -193,12 +214,12 @@ import http from 'http';
 import HttpsProxyAgent from 'https-proxy-agent';
 
 // Configure the default for all requests:
-const openAI = new OpenAI({
+const openai = new OpenAI({
   httpAgent: new HttpsProxyAgent(process.env.PROXY_URL),
 });
 
 // Override per-request:
-openAI.models.list({
+openai.models.list({
   baseURL: 'http://localhost:8080/test-api',
   httpAgent: new http.Agent({ keepAlive: false }),
 })
@@ -216,7 +237,7 @@ We are keen for your feedback; please open an [issue](https://www.github.com/ope
 
 The following runtimes are supported:
 
-- Node.js version 12 or higher.
+- Node.js 16 LTS or later ([non-EOL](https://endoflife.date/nodejs)) versions.
 - Deno v1.28.0 or higher (experimental).
   Use `import OpenAI from "npm:openai"`.

0 commit comments