// Yet another basic demo of OpenAI's Node.js client, using command-line args and a non-local worker.
// Run with:
//   node example2.js
// With example.js, you can test how openai-caching-proxy-worker behaves by running `wrangler dev` in one terminal
// and `cd examples/node-js && node example.js` in another; you can then watch locally how requests
// to OpenAI are served through the Redis cache.
// To use example2.js against a published instance instead, run `wrangler publish` to deploy
// openai-caching-proxy-worker to Cloudflare, then put the resulting worker URL into the PROXY_PATH
// variable in your local .env file. example2.js takes positional command-line options, so order matters:
//   1. model   <text-ada-001 | text-davinci-003> (anything else falls back to the default model)
//   2. cache   pass the literal string 'cacheYES' to enable caching
//   3. prompt  your question for the model <string>
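// For example, a hypothetical invocation (the prompt is quoted so the shell passes it as one argument):
//   node example2.js text-davinci-003 cacheYES "How much is 2 + 2 * 2?"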

import * as dotenv from 'dotenv';
import { Configuration, OpenAIApi } from 'openai';

dotenv.config();

const apiKey = process.env.OPENAI_API_KEY;
const proxyPath = process.env.PROXY_PATH;
if (!apiKey) {
  console.error('Error: OPENAI_API_KEY must be set');
  process.exit(1);
}

// CLI args, in order: 1. model (text-ada-001 | text-davinci-003), 2. 'cacheYES' to enable caching, 3. prompt (string).
const args = process.argv.slice(2);
let model = '';
// Note: args[2] (not the old `String(args.at(-1))`) so a missing prompt stays '' and the
// default below kicks in, instead of becoming the literal string "undefined".
const prompt = args[2] ?? '';
const cache = args[1] === 'cacheYES';

const configuration = new Configuration({
  apiKey,
  // Point this at your local instance, or at your Cloudflare deployment: after `wrangler publish`,
  // add the worker URL to the PROXY_PATH variable in your local .env file.
  basePath: proxyPath || `http://localhost:8787/proxy`,
  baseOptions: {
    headers: {
      // Cache responses for 3600 seconds (1 hour)
      'X-Proxy-TTL': 3600,
      // Force a cache refresh unless the 'cacheYES' flag was passed on the command line:
      'X-Proxy-Refresh': !cache,
    },
  },
});
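
// A hypothetical sanity check of the same proxy from outside this script (assumes the worker
// is running locally via `wrangler dev` and forwards the path after /proxy to the OpenAI API,
// which is what the caching proxy is meant to do):
//   curl -X POST http://localhost:8787/proxy/completions \
//     -H "Authorization: Bearer $OPENAI_API_KEY" \
//     -H "Content-Type: application/json" \
//     -H "X-Proxy-TTL: 3600" \
//     -d '{"model":"text-ada-001","prompt":"HOW MUCH IS 2 + 2 * 2 ?"}'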
const openai = new OpenAIApi(configuration);

switch (args[0]) {
  case 'text-ada-001':
    // cheap
    model = 'text-ada-001';
    break;
  case 'text-davinci-003':
    // more expensive
    model = 'text-davinci-003';
    break;
  default:
    // unrecognized or missing model; the fallback below applies
    break;
}

const makeSampleRequests = async () => {
  const completionOpts = {
    model: model || 'text-ada-001',
    prompt: prompt || 'HOW MUCH IS 2 + 2 * 2 ?',
  };
  const completion = await openai.createCompletion(completionOpts);
  console.log(
    `${cache ? 'R from cache' : 'R fresh'}:\nQ:\n ${
      completionOpts.prompt
    }\nR:\n${completion.data.choices[0].text.trim()}`,
  );
};

const main = async () => {
  // The first time this request is made, it should
  // be proxied as-is to the OpenAI API:
  await makeSampleRequests();
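
  // A sketch of how to observe the cache (assumes you ran with the 'cacheYES' flag, so
  // X-Proxy-Refresh is false): repeating the same request should now be served from the
  // worker's cache and return an identical completion, typically faster.
  await makeSampleRequests();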
};

main();