@@ -171,22 +171,28 @@ console.log(response);
171171import { ChatGPTClient } from '@waylaidwanderer/chatgpt-api';
172172
173173const clientOptions = {
174- // (Optional) Support for a reverse proxy for the completions endpoint (private API server).
175- // Warning: This will expose your `openaiApiKey` to a third-party. Consider the risks before using this.
176- // reverseProxyUrl: 'https://chatgpt.hato.ai/completions',
177- // (Optional) Parameters as described in https://platform.openai.com/docs/api-reference/completions
178- modelOptions: {
179- // You can override the model name and any other parameters here.
180- // model: 'text-chat-davinci-002-20221122',
181- },
182- // (Optional) Set custom instructions instead of "You are ChatGPT...".
183- // promptPrefix: 'You are Bob, a cowboy in Western times...',
184- // (Optional) Set a custom name for the user
185- // userLabel: 'User',
186- // (Optional) Set a custom name for ChatGPT
187- // chatGptLabel: 'ChatGPT',
188- // (Optional) Set to true to enable `console.debug()` logging
189-    debug: false,
174+ // (Optional) Support for a reverse proxy for the completions endpoint (private API server).
175+ // Warning: This will expose your `openaiApiKey` to a third-party. Consider the risks before using this.
176+ // reverseProxyUrl: 'https://chatgpt.hato.ai/completions',
177+ // (Optional) Parameters as described in https://platform.openai.com/docs/api-reference/completions
178+ modelOptions: {
179+ // You can override the model name and any other parameters here.
180+ // model: 'text-chat-davinci-002-20221122',
181+ // Set max_tokens here to override the default max_tokens of 1000 for the completion.
182+ // max_tokens: 1000,
183+ },
184+ // (Optional) Davinci models have a max context length of 4097 tokens, but you may need to change this for other models.
185+ // maxContextTokens: 4097,
186+ // (Optional) You might want to lower this to save money if using a paid model like `text-davinci-003`.
187+ // maxPromptTokens: 3097,
188+ // (Optional) Set custom instructions instead of "You are ChatGPT...".
189+ // promptPrefix: 'You are Bob, a cowboy in Western times...',
190+ // (Optional) Set a custom name for the user
191+ // userLabel: 'User',
192+ // (Optional) Set a custom name for ChatGPT
193+ // chatGptLabel: 'ChatGPT',
194+ // (Optional) Set to true to enable `console.debug()` logging
195+    debug: false,
190196};
191197
192198const cacheOptions = {
@@ -238,7 +244,13 @@ module.exports = {
238244 modelOptions: {
239245 // You can override the model name and any other parameters here.
240246 // model: 'text-chat-davinci-002-20221122',
247+ // Set max_tokens here to override the default max_tokens of 1000 for the completion.
248+ // max_tokens: 1000,
241249 },
250+ // (Optional) Davinci models have a max context length of 4097 tokens, but you may need to change this for other models.
251+ // maxContextTokens: 4097,
252+ // (Optional) You might want to lower this to save money if using a paid model like `text-davinci-003`.
253+ // maxPromptTokens: 3097,
242254 // (Optional) Set custom instructions instead of "You are ChatGPT...".
243255 // promptPrefix: 'You are Bob, a cowboy in Western times...',
244256 // (Optional) Set a custom name for the user
0 commit comments