Commit d2d1eaa

chore: add jsdoc in the main class (#15)
1 parent e96b7b2 commit d2d1eaa

1 file changed: +44 −1


ResilientLLM.js

Lines changed: 44 additions & 1 deletion
@@ -11,6 +11,33 @@ import { Tiktoken } from "js-tiktoken/lite";
 import o200k_base from "js-tiktoken/ranks/o200k_base";
 import ResilientOperation from "./ResilientOperation.js";

+/**
+ * ResilientLLM class
+ * @class
+ * @param {Object} options - The options for the ResilientLLM instance
+ * @param {string} options.aiService - The AI service to use
+ * @param {string} options.model - The model to use
+ * @param {number} options.temperature - The temperature for the LLM
+ * @param {number} options.maxTokens - The maximum number of tokens for the LLM
+ * @param {number} options.timeout - The timeout for the LLM
+ * @param {Object} options.cacheStore - The cache store for the LLM
+ * @param {number} options.maxInputTokens - The maximum number of input tokens for the LLM
+ * @param {number} options.topP - The top P for the LLM
+ * @param {Object} options.rateLimitConfig - The rate limit config for the LLM
+ * @param {number} options.retries - The number of retries for the LLM
+ * @param {number} options.backoffFactor - The backoff factor for the LLM
+ * @param {Function} options.onRateLimitUpdate - The function to call when the rate limit is updated
+ * @param {Function} options.onError - The function to call when an error occurs
+ * @example
+ * const llm = new ResilientLLM({
+ *   aiService: "anthropic",
+ *   model: "claude-3-5-sonnet-20240620",
+ *   temperature: 0,
+ *   maxTokens: 2048,
+ * });
+ * const response = await llm.chat([{ role: "user", content: "Hello, world!" }]);
+ * console.log(response);
+ */
 class ResilientLLM {
   static encoder;
   static DEFAULT_MODELS = {
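
For quick reference, the documented usage plus a per-call override; a sketch assuming only what the JSDoc above records. chat() falls back to the constructor values when an option is omitted (the llmOptions?.aiService || this.aiService pattern in the chat() hunk below), and the override values here are illustrative only.

import ResilientLLM from "./ResilientLLM.js";

// Defaults from the documented constructor options.
const llm = new ResilientLLM({
  aiService: "anthropic",
  model: "claude-3-5-sonnet-20240620",
  temperature: 0,
  maxTokens: 2048,
});

// Per-call override via llmOptions; omitted fields fall back to the
// instance defaults set above. The override values are illustrative only.
const response = await llm.chat(
  [{ role: "user", content: "Hello, world!" }],
  { aiService: "openai", model: "gpt-4o" }
);
console.log(response);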
@@ -38,6 +65,11 @@ class ResilientLLM {
     this.resilientOperations = {}; // Store resilient operation instances for observability
   }

+  /**
+   * Get the API URL for the given AI service
+   * @param {string} aiService - The AI service to use
+   * @returns {string} - The API URL for the given AI service
+   */
   getApiUrl(aiService) {
     let apiUrl = null;
     if (aiService === 'openai') {
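
The hunk cuts off getApiUrl after the 'openai' branch. A minimal sketch of the documented contract, assuming the standard public OpenAI and Anthropic chat endpoints; the committed method may cover more services:

// Sketch only: the endpoint URLs are assumptions based on the public
// OpenAI/Anthropic APIs, not taken from this diff.
getApiUrl(aiService) {
  let apiUrl = null;
  if (aiService === 'openai') {
    apiUrl = 'https://api.openai.com/v1/chat/completions';
  } else if (aiService === 'anthropic') {
    apiUrl = 'https://api.anthropic.com/v1/messages';
  }
  return apiUrl;
}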
@@ -54,6 +86,11 @@ class ResilientLLM {
     return apiUrl;
   }

+  /**
+   * Get the API key for the given AI service
+   * @param {string} aiService - The AI service to use
+   * @returns {string} - The API key for the given AI service
+   */
   getApiKey(aiService) {
     let apiKey = null;
     if (aiService === 'openai') {
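
getApiKey mirrors the same branch-per-service shape. A minimal sketch, assuming conventionally named environment variables; the real variable names are not visible in this diff:

// Sketch only: the environment variable names are assumptions.
getApiKey(aiService) {
  let apiKey = null;
  if (aiService === 'openai') {
    apiKey = process.env.OPENAI_API_KEY;
  } else if (aiService === 'anthropic') {
    apiKey = process.env.ANTHROPIC_API_KEY;
  }
  return apiKey;
}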
@@ -80,6 +117,7 @@ class ResilientLLM {
    * @returns {Promise<string>} - The response from the LLM
    */
   async chat(conversationHistory, llmOptions = {}) {
+    //TODO: Support reasoning models, they have different parameters
     let requestBody, headers;
     let aiService = llmOptions?.aiService || this.aiService;
     let model = llmOptions?.model || this.model;
@@ -92,7 +130,6 @@ class ResilientLLM {
     if(estimatedLLMTokens > maxInputTokens){
       throw new Error("Input tokens exceed the maximum limit of " + maxInputTokens);
     }
-    //TODO: Support reasoning models, they have different parameters
     requestBody = {
       model: model
     };
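
The maxInputTokens guard above compares against estimatedLLMTokens, which the js-tiktoken imports in the first hunk suggest is built from the o200k_base ranks. A minimal sketch of such an estimate; the class's actual estimation code sits outside this diff:

import { Tiktoken } from "js-tiktoken/lite";
import o200k_base from "js-tiktoken/ranks/o200k_base";

// One shared encoder, mirroring the class's `static encoder` field.
const encoder = new Tiktoken(o200k_base);

// Rough estimate: total tokens across all message contents.
function estimateLLMTokens(conversationHistory) {
  return conversationHistory.reduce(
    (total, message) => total + encoder.encode(message.content).length,
    0
  );
}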
@@ -242,6 +279,12 @@ class ResilientLLM {
     }
   }

+  /**
+   * Retry the chat with an alternate service
+   * @param {Array} conversationHistory - The conversation history
+   * @param {Object} llmOptions - The LLM options
+   * @returns {Promise<string>} - The response from the LLM
+   */
   async retryChatWithAlternateService(conversationHistory, llmOptions = {}){
     console.log("LLM out of service:", llmOptions.aiService || this.aiService);
     this.llmOutOfService = this.llmOutOfService || [];
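
The llmOutOfService array initialized here implies that the retry path records failed services and switches to one that has not failed yet. A speculative sketch of that flow, assuming a two-service pool; the committed selection logic is not shown in this diff:

// Sketch only: the candidate pool and selection logic are assumptions.
async retryChatWithAlternateService(conversationHistory, llmOptions = {}) {
  console.log("LLM out of service:", llmOptions.aiService || this.aiService);
  this.llmOutOfService = this.llmOutOfService || [];
  this.llmOutOfService.push(llmOptions.aiService || this.aiService);

  const candidates = ['openai', 'anthropic'];
  const alternate = candidates.find((s) => !this.llmOutOfService.includes(s));
  if (!alternate) {
    throw new Error("All AI services are out of service");
  }
  return this.chat(conversationHistory, { ...llmOptions, aiService: alternate });
}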
