diff --git a/docs/guide/downloading-models.md b/docs/guide/downloading-models.md
index eadd0f36..9c33b096 100644
--- a/docs/guide/downloading-models.md
+++ b/docs/guide/downloading-models.md
@@ -87,7 +87,7 @@
 
 When using a URI to reference a model, it's recommended [to add it to your `package.json` file](#cli) to ensure it's downloaded when running `npm install`, and also resolve it using the [`resolveModelFile`](../api/functions/resolveModelFile.md) method to get the full path of the resolved model file.
 
-Here's and example usage of the [`resolveModelFile`](../api/functions/resolveModelFile.md) method:
+Here's an example usage of the [`resolveModelFile`](../api/functions/resolveModelFile.md) method:
 ```typescript
 import {fileURLToPath} from "url";
 import path from "path";
diff --git a/src/bindings/Llama.ts b/src/bindings/Llama.ts
index 1c844286..50831cc9 100644
--- a/src/bindings/Llama.ts
+++ b/src/bindings/Llama.ts
@@ -7,6 +7,7 @@ import {GbnfJsonSchema} from "../utils/gbnfJson/types.js";
 import {LlamaJsonSchemaGrammar} from "../evaluator/LlamaJsonSchemaGrammar.js";
 import {LlamaGrammar, LlamaGrammarOptions} from "../evaluator/LlamaGrammar.js";
 import {ThreadsSplitter} from "../utils/ThreadsSplitter.js";
+import {getLlamaClasses, LlamaClasses} from "../utils/getLlamaClasses.js";
 import {BindingModule} from "./AddonTypes.js";
 import {BuildGpu, BuildMetadataFile, LlamaGpuType, LlamaLocks, LlamaLogLevel} from "./types.js";
 import {MemoryOrchestrator, MemoryReservation} from "./utils/MemoryOrchestrator.js";
@@ -56,6 +57,7 @@ export class Llama {
     /** @internal */ private _nextLogNeedNewLine: boolean = false;
     /** @internal */ private _disposed: boolean = false;
+    private _classes?: LlamaClasses;
 
     public readonly onDispose = new EventRelay();
 
     private constructor({
@@ -137,6 +139,13 @@
         return this._disposed;
     }
 
+    public get classes() {
+        if (this._classes == null)
+            this._classes = getLlamaClasses();
+
+        return this._classes;
+    }
+
     public get gpu() {
         return this._gpu;
     }
diff --git a/src/index.ts b/src/index.ts
index 286a71eb..0ec8ab11 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -100,6 +100,7 @@ import {
 import {GgmlType, type GgufTensorInfo} from "./gguf/types/GgufTensorInfoTypes.js";
 import {type ModelFileAccessTokens} from "./utils/modelFileAccesTokens.js";
 import {type OverridesObject} from "./utils/OverridesObject.js";
+import type {LlamaClasses} from "./utils/getLlamaClasses.js";
 
 
 export {
@@ -108,6 +109,7 @@ export {
     type LlamaOptions,
     type LastBuildOptions,
     type LlamaGpuType,
+    type LlamaClasses,
     LlamaLogLevel,
     NoBinaryFoundError,
     resolveModelFile,
diff --git a/src/utils/getLlamaClasses.ts b/src/utils/getLlamaClasses.ts
new file mode 100644
index 00000000..74fa9c6c
--- /dev/null
+++ b/src/utils/getLlamaClasses.ts
@@ -0,0 +1,22 @@
+import {LlamaChatSession} from "../evaluator/LlamaChatSession/LlamaChatSession.js";
+import {LlamaChat} from "../evaluator/LlamaChat/LlamaChat.js";
+import {LlamaCompletion} from "../evaluator/LlamaCompletion.js";
+
+export type LlamaClasses = {
+    readonly LlamaChatSession: typeof LlamaChatSession,
+    readonly LlamaChat: typeof LlamaChat,
+    readonly LlamaCompletion: typeof LlamaCompletion
+};
+
+let cachedClasses: LlamaClasses | undefined = undefined;
+
+export function getLlamaClasses(): LlamaClasses {
+    if (cachedClasses == null)
+        cachedClasses = Object.seal({
+            LlamaChatSession,
+            LlamaChat,
+            LlamaCompletion
+        });
+
+    return cachedClasses;
+}
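
For context, here's a minimal sketch of how the new `llama.classes` getter introduced above might be used, assuming the usual `getLlama()`/`loadModel()` flow from `node-llama-cpp`; the model path is a placeholder:

```typescript
import {getLlama} from "node-llama-cpp";

const llama = await getLlama();
const model = await llama.loadModel({modelPath: "path/to/model.gguf"});
const context = await model.createContext();

// destructure a class from the lazily built `classes` object
// instead of importing it at the top of the file
const {LlamaChatSession} = llama.classes;

const session = new LlamaChatSession({
    contextSequence: context.getSequence()
});

const response = await session.prompt("Hello!");
console.log(response);
```

Because `getLlamaClasses()` caches a sealed object, repeated reads of `llama.classes` return the same instance rather than rebuilding it, and properties cannot be added to or removed from it.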