Skip to content

Commit 8145c94

Browse files
authored
feat(minor): reference common classes on the Llama instance (#360)
1 parent 51eab61 commit 8145c94

File tree

4 files changed

+34
-1
lines changed

4 files changed

+34
-1
lines changed

docs/guide/downloading-models.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -87,7 +87,7 @@ When using a URI to reference a model,
8787
it's recommended [to add it to your `package.json` file](#cli) to ensure it's downloaded when running `npm install`,
8888
and also resolve it using the [`resolveModelFile`](../api/functions/resolveModelFile.md) method to get the full path of the resolved model file.
8989

90-
Here's and example usage of the [`resolveModelFile`](../api/functions/resolveModelFile.md) method:
90+
Here's an example usage of the [`resolveModelFile`](../api/functions/resolveModelFile.md) method:
9191
```typescript
9292
import {fileURLToPath} from "url";
9393
import path from "path";

src/bindings/Llama.ts

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@ import {GbnfJsonSchema} from "../utils/gbnfJson/types.js";
77
import {LlamaJsonSchemaGrammar} from "../evaluator/LlamaJsonSchemaGrammar.js";
88
import {LlamaGrammar, LlamaGrammarOptions} from "../evaluator/LlamaGrammar.js";
99
import {ThreadsSplitter} from "../utils/ThreadsSplitter.js";
10+
import {getLlamaClasses, LlamaClasses} from "../utils/getLlamaClasses.js";
1011
import {BindingModule} from "./AddonTypes.js";
1112
import {BuildGpu, BuildMetadataFile, LlamaGpuType, LlamaLocks, LlamaLogLevel} from "./types.js";
1213
import {MemoryOrchestrator, MemoryReservation} from "./utils/MemoryOrchestrator.js";
@@ -56,6 +57,7 @@ export class Llama {
5657
/** @internal */ private _nextLogNeedNewLine: boolean = false;
5758
/** @internal */ private _disposed: boolean = false;
5859

60+
private _classes?: LlamaClasses;
5961
public readonly onDispose = new EventRelay<void>();
6062

6163
private constructor({
@@ -137,6 +139,13 @@ export class Llama {
137139
return this._disposed;
138140
}
139141

142+
/**
 * A lazily created bundle of commonly used classes (such as `LlamaChatSession`),
 * exposed on the `Llama` instance for convenience.
 * The bundle is built on first access and reused afterwards.
 */
public get classes() {
    this._classes ??= getLlamaClasses();
    return this._classes;
}
148+
140149
/**
 * The GPU this instance was created with.
 * NOTE(review): presumably a `LlamaGpuType` (imported above) — the `_gpu`
 * field declaration is outside this view; confirm against it.
 */
public get gpu() {
    return this._gpu;
}

src/index.ts

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -100,6 +100,7 @@ import {
100100
import {GgmlType, type GgufTensorInfo} from "./gguf/types/GgufTensorInfoTypes.js";
101101
import {type ModelFileAccessTokens} from "./utils/modelFileAccesTokens.js";
102102
import {type OverridesObject} from "./utils/OverridesObject.js";
103+
import type {LlamaClasses} from "./utils/getLlamaClasses.js";
103104

104105

105106
export {
@@ -108,6 +109,7 @@ export {
108109
type LlamaOptions,
109110
type LastBuildOptions,
110111
type LlamaGpuType,
112+
type LlamaClasses,
111113
LlamaLogLevel,
112114
NoBinaryFoundError,
113115
resolveModelFile,

src/utils/getLlamaClasses.ts

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,22 @@
1+
import {LlamaChatSession} from "../evaluator/LlamaChatSession/LlamaChatSession.js";
2+
import {LlamaChat} from "../evaluator/LlamaChat/LlamaChat.js";
3+
import {LlamaCompletion} from "../evaluator/LlamaCompletion.js";
4+
5+
export type LlamaClasses = {
6+
readonly LlamaChatSession: typeof LlamaChatSession,
7+
readonly LlamaChat: typeof LlamaChat,
8+
readonly LlamaCompletion: typeof LlamaCompletion
9+
};
10+
11+
let cachedClasses: LlamaClasses | undefined = undefined;
12+
13+
export function getLlamaClasses(): LlamaClasses {
14+
if (cachedClasses == null)
15+
cachedClasses = Object.seal({
16+
LlamaChatSession,
17+
LlamaChat,
18+
LlamaCompletion
19+
});
20+
21+
return cachedClasses;
22+
}

0 commit comments

Comments
 (0)