import {withLock} from "lifecycle-utils";
import {Token} from "../types.js";
import {isLlamaText, LlamaText} from "../utils/LlamaText.js";
import {LlamaModel} from "./LlamaModel.js";
import {LlamaContext, LlamaContextSequence} from "./LlamaContext/LlamaContext.js";

export type LlamaEmbeddingContextOptions = {
    model: LlamaModel,

    /** text context size */
    contextSize?: number,

    /** prompt processing batch size */
    batchSize?: number,

    /**
     * number of threads to use to evaluate tokens.
     * set to 0 to use the maximum number of threads supported by the current machine hardware
     */
    threads?: number,
};
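
// For illustration, a hypothetical options object (the values below are
// assumptions, not defaults of this file); `myModel` stands in for a model
// loaded elsewhere:
//
//     const options: LlamaEmbeddingContextOptions = {
//         model: myModel,
//         contextSize: 2048,
//         batchSize: 512,
//         threads: 0 // use the maximum number of threads the hardware supports
//     };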

export class LlamaEmbeddingContext {
    /** @internal */ private readonly _llamaContext: LlamaContext;
    /** @internal */ private readonly _sequence: LlamaContextSequence;

    public constructor({
        model,
        contextSize = model.trainContextSize,
        batchSize = contextSize,
        threads = 6
    }: LlamaEmbeddingContextOptions) {
        // clamp the context size to the model's train context size, and the batch size to the context size
        const resolvedContextSize = Math.min(contextSize, model.trainContextSize);
        const resolvedBatchSize = Math.min(batchSize, resolvedContextSize);

        this._llamaContext = new LlamaContext({
            model,
            contextSize: resolvedContextSize,
            batchSize: resolvedBatchSize,
            threads,
            _embedding: true,
            _noSeed: true
        });
        this._sequence = this._llamaContext.getSequence();
    }

    public async getEmbeddingFor(input: Token[] | string | LlamaText) {
        const resolvedInput = typeof input === "string"
            ? this._llamaContext.model.tokenize(input)
            : isLlamaText(input)
                // bind `tokenize` so it keeps its `this` binding when passed as a callback
                ? input.tokenize(this._llamaContext.model.tokenize.bind(this._llamaContext.model))
                : input;

        if (resolvedInput.length > this._llamaContext.contextSize)
            throw new Error(
                "Input is longer than the context size. " +
                "Try increasing the context size or using another model that supports longer contexts."
            );
        else if (resolvedInput.length === 0)
            return new LlamaEmbedding({vector: []});

        return await withLock(this, "evaluate", async () => {
            // erase the current context state, so the embedding is computed for this input alone
            await this._sequence.eraseContextTokenRanges([{
                start: 0,
                end: this._sequence.nextTokenIndex
            }]);

            await this._sequence.evaluateWithoutGeneratingNewTokens(resolvedInput);

            const embedding = this._llamaContext._ctx.getEmbedding();
            const embeddingVector = Array.from(embedding);

            return new LlamaEmbedding({vector: embeddingVector});
        });
    }

    public dispose() {
        return this._llamaContext.dispose();
    }

    /** @hidden */
    public [Symbol.dispose]() {
        // support for `using` declarations (TypeScript 5.2+ explicit resource management)
        return this.dispose();
    }

    public get disposed() {
        return this._llamaContext.disposed;
    }
}
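
// A minimal usage sketch (not part of this file; `myModel` is assumed to be
// a `LlamaModel` loaded elsewhere):
//
//     const embeddingContext = new LlamaEmbeddingContext({model: myModel});
//     const embedding = await embeddingContext.getEmbeddingFor("Hello world");
//     console.log(embedding.vector.length); // dimension of the embedding vector
//     embeddingContext.dispose();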

export type LlamaEmbeddingJSON = {
    type: "LlamaEmbedding",
    vector: number[]
};

export class LlamaEmbedding {
    public readonly vector: number[];

    public constructor({vector}: {vector: number[]}) {
        this.vector = vector;
    }

    public toJSON(): LlamaEmbeddingJSON {
        return {
            type: "LlamaEmbedding",
            vector: this.vector
        };
    }

    public static fromJSON(json: LlamaEmbeddingJSON) {
        if (json == null || json.type !== "LlamaEmbedding" || !Array.isArray(json.vector) ||
            json.vector.some(v => typeof v !== "number")
        )
            throw new Error("Invalid LlamaEmbedding JSON");

        return new LlamaEmbedding({
            vector: json.vector
        });
    }
}
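
// A serialization round-trip sketch (illustration only; `embedding` is a
// `LlamaEmbedding` obtained as above):
//
//     const json = embedding.toJSON();
//     const restored = LlamaEmbedding.fromJSON(JSON.parse(JSON.stringify(json)));
//     console.log(restored.vector.length === embedding.vector.length); // true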