Skip to content

Commit c399314

Browse files
daniel-lxs and hannesrudolph
authored and committed
fix: remove model-specific query prefix handling in createEmbeddings method
1 parent 4f9935c commit c399314

File tree

1 file changed

+1
-25
lines changed

1 file changed

+1
-25
lines changed

src/services/code-index/embedders/openai.ts

Lines changed: 1 addition & 25 deletions
Original file line number · Diff line number · Diff line change
@@ -8,7 +8,6 @@ import {
88
MAX_BATCH_RETRIES as MAX_RETRIES,
99
INITIAL_RETRY_DELAY_MS as INITIAL_DELAY_MS,
1010
} from "../constants"
11-
import { getModelQueryPrefix } from "../../../shared/embeddingModels"
1211
import { t } from "../../../i18n"
1312

1413
/**
@@ -37,32 +36,9 @@ export class OpenAiEmbedder extends OpenAiNativeHandler implements IEmbedder {
3736
*/
3837
async createEmbeddings(texts: string[], model?: string): Promise<EmbeddingResponse> {
3938
const modelToUse = model || this.defaultModelId
40-
41-
// Apply model-specific query prefix if required
42-
const queryPrefix = getModelQueryPrefix("openai", modelToUse)
43-
const processedTexts = queryPrefix
44-
? texts.map((text, index) => {
45-
const prefixedText = `${queryPrefix}${text}`
46-
const estimatedTokens = Math.ceil(prefixedText.length / 4)
47-
if (estimatedTokens > MAX_ITEM_TOKENS) {
48-
console.warn(
49-
t("embeddings:textWithPrefixExceedsTokenLimit", {
50-
index,
51-
estimatedTokens,
52-
maxTokens: MAX_ITEM_TOKENS,
53-
prefixLength: queryPrefix.length,
54-
}),
55-
)
56-
// Return original text without prefix to avoid exceeding limit
57-
return text
58-
}
59-
return prefixedText
60-
})
61-
: texts
62-
6339
const allEmbeddings: number[][] = []
6440
const usage = { promptTokens: 0, totalTokens: 0 }
65-
const remainingTexts = [...processedTexts]
41+
const remainingTexts = [...texts]
6642

6743
while (remainingTexts.length > 0) {
6844
const currentBatch: string[] = []

0 commit comments

Comments (0)