Skip to content
Closed
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
25 changes: 15 additions & 10 deletions src/core/webview/webviewMessageHandler.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2065,11 +2065,14 @@ export const webviewMessageHandler = async (
}

case "requestIndexingStatus": {
const status = provider.codeIndexManager!.getCurrentStatus()
provider.postMessageToWebview({
type: "indexingStatusUpdate",
values: status,
})
const manager = provider.codeIndexManager
if (manager) {
const status = manager.getCurrentStatus()
provider.postMessageToWebview({
type: "indexingStatusUpdate",
values: status,
})
}
break
}
case "requestCodeIndexSecretStatus": {
Expand All @@ -2094,8 +2097,8 @@ export const webviewMessageHandler = async (
}
case "startIndexing": {
try {
const manager = provider.codeIndexManager!
if (manager.isFeatureEnabled && manager.isFeatureConfigured) {
const manager = provider.codeIndexManager
if (manager && manager.isFeatureEnabled && manager.isFeatureConfigured) {
if (!manager.isInitialized) {
await manager.initialize(provider.contextProxy)
}
Expand All @@ -2109,9 +2112,11 @@ export const webviewMessageHandler = async (
}
case "clearIndexData": {
try {
const manager = provider.codeIndexManager!
await manager.clearIndexData()
provider.postMessageToWebview({ type: "indexCleared", values: { success: true } })
const manager = provider.codeIndexManager
if (manager) {
await manager.clearIndexData()
provider.postMessageToWebview({ type: "indexCleared", values: { success: true } })
}
} catch (error) {
provider.log(`Error clearing index data: ${error instanceof Error ? error.message : String(error)}`)
provider.postMessageToWebview({
Expand Down
36 changes: 36 additions & 0 deletions src/services/code-index/embedders/__tests__/gemini.spec.ts
Original file line number Diff line number Diff line change
Expand Up @@ -114,4 +114,40 @@ describe("GeminiEmbedder", () => {
await expect(embedder.validateConfiguration()).rejects.toThrow("Validation failed")
})
})

// Verifies that GeminiEmbedder.createEmbeddings delegates correctly to the
// wrapped OpenAICompatibleEmbedder: default-model fallback and pass-through
// of an explicit model plus dimension options.
describe("createEmbeddings", () => {
	// Spy installed on the wrapped embedder's prototype so calls made through
	// GeminiEmbedder can be inspected. NOTE(review): typed `any` to match the
	// surrounding suite's mock style — a vitest Mock type would be stricter.
	let mockCreateEmbeddings: any

	beforeEach(() => {
		// Replace the underlying createEmbeddings on the prototype before each
		// test so every GeminiEmbedder instance routes through the spy.
		mockCreateEmbeddings = vitest.fn()
		MockedOpenAICompatibleEmbedder.prototype.createEmbeddings = mockCreateEmbeddings
		embedder = new GeminiEmbedder("test-api-key")
	})

	it("should use default model when none is provided", async () => {
		// Arrange
		const texts = ["text1", "text2"]
		mockCreateEmbeddings.mockResolvedValue({ embeddings: [], usage: { promptTokens: 0, totalTokens: 0 } })

		// Act — no model argument given.
		await embedder.createEmbeddings(texts)

		// Assert — the fixed Gemini default model is forwarded and the
		// options argument stays undefined.
		expect(mockCreateEmbeddings).toHaveBeenCalledWith(texts, "text-embedding-004", undefined)
	})

	it("should pass model and dimension to the OpenAICompatibleEmbedder", async () => {
		// Arrange
		const texts = ["text1", "text2"]
		const model = "custom-model"
		const options = { dimension: 1536 }
		mockCreateEmbeddings.mockResolvedValue({ embeddings: [], usage: { promptTokens: 0, totalTokens: 0 } })

		// Act — explicit model and dimension supplied by the caller.
		await embedder.createEmbeddings(texts, model, options)

		// Assert — both are forwarded unchanged to the wrapped embedder.
		expect(mockCreateEmbeddings).toHaveBeenCalledWith(texts, model, options)
	})
})
})
11 changes: 8 additions & 3 deletions src/services/code-index/embedders/gemini.ts
Original file line number Diff line number Diff line change
Expand Up @@ -44,10 +44,15 @@ export class GeminiEmbedder implements IEmbedder {
* @param model Optional model identifier (ignored - always uses text-embedding-004)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Update the JSDoc for the 'model' parameter – it now honors a provided value (using model || GEMINI_MODEL) instead of always ignoring it.

Suggested change
* @param model Optional model identifier (ignored - always uses text-embedding-004)
* @param model Optional model identifier (uses provided value or defaults to text-embedding-004)

* @returns Promise resolving to embedding response
*/
async createEmbeddings(texts: string[], model?: string): Promise<EmbeddingResponse> {
async createEmbeddings(
texts: string[],
model?: string,
options?: { dimension?: number },
): Promise<EmbeddingResponse> {
try {
// Always use the fixed Gemini model, ignoring any passed model parameter
return await this.openAICompatibleEmbedder.createEmbeddings(texts, GeminiEmbedder.GEMINI_MODEL)
// Use the provided model or the fixed Gemini model
const modelToUse = model || GeminiEmbedder.GEMINI_MODEL
return await this.openAICompatibleEmbedder.createEmbeddings(texts, modelToUse, options)
} catch (error) {
TelemetryService.instance.captureEvent(TelemetryEventName.CODE_INDEX_ERROR, {
error: error instanceof Error ? error.message : String(error),
Expand Down
29 changes: 21 additions & 8 deletions src/services/code-index/embedders/openai-compatible.ts
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,11 @@ export class OpenAICompatibleEmbedder implements IEmbedder {
* @param model Optional model identifier
* @returns Promise resolving to embedding response
*/
async createEmbeddings(texts: string[], model?: string): Promise<EmbeddingResponse> {
async createEmbeddings(
texts: string[],
model?: string,
options?: { dimension?: number },
): Promise<EmbeddingResponse> {
const modelToUse = model || this.defaultModelId

// Apply model-specific query prefix if required
Expand Down Expand Up @@ -139,7 +143,7 @@ export class OpenAICompatibleEmbedder implements IEmbedder {
}

if (currentBatch.length > 0) {
const batchResult = await this._embedBatchWithRetries(currentBatch, modelToUse)
const batchResult = await this._embedBatchWithRetries(currentBatch, modelToUse, options)
allEmbeddings.push(...batchResult.embeddings)
usage.promptTokens += batchResult.usage.promptTokens
usage.totalTokens += batchResult.usage.totalTokens
Expand Down Expand Up @@ -181,7 +185,18 @@ export class OpenAICompatibleEmbedder implements IEmbedder {
url: string,
batchTexts: string[],
model: string,
options?: { dimension?: number },
): Promise<OpenAIEmbeddingResponse> {
const body: Record<string, any> = {
input: batchTexts,
model: model,
encoding_format: "base64",
}

if (options?.dimension) {
body.dimensions = options.dimension
}

const response = await fetch(url, {
method: "POST",
headers: {
Expand All @@ -191,11 +206,7 @@ export class OpenAICompatibleEmbedder implements IEmbedder {
"api-key": this.apiKey,
Authorization: `Bearer ${this.apiKey}`,
},
body: JSON.stringify({
input: batchTexts,
model: model,
encoding_format: "base64",
}),
body: JSON.stringify(body),
})

if (!response || !response.ok) {
Expand Down Expand Up @@ -234,6 +245,7 @@ export class OpenAICompatibleEmbedder implements IEmbedder {
private async _embedBatchWithRetries(
batchTexts: string[],
model: string,
options?: { dimension?: number },
): Promise<{ embeddings: number[][]; usage: { promptTokens: number; totalTokens: number } }> {
// Use cached value for performance
const isFullUrl = this.isFullUrl
Expand All @@ -244,7 +256,7 @@ export class OpenAICompatibleEmbedder implements IEmbedder {

if (isFullUrl) {
// Use direct HTTP request for full endpoint URLs
response = await this.makeDirectEmbeddingRequest(this.baseUrl, batchTexts, model)
response = await this.makeDirectEmbeddingRequest(this.baseUrl, batchTexts, model, options)
} else {
// Use OpenAI SDK for base URLs
response = (await this.embeddingsClient.embeddings.create({
Expand All @@ -254,6 +266,7 @@ export class OpenAICompatibleEmbedder implements IEmbedder {
// when processing numeric arrays, which breaks compatibility with models using larger dimensions.
// By requesting base64 encoding, we bypass the package's parser and handle decoding ourselves.
encoding_format: "base64",
...(options?.dimension && { dimensions: options.dimension }),
})) as OpenAIEmbeddingResponse
}

Expand Down
2 changes: 1 addition & 1 deletion src/services/code-index/interfaces/embedder.ts
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ export interface IEmbedder {
* @param model Optional model ID to use for embeddings
* @returns Promise resolving to an EmbeddingResponse
*/
createEmbeddings(texts: string[], model?: string): Promise<EmbeddingResponse>
createEmbeddings(texts: string[], model?: string, options?: { dimension?: number }): Promise<EmbeddingResponse>

/**
* Validates the embedder configuration by testing connectivity and credentials.
Expand Down
3 changes: 3 additions & 0 deletions src/shared/embeddingModels.ts
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,9 @@ export const EMBEDDING_MODEL_PROFILES: EmbeddingModelProfiles = {
},
gemini: {
"text-embedding-004": { dimension: 768 },
// ADD: New model with a default dimension.
// The actual dimension will be passed from the configuration at runtime.
"gemini-embedding-exp-03-07": { dimension: 768 },
},
}

Expand Down