Skip to content

Commit eb2e277

Browse files
committed
feat: improve Code Index error messages with detailed context
- Enhanced error messages in validation-helpers.ts to provide specific context
- Added detailed error translations in embeddings.json
- Improved error handling in manager.ts with contextual hints
- Enhanced service-factory.ts validation error reporting
- Updated OpenAI and OpenAI-compatible embedders with better error context
- Fixed tests to match new error message format

Addresses #8015
1 parent ee58c5d commit eb2e277

File tree

8 files changed

+412
-129
lines changed

8 files changed

+412
-129
lines changed
Lines changed: 50 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,13 @@
11
{
22
"unknownError": "Unknown error",
3-
"authenticationFailed": "Failed to create embeddings: Authentication failed. Please check your API key.",
4-
"failedWithStatus": "Failed to create embeddings after {{attempts}} attempts: HTTP {{statusCode}} - {{errorMessage}}",
3+
"authenticationFailed": "Failed to create embeddings: Authentication failed for {{provider}}. Please check your API key.",
4+
"failedWithStatus": "Failed to create embeddings after {{attempts}} attempts with {{provider}}: HTTP {{statusCode}} - {{errorMessage}}",
55
"failedWithError": "Failed to create embeddings after {{attempts}} attempts: {{errorMessage}}",
66
"failedMaxAttempts": "Failed to create embeddings after {{attempts}} attempts",
77
"textExceedsTokenLimit": "Text at index {{index}} exceeds maximum token limit ({{itemTokens}} > {{maxTokens}}). Skipping.",
88
"rateLimitRetry": "Rate limit hit, retrying in {{delayMs}}ms (attempt {{attempt}}/{{maxRetries}})",
9+
"rateLimitExhausted": "Rate limit exceeded for {{provider}} after {{attempts}} attempts. Please wait before retrying or check your API plan limits.",
10+
"textWithPrefixExceedsTokenLimit": "Text at index {{index}} with query prefix exceeds maximum token limit ({{estimatedTokens}} > {{maxTokens}}). Using original text without prefix.",
911
"ollama": {
1012
"couldNotReadErrorBody": "Could not read error body",
1113
"requestFailed": "Ollama API request failed with status {{status}} {{statusText}}: {{errorBody}}",
@@ -27,40 +29,68 @@
2729
"vectorDimensionMismatch": "Failed to update vector index for new model. Please try clearing the index and starting again. Details: {{errorMessage}}"
2830
},
2931
"validation": {
30-
"authenticationFailed": "Authentication failed. Please check your API key in the settings.",
32+
"authenticationFailed": "Authentication failed for {{provider}}. Please check your API key in the settings.",
3133
"connectionFailed": "Failed to connect to the embedder service. Please check your connection settings and ensure the service is running.",
32-
"modelNotAvailable": "The specified model is not available. Please check your model configuration.",
33-
"configurationError": "Invalid embedder configuration. Please review your settings.",
34+
"modelNotAvailable": "Model '{{modelId}}' is not available. Please verify the model exists and you have access to it.",
35+
"configurationError": "Invalid configuration for {{provider}}. Please review your settings.",
3436
"serviceUnavailable": "The embedder service is not available. Please ensure it is running and accessible.",
35-
"invalidEndpoint": "Invalid API endpoint. Please check your URL configuration.",
37+
"invalidEndpoint": "Invalid API endpoint{{#if endpoint}}: {{endpoint}}{{/if}}. Please check your URL configuration.",
3638
"invalidEmbedderConfig": "Invalid embedder configuration. Please check your settings.",
3739
"invalidApiKey": "Invalid API key. Please check your API key configuration.",
3840
"invalidBaseUrl": "Invalid base URL. Please check your URL configuration.",
3941
"invalidModel": "Invalid model. Please check your model configuration.",
4042
"invalidResponse": "Invalid response from embedder service. Please check your configuration.",
4143
"apiKeyRequired": "API key is required for this embedder",
42-
"baseUrlRequired": "Base URL is required for this embedder"
44+
"baseUrlRequired": "Base URL is required for this embedder",
45+
"badRequest": "Bad request to {{provider}}{{#if endpoint}} at {{endpoint}}{{/if}}. Please check your configuration and request parameters.",
46+
"forbidden": "Access forbidden for {{provider}}. Please check your API key permissions and account status.",
47+
"rateLimitExceeded": "Rate limit exceeded for {{provider}}. Please wait before retrying or upgrade your API plan.",
48+
"serverError": "Server error from {{provider}} (HTTP {{status}}). The service may be experiencing issues. Please try again later.",
49+
"notImplemented": "The endpoint{{#if endpoint}} {{endpoint}}{{/if}} does not support embeddings. Please check if you're using the correct endpoint URL.",
50+
"clientError": "Client error with {{provider}} (HTTP {{status}}). Please check your configuration.",
51+
"hostNotFound": "Cannot resolve host{{#if endpoint}} for {{endpoint}}{{/if}}. Please check the URL and your network connection.",
52+
"connectionRefused": "Connection refused{{#if endpoint}} to {{endpoint}}{{/if}}. Please ensure the service is running and accessible.",
53+
"connectionTimeout": "Connection timeout{{#if endpoint}} to {{endpoint}}{{/if}}. The service may be slow or unreachable.",
54+
"noResponse": "No response received from {{provider}}. Please check your network connection.",
55+
"invalidResponseFormat": "Invalid response format from {{provider}}. The endpoint may not be compatible with the expected API format.",
56+
"apiKeyIssue": "API key issue with {{provider}}{{#if source}} (configured in {{source}}){{/if}}. Please verify your API key is correct and has the necessary permissions.",
57+
"modelIssue": "Model issue with {{provider}}{{#if modelId}} for model '{{modelId}}'{{/if}}. Please verify the model is available and supports embeddings.",
58+
"dimensionMismatch": "Vector dimension mismatch{{#if modelId}} for model '{{modelId}}'{{/if}}. The model's embedding dimensions don't match the existing index.",
59+
"unexpectedError": "Unexpected error with {{provider}}. Please check the logs for more details.",
60+
"checkNetworkAndVPN": "Check your network connection and VPN settings if applicable.",
61+
"checkServiceRunning": "Ensure the service is running and the port is not blocked by a firewall.",
62+
"checkFirewallProxy": "Check firewall settings and proxy configuration if behind a corporate network.",
63+
"checkNetworkStability": "Check your network stability and try again.",
64+
"checkEndpointCompatibility": "Verify the endpoint is compatible with OpenAI's embedding API format.",
65+
"clearIndexAndRestart": "Try clearing the index and restarting with the new model configuration.",
66+
"checkApiKeyInSettings": "Please verify your API key in Settings > Code Index.",
67+
"checkEndpointAndModel": "Please verify the endpoint URL and model '{{modelId}}' are correct.",
68+
"checkServiceAndNetwork": "Please ensure the service is running and check your network connection."
4369
},
4470
"serviceFactory": {
45-
"openAiConfigMissing": "OpenAI configuration missing for embedder creation",
46-
"ollamaConfigMissing": "Ollama configuration missing for embedder creation",
47-
"openAiCompatibleConfigMissing": "OpenAI Compatible configuration missing for embedder creation",
48-
"geminiConfigMissing": "Gemini configuration missing for embedder creation",
49-
"mistralConfigMissing": "Mistral configuration missing for embedder creation",
50-
"vercelAiGatewayConfigMissing": "Vercel AI Gateway configuration missing for embedder creation",
51-
"invalidEmbedderType": "Invalid embedder type configured: {{embedderProvider}}",
71+
"openAiConfigMissing": "OpenAI configuration missing. Please provide an API key in the Code Index settings.",
72+
"ollamaConfigMissing": "Ollama configuration missing. Please provide the Ollama base URL in the Code Index settings.",
73+
"openAiCompatibleConfigMissing": "OpenAI Compatible configuration missing. Please provide both base URL and API key in the Code Index settings.",
74+
"geminiConfigMissing": "Gemini configuration missing. Please provide an API key in the Code Index settings.",
75+
"mistralConfigMissing": "Mistral configuration missing. Please provide an API key in the Code Index settings.",
76+
"vercelAiGatewayConfigMissing": "Vercel AI Gateway configuration missing. Please provide an API key in the Code Index settings.",
77+
"invalidEmbedderType": "Invalid embedder type configured: '{{embedderProvider}}'. Please select a valid provider in the Code Index settings.",
5278
"vectorDimensionNotDeterminedOpenAiCompatible": "Could not determine vector dimension for model '{{modelId}}' with provider '{{provider}}'. Please ensure the 'Embedding Dimension' is correctly set in the OpenAI-Compatible provider settings.",
5379
"vectorDimensionNotDetermined": "Could not determine vector dimension for model '{{modelId}}' with provider '{{provider}}'. Check model profiles or configuration.",
54-
"qdrantUrlMissing": "Qdrant URL missing for vector store creation",
55-
"codeIndexingNotConfigured": "Cannot create services: Code indexing is not properly configured"
80+
"qdrantUrlMissing": "Qdrant URL missing. Please configure the Qdrant vector database URL in the Code Index settings.",
81+
"codeIndexingNotConfigured": "Code indexing is not properly configured. Please complete the setup in Settings > Code Index.",
82+
"embeddingValidationFailed": "Failed to validate {{provider}} embedder: {{error}}. Please check your configuration and try again."
5683
},
5784
"orchestrator": {
58-
"indexingFailedNoBlocks": "Indexing failed: No code blocks were successfully indexed. This usually indicates an embedder configuration issue.",
59-
"indexingFailedCritical": "Indexing failed: No code blocks were successfully indexed despite finding files to process. This indicates a critical embedder failure.",
85+
"indexingFailedNoBlocks": "Indexing failed: No code blocks were successfully indexed. This usually indicates an embedder configuration issue. Please check your API key, endpoint URL, and model settings.",
86+
"indexingFailedCritical": "Indexing failed: No code blocks were successfully indexed despite finding files to process. This indicates a critical embedder failure. Please verify your embedder configuration and API connectivity.",
6087
"fileWatcherStarted": "File watcher started.",
6188
"fileWatcherStopped": "File watcher stopped.",
62-
"failedDuringInitialScan": "Failed during initial scan: {{errorMessage}}",
89+
"failedDuringInitialScan": "Failed during initial scan: {{errorMessage}}. Please check your configuration and try again.",
6390
"unknownError": "Unknown error",
64-
"indexingRequiresWorkspace": "Indexing requires an open workspace folder"
91+
"indexingRequiresWorkspace": "Indexing requires an open workspace folder. Please open a folder or workspace to enable code indexing."
92+
},
93+
"openai": {
94+
"invalidResponseFormat": "Invalid response format from OpenAI API. Please check your API key and model configuration."
6595
}
6696
}

src/services/code-index/__tests__/manager.spec.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -373,7 +373,7 @@ describe("CodeIndexManager - handleSettingsChange regression", () => {
373373
expect(mockServiceFactoryInstance.validateEmbedder).toHaveBeenCalledWith(createdEmbedder)
374374
expect(mockStateManager.setSystemState).toHaveBeenCalledWith(
375375
"Error",
376-
"embeddings:validation.authenticationFailed",
376+
expect.stringContaining("embeddings:validation.authenticationFailed"),
377377
)
378378
})
379379

src/services/code-index/__tests__/service-factory.spec.ts

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -795,7 +795,11 @@ describe("CodeIndexServiceFactory", () => {
795795
},
796796
}
797797
mockConfigManager.getConfig.mockReturnValue(testConfig as any)
798+
799+
// Mock embedder with embedderInfo
800+
mockEmbedderInstance.embedderInfo = { name: "openai" }
798801
MockedOpenAiEmbedder.mockImplementation(() => mockEmbedderInstance)
802+
799803
const networkError = new Error("Network error")
800804
mockEmbedderInstance.validateConfiguration.mockRejectedValue(networkError)
801805

@@ -804,10 +808,12 @@ describe("CodeIndexServiceFactory", () => {
804808
const result = await factory.validateEmbedder(embedder)
805809

806810
// Assert
807-
expect(result).toEqual({
811+
expect(result).toMatchObject({
808812
valid: false,
809-
error: "Network error",
813+
// The error is wrapped in a translation key that includes the original error
814+
error: expect.stringContaining("embeddingValidationFailed"),
810815
})
816+
// The result may also have a 'details' property, but we don't need to assert on it
811817
expect(mockEmbedderInstance.validateConfiguration).toHaveBeenCalled()
812818
})
813819

src/services/code-index/embedders/openai-compatible.ts

Lines changed: 58 additions & 39 deletions
Original file line numberDiff line numberDiff line change
@@ -348,8 +348,12 @@ export class OpenAICompatibleEmbedder implements IEmbedder {
348348
// Log the error for debugging
349349
console.error(`OpenAI Compatible embedder error (attempt ${attempts + 1}/${MAX_RETRIES}):`, error)
350350

351-
// Format and throw the error
352-
throw formatEmbeddingError(error, MAX_RETRIES)
351+
// Format and throw the error with context
352+
throw formatEmbeddingError(error, MAX_RETRIES, {
353+
provider: "OpenAI Compatible",
354+
endpoint: this.baseUrl,
355+
modelId: model,
356+
})
353357
}
354358
}
355359

@@ -360,46 +364,61 @@ export class OpenAICompatibleEmbedder implements IEmbedder {
360364
* Validates the OpenAI-compatible embedder configuration by testing endpoint connectivity and API key
361365
* @returns Promise resolving to validation result with success status and optional error message
362366
*/
363-
async validateConfiguration(): Promise<{ valid: boolean; error?: string }> {
364-
return withValidationErrorHandling(async () => {
365-
try {
366-
// Test with a minimal embedding request
367-
const testTexts = ["test"]
368-
const modelToUse = this.defaultModelId
369-
370-
let response: OpenAIEmbeddingResponse
371-
372-
if (this.isFullUrl) {
373-
// Test direct HTTP request for full endpoint URLs
374-
response = await this.makeDirectEmbeddingRequest(this.baseUrl, testTexts, modelToUse)
375-
} else {
376-
// Test using OpenAI SDK for base URLs
377-
response = (await this.embeddingsClient.embeddings.create({
378-
input: testTexts,
379-
model: modelToUse,
380-
encoding_format: "base64",
381-
})) as OpenAIEmbeddingResponse
382-
}
367+
async validateConfiguration(): Promise<{ valid: boolean; error?: string; details?: string }> {
368+
return withValidationErrorHandling(
369+
async () => {
370+
try {
371+
// Test with a minimal embedding request
372+
const testTexts = ["test"]
373+
const modelToUse = this.defaultModelId
374+
375+
let response: OpenAIEmbeddingResponse
376+
377+
if (this.isFullUrl) {
378+
// Test direct HTTP request for full endpoint URLs
379+
response = await this.makeDirectEmbeddingRequest(this.baseUrl, testTexts, modelToUse)
380+
} else {
381+
// Test using OpenAI SDK for base URLs
382+
response = (await this.embeddingsClient.embeddings.create({
383+
input: testTexts,
384+
model: modelToUse,
385+
encoding_format: "base64",
386+
})) as OpenAIEmbeddingResponse
387+
}
383388

384-
// Check if we got a valid response
385-
if (!response?.data || response.data.length === 0) {
386-
return {
387-
valid: false,
388-
error: "embeddings:validation.invalidResponse",
389+
// Check if we got a valid response
390+
if (!response?.data || response.data.length === 0) {
391+
return {
392+
valid: false,
393+
error: t("embeddings:validation.invalidResponseFormat", {
394+
provider: "OpenAI Compatible",
395+
}),
396+
details: t("embeddings:validation.checkEndpointCompatibility"),
397+
}
389398
}
390-
}
391399

392-
return { valid: true }
393-
} catch (error) {
394-
// Capture telemetry for validation errors
395-
TelemetryService.instance.captureEvent(TelemetryEventName.CODE_INDEX_ERROR, {
396-
error: error instanceof Error ? error.message : String(error),
397-
stack: error instanceof Error ? error.stack : undefined,
398-
location: "OpenAICompatibleEmbedder:validateConfiguration",
399-
})
400-
throw error
401-
}
402-
}, "openai-compatible")
400+
return { valid: true }
401+
} catch (error) {
402+
// Capture telemetry for validation errors
403+
TelemetryService.instance.captureEvent(TelemetryEventName.CODE_INDEX_ERROR, {
404+
error: error instanceof Error ? error.message : String(error),
405+
stack: error instanceof Error ? error.stack : undefined,
406+
location: "OpenAICompatibleEmbedder:validateConfiguration",
407+
endpoint: this.baseUrl,
408+
model: this.defaultModelId,
409+
})
410+
throw error
411+
}
412+
},
413+
"openai-compatible",
414+
undefined,
415+
{
416+
provider: "OpenAI Compatible",
417+
endpoint: this.baseUrl,
418+
modelId: this.defaultModelId,
419+
apiKeySource: "OpenAI Compatible API Key setting",
420+
},
421+
)
403422
}
404423

405424
/**

src/services/code-index/embedders/openai.ts

Lines changed: 42 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -178,8 +178,11 @@ export class OpenAiEmbedder extends OpenAiNativeHandler implements IEmbedder {
178178
// Log the error for debugging
179179
console.error(`OpenAI embedder error (attempt ${attempts + 1}/${MAX_RETRIES}):`, error)
180180

181-
// Format and throw the error
182-
throw formatEmbeddingError(error, MAX_RETRIES)
181+
// Format and throw the error with context
182+
throw formatEmbeddingError(error, MAX_RETRIES, {
183+
provider: "OpenAI",
184+
modelId: model,
185+
})
183186
}
184187
}
185188

@@ -190,34 +193,45 @@ export class OpenAiEmbedder extends OpenAiNativeHandler implements IEmbedder {
190193
* Validates the OpenAI embedder configuration by attempting a minimal embedding request
191194
* @returns Promise resolving to validation result with success status and optional error message
192195
*/
193-
async validateConfiguration(): Promise<{ valid: boolean; error?: string }> {
194-
return withValidationErrorHandling(async () => {
195-
try {
196-
// Test with a minimal embedding request
197-
const response = await this.embeddingsClient.embeddings.create({
198-
input: ["test"],
199-
model: this.defaultModelId,
200-
})
201-
202-
// Check if we got a valid response
203-
if (!response.data || response.data.length === 0) {
204-
return {
205-
valid: false,
206-
error: t("embeddings:openai.invalidResponseFormat"),
196+
async validateConfiguration(): Promise<{ valid: boolean; error?: string; details?: string }> {
197+
return withValidationErrorHandling(
198+
async () => {
199+
try {
200+
// Test with a minimal embedding request
201+
const response = await this.embeddingsClient.embeddings.create({
202+
input: ["test"],
203+
model: this.defaultModelId,
204+
})
205+
206+
// Check if we got a valid response
207+
if (!response.data || response.data.length === 0) {
208+
return {
209+
valid: false,
210+
error: t("embeddings:openai.invalidResponseFormat"),
211+
details: t("embeddings:validation.checkEndpointCompatibility"),
212+
}
207213
}
208-
}
209214

210-
return { valid: true }
211-
} catch (error) {
212-
// Capture telemetry for validation errors
213-
TelemetryService.instance.captureEvent(TelemetryEventName.CODE_INDEX_ERROR, {
214-
error: error instanceof Error ? error.message : String(error),
215-
stack: error instanceof Error ? error.stack : undefined,
216-
location: "OpenAiEmbedder:validateConfiguration",
217-
})
218-
throw error
219-
}
220-
}, "openai")
215+
return { valid: true }
216+
} catch (error) {
217+
// Capture telemetry for validation errors
218+
TelemetryService.instance.captureEvent(TelemetryEventName.CODE_INDEX_ERROR, {
219+
error: error instanceof Error ? error.message : String(error),
220+
stack: error instanceof Error ? error.stack : undefined,
221+
location: "OpenAiEmbedder:validateConfiguration",
222+
model: this.defaultModelId,
223+
})
224+
throw error
225+
}
226+
},
227+
"openai",
228+
undefined,
229+
{
230+
provider: "OpenAI",
231+
modelId: this.defaultModelId,
232+
apiKeySource: "OpenAI API Key setting",
233+
},
234+
)
221235
}
222236

223237
get embedderInfo(): EmbedderInfo {

src/services/code-index/manager.ts

Lines changed: 24 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -343,7 +343,30 @@ export class CodeIndexManager {
343343
// Validate embedder configuration before proceeding
344344
const validationResult = await this._serviceFactory.validateEmbedder(embedder)
345345
if (!validationResult.valid) {
346-
const errorMessage = validationResult.error || "Embedder configuration validation failed"
346+
// Construct a detailed error message with context
347+
const provider = this._configManager!.currentEmbedderProvider
348+
const modelId = this._configManager!.currentModelId
349+
let errorMessage =
350+
validationResult.error ||
351+
t("embeddings:serviceFactory.embeddingValidationFailed", {
352+
provider,
353+
error: "Unknown validation error",
354+
})
355+
356+
// Add details if available
357+
if ((validationResult as any).details) {
358+
errorMessage += `. ${(validationResult as any).details}`
359+
}
360+
361+
// Add helpful context based on the error
362+
if (errorMessage.includes("401") || errorMessage.toLowerCase().includes("authentication")) {
363+
errorMessage += `. Please check your API key in Settings > Code Index.`
364+
} else if (errorMessage.includes("404") || errorMessage.toLowerCase().includes("not found")) {
365+
errorMessage += `. Please verify the endpoint URL and model '${modelId}' are correct.`
366+
} else if (errorMessage.includes("connection") || errorMessage.includes("ECONNREFUSED")) {
367+
errorMessage += `. Please ensure the service is running and accessible.`
368+
}
369+
347370
this._stateManager.setSystemState("Error", errorMessage)
348371
throw new Error(errorMessage)
349372
}

0 commit comments

Comments
 (0)