Skip to content

Commit cd24032

Browse files
committed
fix: normalize base URL in LM Studio embedder and improve error logging
1 parent bff556b commit cd24032

File tree

2 files changed

+41
-40
lines changed

2 files changed

+41
-40
lines changed

src/services/code-index/embedders/lmstudio.ts

Lines changed: 14 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -23,9 +23,16 @@ export class CodeIndexLmStudioEmbedder implements IEmbedder {
2323
*/
2424
constructor(options: ApiHandlerOptions & { embeddingModelId?: string }) {
2525
this.options = options
26+
27+
// Normalize base URL to prevent duplicate /v1 if user already provided it
28+
let baseUrl = this.options.lmStudioBaseUrl || "http://localhost:1234"
29+
if (!baseUrl.endsWith("/v1")) {
30+
baseUrl = baseUrl + "/v1"
31+
}
32+
2633
this.embeddingsClient = new OpenAI({
27-
baseURL: (this.options.lmStudioBaseUrl || "http://localhost:1234") + "/v1",
28-
apiKey: "noop", // LM Studio doesn't require a real API key
34+
baseURL: baseUrl,
35+
apiKey: "noop", // API key is intentionally hardcoded to "noop" because LM Studio does not require authentication
2936
})
3037
this.defaultModelId = options.embeddingModelId || "text-embedding-nomic-embed-text-v1.5@f16"
3138
}
@@ -81,8 +88,11 @@ export class CodeIndexLmStudioEmbedder implements IEmbedder {
8188
usage.promptTokens += batchResult.usage.promptTokens
8289
usage.totalTokens += batchResult.usage.totalTokens
8390
} catch (error) {
84-
console.error("Failed to process batch:", error)
85-
throw new Error("Failed to create embeddings: batch processing error")
91+
const batchInfo = `batch of ${currentBatch.length} documents (indices: ${processedIndices.join(", ")})`
92+
console.error(`Failed to process ${batchInfo}:`, error)
93+
throw new Error(
94+
`Failed to create embeddings for ${batchInfo}: ${error instanceof Error ? error.message : "batch processing error"}`,
95+
)
8696
}
8797
}
8898
}

webview-ui/src/components/settings/providers/LMStudio.tsx

Lines changed: 27 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -56,47 +56,38 @@ export const LMStudio = ({ apiConfiguration, setApiConfigurationField }: LMStudi
5656
vscode.postMessage({ type: "requestLmStudioModels" })
5757
}, [])
5858

59+
// Reusable function to check if a model is available
60+
const checkModelAvailability = useCallback(
61+
(modelId: string | undefined): boolean => {
62+
if (!modelId) return false
63+
64+
// Check if model exists in local LM Studio models
65+
if (lmStudioModels.length > 0 && lmStudioModels.includes(modelId)) {
66+
return false // Model is available locally
67+
}
68+
69+
// If we have router models data for LM Studio
70+
if (routerModels.data?.lmstudio) {
71+
const availableModels = Object.keys(routerModels.data.lmstudio)
72+
// Show warning if model is not in the list (regardless of how many models there are)
73+
return !availableModels.includes(modelId)
74+
}
75+
76+
// If neither source has loaded yet, don't show warning
77+
return false
78+
},
79+
[lmStudioModels, routerModels.data],
80+
)
81+
5982
// Check if the selected model exists in the fetched models
6083
const modelNotAvailable = useMemo(() => {
61-
const selectedModel = apiConfiguration?.lmStudioModelId
62-
if (!selectedModel) return false
63-
64-
// Check if model exists in local LM Studio models
65-
if (lmStudioModels.length > 0 && lmStudioModels.includes(selectedModel)) {
66-
return false // Model is available locally
67-
}
68-
69-
// If we have router models data for LM Studio
70-
if (routerModels.data?.lmstudio) {
71-
const availableModels = Object.keys(routerModels.data.lmstudio)
72-
// Show warning if model is not in the list (regardless of how many models there are)
73-
return !availableModels.includes(selectedModel)
74-
}
75-
76-
// If neither source has loaded yet, don't show warning
77-
return false
78-
}, [apiConfiguration?.lmStudioModelId, routerModels.data, lmStudioModels])
84+
return checkModelAvailability(apiConfiguration?.lmStudioModelId)
85+
}, [apiConfiguration?.lmStudioModelId, checkModelAvailability])
7986

8087
// Check if the draft model exists
8188
const draftModelNotAvailable = useMemo(() => {
82-
const draftModel = apiConfiguration?.lmStudioDraftModelId
83-
if (!draftModel) return false
84-
85-
// Check if model exists in local LM Studio models
86-
if (lmStudioModels.length > 0 && lmStudioModels.includes(draftModel)) {
87-
return false // Model is available locally
88-
}
89-
90-
// If we have router models data for LM Studio
91-
if (routerModels.data?.lmstudio) {
92-
const availableModels = Object.keys(routerModels.data.lmstudio)
93-
// Show warning if model is not in the list (regardless of how many models there are)
94-
return !availableModels.includes(draftModel)
95-
}
96-
97-
// If neither source has loaded yet, don't show warning
98-
return false
99-
}, [apiConfiguration?.lmStudioDraftModelId, routerModels.data, lmStudioModels])
89+
return checkModelAvailability(apiConfiguration?.lmStudioDraftModelId)
90+
}, [apiConfiguration?.lmStudioDraftModelId, checkModelAvailability])
10091

10192
return (
10293
<>

0 commit comments

Comments (0)