diff --git a/package-lock.json b/package-lock.json
index 9fc60e62c84..1c8c4e88f46 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -68,6 +68,7 @@
"react-dom": "^19.1.0",
"react-tooltip": "^5.28.1",
"tas-client-umd": "0.2.0",
+ "undici": "^7.11.0",
"v8-inspect-profiler": "^0.1.1",
"vscode-oniguruma": "1.7.0",
"vscode-regexpp": "^3.1.0",
@@ -22767,9 +22768,9 @@
"dev": true
},
"node_modules/undici": {
- "version": "7.2.3",
- "resolved": "https://registry.npmjs.org/undici/-/undici-7.2.3.tgz",
- "integrity": "sha512-2oSLHaDalSt2/O/wHA9M+/ZPAOcU2yrSP/cdBYJ+YxZskiPYDSqHbysLSlD7gq3JMqOoJI5O31RVU3BxX/MnAA==",
+ "version": "7.11.0",
+ "resolved": "http://artifactory.intra.xiaojukeji.com/artifactory/api/npm/npm/undici/-/undici-7.11.0.tgz?dl=https%3A%2F%2Fregistry.npmmirror.com%2Fundici%2F-%2Fundici-7.11.0.tgz",
+ "integrity": "sha512-heTSIac3iLhsmZhUCjyS3JQEkZELateufzZuBaVM5RHXdSBMb1LPMQf5x+FH7qjsZYDP0ttAc3nnVpUB+wYbOg==",
"license": "MIT",
"engines": {
"node": ">=20.18.1"
diff --git a/package.json b/package.json
index e6341c0903b..e3fa4448ec3 100644
--- a/package.json
+++ b/package.json
@@ -130,6 +130,7 @@
"react-dom": "^19.1.0",
"react-tooltip": "^5.28.1",
"tas-client-umd": "0.2.0",
+ "undici": "^7.11.0",
"v8-inspect-profiler": "^0.1.1",
"vscode-oniguruma": "1.7.0",
"vscode-regexpp": "^3.1.0",
diff --git a/src/vs/code/electron-main/app.ts b/src/vs/code/electron-main/app.ts
index c3d2dfe5461..3c283786249 100644
--- a/src/vs/code/electron-main/app.ts
+++ b/src/vs/code/electron-main/app.ts
@@ -1243,7 +1243,7 @@ export class CodeApplication extends Disposable {
const voidUpdatesChannel = ProxyChannel.fromService(accessor.get(IVoidUpdateService), disposables);
mainProcessElectronServer.registerChannel('void-channel-update', voidUpdatesChannel);
- const sendLLMMessageChannel = new LLMMessageChannel(accessor.get(IMetricsService));
+ const sendLLMMessageChannel = new LLMMessageChannel(accessor.get(IMetricsService), this.configurationService);
mainProcessElectronServer.registerChannel('void-channel-llmMessage', sendLLMMessageChannel);
// Void added this
diff --git a/src/vs/workbench/contrib/void/browser/react/src/void-settings-tsx/Settings.tsx b/src/vs/workbench/contrib/void/browser/react/src/void-settings-tsx/Settings.tsx
index acc3c6d6cdc..be40fec5ed6 100644
--- a/src/vs/workbench/contrib/void/browser/react/src/void-settings-tsx/Settings.tsx
+++ b/src/vs/workbench/contrib/void/browser/react/src/void-settings-tsx/Settings.tsx
@@ -31,6 +31,7 @@ type Tab =
| 'featureOptions'
| 'mcp'
| 'general'
+ | 'network'
| 'all';
@@ -1042,6 +1043,7 @@ export const Settings = () => {
{ tab: 'featureOptions', label: 'Feature Options' },
{ tab: 'general', label: 'General' },
{ tab: 'mcp', label: 'MCP' },
+ { tab: 'network', label: 'Network' },
{ tab: 'all', label: 'All Settings' },
];
const shouldShowTab = (tab: Tab) => selectedSection === 'all' || selectedSection === tab;
@@ -1550,8 +1552,41 @@ Use Model Context Protocol to provide Agent mode with more tools.
+				{/* Network section */}
+				<div className={`${shouldShowTab('network') ? '' : 'hidden'}`}>
+
+					<h2 className={`text-3xl mb-2`}>Network</h2>
+
+					<div className='max-w-3xl'>
+
+						{/* LLM Proxy */}
+						<div className='my-4'>
+
+							<h4 className={`text-base`}>Use Proxy</h4>
+
+							<div className='text-sm italic text-void-fg-3 mt-1'>
+								Route LLM provider requests through the editor's configured HTTP proxy (http.proxy).{' '}
+							</div>
+
+							{/* Enable Switch */}
+							<div className='flex items-center gap-x-2 my-2'>
+								<VoidSwitch
+									size='xs'
+									value={settingsState.globalSettings.enableNetworkProxy}
+									onChange={(newVal) => voidSettingsService.setGlobalSetting('enableNetworkProxy', newVal)}
+								/>
+								<span className='text-void-fg-3 text-xs'>
+									{settingsState.globalSettings.enableNetworkProxy ? 'Enabled' : 'Disabled'}
+								</span>
+							</div>
+
+						</div>
+
+					</div>
+				</div>
+
diff --git a/src/vs/workbench/contrib/void/common/sendLLMMessageService.ts b/src/vs/workbench/contrib/void/common/sendLLMMessageService.ts
index 7618e7365ac..74de905870b 100644
--- a/src/vs/workbench/contrib/void/common/sendLLMMessageService.ts
+++ b/src/vs/workbench/contrib/void/common/sendLLMMessageService.ts
@@ -116,7 +116,7 @@ export class LLMMessageService extends Disposable implements ILLMMessageService
return null
}
- const { settingsOfProvider, } = this.voidSettingsService.state
+ const { settingsOfProvider, globalSettings } = this.voidSettingsService.state
const mcpTools = this.mcpService.getMCPTools()
@@ -134,6 +134,7 @@ export class LLMMessageService extends Disposable implements ILLMMessageService
settingsOfProvider,
modelSelection,
mcpTools,
+ proxyRequest: globalSettings.enableNetworkProxy,
} satisfies MainSendLLMMessageParams);
return requestId
diff --git a/src/vs/workbench/contrib/void/common/sendLLMMessageTypes.ts b/src/vs/workbench/contrib/void/common/sendLLMMessageTypes.ts
index f476b851987..1cc647c09a2 100644
--- a/src/vs/workbench/contrib/void/common/sendLLMMessageTypes.ts
+++ b/src/vs/workbench/contrib/void/common/sendLLMMessageTypes.ts
@@ -135,6 +135,7 @@ export type SendLLMMessageParams = {
settingsOfProvider: SettingsOfProvider;
mcpTools: InternalToolInfo[] | undefined;
+ proxyRequest: boolean;
} & SendLLMType
diff --git a/src/vs/workbench/contrib/void/common/voidSettingsTypes.ts b/src/vs/workbench/contrib/void/common/voidSettingsTypes.ts
index 38497c60ce7..19c0ce80f06 100644
--- a/src/vs/workbench/contrib/void/common/voidSettingsTypes.ts
+++ b/src/vs/workbench/contrib/void/common/voidSettingsTypes.ts
@@ -452,6 +452,7 @@ export type GlobalSettings = {
isOnboardingComplete: boolean;
disableSystemMessage: boolean;
autoAcceptLLMChanges: boolean;
+ enableNetworkProxy: boolean;
}
export const defaultGlobalSettings: GlobalSettings = {
@@ -468,6 +469,7 @@ export const defaultGlobalSettings: GlobalSettings = {
isOnboardingComplete: false,
disableSystemMessage: false,
autoAcceptLLMChanges: false,
+ enableNetworkProxy: true,
}
export type GlobalSettingName = keyof GlobalSettings
diff --git a/src/vs/workbench/contrib/void/electron-main/llmMessage/llmRequestProxy.ts b/src/vs/workbench/contrib/void/electron-main/llmMessage/llmRequestProxy.ts
new file mode 100644
index 00000000000..38213410703
--- /dev/null
+++ b/src/vs/workbench/contrib/void/electron-main/llmMessage/llmRequestProxy.ts
@@ -0,0 +1,70 @@
+import { setGlobalDispatcher, ProxyAgent, Agent } from 'undici';
+import { HttpsProxyAgent } from 'https-proxy-agent';
+import OpenAI, { ClientOptions } from 'openai'
+import { IConfigurationService } from '../../../../../platform/configuration/common/configuration.js';
+
+
+function getConfigValue<T>(configurationService: IConfigurationService, key: string): T | undefined {
+ const values = configurationService.inspect<T>(key);
+ return values.userLocalValue || values.defaultValue;
+}
+
+
+export const llmRequestProxy = {
+ config: {
+ proxyUrl: '' as string | undefined,
+ strictSSL: false,
+ authorization: '' as string | undefined,
+ },
+ proxyEnabled: false,
+ newOpenAI: function (options: ClientOptions) {
+ const params = {
+ ...options,
+ }
+
+ if (this.proxyEnabled && this.config.proxyUrl) {
+ params.httpAgent = new HttpsProxyAgent(this.config.proxyUrl)
+ }
+
+ return new OpenAI(params)
+ },
+
+ configure(configurationService: IConfigurationService) {
+ const proxyUrl = getConfigValue(configurationService, 'http.proxy');
+ const strictSSL = !!getConfigValue(configurationService, 'http.proxyStrictSSL');
+ const authorization = getConfigValue(configurationService, 'http.proxyAuthorization');
+
+ this.config.proxyUrl = proxyUrl
+ this.config.strictSSL = strictSSL
+ this.config.authorization = authorization
+ },
+
+
+ initialize(configurationService: IConfigurationService) {
+ // initialize proxy config
+ this.configure(configurationService)
+ },
+
+ enableProxy() {
+ if (this.config.proxyUrl) {
+ if (!this.proxyEnabled) {
+ this.proxyEnabled = true;
+ this.setCommonProxy(this.config.proxyUrl)
+ }
+ }
+ },
+ disableProxy() {
+ if (this.proxyEnabled) {
+ this.proxyEnabled = false;
+ this.clearCommonProxy()
+ }
+ },
+
+ setCommonProxy(proxyUrl: string) {
+ const dispatcher = new ProxyAgent({ uri: proxyUrl });
+ setGlobalDispatcher(dispatcher);
+ },
+ clearCommonProxy() {
+ setGlobalDispatcher(new Agent());
+ }
+}
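
// Aside — an illustrative sketch, not part of this patch: enableProxy()/disableProxy()
// above work by swapping undici's process-wide dispatcher, so any request issued through
// undici (e.g. its fetch API) while the ProxyAgent is installed is tunnelled through the
// proxy. The proxy and target URLs below are made-up examples.
import { fetch, getGlobalDispatcher, setGlobalDispatcher, ProxyAgent } from 'undici';

const previousDispatcher = getGlobalDispatcher();

// Equivalent of llmRequestProxy.setCommonProxy('http://127.0.0.1:8888'):
setGlobalDispatcher(new ProxyAgent({ uri: 'http://127.0.0.1:8888' }));

// This request now goes via the proxy (HTTP CONNECT for https targets).
await fetch('https://api.openai.com/v1/models');

// clearCommonProxy() installs a fresh direct Agent; restoring the saved dispatcher,
// as here, also keeps its existing connection pool.
setGlobalDispatcher(previousDispatcher);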
diff --git a/src/vs/workbench/contrib/void/electron-main/llmMessage/sendLLMMessage.impl.ts b/src/vs/workbench/contrib/void/electron-main/llmMessage/sendLLMMessage.impl.ts
index b4c794e2074..43303dd8080 100644
--- a/src/vs/workbench/contrib/void/electron-main/llmMessage/sendLLMMessage.impl.ts
+++ b/src/vs/workbench/contrib/void/electron-main/llmMessage/sendLLMMessage.impl.ts
@@ -20,6 +20,8 @@ import { getSendableReasoningInfo, getModelCapabilities, getProviderCapabilities
import { extractReasoningWrapper, extractXMLToolsWrapper } from './extractGrammar.js';
import { availableTools, InternalToolInfo } from '../../common/prompt/prompts.js';
import { generateUuid } from '../../../../../base/common/uuid.js';
+import { llmRequestProxy } from './llmRequestProxy.js';
+
const getGoogleApiKey = async () => {
// module‑level singleton
@@ -76,7 +78,7 @@ const newOpenAICompatibleSDK = async ({ settingsOfProvider, providerName, includ
}
if (providerName === 'openAI') {
const thisConfig = settingsOfProvider[providerName]
- return new OpenAI({ apiKey: thisConfig.apiKey, ...commonPayloadOpts })
+ return llmRequestProxy.newOpenAI({ apiKey: thisConfig.apiKey, ...commonPayloadOpts })
}
else if (providerName === 'ollama') {
const thisConfig = settingsOfProvider[providerName]
@@ -88,7 +90,7 @@ const newOpenAICompatibleSDK = async ({ settingsOfProvider, providerName, includ
}
else if (providerName === 'liteLLM') {
const thisConfig = settingsOfProvider[providerName]
- return new OpenAI({ baseURL: `${thisConfig.endpoint}/v1`, apiKey: 'noop', ...commonPayloadOpts })
+ return llmRequestProxy.newOpenAI({ baseURL: `${thisConfig.endpoint}/v1`, apiKey: 'noop', ...commonPayloadOpts })
}
else if (providerName === 'lmStudio') {
const thisConfig = settingsOfProvider[providerName]
@@ -96,7 +98,7 @@ const newOpenAICompatibleSDK = async ({ settingsOfProvider, providerName, includ
}
else if (providerName === 'openRouter') {
const thisConfig = settingsOfProvider[providerName]
- return new OpenAI({
+ return llmRequestProxy.newOpenAI({
baseURL: 'https://openrouter.ai/api/v1',
apiKey: thisConfig.apiKey,
defaultHeaders: {
@@ -111,7 +113,7 @@ const newOpenAICompatibleSDK = async ({ settingsOfProvider, providerName, includ
const thisConfig = settingsOfProvider[providerName]
const baseURL = `https://${thisConfig.region}-aiplatform.googleapis.com/v1/projects/${thisConfig.project}/locations/${thisConfig.region}/endpoints/${'openapi'}`
const apiKey = await getGoogleApiKey()
- return new OpenAI({ baseURL: baseURL, apiKey: apiKey, ...commonPayloadOpts })
+ return llmRequestProxy.newOpenAI({ baseURL: baseURL, apiKey: apiKey, ...commonPayloadOpts })
}
else if (providerName === 'microsoftAzure') {
// https://learn.microsoft.com/en-us/rest/api/aifoundry/model-inference/get-chat-completions/get-chat-completions?view=rest-aifoundry-model-inference-2024-05-01-preview&tabs=HTTP
@@ -142,30 +144,30 @@ const newOpenAICompatibleSDK = async ({ settingsOfProvider, providerName, includ
if (!baseURL.endsWith('/v1'))
baseURL = baseURL.replace(/\/+$/, '') + '/v1'
- return new OpenAI({ baseURL, apiKey, ...commonPayloadOpts })
+ return llmRequestProxy.newOpenAI({ baseURL, apiKey, ...commonPayloadOpts })
}
else if (providerName === 'deepseek') {
const thisConfig = settingsOfProvider[providerName]
- return new OpenAI({ baseURL: 'https://api.deepseek.com/v1', apiKey: thisConfig.apiKey, ...commonPayloadOpts })
+ return llmRequestProxy.newOpenAI({ baseURL: 'https://api.deepseek.com/v1', apiKey: thisConfig.apiKey, ...commonPayloadOpts })
}
else if (providerName === 'openAICompatible') {
const thisConfig = settingsOfProvider[providerName]
const headers = parseHeadersJSON(thisConfig.headersJSON)
- return new OpenAI({ baseURL: thisConfig.endpoint, apiKey: thisConfig.apiKey, defaultHeaders: headers, ...commonPayloadOpts })
+ return llmRequestProxy.newOpenAI({ baseURL: thisConfig.endpoint, apiKey: thisConfig.apiKey, defaultHeaders: headers, ...commonPayloadOpts })
}
else if (providerName === 'groq') {
const thisConfig = settingsOfProvider[providerName]
- return new OpenAI({ baseURL: 'https://api.groq.com/openai/v1', apiKey: thisConfig.apiKey, ...commonPayloadOpts })
+ return llmRequestProxy.newOpenAI({ baseURL: 'https://api.groq.com/openai/v1', apiKey: thisConfig.apiKey, ...commonPayloadOpts })
}
else if (providerName === 'xAI') {
const thisConfig = settingsOfProvider[providerName]
- return new OpenAI({ baseURL: 'https://api.x.ai/v1', apiKey: thisConfig.apiKey, ...commonPayloadOpts })
+ return llmRequestProxy.newOpenAI({ baseURL: 'https://api.x.ai/v1', apiKey: thisConfig.apiKey, ...commonPayloadOpts })
}
else if (providerName === 'mistral') {
const thisConfig = settingsOfProvider[providerName]
- return new OpenAI({ baseURL: 'https://api.mistral.ai/v1', apiKey: thisConfig.apiKey, ...commonPayloadOpts })
+ return llmRequestProxy.newOpenAI({ baseURL: 'https://api.mistral.ai/v1', apiKey: thisConfig.apiKey, ...commonPayloadOpts })
}
else throw new Error(`Void providerName was invalid: ${providerName}.`)
diff --git a/src/vs/workbench/contrib/void/electron-main/llmMessage/sendLLMMessage.ts b/src/vs/workbench/contrib/void/electron-main/llmMessage/sendLLMMessage.ts
index 27f35ad556c..3d77e536fba 100644
--- a/src/vs/workbench/contrib/void/electron-main/llmMessage/sendLLMMessage.ts
+++ b/src/vs/workbench/contrib/void/electron-main/llmMessage/sendLLMMessage.ts
@@ -7,7 +7,7 @@ import { SendLLMMessageParams, OnText, OnFinalMessage, OnError } from '../../com
import { IMetricsService } from '../../common/metricsService.js';
import { displayInfoOfProviderName } from '../../common/voidSettingsTypes.js';
import { sendLLMMessageToProviderImplementation } from './sendLLMMessage.impl.js';
-
+import { llmRequestProxy } from './llmRequestProxy.js';
export const sendLLMMessage = async ({
messagesType,
@@ -24,11 +24,17 @@ export const sendLLMMessage = async ({
chatMode,
separateSystemMessage,
mcpTools,
+ proxyRequest,
}: SendLLMMessageParams,
metricsService: IMetricsService
) => {
+ if (proxyRequest) {
+ llmRequestProxy.enableProxy()
+ } else {
+ llmRequestProxy.disableProxy()
+ }
const { providerName, modelName } = modelSelection
diff --git a/src/vs/workbench/contrib/void/electron-main/sendLLMMessageChannel.ts b/src/vs/workbench/contrib/void/electron-main/sendLLMMessageChannel.ts
index bff528ce84c..a194efbd478 100644
--- a/src/vs/workbench/contrib/void/electron-main/sendLLMMessageChannel.ts
+++ b/src/vs/workbench/contrib/void/electron-main/sendLLMMessageChannel.ts
@@ -12,6 +12,10 @@ import { EventLLMMessageOnTextParams, EventLLMMessageOnErrorParams, EventLLMMess
import { sendLLMMessage } from './llmMessage/sendLLMMessage.js'
import { IMetricsService } from '../common/metricsService.js';
import { sendLLMMessageToProviderImplementation } from './llmMessage/sendLLMMessage.impl.js';
+import { IConfigurationService } from '../../../../platform/configuration/common/configuration.js';
+// import { IVoidSettingsService } from '../common/voidSettingsService.js';
+import { llmRequestProxy } from './llmMessage/llmRequestProxy.js';
+
// NODE IMPLEMENTATION - calls actual sendLLMMessage() and returns listeners to it
@@ -48,7 +52,10 @@ export class LLMMessageChannel implements IServerChannel {
// stupidly, channels can't take in @IService
constructor(
private readonly metricsService: IMetricsService,
- ) { }
+ private readonly configurationService: IConfigurationService,
+ ) {
+ llmRequestProxy.initialize(this.configurationService)
+ }
// browser uses this to listen for changes
 listen(_: unknown, event: string): Event<any> {