diff --git a/src/api/providers/__tests__/openai.spec.ts b/src/api/providers/__tests__/openai.spec.ts
index 3e744d6e16e..16c44a8ba5d 100644
--- a/src/api/providers/__tests__/openai.spec.ts
+++ b/src/api/providers/__tests__/openai.spec.ts
@@ -116,6 +116,7 @@ describe("OpenAiHandler", () => {
 					"User-Agent": `RooCode/${Package.version}`,
 				},
 				timeout: expect.any(Number),
+				fetch: expect.any(Function),
 			})
 		})
 	})
diff --git a/src/api/providers/openai.ts b/src/api/providers/openai.ts
index 939816480a5..ca275b02e8b 100644
--- a/src/api/providers/openai.ts
+++ b/src/api/providers/openai.ts
@@ -24,6 +24,7 @@ import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { getApiRequestTimeout } from "./utils/timeout-config"
+import { createAxiosFetchAdapter, shouldUseAxiosForProxy } from "./utils/axios-fetch-adapter"
 
 // TODO: Rename this to OpenAICompatibleHandler. Also, I think the
 // `OpenAINativeHandler` can subclass from this, since it's obviously
@@ -49,6 +50,10 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 
 		const timeout = getApiRequestTimeout()
 
+		// Determine if we should use axios for proxy support
+		const useAxiosForProxy = this.options.openAiUseAxiosForProxy ?? shouldUseAxiosForProxy()
+		const customFetch = createAxiosFetchAdapter(useAxiosForProxy)
+
 		if (isAzureAiInference) {
 			// Azure AI Inference Service (e.g., for DeepSeek) uses a different path structure
 			this.client = new OpenAI({
@@ -57,6 +62,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 				defaultHeaders: headers,
 				defaultQuery: { "api-version": this.options.azureApiVersion || "2024-05-01-preview" },
 				timeout,
+				fetch: customFetch,
 			})
 		} else if (isAzureOpenAi) {
 			// Azure API shape slightly differs from the core API shape:
@@ -67,6 +73,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 				apiVersion: this.options.azureApiVersion || azureOpenAiDefaultApiVersion,
 				defaultHeaders: headers,
 				timeout,
+				fetch: customFetch,
 			})
 		} else {
 			this.client = new OpenAI({
@@ -74,6 +81,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 				apiKey,
 				defaultHeaders: headers,
 				timeout,
+				fetch: customFetch,
 			})
 		}
 	}
diff --git a/src/api/providers/utils/axios-fetch-adapter.ts b/src/api/providers/utils/axios-fetch-adapter.ts
new file mode 100644
index 00000000000..b66399882aa
--- /dev/null
+++ b/src/api/providers/utils/axios-fetch-adapter.ts
@@ -0,0 +1,130 @@
+import axios, { AxiosRequestConfig, AxiosResponse } from "axios"
+import { Readable } from "stream"
+
+/**
+ * Creates a fetch-compatible wrapper around axios for use with OpenAI SDK.
+ * This adapter allows axios to be used instead of the native fetch API,
+ * which is important for proxy support in VSCode extensions where the
+ * patched fetch may not work correctly with certain proxy configurations
+ * (particularly SOCKS5 proxies).
+ *
+ * @param useAxiosForProxy - If true, uses axios instead of native fetch
+ * @returns A fetch-compatible function
+ */
+export function createAxiosFetchAdapter(useAxiosForProxy: boolean = false): typeof fetch {
+	// If not using axios for proxy, return native fetch
+	if (!useAxiosForProxy) {
+		return fetch
+	}
+
+	// Return an axios-based fetch implementation
+	return async (input: RequestInfo | URL, init?: RequestInit): Promise<Response> => {
+		const url = typeof input === "string" ? input : input instanceof URL ? input.toString() : (input as Request).url
+
+		// Convert RequestInit to AxiosRequestConfig
+		const config: AxiosRequestConfig = {
+			url,
+			method: (init?.method || "GET") as any,
+			headers: init?.headers as any,
+			data: init?.body,
+			// Important: Set responseType to 'stream' for streaming responses
+			responseType: "stream",
+			// Disable automatic decompression to let the consumer handle it
+			decompress: false,
+			// Don't throw on HTTP error status codes
+			validateStatus: () => true,
+		}
+
+		try {
+			const axiosResponse: AxiosResponse = await axios(config)
+
+			// Convert axios response to fetch Response
+			return createResponseFromAxios(axiosResponse)
+		} catch (error: any) {
+			// Handle network errors
+			throw new TypeError(`Failed to fetch: ${error.message}`)
+		}
+	}
+}
+
+/**
+ * Converts an Axios response to a fetch Response object
+ */
+function createResponseFromAxios(axiosResponse: AxiosResponse): Response {
+	const { status, statusText, headers, data } = axiosResponse
+
+	// Convert Node.js Readable stream to Web ReadableStream
+	const readableStream = nodeStreamToWebStream(data)
+
+	// Create Response with proper headers
+	const responseHeaders = new Headers()
+	Object.entries(headers).forEach(([key, value]) => {
+		if (value !== undefined) {
+			responseHeaders.set(key, String(value))
+		}
+	})
+
+	return new Response(readableStream, {
+		status,
+		statusText,
+		headers: responseHeaders,
+	})
+}
+
+/**
+ * Converts a Node.js Readable stream to a Web ReadableStream
+ */
+function nodeStreamToWebStream(nodeStream: Readable): ReadableStream {
+	return new ReadableStream({
+		start(controller) {
+			nodeStream.on("data", (chunk) => {
+				// Ensure chunk is a Uint8Array
+				if (typeof chunk === "string") {
+					controller.enqueue(new TextEncoder().encode(chunk))
+				} else if (chunk instanceof Buffer) {
+					controller.enqueue(new Uint8Array(chunk))
+				} else {
+					controller.enqueue(chunk)
+				}
+			})
+
+			nodeStream.on("end", () => {
+				controller.close()
+			})
+
+			nodeStream.on("error", (err) => {
+				controller.error(err)
+			})
+		},
+		cancel() {
+			nodeStream.destroy()
+		},
+	})
+}
+
+/**
+ * Checks if the current environment suggests that axios should be used
+ * instead of fetch for proxy support. This can be based on:
+ * - Presence of proxy environment variables
+ * - VSCode proxy settings
+ * - User configuration
+ */
+export function shouldUseAxiosForProxy(): boolean {
+	// Check for common proxy environment variables
+	const proxyVars = [
+		"HTTP_PROXY",
+		"http_proxy",
+		"HTTPS_PROXY",
+		"https_proxy",
+		"ALL_PROXY",
+		"all_proxy",
+		"NO_PROXY",
+		"no_proxy",
+	]
+
+	const hasProxyEnvVars = proxyVars.some((varName) => process.env[varName])
+
+	// For now, we'll enable axios for proxy support if proxy env vars are detected
+	// This can be extended to check VSCode settings or user preferences
+	return hasProxyEnvVars
+}
diff --git a/src/shared/api.ts b/src/shared/api.ts
index f1bf7dbaea4..8376348b197 100644
--- a/src/shared/api.ts
+++ b/src/shared/api.ts
@@ -14,6 +14,14 @@ export type ApiHandlerOptions = Omit<ProviderSettings, "id"> & {
 	 * Defaults to true; set to false to disable summaries.
 	 */
 	enableGpt5ReasoningSummary?: boolean
+
+	/**
+	 * When true, uses axios instead of native fetch for OpenAI API calls.
+	 * This can help with proxy configurations where VSCode's patched fetch
+	 * doesn't work correctly (e.g., SOCKS5 proxies).
+	 * Defaults to auto-detection based on proxy environment variables.
+	 */
+	openAiUseAxiosForProxy?: boolean
 }
 
 // RouterName