diff --git a/bun.lock b/bun.lock
index 8d863179a5..bdab10b591 100644
--- a/bun.lock
+++ b/bun.lock
@@ -7,6 +7,7 @@
"dependencies": {
"@ai-sdk/amazon-bedrock": "^3.0.65",
"@ai-sdk/anthropic": "^2.0.53",
+ "@ai-sdk/azure": "^2.0.0",
"@ai-sdk/deepseek": "^1.0.31",
"@ai-sdk/google": "^2.0.44",
"@ai-sdk/mcp": "^0.0.11",
@@ -196,6 +197,8 @@
"@ai-sdk/anthropic": ["@ai-sdk/anthropic@2.0.53", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.18" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-ih7NV+OFSNWZCF+tYYD7ovvvM+gv7TRKQblpVohg2ipIwC9Y0TirzocJVREzZa/v9luxUwFbsPji++DUDWWxsg=="],
+ "@ai-sdk/azure": ["@ai-sdk/azure@2.0.91", "", { "dependencies": { "@ai-sdk/openai": "2.0.89", "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-9tznVSs6LGQNKKxb8pKd7CkBV9yk+a/ENpFicHCj2CmBUKefxzwJ9JbUqrlK3VF6dGZw3LXq0dWxt7/Yekaj1w=="],
+
"@ai-sdk/deepseek": ["@ai-sdk/deepseek@1.0.31", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.18" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-Il7WJp8bA3CmlreYSl1YzCucGTn2e5P81IANYIIEeLtWrbK0Y9CLoOCROj8xKYyUSMKlINyGZX2uP79cKewtSg=="],
"@ai-sdk/gateway": ["@ai-sdk/gateway@2.0.18", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.18", "@vercel/oidc": "3.0.5" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-sDQcW+6ck2m0pTIHW6BPHD7S125WD3qNkx/B8sEzJp/hurocmJ5Cni0ybExg6sQMGo+fr/GWOwpHF1cmCdg5rQ=="],
@@ -3770,6 +3773,12 @@
"zwitch": ["zwitch@2.0.4", "", {}, "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A=="],
+ "@ai-sdk/azure/@ai-sdk/openai": ["@ai-sdk/openai@2.0.89", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@ai-sdk/provider-utils": "3.0.20" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-4+qWkBCbL9HPKbgrUO/F2uXZ8GqrYxHa8SWEYIzxEJ9zvWw3ISr3t1/27O1i8MGSym+PzEyHBT48EV4LAwWaEw=="],
+
+ "@ai-sdk/azure/@ai-sdk/provider": ["@ai-sdk/provider@2.0.1", "", { "dependencies": { "json-schema": "^0.4.0" } }, "sha512-KCUwswvsC5VsW2PWFqF8eJgSCu5Ysj7m1TxiHTVA6g7k360bk0RNQENT8KTMAYEs+8fWPD3Uu4dEmzGHc+jGng=="],
+
+ "@ai-sdk/azure/@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.20", "", { "dependencies": { "@ai-sdk/provider": "2.0.1", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-iXHVe0apM2zUEzauqJwqmpC37A5rihrStAih5Ks+JE32iTe4LZ58y17UGBjpQQTCRw9YxMeo2UFLxLpBluyvLQ=="],
+
"@aws-crypto/sha256-browser/@smithy/util-utf8": ["@smithy/util-utf8@2.3.0", "", { "dependencies": { "@smithy/util-buffer-from": "^2.2.0", "tslib": "^2.6.2" } }, "sha512-R8Rdn8Hy72KKcebgLiv8jQcQkXoLMOGGv5uI1/k0l+snqkOzQ1R0ChUBCxWMlBsFMekWjq0wRudIweFs7sKT5A=="],
"@aws-crypto/util/@smithy/util-utf8": ["@smithy/util-utf8@2.3.0", "", { "dependencies": { "@smithy/util-buffer-from": "^2.2.0", "tslib": "^2.6.2" } }, "sha512-R8Rdn8Hy72KKcebgLiv8jQcQkXoLMOGGv5uI1/k0l+snqkOzQ1R0ChUBCxWMlBsFMekWjq0wRudIweFs7sKT5A=="],
diff --git a/docs/config/providers.mdx b/docs/config/providers.mdx
index 161cb4c85e..492cd9b2d2 100644
--- a/docs/config/providers.mdx
+++ b/docs/config/providers.mdx
@@ -16,16 +16,18 @@ Most providers only need an API key. The UI handles validation and shows which p
## Supported Providers
-| Provider | Models | Get API Key |
-| -------------- | -------------------------- | ------------------------------------------------------- |
-| **Anthropic** | Claude Opus, Sonnet, Haiku | [console.anthropic.com](https://console.anthropic.com/) |
-| **OpenAI** | GPT-5, Codex | [platform.openai.com](https://platform.openai.com/) |
-| **Google** | Gemini Pro, Flash | [aistudio.google.com](https://aistudio.google.com/) |
-| **xAI** | Grok | [console.x.ai](https://console.x.ai/) |
-| **DeepSeek** | DeepSeek Chat, Reasoner | [platform.deepseek.com](https://platform.deepseek.com/) |
-| **OpenRouter** | 300+ models | [openrouter.ai](https://openrouter.ai/) |
-| **Ollama** | Local models | [ollama.com](https://ollama.com/) (no key needed) |
-| **Bedrock** | Claude via AWS | AWS Console |
+| Provider | Models | Get API Key |
+| ----------------- | -------------------------- | ------------------------------------------------------- |
+| **Anthropic** | Claude Opus, Sonnet, Haiku | [console.anthropic.com](https://console.anthropic.com/) |
+| **Azure Foundry** | Claude via Azure Foundry | [ai.azure.com](https://ai.azure.com/) |
+| **Azure OpenAI** | GPT via Azure | [portal.azure.com](https://portal.azure.com/) |
+| **OpenAI** | GPT-5, Codex | [platform.openai.com](https://platform.openai.com/) |
+| **Google** | Gemini Pro, Flash | [aistudio.google.com](https://aistudio.google.com/) |
+| **xAI** | Grok | [console.x.ai](https://console.x.ai/) |
+| **DeepSeek** | DeepSeek Chat, Reasoner | [platform.deepseek.com](https://platform.deepseek.com/) |
+| **OpenRouter** | 300+ models | [openrouter.ai](https://openrouter.ai/) |
+| **Ollama** | Local models | [ollama.com](https://ollama.com/) (no key needed) |
+| **Bedrock** | Claude via AWS | AWS Console |
## Environment Variables
@@ -33,15 +35,17 @@ Providers also read from environment variables as fallback:
{/* BEGIN PROVIDER_ENV_VARS */}
-| Provider | Environment Variable |
-| ---------- | -------------------------------------------------- |
-| Anthropic | `ANTHROPIC_API_KEY` or `ANTHROPIC_AUTH_TOKEN` |
-| OpenAI | `OPENAI_API_KEY` |
-| Google | `GOOGLE_GENERATIVE_AI_API_KEY` or `GOOGLE_API_KEY` |
-| xAI | `XAI_API_KEY` |
-| OpenRouter | `OPENROUTER_API_KEY` |
-| DeepSeek | `DEEPSEEK_API_KEY` |
-| Bedrock | `AWS_REGION` (credentials via AWS SDK chain) |
+| Provider | Environment Variable |
+| ------------- | ----------------------------------------------------- |
+| Anthropic | `ANTHROPIC_API_KEY` or `ANTHROPIC_AUTH_TOKEN` |
+| Azure Foundry | `AZURE_FOUNDRY_API_KEY` and `AZURE_FOUNDRY_RESOURCE` |
+| Azure OpenAI | `AZURE_OPENAI_API_KEY` and `AZURE_OPENAI_ENDPOINT` |
+| OpenAI | `OPENAI_API_KEY` |
+| Google | `GOOGLE_GENERATIVE_AI_API_KEY` or `GOOGLE_API_KEY` |
+| xAI | `XAI_API_KEY` |
+| OpenRouter | `OPENROUTER_API_KEY` |
+| DeepSeek | `DEEPSEEK_API_KEY` |
+| Bedrock | `AWS_REGION` (credentials via AWS SDK chain) |
Additional environment variables
@@ -53,12 +57,8 @@ Providers also read from environment variables as fallback:
| OpenAI | `OPENAI_ORG_ID` | Organization ID |
| Google | `GOOGLE_BASE_URL` | Custom API endpoint |
| xAI | `XAI_BASE_URL` | Custom API endpoint |
-| Azure OpenAI | `AZURE_OPENAI_API_KEY` | API key |
-| Azure OpenAI | `AZURE_OPENAI_ENDPOINT` | Endpoint URL |
-| Azure OpenAI | `AZURE_OPENAI_DEPLOYMENT` | Deployment name |
-| Azure OpenAI | `AZURE_OPENAI_API_VERSION` | API version |
-
-Azure OpenAI env vars configure the OpenAI provider with Azure backend.
+| Azure OpenAI | `AZURE_OPENAI_DEPLOYMENT` | Deployment name (optional) |
+| Azure OpenAI | `AZURE_OPENAI_API_VERSION` | API version (optional) |
@@ -95,6 +95,62 @@ For advanced options not exposed in the UI, edit `~/.mux/providers.jsonc` direct
}
```
+### Azure Foundry
+
+Azure Foundry provides access to Claude models through Microsoft's Azure AI Foundry platform. It uses Anthropic's native API format.
+
+```jsonc
+{
+ "azure-foundry": {
+ "apiKey": "your-azure-api-key",
+ "resource": "your-resource-name" // Just the resource name, not full URL
+ }
+}
+```
+
+**Getting your credentials:**
+1. Go to [Azure AI Foundry](https://ai.azure.com/)
+2. Create a project and deploy a Claude model
+3. Find your resource name (e.g., `my-resource` from `https://my-resource.services.ai.azure.com/`)
+4. Copy your API key from the Keys section
+
+**Environment variables:**
+```bash
+export AZURE_FOUNDRY_RESOURCE=your-resource-name # Just the name, not full URL
+export AZURE_FOUNDRY_API_KEY=your-azure-api-key
+```
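+
+Only the resource name is needed; it is expanded to `https://<resource>.services.ai.azure.com/anthropic/v1/` when requests are built.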
+
+**Note:** Azure Foundry is separate from Azure OpenAI. All Claude features work the same as with the direct Anthropic provider: streaming, tool calling, thinking, and prompt caching.
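+
+Both Azure providers are independent entries in `~/.mux/providers.jsonc` and can be configured side by side. A minimal sketch (the `azure-openai` fields are covered in the next section):
+
+```jsonc
+{
+  "azure-foundry": {
+    "apiKey": "your-azure-foundry-key",
+    "resource": "your-resource-name"
+  },
+  "azure-openai": {
+    "apiKey": "your-azure-openai-key",
+    "baseUrl": "https://your-resource.cognitiveservices.azure.com"
+  }
+}
+```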
+
+### Azure OpenAI
+
+Azure OpenAI provides access to OpenAI models (GPT-5, Codex) through Microsoft Azure.
+
+```jsonc
+{
+ "azure-openai": {
+ "apiKey": "your-azure-api-key",
+ "baseUrl": "https://your-resource.cognitiveservices.azure.com",
+ "deployment": "your-deployment-name", // Optional: defaults to model ID
+ "apiVersion": "2024-12-01-preview" // Optional
+ }
+}
+```
+
+**Getting your credentials:**
+1. Go to [Azure Portal](https://portal.azure.com/)
+2. Create an Azure OpenAI resource
+3. Deploy a model (e.g., gpt-5.2)
+4. Copy your endpoint URL (e.g., `https://your-resource.cognitiveservices.azure.com`) and API key
+
+**Environment variables:**
+```bash
+export AZURE_OPENAI_ENDPOINT=https://your-resource.cognitiveservices.azure.com
+export AZURE_OPENAI_API_KEY=your-azure-api-key
+export AZURE_OPENAI_DEPLOYMENT=your-deployment-name # Optional
+export AZURE_OPENAI_API_VERSION=2024-12-01-preview # Optional
+```
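+
+**Model routing:** model IDs containing `codex` are sent to Azure's Responses API with the API version pinned to `2025-04-01-preview` (older versions don't support that endpoint); all other models use the deployment-based Chat Completions API with your configured `apiVersion`, defaulting to `2024-12-01-preview`. If no `deployment` is set, the model ID is used as the deployment name.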
+
### Bedrock Authentication
Bedrock supports multiple authentication methods (tried in order):
diff --git a/package.json b/package.json
index 312ddb4442..c4312039a9 100644
--- a/package.json
+++ b/package.json
@@ -47,6 +47,7 @@
"dependencies": {
"@ai-sdk/amazon-bedrock": "^3.0.65",
"@ai-sdk/anthropic": "^2.0.53",
+ "@ai-sdk/azure": "^2.0.0",
"@ai-sdk/deepseek": "^1.0.31",
"@ai-sdk/google": "^2.0.44",
"@ai-sdk/mcp": "^0.0.11",
diff --git a/src/browser/components/ProviderIcon.tsx b/src/browser/components/ProviderIcon.tsx
index b56346f853..cfa0ad432e 100644
--- a/src/browser/components/ProviderIcon.tsx
+++ b/src/browser/components/ProviderIcon.tsx
@@ -19,6 +19,7 @@ import { cn } from "@/common/lib/utils";
*/
const PROVIDER_ICONS: Partial<Record<ProviderName, typeof AnthropicIcon>> = {
anthropic: AnthropicIcon,
+ "azure-foundry": AnthropicIcon, // Same icon as Anthropic (Claude branding)
openai: OpenAIIcon,
google: GoogleIcon,
xai: XAIIcon,
diff --git a/src/browser/components/Settings/sections/ProvidersSection.tsx b/src/browser/components/Settings/sections/ProvidersSection.tsx
index 6ce3a70388..4966e7b1a8 100644
--- a/src/browser/components/Settings/sections/ProvidersSection.tsx
+++ b/src/browser/components/Settings/sections/ProvidersSection.tsx
@@ -116,6 +116,44 @@ function getProviderFields(provider: ProviderName): FieldConfig[] {
return [];
}
+ if (provider === "azure-foundry") {
+ return [
+ { key: "apiKey", label: "API Key", placeholder: "Enter API key", type: "secret" },
+ {
+ key: "resource",
+ label: "Resource",
+ placeholder: "your-resource-name",
+ type: "text",
+ },
+ ];
+ }
+
+ if (provider === "azure-openai") {
+ return [
+ { key: "apiKey", label: "API Key", placeholder: "Enter API key", type: "secret" },
+ {
+ key: "baseUrl",
+ label: "Endpoint",
+ placeholder: "https://your-resource.cognitiveservices.azure.com",
+ type: "text",
+ },
+ {
+ key: "deployment",
+ label: "Deployment",
+ placeholder: "your-deployment-name",
+ type: "text",
+ optional: true,
+ },
+ {
+ key: "apiVersion",
+ label: "API Version",
+ placeholder: "2024-12-01-preview",
+ type: "text",
+ optional: true,
+ },
+ ];
+ }
+
// Default for most providers
return [
{ key: "apiKey", label: "API Key", placeholder: "Enter API key", type: "secret" },
@@ -134,6 +172,8 @@ function getProviderFields(provider: ProviderName): FieldConfig[] {
*/
const PROVIDER_KEY_URLS: Partial<Record<ProviderName, string>> = {
anthropic: "https://console.anthropic.com/settings/keys",
+ "azure-foundry": "https://ai.azure.com/",
+ "azure-openai": "https://portal.azure.com/",
openai: "https://platform.openai.com/api-keys",
google: "https://aistudio.google.com/app/apikey",
xai: "https://console.x.ai/team/default/api-keys",
@@ -430,6 +470,8 @@ export function ProvidersSection() {
updateOptimistically(provider, { apiKeySet: editValue !== "" });
} else if (field === "baseUrl") {
updateOptimistically(provider, { baseUrl: editValue || undefined });
+ } else if (field === "resource") {
+ updateOptimistically(provider, { resource: editValue || undefined });
}
setEditingField(null);
@@ -449,6 +491,8 @@ export function ProvidersSection() {
updateOptimistically(provider, { apiKeySet: false });
} else if (field === "baseUrl") {
updateOptimistically(provider, { baseUrl: undefined });
+ } else if (field === "resource") {
+ updateOptimistically(provider, { resource: undefined });
}
// Save in background
diff --git a/src/common/constants/knownModels.ts b/src/common/constants/knownModels.ts
index ed298f0f84..dc3ae3900f 100644
--- a/src/common/constants/knownModels.ts
+++ b/src/common/constants/knownModels.ts
@@ -4,7 +4,7 @@
import { formatModelDisplayName } from "../utils/ai/modelDisplay";
-type ModelProvider = "anthropic" | "openai" | "google" | "xai";
+type ModelProvider = "anthropic" | "openai" | "google" | "xai" | "azure-foundry" | "azure-openai";
interface KnownModelDefinition {
/** Provider identifier used by SDK factories */
@@ -46,6 +46,40 @@ const MODEL_DEFINITIONS = {
aliases: ["haiku"],
tokenizerOverride: "anthropic/claude-3.5-haiku",
},
+ AZURE_FOUNDRY_OPUS: {
+ provider: "azure-foundry",
+ providerModelId: "claude-opus-4-5",
+ aliases: ["azure-opus", "foundry-opus"],
+ },
+ AZURE_FOUNDRY_SONNET: {
+ provider: "azure-foundry",
+ providerModelId: "claude-sonnet-4-5",
+ aliases: ["azure-sonnet", "foundry-sonnet"],
+ },
+ AZURE_FOUNDRY_HAIKU: {
+ provider: "azure-foundry",
+ providerModelId: "claude-haiku-4-5",
+ aliases: ["azure-haiku", "foundry-haiku"],
+ },
+ // Azure OpenAI models (GPT via Azure)
+ AZURE_OPENAI_GPT: {
+ provider: "azure-openai",
+ providerModelId: "gpt-5.2",
+ aliases: ["azure-gpt"],
+ tokenizerOverride: "openai/gpt-5",
+ },
+ AZURE_OPENAI_GPT_CODEX: {
+ provider: "azure-openai",
+ providerModelId: "gpt-5.1-codex",
+ aliases: ["azure-codex"],
+ tokenizerOverride: "openai/gpt-5",
+ },
+ AZURE_OPENAI_GPT_CODEX_MAX: {
+ provider: "azure-openai",
+ providerModelId: "gpt-5.1-codex-max",
+ aliases: ["azure-codex-max"],
+ tokenizerOverride: "openai/gpt-5",
+ },
GPT: {
provider: "openai",
providerModelId: "gpt-5.2",
diff --git a/src/common/constants/providers.ts b/src/common/constants/providers.ts
index d7f9eacb54..4a2c6c8566 100644
--- a/src/common/constants/providers.ts
+++ b/src/common/constants/providers.ts
@@ -38,6 +38,18 @@ export const PROVIDER_DEFINITIONS = {
factoryName: "createAnthropic",
requiresApiKey: true,
},
+ "azure-foundry": {
+ displayName: "Azure Foundry",
+ import: () => import("@ai-sdk/anthropic"), // Uses Anthropic SDK with Azure baseURL
+ factoryName: "createAnthropic",
+ requiresApiKey: true,
+ },
+ "azure-openai": {
+ displayName: "Azure OpenAI",
+ import: () => import("@ai-sdk/azure"), // Uses Azure OpenAI SDK
+ factoryName: "createAzure",
+ requiresApiKey: true,
+ },
openai: {
displayName: "OpenAI",
import: () => import("@ai-sdk/openai"),
diff --git a/src/common/orpc/schemas/api.ts b/src/common/orpc/schemas/api.ts
index a84bd8ed4b..630dde0c76 100644
--- a/src/common/orpc/schemas/api.ts
+++ b/src/common/orpc/schemas/api.ts
@@ -100,6 +100,12 @@ export const ProviderConfigInfoSchema = z.object({
/** Whether this provider is configured and ready to use */
isConfigured: z.boolean(),
baseUrl: z.string().optional(),
+ /** Azure Foundry resource name */
+ resource: z.string().optional(),
+ /** Azure OpenAI deployment name */
+ deployment: z.string().optional(),
+ /** Azure OpenAI API version */
+ apiVersion: z.string().optional(),
models: z.array(z.string()).optional(),
/** OpenAI-specific fields */
serviceTier: z.enum(["auto", "default", "flex", "priority"]).optional(),
diff --git a/src/node/services/aiService.ts b/src/node/services/aiService.ts
index f90a3386d8..6a56bc24c2 100644
--- a/src/node/services/aiService.ts
+++ b/src/node/services/aiService.ts
@@ -641,6 +641,94 @@ export class AIService extends EventEmitter {
return Ok(provider(modelId));
}
+ // Handle Azure Foundry provider (uses @ai-sdk/anthropic with Azure baseURL)
+ if (providerName === "azure-foundry") {
+ // Resolve credentials from config + env
+ const creds = resolveProviderCredentials("azure-foundry", providerConfig);
+ if (!creds.isConfigured || !creds.apiKey || !creds.resource) {
+ return Err({ type: "api_key_not_found", provider: providerName });
+ }
+
+ // Build Azure Foundry baseURL from resource name
+ const baseURL = `https://${creds.resource}.services.ai.azure.com/anthropic/v1/`;
+ log.debug(`Azure Foundry baseURL: ${baseURL}`);
+
+ // Use @ai-sdk/anthropic with Azure baseURL - no special adapter needed
+ const { createAnthropic } = await import("@ai-sdk/anthropic");
+
+ // Wrap fetch to inject cache_control on tools and messages
+ // (SDK doesn't translate providerOptions to cache_control for these)
+ // Use getProviderFetch to preserve any user-configured custom fetch (e.g., proxies)
+ const baseFetch = getProviderFetch(providerConfig);
+ const fetchWithCacheControl = wrapFetchWithAnthropicCacheControl(baseFetch);
+ const provider = createAnthropic({
+ apiKey: creds.apiKey,
+ baseURL,
+ headers: providerConfig.headers,
+ fetch: fetchWithCacheControl,
+ });
+
+ return Ok(provider(modelId));
+ }
+
+ // Handle Azure OpenAI provider (uses @ai-sdk/azure)
+ if (providerName === "azure-openai") {
+ // Resolve credentials from config + env
+ const creds = resolveProviderCredentials("azure-openai", providerConfig);
+ if (!creds.isConfigured || !creds.apiKey || !creds.baseUrl) {
+ return Err({ type: "api_key_not_found", provider: providerName });
+ }
+
+ let baseURL = creds.baseUrl;
+ // Remove trailing /openai or other path suffixes if present
+ baseURL = baseURL.replace(/\/openai.*$/, "");
+ // Ensure no trailing slash
+ baseURL = baseURL.replace(/\/$/, "");
+
+ log.debug(`Azure OpenAI baseURL: ${baseURL}`);
+
+ const { createAzure } = await import("@ai-sdk/azure");
+ const baseFetch = getProviderFetch(providerConfig);
+
+ // Use deployment name if provided, otherwise use the model ID directly
+ const deploymentOrModel = creds.deployment ?? modelId;
+
+ // Codex models use the Responses API (/openai/responses) with 2025-04-01-preview
+ // Other models (gpt-5.2) use deployment-based Chat Completions API
+ const isCodexModel = modelId.includes("codex");
+
+ if (isCodexModel) {
+ // Codex uses Responses API: /openai/responses?api-version=2025-04-01-preview
+ // Responses API requires api-version 2025-03-01-preview or later - force this version
+ // The SDK adds /v1/ to the path, but Azure expects no /v1/ prefix
+ // Use custom fetch to strip the /v1/ from the URL
+ const codexFetch = (async (input: RequestInfo | URL, init?: RequestInit) => {
+ const url = typeof input === "string" ? input : input.toString();
+ const fixedUrl = url.replace("/v1/responses", "/responses");
+ return baseFetch(fixedUrl, init);
+ }) as typeof fetch;
+ const provider = createAzure({
+ apiKey: creds.apiKey,
+ baseURL: `${baseURL}/openai`,
+ // Force 2025-04-01-preview for Responses API - older versions not supported
+ apiVersion: "2025-04-01-preview",
+ fetch: codexFetch,
+ });
+ return Ok(provider.responses(deploymentOrModel));
+ } else {
+ // GPT models use deployment-based Chat Completions API
+ // Format: /openai/deployments/{deployment}/chat/completions?api-version={version}
+ const provider = createAzure({
+ apiKey: creds.apiKey,
+ baseURL: `${baseURL}/openai`,
+ apiVersion: creds.apiVersion ?? "2024-12-01-preview",
+ useDeploymentBasedUrls: true,
+ fetch: baseFetch,
+ });
+ return Ok(provider(deploymentOrModel));
+ }
+ }
+
// Handle OpenAI provider (using Responses API)
if (providerName === "openai") {
// Resolve credentials from config + env (single source of truth)
diff --git a/src/node/services/providerService.ts b/src/node/services/providerService.ts
index 47935c50e4..ccc1725875 100644
--- a/src/node/services/providerService.ts
+++ b/src/node/services/providerService.ts
@@ -55,6 +55,9 @@ export class ProviderService {
const config = (providersConfig[provider] ?? {}) as {
apiKey?: string;
baseUrl?: string;
+ resource?: string;
+ deployment?: string;
+ apiVersion?: string;
models?: string[];
serviceTier?: unknown;
region?: string;
@@ -67,9 +70,16 @@ export class ProviderService {
apiKeySet: !!config.apiKey,
isConfigured: false, // computed below
baseUrl: config.baseUrl,
+ resource: config.resource,
models: config.models,
};
+ // Azure OpenAI-specific fields
+ if (provider === "azure-openai") {
+ providerInfo.deployment = config.deployment;
+ providerInfo.apiVersion = config.apiVersion;
+ }
+
// OpenAI-specific fields
const serviceTier = config.serviceTier;
if (
diff --git a/src/node/utils/providerRequirements.ts b/src/node/utils/providerRequirements.ts
index edb18889f8..b0d34436df 100644
--- a/src/node/utils/providerRequirements.ts
+++ b/src/node/utils/providerRequirements.ts
@@ -23,6 +23,9 @@ export const PROVIDER_ENV_VARS: Partial<
baseUrl?: string[];
organization?: string[];
region?: string[];
+ resource?: string[];
+ deployment?: string[];
+ apiVersion?: string[];
}
>
> = {
@@ -30,6 +33,16 @@ export const PROVIDER_ENV_VARS: Partial<
apiKey: ["ANTHROPIC_API_KEY", "ANTHROPIC_AUTH_TOKEN"],
baseUrl: ["ANTHROPIC_BASE_URL"],
},
+ "azure-foundry": {
+ apiKey: ["AZURE_FOUNDRY_API_KEY", "AZURE_API_KEY"],
+ resource: ["AZURE_FOUNDRY_RESOURCE"],
+ },
+ "azure-openai": {
+ apiKey: ["AZURE_OPENAI_API_KEY"],
+ baseUrl: ["AZURE_OPENAI_ENDPOINT"],
+ deployment: ["AZURE_OPENAI_DEPLOYMENT"],
+ apiVersion: ["AZURE_OPENAI_API_VERSION"],
+ },
openai: {
apiKey: ["OPENAI_API_KEY"],
baseUrl: ["OPENAI_BASE_URL", "OPENAI_API_BASE"],
@@ -54,14 +67,6 @@ export const PROVIDER_ENV_VARS: Partial<
},
};
-/** Azure OpenAI env vars (special case: maps to "openai" provider) */
-export const AZURE_OPENAI_ENV_VARS = {
- apiKey: "AZURE_OPENAI_API_KEY",
- endpoint: "AZURE_OPENAI_ENDPOINT",
- deployment: "AZURE_OPENAI_DEPLOYMENT",
- apiVersion: "AZURE_OPENAI_API_VERSION",
-};
-
/** Resolve first non-empty env var from a list of candidates */
function resolveEnv(
keys: string[] | undefined,
@@ -91,6 +96,9 @@ export interface ProviderConfigRaw {
couponCode?: string;
voucher?: string; // legacy mux-gateway field
organization?: string; // OpenAI org ID
+ resource?: string; // Azure Foundry resource name
+ deployment?: string; // Azure OpenAI deployment name
+ apiVersion?: string; // Azure OpenAI API version
}
/** Result of resolving provider credentials */
@@ -105,6 +113,9 @@ export interface ResolvedCredentials {
couponCode?: string; // mux-gateway
baseUrl?: string; // from config or env
organization?: string; // openai
+ resource?: string; // azure-foundry
+ deployment?: string; // azure-openai
+ apiVersion?: string; // azure-openai
}
/** Legacy alias for backward compatibility */
@@ -144,6 +155,36 @@ export function resolveProviderCredentials(
: { isConfigured: false, missingRequirement: "coupon_code" };
}
+ // Azure Foundry: requires both API key and resource name
+ if (provider === "azure-foundry") {
+ const envMapping = PROVIDER_ENV_VARS[provider];
+ // eslint-disable-next-line @typescript-eslint/prefer-nullish-coalescing -- empty string should be treated as unset
+ const configKey = config.apiKey || null;
+ const apiKey = configKey ?? resolveEnv(envMapping?.apiKey, env);
+ const resource = config.resource ?? resolveEnv(envMapping?.resource, env);
+
+ if (apiKey && resource) {
+ return { isConfigured: true, apiKey, resource };
+ }
+ return { isConfigured: false, missingRequirement: "api_key" };
+ }
+
+ // Azure OpenAI: requires API key and endpoint (baseUrl)
+ if (provider === "azure-openai") {
+ const envMapping = PROVIDER_ENV_VARS[provider];
+ // eslint-disable-next-line @typescript-eslint/prefer-nullish-coalescing -- empty string should be treated as unset
+ const configKey = config.apiKey || null;
+ const apiKey = configKey ?? resolveEnv(envMapping?.apiKey, env);
+ const baseUrl = config.baseURL ?? config.baseUrl ?? resolveEnv(envMapping?.baseUrl, env);
+ const deployment = config.deployment ?? resolveEnv(envMapping?.deployment, env);
+ const apiVersion = config.apiVersion ?? resolveEnv(envMapping?.apiVersion, env);
+
+ if (apiKey && baseUrl) {
+ return { isConfigured: true, apiKey, baseUrl, deployment, apiVersion };
+ }
+ return { isConfigured: false, missingRequirement: "api_key" };
+ }
+
// Keyless providers (e.g., ollama): require explicit opt-in via baseUrl or models
const def = PROVIDER_DEFINITIONS[provider];
if (!def.requiresApiKey) {
@@ -206,31 +247,13 @@ export function buildProvidersFromEnv(
const entry: ProviderConfig = { apiKey: creds.apiKey };
if (creds.baseUrl) entry.baseUrl = creds.baseUrl;
if (creds.organization) entry.organization = creds.organization;
+ // Azure OpenAI-specific fields
+ if (creds.deployment) entry.deployment = creds.deployment;
+ if (creds.apiVersion) entry.apiVersion = creds.apiVersion;
providers[provider] = entry;
}
}
- // Azure OpenAI special case: maps to "openai" provider if not already set
- if (!providers.openai) {
- const azureKey = env[AZURE_OPENAI_ENV_VARS.apiKey]?.trim();
- const azureEndpoint = env[AZURE_OPENAI_ENV_VARS.endpoint]?.trim();
-
- if (azureKey && azureEndpoint) {
- const entry: ProviderConfig = {
- apiKey: azureKey,
- baseUrl: azureEndpoint,
- };
-
- const deployment = env[AZURE_OPENAI_ENV_VARS.deployment]?.trim();
- if (deployment) entry.defaultModel = deployment;
-
- const apiVersion = env[AZURE_OPENAI_ENV_VARS.apiVersion]?.trim();
- if (apiVersion) entry.apiVersion = apiVersion;
-
- providers.openai = entry;
- }
- }
-
return providers;
}