diff --git a/conversation/anthropic/anthropic.go b/conversation/anthropic/anthropic.go
index fc7d4638e6..19771d1a06 100644
--- a/conversation/anthropic/anthropic.go
+++ b/conversation/anthropic/anthropic.go
@@ -41,8 +41,6 @@ func NewAnthropic(logger logger.Logger) conversation.Conversation {
 	return a
 }
 
-const defaultModel = "claude-3-5-sonnet-20240620"
-
 func (a *Anthropic) Init(ctx context.Context, meta conversation.Metadata) error {
 	m := conversation.LangchainMetadata{}
 	err := kmeta.DecodeMetadata(meta.Properties, &m)
@@ -50,10 +48,8 @@ func (a *Anthropic) Init(ctx context.Context, meta conversation.Metadata) error
 		return err
 	}
 
-	model := defaultModel
-	if m.Model != "" {
-		model = m.Model
-	}
+	// Resolve the model via the central helper (precedence: env var, then metadata, then default)
+	model := conversation.GetAnthropicModel(m.Model)
 
 	llm, err := anthropic.New(
 		anthropic.WithModel(model),
diff --git a/conversation/anthropic/metadata.yaml b/conversation/anthropic/metadata.yaml
index 729971ca47..db46bb8916 100644
--- a/conversation/anthropic/metadata.yaml
+++ b/conversation/anthropic/metadata.yaml
@@ -24,10 +24,10 @@ metadata:
   - name: model
     required: false
     description: |
-      The Anthropic LLM to use.
+      The Anthropic LLM to use. The ANTHROPIC_MODEL environment variable, if set, takes precedence over this field.
     type: string
-    example: 'claude-3-5-sonnet-20240620'
-    default: 'claude-3-5-sonnet-20240620'
+    example: 'claude-sonnet-4-20250514'
+    default: 'claude-sonnet-4-20250514'
   - name: cacheTTL
     required: false
     description: |
diff --git a/conversation/googleai/googleai.go b/conversation/googleai/googleai.go
index 28abc7f3fa..fd36e3c6b5 100644
--- a/conversation/googleai/googleai.go
+++ b/conversation/googleai/googleai.go
@@ -41,8 +41,6 @@ func NewGoogleAI(logger logger.Logger) conversation.Conversation {
 	return g
 }
 
-const defaultModel = "gemini-2.5-flash"
-
 func (g *GoogleAI) Init(ctx context.Context, meta conversation.Metadata) error {
 	md := conversation.LangchainMetadata{}
 	err := kmeta.DecodeMetadata(meta.Properties, &md)
@@ -50,10 +48,8 @@ func (g *GoogleAI) Init(ctx context.Context, meta conversation.Metadata) error {
 		return err
 	}
 
-	model := defaultModel
-	if md.Model != "" {
-		model = md.Model
-	}
+	// Resolve the model via the central helper (precedence: env var, then metadata, then default)
+	model := conversation.GetGoogleAIModel(md.Model)
 
 	opts := []openai.Option{
 		openai.WithModel(model),
diff --git a/conversation/googleai/metadata.yaml b/conversation/googleai/metadata.yaml
index 3a9aa95cc0..61703b5ac9 100644
--- a/conversation/googleai/metadata.yaml
+++ b/conversation/googleai/metadata.yaml
@@ -24,10 +24,10 @@ metadata:
   - name: model
     required: false
     description: |
-      The GoogleAI LLM to use.
+      The GoogleAI LLM to use. The GOOGLEAI_MODEL environment variable, if set, takes precedence over this field.
     type: string
-    example: 'gemini-2.5-flash'
-    default: 'gemini-2.5-flash'
+    example: 'gemini-2.5-flash-lite'
+    default: 'gemini-2.5-flash-lite'
   - name: cacheTTL
     required: false
     description: |
diff --git a/conversation/huggingface/huggingface.go b/conversation/huggingface/huggingface.go
index 0ef727f874..d6d91ca5af 100644
--- a/conversation/huggingface/huggingface.go
+++ b/conversation/huggingface/huggingface.go
@@ -42,9 +42,6 @@ func NewHuggingface(logger logger.Logger) conversation.Conversation {
 	return h
 }
 
-// Default model - using a popular and reliable model
-const defaultModel = "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"
-
 // Default HuggingFace OpenAI-compatible endpoint
 const defaultEndpoint = "https://router.huggingface.co/hf-inference/models/{{model}}/v1"
 
@@ -55,10 +52,8 @@ func (h *Huggingface) Init(ctx context.Context, meta conversation.Metadata) erro
 		return err
 	}
 
-	model := defaultModel
-	if m.Model != "" {
-		model = m.Model
-	}
+	// Resolve the model via the central helper (precedence: env var, then metadata, then default)
+	model := conversation.GetHuggingFaceModel(m.Model)
 
 	endpoint := strings.Replace(defaultEndpoint, "{{model}}", model, 1)
 	if m.Endpoint != "" {
diff --git a/conversation/huggingface/metadata.yaml b/conversation/huggingface/metadata.yaml
index 89ec7d01a5..e37ccc6c6c 100644
--- a/conversation/huggingface/metadata.yaml
+++ b/conversation/huggingface/metadata.yaml
@@ -24,7 +24,7 @@ metadata:
   - name: model
     required: false
     description: |
-      The Huggingface model to use. Uses OpenAI-compatible API.
+      The Huggingface model to use. Uses the OpenAI-compatible API. The HUGGINGFACE_MODEL environment variable, if set, takes precedence over this field.
     type: string
     example: 'deepseek-ai/DeepSeek-R1-Distill-Qwen-32B'
     default: 'deepseek-ai/DeepSeek-R1-Distill-Qwen-32B'
diff --git a/conversation/metadata_test.go b/conversation/metadata_test.go
index 58edab76e2..de83e82cb0 100644
--- a/conversation/metadata_test.go
+++ b/conversation/metadata_test.go
@@ -25,7 +25,7 @@ func TestLangchainMetadata(t *testing.T) {
 	t.Run("json marshaling with endpoint", func(t *testing.T) {
 		metadata := LangchainMetadata{
 			Key:      "test-key",
-			Model:    "gpt-4",
+			Model:    DefaultOpenAIModel,
 			CacheTTL: "10m",
 			Endpoint: "https://custom-endpoint.example.com",
 		}
diff --git a/conversation/mistral/metadata.yaml b/conversation/mistral/metadata.yaml
index 329379dc24..28e2e1b47f 100644
--- a/conversation/mistral/metadata.yaml
+++ b/conversation/mistral/metadata.yaml
@@ -24,7 +24,7 @@ metadata:
   - name: model
     required: false
     description: |
-      The Mistral LLM to use.
+      The Mistral LLM to use. The MISTRAL_MODEL environment variable, if set, takes precedence over this field.
     type: string
     example: 'open-mistral-7b'
     default: 'open-mistral-7b'
diff --git a/conversation/mistral/mistral.go b/conversation/mistral/mistral.go
index c319319747..d22b9ef9a4 100644
--- a/conversation/mistral/mistral.go
+++ b/conversation/mistral/mistral.go
@@ -43,8 +43,6 @@ func NewMistral(logger logger.Logger) conversation.Conversation {
 	return m
 }
 
-const defaultModel = "open-mistral-7b"
-
 func (m *Mistral) Init(ctx context.Context, meta conversation.Metadata) error {
 	md := conversation.LangchainMetadata{}
 	err := kmeta.DecodeMetadata(meta.Properties, &md)
@@ -52,10 +50,8 @@ func (m *Mistral) Init(ctx context.Context, meta conversation.Metadata) error {
 		return err
 	}
 
-	model := defaultModel
-	if md.Model != "" {
-		model = md.Model
-	}
+	// Resolve the model via the central helper (precedence: env var, then metadata, then default)
+	model := conversation.GetMistralModel(md.Model)
 
 	llm, err := mistral.New(
 		mistral.WithModel(model),
diff --git a/conversation/models.go b/conversation/models.go
new file mode 100644
index 0000000000..3042c0fbd5
--- /dev/null
+++ b/conversation/models.go
@@ -0,0 +1,85 @@
+/*
+Copyright 2025 The Dapr Authors
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package conversation
+
+import (
+	"os"
+)
+
+// Environment variable names that can override the default models at runtime.
+const (
+	envOpenAIModel      = "OPENAI_MODEL"
+	envAzureOpenAIModel = "AZURE_OPENAI_MODEL"
+	envAnthropicModel   = "ANTHROPIC_MODEL"
+	envGoogleAIModel    = "GOOGLEAI_MODEL"
+	envMistralModel     = "MISTRAL_MODEL"
+	envHuggingFaceModel = "HUGGINGFACE_MODEL"
+	envOllamaModel      = "OLLAMA_MODEL"
+)
+
+// Exported default model constants for consumers of the conversation package.
+// These are used as fallbacks when env vars and metadata are not set.
+const (
+	DefaultOpenAIModel      = "gpt-5-nano"   // Default OpenAI model
+	DefaultAzureOpenAIModel = "gpt-4.1-nano" // Default Azure OpenAI model
+	DefaultAnthropicModel   = "claude-sonnet-4-20250514"
+	DefaultGoogleAIModel    = "gemini-2.5-flash-lite"
+	DefaultMistralModel     = "open-mistral-7b"
+	DefaultHuggingFaceModel = "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"
+	DefaultOllamaModel      = "llama3.2:latest"
+)
+
+// getModel resolves a model name with precedence: environment variable,
+// then metadata value, then default.
+func getModel(envVar, defaultValue, metadataValue string) string {
+	if value := os.Getenv(envVar); value != "" {
+		return value
+	}
+	if metadataValue != "" {
+		return metadataValue
+	}
+	return defaultValue
+}
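+
+// A sketch of the resolution order, using hypothetical values:
+//
+//	OPENAI_MODEL=gpt-4o set:   GetOpenAIModel("gpt-4.1") // -> "gpt-4o" (env var wins)
+//	OPENAI_MODEL unset:        GetOpenAIModel("gpt-4.1") // -> "gpt-4.1" (metadata)
+//	OPENAI_MODEL unset:        GetOpenAIModel("")        // -> DefaultOpenAIModel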
+
+// The Get*Model helpers resolve each provider's model with the precedence
+// described above. Pass the model from component metadata, or "" if not set.
+func GetOpenAIModel(metadataValue string) string {
+	return getModel(envOpenAIModel, DefaultOpenAIModel, metadataValue)
+}
+
+func GetAzureOpenAIModel(metadataValue string) string {
+	return getModel(envAzureOpenAIModel, DefaultAzureOpenAIModel, metadataValue)
+}
+
+func GetAnthropicModel(metadataValue string) string {
+	return getModel(envAnthropicModel, DefaultAnthropicModel, metadataValue)
+}
+
+func GetGoogleAIModel(metadataValue string) string {
+	return getModel(envGoogleAIModel, DefaultGoogleAIModel, metadataValue)
+}
+
+func GetMistralModel(metadataValue string) string {
+	return getModel(envMistralModel, DefaultMistralModel, metadataValue)
+}
+
+func GetHuggingFaceModel(metadataValue string) string {
+	return getModel(envHuggingFaceModel, DefaultHuggingFaceModel, metadataValue)
+}
+
+func GetOllamaModel(metadataValue string) string {
+	return getModel(envOllamaModel, DefaultOllamaModel, metadataValue)
+}
diff --git a/conversation/ollama/metadata.yaml b/conversation/ollama/metadata.yaml
index 113c7fbcd6..3f120aa6cc 100644
--- a/conversation/ollama/metadata.yaml
+++ b/conversation/ollama/metadata.yaml
@@ -12,7 +12,7 @@ metadata:
   - name: model
     required: false
     description: |
-      The Ollama LLM to use.
+      The Ollama LLM to use. The OLLAMA_MODEL environment variable, if set, takes precedence over this field.
     type: string
     example: 'llama3.2:latest'
     default: 'llama3.2:latest'
diff --git a/conversation/ollama/ollama.go b/conversation/ollama/ollama.go
index d3f7aa0913..56b262bb3e 100644
--- a/conversation/ollama/ollama.go
+++ b/conversation/ollama/ollama.go
@@ -41,8 +41,6 @@ func NewOllama(logger logger.Logger) conversation.Conversation {
 	return o
 }
 
-const defaultModel = "llama3.2:latest"
-
 func (o *Ollama) Init(ctx context.Context, meta conversation.Metadata) error {
 	md := conversation.LangchainMetadata{}
 	err := kmeta.DecodeMetadata(meta.Properties, &md)
@@ -50,10 +48,8 @@ func (o *Ollama) Init(ctx context.Context, meta conversation.Metadata) error {
 		return err
 	}
 
-	model := defaultModel
-	if md.Model != "" {
-		model = md.Model
-	}
+	// Resolve the model via the central helper (precedence: env var, then metadata, then default)
+	model := conversation.GetOllamaModel(md.Model)
 
 	llm, err := ollama.New(
 		ollama.WithModel(model),
diff --git a/conversation/openai/metadata.yaml b/conversation/openai/metadata.yaml
index dc01eb4ea4..826a534888 100644
--- a/conversation/openai/metadata.yaml
+++ b/conversation/openai/metadata.yaml
@@ -24,10 +24,10 @@ metadata:
   - name: model
     required: false
     description: |
-      The OpenAI LLM to use.
+      The OpenAI LLM to use. The OPENAI_MODEL environment variable, if set, takes precedence over this field.
     type: string
-    example: 'gpt-4-turbo'
-    default: 'gpt-4o'
+    example: 'gpt-5-nano'
+    default: 'gpt-5-nano'
   - name: endpoint
     required: false
     description: |
@@ -53,4 +53,4 @@ metadata:
       The type of API to use for the OpenAI service. This is required when using Azure OpenAI.
     type: string
     example: 'azure'
-    default: ''
\ No newline at end of file
+    default: ''
diff --git a/conversation/openai/openai.go b/conversation/openai/openai.go
index 9dfea30310..be414b75bb 100644
--- a/conversation/openai/openai.go
+++ b/conversation/openai/openai.go
@@ -42,8 +42,6 @@ func NewOpenAI(logger logger.Logger) conversation.Conversation {
 	return o
 }
 
-const defaultModel = "gpt-4o"
-
 func (o *OpenAI) Init(ctx context.Context, meta conversation.Metadata) error {
 	md := OpenAILangchainMetadata{}
 	err := kmeta.DecodeMetadata(meta.Properties, &md)
@@ -51,9 +49,12 @@ func (o *OpenAI) Init(ctx context.Context, meta conversation.Metadata) error {
 		return err
 	}
 
-	model := defaultModel
-	if md.Model != "" {
-		model = md.Model
+	// Resolve the model via the central helper (precedence: env var, then metadata, then default)
+	var model string
+	if md.APIType == "azure" {
+		model = conversation.GetAzureOpenAIModel(md.Model)
+	} else {
+		model = conversation.GetOpenAIModel(md.Model)
 	}
 	// Create options for OpenAI client
 	options := []openai.Option{
diff --git a/conversation/openai/openai_test.go b/conversation/openai/openai_test.go
index 1e9a7102ec..0c645b2032 100644
--- a/conversation/openai/openai_test.go
+++ b/conversation/openai/openai_test.go
@@ -34,7 +34,7 @@ func TestInit(t *testing.T) {
 			name: "with default endpoint",
 			metadata: map[string]string{
 				"key":   "test-key",
-				"model": "gpt-4",
+				"model": conversation.DefaultOpenAIModel,
 			},
 			testFn: func(t *testing.T, o *OpenAI, err error) {
 				require.NoError(t, err)
@@ -45,7 +45,7 @@ func TestInit(t *testing.T) {
 			name: "with custom endpoint",
 			metadata: map[string]string{
 				"key":      "test-key",
-				"model":    "gpt-4",
+				"model":    conversation.DefaultOpenAIModel,
 				"endpoint": "https://api.openai.com/v1",
 			},
 			testFn: func(t *testing.T, o *OpenAI, err error) {
@@ -59,7 +59,7 @@ func TestInit(t *testing.T) {
 			name: "with apiType azure and missing apiVersion",
 			metadata: map[string]string{
 				"key":      "test-key",
-				"model":    "gpt-4",
+				"model":    conversation.DefaultOpenAIModel,
 				"apiType":  "azure",
 				"endpoint": "https://custom-endpoint.openai.azure.com/",
 			},
@@ -72,7 +72,7 @@ func TestInit(t *testing.T) {
 			name: "with apiType azure and custom apiVersion",
 			metadata: map[string]string{
 				"key":        "test-key",
-				"model":      "gpt-4",
+				"model":      conversation.DefaultOpenAIModel,
 				"apiType":    "azure",
 				"endpoint":   "https://custom-endpoint.openai.azure.com/",
 				"apiVersion": "2025-01-01-preview",
@@ -86,7 +86,7 @@ func TestInit(t *testing.T) {
 			name: "with apiType azure but missing endpoint",
 			metadata: map[string]string{
 				"key":        "test-key",
-				"model":      "gpt-4",
+				"model":      conversation.DefaultOpenAIModel,
 				"apiType":    "azure",
 				"apiVersion": "2025-01-01-preview",
 			},
diff --git a/tests/config/conversation/README.md b/tests/config/conversation/README.md
index c8f097a7a0..a1991f23e0 100644
--- a/tests/config/conversation/README.md
+++ b/tests/config/conversation/README.md
@@ -35,6 +35,7 @@ The tests will automatically skip components for which the required environment
 ### Using a .env file (Recommended)
 
 1. Copy the template file:
+
 ```bash
 cp env.template .env
 ```
@@ -46,56 +47,84 @@ cp env.template .env
 
 Alternatively, you can set the following environment variables to run the respective tests:
 
-### OpenAI
+#### Model Configuration (Optional)
+
+You can override the default models used by each component by setting these environment variables:
+
+```bash
+export OPENAI_MODEL="gpt-5-nano"           # Default: gpt-5-nano
+export AZURE_OPENAI_MODEL="gpt-4.1-nano"   # Default: gpt-4.1-nano
+export ANTHROPIC_MODEL="claude-sonnet-4-20250514"    # Default: claude-sonnet-4-20250514
+export GOOGLEAI_MODEL="gemini-2.5-flash-lite"        # Default: gemini-2.5-flash-lite
+export MISTRAL_MODEL="open-mistral-7b"     # Default: open-mistral-7b
+export HUGGINGFACE_MODEL="deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"  # Default: deepseek-ai/DeepSeek-R1-Distill-Qwen-32B
+export OLLAMA_MODEL="llama3.2:latest"      # Default: llama3.2:latest
+```
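+
+These variables take precedence over the `model` field in component metadata (see `conversation/models.go`). As a sketch, overriding a model for a single test run might look like this (hypothetical invocation; use your usual test entry point):
+
+```bash
+OPENAI_MODEL="gpt-4o-mini" go test ./tests/conformance/...
+```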
+
+#### OpenAI
+
 ```bash
 export OPENAI_API_KEY="your_openai_api_key"
 ```
+
 Get your API key from: https://platform.openai.com/api-keys
 
-### Azure OpenAI
+#### Azure OpenAI
+
 ```bash
 export AZURE_OPENAI_API_KEY="your_openai_api_key"
 export AZURE_OPENAI_ENDPOINT="your_azureopenai_endpoint_here"
-export AZURE_OPENAI_API_VERSION="your_azreopenai_api_version_here"
+export AZURE_OPENAI_API_VERSION="your_azureopenai_api_version_here"
 ```
+
 Get your configuration values from: https://ai.azure.com/
 
-### Anthropic
+#### Anthropic
+
 ```bash
 export ANTHROPIC_API_KEY="your_anthropic_api_key"
 ```
-Get your API key from: https://console.anthropic.com/
 
-### Google AI
+Get your API key from: https://console.anthropic.com/
+
+#### Google AI
+
 ```bash
 export GOOGLE_AI_API_KEY="your_google_ai_api_key"
 ```
-Get your API key from: https://aistudio.google.com/app/apikey
 
-### Mistral
+Get your API key from: https://aistudio.google.com/app/apikey
+
+#### Mistral
+
 ```bash
 export MISTRAL_API_KEY="your_mistral_api_key"
 ```
-Get your API key from: https://console.mistral.ai/
 
-### HuggingFace
+Get your API key from: https://console.mistral.ai/
+
+#### HuggingFace
+
 ```bash
 export HUGGINGFACE_API_KEY="your_huggingface_api_key"
 ```
-Get your API key from: https://huggingface.co/settings/tokens
 
-### AWS Bedrock
+Get your API key from: https://huggingface.co/settings/tokens
+
+#### AWS Bedrock
+
 ```bash
 export AWS_ACCESS_KEY_ID="your_aws_access_key"
 export AWS_SECRET_ACCESS_KEY="your_aws_secret_key"
 export AWS_REGION="us-east-1"  # Optional, defaults to us-east-1
 ```
+
 Get your credentials from AWS Console
 
-### Ollama
+#### Ollama
+
 ```bash
 export OLLAMA_ENABLED="1"
 ```
+
 Requires a local Ollama server running with the `llama3.2:latest` model available.
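+
+A minimal local setup, assuming a standard Ollama install, might look like:
+
+```bash
+ollama pull llama3.2:latest   # fetch the model the tests expect
+ollama serve                  # start the server if it is not already running
+```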
 
 ## Test Configuration
@@ -103,7 +132,7 @@ Requires a local Ollama server running with the `llama3.2:latest` model availabl
 Each component has its own configuration file in this directory:
 
 - `echo/echo.yml` - Echo component configuration
-- `openai/openai.yml` - OpenAI configuration with gpt-4o-mini model
+- `openai/openai.yml` - OpenAI configuration with gpt-5-nano model
-- `anthropic/anthropic.yml` - Anthropic configuration with Claude 3 Haiku
-- `googleai/googleai.yml` - Google AI configuration with Gemini 1.5 Flash
+- `anthropic/anthropic.yml` - Anthropic configuration with the default Claude model
+- `googleai/googleai.yml` - Google AI configuration with the default Gemini model
 - `mistral/mistral.yml` - Mistral configuration with open-mistral-7b
@@ -117,13 +146,15 @@ The configurations use cost-effective models where possible to minimize testing
 
 The HuggingFace component uses a workaround due to issues with the native HuggingFace implementation in langchaingo. Instead of using the HuggingFace SDK directly, it uses the OpenAI SDK with HuggingFace's OpenAI-compatible API endpoints.
 
-### How it works:
+### How it works
+
 - **Model Selection**: Any HuggingFace model can be used by specifying its full name (e.g., `deepseek-ai/DeepSeek-R1-Distill-Qwen-32B`)
 - **Dynamic Endpoints**: The endpoint URL is automatically generated based on the model name using the template: `https://router.huggingface.co/hf-inference/models/{{model}}/v1`
 - **Custom Endpoints**: You can override the endpoint by specifying a custom `endpoint` parameter
 - **Authentication**: Uses the same HuggingFace API key authentication
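+
+For example, the default model `deepseek-ai/DeepSeek-R1-Distill-Qwen-32B` resolves to the endpoint `https://router.huggingface.co/hf-inference/models/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B/v1`.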
 
-### Example Configuration:
+### Example Configuration
+
 ```yaml
 apiVersion: dapr.io/v1alpha1
 kind: Component
diff --git a/tests/config/conversation/anthropic/anthropic.yml b/tests/config/conversation/anthropic/anthropic.yml
index 22353e19cc..bff41c83a4 100644
--- a/tests/config/conversation/anthropic/anthropic.yml
+++ b/tests/config/conversation/anthropic/anthropic.yml
@@ -9,4 +9,4 @@ spec:
     - name: key
       value: "${{ANTHROPIC_API_KEY}}"
     - name: model
-      value: "claude-3-haiku-20240307" 
\ No newline at end of file
+      value: ""   # use default for provider or customize via environment variable as defined in conversation/models.go 
\ No newline at end of file
diff --git a/tests/config/conversation/googleai/googleai.yml b/tests/config/conversation/googleai/googleai.yml
index d2ad6ee25f..ab9469b881 100644
--- a/tests/config/conversation/googleai/googleai.yml
+++ b/tests/config/conversation/googleai/googleai.yml
@@ -9,4 +9,4 @@ spec:
     - name: key
       value: "${{GOOGLE_AI_API_KEY}}"
     - name: model
-      value: "gemini-1.5-flash" 
\ No newline at end of file
+      value: ""   # use default for provider or customize via environment variable as defined in conversation/models.go 
\ No newline at end of file
diff --git a/tests/config/conversation/huggingface/huggingface.yml b/tests/config/conversation/huggingface/huggingface.yml
index c4ca9f2fea..2a1eee1884 100644
--- a/tests/config/conversation/huggingface/huggingface.yml
+++ b/tests/config/conversation/huggingface/huggingface.yml
@@ -9,4 +9,4 @@ spec:
     - name: key
       value: "${{HUGGINGFACE_API_KEY}}"
     - name: model
-      value: "HuggingFaceTB/SmolLM3-3B"
\ No newline at end of file
+      value: ""   # use default for provider or customize via environment variable as defined in conversation/models.go 
\ No newline at end of file
diff --git a/tests/config/conversation/mistral/mistral.yml b/tests/config/conversation/mistral/mistral.yml
index 016a8b5317..d484f6bae1 100644
--- a/tests/config/conversation/mistral/mistral.yml
+++ b/tests/config/conversation/mistral/mistral.yml
@@ -9,4 +9,4 @@ spec:
     - name: key
       value: "${{MISTRAL_API_KEY}}"
     - name: model
-      value: "open-mistral-7b" 
\ No newline at end of file
+      value: ""   # use default for provider or customize via environment variable as defined in conversation/models.go 
\ No newline at end of file
diff --git a/tests/config/conversation/ollama/ollama.yml b/tests/config/conversation/ollama/ollama.yml
index c144669c53..6d4bfc7c41 100644
--- a/tests/config/conversation/ollama/ollama.yml
+++ b/tests/config/conversation/ollama/ollama.yml
@@ -7,4 +7,4 @@ spec:
   version: v1
   metadata:
     - name: model
-      value: "llama3.2:latest" 
\ No newline at end of file
+      value: ""   # use default for provider or customize via environment variable as defined in conversation/models.go 
\ No newline at end of file
diff --git a/tests/config/conversation/openai/azure/openai.yml b/tests/config/conversation/openai/azure/openai.yml
index 106c59dc57..9545d3f3a4 100644
--- a/tests/config/conversation/openai/azure/openai.yml
+++ b/tests/config/conversation/openai/azure/openai.yml
@@ -9,7 +9,7 @@ spec:
     - name: key
       value: "${{AZURE_OPENAI_API_KEY}}"
     - name: model
-      value: "gpt-4o-mini" 
+      value: ""   # use default for provider or customize via environment variable as defined in conversation/models.go 
     - name: endpoint
       value: "${{AZURE_OPENAI_ENDPOINT}}"
     - name: apiType
diff --git a/tests/config/conversation/openai/openai/openai.yml b/tests/config/conversation/openai/openai/openai.yml
index 19eb55302f..ae84a73ff0 100644
--- a/tests/config/conversation/openai/openai/openai.yml
+++ b/tests/config/conversation/openai/openai/openai.yml
@@ -9,4 +9,4 @@ spec:
     - name: key
       value: "${{OPENAI_API_KEY}}"
     - name: model
-      value: "gpt-4o-mini" 
\ No newline at end of file
+      value: ""   # use default for provider or customize via environment variable as defined in conversation/models.go 
\ No newline at end of file
diff --git a/tests/conformance/conversation/conversation.go b/tests/conformance/conversation/conversation.go
index 1b4e33606e..f86924e359 100644
--- a/tests/conformance/conversation/conversation.go
+++ b/tests/conformance/conversation/conversation.go
@@ -69,6 +69,8 @@ func ConformanceTests(t *testing.T, props map[string]string, conv conversation.C
 			ctx, cancel := context.WithTimeout(t.Context(), 25*time.Second)
 			defer cancel()
 
+			// Note: Temperature is set to 1 for OpenAI models to avoid issues with GPT-5, which does not support temperature=0.
+			// This can be removed once langchaingo is updated to handle this automatically (tmc/langchaingo#1374).
 			req := &conversation.Request{
 				Message: &[]llms.MessageContent{
 					{
@@ -79,6 +81,9 @@ func ConformanceTests(t *testing.T, props map[string]string, conv conversation.C
 					},
 				},
 			}
+			if component == "openai" {
+				req.Temperature = 1
+			}
 			resp, err := conv.Converse(ctx, req)
 
 			require.NoError(t, err)
@@ -100,6 +105,9 @@ func ConformanceTests(t *testing.T, props map[string]string, conv conversation.C
 			req := &conversation.Request{
 				Message: &userMsgs,
 			}
+			if component == "openai" {
+				req.Temperature = 1
+			}
 			resp, err := conv.Converse(ctx, req)
 
 			require.NoError(t, err)
@@ -132,6 +140,9 @@ func ConformanceTests(t *testing.T, props map[string]string, conv conversation.C
 			req := &conversation.Request{
 				Message: &systemMsgs,
 			}
+			if component == "openai" {
+				req.Temperature = 1
+			}
 			resp, err := conv.Converse(ctx, req)
 
 			require.NoError(t, err)
@@ -223,6 +234,9 @@ func ConformanceTests(t *testing.T, props map[string]string, conv conversation.C
 			req := &conversation.Request{
 				Message: &assistantMsgs,
 			}
+			if component == "openai" {
+				req.Temperature = 1
+			}
 			resp, err := conv.Converse(ctx, req)
 
 			require.NoError(t, err)
@@ -254,6 +268,9 @@ func ConformanceTests(t *testing.T, props map[string]string, conv conversation.C
 			req := &conversation.Request{
 				Message: &developerMsgs,
 			}
+			if component == "openai" {
+				req.Temperature = 1
+			}
 			resp, err := conv.Converse(ctx, req)
 
 			require.NoError(t, err)
@@ -303,6 +320,9 @@ func ConformanceTests(t *testing.T, props map[string]string, conv conversation.C
 				Message: &messages,
 				Tools:   &tools,
 			}
+			if component == "openai" {
+				req.Temperature = 1
+			}
 
 			resp, err := conv.Converse(ctx, req)
 			require.NoError(t, err)
@@ -362,6 +382,9 @@ func ConformanceTests(t *testing.T, props map[string]string, conv conversation.C
 				req2 := &conversation.Request{
 					Message: &responseMessages,
 				}
+				if component == "openai" {
+					req2.Temperature = 1
+				}
 
 				resp2, err2 := conv.Converse(ctx, req2)
 				require.NoError(t, err2)
@@ -420,6 +443,9 @@ func ConformanceTests(t *testing.T, props map[string]string, conv conversation.C
 				Message: &messages,
 				Tools:   &tools,
 			}
+			if component == "openai" {
+				req1.Temperature = 1
+			}
 
 			resp1, err := conv.Converse(ctx, req1)
 			require.NoError(t, err)
@@ -493,6 +519,9 @@ func ConformanceTests(t *testing.T, props map[string]string, conv conversation.C
 				req2 := &conversation.Request{
 					Message: &toolResponseMessages,
 				}
+				if component == "openai" {
+					req2.Temperature = 1
+				}
 
 				resp2, err := conv.Converse(ctx, req2)
 				require.NoError(t, err)