diff --git a/backend/openedx_ai_extensions/processors/litellm_base_processor.py b/backend/openedx_ai_extensions/processors/litellm_base_processor.py index 3dea8fd..f43f6a9 100644 --- a/backend/openedx_ai_extensions/processors/litellm_base_processor.py +++ b/backend/openedx_ai_extensions/processors/litellm_base_processor.py @@ -16,28 +16,36 @@ class LitellmProcessor: def __init__(self, config=None, user_session=None): config = config or {} - - class_name = self.__class__.__name__ - self.config = config.get(class_name, {}) + self.config = config.get(self.__class__.__name__, {}) self.user_session = user_session - self.config_profile = self.config.get("config", "default") + provider_spec = self.config.get("provider", "default") + self.config_profile = provider_spec + if isinstance(provider_spec, str): + providers = getattr(settings, "AI_EXTENSIONS", {}) + provider = providers.get(provider_spec) + + if provider is None and provider_spec != "default": + raise ValueError(f"Unknown AI_EXTENSIONS profile '{provider_spec}'") + + provider = provider or {} + else: + raise TypeError("`provider` must be a string") + + options = self.config.get("options", {}) or {} - if "MODEL" not in settings.AI_EXTENSIONS[self.config_profile]: + base_params = {k.lower(): v for k, v in provider.items()} + override_params = {k.lower(): v for k, v in options.items()} + self.extra_params = {**base_params, **override_params} + + model = self.extra_params.get("model") + if not isinstance(model, str) or "/" not in model: raise ValueError( - f"AI_EXTENSIONS config '{self.config_profile}' missing 'MODEL' setting." + "MODEL must be defined and have the format 'provider/model_name'. " + "e.g., 'openai/gpt-4'" ) - try: - self.provider = settings.AI_EXTENSIONS[self.config_profile].get("MODEL").split("/")[0] - except Exception as exc: - raise ValueError( - "MODEL setting must be in the format 'provider/model_name'. e.g., 'openai/gpt-4'" - ) from exc - - self.extra_params = {} - for key, value in settings.AI_EXTENSIONS[self.config_profile].items(): - self.extra_params[key.lower()] = value + self.provider = model.split("/")[0] self.custom_prompt = self.config.get("prompt", None) self.stream = self.config.get("stream", False) @@ -60,7 +68,7 @@ def __init__(self, config=None, user_session=None): if allowed_mcp_configs: self.mcp_configs = { key: value - for key, value in settings.AI_EXTENSIONS_MCP_CONFIGS.items() + for key, value in getattr(settings, 'AI_EXTENSIONS_MCP_CONFIGS', {}).items() if key in allowed_mcp_configs } self.extra_params["tools"] = [ diff --git a/backend/openedx_ai_extensions/workflows/models.py b/backend/openedx_ai_extensions/workflows/models.py index c257a51..684ffb0 100644 --- a/backend/openedx_ai_extensions/workflows/models.py +++ b/backend/openedx_ai_extensions/workflows/models.py @@ -61,10 +61,6 @@ class AIWorkflowProfile(models.Model): help_text="JSON5 Merge Patch (RFC 7386) to apply to base template. Supports comments and trailing commas." 
) - class Meta: - verbose_name = "AI Workflow Profile" - verbose_name_plural = "AI Workflow Profiles" - def __str__(self): return f"{self.slug} ({self.base_filepath})" diff --git a/backend/openedx_ai_extensions/workflows/profiles/base/custom_prompt.json b/backend/openedx_ai_extensions/workflows/profiles/base/custom_prompt.json index e16fe36..323fe25 100644 --- a/backend/openedx_ai_extensions/workflows/profiles/base/custom_prompt.json +++ b/backend/openedx_ai_extensions/workflows/profiles/base/custom_prompt.json @@ -10,7 +10,7 @@ have it explain in a different with this prompt. "function": "get_location_content" }, "LLMProcessor": { - "config": "default", + "provider": "default", "stream": true, "prompt": "\ You are an educational assistant embedded in an online course. \ @@ -33,14 +33,14 @@ Output only the rephrased content. \ "UIComponents": { "request": { "component": "AIRequestComponent", - "config": { + "provider": { "buttonText": "Rephrase", "customMessage": "Explain this but differently" } }, "response": { "component": "AIResponseComponent", - "config": { + "provider": { "customMessage": "In other words" } } diff --git a/backend/openedx_ai_extensions/workflows/profiles/base/library_questions_assistant.json b/backend/openedx_ai_extensions/workflows/profiles/base/library_questions_assistant.json index 1ae40a7..9dbf2f4 100644 --- a/backend/openedx_ai_extensions/workflows/profiles/base/library_questions_assistant.json +++ b/backend/openedx_ai_extensions/workflows/profiles/base/library_questions_assistant.json @@ -11,7 +11,7 @@ Works only in studio UI and over the CMS service_variant. }, "EducatorAssistantProcessor": { "function": "generate_quiz_questions", - "config": "default" + "provider": "default" } }, "actuator_config": { diff --git a/backend/openedx_ai_extensions/workflows/profiles/base/standalone_config.json b/backend/openedx_ai_extensions/workflows/profiles/base/standalone_config.json new file mode 100644 index 0000000..5d2b62d --- /dev/null +++ b/backend/openedx_ai_extensions/workflows/profiles/base/standalone_config.json @@ -0,0 +1,37 @@ +/* +A profile that stores the config directy in json. +Useful for getting started or testing API_KEYS +*/ +{ + "orchestrator_class": "DirectLLMResponse", + "processor_config": { + "OpenEdXProcessor": { + }, + "LLMProcessor": { + "stream": true, + "prompt": "Say hi and tell me: which LLM model are you? Be specific.", + "options": { + "MODEL": "openai/gpt-4.1-mini", + "API_KEY": "sk-proj-put-your-api-key-here", + }, + } + }, + "actuator_config": { + "UIComponents": { + "request": { + "component": "AIRequestComponent", + "config": { + "buttonText": "Hello AI", + "customMessage": "Call any AI with any config" + } + }, + "response": { + "component": "AIResponseComponent", + "config": { + "customMessage": "The response from AI" + } + } + } + }, + "schema_version": "1.0", +} diff --git a/backend/openedx_ai_extensions/workflows/profiles/base/summary.json b/backend/openedx_ai_extensions/workflows/profiles/base/summary.json index 3a6c78d..9099384 100644 --- a/backend/openedx_ai_extensions/workflows/profiles/base/summary.json +++ b/backend/openedx_ai_extensions/workflows/profiles/base/summary.json @@ -10,7 +10,7 @@ present the streaming answer in the same box. 
}, "LLMProcessor": { "function": "summarize_content", - "config": "default", + "provider": "default", "stream": true } }, diff --git a/backend/openedx_ai_extensions/workflows/profiles/examples/anthropic/box_functions.json b/backend/openedx_ai_extensions/workflows/profiles/examples/anthropic/box_functions.json index 8310802..72aaac0 100644 --- a/backend/openedx_ai_extensions/workflows/profiles/examples/anthropic/box_functions.json +++ b/backend/openedx_ai_extensions/workflows/profiles/examples/anthropic/box_functions.json @@ -9,7 +9,7 @@ to retrieve context from the current unit and the course outline }, "LLMProcessor": { "function": "answer_question", - "config": "anthropic", + "provider": "anthropic", "enabled_tools": [ "get_context", "get_location_content", diff --git a/backend/openedx_ai_extensions/workflows/profiles/examples/anthropic/box_hello.json b/backend/openedx_ai_extensions/workflows/profiles/examples/anthropic/box_hello.json index 7dbfbc8..1948912 100644 --- a/backend/openedx_ai_extensions/workflows/profiles/examples/anthropic/box_hello.json +++ b/backend/openedx_ai_extensions/workflows/profiles/examples/anthropic/box_hello.json @@ -8,7 +8,7 @@ Test experience. Ask claude to say which llm model is answering }, "LLMProcessor": { "function": "greet_from_llm", - "config": "anthropic" + "provider": "anthropic" } }, "actuator_config": { diff --git a/backend/openedx_ai_extensions/workflows/profiles/examples/anthropic/chat.json b/backend/openedx_ai_extensions/workflows/profiles/examples/anthropic/chat.json index ca46535..592d787 100644 --- a/backend/openedx_ai_extensions/workflows/profiles/examples/anthropic/chat.json +++ b/backend/openedx_ai_extensions/workflows/profiles/examples/anthropic/chat.json @@ -9,7 +9,7 @@ Test experience. Ask claude to chat having given it the current unit context }, "LLMProcessor": { "function": "chat_with_context", - "config": "anthropic" + "provider": "anthropic" }, "SubmissionProcessor": { "function": "get_chat_history", diff --git a/backend/openedx_ai_extensions/workflows/profiles/examples/anthropic/chat_functions.json b/backend/openedx_ai_extensions/workflows/profiles/examples/anthropic/chat_functions.json index 901c03c..54bb40b 100644 --- a/backend/openedx_ai_extensions/workflows/profiles/examples/anthropic/chat_functions.json +++ b/backend/openedx_ai_extensions/workflows/profiles/examples/anthropic/chat_functions.json @@ -9,7 +9,7 @@ to retrieve context from the current unit and the course outline }, "LLMProcessor": { "function": "chat_with_context", - "config": "anthropic", + "provider": "anthropic", "enabled_tools": [ "get_context", "get_location_content", diff --git a/backend/openedx_ai_extensions/workflows/profiles/examples/huggingface/deepseek_chat.json b/backend/openedx_ai_extensions/workflows/profiles/examples/huggingface/deepseek_chat.json index 86af9c0..c3ae45a 100644 --- a/backend/openedx_ai_extensions/workflows/profiles/examples/huggingface/deepseek_chat.json +++ b/backend/openedx_ai_extensions/workflows/profiles/examples/huggingface/deepseek_chat.json @@ -9,7 +9,7 @@ Test experience. 
Use a hosted DeepSeek to chat having given it the current unit }, "LLMProcessor": { "function": "chat_with_context", - "config": "deepseek" + "provider": "deepseek" }, "SubmissionProcessor": { "function": "get_chat_history", diff --git a/backend/openedx_ai_extensions/workflows/profiles/examples/huggingface/deepseek_hello.json b/backend/openedx_ai_extensions/workflows/profiles/examples/huggingface/deepseek_hello.json index 7a16750..591ce7e 100644 --- a/backend/openedx_ai_extensions/workflows/profiles/examples/huggingface/deepseek_hello.json +++ b/backend/openedx_ai_extensions/workflows/profiles/examples/huggingface/deepseek_hello.json @@ -8,7 +8,7 @@ Test experience. Use a hugging face hosted version of DeepSeek to say which llm }, "LLMProcessor": { "function": "greet_from_llm", - "config": "deepseek" + "provider": "deepseek" } }, "actuator_config": { diff --git a/backend/openedx_ai_extensions/workflows/profiles/examples/openai/box_functions.json b/backend/openedx_ai_extensions/workflows/profiles/examples/openai/box_functions.json index 6ec65a2..8623171 100644 --- a/backend/openedx_ai_extensions/workflows/profiles/examples/openai/box_functions.json +++ b/backend/openedx_ai_extensions/workflows/profiles/examples/openai/box_functions.json @@ -9,7 +9,7 @@ to retrieve context from the current unit and the course outline }, "LLMProcessor": { "function": "answer_question", - "config": "openai", + "provider": "openai", "enabled_tools": [ "get_context", "get_location_content", diff --git a/backend/openedx_ai_extensions/workflows/profiles/examples/openai/box_hello.json b/backend/openedx_ai_extensions/workflows/profiles/examples/openai/box_hello.json index b5577c6..41c7e76 100644 --- a/backend/openedx_ai_extensions/workflows/profiles/examples/openai/box_hello.json +++ b/backend/openedx_ai_extensions/workflows/profiles/examples/openai/box_hello.json @@ -8,7 +8,7 @@ Test experience. Ask ChatGPT to say which llm model is answering }, "LLMProcessor": { "function": "greet_from_llm", - "config": "openai" + "provider": "openai" } }, "actuator_config": { diff --git a/backend/openedx_ai_extensions/workflows/profiles/examples/openai/chat.json b/backend/openedx_ai_extensions/workflows/profiles/examples/openai/chat.json index 126cf07..82364ef 100644 --- a/backend/openedx_ai_extensions/workflows/profiles/examples/openai/chat.json +++ b/backend/openedx_ai_extensions/workflows/profiles/examples/openai/chat.json @@ -9,7 +9,7 @@ Test experience. 
Ask ChatGPT to chat having given it the current unit context }, "LLMProcessor": { "function": "chat_with_context", - "config": "openai", + "provider": "openai", "stream": true }, "SubmissionProcessor": { diff --git a/backend/openedx_ai_extensions/workflows/profiles/examples/openai/chat_functions.json b/backend/openedx_ai_extensions/workflows/profiles/examples/openai/chat_functions.json index 0faf279..d197883 100644 --- a/backend/openedx_ai_extensions/workflows/profiles/examples/openai/chat_functions.json +++ b/backend/openedx_ai_extensions/workflows/profiles/examples/openai/chat_functions.json @@ -9,7 +9,7 @@ to retrieve context from the current unit and the course outline }, "LLMProcessor": { "function": "chat_with_context", - "config": "openai", + "provider": "openai", "enabled_tools": [ "get_context", "get_location_content", diff --git a/backend/openedx_ai_extensions/workflows/profiles/examples/openweights/ollama_hello.json b/backend/openedx_ai_extensions/workflows/profiles/examples/openweights/ollama_hello.json index 4d253f2..9608b32 100644 --- a/backend/openedx_ai_extensions/workflows/profiles/examples/openweights/ollama_hello.json +++ b/backend/openedx_ai_extensions/workflows/profiles/examples/openweights/ollama_hello.json @@ -11,7 +11,7 @@ See: https://gist.github.com/felipemontoya/509495d3fbaa696fa2b684880a8388da }, "LLMProcessor": { "function": "greet_from_llm", - "config": "ollama" + "provider": "ollama" } }, "actuator_config": { diff --git a/backend/openedx_ai_extensions/workflows/profiles/examples/openweights/qwen_hello.json b/backend/openedx_ai_extensions/workflows/profiles/examples/openweights/qwen_hello.json index 4111a9a..a5fb86d 100644 --- a/backend/openedx_ai_extensions/workflows/profiles/examples/openweights/qwen_hello.json +++ b/backend/openedx_ai_extensions/workflows/profiles/examples/openweights/qwen_hello.json @@ -11,7 +11,7 @@ See: https://gist.github.com/felipemontoya/509495d3fbaa696fa2b684880a8388da }, "LLMProcessor": { "function": "greet_from_llm", - "config": "qwen25" + "provider": "qwen25" } }, "actuator_config": { diff --git a/backend/openedx_ai_extensions/workflows/profiles/examples/openweights/qwen_summary.json b/backend/openedx_ai_extensions/workflows/profiles/examples/openweights/qwen_summary.json index f1cf5d3..19be4b4 100644 --- a/backend/openedx_ai_extensions/workflows/profiles/examples/openweights/qwen_summary.json +++ b/backend/openedx_ai_extensions/workflows/profiles/examples/openweights/qwen_summary.json @@ -12,7 +12,7 @@ See: https://gist.github.com/felipemontoya/509495d3fbaa696fa2b684880a8388da }, "LLMProcessor": { "function": "summarize_content", - "config": "qwen25" + "provider": "qwen25" } }, "actuator_config": { diff --git a/backend/test_settings.py b/backend/test_settings.py index 82a6f67..bcd7d4d 100644 --- a/backend/test_settings.py +++ b/backend/test_settings.py @@ -122,7 +122,7 @@ def root(*args): AI_EXTENSIONS = { "default": { "API_KEY": "test-api-key", - "MODEL": "gpt-3.5-turbo", + "MODEL": "openai/gpt-3.5-turbo", "TIMEOUT": 30, "TEMPERATURE": 0.7, "MAX_TOKENS": 1000, diff --git a/backend/tests/test_litellm_base_processor.py b/backend/tests/test_litellm_base_processor.py index 8aeb124..eee510d 100644 --- a/backend/tests/test_litellm_base_processor.py +++ b/backend/tests/test_litellm_base_processor.py @@ -73,12 +73,12 @@ def test_litellm_processor_initialization_basic(user): # pylint: disable=redefi """ config = { "LitellmProcessor": { - "config": "default", + "provider": "default", } } processor = LitellmProcessor(config=config, 
user_session=None) - assert processor.config == {"config": "default"} + assert processor.config == {"provider": "default"} assert processor.config_profile == "default" assert processor.provider == "openai" assert "model" in processor.extra_params @@ -107,9 +107,9 @@ def test_litellm_processor_initialization_no_config(mock_settings): # pylint: d @pytest.mark.django_db def test_litellm_processor_missing_config_profile_raises_error(mock_settings): # pylint: disable=unused-argument """ - Test that missing config profile raises KeyError. + Test that missing config profile raises ValueError when MODEL is not defined. """ - with pytest.raises(KeyError): + with pytest.raises(ValueError, match="MODEL must be defined"): LitellmProcessor(config=None, user_session=None) @@ -121,7 +121,7 @@ def test_litellm_processor_missing_model_raises_error(mock_settings): # pylint: """ Test that missing MODEL key raises ValueError. """ - with pytest.raises(ValueError, match="missing 'MODEL' setting"): + with pytest.raises(ValueError, match="MODEL must be defined"): LitellmProcessor(config=None, user_session=None) @@ -135,7 +135,7 @@ def test_litellm_processor_invalid_model_format_raises_error(mock_settings): # """ Test that invalid MODEL format (None) raises ValueError. """ - with pytest.raises(ValueError, match="must be in the format 'provider/model_name'"): + with pytest.raises(ValueError, match="have the format 'provider/model_name'"): LitellmProcessor(config=None, user_session=None) @@ -152,7 +152,7 @@ def test_litellm_processor_custom_config_profile(mock_settings): # pylint: disa """ config = { "LitellmProcessor": { - "config": "custom", + "provider": "custom", } } processor = LitellmProcessor(config=config, user_session=None) @@ -306,6 +306,27 @@ def test_enabled_tools_nonexistent_function(mock_settings): # pylint: disable=u assert processor.extra_params["tools"][0] == TOOLS_SCHEMA["roll_dice"] +@patch.object(settings, "AI_EXTENSIONS", new_callable=lambda: { + "default": { + "MODEL": "openai/gpt-4", + } +}) +@pytest.mark.django_db +def test_enabled_tools_all_nonexistent_functions(mock_settings): # pylint: disable=unused-argument + """ + Test that when all enabled_tools are nonexistent, tools parameter is not added. 
+ """ + config = { + "LitellmProcessor": { + "enabled_tools": ["nonexistent_function1", "nonexistent_function2"], + } + } + processor = LitellmProcessor(config=config, user_session=None) + + # No tools should be added since all are nonexistent + assert "tools" not in processor.extra_params + + @patch.object(settings, "AI_EXTENSIONS", new_callable=lambda: { "default": { "MODEL": "openai/gpt-4", @@ -499,7 +520,7 @@ def test_integration_custom_profile_with_tools(user): # pylint: disable=redefin """ config = { "LitellmProcessor": { - "config": "production", + "provider": "production", "enabled_tools": ["roll_dice", "get_location_content"], } } @@ -511,3 +532,158 @@ def test_integration_custom_profile_with_tools(user): # pylint: disable=redefin assert processor.extra_params["temperature"] == 0.3 assert "tools" in processor.extra_params assert len(processor.extra_params["tools"]) == 2 + + +# ============================================================================ +# Error Handling Tests +# ============================================================================ + + +@patch.object(settings, "AI_EXTENSIONS", new_callable=lambda: { + "default": { + "MODEL": "openai/gpt-4", + } +}) +@pytest.mark.django_db +def test_unknown_profile_raises_error(mock_settings): # pylint: disable=unused-argument + """ + Test that using an unknown profile raises ValueError. + """ + config = { + "LitellmProcessor": { + "provider": "nonexistent", + } + } + with pytest.raises(ValueError, match="Unknown AI_EXTENSIONS profile 'nonexistent'"): + LitellmProcessor(config=config, user_session=None) + + +@patch.object(settings, "AI_EXTENSIONS", new_callable=lambda: { + "default": { + "MODEL": "openai/gpt-4", + } +}) +@pytest.mark.django_db +def test_non_string_provider_raises_error(mock_settings): # pylint: disable=unused-argument + """ + Test that using a non-string provider raises TypeError. + """ + config = { + "LitellmProcessor": { + "provider": {"not": "a string"}, + } + } + with pytest.raises(TypeError, match="`provider` must be a string"): + LitellmProcessor(config=config, user_session=None) + + +# ============================================================================ +# Streaming with Tools Tests +# ============================================================================ + + +@patch.object(settings, "AI_EXTENSIONS", new_callable=lambda: { + "default": { + "MODEL": "openai/gpt-4", + } +}) +@pytest.mark.django_db +def test_streaming_with_tools_disables_streaming(mock_settings): # pylint: disable=unused-argument + """ + Test that streaming is disabled when tools are enabled. + """ + config = { + "LitellmProcessor": { + "stream": True, + "enabled_tools": ["roll_dice"], + } + } + with patch('openedx_ai_extensions.processors.litellm_base_processor.logger') as mock_logger: + processor = LitellmProcessor(config=config, user_session=None) + + # Verify streaming was disabled + assert processor.stream is False + + # Verify warning was logged + mock_logger.warning.assert_called_once_with( + "Streaming responses with tools is not supported; disabling streaming." 
+ ) + + +# ============================================================================ +# MCP Configs Tests +# ============================================================================ + + +@patch.object(settings, "AI_EXTENSIONS", new_callable=lambda: { + "default": { + "MODEL": "openai/gpt-4", + } +}) +@patch.object(settings, "AI_EXTENSIONS_MCP_CONFIGS", new_callable=lambda: { + "server1": { + "command": "python", + "args": ["server1.py"], + }, + "server2": { + "command": "node", + "args": ["server2.js"], + } +}) +@pytest.mark.django_db +def test_mcp_configs_single_server(mock_mcp_configs, mock_settings): # pylint: disable=unused-argument + """ + Test that MCP configs are properly configured with a single server. + """ + config = { + "LitellmProcessor": { + "mcp_configs": ["server1"], + } + } + processor = LitellmProcessor(config=config, user_session=None) + + assert len(processor.mcp_configs) == 1 + assert "server1" in processor.mcp_configs + assert processor.mcp_configs["server1"]["command"] == "python" + + # Verify tools parameter was set + assert "tools" in processor.extra_params + assert len(processor.extra_params["tools"]) == 1 + assert processor.extra_params["tools"][0]["type"] == "mcp" + assert processor.extra_params["tools"][0]["server_label"] == "server1" + + +@patch.object(settings, "AI_EXTENSIONS", new_callable=lambda: { + "default": { + "MODEL": "openai/gpt-4", + } +}) +@patch.object(settings, "AI_EXTENSIONS_MCP_CONFIGS", new_callable=lambda: { + "server1": { + "command": "python", + "args": ["server1.py"], + }, + "server2": { + "command": "node", + "args": ["server2.js"], + } +}) +@pytest.mark.django_db +def test_mcp_configs_multiple_servers(mock_mcp_configs, mock_settings): # pylint: disable=unused-argument + """ + Test that MCP configs are properly configured with multiple servers. 
+ """ + config = { + "LitellmProcessor": { + "mcp_configs": ["server1", "server2"], + } + } + processor = LitellmProcessor(config=config, user_session=None) + + assert len(processor.mcp_configs) == 2 + assert "server1" in processor.mcp_configs + assert "server2" in processor.mcp_configs + + # Verify tools parameter was set + assert "tools" in processor.extra_params + assert len(processor.extra_params["tools"]) == 2 diff --git a/backend/tests/test_llm_processor.py b/backend/tests/test_llm_processor.py index a95aaa6..c1356d6 100644 --- a/backend/tests/test_llm_processor.py +++ b/backend/tests/test_llm_processor.py @@ -71,7 +71,7 @@ def llm_processor(user_session, settings): # pylint: disable=redefined-outer-na # Mock AI_EXTENSIONS settings settings.AI_EXTENSIONS = { "default": { - "MODEL": "gpt-3.5-turbo", + "MODEL": "openai/gpt-3.5-turbo", "API_KEY": "test-dummy-key" } } @@ -353,7 +353,7 @@ def test_mcp_configs_empty_when_not_specified(user_session, settings): # pylint """ settings.AI_EXTENSIONS = { "default": { - "MODEL": "gpt-3.5-turbo", + "MODEL": "openai/gpt-3.5-turbo", "API_KEY": "test-key" } } @@ -378,7 +378,7 @@ def test_mcp_configs_filtering_from_allowed_list(user_session, settings): # pyl """ settings.AI_EXTENSIONS = { "default": { - "MODEL": "gpt-3.5-turbo", + "MODEL": "openai/gpt-3.5-turbo", "API_KEY": "test-key" } } @@ -420,7 +420,7 @@ def test_mcp_configs_tools_parameter_generation(user_session, settings): # pyli """ settings.AI_EXTENSIONS = { "default": { - "MODEL": "gpt-3.5-turbo", + "MODEL": "openai/gpt-3.5-turbo", "API_KEY": "test-key" } } @@ -459,7 +459,7 @@ def test_mcp_configs_empty_allowed_list(user_session, settings): # pylint: disa """ settings.AI_EXTENSIONS = { "default": { - "MODEL": "gpt-3.5-turbo", + "MODEL": "openai/gpt-3.5-turbo", "API_KEY": "test-key" } } @@ -497,7 +497,7 @@ def test_call_with_custom_prompt_function( """ settings.AI_EXTENSIONS = { "default": { - "MODEL": "gpt-3.5-turbo", + "MODEL": "openai/gpt-3.5-turbo", "API_KEY": "test-key" } } @@ -538,7 +538,7 @@ def test_call_with_custom_prompt_when_function_not_specified( """ settings.AI_EXTENSIONS = { "default": { - "MODEL": "gpt-3.5-turbo", + "MODEL": "openai/gpt-3.5-turbo", "API_KEY": "test-key" } } @@ -572,7 +572,7 @@ def test_custom_prompt_overrides_system_role_in_completion( """ settings.AI_EXTENSIONS = { "default": { - "MODEL": "gpt-3.5-turbo", + "MODEL": "openai/gpt-3.5-turbo", "API_KEY": "test-key" } } @@ -611,7 +611,7 @@ def test_custom_prompt_in_chat_with_context( """ settings.AI_EXTENSIONS = { "default": { - "MODEL": "gpt-3.5-turbo", + "MODEL": "openai/gpt-3.5-turbo", "API_KEY": "test-key" } } @@ -654,7 +654,7 @@ def test_call_with_custom_prompt_streaming( """ settings.AI_EXTENSIONS = { "default": { - "MODEL": "gpt-3.5-turbo", + "MODEL": "openai/gpt-3.5-turbo", "API_KEY": "test-key" } } @@ -697,7 +697,7 @@ def test_call_with_custom_prompt_missing_prompt_raises_error( """ settings.AI_EXTENSIONS = { "default": { - "MODEL": "gpt-3.5-turbo", + "MODEL": "openai/gpt-3.5-turbo", "API_KEY": "test-key" } }