@@ -38,7 +38,7 @@ def _call_completion_api(self, system_role):
# Build completion parameters
completion_params = {
"messages": [
{"role": "system", "content": system_role},
{"role": "system", "content": self.custom_prompt or system_role},
],
}

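For clarity, the `or` in the changed line means any falsy `prompt` value (missing, None, or an empty string) falls back to the built-in system role. A standalone sketch of that precedence (the helper name is illustrative, not part of the codebase):

# Sketch: precedence between the configured custom prompt and the default system role.
def build_system_message(custom_prompt, system_role):
    return {"role": "system", "content": custom_prompt or system_role}

assert build_system_message(None, "default role")["content"] == "default role"
assert build_system_message("", "default role")["content"] == "default role"
assert build_system_message("Custom prompt", "default role")["content"] == "Custom prompt"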
@@ -38,6 +38,7 @@ def __init__(self, config=None, user_session=None):
for key, value in settings.AI_EXTENSIONS[self.config_profile].items():
self.extra_params[key.lower()] = value

self.custom_prompt = self.config.get("prompt", None)
self.stream = self.config.get("stream", False)

enabled_tools = self.config.get("enabled_tools", [])
18 changes: 15 additions & 3 deletions backend/openedx_ai_extensions/processors/llm_processor.py
@@ -29,7 +29,10 @@ def process(self, *args, **kwargs):
self.input_data = kwargs.get("input_data", None)
self.chat_history = kwargs.get("chat_history", None)

function_name = self.config.get("function")
function_name = self.config.get("function", None)
# jsonmerge can still return "function": null, so check for that too
if not function_name:
function_name = "call_with_custom_prompt"
function = getattr(self, function_name)
return function()

@@ -106,7 +109,7 @@ def _build_response_api_params(self, system_role=None):
else:
# Initialize new thread with system role and context
params["input"] = [
{"role": "system", "content": system_role},
{"role": "system", "content": self.custom_prompt or system_role},
{"role": "system", "content": self.context},
]

@@ -197,7 +200,7 @@ def _call_completion_wrapper(self, system_role):
params = {
"stream": self.stream,
"messages": [
{"role": "system", "content": system_role},
{"role": "system", "content": self.custom_prompt or system_role},
],
}

@@ -396,3 +399,12 @@ def answer_question(self):
result = self._call_completion_wrapper(system_role=system_role)

return result

def call_with_custom_prompt(self):
"""Call LLM with a completely custom prompt provided in custom_prompt config."""
if not self.custom_prompt:
raise ValueError("Custom prompt not provided in configuration.")

result = self._call_completion_wrapper(system_role="")

return result
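As a usage sketch, here is a minimal processor config that exercises the new default: with no "function" key, process() falls back to call_with_custom_prompt, which requires a "prompt" entry. The dict shape mirrors the test fixtures below; the values are placeholders.

# Sketch: no "function" key, so process() defaults to call_with_custom_prompt.
config = {
    "LLMProcessor": {
        "model": "gpt-3.5-turbo",
        "prompt": "Rephrase the given content in simpler terms.",
        # "stream": True,  # optional; defaults to False
    }
}
# processor = LLMProcessor(config=config, user_session=user_session)
# result = processor.process(input_data="...")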
@@ -0,0 +1,50 @@
/*
Using a custom prompt to determine the experience.
Send the course content to the LLM processor and
have it explained in a different way with this prompt.
*/
{
"orchestrator_class": "DirectLLMResponse",
"processor_config": {
"OpenEdXProcessor": {
"function": "get_location_content"
},
"LLMProcessor": {
"config": "default",
"stream": true,
"prompt": "\
You are an educational assistant embedded in an online course. \
\
Rephrase the given content using different wording while preserving the original meaning, intent, and factual accuracy. \
\
Guidelines: \
- Do not add new information. \
- Do not remove important details. \
- Do not simplify unless explicitly asked. \
- Keep the length roughly similar to the original. \
- Use clear, natural language suitable for the course audience. \
- Avoid repeating sentence structures or phrases from the original text. \
\
Output only the rephrased content. \
",
}
},
"actuator_config": {
"UIComponents": {
"request": {
"component": "AIRequestComponent",
"config": {
"buttonText": "Rephrase",
"customMessage": "Explain this but differently"
}
},
"response": {
"component": "AIResponseComponent",
"config": {
"customMessage": "In other words"
}
}
}
},
"schema_version": "1.0",
}
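Given this profile, _call_completion_wrapper (see the diff above) seeds the request with the custom prompt as the system message. A rough sketch of the resulting parameters, showing only what is visible in the changed hunks:

# Rough sketch of the completion params implied by the profile above.
# How input_data and chat history are appended lies outside the changed hunks.
params = {
    "stream": True,
    "messages": [
        {"role": "system", "content": custom_prompt},  # the "prompt" string from this file
    ],
}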
232 changes: 232 additions & 0 deletions backend/tests/test_llm_processor.py
@@ -481,3 +481,235 @@ def test_mcp_configs_empty_allowed_list(user_session, settings): # pylint: disa

assert processor.mcp_configs == {}
assert "tools" not in processor.extra_params


# ============================================================================
# Custom Prompt Tests
# ============================================================================

@pytest.mark.django_db
@patch("openedx_ai_extensions.processors.llm_processor.completion")
def test_call_with_custom_prompt_function(
mock_completion, user_session, settings # pylint: disable=redefined-outer-name
):
"""
Test the call_with_custom_prompt function explicitly.
"""
settings.AI_EXTENSIONS = {
"default": {
"MODEL": "gpt-3.5-turbo",
"API_KEY": "test-key"
}
}

custom_prompt_text = "You are a helpful AI assistant."
config = {
"LLMProcessor": {
"function": "call_with_custom_prompt",
"model": "gpt-3.5-turbo",
"prompt": custom_prompt_text,
}
}
processor = LLMProcessor(config=config, user_session=user_session)

mock_resp_obj = MockChunk("Custom response", is_stream=False)
mock_completion.return_value = mock_resp_obj

result = processor.process(input_data="User's custom input")

assert result["status"] == "success"
assert result["response"] == "Custom response"
mock_completion.assert_called_once()

# Verify the custom prompt is used in the system role
call_kwargs = mock_completion.call_args[1]
messages = call_kwargs["messages"]
assert messages[0]["role"] == "system"
assert messages[0]["content"] == custom_prompt_text


@pytest.mark.django_db
@patch("openedx_ai_extensions.processors.llm_processor.completion")
def test_call_with_custom_prompt_when_function_not_specified(
mock_completion, user_session, settings # pylint: disable=redefined-outer-name
):
"""
Test that when no function is specified, it defaults to call_with_custom_prompt.
"""
settings.AI_EXTENSIONS = {
"default": {
"MODEL": "gpt-3.5-turbo",
"API_KEY": "test-key"
}
}

custom_prompt_text = "Default custom prompt for testing."
config = {
"LLMProcessor": {
# No "function" key specified
"model": "gpt-3.5-turbo",
"prompt": custom_prompt_text,
}
}
processor = LLMProcessor(config=config, user_session=user_session)

mock_resp_obj = MockChunk("Default function response", is_stream=False)
mock_completion.return_value = mock_resp_obj

result = processor.process(input_data="Test input")

assert result["status"] == "success"
assert result["response"] == "Default function response"


@pytest.mark.django_db
@patch("openedx_ai_extensions.processors.llm_processor.completion")
def test_custom_prompt_overrides_system_role_in_completion(
mock_completion, user_session, settings # pylint: disable=redefined-outer-name
):
"""
Test that custom_prompt from config overrides the default system role.
"""
settings.AI_EXTENSIONS = {
"default": {
"MODEL": "gpt-3.5-turbo",
"API_KEY": "test-key"
}
}

custom_prompt_text = "You are a specialized math tutor. Be concise and precise."
config = {
"LLMProcessor": {
"function": "call_with_custom_prompt",
"model": "gpt-3.5-turbo",
"prompt": custom_prompt_text, # This gets set as self.custom_prompt
}
}
processor = LLMProcessor(config=config, user_session=user_session)

mock_resp_obj = MockChunk("Math answer", is_stream=False)
mock_completion.return_value = mock_resp_obj

result = processor.process(input_data="What is 2+2?")

assert result["status"] == "success"

# Verify the custom prompt is used instead of default
call_kwargs = mock_completion.call_args[1]
messages = call_kwargs["messages"]
assert messages[0]["role"] == "system"
assert messages[0]["content"] == custom_prompt_text


@pytest.mark.django_db
@patch("openedx_ai_extensions.processors.llm_processor.responses")
def test_custom_prompt_in_chat_with_context(
mock_responses, user_session, settings # pylint: disable=redefined-outer-name
):
"""
Test that custom_prompt is used in chat_with_context function.
"""
settings.AI_EXTENSIONS = {
"default": {
"MODEL": "gpt-3.5-turbo",
"API_KEY": "test-key"
}
}

custom_prompt_text = "You are a coding assistant specialized in Python."
config = {
"LLMProcessor": {
"function": "chat_with_context",
"model": "gpt-3.5-turbo",
"prompt": custom_prompt_text,
}
}
processor = LLMProcessor(config=config, user_session=user_session)

mock_resp_obj = MockChunk("Here's a Python solution", is_stream=False)
mock_responses.return_value = mock_resp_obj

result = processor.process(
context="Python course",
input_data="How do I use list comprehensions?",
chat_history=[]
)

assert result["status"] == "success"

# Verify the custom prompt is used
call_kwargs = mock_responses.call_args[1]
input_msgs = call_kwargs["input"]
assert input_msgs[0]["role"] == "system"
assert input_msgs[0]["content"] == custom_prompt_text


@pytest.mark.django_db
@patch("openedx_ai_extensions.processors.llm_processor.completion")
def test_call_with_custom_prompt_streaming(
mock_completion, user_session, settings # pylint: disable=redefined-outer-name
):
"""
Test call_with_custom_prompt with streaming enabled.
"""
settings.AI_EXTENSIONS = {
"default": {
"MODEL": "gpt-3.5-turbo",
"API_KEY": "test-key"
}
}

custom_prompt_text = "You are a streaming assistant."
config = {
"LLMProcessor": {
"function": "call_with_custom_prompt",
"model": "gpt-3.5-turbo",
"stream": True,
"prompt": custom_prompt_text,
}
}
processor = LLMProcessor(config=config, user_session=user_session)

# Mock streaming chunks
chunks = [
MockChunk("Streaming ", is_stream=True),
MockChunk("response", is_stream=True),
]
mock_completion.return_value = iter(chunks)

generator = processor.process(input_data="Test streaming input")

# Consume generator
results = list(generator)

# Assertions for streaming
assert len(results) == 2
assert results[0] == b"Streaming "
assert results[1] == b"response"


@pytest.mark.django_db
def test_call_with_custom_prompt_missing_prompt_raises_error(
user_session, settings # pylint: disable=redefined-outer-name
):
"""
Test that call_with_custom_prompt raises ValueError when prompt is not provided.
"""
settings.AI_EXTENSIONS = {
"default": {
"MODEL": "gpt-3.5-turbo",
"API_KEY": "test-key"
}
}

config = {
"LLMProcessor": {
"function": "call_with_custom_prompt",
"model": "gpt-3.5-turbo",
# No "prompt" key provided
}
}
processor = LLMProcessor(config=config, user_session=user_session)

with pytest.raises(ValueError, match="Custom prompt not provided in configuration"):
processor.process(input_data="Test input")