Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
21 changes: 19 additions & 2 deletions azure/durable_functions/openai_agents/context.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
import json
from typing import Any, Callable, Optional, TYPE_CHECKING, Union

from azure.durable_functions.models.DurableOrchestrationContext import (
Expand Down Expand Up @@ -138,13 +139,29 @@ def create_activity_tool(
else:
activity_name = activity_func._function._name

input_name = None
if (activity_func._function._trigger is not None
and hasattr(activity_func._function._trigger, 'name')):
input_name = activity_func._function._trigger.name

async def run_activity(ctx: RunContextWrapper[Any], input: str) -> Any:
# Parse JSON input and extract the named value if input_name is specified
activity_input = input
if input_name:
try:
parsed_input = json.loads(input)
if isinstance(parsed_input, dict) and input_name in parsed_input:
activity_input = parsed_input[input_name]
# If parsing fails or the named parameter is not found, pass the original input
except (json.JSONDecodeError, TypeError):
pass

if retry_options:
result = self._task_tracker.get_activity_call_result_with_retry(
activity_name, retry_options, input
activity_name, retry_options, activity_input
)
else:
result = self._task_tracker.get_activity_call_result(activity_name, input)
result = self._task_tracker.get_activity_call_result(activity_name, activity_input)
return result

schema = function_schema(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -163,7 +163,16 @@ class ActivityModelInput(BaseModel):

def to_json(self) -> str:
"""Convert the ActivityModelInput to a JSON string."""
return self.model_dump_json()
try:
return self.model_dump_json(warnings=False)
except Exception:
# Fallback to basic JSON serialization
try:
return json.dumps(self.model_dump(warnings=False), default=str)
except Exception as fallback_error:
raise ValueError(
f"Unable to serialize ActivityModelInput: {fallback_error}"
) from fallback_error

@classmethod
def from_json(cls, json_str: str) -> 'ActivityModelInput':
Expand Down Expand Up @@ -310,6 +319,7 @@ async def get_response(
*,
previous_response_id: Optional[str],
prompt: Optional[ResponsePromptParam],
conversation_id: Optional[str] = None,
) -> ModelResponse:
"""Get a response from the model."""
def make_tool_info(tool: Tool) -> ToolInput:
Expand Down
23 changes: 21 additions & 2 deletions azure/durable_functions/openai_agents/orchestrator_generator.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,27 @@ async def durable_openai_agent_activity(input: str, model_provider: ModelProvide
model_invoker = ModelInvoker(model_provider=model_provider)
result = await model_invoker.invoke_model_activity(activity_input)

json_obj = ModelResponse.__pydantic_serializer__.to_json(result)
return json_obj.decode()
# Use safe/public Pydantic API when possible. Prefer model_dump_json if result is a BaseModel
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is fine, but I think there's another serializer function used elsewhere; I wonder if we can combine them at some point.

Also, I wonder if we should add a layer of indirection in the output of the activity rather than return OpenAI types directly, just in case we need to make versioning adjustments in the future--else we may be subject to breaking OpenAI changes. (I had a change in progress that did this.)

Copy link
Owner Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is fine, but I think there's another serializer function used elsewhere; I wonder if we can combine them at some point.

Sure, let's do it later.

Also, I wonder if we should add a layer of indirection in the output of the activity rather than return OpenAI types directly, just in case we need to make versioning adjustments in the future--else we may be subject to breaking OpenAI changes. (I had a change in progress that did this.)

Are you talking about a situation when activity output is saved in the orchestration history, then the user upgrades their app to a new OpenAI SDK version, and they expect this orchestration to continue after an upgrade? Yes, we should be able to eventually handle that, good catch.

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes, that's what I was referring to. Though, thinking about it again, I suppose it doesn't really matter whether the OpenAI type is returned directly or wrapped in an outer type (e.g. ActivityModelOutput). We can still add logic to the post-activity-call handler to make any adjustments needed before passing it back to the OpenAI SDK layer.

# Otherwise handle common types (str/bytes/dict/list) and fall back to json.dumps.
import json as _json

if hasattr(result, "model_dump_json"):
# Pydantic v2 BaseModel
json_str = result.model_dump_json()
else:
if isinstance(result, bytes):
json_str = result.decode()
elif isinstance(result, str):
json_str = result
else:
# Try the internal serializer as a last resort, but fall back to json.dumps
try:
json_bytes = ModelResponse.__pydantic_serializer__.to_json(result)
json_str = json_bytes.decode()
except Exception:
json_str = _json.dumps(result)

return json_str


def durable_openai_agent_orchestrator_generator(
Expand Down
4 changes: 2 additions & 2 deletions azure/durable_functions/openai_agents/task_tracker.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,13 +52,13 @@ def _get_activity_result_or_raise(self, task):
result = json.loads(result_json)
return result

def get_activity_call_result(self, activity_name, input: str):
def get_activity_call_result(self, activity_name, input: Any):
"""Call an activity and return its result or raise ``YieldException`` if pending."""
task = self._context.call_activity(activity_name, input)
return self._get_activity_result_or_raise(task)

def get_activity_call_result_with_retry(
self, activity_name, retry_options: RetryOptions, input: str
self, activity_name, retry_options: RetryOptions, input: Any
):
"""Call an activity with retry and return its result or raise YieldException if pending."""
task = self._context.call_activity_with_retry(activity_name, retry_options, input)
Expand Down
10 changes: 10 additions & 0 deletions samples-v2/openai_agents/function_app.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import os
import random

import azure.functions as func
import azure.durable_functions as df
Expand Down Expand Up @@ -102,4 +103,13 @@ def tools(context):
import basic.tools
return basic.tools.main()

@app.activity_trigger(input_name="max")
async def random_number_tool(max: int) -> int:
    """Activity that draws a uniformly random integer in the inclusive range [0, max].

    NOTE: the parameter must be named "max" to match ``input_name`` above,
    even though it shadows the builtin.
    """
    # randrange(max + 1) covers 0..max inclusive, equivalent to randint(0, max).
    return random.randrange(max + 1)

@app.orchestration_trigger(context_name="context")
@app.durable_openai_agent_orchestrator
def message_filter(context):
    """Orchestrator entry point for the handoff message-filter sample.

    Wraps the ``random_number_tool`` activity as an agent tool and delegates
    the actual conversation flow to the sample's ``main``.
    """
    import handoffs.message_filter
    number_tool = context.create_activity_tool(random_number_tool)
    return handoffs.message_filter.main(number_tool)
173 changes: 173 additions & 0 deletions samples-v2/openai_agents/handoffs/message_filter.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,173 @@
from __future__ import annotations

import json

from agents import Agent, HandoffInputData, Runner, function_tool, handoff
from agents.extensions import handoff_filters
from agents.models import is_gpt_5_default


def spanish_handoff_message_filter(handoff_message_data: HandoffInputData) -> HandoffInputData:
    """Input filter applied when handing off to the Spanish agent.

    Removes tool-related items from the conversation history and, purely for
    demonstration, drops the first two history entries. When gpt-5 is the
    default model, the history is passed through untouched.
    """
    if is_gpt_5_default():
        print("gpt-5 is enabled, so we're not filtering the input history")
        # Removing history items could break gpt-5 runs, so only copy the
        # data through for that model family.
        return HandoffInputData(
            input_history=handoff_message_data.input_history,
            pre_handoff_items=tuple(handoff_message_data.pre_handoff_items),
            new_items=tuple(handoff_message_data.new_items),
        )

    # First strip every tool call / tool output from the message history.
    filtered = handoff_filters.remove_all_tools(handoff_message_data)

    # Then trim the first two items, just for demonstration. A non-tuple
    # history (e.g. a plain string) is forwarded unchanged.
    if isinstance(filtered.input_history, tuple):
        trimmed_history = tuple(filtered.input_history[2:])
    else:
        trimmed_history = filtered.input_history

    # Alternatively, HandoffInputData.clone(kwargs) could build this copy.
    return HandoffInputData(
        input_history=trimmed_history,
        pre_handoff_items=tuple(filtered.pre_handoff_items),
        new_items=tuple(filtered.new_items),
    )


def main(random_number_tool):
    """Run the handoff message-filter sample end to end.

    Drives a short multi-agent conversation that finishes with a handoff to
    a Spanish-speaking agent, exercising ``spanish_handoff_message_filter``
    along the way, then prints the resulting history and returns the final
    model output.
    """
    def user_message(text):
        # Build a single user-role message in the chat input format.
        return {"content": text, "role": "user"}

    first_agent = Agent(
        name="Assistant",
        instructions="Be extremely concise.",
        tools=[random_number_tool],
    )

    spanish_agent = Agent(
        name="Spanish Assistant",
        instructions="You only speak Spanish and are extremely concise.",
        handoff_description="A Spanish-speaking assistant.",
    )

    second_agent = Agent(
        name="Assistant",
        instructions=(
            "Be a helpful assistant. If the user speaks Spanish, handoff to the Spanish assistant."
        ),
        handoffs=[handoff(spanish_agent, input_filter=spanish_handoff_message_filter)],
    )

    # 1. Send a regular message to the first agent
    run_result = Runner.run_sync(first_agent, input="Hi, my name is Sora.")

    print("Step 1 done")

    # 2. Ask it to generate a number
    run_result = Runner.run_sync(
        first_agent,
        input=run_result.to_input_list()
        + [user_message("Can you generate a random number between 0 and 100?")],
    )

    print("Step 2 done")

    # 3. Call the second agent
    run_result = Runner.run_sync(
        second_agent,
        input=run_result.to_input_list()
        + [user_message("I live in New York City. Whats the population of the city?")],
    )

    print("Step 3 done")

    # 4. Cause a handoff to occur
    run_result = Runner.run_sync(
        second_agent,
        input=run_result.to_input_list()
        + [user_message("Por favor habla en español. ¿Cuál es mi nombre y dónde vivo?")],
    )

    print("Step 4 done")

    print("\n===Final messages===\n")

    # 5. The handoff above should have invoked spanish_handoff_message_filter,
    # so the final history should be missing its first two messages and
    # contain no tool calls. Print the messages to inspect what happened.
    for message in run_result.to_input_list():
        print(json.dumps(message, indent=2))
    """
    $python examples/handoffs/message_filter.py
    Step 1 done
    Step 2 done
    Step 3 done
    Step 4 done

    ===Final messages===

    {
      "content": "Can you generate a random number between 0 and 100?",
      "role": "user"
    }
    {
      "id": "...",
      "content": [
        {
          "annotations": [],
          "text": "Sure! Here's a random number between 0 and 100: **42**.",
          "type": "output_text"
        }
      ],
      "role": "assistant",
      "status": "completed",
      "type": "message"
    }
    {
      "content": "I live in New York City. Whats the population of the city?",
      "role": "user"
    }
    {
      "id": "...",
      "content": [
        {
          "annotations": [],
          "text": "As of the most recent estimates, the population of New York City is approximately 8.6 million people. However, this number is constantly changing due to various factors such as migration and birth rates. For the latest and most accurate information, it's always a good idea to check the official data from sources like the U.S. Census Bureau.",
          "type": "output_text"
        }
      ],
      "role": "assistant",
      "status": "completed",
      "type": "message"
    }
    {
      "content": "Por favor habla en espa\u00f1ol. \u00bfCu\u00e1l es mi nombre y d\u00f3nde vivo?",
      "role": "user"
    }
    {
      "id": "...",
      "content": [
        {
          "annotations": [],
          "text": "No tengo acceso a esa informaci\u00f3n personal, solo s\u00e9 lo que me has contado: vives en Nueva York.",
          "type": "output_text"
        }
      ],
      "role": "assistant",
      "status": "completed",
      "type": "message"
    }
    """

    return run_result.final_output
4 changes: 2 additions & 2 deletions samples-v2/openai_agents/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,6 @@
azure-functions
azure-functions-durable
azure-identity
openai==1.98.0
openai-agents==0.2.4
openai==1.107.3
openai-agents==0.3.0
pydantic
3 changes: 2 additions & 1 deletion samples-v2/openai_agents/test_orchestrators.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,8 @@
"non_strict_output_type",
"previous_response_id",
"remote_image",
"tools"
"tools",
"message_filter",
]

BASE_URL = "http://localhost:7071/api/orchestrators"
Expand Down
Loading