From 872356c49ff6a58dbb08201b94f89cce30158f45 Mon Sep 17 00:00:00 2001 From: Johann Schleier-Smith Date: Thu, 24 Jul 2025 22:10:46 -0700 Subject: [PATCH 01/15] some updates --- temporalio/bridge/Cargo.lock | 246 ++++++++++++++++++--- temporalio/bridge/sdk-core | 2 +- temporalio/contrib/openai_agents/README.md | 70 +++--- tests/contrib/openai_agents/test_openai.py | 1 + 4 files changed, 248 insertions(+), 71 deletions(-) diff --git a/temporalio/bridge/Cargo.lock b/temporalio/bridge/Cargo.lock index 30864443d..c10c45275 100644 --- a/temporalio/bridge/Cargo.lock +++ b/temporalio/bridge/Cargo.lock @@ -64,6 +64,28 @@ dependencies = [ "derive_arbitrary", ] +[[package]] +name = "async-stream" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" +dependencies = [ + "async-stream-impl", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "async-trait" version = "0.1.88" @@ -87,20 +109,47 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" +[[package]] +name = "axum" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f" +dependencies = [ + "async-trait", + "axum-core 0.4.5", + "bytes", + "futures-util", + "http", + "http-body", + "http-body-util", + "itoa", + "matchit 0.7.3", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "sync_wrapper", + "tower 0.5.2", + "tower-layer", + "tower-service", +] + [[package]] name = "axum" version 
= "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "021e862c184ae977658b36c4500f7feac3221ca5da43e3f25bd04ab6c79a29b5" dependencies = [ - "axum-core", + "axum-core 0.5.2", "bytes", "futures-util", "http", "http-body", "http-body-util", "itoa", - "matchit", + "matchit 0.8.4", "memchr", "mime", "percent-encoding", @@ -108,7 +157,27 @@ dependencies = [ "rustversion", "serde", "sync_wrapper", - "tower", + "tower 0.5.2", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http", + "http-body", + "http-body-util", + "mime", + "pin-project-lite", + "rustversion", + "sync_wrapper", "tower-layer", "tower-service", ] @@ -798,6 +867,12 @@ version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" +[[package]] +name = "glob" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" + [[package]] name = "governor" version = "0.8.1" @@ -833,13 +908,19 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap", + "indexmap 2.9.0", "slab", "tokio", "tokio-util", "tracing", ] +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + [[package]] name = "hashbrown" version = "0.14.5" @@ -1106,6 +1187,16 @@ dependencies = [ "icu_properties", ] +[[package]] +name = "indexmap" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" 
+dependencies = [ + "autocfg", + "hashbrown 0.12.3", +] + [[package]] name = "indexmap" version = "2.9.0" @@ -1296,6 +1387,12 @@ dependencies = [ "regex-automata 0.1.10", ] +[[package]] +name = "matchit" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" + [[package]] name = "matchit" version = "0.8.4" @@ -1444,9 +1541,9 @@ checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" [[package]] name = "opentelemetry" -version = "0.30.0" +version = "0.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aaf416e4cb72756655126f7dd7bb0af49c674f4c1b9903e80c009e0c37e552e6" +checksum = "9e87237e2775f74896f9ad219d26a2081751187eb7c9f5c58dde20a23b95d16c" dependencies = [ "futures-core", "futures-sink", @@ -1458,23 +1555,25 @@ dependencies = [ [[package]] name = "opentelemetry-http" -version = "0.30.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f6639e842a97dbea8886e3439710ae463120091e2e064518ba8e716e6ac36d" +checksum = "46d7ab32b827b5b495bd90fa95a6cb65ccc293555dcc3199ae2937d2d237c8ed" dependencies = [ "async-trait", "bytes", "http", "opentelemetry", "reqwest", + "tracing", ] [[package]] name = "opentelemetry-otlp" -version = "0.30.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbee664a43e07615731afc539ca60c6d9f1a9425e25ca09c57bc36c87c55852b" +checksum = "d899720fe06916ccba71c01d04ecd77312734e2de3467fd30d9d580c8ce85656" dependencies = [ + "futures-core", "http", "opentelemetry", "opentelemetry-http", @@ -1484,31 +1583,45 @@ dependencies = [ "reqwest", "thiserror 2.0.12", "tokio", - "tonic", + "tonic 0.12.3", + "tracing", +] + +[[package]] +name = "opentelemetry-prometheus" +version = "0.29.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"098a71a4430bb712be6130ed777335d2e5b19bc8566de5f2edddfce906def6ab" +dependencies = [ + "once_cell", + "opentelemetry", + "opentelemetry_sdk", + "prometheus", "tracing", ] [[package]] name = "opentelemetry-proto" -version = "0.30.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e046fd7660710fe5a05e8748e70d9058dc15c94ba914e7c4faa7c728f0e8ddc" +checksum = "8c40da242381435e18570d5b9d50aca2a4f4f4d8e146231adb4e7768023309b3" dependencies = [ "opentelemetry", "opentelemetry_sdk", "prost", - "tonic", + "tonic 0.12.3", ] [[package]] name = "opentelemetry_sdk" -version = "0.30.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11f644aa9e5e31d11896e024305d7e3c98a88884d9f8919dbf37a9991bc47a4b" +checksum = "afdefb21d1d47394abc1ba6c57363ab141be19e27cc70d0e422b7f303e4d290b" dependencies = [ "futures-channel", "futures-executor", "futures-util", + "glob", "opentelemetry", "percent-encoding", "rand 0.9.1", @@ -1516,6 +1629,7 @@ dependencies = [ "thiserror 2.0.12", "tokio", "tokio-stream", + "tracing", ] [[package]] @@ -1576,7 +1690,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3672b37090dbd86368a4145bc067582552b29c27377cad4e0a306c97f9bd7772" dependencies = [ "fixedbitset", - "indexmap", + "indexmap 2.9.0", ] [[package]] @@ -2181,7 +2295,7 @@ dependencies = [ "tokio", "tokio-rustls", "tokio-util", - "tower", + "tower 0.5.2", "tower-http", "tower-service", "url", @@ -2291,6 +2405,15 @@ dependencies = [ "security-framework", ] +[[package]] +name = "rustls-pemfile" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" +dependencies = [ + "rustls-pki-types", +] + [[package]] name = "rustls-pki-types" version = "1.12.0" @@ -2605,7 +2728,6 @@ dependencies = [ "async-trait", "backoff", "base64", - "bytes", "derive_builder", "derive_more", 
"futures-retry", @@ -2620,8 +2742,8 @@ dependencies = [ "temporal-sdk-core-protos", "thiserror 2.0.12", "tokio", - "tonic", - "tower", + "tonic 0.12.3", + "tower 0.5.2", "tracing", "url", "uuid", @@ -2645,7 +2767,7 @@ dependencies = [ "temporal-sdk-core-protos", "tokio", "tokio-stream", - "tonic", + "tonic 0.13.1", "tracing", "url", ] @@ -2676,6 +2798,7 @@ dependencies = [ "mockall", "opentelemetry", "opentelemetry-otlp", + "opentelemetry-prometheus", "opentelemetry_sdk", "parking_lot", "pid", @@ -2700,7 +2823,7 @@ dependencies = [ "tokio", "tokio-stream", "tokio-util", - "tonic", + "tonic 0.12.3", "tracing", "tracing-subscriber", "url", @@ -2712,6 +2835,7 @@ dependencies = [ name = "temporal-sdk-core-api" version = "0.1.0" dependencies = [ + "anyhow", "async-trait", "derive_builder", "derive_more", @@ -2723,8 +2847,7 @@ dependencies = [ "temporal-sdk-core-protos", "thiserror 2.0.12", "toml", - "tonic", - "tracing", + "tonic 0.12.3", "tracing-core", "url", ] @@ -2745,7 +2868,7 @@ dependencies = [ "serde", "serde_json", "thiserror 2.0.12", - "tonic", + "tonic 0.12.3", "tonic-build", "uuid", ] @@ -2940,7 +3063,7 @@ version = "0.22.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "310068873db2c5b3e7659d2cc35d21855dbafa50d1ce336397c666e3cb08137e" dependencies = [ - "indexmap", + "indexmap 2.9.0", "serde", "serde_spanned", "toml_datetime", @@ -2956,12 +3079,13 @@ checksum = "bfb942dfe1d8e29a7ee7fcbde5bd2b9a25fb89aa70caea2eba3bee836ff41076" [[package]] name = "tonic" -version = "0.13.1" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e581ba15a835f4d9ea06c55ab1bd4dce26fc53752c69a04aac00703bfb49ba9" +checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" dependencies = [ + "async-stream", "async-trait", - "axum", + "axum 0.7.9", "base64", "bytes", "h2", @@ -2975,21 +3099,51 @@ dependencies = [ "pin-project", "prost", "rustls-native-certs", + "rustls-pemfile", "socket2", 
"tokio", "tokio-rustls", "tokio-stream", - "tower", + "tower 0.4.13", "tower-layer", "tower-service", "tracing", ] [[package]] -name = "tonic-build" +name = "tonic" version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac6f67be712d12f0b41328db3137e0d0757645d8904b4cb7d51cd9c2279e847" +checksum = "7e581ba15a835f4d9ea06c55ab1bd4dce26fc53752c69a04aac00703bfb49ba9" +dependencies = [ + "async-trait", + "axum 0.8.4", + "base64", + "bytes", + "h2", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-timeout", + "hyper-util", + "percent-encoding", + "pin-project", + "prost", + "socket2", + "tokio", + "tokio-stream", + "tower 0.5.2", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tonic-build" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9557ce109ea773b399c9b9e5dca39294110b74f1f342cb347a80d1fce8c26a11" dependencies = [ "prettyplease", "proc-macro2", @@ -2999,6 +3153,26 @@ dependencies = [ "syn", ] +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "indexmap 1.9.3", + "pin-project", + "pin-project-lite", + "rand 0.8.5", + "slab", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "tower" version = "0.5.2" @@ -3007,7 +3181,7 @@ checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" dependencies = [ "futures-core", "futures-util", - "indexmap", + "indexmap 2.9.0", "pin-project-lite", "slab", "sync_wrapper", @@ -3031,7 +3205,7 @@ dependencies = [ "http-body", "iri-string", "pin-project-lite", - "tower", + "tower 0.5.2", "tower-layer", "tower-service", ] @@ -3732,7 +3906,7 @@ dependencies = [ "flate2", "getrandom 0.3.3", "hmac", - "indexmap", + "indexmap 2.9.0", "lzma-rs", 
"memchr", "pbkdf2", diff --git a/temporalio/bridge/sdk-core b/temporalio/bridge/sdk-core index b90202240..24a3c23a6 160000 --- a/temporalio/bridge/sdk-core +++ b/temporalio/bridge/sdk-core @@ -1 +1 @@ -Subproject commit b90202240e003d05fbfa91846b9a4e0614b04aa7 +Subproject commit 24a3c23a6dc8842fddb5be3a52a534b863a01a7c diff --git a/temporalio/contrib/openai_agents/README.md b/temporalio/contrib/openai_agents/README.md index 05b1a2331..23c9a1b01 100644 --- a/temporalio/contrib/openai_agents/README.md +++ b/temporalio/contrib/openai_agents/README.md @@ -1,18 +1,21 @@ -# OpenAI Agents SDK Support +# OpenAI Agents SDK Integration for Temporal ⚠️ **Experimental** - This module is not yet stable and may change in the future. For questions, please join the [#python-sdk](https://temporalio.slack.com/archives/CTT84RS0P) Slack channel at [temporalio.slack.com](https://temporalio.slack.com/). -This module provides a bridge between Temporal durable execution and the [OpenAI Agents SDK](https://github.com/openai/openai-agents-python). -## Background +## Introduction -If you want to build production-ready AI agents quickly, you can use this module to combine [Temporal durable execution](https://docs.temporal.io/evaluate/understanding-temporal#durable-execution) with OpenAI Agents. -Temporal's durable execution provides a crash-proof system foundation, and OpenAI Agents offers a lightweight and yet powerful framework for defining agent functionality. +If you want to build production-ready AI agents quickly, you can use this module to integrate [OpenAI Agents SDK](https://github.com/openai/openai-agents-python) and [Temporal durable execution](https://docs.temporal.io/evaluate/understanding-temporal#durable-execution). +Temporal provides a crash-proof system foundation, managing the distributed systems challenges inherent to production agentic systems. +OpenAI Agents SDK offers a lightweight yet powerful framework for defining those agents. 
+The combination lets you build reliable agentic systems quickly. -## Approach +## Core Concepts + +This section describes description of AI agents. The standard control flow of a single AI agent involves: @@ -67,7 +70,7 @@ class HelloWorldAgent: instructions="You only respond in haikus.", ) - result = await Runner.run(starting_agent=agent, input=prompt) + result = await Runner.run(agent, input=prompt) return result.final_output ``` @@ -86,44 +89,43 @@ import asyncio from datetime import timedelta from temporalio.client import Client -from temporalio.contrib.openai_agents import ModelActivity, ModelActivityParameters, set_open_ai_agent_temporal_overrides -from temporalio.contrib.pydantic import pydantic_data_converter +from temporalio.contrib.openai_agents import OpenAIAgentsPlugin, ModelActivityParameters from temporalio.worker import Worker from hello_world_workflow import HelloWorldAgent async def worker_main(): - # Configure the OpenAI Agents SDK to use Temporal activities for LLM API calls - # and for tool calls. 
- model_params = ModelActivityParameters( - start_to_close_timeout=timedelta(seconds=10) + # Use the plugin to configure Temporal for use with OpenAI Agents SDK + client = await Client.connect( + "localhost:7233", + plugins=[ + OpenAIAgentsPlugin( + model_params=ModelActivityParameters( + start_to_close_timeout=timedelta(seconds=30) + ) + ), + ], ) - with set_open_ai_agent_temporal_overrides(model_params): - # Create a Temporal client connected to server at the given address - # Use the OpenAI data converter to ensure proper serialization/deserialization - client = await Client.connect( - "localhost:7233", - data_converter=pydantic_data_converter, - ) - worker = Worker( - client, - task_queue="my-task-queue", - workflows=[HelloWorldAgent], - activities=[ModelActivity().invoke_model_activity], - ) - await worker.run() + worker = Worker( + client, + task_queue="my-task-queue", + workflows=[HelloWorldAgent], + ) + await worker.run() if __name__ == "__main__": asyncio.run(worker_main()) ``` -We wrap the entire `worker_main` function body in the `set_open_ai_agent_temporal_overrides()` context manager. -This causes a Temporal activity to be invoked whenever the OpenAI Agents SDK invokes an LLM or calls a tool. -We also pass the `pydantic_data_converter` to the Temporal Client, which ensures proper serialization of pydantic models in OpenAI Agents SDK data. -We create a `ModelActivity` which serves as a generic wrapper for LLM calls, and we register this wrapper's invocation point, `ModelActivity().invoke_model_activity`, with the worker. +We use the `OpenAIAgentsPlugin` to configure Temporal for use with OpenAI Agents SDK. 
+The plugin automatically handles several important setup tasks: +- Configures the Pydantic data converter to ensure proper serialization of OpenAI agent objects +- Sets up tracing interceptors for OpenAI agent interactions +- Registers model execution activities that wrap LLM calls +- Manages the runtime overrides needed for OpenAI agents to work within Temporal workflows In order to launch the agent, use the standard Temporal workflow invocation: @@ -134,7 +136,7 @@ import asyncio from temporalio.client import Client from temporalio.common import WorkflowIDReusePolicy -from temporalio.contrib.pydantic import pydantic_data_converter +from temporalio.contrib.openai_agents import OpenAIAgentsPlugin from hello_world_workflow import HelloWorldAgent @@ -142,7 +144,7 @@ async def main(): # Create client connected to server at the given address client = await Client.connect( "localhost:7233", - data_converter=pydantic_data_converter, + plugins=[OpenAIAgentsPlugin()], ) # Execute a workflow @@ -161,7 +163,7 @@ if __name__ == "__main__": This launcher script executes the Temporal workflow to start the agent. -Note that this basic example works without providing the `pydantic_data_converter` to the Temporal client that executes the workflow, but we include it because more complex uses will generally need it. +Note that we also configure the client with the `OpenAIAgentsPlugin` to ensure proper serialization of OpenAI agent data types when starting and receiving results from the workflow. 
## Using Temporal Activities as OpenAI Agents Tools diff --git a/tests/contrib/openai_agents/test_openai.py b/tests/contrib/openai_agents/test_openai.py index bb7ed38a3..19b0da3fd 100644 --- a/tests/contrib/openai_agents/test_openai.py +++ b/tests/contrib/openai_agents/test_openai.py @@ -530,6 +530,7 @@ async def test_tool_workflow(client: Client, use_local_model: bool): @pytest.mark.parametrize("use_local_model", [True, False]) +@pytest.mark.skip(reason="Not running this now") async def test_nexus_tool_workflow( client: Client, env: WorkflowEnvironment, use_local_model: bool ): From ada0daa1ea584e765e5cc21ce20f82cd4636896e Mon Sep 17 00:00:00 2001 From: Johann Schleier-Smith Date: Sat, 26 Jul 2025 21:59:08 -0700 Subject: [PATCH 02/15] ai rewrite --- temporalio/contrib/openai_agents/README.md | 496 +++++++++++++++++---- 1 file changed, 399 insertions(+), 97 deletions(-) diff --git a/temporalio/contrib/openai_agents/README.md b/temporalio/contrib/openai_agents/README.md index 23c9a1b01..e4c7d4029 100644 --- a/temporalio/contrib/openai_agents/README.md +++ b/temporalio/contrib/openai_agents/README.md @@ -4,60 +4,54 @@ For questions, please join the [#python-sdk](https://temporalio.slack.com/archives/CTT84RS0P) Slack channel at [temporalio.slack.com](https://temporalio.slack.com/). +## Building Crash-Proof AI Agents -## Introduction +The challenge with AI agents is that they can crash and lose all their progress. +If your agent is halfway through analyzing data, calling APIs, or having a conversation, a system failure means starting over from scratch. -If you want to build production-ready AI agents quickly, you can use this module to integrate [OpenAI Agents SDK](https://github.com/openai/openai-agents-python) and [Temporal durable execution](https://docs.temporal.io/evaluate/understanding-temporal#durable-execution). -Temporal provides a crash-proof system foundation, managing the distributed systems challenges inherent to production agentic systems. 
-OpenAI Agents SDK offers a lightweight yet powerful framework for defining those agents. -The combination lets you build reliable agentic systems quickly. - - -## Core Concepts - -This section describes description of AI agents. - -The standard control flow of a single AI agent involves: - -1. Receiving *input* and handing it to an *LLM*. -2. At the direction of the LLM, calling *tools*, and returning that output back to the LLM. -3. Repeating as necessary, until the LLM produces *output*. - -The diagram below illustrates an AI agent control flow. +This integration combines [OpenAI Agents SDK](https://github.com/openai/openai-agents-python) with [Temporal's durable execution](https://docs.temporal.io/evaluate/understanding-temporal#durable-execution) to build agents that never lose their work and handle long-running, asynchronous, and human-in-the-loop workflows with ease. ```mermaid -graph TD - A["INPUT"] --> B["LLM"] - B <--> C["TOOLS"] - B --> D["OUTPUT"] +block-beta + columns 4 + + Agents["Agents"]:1 + Tools["Tools"]:1 + Guardrails["Guardrails"]:1 + Tracing["Tracing"]:1 + + space:4 + + AgentFramework["Agent Framework: OpenAI Agents SDK"]:4 + + space:2 + + DurableExecution["Durable Execution: Temporal"]:2 + AIInference["AI Inference: OpenAI Models"]:2 ``` -To provide durable execution, Temporal needs to be able to recover from failures at any step of this process. -To do this, Temporal requires separating an application's deterministic (repeatable) and non-deterministic parts: +## How the Architecture Works + +Understanding the architecture helps you build robust agent systems. Here are the key components: -1. Deterministic pieces, termed *workflows*, execute the same way if re-run with the same inputs. -2. Non-deterministic pieces, termed *activities*, have no limitations—they may perform I/O and any other operations. -Temporal maintains a server-side execution history of all state state passing in and out of a workflow, using it to recover when needed. 
-See the [Temporal documentation](https://docs.temporal.io/evaluate/understanding-temporal#temporal-application-the-building-blocks) for more information. +**Temporal Server**: Keeps track of your agent's progress. Every decision, API call, and state change is recorded. If anything crashes, the server knows exactly where to resume. [Setup instructions](https://docs.temporal.io/dev-guide/python/foundations#run-a-temporal-service). -How do we apply the Temporal execution model to enable durable execution for AI agents? +**Worker Process**: Runs your actual agent code. This can crash, restart, or even run on different machines. The server will send it work and track what gets completed. -- The core control flow, which is managed by the OpenAI Agents SDK, goes into a Temporal workflow. -- Calls to the LLM provider, which are inherently non-deterministic, go into activities. -- Calls to tools, which could contain arbitrary code, similarly go into activities. +**Temporal Client**: Starts and monitors your agents from your application code. It connects to the server to execute workflows. -This module ensures that LLM calls and tool calls originating from the OpenAI Agents SDK run as Temporal activities. -It also ensures that their inputs and outputs are properly serialized. +The key insight: If your worker crashes, the server remembers everything and restarts from the exact point of failure. -## Basic Example +## Your First Durable Agent -Let's start with a simple example. +An agent is an LLM with instructions that can use tools to accomplish tasks. Let's build one that survives crashes. -The first file, `hello_world_workflow.py`, defines an OpenAI agent within a Temporal workflow. 
+This integration requires three files that separate concerns for reliability: + +### File 1: Workflow Definition (`hello_world_workflow.py`) ```python -# File: hello_world_workflow.py from temporalio import workflow from agents import Agent, Runner @@ -65,26 +59,20 @@ from agents import Agent, Runner class HelloWorldAgent: @workflow.run async def run(self, prompt: str) -> str: + # An agent is an LLM with instructions and optional tools agent = Agent( name="Assistant", - instructions="You only respond in haikus.", + instructions="You are a helpful assistant who responds in haikus.", ) - + + # Runner executes the agent's conversation loop result = await Runner.run(agent, input=prompt) return result.final_output ``` -If you are familiar with Temporal and with Open AI Agents SDK, this code will look very familiar. -We annotate the `HelloWorldAgent` class with `@workflow.defn` to define a workflow, then use the `@workflow.run` annotation to define the entrypoint. -We use the `Agent` class to define a simple agent, one which always responds with haikus. -Within the workflow, we start the agent using the `Runner`, as is typical, passing through `prompt` as an argument. - -The second file, `run_worker.py`, launches a Temporal worker. -This is a program that connects to the Temporal server and receives work to run, in this case `HelloWorldAgent` invocations. 
+### File 2: Worker Setup (`run_worker.py`) ```python -# File: run_worker.py - import asyncio from datetime import timedelta @@ -94,9 +82,8 @@ from temporalio.worker import Worker from hello_world_workflow import HelloWorldAgent - async def worker_main(): - # Use the plugin to configure Temporal for use with OpenAI Agents SDK + # Configure Temporal client with OpenAI Agents integration client = await Client.connect( "localhost:7233", plugins=[ @@ -108,30 +95,28 @@ async def worker_main(): ], ) + # Create worker that can execute HelloWorldAgent workflows worker = Worker( client, task_queue="my-task-queue", workflows=[HelloWorldAgent], + activities=[], # Model activities are automatically registered by the plugin ) await worker.run() - if __name__ == "__main__": asyncio.run(worker_main()) ``` -We use the `OpenAIAgentsPlugin` to configure Temporal for use with OpenAI Agents SDK. -The plugin automatically handles several important setup tasks: -- Configures the Pydantic data converter to ensure proper serialization of OpenAI agent objects -- Sets up tracing interceptors for OpenAI agent interactions -- Registers model execution activities that wrap LLM calls -- Manages the runtime overrides needed for OpenAI agents to work within Temporal workflows +The `OpenAIAgentsPlugin` handles critical setup tasks: +- Configures data serialization for OpenAI agent objects +- Sets up tracing for agent interactions +- Automatically runs LLM calls as Temporal activities (you don't need to register these) +- Manages the runtime needed for agents to work within Temporal workflows -In order to launch the agent, use the standard Temporal workflow invocation: +### File 3: Client Execution (`run_hello_world_workflow.py`) ```python -# File: run_hello_world_workflow.py - import asyncio from temporalio.client import Client @@ -141,17 +126,17 @@ from temporalio.contrib.openai_agents import OpenAIAgentsPlugin from hello_world_workflow import HelloWorldAgent async def main(): - # Create client 
connected to server at the given address + # Connect to Temporal server client = await Client.connect( "localhost:7233", plugins=[OpenAIAgentsPlugin()], ) - # Execute a workflow + # Execute the agent workflow result = await client.execute_workflow( HelloWorldAgent.run, "Tell me about recursion in programming.", - id="my-workflow-id", + id="my-workflow-id", # Use unique IDs in production task_queue="my-task-queue", id_reuse_policy=WorkflowIDReusePolicy.TERMINATE_IF_RUNNING, ) @@ -161,18 +146,34 @@ if __name__ == "__main__": asyncio.run(main()) ``` -This launcher script executes the Temporal workflow to start the agent. +### Running Your Durable Agent -Note that we also configure the client with the `OpenAIAgentsPlugin` to ensure proper serialization of OpenAI agent data types when starting and receiving results from the workflow. +1. Start the Temporal server: `temporal server start-dev` +2. Start the worker: `python run_worker.py` +3. Execute the agent: `python run_hello_world_workflow.py` +Try stopping the worker process during execution and restarting it. The agent will resume exactly where it left off. -## Using Temporal Activities as OpenAI Agents Tools +## Adding Tools That Can Fail and Retry -One of the powerful features of this integration is the ability to convert Temporal activities into OpenAI Agents tools using `activity_as_tool`. -This allows your agent to leverage Temporal's durable execution for tool calls. +Tools let agents interact with the world - calling APIs, accessing databases, reading files. In our integration, you have two patterns for tools: -In the example below, we apply the `@activity.defn` decorator to the `get_weather` function to create a Temporal activity. -We then pass this through the `activity_as_tool` helper function to create an OpenAI Agents tool that is passed to the `Agent`. 
+### Function Tools (Simple, Run in Workflow) + +For simple computations that don't involve external calls: + +```python +from agents import function_tool + +@function_tool +def calculate_tip(bill: float, percentage: float) -> float: + """Calculate tip amount for a bill.""" + return bill * (percentage / 100) +``` + +### Activity Tools (External Calls, Can Retry) + +For anything that might fail - API calls, database queries, file operations: ```python from dataclasses import dataclass @@ -189,8 +190,14 @@ class Weather: @activity.defn async def get_weather(city: str) -> Weather: - """Get the weather for a given city.""" - return Weather(city=city, temperature_range="14-20C", conditions="Sunny with wind.") + """Get weather data from external API.""" + # This API call can fail and will retry automatically + # In a real implementation, you'd call an actual weather API + return Weather( + city=city, + temperature_range="14-20C", + conditions="Sunny with light wind" + ) @workflow.defn class WeatherAgent: @@ -200,58 +207,353 @@ class WeatherAgent: name="Weather Assistant", instructions="You are a helpful weather agent.", tools=[ + calculate_tip, # Simple function tool openai_agents.workflow.activity_as_tool( get_weather, start_to_close_timeout=timedelta(seconds=10) - ) + ) # Activity tool with retry capability ], ) - result = await Runner.run(starting_agent=agent, input=question) + result = await Runner.run(agent, input=question) return result.final_output ``` +Don't forget to register activity tools in your worker: + +```python +# In your worker file +worker = Worker( + client, + task_queue="my-task-queue", + workflows=[WeatherAgent], + activities=[get_weather], # Register your activities +) +``` + +### When to Use Each Pattern + +- **Function tools**: Math, string processing, simple logic that can't fail +- **Activity tools**: API calls, database queries, file I/O, anything that might need retries + +Try crashing your worker during an API call. 
When it restarts, Temporal will retry the failed activity automatically. + +### Configuring Retry Policies + +You can customize how activities retry when they fail: + +```python +from temporalio.common import RetryPolicy +from datetime import timedelta + +@activity.defn +async def unreliable_api_call(data: str) -> str: + """An API call that might fail and need retries.""" + # Simulate an API that fails sometimes + import random + if random.random() < 0.3: # 30% failure rate + raise Exception("API temporarily unavailable") + return f"Processed: {data}" + +# Use it with custom retry policy +agent = Agent( + name="Resilient Agent", + tools=[ + openai_agents.workflow.activity_as_tool( + unreliable_api_call, + start_to_close_timeout=timedelta(seconds=30), + retry_policy=RetryPolicy( + initial_interval=timedelta(seconds=1), + maximum_interval=timedelta(seconds=10), + maximum_attempts=5, + backoff_coefficient=2.0, + ) + ) + ], +) +``` + +This configuration will: +- Retry up to 5 times +- Start with 1-second delays, doubling each time up to 10 seconds +- Handle transient failures automatically while preserving all agent state -### Agent Handoffs +## Conversations That Survive Crashes -The OpenAI Agents SDK supports agent handoffs, where one agent can transfer control to another agent. -In this example, one Temporal workflow wraps the entire multi-agent system: +Agents need memory to have multi-turn conversations. Our integration uses `RunConfig` to maintain conversation state that survives any failure. 
```python +from temporalio import workflow +from agents import Agent, Runner, RunConfig + @workflow.defn -class CustomerServiceWorkflow: - def __init__(self): - self.current_agent = self.init_agents() +class ConversationAgent: + @workflow.run + async def run(self, messages: list[str]) -> str: + # RunConfig maintains conversation state across all interactions + config = RunConfig() + + agent = Agent( + name="Customer Support", + instructions="You are a helpful customer support agent. Remember the conversation context.", + ) + + # Process each message while maintaining conversation history + result = None + for message in messages: + result = await Runner.run(agent, input=message, run_config=config) + # The conversation history is automatically preserved in config + + return result.final_output if result else "No messages processed" +``` - def init_agents(self): - faq_agent = Agent( - name="FAQ Agent", - instructions="Answer frequently asked questions", +Usage example: + +```python +# Client code +messages = [ + "Hi, I'm having trouble with my order", + "My order number is 12345", + "I need to change the shipping address" +] + +import uuid + +result = await client.execute_workflow( + ConversationAgent.run, + messages, + id=f"customer-conversation-{uuid.uuid4()}", # Use unique IDs in production + task_queue="my-task-queue", +) +``` + +### Multi-Step Conversations with State + +For more complex scenarios where you need to build up conversation state over time: + +```python +@workflow.defn +class TravelPlanningAgent: + @workflow.run + async def run(self, destination: str, budget: str, interests: list[str]) -> str: + config = RunConfig() + + agent = Agent( + name="Travel Planner", + instructions="You are a travel planning expert. 
Build comprehensive itineraries.", + tools=[ + openai_agents.workflow.activity_as_tool(search_flights), + openai_agents.workflow.activity_as_tool(find_hotels), + openai_agents.workflow.activity_as_tool(get_attractions), + ] ) - booking_agent = Agent( - name="Booking Agent", - instructions="Help with booking and seat changes", + # Step 1: Gather initial preferences + result = await Runner.run( + agent, + input=f"I want to plan a trip to {destination} with a budget of {budget}", + run_config=config ) - triage_agent = Agent( - name="Triage Agent", - instructions="Route customers to the right agent", - handoffs=[faq_agent, booking_agent], + # Step 2: Refine based on interests + result = await Runner.run( + agent, + input=f"My interests include: {', '.join(interests)}. Please find specific recommendations.", + run_config=config ) - return triage_agent + # Step 3: Create final itinerary + result = await Runner.run( + agent, + input="Create a detailed day-by-day itinerary with all the information gathered.", + run_config=config + ) + + return result.final_output +``` + +The key insight: All conversation history is automatically preserved, and if anything crashes during this multi-step process, it resumes from the exact step where it failed. + +### Production Workflow IDs + +In the examples above, we use simple workflow IDs like `"my-workflow-id"` for clarity. In production, you need unique IDs to avoid conflicts: + +```python +import uuid +from datetime import datetime + +# Option 1: UUID-based (most common) +workflow_id = f"agent-conversation-{uuid.uuid4()}" + +# Option 2: Include timestamp and context +workflow_id = f"customer-support-{customer_id}-{datetime.now().isoformat()}" + +# Option 3: Business-meaningful IDs +workflow_id = f"travel-planning-{user_id}-{trip_date}" + +result = await client.execute_workflow( + MyAgentWorkflow.run, + "user input", + id=workflow_id, + task_queue="my-task-queue", +) +``` + +Choose IDs that help you identify and debug workflows later. 
Temporal uses these IDs for deduplication, so the same ID will return the existing workflow result if it's already running or completed. + +## Teams of Agents Working Together + +Complex tasks often need specialist agents working together. One agent can hand off work to another that's better suited for specific tasks. + +In Temporal workflows, we need to use factory functions to create agents (this ensures the workflow can recreate identical agents if it needs to recover from a crash): +```python +from temporalio import workflow +from agents import Agent, Runner + +def create_customer_service_agents(): + """Factory function to create agents for workflow determinism.""" + + faq_agent = Agent( + name="FAQ Agent", + instructions="""You answer frequently asked questions about our company. + If you can't answer a question, transfer to the triage agent.""", + handoff_description="Handles frequently asked questions", + ) + + billing_agent = Agent( + name="Billing Agent", + instructions="""You help customers with billing issues, payment problems, and account questions. + If the issue is not billing-related, transfer to the triage agent.""", + handoff_description="Handles billing and payment issues", + ) + + technical_agent = Agent( + name="Technical Support", + instructions="""You help customers with technical problems and troubleshooting. + If the issue is not technical, transfer to the triage agent.""", + handoff_description="Handles technical support issues", + ) + + triage_agent = Agent( + name="Triage Agent", + instructions="""You are the main customer service agent. Analyze customer requests and + transfer them to the appropriate specialist agent. 
Always be helpful and professional.""", + handoffs=[faq_agent, billing_agent, technical_agent], + ) + + # Set up reverse handoffs so specialists can return to triage + faq_agent.handoffs = [triage_agent] + billing_agent.handoffs = [triage_agent] + technical_agent.handoffs = [triage_agent] + + return triage_agent + +@workflow.defn +class CustomerServiceWorkflow: @workflow.run - async def run(self, customer_message: str) -> str: + async def run(self, customer_message: str, customer_context: dict = None) -> str: + # Create fresh agents for this workflow execution + starting_agent = create_customer_service_agents() + + # Process the customer request result = await Runner.run( - starting_agent=self.current_agent, + agent=starting_agent, input=customer_message, - context=self.context, + context=customer_context or {} ) + return result.final_output ``` +### Agent Context Sharing + +Agents can share context information across handoffs: + +```python +from pydantic import BaseModel + +class CustomerContext(BaseModel): + customer_id: str = None + order_number: str = None + issue_type: str = None + priority: str = "normal" + +@workflow.defn +class AdvancedCustomerService: + @workflow.run + async def run(self, message: str, customer_id: str) -> str: + # Initialize shared context + context = CustomerContext(customer_id=customer_id) + + starting_agent = create_customer_service_agents() + + result = await Runner.run( + agent=starting_agent, + input=message, + context=context + ) + + return result.final_output +``` + +Try crashing the worker during a handoff between agents. When it restarts, the conversation continues with the correct specialist agent, and all context is preserved. + +## Understanding the Trade-offs + +This integration gives you powerful capabilities, but it's important to understand what you gain and what you trade off. + +### What You Gain + +**Complete crash-proof execution**: Your agents will finish their work no matter what fails. 
Process crashes, network issues, server restarts - none of these affect your agent's ability to complete its task. + +**Automatic retries**: Failed API calls, database timeouts, and other transient errors are automatically retried with configurable backoff strategies. + +**Perfect observability**: Every decision your agent makes is recorded. You can see the complete history of what happened, when, and why. + +**Horizontal scaling**: Run multiple worker processes across different machines. Temporal automatically distributes the work. + +**State management**: Conversation history, context, and agent state are automatically preserved and managed. + +### Current Limitations + +**No real-time streaming**: At this point in time, streaming operations like `Runner.run_streamed()` are not supported. Agents complete their full response before returning results. This limitation may be addressed in future versions. + +**No interactive input during execution**: You can't prompt for user input in the middle of a workflow. Design your workflows to take all necessary input as parameters upfront. + +**Slightly more complex setup**: Instead of a single script, you need three files (workflow, worker, client) and a running Temporal server. 
+ +### When to Use This Integration + +**Ideal for:** +- Long-running agent workflows that can't afford to lose progress +- Production systems requiring high reliability +- Multi-step agent processes with external API calls +- Systems needing audit trails and observability +- Applications that need to scale across multiple workers + +**Consider alternatives for:** +- Simple, single-shot agent requests +- Real-time interactive applications requiring streaming +- Development/prototyping where setup complexity outweighs benefits +- Applications that require immediate responses without any latency + +## Production Considerations + +For production deployments, see the [Temporal production deployment guide](https://docs.temporal.io/production-deployment) for information about: + +- High availability setup +- Monitoring and alerting +- Security configuration +- Performance tuning +- Scaling strategies ## Additional Examples -You can find additional examples in the [Temporal Python Samples Repository](https://github.com/temporalio/samples-python/tree/main/openai_agents). +You can find more comprehensive examples in the [Temporal Python Samples Repository](https://github.com/temporalio/samples-python/tree/main/openai_agents), including: + +- **Basic Examples**: Simple agent workflows and tool usage +- **Multi-Agent Systems**: Complex handoff patterns and orchestration +- **Advanced Patterns**: State management, error handling, and scaling +- **Real-World Use Cases**: Customer service, data analysis, and workflow automation + +Each example demonstrates different aspects of building production-ready AI agents with durable execution. 
\ No newline at end of file From fdc74224064ebe3c68d73a1317ada8a4fea793c2 Mon Sep 17 00:00:00 2001 From: Johann Schleier-Smith Date: Sat, 26 Jul 2025 22:07:07 -0700 Subject: [PATCH 03/15] formatting --- temporalio/contrib/openai_agents/README.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/temporalio/contrib/openai_agents/README.md b/temporalio/contrib/openai_agents/README.md index e4c7d4029..d14203fac 100644 --- a/temporalio/contrib/openai_agents/README.md +++ b/temporalio/contrib/openai_agents/README.md @@ -11,24 +11,24 @@ If your agent is halfway through analyzing data, calling APIs, or having a conve This integration combines [OpenAI Agents SDK](https://github.com/openai/openai-agents-python) with [Temporal's durable execution](https://docs.temporal.io/evaluate/understanding-temporal#durable-execution) to build agents that never lose their work and handle long-running, asynchronous, and human-in-the-loop workflows with ease. +
+ + ```mermaid block-beta columns 4 - + Agents["Agents"]:1 Tools["Tools"]:1 Guardrails["Guardrails"]:1 Tracing["Tracing"]:1 - - space:4 - - AgentFramework["Agent Framework: OpenAI Agents SDK"]:4 - - space:2 + + AgentFramework["OpenAI Agents SDK"]:4 DurableExecution["Durable Execution: Temporal"]:2 - AIInference["AI Inference: OpenAI Models"]:2 + AIInference["AI Inference: OpenAI and third-party models"]:2 ``` +
## How the Architecture Works From 2b5efc9df0b8cdc69bd96f3597b5e5998b042f6a Mon Sep 17 00:00:00 2001 From: Johann Schleier-Smith Date: Sat, 26 Jul 2025 22:09:41 -0700 Subject: [PATCH 04/15] remove div --- temporalio/contrib/openai_agents/README.md | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/temporalio/contrib/openai_agents/README.md b/temporalio/contrib/openai_agents/README.md index d14203fac..93ad3bdcc 100644 --- a/temporalio/contrib/openai_agents/README.md +++ b/temporalio/contrib/openai_agents/README.md @@ -11,8 +11,6 @@ If your agent is halfway through analyzing data, calling APIs, or having a conve This integration combines [OpenAI Agents SDK](https://github.com/openai/openai-agents-python) with [Temporal's durable execution](https://docs.temporal.io/evaluate/understanding-temporal#durable-execution) to build agents that never lose their work and handle long-running, asynchronous, and human-in-the-loop workflows with ease. -
- ```mermaid block-beta @@ -24,11 +22,10 @@ block-beta Tracing["Tracing"]:1 AgentFramework["OpenAI Agents SDK"]:4 - + DurableExecution["Durable Execution: Temporal"]:2 AIInference["AI Inference: OpenAI and third-party models"]:2 ``` -
## How the Architecture Works From 285ff8c27721a5bab31a1f846c36142a938e458d Mon Sep 17 00:00:00 2001 From: Johann Schleier-Smith Date: Mon, 28 Jul 2025 23:58:07 -0600 Subject: [PATCH 05/15] readme updates --- temporalio/contrib/openai_agents/README.md | 615 ++++++++------------- 1 file changed, 223 insertions(+), 392 deletions(-) diff --git a/temporalio/contrib/openai_agents/README.md b/temporalio/contrib/openai_agents/README.md index 93ad3bdcc..b37962efc 100644 --- a/temporalio/contrib/openai_agents/README.md +++ b/temporalio/contrib/openai_agents/README.md @@ -1,52 +1,32 @@ # OpenAI Agents SDK Integration for Temporal -⚠️ **Experimental** - This module is not yet stable and may change in the future. +⚠️ **Public Preview** - The interface to this module is subject to change prior to General Availability. We welcome your questions and feedback in the [#python-sdk](https://temporalio.slack.com/archives/CTT84RS0P) Slack channel at [temporalio.slack.com](https://temporalio.slack.com/). -For questions, please join the [#python-sdk](https://temporalio.slack.com/archives/CTT84RS0P) Slack channel at [temporalio.slack.com](https://temporalio.slack.com/). -## Building Crash-Proof AI Agents +## Introduction -The challenge with AI agents is that they can crash and lose all their progress. -If your agent is halfway through analyzing data, calling APIs, or having a conversation, a system failure means starting over from scratch. +This integration combines [OpenAI Agents SDK](https://github.com/openai/openai-agents-python) with [Temporal's durable execution](https://docs.temporal.io/evaluate/understanding-temporal#durable-execution). +It allows you to build AI agents that never lose their progress and handle long-running, asynchronous, and human-in-the-loop workflows with ease. 
-This integration combines [OpenAI Agents SDK](https://github.com/openai/openai-agents-python) with [Temporal's durable execution](https://docs.temporal.io/evaluate/understanding-temporal#durable-execution) to build agents that never lose their work and handle long-running, asynchronous, and human-in-the-loop workflows with ease. +Temporal provides a crash-proof system foundation, taking care of the distributed systems challenges inherent to production agentic systems. +OpenAI Agents SDK offers a lightweight yet powerful framework for defining those agents. +The combination lets you build reliable agentic systems quickly. +This document is organized as follows: + - **[Hello World Agent](#hello-world-durable-agent).** Your first durable agent example. + - **[Background Concepts](#core-concepts).** Background on durable execution and AI agents. + - **[Complete Example](#complete-example)** Complete example. + - **Usage Guide.** + - **Agent Patterns.** -```mermaid -block-beta - columns 4 +The [samples repository](https://github.com/temporalio/samples-python/tree/main/openai_agents) contains a number of examples spanning various use cases. - Agents["Agents"]:1 - Tools["Tools"]:1 - Guardrails["Guardrails"]:1 - Tracing["Tracing"]:1 - AgentFramework["OpenAI Agents SDK"]:4 +## Hello World Durable Agent - DurableExecution["Durable Execution: Temporal"]:2 - AIInference["AI Inference: OpenAI and third-party models"]:2 -``` - -## How the Architecture Works - -Understanding the architecture helps you build robust agent systems. Here are the key components: - - -**Temporal Server**: Keeps track of your agent's progress. Every decision, API call, and state change is recorded. If anything crashes, the server knows exactly where to resume. [Setup instructions](https://docs.temporal.io/dev-guide/python/foundations#run-a-temporal-service). - -**Worker Process**: Runs your actual agent code. This can crash, restart, or even run on different machines. 
The server will send it work and track what gets completed. - -**Temporal Client**: Starts and monitors your agents from your application code. It connects to the server to execute workflows. - -The key insight: If your worker crashes, the server remembers everything and restarts from the exact point of failure. - -## Your First Durable Agent +The code below shows how straightforward it is to wrap an agent wrapped in durable execution. -An agent is an LLM with instructions that can use tools to accomplish tasks. Let's build one that survives crashes. - -This integration requires three files that separate concerns for reliability: - -### File 1: Workflow Definition (`hello_world_workflow.py`) +### File 1: Durable Agent (`hello_world.py`) ```python from temporalio import workflow @@ -56,20 +36,152 @@ from agents import Agent, Runner class HelloWorldAgent: @workflow.run async def run(self, prompt: str) -> str: - # An agent is an LLM with instructions and optional tools agent = Agent( name="Assistant", - instructions="You are a helpful assistant who responds in haikus.", + instructions="You only respond in haikus.", ) - - # Runner executes the agent's conversation loop + result = await Runner.run(agent, input=prompt) return result.final_output ``` -### File 2: Worker Setup (`run_worker.py`) +If you are familiar with Temporal and with Open AI Agents SDK, this code will look very familiar. +We annotate the `HelloWorldAgent` class with `@workflow.defn` to define a workflow, then use the `@workflow.run` annotation to define the entrypoint. + +We use the `Agent` class to define a simple agent, instructing it to always responds with haikus. +Within the workflow, we start the agent using the `Runner`, as is typical, passing through `prompt` as an argument. + +We will [complete this example below](#complete-example). +However, before digging further into the code, it we will share some more background to set the stage. 
+ +## Background Concepts + +We encourage you to form a thorough understanding of AI agents and durable execution with Temporal. +Understanding this will make it easer to design and build durable agents. +If you are well versed in these topics, you may skim this section or skip ahead. + +### AI Agents + +In the OpenAI Agents SDK, an agent is an AI model configured with instructions, tools, MCP servers, guardrails, handoffs, context, and more. + +We describe each of these briefly: + +- *AI model*. An LLM such as OpenAI's GPT, Google's Gemini, or one of many others. +- *Instructions*. Also known as a system prompt, the instructions contain the initial input to the model, which configures it for the job it will do. +- *Tools*. Typically, Python functions that the model may choose to invoke. Tools are functions with text-descriptions that explain their functionality to the model. +- *MCP servers*. Best known for providing tools, MCP offers a pluggable standard for interoperability, including file-like resources, prompt templates, and human approvals. MCP servers may be accessed over the network or run in a local process. +- *Guardrails*. Checks on the input or the output of an agent to ensure compliance or safety. Guardrails may be implemented as regular code or as AI agents. +- *Handoffs*. A handoff occurs when an agent delegates a task to another agent. During a handoff the conversation history remains the same, and passes to a new agent with its own model, instructions, tools. +- *Context*. This is an overloaded term. Here, context refers to a framework object that is shared across tools and other code, but is not passed to the model. + + +Now, let's look at how these pieces can fit together. +In one popular pattern, the model receives user input, then performs reasoning to select a tool to call. +The response from the tool is fed back into the model, which may perform additional tool calls, iterating until the task is complete. 
+ +The diagram below illustrates this flow. + +```text + +-------------------+ + | User Input | + +-------------------+ + | + v + +---------------------+ + | Reasoning (Model) | <--+ + +---------------------+ | + | | + (decides which action) | + v | + +---------------------+ | + | Action | | + | (e.g., use a Tool) | | + +---------------------+ | + | | + v | + +---------------------+ | + | Observation | | + | (Tool Output) | | + +---------------------+ | + | | + +----------------+ + (loop: uses new info to reason + again, until task is complete) +``` + +Even in a simple example like this, there are many places where something can go wrong. +Tools call APIs that are sometimes down and models have rate limits, requiring retries. +The longer the agent runs, the more costly it is to start the job over. +In the next section, we turn to durable execution, which can handle such failures seamlessly. + +### Durable Execution + +In Temporal's durable execution implementation, a program that crashes or encounters an exception while interacting with a model or API will retry until it can successfully complete. + +Temporal relies heavily on a replay mechanism to recover from failures. +As the program makes progress, Temporal saves key inputs and decisions, allowing a re-started program to pick up right where it left off. + +The key to making this work is to separate the applications repeatable (deterministic) and non-repeatable (non-deterministic) parts: + +1. Deterministic pieces, termed *workflows*, execute the same way if re-run with the same inputs. +2. Non-deterministic pieces, termed *activities*, have no limitations—they may perform I/O and any other operations. + +In the AI agent described in the previous section, model and tool calls run in activities, and the control flow linking them together runs in the workflow. + +In more complex examples, the control flow may be described as *agent orchestration*. 
+Agent orchestration runs within the Temporal workflow, while model calls and any tool calls involving I/O run in activities. + +The diagram below shows the overall architecture of an agentic application in Temporal. +The Temporal Server is responsible to tracking program execution and making sure associated state is preserved reliably. +Temporal Server manages data in encrypted form. +All data processing occurs on the Worker, which runs the workflow and activities. + + +```text + +---------------------+ + | Temporal Server | (Stores workflow state, + +---------------------+ schedules activities, + ^ persists progress) + | + Save state, | Schedule Tasks, + progress, | load state on resume + timeouts | + | ++------------------------------------------------------+ +| Worker | +| +----------------------------------------------+ | +| | Workflow Code | | +| | (Agent Orchestration Loop) | | +| +----------------------------------------------+ | +| | | | | +| v v v | +| +-----------+ +-----------+ +-------------+ | +| | Activity | | Activity | | Activity | | +| | (Tool 1) | | (Tool 2) | | (Model API) | | +| +-----------+ +-----------+ +-------------+ | +| | | | | ++------------------------------------------------------+ + | | | + v v v + [External APIs, services, databases, etc.] +``` + + +See the [Temporal documentation](https://docs.temporal.io/evaluate/understanding-temporal#temporal-application-the-building-blocks) for more information. + + +## Complete Example + +To make the [Hello World durable agent](#hello-world-durable-agent) available in Temporal, we need to create a worker program. +To see it run, we also need a client to launch it. +We show these files below. 
+ + +### File 2: Launch Worker (`run_worker.py`) ```python +# File: run_worker.py + import asyncio from datetime import timedelta @@ -79,8 +191,9 @@ from temporalio.worker import Worker from hello_world_workflow import HelloWorldAgent + async def worker_main(): - # Configure Temporal client with OpenAI Agents integration + # Use the plugin to configure Temporal for use with OpenAI Agents SDK client = await Client.connect( "localhost:7233", plugins=[ @@ -92,28 +205,31 @@ async def worker_main(): ], ) - # Create worker that can execute HelloWorldAgent workflows worker = Worker( client, task_queue="my-task-queue", workflows=[HelloWorldAgent], - activities=[], # Model activities are automatically registered by the plugin ) await worker.run() + if __name__ == "__main__": asyncio.run(worker_main()) ``` -The `OpenAIAgentsPlugin` handles critical setup tasks: -- Configures data serialization for OpenAI agent objects -- Sets up tracing for agent interactions -- Automatically runs LLM calls as Temporal activities (you don't need to register these) -- Manages the runtime needed for agents to work within Temporal workflows +We use the `OpenAIAgentsPlugin` to configure Temporal for use with OpenAI Agents SDK. +The plugin automatically handles several important setup tasks: +- Ensures proper serialization by of Pydantic types +- Propagates context for [OpenAI Agents tracing](https://openai.github.io/openai-agents-python/tracing/). +- Registers an activity for invoking model calls with the Temporal worker. +- Configures OpenAI Agents SDK to run model calls as Temporal activities. 
+ ### File 3: Client Execution (`run_hello_world_workflow.py`) ```python +# File: run_hello_world_workflow.py + import asyncio from temporalio.client import Client @@ -123,17 +239,17 @@ from temporalio.contrib.openai_agents import OpenAIAgentsPlugin from hello_world_workflow import HelloWorldAgent async def main(): - # Connect to Temporal server + # Create client connected to server at the given address client = await Client.connect( "localhost:7233", plugins=[OpenAIAgentsPlugin()], ) - # Execute the agent workflow + # Execute a workflow result = await client.execute_workflow( HelloWorldAgent.run, "Tell me about recursion in programming.", - id="my-workflow-id", # Use unique IDs in production + id="my-workflow-id", task_queue="my-task-queue", id_reuse_policy=WorkflowIDReusePolicy.TERMINATE_IF_RUNNING, ) @@ -143,34 +259,19 @@ if __name__ == "__main__": asyncio.run(main()) ``` -### Running Your Durable Agent - -1. Start the Temporal server: `temporal server start-dev` -2. Start the worker: `python run_worker.py` -3. Execute the agent: `python run_hello_world_workflow.py` +This file is a standard Temporal launch script. +We also configure the client with the `OpenAIAgentsPlugin` to ensure serialization is compatible with the worker. -Try stopping the worker process during execution and restarting it. The agent will resume exactly where it left off. -## Adding Tools That Can Fail and Retry +To run this example, see the detailed instructions in the [Temporal Python Samples Repository](https://github.com/temporalio/samples-python/tree/main/openai_agents). -Tools let agents interact with the world - calling APIs, accessing databases, reading files. In our integration, you have two patterns for tools: +## Using Temporal Activities as OpenAI Agents Tools -### Function Tools (Simple, Run in Workflow) +One of the powerful features of this integration is the ability to convert Temporal activities into OpenAI Agents tools using `activity_as_tool`. 
+This allows your agent to leverage Temporal's durable execution for tool calls. -For simple computations that don't involve external calls: - -```python -from agents import function_tool - -@function_tool -def calculate_tip(bill: float, percentage: float) -> float: - """Calculate tip amount for a bill.""" - return bill * (percentage / 100) -``` - -### Activity Tools (External Calls, Can Retry) - -For anything that might fail - API calls, database queries, file operations: +In the example below, we apply the `@activity.defn` decorator to the `get_weather` function to create a Temporal activity. +We then pass this through the `activity_as_tool` helper function to create an OpenAI Agents tool that is passed to the `Agent`. ```python from dataclasses import dataclass @@ -187,14 +288,8 @@ class Weather: @activity.defn async def get_weather(city: str) -> Weather: - """Get weather data from external API.""" - # This API call can fail and will retry automatically - # In a real implementation, you'd call an actual weather API - return Weather( - city=city, - temperature_range="14-20C", - conditions="Sunny with light wind" - ) + """Get the weather for a given city.""" + return Weather(city=city, temperature_range="14-20C", conditions="Sunny with wind.") @workflow.defn class WeatherAgent: @@ -204,353 +299,89 @@ class WeatherAgent: name="Weather Assistant", instructions="You are a helpful weather agent.", tools=[ - calculate_tip, # Simple function tool openai_agents.workflow.activity_as_tool( get_weather, start_to_close_timeout=timedelta(seconds=10) - ) # Activity tool with retry capability + ) ], ) - result = await Runner.run(agent, input=question) + result = await Runner.run(starting_agent=agent, input=question) return result.final_output ``` -Don't forget to register activity tools in your worker: - -```python -# In your worker file -worker = Worker( - client, - task_queue="my-task-queue", - workflows=[WeatherAgent], - activities=[get_weather], # Register your activities 
-) -``` - -### When to Use Each Pattern - -- **Function tools**: Math, string processing, simple logic that can't fail -- **Activity tools**: API calls, database queries, file I/O, anything that might need retries - -Try crashing your worker during an API call. When it restarts, Temporal will retry the failed activity automatically. +## Calling Tools Directly -### Configuring Retry Policies - -You can customize how activities retry when they fail: - -```python -from temporalio.common import RetryPolicy -from datetime import timedelta - -@activity.defn -async def unreliable_api_call(data: str) -> str: - """An API call that might fail and need retries.""" - # Simulate an API that fails sometimes - import random - if random.random() < 0.3: # 30% failure rate - raise Exception("API temporarily unavailable") - return f"Processed: {data}" - -# Use it with custom retry policy -agent = Agent( - name="Resilient Agent", - tools=[ - openai_agents.workflow.activity_as_tool( - unreliable_api_call, - start_to_close_timeout=timedelta(seconds=30), - retry_policy=RetryPolicy( - initial_interval=timedelta(seconds=1), - maximum_interval=timedelta(seconds=10), - maximum_attempts=5, - backoff_coefficient=2.0, - ) - ) - ], -) -``` - -This configuration will: -- Retry up to 5 times -- Start with 1-second delays, doubling each time up to 10 seconds -- Handle transient failures automatically while preserving all agent state - -## Conversations That Survive Crashes - -Agents need memory to have multi-turn conversations. Our integration uses `RunConfig` to maintain conversation state that survives any failure. 
+For simple computations that don't involve external calls you can call the tool directly from the workflow: ```python from temporalio import workflow -from agents import Agent, Runner, RunConfig +from agents import Agent, Runner +from agents import function_tool + +@function_tool +def calculate_circle_area(radius: float) -> float: + """Calculate the area of a circle given its radius.""" + import math + return math.pi * radius ** 2 @workflow.defn -class ConversationAgent: +class MathAssistantAgent: @workflow.run - async def run(self, messages: list[str]) -> str: - # RunConfig maintains conversation state across all interactions - config = RunConfig() - + async def run(self, message: str) -> str: agent = Agent( - name="Customer Support", - instructions="You are a helpful customer support agent. Remember the conversation context.", + name="Math Assistant", + instructions="You are a helpful math assistant. Use the available tools to help with calculations.", + tools=[calculate_circle_area], ) - - # Process each message while maintaining conversation history - result = None - for message in messages: - result = await Runner.run(agent, input=message, run_config=config) - # The conversation history is automatically preserved in config - - return result.final_output if result else "No messages processed" + result = await Runner.run(agent, input=message) + return result.final_output ``` -Usage example: +Note that any tools designed to run in the workflow must respect the workflow execution restrictions, meaning no I/O or non-deterministic operations. +Of course, you can always invoke an activity from the workflow if needed. 
-```python -# Client code -messages = [ - "Hi, I'm having trouble with my order", - "My order number is 12345", - "I need to change the shipping address" -] - -import uuid - -result = await client.execute_workflow( - ConversationAgent.run, - messages, - id=f"customer-conversation-{uuid.uuid4()}", # Use unique IDs in production - task_queue="my-task-queue", -) -``` -### Multi-Step Conversations with State +## Agent Handoffs -For more complex scenarios where you need to build up conversation state over time: +The OpenAI Agents SDK supports agent handoffs, where one agent transfers control of execution to another agent. +In this example, one Temporal workflow wraps the multi-agent system: ```python @workflow.defn -class TravelPlanningAgent: - @workflow.run - async def run(self, destination: str, budget: str, interests: list[str]) -> str: - config = RunConfig() - - agent = Agent( - name="Travel Planner", - instructions="You are a travel planning expert. Build comprehensive itineraries.", - tools=[ - openai_agents.workflow.activity_as_tool(search_flights), - openai_agents.workflow.activity_as_tool(find_hotels), - openai_agents.workflow.activity_as_tool(get_attractions), - ] - ) - - # Step 1: Gather initial preferences - result = await Runner.run( - agent, - input=f"I want to plan a trip to {destination} with a budget of {budget}", - run_config=config +class CustomerServiceWorkflow: + def __init__(self): + self.current_agent = self.init_agents() + + def init_agents(self): + faq_agent = Agent( + name="FAQ Agent", + instructions="Answer frequently asked questions", ) - # Step 2: Refine based on interests - result = await Runner.run( - agent, - input=f"My interests include: {', '.join(interests)}. 
Please find specific recommendations.", - run_config=config + booking_agent = Agent( + name="Booking Agent", + instructions="Help with booking and seat changes", ) - # Step 3: Create final itinerary - result = await Runner.run( - agent, - input="Create a detailed day-by-day itinerary with all the information gathered.", - run_config=config + triage_agent = Agent( + name="Triage Agent", + instructions="Route customers to the right agent", + handoffs=[faq_agent, booking_agent], ) - return result.final_output -``` - -The key insight: All conversation history is automatically preserved, and if anything crashes during this multi-step process, it resumes from the exact step where it failed. - -### Production Workflow IDs - -In the examples above, we use simple workflow IDs like `"my-workflow-id"` for clarity. In production, you need unique IDs to avoid conflicts: - -```python -import uuid -from datetime import datetime - -# Option 1: UUID-based (most common) -workflow_id = f"agent-conversation-{uuid.uuid4()}" - -# Option 2: Include timestamp and context -workflow_id = f"customer-support-{customer_id}-{datetime.now().isoformat()}" - -# Option 3: Business-meaningful IDs -workflow_id = f"travel-planning-{user_id}-{trip_date}" - -result = await client.execute_workflow( - MyAgentWorkflow.run, - "user input", - id=workflow_id, - task_queue="my-task-queue", -) -``` + return triage_agent -Choose IDs that help you identify and debug workflows later. Temporal uses these IDs for deduplication, so the same ID will return the existing workflow result if it's already running or completed. - -## Teams of Agents Working Together - -Complex tasks often need specialist agents working together. One agent can hand off work to another that's better suited for specific tasks. 
- -In Temporal workflows, we need to use factory functions to create agents (this ensures the workflow can recreate identical agents if it needs to recover from a crash): - -```python -from temporalio import workflow -from agents import Agent, Runner - -def create_customer_service_agents(): - """Factory function to create agents for workflow determinism.""" - - faq_agent = Agent( - name="FAQ Agent", - instructions="""You answer frequently asked questions about our company. - If you can't answer a question, transfer to the triage agent.""", - handoff_description="Handles frequently asked questions", - ) - - billing_agent = Agent( - name="Billing Agent", - instructions="""You help customers with billing issues, payment problems, and account questions. - If the issue is not billing-related, transfer to the triage agent.""", - handoff_description="Handles billing and payment issues", - ) - - technical_agent = Agent( - name="Technical Support", - instructions="""You help customers with technical problems and troubleshooting. - If the issue is not technical, transfer to the triage agent.""", - handoff_description="Handles technical support issues", - ) - - triage_agent = Agent( - name="Triage Agent", - instructions="""You are the main customer service agent. Analyze customer requests and - transfer them to the appropriate specialist agent. 
Always be helpful and professional.""", - handoffs=[faq_agent, billing_agent, technical_agent], - ) - - # Set up reverse handoffs so specialists can return to triage - faq_agent.handoffs = [triage_agent] - billing_agent.handoffs = [triage_agent] - technical_agent.handoffs = [triage_agent] - - return triage_agent - -@workflow.defn -class CustomerServiceWorkflow: @workflow.run - async def run(self, customer_message: str, customer_context: dict = None) -> str: - # Create fresh agents for this workflow execution - starting_agent = create_customer_service_agents() - - # Process the customer request + async def run(self, customer_message: str) -> str: result = await Runner.run( - agent=starting_agent, + starting_agent=self.current_agent, input=customer_message, - context=customer_context or {} - ) - - return result.final_output -``` - -### Agent Context Sharing - -Agents can share context information across handoffs: - -```python -from pydantic import BaseModel - -class CustomerContext(BaseModel): - customer_id: str = None - order_number: str = None - issue_type: str = None - priority: str = "normal" - -@workflow.defn -class AdvancedCustomerService: - @workflow.run - async def run(self, message: str, customer_id: str) -> str: - # Initialize shared context - context = CustomerContext(customer_id=customer_id) - - starting_agent = create_customer_service_agents() - - result = await Runner.run( - agent=starting_agent, - input=message, - context=context + context=self.context, ) - return result.final_output ``` -Try crashing the worker during a handoff between agents. When it restarts, the conversation continues with the correct specialist agent, and all context is preserved. - -## Understanding the Trade-offs - -This integration gives you powerful capabilities, but it's important to understand what you gain and what you trade off. - -### What You Gain - -**Complete crash-proof execution**: Your agents will finish their work no matter what fails. 
Process crashes, network issues, server restarts - none of these affect your agent's ability to complete its task. - -**Automatic retries**: Failed API calls, database timeouts, and other transient errors are automatically retried with configurable backoff strategies. - -**Perfect observability**: Every decision your agent makes is recorded. You can see the complete history of what happened, when, and why. - -**Horizontal scaling**: Run multiple worker processes across different machines. Temporal automatically distributes the work. - -**State management**: Conversation history, context, and agent state are automatically preserved and managed. - -### Current Limitations - -**No real-time streaming**: At this point in time, streaming operations like `Runner.run_streamed()` are not supported. Agents complete their full response before returning results. This limitation may be addressed in future versions. - -**No interactive input during execution**: You can't prompt for user input in the middle of a workflow. Design your workflows to take all necessary input as parameters upfront. - -**Slightly more complex setup**: Instead of a single script, you need three files (workflow, worker, client) and a running Temporal server. 
- -### When to Use This Integration - -**Ideal for:** -- Long-running agent workflows that can't afford to lose progress -- Production systems requiring high reliability -- Multi-step agent processes with external API calls -- Systems needing audit trails and observability -- Applications that need to scale across multiple workers - -**Consider alternatives for:** -- Simple, single-shot agent requests -- Real-time interactive applications requiring streaming -- Development/prototyping where setup complexity outweighs benefits -- Applications that require immediate responses without any latency - -## Production Considerations - -For production deployments, see the [Temporal production deployment guide](https://docs.temporal.io/production-deployment) for information about: - -- High availability setup -- Monitoring and alerting -- Security configuration -- Performance tuning -- Scaling strategies ## Additional Examples -You can find more comprehensive examples in the [Temporal Python Samples Repository](https://github.com/temporalio/samples-python/tree/main/openai_agents), including: - -- **Basic Examples**: Simple agent workflows and tool usage -- **Multi-Agent Systems**: Complex handoff patterns and orchestration -- **Advanced Patterns**: State management, error handling, and scaling -- **Real-World Use Cases**: Customer service, data analysis, and workflow automation - -Each example demonstrates different aspects of building production-ready AI agents with durable execution. \ No newline at end of file +You can find additional examples in the [Temporal Python Samples Repository](https://github.com/temporalio/samples-python/tree/main/openai_agents). 
From 878d04da70ead84b642814b404e9600d4ce3816b Mon Sep 17 00:00:00 2001 From: Johann Schleier-Smith Date: Tue, 29 Jul 2025 07:52:31 -0600 Subject: [PATCH 06/15] wip --- temporalio/contrib/openai_agents/README.md | 66 ++++++++++++---------- 1 file changed, 37 insertions(+), 29 deletions(-) diff --git a/temporalio/contrib/openai_agents/README.md b/temporalio/contrib/openai_agents/README.md index b37962efc..96b53332a 100644 --- a/temporalio/contrib/openai_agents/README.md +++ b/temporalio/contrib/openai_agents/README.md @@ -1,30 +1,32 @@ # OpenAI Agents SDK Integration for Temporal -⚠️ **Public Preview** - The interface to this module is subject to change prior to General Availability. We welcome your questions and feedback in the [#python-sdk](https://temporalio.slack.com/archives/CTT84RS0P) Slack channel at [temporalio.slack.com](https://temporalio.slack.com/). +⚠️ **Public Preview** - The interface to this module is subject to change prior to General Availability. +We welcome questions and feedback in the [#python-sdk](https://temporalio.slack.com/archives/CTT84RS0P) Slack channel at [temporalio.slack.com](https://temporalio.slack.com/). ## Introduction This integration combines [OpenAI Agents SDK](https://github.com/openai/openai-agents-python) with [Temporal's durable execution](https://docs.temporal.io/evaluate/understanding-temporal#durable-execution). -It allows you to build AI agents that never lose their progress and handle long-running, asynchronous, and human-in-the-loop workflows with ease. +It allows you to build durable agents that never lose their progress and handle long-running, asynchronous, and human-in-the-loop workflows with production-grade reliability. +Temporal and OpenAI Agents SDK are complementary technologies, both of which contribute to simplifying what it takes to build highly capable, high-quality AI systems. 
Temporal provides a crash-proof system foundation, taking care of the distributed systems challenges inherent to production agentic systems.
 OpenAI Agents SDK offers a lightweight yet powerful framework for defining those agents.
-The combination lets you build reliable agentic systems quickly.
 
 This document is organized as follows:
   - **[Hello World Agent](#hello-world-durable-agent).** Your first durable agent example.
   - **[Background Concepts](#core-concepts).** Background on durable execution and AI agents.
-  - **[Complete Example](#complete-example)** Complete example.
-  - **Usage Guide.**
-  - **Agent Patterns.**
+  - **[Full Example](#full-example)** Complete example.
+  - **[Usage Guide].**
+  [TODO: Complete links]
+
-The [samples repository](https://github.com/temporalio/samples-python/tree/main/openai_agents) contains a number of examples spanning various use cases.
+The [samples repository](https://github.com/temporalio/samples-python/tree/main/openai_agents) contains examples including basic usage, common agent patterns, and more complete samples.
 
 ## Hello World Durable Agent
 
-The code below shows how straightforward it is to wrap an agent wrapped in durable execution.
+The code below shows how to wrap an agent for durable execution.
 
 ### File 1: Durable Agent (`hello_world.py`)
 
@@ -45,20 +47,23 @@ class HelloWorldAgent:
         return result.final_output
 ```
 
+In this example, Temporal provides the durable execution wrapper: the `HelloWorldAgent.run` method.
+The content of that method is regular OpenAI Agents SDK code.
+
 If you are familiar with Temporal and with Open AI Agents SDK, this code will look very familiar.
-We annotate the `HelloWorldAgent` class with `@workflow.defn` to define a workflow, then use the `@workflow.run` annotation to define the entrypoint.
+The `@workflow.defn` annotation on the `HelloWorldAgent` class indicates that it will contain durable execution, and the `@workflow.run` annotation defines the entrypoint.
+We use the `Agent` class from OpenAI Agents SDK to define a simple agent, instructing it to always respond with haikus.
+We then run that agent, using the `Runner` class from OpenAI Agents SDK, passing through `prompt` as an argument.
 
-We use the `Agent` class to define a simple agent, instructing it to always responds with haikus.
-Within the workflow, we start the agent using the `Runner`, as is typical, passing through `prompt` as an argument.
 
-We will [complete this example below](#complete-example).
-However, before digging further into the code, it we will share some more background to set the stage.
+We will [complete this example below](#full-example).
+Before digging further into the code, we will review some background that will make it easier to understand.
 
 ## Background Concepts
 
-We encourage you to form a thorough understanding of AI agents and durable execution with Temporal.
-Understanding this will make it easer to design and build durable agents.
-If you are well versed in these topics, you may skim this section or skip ahead.
+We encourage you to review this section thoroughly to gain a solid understanding of AI agents and durable execution with Temporal.
+This knowledge will make it easier to design and build durable agents.
+If you are already well versed in these topics, feel free to skim this section or skip ahead.
 
 ### AI Agents
 
@@ -74,10 +79,9 @@ We describe each of these briefly:
 
 - *Handoffs*. A handoff occurs when an agent delegates a task to another agent. During a handoff the conversation history remains the same, and passes to a new agent with its own model, instructions, tools.
 - *Context*. This is an overloaded term. Here, context refers to a framework object that is shared across tools and other code, but is not passed to the model.
 
-
-Now, let's look at how these pieces can fit together.
-In one popular pattern, the model receives user input, then performs reasoning to select a tool to call. 
-The response from the tool is fed back into the model, which may perform additional tool calls, iterating until the task is complete. +Now, let's see how these components work together. +In a common pattern, the model first receives user input and then reasons about which tool to invoke. +The tool's response is passed back to the model, which may call additional tools, repeating this loop until the task is complete. The diagram below illustrates this flow. @@ -109,27 +113,31 @@ The diagram below illustrates this flow. again, until task is complete) ``` -Even in a simple example like this, there are many places where something can go wrong. -Tools call APIs that are sometimes down and models have rate limits, requiring retries. +Even in a simple example like this, there are many places where things can go wrong. +Tools call APIs that sometimes fail, while models can encounter rate limits, requiring retries. The longer the agent runs, the more costly it is to start the job over. -In the next section, we turn to durable execution, which can handle such failures seamlessly. +We next describe durable execution, which handles such failures seamlessly. ### Durable Execution In Temporal's durable execution implementation, a program that crashes or encounters an exception while interacting with a model or API will retry until it can successfully complete. -Temporal relies heavily on a replay mechanism to recover from failures. +Temporal relies primarily on a replay mechanism to recover from failures. As the program makes progress, Temporal saves key inputs and decisions, allowing a re-started program to pick up right where it left off. The key to making this work is to separate the applications repeatable (deterministic) and non-repeatable (non-deterministic) parts: -1. Deterministic pieces, termed *workflows*, execute the same way if re-run with the same inputs. -2. 
Non-deterministic pieces, termed *activities*, have no limitations—they may perform I/O and any other operations. +1. Deterministic pieces, termed *workflows*, execute the same way when re-run with the same inputs. +2. Non-deterministic pieces, termed *activities*, can run arbitrary code, performing I/O and any other operations. + +Workflow code can run for extended periods and, if interrupted, resume exactly where it left off. +Activity code faces no restrictions on I/O or external interactions, but if it fails part-way through it restarts from the beginning. -In the AI agent described in the previous section, model and tool calls run in activities, and the control flow linking them together runs in the workflow. +In the AI-agent example above, model invocations and tool calls run inside activities, while the logic that coordinates them lives in the workflow. +This pattern generalizes to more sophisticated agents. +We refer to that coordinating logic as *agent orchestration*. -In more complex examples, the control flow may be described as *agent orchestration*. -Agent orchestration runs within the Temporal workflow, while model calls and any tool calls involving I/O run in activities. +As a general rule, agent orchestration code executes within the Temporal workflow, whereas model calls and any I/O-bound tool invocations execute as Temporal activities. The diagram below shows the overall architecture of an agentic application in Temporal. The Temporal Server is responsible to tracking program execution and making sure associated state is preserved reliably. 
From 9eb2b4a1255423784ce0416f1fe0e84bdcf52e89 Mon Sep 17 00:00:00 2001
From: Johann Schleier-Smith
Date: Tue, 29 Jul 2025 08:10:44 -0600
Subject: [PATCH 07/15] wip

---
 temporalio/contrib/openai_agents/README.md | 66 +++++-----------------
 1 file changed, 15 insertions(+), 51 deletions(-)

diff --git a/temporalio/contrib/openai_agents/README.md b/temporalio/contrib/openai_agents/README.md
index 96b53332a..e2fedc3d4 100644
--- a/temporalio/contrib/openai_agents/README.md
+++ b/temporalio/contrib/openai_agents/README.md
@@ -17,8 +17,8 @@ This document is organized as follows:
   - **[Hello World Agent](#hello-world-durable-agent).** Your first durable agent example.
   - **[Background Concepts](#core-concepts).** Background on durable execution and AI agents.
   - **[Full Example](#full-example)** Complete example.
-  - **[Usage Guide].**
-  [TODO: Complete links]
+  - **[Tool Calling](#tool-calling).**
+  - **[Feature Support](#feature-support).**
 
 The [samples repository](https://github.com/temporalio/samples-python/tree/main/openai_agents) contains examples including basic usage, common agent patterns, and more complete samples.
 
@@ -140,9 +140,8 @@ We refer to that coordinating logic as *agent orchestration*.
 
 As a general rule, agent orchestration code executes within the Temporal workflow, whereas model calls and any I/O-bound tool invocations execute as Temporal activities.
 
 The diagram below shows the overall architecture of an agentic application in Temporal.
-The Temporal Server is responsible to tracking program execution and making sure associated state is preserved reliably.
-Temporal Server manages data in encrypted form.
-All data processing occurs on the Worker, which runs the workflow and activities.
+The Temporal Server is responsible for tracking program execution and making sure associated state is preserved reliably (i.e., stored to a database, possibly replicated across cloud regions).
+Temporal Server manages data in encrypted form, so all data processing occurs on the Worker, which runs the workflow and activities.
 
 ```text
@@ -180,7 +179,7 @@ See the [Temporal documentation](https://docs.temporal.io/evaluate/understanding
 
 ## Complete Example
 
-To make the [Hello World durable agent](#hello-world-durable-agent) available in Temporal, we need to create a worker program.
+To make the [Hello World durable agent](#hello-world-durable-agent) shown earlier available in Temporal, we need to create a worker program.
 To see it run, we also need a client to launch it.
 We show these files below.
 
@@ -273,9 +272,11 @@ We also configure the client with the `OpenAIAgentsPlugin` to ensure serializati
 
 To run this example, see the detailed instructions in the [Temporal Python Samples Repository](https://github.com/temporalio/samples-python/tree/main/openai_agents).
 
-## Using Temporal Activities as OpenAI Agents Tools
+## Tool Calling
 
-One of the powerful features of this integration is the ability to convert Temporal activities into OpenAI Agents tools using `activity_as_tool`.
+### Temporal Activities as OpenAI Agents Tools
+
+One of the powerful features of this integration is the ability to convert Temporal activities into agent tools using `activity_as_tool`.
 This allows your agent to leverage Temporal's durable execution for tool calls.
 
 In the example below, we apply the `@activity.defn` decorator to the `get_weather` function to create a Temporal activity.
@@ -317,9 +318,9 @@ class WeatherAgent:
         return result.final_output
 ```
 
-## Calling Tools Directly
+### Calling OpenAI Agents Tools inside Temporal Workflows
 
-For simple computations that don't involve external calls you can call the tool directly from the workflow:
+For simple computations that don't involve external calls you can call the tool directly from the workflow by using the standard OpenAI Agents SDK `@function_tool` annotation.
```python from temporalio import workflow @@ -345,51 +346,14 @@ class MathAssistantAgent: return result.final_output ``` -Note that any tools designed to run in the workflow must respect the workflow execution restrictions, meaning no I/O or non-deterministic operations. -Of course, you can always invoke an activity from the workflow if needed. +Note that any tools that run in the workflow must respect the workflow execution restrictions, meaning no I/O or non-deterministic operations. +Such function tools are, however, regular Temporal workflow code, from which you can always invoke an activity if needed. -## Agent Handoffs +You can find additional examples in the [Temporal Python Samples Repository](https://github.com/temporalio/samples-python/tree/main/openai_agents). -The OpenAI Agents SDK supports agent handoffs, where one agent transfers control of execution to another agent. -In this example, one Temporal workflow wraps the multi-agent system: -```python -@workflow.defn -class CustomerServiceWorkflow: - def __init__(self): - self.current_agent = self.init_agents() - - def init_agents(self): - faq_agent = Agent( - name="FAQ Agent", - instructions="Answer frequently asked questions", - ) - - booking_agent = Agent( - name="Booking Agent", - instructions="Help with booking and seat changes", - ) - - triage_agent = Agent( - name="Triage Agent", - instructions="Route customers to the right agent", - handoffs=[faq_agent, booking_agent], - ) - - return triage_agent - - @workflow.run - async def run(self, customer_message: str) -> str: - result = await Runner.run( - starting_agent=self.current_agent, - input=customer_message, - context=self.context, - ) - return result.final_output -``` +## Feature Support -## Additional Examples -You can find additional examples in the [Temporal Python Samples Repository](https://github.com/temporalio/samples-python/tree/main/openai_agents). 
From f9ff6b03aa63bc82ede012030f1857b57d1aeca6 Mon Sep 17 00:00:00 2001 From: Johann Schleier-Smith Date: Tue, 29 Jul 2025 08:40:07 -0600 Subject: [PATCH 08/15] wip --- temporalio/contrib/openai_agents/README.md | 109 +++++++++++++++++++-- 1 file changed, 101 insertions(+), 8 deletions(-) diff --git a/temporalio/contrib/openai_agents/README.md b/temporalio/contrib/openai_agents/README.md index e2fedc3d4..a5740115b 100644 --- a/temporalio/contrib/openai_agents/README.md +++ b/temporalio/contrib/openai_agents/README.md @@ -14,12 +14,11 @@ Temporal provides a crash-proof system foundation, taking care of the distribute OpenAI Agents SDK offers a lightweight yet powerful framework for defining those agents. This document is organized as follows: - - **[Hello World Agent](#hello-world-durable-agent).** Your first durable agent example. + - **[Hello World Durable Agent](#hello-world-durable-agent).** Your first durable agent example. - **[Background Concepts](#core-concepts).** Background on durable execution and AI agents. - - **[Full Example](#full-example)** Complete example. - - **[Tool Calling](#tool-calling).** - - **[Feature Support](#feature-support).** - + - **[Full Example](#full-example)** Running the Hello World Durable Agent example. + - **[Tool Calling](#tool-calling).** Calling agent Tools in Temporal. + - **[Feature Support](#feature-support).** Compatibility matrix. The [samples repository](https://github.com/temporalio/samples-python/tree/main/openai_agents) contains examples including basic usage, common agent patterns, and more complete samples. @@ -347,13 +346,107 @@ class MathAssistantAgent: ``` Note that any tools that run in the workflow must respect the workflow execution restrictions, meaning no I/O or non-deterministic operations. +Of course, code running in the workflow can invoke a Temporal activity at any time. -Such function tools are, however, regular Temporal workflow code, from which you can always invoke an activity if needed. 
- -You can find additional examples in the [Temporal Python Samples Repository](https://github.com/temporalio/samples-python/tree/main/openai_agents). +Tools that run in the workflow can also update OpenAI Agents context, which is read-only for tools run as Temporal activities. ## Feature Support +This integration is presently subject to certain limitations. +Streaming and voice agents are not supported. +Certain tools are not suitable for a distributed computing environment, so these have been disabled as well. + +### Model Providers + +| Model Provider | Supported | +|----------------|-----------| +| OpenAI | Yes | +| LiteLLM | Yes | + + +### Model Response format + +| Model Response | Supported | +| --- | --- | +| Get Response | Yes | +| Streaming | No | + + +# Tools + +## Tool Type + +Tools that are not suited to a distributed setting are disabled + +| Tool Type | Supported | +| --- | --- | +| FunctionTool | Yes | +| LocalShellTool | No | +| WebSearchTool | Yes | +| FileSearchTool | Yes | +| HostedMCPTool | Yes | +| ImageGenerationTool | Yes | +| CodeInterpreterTool | Yes | +| ComputerTool | No | + +## Tool Context + +| Context Propagation | Supported | +| --- | --- | +| Activity Tool receives copy of context | Yes | +| Activity Tool can update context | No | +| Function Tool received context | Yes | +| Function Tool can update context | Yes | + +# MCP + +Presently, MCP is supported only via `HostedMCPTool`. +| MCP Class | Supported | +| --- | --- | +| MCPServerStdio | No | +| MCPServerSse | No | +| MCPServerStreamableHttp | No | + +# Guardrails + +| Guardrail Type | Supported | +| --- | --- | +| Code | Yes | +| Agent | Yes | + +# Sessions + +SQLite storage is not suited to a distributed environment. + +| Feature | Supported | +| --- | --- | --- | +| SQLiteSession | No | + +# Tracing + +| Tracing Provider | Supported | Notes | +| --- | --- | --- | +| OpenAI platform | Yes | | + +Generating a comprehensive list of 3rd party tracing providers is out of scope. 
+ +### Voice + +| Mode | Supported | +| --- | --- | +| Voice agents (pipelines) | No | +| Realtime agents | No | + +## Utilities + +| Utility | Supported | +| --- | --- | +| REPL | No | + + +## Additional Examples + +You can find additional examples in the [Temporal Python Samples Repository](https://github.com/temporalio/samples-python/tree/main/openai_agents). From a424941fb309f6aa8332b1dd7f34a7412e3108f5 Mon Sep 17 00:00:00 2001 From: Johann Schleier-Smith Date: Tue, 29 Jul 2025 08:41:33 -0600 Subject: [PATCH 09/15] wip --- temporalio/contrib/openai_agents/README.md | 25 +++++++++++----------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/temporalio/contrib/openai_agents/README.md b/temporalio/contrib/openai_agents/README.md index a5740115b..375f67489 100644 --- a/temporalio/contrib/openai_agents/README.md +++ b/temporalio/contrib/openai_agents/README.md @@ -373,9 +373,9 @@ Certain tools are not suitable for a distributed computing environment, so these | Streaming | No | -# Tools +### Tools -## Tool Type +#### Tool Type Tools that are not suited to a distributed setting are disabled @@ -390,7 +390,7 @@ Tools that are not suited to a distributed setting are disabled | CodeInterpreterTool | Yes | | ComputerTool | No | -## Tool Context +#### Tool Context | Context Propagation | Supported | | --- | --- | @@ -399,7 +399,7 @@ Tools that are not suited to a distributed setting are disabled | Function Tool received context | Yes | | Function Tool can update context | Yes | -# MCP +### MCP Presently, MCP is supported only via `HostedMCPTool`. @@ -409,14 +409,14 @@ Presently, MCP is supported only via `HostedMCPTool`. | MCPServerSse | No | | MCPServerStreamableHttp | No | -# Guardrails +### Guardrails | Guardrail Type | Supported | | --- | --- | | Code | Yes | | Agent | Yes | -# Sessions +### Sessions SQLite storage is not suited to a distributed environment. @@ -424,26 +424,25 @@ SQLite storage is not suited to a distributed environment. 
| --- | --- | --- | | SQLiteSession | No | -# Tracing +### Tracing | Tracing Provider | Supported | Notes | | --- | --- | --- | | OpenAI platform | Yes | | -Generating a comprehensive list of 3rd party tracing providers is out of scope. -### Voice +### Voice | Mode | Supported | | --- | --- | | Voice agents (pipelines) | No | | Realtime agents | No | -## Utilities +### Utilities -| Utility | Supported | -| --- | --- | -| REPL | No | +| Utility | Supported | Notes | +| --- | --- | --- | +| REPL | No | Not applicable in distributed setting | ## Additional Examples From a5f9403d57fd4ce7ed993dd874e37779b957c73d Mon Sep 17 00:00:00 2001 From: Johann Schleier-Smith Date: Tue, 29 Jul 2025 08:46:21 -0600 Subject: [PATCH 10/15] wip --- temporalio/contrib/openai_agents/README.md | 24 ++++++++++++++-------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/temporalio/contrib/openai_agents/README.md b/temporalio/contrib/openai_agents/README.md index 375f67489..4078e8dae 100644 --- a/temporalio/contrib/openai_agents/README.md +++ b/temporalio/contrib/openai_agents/README.md @@ -367,6 +367,8 @@ Certain tools are not suitable for a distributed computing environment, so these ### Model Response format +This integration does not presently support streaming. + | Model Response | Supported | | --- | --- | | Get Response | Yes | @@ -377,7 +379,7 @@ Certain tools are not suitable for a distributed computing environment, so these #### Tool Type -Tools that are not suited to a distributed setting are disabled +`LocalShellTool` and `ComputerTool` are not suited to a distributed computing setting. | Tool Type | Supported | | --- | --- | @@ -392,6 +394,8 @@ Tools that are not suited to a distributed setting are disabled #### Tool Context +As described in [Tool Calling](#tool-calling), context propagation is read-only when Temporal activities are used as tools. 
+ | Context Propagation | Supported | | --- | --- | | Activity Tool receives copy of context | Yes | @@ -401,7 +405,7 @@ Tools that are not suited to a distributed setting are disabled ### MCP -Presently, MCP is supported only via `HostedMCPTool`. +Presently, MCP is supported only via `HostedMCPTool`, which is implemented from within OpenAI. | MCP Class | Supported | | --- | --- | @@ -421,14 +425,14 @@ Presently, MCP is supported only via `HostedMCPTool`. SQLite storage is not suited to a distributed environment. | Feature | Supported | -| --- | --- | --- | +| --- | --- | | SQLiteSession | No | ### Tracing -| Tracing Provider | Supported | Notes | -| --- | --- | --- | -| OpenAI platform | Yes | | +| Tracing Provider | Supported | +| --- | --- | +| OpenAI platform | Yes | ### Voice @@ -440,9 +444,11 @@ SQLite storage is not suited to a distributed environment. ### Utilities -| Utility | Supported | Notes | -| --- | --- | --- | -| REPL | No | Not applicable in distributed setting | +The REPL utility is not suited to a distributed setting. 
+ +| Utility | Supported | +| --- | --- | +| REPL | No | ## Additional Examples From cf4f750804861020b6aea1044983918d6f8ccb08 Mon Sep 17 00:00:00 2001 From: Johann Schleier-Smith Date: Tue, 29 Jul 2025 08:47:51 -0600 Subject: [PATCH 11/15] wip --- temporalio/contrib/openai_agents/README.md | 85 +++++++++++----------- 1 file changed, 41 insertions(+), 44 deletions(-) diff --git a/temporalio/contrib/openai_agents/README.md b/temporalio/contrib/openai_agents/README.md index 4078e8dae..b3d6f098f 100644 --- a/temporalio/contrib/openai_agents/README.md +++ b/temporalio/contrib/openai_agents/README.md @@ -360,20 +360,18 @@ Certain tools are not suitable for a distributed computing environment, so these ### Model Providers | Model Provider | Supported | -|----------------|-----------| -| OpenAI | Yes | -| LiteLLM | Yes | - +|:--------------|:---------:| +| OpenAI | Yes | +| LiteLLM | Yes | ### Model Response format This integration does not presently support streaming. | Model Response | Supported | -| --- | --- | -| Get Response | Yes | -| Streaming | No | - +|:--------------|:---------:| +| Get Response | Yes | +| Streaming | No | ### Tools @@ -381,74 +379,73 @@ This integration does not presently support streaming. `LocalShellTool` and `ComputerTool` are not suited to a distributed computing setting. -| Tool Type | Supported | -| --- | --- | -| FunctionTool | Yes | -| LocalShellTool | No | -| WebSearchTool | Yes | -| FileSearchTool | Yes | -| HostedMCPTool | Yes | -| ImageGenerationTool | Yes | -| CodeInterpreterTool | Yes | -| ComputerTool | No | +| Tool Type | Supported | +|:-------------------|:---------:| +| FunctionTool | Yes | +| LocalShellTool | No | +| WebSearchTool | Yes | +| FileSearchTool | Yes | +| HostedMCPTool | Yes | +| ImageGenerationTool | Yes | +| CodeInterpreterTool | Yes | +| ComputerTool | No | #### Tool Context As described in [Tool Calling](#tool-calling), context propagation is read-only when Temporal activities are used as tools. 
-| Context Propagation | Supported | -| --- | --- | -| Activity Tool receives copy of context | Yes | -| Activity Tool can update context | No | -| Function Tool received context | Yes | -| Function Tool can update context | Yes | +| Context Propagation | Supported | +|:----------------------------------------|:---------:| +| Activity Tool receives copy of context | Yes | +| Activity Tool can update context | No | +| Function Tool received context | Yes | +| Function Tool can update context | Yes | ### MCP Presently, MCP is supported only via `HostedMCPTool`, which is implemented from within OpenAI. -| MCP Class | Supported | -| --- | --- | -| MCPServerStdio | No | -| MCPServerSse | No | -| MCPServerStreamableHttp | No | +| MCP Class | Supported | +|:-----------------------|:---------:| +| MCPServerStdio | No | +| MCPServerSse | No | +| MCPServerStreamableHttp| No | ### Guardrails | Guardrail Type | Supported | -| --- | --- | -| Code | Yes | -| Agent | Yes | +|:---------------|:---------:| +| Code | Yes | +| Agent | Yes | ### Sessions SQLite storage is not suited to a distributed environment. -| Feature | Supported | -| --- | --- | -| SQLiteSession | No | +| Feature | Supported | +|:---------------|:---------:| +| SQLiteSession | No | ### Tracing | Tracing Provider | Supported | -| --- | --- | -| OpenAI platform | Yes | - +|:-----------------|:---------:| +| OpenAI platform | Yes | ### Voice -| Mode | Supported | -| --- | --- | -| Voice agents (pipelines) | No | -| Realtime agents | No | +| Mode | Supported | +|:------------------------|:---------:| +| Voice agents (pipelines)| No | +| Realtime agents | No | ### Utilities The REPL utility is not suited to a distributed setting. 
| Utility | Supported | -| --- | --- | -| REPL | No | +|:--------|:---------:| +| REPL | No | ## Additional Examples From f431deb7733a274dccfa77447a08b61add04ac95 Mon Sep 17 00:00:00 2001 From: Johann Schleier-Smith Date: Tue, 29 Jul 2025 08:49:50 -0600 Subject: [PATCH 12/15] fix merge issues --- temporalio/bridge/Cargo.lock | 246 +++------------------ tests/contrib/openai_agents/test_openai.py | 1 - 2 files changed, 36 insertions(+), 211 deletions(-) diff --git a/temporalio/bridge/Cargo.lock b/temporalio/bridge/Cargo.lock index c10c45275..30864443d 100644 --- a/temporalio/bridge/Cargo.lock +++ b/temporalio/bridge/Cargo.lock @@ -64,28 +64,6 @@ dependencies = [ "derive_arbitrary", ] -[[package]] -name = "async-stream" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" -dependencies = [ - "async-stream-impl", - "futures-core", - "pin-project-lite", -] - -[[package]] -name = "async-stream-impl" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "async-trait" version = "0.1.88" @@ -109,47 +87,20 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" -[[package]] -name = "axum" -version = "0.7.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f" -dependencies = [ - "async-trait", - "axum-core 0.4.5", - "bytes", - "futures-util", - "http", - "http-body", - "http-body-util", - "itoa", - "matchit 0.7.3", - "memchr", - "mime", - "percent-encoding", - "pin-project-lite", - "rustversion", - "serde", - "sync_wrapper", - "tower 0.5.2", - "tower-layer", - "tower-service", -] - 
[[package]] name = "axum" version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "021e862c184ae977658b36c4500f7feac3221ca5da43e3f25bd04ab6c79a29b5" dependencies = [ - "axum-core 0.5.2", + "axum-core", "bytes", "futures-util", "http", "http-body", "http-body-util", "itoa", - "matchit 0.8.4", + "matchit", "memchr", "mime", "percent-encoding", @@ -157,27 +108,7 @@ dependencies = [ "rustversion", "serde", "sync_wrapper", - "tower 0.5.2", - "tower-layer", - "tower-service", -] - -[[package]] -name = "axum-core" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" -dependencies = [ - "async-trait", - "bytes", - "futures-util", - "http", - "http-body", - "http-body-util", - "mime", - "pin-project-lite", - "rustversion", - "sync_wrapper", + "tower", "tower-layer", "tower-service", ] @@ -867,12 +798,6 @@ version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" -[[package]] -name = "glob" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" - [[package]] name = "governor" version = "0.8.1" @@ -908,19 +833,13 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.9.0", + "indexmap", "slab", "tokio", "tokio-util", "tracing", ] -[[package]] -name = "hashbrown" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" - [[package]] name = "hashbrown" version = "0.14.5" @@ -1187,16 +1106,6 @@ dependencies = [ "icu_properties", ] -[[package]] -name = "indexmap" -version = "1.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" -dependencies = [ - "autocfg", - "hashbrown 0.12.3", -] - [[package]] name = "indexmap" version = "2.9.0" @@ -1387,12 +1296,6 @@ dependencies = [ "regex-automata 0.1.10", ] -[[package]] -name = "matchit" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" - [[package]] name = "matchit" version = "0.8.4" @@ -1541,9 +1444,9 @@ checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" [[package]] name = "opentelemetry" -version = "0.29.1" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e87237e2775f74896f9ad219d26a2081751187eb7c9f5c58dde20a23b95d16c" +checksum = "aaf416e4cb72756655126f7dd7bb0af49c674f4c1b9903e80c009e0c37e552e6" dependencies = [ "futures-core", "futures-sink", @@ -1555,25 +1458,23 @@ dependencies = [ [[package]] name = "opentelemetry-http" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46d7ab32b827b5b495bd90fa95a6cb65ccc293555dcc3199ae2937d2d237c8ed" +checksum = "50f6639e842a97dbea8886e3439710ae463120091e2e064518ba8e716e6ac36d" dependencies = [ "async-trait", "bytes", "http", "opentelemetry", "reqwest", - "tracing", ] [[package]] name = "opentelemetry-otlp" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d899720fe06916ccba71c01d04ecd77312734e2de3467fd30d9d580c8ce85656" +checksum = "dbee664a43e07615731afc539ca60c6d9f1a9425e25ca09c57bc36c87c55852b" dependencies = [ - "futures-core", "http", "opentelemetry", "opentelemetry-http", @@ -1583,45 +1484,31 @@ dependencies = [ "reqwest", "thiserror 2.0.12", "tokio", - "tonic 0.12.3", - "tracing", -] - -[[package]] -name = "opentelemetry-prometheus" -version = "0.29.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "098a71a4430bb712be6130ed777335d2e5b19bc8566de5f2edddfce906def6ab" -dependencies = [ - "once_cell", - "opentelemetry", - "opentelemetry_sdk", - "prometheus", + "tonic", "tracing", ] [[package]] name = "opentelemetry-proto" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c40da242381435e18570d5b9d50aca2a4f4f4d8e146231adb4e7768023309b3" +checksum = "2e046fd7660710fe5a05e8748e70d9058dc15c94ba914e7c4faa7c728f0e8ddc" dependencies = [ "opentelemetry", "opentelemetry_sdk", "prost", - "tonic 0.12.3", + "tonic", ] [[package]] name = "opentelemetry_sdk" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afdefb21d1d47394abc1ba6c57363ab141be19e27cc70d0e422b7f303e4d290b" +checksum = "11f644aa9e5e31d11896e024305d7e3c98a88884d9f8919dbf37a9991bc47a4b" dependencies = [ "futures-channel", "futures-executor", "futures-util", - "glob", "opentelemetry", "percent-encoding", "rand 0.9.1", @@ -1629,7 +1516,6 @@ dependencies = [ "thiserror 2.0.12", "tokio", "tokio-stream", - "tracing", ] [[package]] @@ -1690,7 +1576,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3672b37090dbd86368a4145bc067582552b29c27377cad4e0a306c97f9bd7772" dependencies = [ "fixedbitset", - "indexmap 2.9.0", + "indexmap", ] [[package]] @@ -2295,7 +2181,7 @@ dependencies = [ "tokio", "tokio-rustls", "tokio-util", - "tower 0.5.2", + "tower", "tower-http", "tower-service", "url", @@ -2405,15 +2291,6 @@ dependencies = [ "security-framework", ] -[[package]] -name = "rustls-pemfile" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" -dependencies = [ - "rustls-pki-types", -] - [[package]] name = "rustls-pki-types" version = "1.12.0" @@ -2728,6 +2605,7 @@ dependencies = [ 
"async-trait", "backoff", "base64", + "bytes", "derive_builder", "derive_more", "futures-retry", @@ -2742,8 +2620,8 @@ dependencies = [ "temporal-sdk-core-protos", "thiserror 2.0.12", "tokio", - "tonic 0.12.3", - "tower 0.5.2", + "tonic", + "tower", "tracing", "url", "uuid", @@ -2767,7 +2645,7 @@ dependencies = [ "temporal-sdk-core-protos", "tokio", "tokio-stream", - "tonic 0.13.1", + "tonic", "tracing", "url", ] @@ -2798,7 +2676,6 @@ dependencies = [ "mockall", "opentelemetry", "opentelemetry-otlp", - "opentelemetry-prometheus", "opentelemetry_sdk", "parking_lot", "pid", @@ -2823,7 +2700,7 @@ dependencies = [ "tokio", "tokio-stream", "tokio-util", - "tonic 0.12.3", + "tonic", "tracing", "tracing-subscriber", "url", @@ -2835,7 +2712,6 @@ dependencies = [ name = "temporal-sdk-core-api" version = "0.1.0" dependencies = [ - "anyhow", "async-trait", "derive_builder", "derive_more", @@ -2847,7 +2723,8 @@ dependencies = [ "temporal-sdk-core-protos", "thiserror 2.0.12", "toml", - "tonic 0.12.3", + "tonic", + "tracing", "tracing-core", "url", ] @@ -2868,7 +2745,7 @@ dependencies = [ "serde", "serde_json", "thiserror 2.0.12", - "tonic 0.12.3", + "tonic", "tonic-build", "uuid", ] @@ -3063,7 +2940,7 @@ version = "0.22.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "310068873db2c5b3e7659d2cc35d21855dbafa50d1ce336397c666e3cb08137e" dependencies = [ - "indexmap 2.9.0", + "indexmap", "serde", "serde_spanned", "toml_datetime", @@ -3077,39 +2954,6 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfb942dfe1d8e29a7ee7fcbde5bd2b9a25fb89aa70caea2eba3bee836ff41076" -[[package]] -name = "tonic" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" -dependencies = [ - "async-stream", - "async-trait", - "axum 0.7.9", - "base64", - "bytes", - "h2", - "http", - "http-body", - "http-body-util", - 
"hyper", - "hyper-timeout", - "hyper-util", - "percent-encoding", - "pin-project", - "prost", - "rustls-native-certs", - "rustls-pemfile", - "socket2", - "tokio", - "tokio-rustls", - "tokio-stream", - "tower 0.4.13", - "tower-layer", - "tower-service", - "tracing", -] - [[package]] name = "tonic" version = "0.13.1" @@ -3117,7 +2961,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7e581ba15a835f4d9ea06c55ab1bd4dce26fc53752c69a04aac00703bfb49ba9" dependencies = [ "async-trait", - "axum 0.8.4", + "axum", "base64", "bytes", "h2", @@ -3130,10 +2974,12 @@ dependencies = [ "percent-encoding", "pin-project", "prost", + "rustls-native-certs", "socket2", "tokio", + "tokio-rustls", "tokio-stream", - "tower 0.5.2", + "tower", "tower-layer", "tower-service", "tracing", @@ -3141,9 +2987,9 @@ dependencies = [ [[package]] name = "tonic-build" -version = "0.12.3" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9557ce109ea773b399c9b9e5dca39294110b74f1f342cb347a80d1fce8c26a11" +checksum = "eac6f67be712d12f0b41328db3137e0d0757645d8904b4cb7d51cd9c2279e847" dependencies = [ "prettyplease", "proc-macro2", @@ -3153,26 +2999,6 @@ dependencies = [ "syn", ] -[[package]] -name = "tower" -version = "0.4.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" -dependencies = [ - "futures-core", - "futures-util", - "indexmap 1.9.3", - "pin-project", - "pin-project-lite", - "rand 0.8.5", - "slab", - "tokio", - "tokio-util", - "tower-layer", - "tower-service", - "tracing", -] - [[package]] name = "tower" version = "0.5.2" @@ -3181,7 +3007,7 @@ checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" dependencies = [ "futures-core", "futures-util", - "indexmap 2.9.0", + "indexmap", "pin-project-lite", "slab", "sync_wrapper", @@ -3205,7 +3031,7 @@ dependencies = [ "http-body", "iri-string", 
"pin-project-lite", - "tower 0.5.2", + "tower", "tower-layer", "tower-service", ] @@ -3906,7 +3732,7 @@ dependencies = [ "flate2", "getrandom 0.3.3", "hmac", - "indexmap 2.9.0", + "indexmap", "lzma-rs", "memchr", "pbkdf2", diff --git a/tests/contrib/openai_agents/test_openai.py b/tests/contrib/openai_agents/test_openai.py index 8b8848f52..ed5e1ffa4 100644 --- a/tests/contrib/openai_agents/test_openai.py +++ b/tests/contrib/openai_agents/test_openai.py @@ -543,7 +543,6 @@ async def test_tool_workflow(client: Client, use_local_model: bool): @pytest.mark.parametrize("use_local_model", [True, False]) -@pytest.skip(reason="Not running this now") async def test_nexus_tool_workflow( client: Client, env: WorkflowEnvironment, use_local_model: bool ): From fb5549f406fa3ef2462316316cf99cc7fd8117a8 Mon Sep 17 00:00:00 2001 From: Johann Schleier-Smith Date: Tue, 29 Jul 2025 11:00:05 -0600 Subject: [PATCH 13/15] typos --- temporalio/contrib/openai_agents/README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/temporalio/contrib/openai_agents/README.md b/temporalio/contrib/openai_agents/README.md index b3d6f098f..139844d6c 100644 --- a/temporalio/contrib/openai_agents/README.md +++ b/temporalio/contrib/openai_agents/README.md @@ -50,8 +50,8 @@ In this example, Temporal provides the durable execution wrapper: the `HelloWorl The content of that method, is regular OpenAI Agents SDK code. If you are familiar with Temporal and with Open AI Agents SDK, this code will look very familiar. -We the `@workflow.defn` annotations on the `HelloWorldAgent` indicates that this class will contain durable execution and the `@workflow.run` annotation defines the entrypoint. -We use the `Agent` class from OpenAI Agents SDK to define a simple agent, instructing it to always responds with haikus. +The `@workflow.defn` annotation on the `HelloWorldAgent` indicates that this class will contain durable execution logic. The `@workflow.run` annotation defines the entry point. 
+We use the `Agent` class from OpenAI Agents SDK to define a simple agent, instructing it to always respond with haikus. We then run that agent, using the `Runner` class from OpenAI Agents SDK, passing through `prompt` as an argument. @@ -225,7 +225,7 @@ if __name__ == "__main__": We use the `OpenAIAgentsPlugin` to configure Temporal for use with OpenAI Agents SDK. The plugin automatically handles several important setup tasks: -- Ensures proper serialization by of Pydantic types +- Ensures proper serialization of Pydantic types - Propagates context for [OpenAI Agents tracing](https://openai.github.io/openai-agents-python/tracing/). - Registers an activity for invoking model calls with the Temporal worker. - Configures OpenAI Agents SDK to run model calls as Temporal activities. @@ -441,7 +441,7 @@ SQLite storage is not suited to a distributed environment. ### Utilities -The REPL utility is not suited to a distributed setting. +The REPL utility is not suitable for a distributed setting. | Utility | Supported | |:--------|:---------:| From e973294286367ef9764a4d291b78e014cc563b1f Mon Sep 17 00:00:00 2001 From: Johann Schleier-Smith Date: Tue, 29 Jul 2025 12:10:33 -0600 Subject: [PATCH 14/15] clarify --- temporalio/contrib/openai_agents/README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/temporalio/contrib/openai_agents/README.md b/temporalio/contrib/openai_agents/README.md index 139844d6c..d28ceb7b5 100644 --- a/temporalio/contrib/openai_agents/README.md +++ b/temporalio/contrib/openai_agents/README.md @@ -403,7 +403,8 @@ As described in [Tool Calling](#tool-calling), context propagation is read-only ### MCP -Presently, MCP is supported only via `HostedMCPTool`, which is implemented from within OpenAI. +Presently, MCP is supported only via `HostedMCPTool`, which uses the OpenAI Responses API and cloud MCP client behind it. +The OpenAI Agents SDK also supports MCP clients that run in application code, but this integration does not. 
| MCP Class | Supported | |:-----------------------|:---------:| From 4155b8733a53247ad5256441b4bc12f924225d1c Mon Sep 17 00:00:00 2001 From: Tim Conley Date: Tue, 29 Jul 2025 11:26:14 -0700 Subject: [PATCH 15/15] Revert core change --- temporalio/bridge/sdk-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/temporalio/bridge/sdk-core b/temporalio/bridge/sdk-core index 24a3c23a6..b90202240 160000 --- a/temporalio/bridge/sdk-core +++ b/temporalio/bridge/sdk-core @@ -1 +1 @@ -Subproject commit 24a3c23a6dc8842fddb5be3a52a534b863a01a7c +Subproject commit b90202240e003d05fbfa91846b9a4e0614b04aa7