Skip to content

Commit 20d7022

Browse files
Merge pull request #113 from scaleapi/release-please--branches--main--changes--next
release: 0.4.17
2 parents 7acfc22 + dba5a0b commit 20d7022

File tree

7 files changed

+75
-86
lines changed

7 files changed

+75
-86
lines changed

.release-please-manifest.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,3 @@
11
{
2-
".": "0.4.16"
2+
".": "0.4.17"
33
}

CHANGELOG.md

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,15 @@
11
# Changelog
22

3+
## 0.4.17 (2025-09-29)
4+
5+
Full Changelog: [v0.4.16...v0.4.17](https://github.com/scaleapi/agentex-python/compare/v0.4.16...v0.4.17)
6+
7+
### Chores
8+
9+
* **internal:** codegen related update ([2fdc0e7](https://github.com/scaleapi/agentex-python/commit/2fdc0e75ea3874cf896cdbb119b50a4165b2e942))
10+
* **internal:** version bump ([0a59ad4](https://github.com/scaleapi/agentex-python/commit/0a59ad40b55b3577ef2addcea2fe4c0e4f002d49))
11+
* **internal:** version bump ([6174ef1](https://github.com/scaleapi/agentex-python/commit/6174ef1573a539f5e0f57bc625a67da31311afb6))
12+
313
## 0.4.16 (2025-09-16)
414

515
Full Changelog: [v0.4.15...v0.4.16](https://github.com/scaleapi/agentex-python/compare/v0.4.15...v0.4.16)

pyproject.toml

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[project]
22
name = "agentex-sdk"
3-
version = "0.4.16"
3+
version = "0.4.17"
44
description = "The official Python library for the agentex API"
55
dynamic = ["readme"]
66
license = "Apache-2.0"
@@ -86,7 +86,6 @@ dev-dependencies = [
8686
"dirty-equals>=0.6.0",
8787
"importlib-metadata>=6.7.0",
8888
"rich>=13.7.1",
89-
"nest_asyncio==1.6.0",
9089
"pytest-xdist>=3.6.1",
9190
"debugpy>=1.8.15",
9291
]

src/agentex/_version.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
22

33
__title__ = "agentex"
4-
__version__ = "0.4.16" # x-release-please-version
4+
__version__ = "0.4.17" # x-release-please-version

src/agentex/lib/core/services/adk/providers/openai.py

Lines changed: 55 additions & 35 deletions
Original file line numberDiff line numberDiff line change
@@ -358,7 +358,6 @@ async def run_agent_auto_send(
358358
},
359359
) as span:
360360
heartbeat_if_in_workflow("run agent auto send")
361-
362361
async with mcp_server_context(mcp_server_params, mcp_timeout_seconds) as servers:
363362
tools = [tool.to_oai_function_tool() for tool in tools] if tools else []
364363
handoffs = [Agent(**handoff.model_dump()) for handoff in handoffs] if handoffs else []
@@ -396,12 +395,9 @@ async def run_agent_auto_send(
396395
result = await Runner.run(
397396
starting_agent=agent, input=input_list, previous_response_id=previous_response_id
398397
)
399-
else:
400-
result = await Runner.run(starting_agent=agent, input=input_list)
401-
402-
if span:
403-
span.output = {
404-
"new_items": [
398+
item.raw_item.model_dump()
399+
if isinstance(item.raw_item, BaseModel)
400+
else item.raw_item
405401
item.raw_item.model_dump() if isinstance(item.raw_item, BaseModel) else item.raw_item
406402
for item in result.new_items
407403
],
@@ -431,7 +427,6 @@ async def run_agent_auto_send(
431427

432428
elif item.type == "tool_call_item":
433429
tool_call_item = item.raw_item
434-
435430
# Extract tool call information using the helper method
436431
call_id, tool_name, tool_arguments = self._extract_tool_call_info(tool_call_item)
437432
tool_call_map[call_id] = tool_call_item
@@ -557,9 +552,15 @@ async def run_agent_streamed(
557552
) as span:
558553
heartbeat_if_in_workflow("run agent streamed")
559554

560-
async with mcp_server_context(mcp_server_params, mcp_timeout_seconds) as servers:
555+
async with mcp_server_context(
556+
mcp_server_params, mcp_timeout_seconds
557+
) as servers:
561558
tools = [tool.to_oai_function_tool() for tool in tools] if tools else []
562-
handoffs = [Agent(**handoff.model_dump()) for handoff in handoffs] if handoffs else []
559+
handoffs = (
560+
[Agent(**handoff.model_dump()) for handoff in handoffs]
561+
if handoffs
562+
else []
563+
)
563564
agent_kwargs = {
564565
"name": agent_name,
565566
"instructions": agent_instructions,
@@ -572,7 +573,9 @@ async def run_agent_streamed(
572573
"tool_use_behavior": tool_use_behavior,
573574
}
574575
if model_settings is not None:
575-
agent_kwargs["model_settings"] = model_settings.to_oai_model_settings()
576+
agent_kwargs["model_settings"] = (
577+
model_settings.to_oai_model_settings()
578+
)
576579
if input_guardrails is not None:
577580
agent_kwargs["input_guardrails"] = input_guardrails
578581
if output_guardrails is not None:
@@ -600,7 +603,9 @@ async def run_agent_streamed(
600603
if span:
601604
span.output = {
602605
"new_items": [
603-
item.raw_item.model_dump() if isinstance(item.raw_item, BaseModel) else item.raw_item
606+
item.raw_item.model_dump()
607+
if isinstance(item.raw_item, BaseModel)
608+
else item.raw_item
604609
for item in result.new_items
605610
],
606611
"final_output": result.final_output,
@@ -733,7 +738,6 @@ async def run_agent_streamed_auto_send(
733738
if event.type == "run_item_stream_event":
734739
if event.item.type == "tool_call_item":
735740
tool_call_item = event.item.raw_item
736-
737741
# Extract tool call information using the helper method
738742
call_id, tool_name, tool_arguments = self._extract_tool_call_info(tool_call_item)
739743
tool_call_map[call_id] = tool_call_item
@@ -746,10 +750,12 @@ async def run_agent_streamed_auto_send(
746750
)
747751

748752
# Create tool request using streaming context (immediate completion)
749-
async with self.streaming_service.streaming_task_message_context(
750-
task_id=task_id,
751-
initial_content=tool_request_content,
752-
) as streaming_context:
753+
async with (
754+
self.streaming_service.streaming_task_message_context(
755+
task_id=task_id,
756+
initial_content=tool_request_content,
757+
) as streaming_context
758+
):
753759
# The message has already been persisted, but we still need to send an update
754760
await streaming_context.stream_update(
755761
update=StreamTaskMessageFull(
@@ -775,9 +781,12 @@ async def run_agent_streamed_auto_send(
775781
)
776782

777783
# Create tool response using streaming context (immediate completion)
778-
async with self.streaming_service.streaming_task_message_context(
779-
task_id=task_id, initial_content=tool_response_content
780-
) as streaming_context:
784+
async with (
785+
self.streaming_service.streaming_task_message_context(
786+
task_id=task_id,
787+
initial_content=tool_response_content
788+
) as streaming_context
789+
):
781790
# The message has already been persisted, but we still need to send an update
782791
await streaming_context.stream_update(
783792
update=StreamTaskMessageFull(
@@ -803,10 +812,14 @@ async def run_agent_streamed_auto_send(
803812
),
804813
)
805814
# Open the streaming context
806-
item_id_to_streaming_context[item_id] = await streaming_context.open()
815+
item_id_to_streaming_context[
816+
item_id
817+
] = await streaming_context.open()
807818
unclosed_item_ids.add(item_id)
808819
else:
809-
streaming_context = item_id_to_streaming_context[item_id]
820+
streaming_context = item_id_to_streaming_context[
821+
item_id
822+
]
810823

811824
# Stream the delta through the streaming service
812825
await streaming_context.stream_update(
@@ -836,10 +849,14 @@ async def run_agent_streamed_auto_send(
836849
),
837850
)
838851
# Open the streaming context
839-
item_id_to_streaming_context[item_id] = await streaming_context.open()
852+
item_id_to_streaming_context[
853+
item_id
854+
] = await streaming_context.open()
840855
unclosed_item_ids.add(item_id)
841856
else:
842-
streaming_context = item_id_to_streaming_context[item_id]
857+
streaming_context = item_id_to_streaming_context[
858+
item_id
859+
]
843860

844861
# Stream the summary delta through the streaming service
845862
await streaming_context.stream_update(
@@ -873,10 +890,14 @@ async def run_agent_streamed_auto_send(
873890
),
874891
)
875892
# Open the streaming context
876-
item_id_to_streaming_context[item_id] = await streaming_context.open()
893+
item_id_to_streaming_context[
894+
item_id
895+
] = await streaming_context.open()
877896
unclosed_item_ids.add(item_id)
878897
else:
879-
streaming_context = item_id_to_streaming_context[item_id]
898+
streaming_context = item_id_to_streaming_context[
899+
item_id
900+
]
880901

881902
# Stream the content delta through the streaming service
882903
await streaming_context.stream_update(
@@ -904,7 +925,6 @@ async def run_agent_streamed_auto_send(
904925
# to close the streaming context, but they do!!!
905926
# They output both a ResponseReasoningSummaryTextDoneEvent and a ResponseReasoningSummaryPartDoneEvent
906927
# I have no idea why they do this.
907-
908928
elif isinstance(event.data, ResponseReasoningTextDoneEvent):
909929
# Handle reasoning content text completion
910930
item_id = event.data.item_id
@@ -920,7 +940,9 @@ async def run_agent_streamed_auto_send(
920940

921941
# Finish the streaming context (sends DONE event and updates message)
922942
if item_id in item_id_to_streaming_context:
923-
streaming_context = item_id_to_streaming_context[item_id]
943+
streaming_context = item_id_to_streaming_context[
944+
item_id
945+
]
924946
await streaming_context.close()
925947
if item_id in unclosed_item_ids:
926948
unclosed_item_ids.remove(item_id)
@@ -930,17 +952,17 @@ async def run_agent_streamed_auto_send(
930952
# Create a copy to avoid modifying set during iteration
931953
remaining_items = list(unclosed_item_ids)
932954
for item_id in remaining_items:
933-
if (
934-
item_id in unclosed_item_ids and item_id in item_id_to_streaming_context
935-
): # Check if still unclosed
936-
streaming_context = item_id_to_streaming_context[item_id]
955+
if (item_id in unclosed_item_ids and
956+
item_id in item_id_to_streaming_context): # Check if still unclosed
957+
streaming_context = item_id_to_streaming_context[
958+
item_id
959+
]
937960
await streaming_context.close()
938961
unclosed_item_ids.discard(item_id)
939962

940963
except InputGuardrailTripwireTriggered as e:
941964
# Handle guardrail trigger by sending a rejection message
942965
rejection_message = "I'm sorry, but I cannot process this request due to a guardrail. Please try a different question."
943-
944966
# Try to extract rejection message from the guardrail result
945967
if hasattr(e, "guardrail_result") and hasattr(e.guardrail_result, "output"):
946968
output_info = getattr(e.guardrail_result.output, "output_info", {})
@@ -971,7 +993,6 @@ async def run_agent_streamed_auto_send(
971993
type="full",
972994
),
973995
)
974-
975996
# Re-raise to let the activity handle it
976997
raise
977998

@@ -1009,7 +1030,6 @@ async def run_agent_streamed_auto_send(
10091030
type="full",
10101031
),
10111032
)
1012-
10131033
# Re-raise to let the activity handle it
10141034
raise
10151035

src/agentex/lib/sdk/fastacp/fastacp.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,7 @@
2323

2424
logger = make_logger(__name__)
2525

26+
2627
class FastACP:
2728
"""Factory for creating FastACP instances
2829

tests/test_client.py

Lines changed: 6 additions & 47 deletions
Original file line numberDiff line numberDiff line change
@@ -6,13 +6,10 @@
66
import os
77
import sys
88
import json
9-
import time
109
import asyncio
1110
import inspect
12-
import subprocess
1311
import tracemalloc
1412
from typing import Any, Union, cast
15-
from textwrap import dedent
1613
from unittest import mock
1714
from typing_extensions import Literal
1815

@@ -23,14 +20,17 @@
2320

2421
from agentex import Agentex, AsyncAgentex, APIResponseValidationError
2522
from agentex._types import Omit
23+
from agentex._utils import asyncify
2624
from agentex._models import BaseModel, FinalRequestOptions
2725
from agentex._exceptions import APIStatusError, APITimeoutError, APIResponseValidationError
2826
from agentex._base_client import (
2927
DEFAULT_TIMEOUT,
3028
HTTPX_DEFAULT_TIMEOUT,
3129
BaseClient,
30+
OtherPlatform,
3231
DefaultHttpxClient,
3332
DefaultAsyncHttpxClient,
33+
get_platform,
3434
make_request_options,
3535
)
3636

@@ -1643,50 +1643,9 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
16431643

16441644
assert response.http_request.headers.get("x-stainless-retry-count") == "42"
16451645

1646-
def test_get_platform(self) -> None:
1647-
# A previous implementation of asyncify could leave threads unterminated when
1648-
# used with nest_asyncio.
1649-
#
1650-
# Since nest_asyncio.apply() is global and cannot be un-applied, this
1651-
# test is run in a separate process to avoid affecting other tests.
1652-
test_code = dedent("""
1653-
import asyncio
1654-
import nest_asyncio
1655-
import threading
1656-
1657-
from agentex._utils import asyncify
1658-
from agentex._base_client import get_platform
1659-
1660-
async def test_main() -> None:
1661-
result = await asyncify(get_platform)()
1662-
print(result)
1663-
for thread in threading.enumerate():
1664-
print(thread.name)
1665-
1666-
nest_asyncio.apply()
1667-
asyncio.run(test_main())
1668-
""")
1669-
with subprocess.Popen(
1670-
[sys.executable, "-c", test_code],
1671-
text=True,
1672-
) as process:
1673-
timeout = 10 # seconds
1674-
1675-
start_time = time.monotonic()
1676-
while True:
1677-
return_code = process.poll()
1678-
if return_code is not None:
1679-
if return_code != 0:
1680-
raise AssertionError("calling get_platform using asyncify resulted in a non-zero exit code")
1681-
1682-
# success
1683-
break
1684-
1685-
if time.monotonic() - start_time > timeout:
1686-
process.kill()
1687-
raise AssertionError("calling get_platform using asyncify resulted in a hung process")
1688-
1689-
time.sleep(0.1)
1646+
async def test_get_platform(self) -> None:
1647+
platform = await asyncify(get_platform)()
1648+
assert isinstance(platform, (str, OtherPlatform))
16901649

16911650
async def test_proxy_environment_variables(self, monkeypatch: pytest.MonkeyPatch) -> None:
16921651
# Test that the proxy environment variables are set correctly

0 commit comments

Comments
 (0)