Skip to content

Commit 770de74

Browse files
committed
Merge remote-tracking branch 'upstream/main' into peterj/autogenupdate0424
* upstream/main: Remove `name` field from OpenAI Assistant Message (microsoft#6388) Introduce workbench (microsoft#6340) TEST/change gpt4, gpt4o series to gpt4.1nano (microsoft#6375) update website version (microsoft#6364) Add self-debugging loop to `CodeExecutionAgent` (microsoft#6306) Fix: deserialize model_context in AssistantAgent and SocietyOfMindAgent and CodeExecutorAgent (microsoft#6337) Add azure ai agent (microsoft#6191) Avoid re-registering a message type already registered (microsoft#6354) Added support for exposing GPUs to docker code executor (microsoft#6339) fix: ollama fails when tools use optional args (microsoft#6343) Add an example using autogen-core and FastAPI to create streaming responses (microsoft#6335) FEAT: SelectorGroupChat could use stream inner select_prompt (microsoft#6286) Add experimental notice to canvas (microsoft#6349) DOC: add extensions - autogen-oaiapi and autogen-contextplus (microsoft#6338) fix: ensure serialized messages are passed to LLMStreamStartEvent (microsoft#6344) Generalize Continuous SystemMessage merging via model_info["multiple_system_messages"] instead of `startswith("gemini-")` (microsoft#6345) Agentchat canvas (microsoft#6215) Signed-off-by: Peter Jausovec <[email protected]>
2 parents 2b09598 + f059262 commit 770de74

File tree

59 files changed

+4664
-166
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

59 files changed

+4664
-166
lines changed

.github/ISSUE_TEMPLATE/1-bug_report.yml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -90,6 +90,7 @@ body:
9090
multiple: false
9191
options:
9292
- "Python dev (main branch)"
93+
- "Python 0.5.4"
9394
- "Python 0.5.3"
9495
- "Python 0.5.2"
9596
- "Python 0.5.1"

.github/workflows/docs.yml

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@ jobs:
3333
[
3434
# For main use the workflow target
3535
{ ref: "${{github.ref}}", dest-dir: dev, uv-version: "0.5.13", sphinx-release-override: "dev" },
36-
{ ref: "python-v0.5.3", dest-dir: stable, uv-version: "0.5.13", sphinx-release-override: "stable" },
36+
{ ref: "python-v0.5.4", dest-dir: stable, uv-version: "0.5.13", sphinx-release-override: "stable" },
3737
{ ref: "v0.4.0.post1", dest-dir: "0.4.0", uv-version: "0.5.13", sphinx-release-override: "" },
3838
{ ref: "v0.4.1", dest-dir: "0.4.1", uv-version: "0.5.13", sphinx-release-override: "" },
3939
{ ref: "v0.4.2", dest-dir: "0.4.2", uv-version: "0.5.13", sphinx-release-override: "" },
@@ -47,6 +47,7 @@ jobs:
4747
{ ref: "python-v0.5.1", dest-dir: "0.5.1", uv-version: "0.5.13", sphinx-release-override: "" },
4848
{ ref: "python-v0.5.2", dest-dir: "0.5.2", uv-version: "0.5.13", sphinx-release-override: "" },
4949
{ ref: "python-v0.5.3", dest-dir: "0.5.3", uv-version: "0.5.13", sphinx-release-override: "" },
50+
{ ref: "python-v0.5.4", dest-dir: "0.5.4", uv-version: "0.5.13", sphinx-release-override: "" },
5051
]
5152
steps:
5253
- name: Checkout

docs/switcher.json

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,11 +5,16 @@
55
"url": "/autogen/dev/"
66
},
77
{
8-
"name": "0.5.3 (stable)",
8+
"name": "0.5.4 (stable)",
99
"version": "stable",
1010
"url": "/autogen/stable/",
1111
"preferred": true
1212
},
13+
{
14+
"name": "0.5.3",
15+
"version": "0.5.3",
16+
"url": "/autogen/0.5.3/"
17+
},
1318
{
1419
"name": "0.5.2",
1520
"version": "0.5.2",

python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1337,7 +1337,7 @@ def _from_config(cls, config: AssistantAgentConfig) -> Self:
13371337
model_client=ChatCompletionClient.load_component(config.model_client),
13381338
tools=[BaseTool.load_component(tool) for tool in config.tools] if config.tools else None,
13391339
handoffs=config.handoffs,
1340-
model_context=None,
1340+
model_context=ChatCompletionContext.load_component(config.model_context) if config.model_context else None,
13411341
memory=[Memory.load_component(memory) for memory in config.memory] if config.memory else None,
13421342
description=config.description,
13431343
system_message=config.system_message,

python/packages/autogen-agentchat/src/autogen_agentchat/agents/_code_executor_agent.py

Lines changed: 172 additions & 94 deletions
Large diffs are not rendered by default.

python/packages/autogen-agentchat/src/autogen_agentchat/agents/_society_of_mind_agent.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -286,6 +286,7 @@ def _to_config(self) -> SocietyOfMindAgentConfig:
286286
description=self.description,
287287
instruction=self._instruction,
288288
response_prompt=self._response_prompt,
289+
model_context=self._model_context.dump_component(),
289290
)
290291

291292
@classmethod
@@ -299,4 +300,5 @@ def _from_config(cls, config: SocietyOfMindAgentConfig) -> Self:
299300
description=config.description or cls.DEFAULT_DESCRIPTION,
300301
instruction=config.instruction or cls.DEFAULT_INSTRUCTION,
301302
response_prompt=config.response_prompt or cls.DEFAULT_RESPONSE_PROMPT,
303+
model_context=ChatCompletionContext.load_component(config.model_context) if config.model_context else None,
302304
)

python/packages/autogen-agentchat/src/autogen_agentchat/messages.py

Lines changed: 27 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -433,22 +433,33 @@ def to_text(self) -> str:
433433

434434

435435
class CodeGenerationEvent(BaseAgentEvent):
436-
"""An event signaling code generation for execution."""
436+
"""An event signaling code generation event."""
437+
438+
retry_attempt: int
439+
"Retry number, 0 means first generation"
437440

438441
content: str
439442
"The complete content as string."
440443

441-
type: Literal["CodeGenerationEvent"] = "CodeGenerationEvent"
442-
443444
code_blocks: List[CodeBlock]
445+
"List of code blocks present in content"
446+
447+
type: Literal["CodeGenerationEvent"] = "CodeGenerationEvent"
444448

445449
def to_text(self) -> str:
446450
return self.content
447451

448452

449453
class CodeExecutionEvent(BaseAgentEvent):
450-
type: Literal["CodeExecutionEvent"] = "CodeExecutionEvent"
454+
"""An event signaling code execution event."""
455+
456+
retry_attempt: int
457+
"Retry number, 0 means first execution"
458+
451459
result: CodeResult
460+
"Code Execution Result"
461+
462+
type: Literal["CodeExecutionEvent"] = "CodeExecutionEvent"
452463

453464
def to_text(self) -> str:
454465
return self.result.output
@@ -531,6 +542,18 @@ def to_text(self) -> str:
531542
return str(self.content)
532543

533544

545+
class SelectorEvent(BaseAgentEvent):
546+
"""An event emitted from the `SelectorGroupChat`."""
547+
548+
content: str
549+
"""The content of the event."""
550+
551+
type: Literal["SelectorEvent"] = "SelectorEvent"
552+
553+
def to_text(self) -> str:
554+
return str(self.content)
555+
556+
534557
class MessageFactory:
535558
""":meta private:
536559

python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -73,7 +73,8 @@ def __init__(
7373
for agent in participants:
7474
for message_type in agent.produced_message_types:
7575
try:
76-
if issubclass(message_type, StructuredMessage):
76+
is_registered = self._message_factory.is_registered(message_type) # type: ignore[reportUnknownArgumentType]
77+
if issubclass(message_type, StructuredMessage) and not is_registered:
7778
self._message_factory.register(message_type) # type: ignore[reportUnknownArgumentType]
7879
except TypeError:
7980
# Not a class or not a valid subclassable type (skip)

python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py

Lines changed: 39 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,14 @@
55
from typing import Any, Awaitable, Callable, Dict, List, Mapping, Optional, Sequence, Union, cast
66

77
from autogen_core import AgentRuntime, Component, ComponentModel
8-
from autogen_core.models import AssistantMessage, ChatCompletionClient, ModelFamily, SystemMessage, UserMessage
8+
from autogen_core.models import (
9+
AssistantMessage,
10+
ChatCompletionClient,
11+
CreateResult,
12+
ModelFamily,
13+
SystemMessage,
14+
UserMessage,
15+
)
916
from pydantic import BaseModel
1017
from typing_extensions import Self
1118

@@ -16,6 +23,8 @@
1623
BaseAgentEvent,
1724
BaseChatMessage,
1825
MessageFactory,
26+
ModelClientStreamingChunkEvent,
27+
SelectorEvent,
1928
)
2029
from ...state import SelectorManagerState
2130
from ._base_group_chat import BaseGroupChat
@@ -56,6 +65,7 @@ def __init__(
5665
max_selector_attempts: int,
5766
candidate_func: Optional[CandidateFuncType],
5867
emit_team_events: bool,
68+
model_client_streaming: bool = False,
5969
) -> None:
6070
super().__init__(
6171
name,
@@ -79,6 +89,7 @@ def __init__(
7989
self._max_selector_attempts = max_selector_attempts
8090
self._candidate_func = candidate_func
8191
self._is_candidate_func_async = iscoroutinefunction(self._candidate_func)
92+
self._model_client_streaming = model_client_streaming
8293

8394
async def validate_group_state(self, messages: List[BaseChatMessage] | None) -> None:
8495
pass
@@ -194,7 +205,26 @@ async def _select_speaker(self, roles: str, participants: List[str], history: st
194205
num_attempts = 0
195206
while num_attempts < max_attempts:
196207
num_attempts += 1
197-
response = await self._model_client.create(messages=select_speaker_messages)
208+
if self._model_client_streaming:
209+
chunk: CreateResult | str = ""
210+
async for _chunk in self._model_client.create_stream(messages=select_speaker_messages):
211+
chunk = _chunk
212+
if self._emit_team_events:
213+
if isinstance(chunk, str):
214+
await self._output_message_queue.put(
215+
ModelClientStreamingChunkEvent(content=cast(str, _chunk), source=self._name)
216+
)
217+
else:
218+
assert isinstance(chunk, CreateResult)
219+
assert isinstance(chunk.content, str)
220+
await self._output_message_queue.put(
221+
SelectorEvent(content=chunk.content, source=self._name)
222+
)
223+
# The last chunk must be CreateResult.
224+
assert isinstance(chunk, CreateResult)
225+
response = chunk
226+
else:
227+
response = await self._model_client.create(messages=select_speaker_messages)
198228
assert isinstance(response.content, str)
199229
select_speaker_messages.append(AssistantMessage(content=response.content, source="selector"))
200230
# NOTE: we use all participant names to check for mentions, even if the previous speaker is not allowed.
@@ -281,6 +311,7 @@ class SelectorGroupChatConfig(BaseModel):
281311
# selector_func: ComponentModel | None
282312
max_selector_attempts: int = 3
283313
emit_team_events: bool = False
314+
model_client_streaming: bool = False
284315

285316

286317
class SelectorGroupChat(BaseGroupChat, Component[SelectorGroupChatConfig]):
@@ -311,6 +342,7 @@ class SelectorGroupChat(BaseGroupChat, Component[SelectorGroupChatConfig]):
311342
selection using model. If the function returns an empty list or `None`, `SelectorGroupChat` will raise a `ValueError`.
312343
This function is only used if `selector_func` is not set. The `allow_repeated_speaker` will be ignored if set.
313344
emit_team_events (bool, optional): Whether to emit team events through :meth:`BaseGroupChat.run_stream`. Defaults to False.
345+
model_client_streaming (bool, optional): Whether to use streaming for the model client. (This is useful for reasoning models like QwQ). Defaults to False.
314346
315347
Raises:
316348
ValueError: If the number of participants is less than two or if the selector prompt is invalid.
@@ -453,6 +485,7 @@ def __init__(
453485
candidate_func: Optional[CandidateFuncType] = None,
454486
custom_message_types: List[type[BaseAgentEvent | BaseChatMessage]] | None = None,
455487
emit_team_events: bool = False,
488+
model_client_streaming: bool = False,
456489
):
457490
super().__init__(
458491
participants,
@@ -473,6 +506,7 @@ def __init__(
473506
self._selector_func = selector_func
474507
self._max_selector_attempts = max_selector_attempts
475508
self._candidate_func = candidate_func
509+
self._model_client_streaming = model_client_streaming
476510

477511
def _create_group_chat_manager_factory(
478512
self,
@@ -505,6 +539,7 @@ def _create_group_chat_manager_factory(
505539
self._max_selector_attempts,
506540
self._candidate_func,
507541
self._emit_team_events,
542+
self._model_client_streaming,
508543
)
509544

510545
def _to_config(self) -> SelectorGroupChatConfig:
@@ -518,6 +553,7 @@ def _to_config(self) -> SelectorGroupChatConfig:
518553
max_selector_attempts=self._max_selector_attempts,
519554
# selector_func=self._selector_func.dump_component() if self._selector_func else None,
520555
emit_team_events=self._emit_team_events,
556+
model_client_streaming=self._model_client_streaming,
521557
)
522558

523559
@classmethod
@@ -536,4 +572,5 @@ def _from_config(cls, config: SelectorGroupChatConfig) -> Self:
536572
# if config.selector_func
537573
# else None,
538574
emit_team_events=config.emit_team_events,
575+
model_client_streaming=config.model_client_streaming,
539576
)
Lines changed: 126 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,126 @@
1+
import pytest
2+
from autogen_agentchat.agents import (
3+
AssistantAgent,
4+
CodeExecutorAgent,
5+
SocietyOfMindAgent,
6+
)
7+
from autogen_agentchat.teams import RoundRobinGroupChat
8+
from autogen_core.model_context import (
9+
BufferedChatCompletionContext,
10+
ChatCompletionContext,
11+
HeadAndTailChatCompletionContext,
12+
TokenLimitedChatCompletionContext,
13+
UnboundedChatCompletionContext,
14+
)
15+
from autogen_ext.code_executors.local import LocalCommandLineCodeExecutor
16+
from autogen_ext.models.replay import ReplayChatCompletionClient
17+
18+
19+
@pytest.mark.parametrize(
20+
"model_context_class",
21+
[
22+
UnboundedChatCompletionContext(),
23+
BufferedChatCompletionContext(buffer_size=5),
24+
TokenLimitedChatCompletionContext(model_client=ReplayChatCompletionClient([]), token_limit=5),
25+
HeadAndTailChatCompletionContext(head_size=3, tail_size=2),
26+
],
27+
)
28+
def test_serialize_and_deserialize_model_context_on_assistant_agent(model_context_class: ChatCompletionContext) -> None:
29+
"""Test the serialization and deserialization of the message context on the AssistantAgent."""
30+
agent = AssistantAgent(
31+
name="assistant",
32+
model_client=ReplayChatCompletionClient([]),
33+
description="An assistant agent.",
34+
model_context=model_context_class,
35+
)
36+
37+
# Serialize the agent
38+
serialized_agent = agent.dump_component()
39+
# Deserialize the agent
40+
deserialized_agent = AssistantAgent.load_component(serialized_agent)
41+
42+
# Check that the deserialized agent has the same model context as the original agent
43+
original_model_context = agent.model_context
44+
deserialized_model_context = deserialized_agent.model_context
45+
46+
assert isinstance(original_model_context, type(deserialized_model_context))
47+
assert isinstance(deserialized_model_context, type(original_model_context))
48+
assert original_model_context.dump_component() == deserialized_model_context.dump_component()
49+
50+
51+
@pytest.mark.parametrize(
52+
"model_context_class",
53+
[
54+
UnboundedChatCompletionContext(),
55+
BufferedChatCompletionContext(buffer_size=5),
56+
TokenLimitedChatCompletionContext(model_client=ReplayChatCompletionClient([]), token_limit=5),
57+
HeadAndTailChatCompletionContext(head_size=3, tail_size=2),
58+
],
59+
)
60+
def test_serialize_and_deserialize_model_context_on_society_of_mind_agent(
61+
model_context_class: ChatCompletionContext,
62+
) -> None:
63+
"""Test the serialization and deserialization of the message context on the AssistantAgent."""
64+
agent1 = AssistantAgent(
65+
name="assistant1", model_client=ReplayChatCompletionClient([]), description="An assistant agent."
66+
)
67+
agent2 = AssistantAgent(
68+
name="assistant2", model_client=ReplayChatCompletionClient([]), description="An assistant agent."
69+
)
70+
team = RoundRobinGroupChat(
71+
participants=[agent1, agent2],
72+
)
73+
agent = SocietyOfMindAgent(
74+
name="assistant",
75+
model_client=ReplayChatCompletionClient([]),
76+
description="An assistant agent.",
77+
team=team,
78+
model_context=model_context_class,
79+
)
80+
81+
# Serialize the agent
82+
serialized_agent = agent.dump_component()
83+
# Deserialize the agent
84+
deserialized_agent = SocietyOfMindAgent.load_component(serialized_agent)
85+
86+
# Check that the deserialized agent has the same model context as the original agent
87+
original_model_context = agent.model_context
88+
deserialized_model_context = deserialized_agent.model_context
89+
90+
assert isinstance(original_model_context, type(deserialized_model_context))
91+
assert isinstance(deserialized_model_context, type(original_model_context))
92+
assert original_model_context.dump_component() == deserialized_model_context.dump_component()
93+
94+
95+
@pytest.mark.parametrize(
96+
"model_context_class",
97+
[
98+
UnboundedChatCompletionContext(),
99+
BufferedChatCompletionContext(buffer_size=5),
100+
TokenLimitedChatCompletionContext(model_client=ReplayChatCompletionClient([]), token_limit=5),
101+
HeadAndTailChatCompletionContext(head_size=3, tail_size=2),
102+
],
103+
)
104+
def test_serialize_and_deserialize_model_context_on_code_executor_agent(
105+
model_context_class: ChatCompletionContext,
106+
) -> None:
107+
"""Test the serialization and deserialization of the message context on the AssistantAgent."""
108+
agent = CodeExecutorAgent(
109+
name="assistant",
110+
code_executor=LocalCommandLineCodeExecutor(),
111+
description="An assistant agent.",
112+
model_context=model_context_class,
113+
)
114+
115+
# Serialize the agent
116+
serialized_agent = agent.dump_component()
117+
# Deserialize the agent
118+
deserialized_agent = CodeExecutorAgent.load_component(serialized_agent)
119+
120+
# Check that the deserialized agent has the same model context as the original agent
121+
original_model_context = agent.model_context
122+
deserialized_model_context = deserialized_agent.model_context
123+
124+
assert isinstance(original_model_context, type(deserialized_model_context))
125+
assert isinstance(deserialized_model_context, type(original_model_context))
126+
assert original_model_context.dump_component() == deserialized_model_context.dump_component()

0 commit comments

Comments
 (0)