Skip to content

Commit d0b445c

Browse files
committed
fix(langextract): bridge ContextTraceEmitter so real agent events flow into sink
Problem ------- The merged langextract integration (#1412, PR #1413) wired LangextractSink to get_default_emitter()/ActionEvent only. That pipeline has just two producers in the entire core SDK (RouterAgent token usage, PlanningAgent plan_created), so a typical single-agent run with '--observe langextract' or 'praisonai langextract render <yaml>' produced an empty HTML file. The base agent runtime (chat_mixin, tool_execution, unified_execution_mixin) actually emits rich lifecycle events via ContextTraceEmitter / ContextTraceSinkProtocol in praisonaiagents.trace.context_events. Fix (wrapper-only, zero core changes) ------------------------------------- - observability/langextract.py: add _ContextToActionBridge adapter that forwards ContextEvent -> ActionEvent (agent_start, tool_call_start/end, llm_response->OUTPUT, agent_end). Expose LangextractSink.context_sink(). - cli/app.py: _setup_langextract_observability now also installs a ContextTraceEmitter wired to the bridge. - cli/commands/langextract.py: 'render' command does the same wiring for the duration of the workflow run. - tests: add 3 regression tests covering the bridge and setup path. Incidental fix -------------- agents_generator.py:1112 had a local 'import os' inside an 'if acp/lsp enabled' block that shadowed the module-level import and caused UnboundLocalError at line 1179 for any YAML run with acp/lsp disabled. Removed the redundant local import. Verification ------------ - 19/19 langextract unit tests pass (16 existing + 3 new). - Real agentic e2e: Agent.start() with --observe langextract now produces non-empty trace.html (3036 B) + trace.jsonl (1356 B) with agent_run and final_output extractions. - 'praisonai langextract render simple.yaml' end-to-end now produces render.html and render.jsonl; previously printed 'Trace was not rendered'.
1 parent a6094f5 commit d0b445c

5 files changed

Lines changed: 204 additions & 3 deletions

File tree

src/praisonai/praisonai/agents_generator.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1109,7 +1109,6 @@ def _run_praisonai(self, config, topic, tools_dict):
11091109
if acp_enabled or lsp_enabled:
11101110
try:
11111111
import asyncio
1112-
import os
11131112
from praisonai.cli.features.interactive_runtime import InteractiveRuntime, RuntimeConfig
11141113
from praisonai.cli.features.agent_tools import create_agent_centric_tools
11151114
import nest_asyncio

src/praisonai/praisonai/cli/app.py

Lines changed: 20 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -63,10 +63,28 @@ def _setup_langextract_observability(*, verbose: bool = False) -> None:
6363
# Ensure sink is closed on exit to write the trace file
6464
atexit.register(sink.close)
6565

66-
# Set up action-level trace emitter
66+
# Set up action-level trace emitter (covers RouterAgent / PlanningAgent)
6767
emitter = TraceEmitter(sink=sink, enabled=True)
6868
set_default_emitter(emitter)
69-
69+
70+
# Bridge the context emitter so regular Agent.start / tool calls / LLM
71+
# responses are captured as well. Without this, typical single-agent
72+
# flows produce an empty trace (no agent_start/end, no tool events).
73+
try:
74+
from praisonaiagents.trace.context_events import (
75+
ContextTraceEmitter,
76+
set_context_emitter,
77+
)
78+
context_emitter = ContextTraceEmitter(
79+
sink=sink.context_sink(),
80+
session_id="praisonai-cli",
81+
enabled=True,
82+
)
83+
set_context_emitter(context_emitter)
84+
except Exception as e: # pragma: no cover - defensive
85+
if verbose:
86+
typer.echo(f"Warning: could not bridge context emitter: {e}", err=True)
87+
7088
except ImportError:
7189
# Gracefully degrade if langextract not installed
7290
if verbose:

src/praisonai/praisonai/cli/commands/langextract.py

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -79,6 +79,22 @@ def render(
7979
# Set up trace emitter for the duration of the run
8080
emitter = TraceEmitter(sink=sink, enabled=True)
8181
set_default_emitter(emitter)
82+
83+
# Also bridge the context emitter so real agent runtime events
84+
# (agent_start/end, tool_call_*, llm_response) are captured.
85+
try:
86+
from praisonaiagents.trace.context_events import (
87+
ContextTraceEmitter,
88+
set_context_emitter,
89+
)
90+
context_emitter = ContextTraceEmitter(
91+
sink=sink.context_sink(),
92+
session_id="praisonai-langextract-render",
93+
enabled=True,
94+
)
95+
set_context_emitter(context_emitter)
96+
except Exception:
97+
pass
8298

8399
try:
84100
# Run the workflow

src/praisonai/praisonai/observability/langextract.py

Lines changed: 93 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,88 @@
2626
)
2727

2828

29+
class _ContextToActionBridge:
    """Forward ``ContextEvent``s into a ``LangextractSink`` as ``ActionEvent``s.

    Satisfies ``ContextTraceSinkProtocol`` by duck typing. The core agent
    runtime (``chat_mixin``, ``tool_execution``, ``unified_execution_mixin``)
    publishes lifecycle events only through ``ContextTraceEmitter``; this
    adapter translates the subset of event types the langextract sink can
    render, so real agent runs appear in the trace without touching the
    core SDK.
    """

    __slots__ = ("_sink",)

    # ContextEventType values handled by this bridge, kept as plain strings
    # so ContextEventType never has to be imported at module load time.
    _CTX_AGENT_START = "agent_start"
    _CTX_AGENT_END = "agent_end"
    _CTX_TOOL_START = "tool_call_start"
    _CTX_TOOL_END = "tool_call_end"
    _CTX_LLM_RESPONSE = "llm_response"

    def __init__(self, sink: "LangextractSink") -> None:
        self._sink = sink

    def emit(self, event: Any) -> None:
        """Translate one duck-typed ``ContextEvent`` and forward it.

        Events whose type is not one of the handled ``_CTX_*`` values are
        silently ignored.
        """
        raw_type = getattr(event, "event_type", None)
        # event_type is an Enum in the core SDK; tolerate bare strings too.
        kind = raw_type.value if hasattr(raw_type, "value") else raw_type
        payload = getattr(event, "data", {}) or {}
        when = getattr(event, "timestamp", 0.0)
        who = getattr(event, "agent_name", None)

        if kind == self._CTX_AGENT_START:
            self._sink.emit(ActionEvent(
                event_type=ActionEventType.AGENT_START.value,
                timestamp=when,
                agent_name=who,
                metadata={"input": payload.get("input") or payload.get("goal") or ""},
            ))
        elif kind == self._CTX_AGENT_END:
            self._sink.emit(ActionEvent(
                event_type=ActionEventType.AGENT_END.value,
                timestamp=when,
                agent_name=who,
                status="ok",
            ))
        elif kind == self._CTX_TOOL_START:
            self._sink.emit(ActionEvent(
                event_type=ActionEventType.TOOL_START.value,
                timestamp=when,
                agent_name=who,
                tool_name=payload.get("tool_name"),
                tool_args=payload.get("arguments"),
            ))
        elif kind == self._CTX_TOOL_END:
            result = payload.get("result")
            # Cap the summary so huge tool outputs don't bloat the trace.
            summary = str(result)[:500] if result is not None else None
            self._sink.emit(ActionEvent(
                event_type=ActionEventType.TOOL_END.value,
                timestamp=when,
                agent_name=who,
                tool_name=payload.get("tool_name"),
                duration_ms=(payload.get("duration_ms") or 0.0),
                status=payload.get("status") or "ok",
                tool_result_summary=summary,
            ))
        elif kind == self._CTX_LLM_RESPONSE:
            # Surface the LLM text as an OUTPUT event so the final response
            # is visible in the rendered HTML.
            text = payload.get("response_content") or payload.get("content") or ""
            self._sink.emit(ActionEvent(
                event_type=ActionEventType.OUTPUT.value,
                timestamp=when,
                agent_name=who,
                tool_result_summary=text,
            ))

    def flush(self) -> None:
        """No-op: events are forwarded to the sink immediately."""
        pass

    def close(self) -> None:
        """No-op: the owning LangextractSink handles render/close."""
        pass
29111
@dataclass
30112
class LangextractSinkConfig:
31113
"""Configuration for the langextract trace sink."""
@@ -61,6 +143,17 @@ def __init__(self, config: Optional[LangextractSinkConfig] = None) -> None:
61143
self._source_text: Optional[str] = None
62144
self._closed = False
63145

146+
# ---- Context-emitter bridge -------------------------------------------
147+
148+
def context_sink(self) -> "_ContextToActionBridge":
149+
"""
150+
Return a ``ContextTraceSinkProtocol`` adapter that forwards core
151+
``ContextEvent``s into this sink as ``ActionEvent``s. Use with
152+
``praisonaiagents.trace.context_events.set_context_emitter`` (or
153+
``trace_context``) to capture real agent runtime events.
154+
"""
155+
return _ContextToActionBridge(self)
156+
64157
# ---- TraceSinkProtocol -------------------------------------------------
65158

66159
def emit(self, event: ActionEvent) -> None:

src/praisonai/tests/unit/test_langextract_sink.py

Lines changed: 75 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -342,5 +342,80 @@ def test_observe_invalid_provider_error(self):
342342
)
343343

344344

345+
class TestLangextractContextBridge:
    """Regression tests for the ContextTraceEmitter bridge.

    The base agent runtime (chat_mixin, tool_execution, unified_execution_mixin)
    emits ``ContextEvent``s only. Without the bridge, a single-agent run
    produces zero events in the langextract sink.
    """

    def test_context_sink_returns_bridge(self):
        from praisonai.observability import LangextractSink

        bridge = LangextractSink().context_sink()
        # The bridge must satisfy ContextTraceSinkProtocol (duck-typed).
        for required in ("emit", "flush", "close"):
            assert hasattr(bridge, required)

    def test_bridge_maps_context_events_to_action_events(self):
        from praisonai.observability import LangextractSink
        from praisonaiagents.trace.context_events import (
            ContextEvent,
            ContextEventType,
        )

        sink = LangextractSink()
        bridge = sink.context_sink()

        # A minimal agent lifecycle: start, one tool call, LLM answer, end.
        lifecycle = [
            (ContextEventType.AGENT_START, 1.0,
             {"input": "Write a haiku"}),
            (ContextEventType.TOOL_CALL_START, 2.0,
             {"tool_name": "search", "arguments": {"q": "x"}}),
            (ContextEventType.TOOL_CALL_END, 3.0,
             {"tool_name": "search", "result": "ok", "duration_ms": 12.0}),
            (ContextEventType.LLM_RESPONSE, 4.0,
             {"response_content": "final haiku"}),
            (ContextEventType.AGENT_END, 5.0, {}),
        ]
        for event_type, ts, data in lifecycle:
            bridge.emit(ContextEvent(
                event_type=event_type,
                timestamp=ts, session_id="s",
                agent_name="writer",
                data=data,
            ))

        seen = {e.event_type for e in sink._events}
        for expected in ("agent_start", "tool_start", "tool_end", "output", "agent_end"):
            assert expected in seen
        assert sink._source_text == "Write a haiku"

    def test_setup_observability_registers_context_emitter(self):
        """`--observe langextract` must install the bridge on the context emitter."""
        import praisonai.cli.app as cli_app
        from praisonaiagents.trace.context_events import get_context_emitter

        cli_app._setup_langextract_observability(verbose=False)
        assert get_context_emitter().enabled, "context emitter should be enabled after setup"
345420
# Allow running this test module directly (outside a pytest invocation).
if __name__ == "__main__":
346421
pytest.main([__file__, "-v"])

0 commit comments

Comments (0)