Skip to content

Commit 77024fb

Browse files
authored
Support Autogen (#242)
* autogen kickstart * enhance autogen * run autogen * finish autogen * add readme * disable cache
1 parent c9ce384 commit 77024fb

File tree

12 files changed

+321
-28
lines changed

12 files changed

+321
-28
lines changed

README.md

Lines changed: 25 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -242,30 +242,31 @@ By default, prompt and completion data are captured. If you would like to opt ou
242242

243243
Langtrace automatically captures traces from the following vendors:
244244

245-
| Vendor | Type | Typescript SDK | Python SDK |
246-
| ------------ | --------------- | ------------------ | ------------------------------- |
247-
| OpenAI | LLM | :white_check_mark: | :white_check_mark: |
248-
| Anthropic | LLM | :white_check_mark: | :white_check_mark: |
249-
| Azure OpenAI | LLM | :white_check_mark: | :white_check_mark: |
250-
| Cohere | LLM | :white_check_mark: | :white_check_mark: |
251-
| Groq | LLM | :x: | :white_check_mark: |
252-
| Perplexity | LLM | :white_check_mark: | :white_check_mark: |
253-
| Gemini | LLM | :x: | :white_check_mark: |
254-
| Mistral | LLM | :x: | :white_check_mark: |
255-
| Langchain | Framework | :x: | :white_check_mark: |
256-
| LlamaIndex | Framework | :white_check_mark: | :white_check_mark: |
257-
| Langgraph | Framework | :x: | :white_check_mark: |
258-
| DSPy | Framework | :x: | :white_check_mark: |
259-
| CrewAI | Framework | :x: | :white_check_mark: |
260-
| Ollama | Framework | :x: | :white_check_mark: |
261-
| VertexAI | Framework | :x: | :white_check_mark: |
262-
| Vercel AI SDK| Framework | :white_check_mark: | :x: |
263-
| EmbedChain | Framework | :x: | :white_check_mark: |
264-
| Pinecone | Vector Database | :white_check_mark: | :white_check_mark: |
265-
| ChromaDB | Vector Database | :white_check_mark: | :white_check_mark: |
266-
| QDrant | Vector Database | :white_check_mark: | :white_check_mark: |
267-
| Weaviate | Vector Database | :white_check_mark: | :white_check_mark: |
268-
| PGVector | Vector Database | :white_check_mark: | :white_check_mark: (SQLAlchemy) |
245+
| Vendor | Type | Typescript SDK | Python SDK |
246+
| ------------- | --------------- | ------------------ | ------------------------------- |
247+
| OpenAI | LLM | :white_check_mark: | :white_check_mark: |
248+
| Anthropic | LLM | :white_check_mark: | :white_check_mark: |
249+
| Azure OpenAI | LLM | :white_check_mark: | :white_check_mark: |
250+
| Cohere | LLM | :white_check_mark: | :white_check_mark: |
251+
| Groq | LLM | :x: | :white_check_mark: |
252+
| Perplexity | LLM | :white_check_mark: | :white_check_mark: |
253+
| Gemini | LLM | :x: | :white_check_mark: |
254+
| Mistral | LLM | :x: | :white_check_mark: |
255+
| Langchain | Framework | :x: | :white_check_mark: |
256+
| LlamaIndex | Framework | :white_check_mark: | :white_check_mark: |
257+
| Langgraph | Framework | :x: | :white_check_mark: |
258+
| DSPy | Framework | :x: | :white_check_mark: |
259+
| CrewAI | Framework | :x: | :white_check_mark: |
260+
| Ollama | Framework | :x: | :white_check_mark: |
261+
| VertexAI | Framework | :x: | :white_check_mark: |
262+
| Vercel AI SDK | Framework | :white_check_mark: | :x: |
263+
| EmbedChain | Framework | :x: | :white_check_mark: |
264+
| Autogen | Framework | :x: | :white_check_mark: |
265+
| Pinecone | Vector Database | :white_check_mark: | :white_check_mark: |
266+
| ChromaDB | Vector Database | :white_check_mark: | :white_check_mark: |
267+
| QDrant | Vector Database | :white_check_mark: | :white_check_mark: |
268+
| Weaviate | Vector Database | :white_check_mark: | :white_check_mark: |
269+
| PGVector | Vector Database | :white_check_mark: | :white_check_mark: (SQLAlchemy) |
269270

270271
---
271272

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,8 @@
1+
from .main import main as autogen_main
2+
from .main import comedy_show
3+
4+
5+
class AutoGenRunner:
    """Entry point that kicks off the example Autogen scenario."""

    def run(self):
        # The single-agent demo (autogen_main) remains available in main.py;
        # this runner currently exercises the two-agent comedy_show scenario.
        comedy_show()
Lines changed: 72 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,72 @@
1+
from langtrace_python_sdk import langtrace
2+
from autogen import ConversableAgent
3+
from dotenv import load_dotenv
4+
from autogen.coding import LocalCommandLineCodeExecutor
5+
import tempfile
6+
7+
8+
# Load API keys (e.g. OPENAI_API_KEY) from a local .env file before init.
load_dotenv()
# Initialize Langtrace; spans are exported rather than printed to console.
langtrace.init(write_spans_to_console=False)
# agentops.init(api_key=os.getenv("AGENTOPS_API_KEY"))
# Create a temporary directory to store the code files.
temp_dir = tempfile.TemporaryDirectory()


# Create a local command line code executor.
executor = LocalCommandLineCodeExecutor(
    timeout=10,  # Timeout for each code execution in seconds.
    work_dir=temp_dir.name,  # Use the temporary directory to store the code files.
)
20+
21+
22+
def main():
    """Ask a single gpt-4 agent for a joke and return its reply."""
    chatbot = ConversableAgent(
        "chatbot",
        # cache_seed=None disables response caching so every run hits the model.
        llm_config={"config_list": [{"model": "gpt-4"}], "cache_seed": None},
        code_execution_config=False,  # no code execution for this agent
        function_map=None,  # no callable tools registered
        human_input_mode="NEVER",  # fully automated; never prompt a human
    )
    return chatbot.generate_reply(
        messages=[{"content": "Tell me a joke.", "role": "user"}]
    )
36+
37+
38+
def comedy_show():
    """Run a scripted two-agent comedy exchange and return the chat result."""
    # Cathy uses the module-level local command-line executor for code blocks.
    cathy = ConversableAgent(
        name="cathy",
        system_message="Your name is Cathy and you are a part of a duo of comedians.",
        llm_config={
            "config_list": [{"model": "gpt-4o-mini", "temperature": 0.9}],
            "cache_seed": None,  # disable Autogen's response cache
        },
        description="Cathy is a comedian",
        max_consecutive_auto_reply=10,
        code_execution_config={"executor": executor},
        function_map=None,
        chat_messages=None,
        silent=True,
        default_auto_reply="Sorry, I don't know what to say.",
        human_input_mode="NEVER",  # fully automated; never prompt a human
    )

    # Joe opens the conversation, configured with a slightly lower temperature.
    joe = ConversableAgent(
        "joe",
        system_message="Your name is Joe and you are a part of a duo of comedians.",
        llm_config={
            "config_list": [{"model": "gpt-4o-mini", "temperature": 0.7}],
            "cache_seed": None,  # disable Autogen's response cache
        },
        human_input_mode="NEVER",  # fully automated; never prompt a human
    )

    # Two turns is enough for a joke and a reply.
    return joe.initiate_chat(
        recipient=cathy, message="Cathy, tell me a joke.", max_turns=2
    )

src/langtrace_python_sdk/constants/instrumentation/common.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,7 @@
3131
"GEMINI": "Gemini",
3232
"MISTRAL": "Mistral",
3333
"EMBEDCHAIN": "Embedchain",
34+
"AUTOGEN": "Autogen",
3435
}
3536

3637
LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY = "langtrace_additional_attributes"

src/langtrace_python_sdk/instrumentation/__init__.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414
from .weaviate import WeaviateInstrumentation
1515
from .ollama import OllamaInstrumentor
1616
from .dspy import DspyInstrumentation
17+
from .autogen import AutogenInstrumentation
1718
from .vertexai import VertexAIInstrumentation
1819
from .gemini import GeminiInstrumentation
1920
from .mistral import MistralInstrumentation
@@ -37,6 +38,7 @@
3738
"WeaviateInstrumentation",
3839
"OllamaInstrumentor",
3940
"DspyInstrumentation",
41+
"AutogenInstrumentation",
4042
"VertexAIInstrumentation",
4143
"GeminiInstrumentation",
4244
"MistralInstrumentation",
Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
from .instrumentation import AutogenInstrumentation
2+
3+
__all__ = ["AutogenInstrumentation"]
Lines changed: 42 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,42 @@
1+
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
2+
from opentelemetry.trace import get_tracer
3+
from wrapt import wrap_function_wrapper as _W
4+
from importlib_metadata import version as v
5+
from .patch import patch_generate_reply, patch_initiate_chat
6+
7+
8+
class AutogenInstrumentation(BaseInstrumentor):
    """Instrument the ``autogen`` package so agent chats emit Langtrace spans.

    Wraps ``ConversableAgent.initiate_chat`` and
    ``ConversableAgent.generate_reply`` with the tracing patches from
    ``.patch``.
    """

    def instrumentation_dependencies(self):
        # Minimum autogen version this instrumentation supports.
        return ["autogen >= 0.1.0"]

    def _instrument(self, **kwargs):
        """Apply the wrapt patches. Never raises into the host application."""
        tracer_provider = kwargs.get("tracer_provider")
        tracer = get_tracer(__name__, "", tracer_provider)
        version = v("autogen")
        # Candidate future wrap targets:
        # conversable_agent.initiate_chat
        # conversable_agent.register_function
        # agent.Agent
        # AgentCreation
        # Tools --> register_for_llm, register_for_execution, register_for_function
        try:
            _W(
                module="autogen.agentchat.conversable_agent",
                name="ConversableAgent.initiate_chat",
                wrapper=patch_initiate_chat(
                    "conversable_agent.initiate_chat", version, tracer
                ),
            )

            _W(
                module="autogen.agentchat.conversable_agent",
                name="ConversableAgent.generate_reply",
                wrapper=patch_generate_reply(
                    "conversable_agent.generate_reply", version, tracer
                ),
            )
        except Exception:
            # Deliberately best-effort: a patching failure must not break the
            # host application, but record it for debugging instead of
            # swallowing it silently.
            import logging

            logging.getLogger(__name__).debug(
                "Failed to instrument autogen", exc_info=True
            )

    def _uninstrument(self, **kwargs):
        # Patches are not removed; nothing to undo yet.
        pass
Lines changed: 132 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,132 @@
1+
from langtrace_python_sdk.utils.llm import (
2+
get_langtrace_attributes,
3+
get_extra_attributes,
4+
get_span_name,
5+
set_span_attributes,
6+
get_llm_request_attributes,
7+
set_event_completion,
8+
set_usage_attributes,
9+
)
10+
from langtrace_python_sdk.constants.instrumentation.common import SERVICE_PROVIDERS
11+
from langtrace.trace_attributes import FrameworkSpanAttributes
12+
from opentelemetry.trace.status import Status, StatusCode
13+
from langtrace.trace_attributes import SpanAttributes
14+
from opentelemetry.trace import Tracer, SpanKind
15+
16+
from langtrace_python_sdk.utils import deduce_args_and_kwargs, set_span_attribute
17+
import json
18+
19+
20+
def patch_initiate_chat(name, version, tracer: Tracer):
    """Return a wrapt wrapper tracing ``ConversableAgent.initiate_chat``.

    Records framework span attributes (sender, recipient, call parameters)
    and re-raises any error after marking the span as failed.
    """

    def traced_method(wrapped, instance, args, kwargs):
        call_params = deduce_args_and_kwargs(wrapped, *args, **kwargs)
        # Agent objects are not span-attribute friendly; flatten the
        # recipient to a JSON string first.
        call_params["recipient"] = json.dumps(parse_agent(call_params.get("recipient")))
        attrs = {
            **get_langtrace_attributes(
                service_provider=SERVICE_PROVIDERS["AUTOGEN"],
                version=version,
                vendor_type="framework",
            ),
            "sender": json.dumps(parse_agent(instance)),
            **call_params,
        }
        framework_attributes = FrameworkSpanAttributes(**attrs)

        with tracer.start_as_current_span(
            name=get_span_name(name), kind=SpanKind.CLIENT
        ) as span:
            try:
                set_span_attributes(span, framework_attributes)
                return wrapped(*args, **kwargs)
            except Exception as err:
                # Attach the exception, mark the span failed, then re-raise
                # so callers still see the original error.
                span.record_exception(err)
                span.set_status(Status(StatusCode.ERROR, str(err)))
                raise

    return traced_method
54+
55+
56+
def patch_generate_reply(name, version, tracer: Tracer):
    """Return a wrapt wrapper tracing ``ConversableAgent.generate_reply``.

    Builds LLM request attributes from the agent's ``llm_config``; when
    Autogen's own caching is enabled it also records the completion event
    and token-usage attributes on the span.
    """

    def traced_method(wrapped, instance, args, kwargs):

        # NOTE(review): assumes instance.llm_config is a dict with a
        # non-empty "config_list"; agents created without an LLM have
        # llm_config set to False — confirm against callers.
        llm_config = instance.llm_config
        # Fold the first config entry (model, temperature, ...) into kwargs
        # so get_llm_request_attributes can read them. Note that the merged
        # kwargs are also forwarded to the wrapped call below.
        kwargs = {
            **kwargs,
            **llm_config.get("config_list")[0],
        }
        service_provider = SERVICE_PROVIDERS["AUTOGEN"]

        span_attributes = {
            **get_langtrace_attributes(
                version=version,
                service_provider=service_provider,
                vendor_type="framework",
            ),
            **get_llm_request_attributes(
                kwargs,
                prompts=kwargs.get("messages"),
            ),
            **get_extra_attributes(),
        }
        attributes = FrameworkSpanAttributes(**span_attributes)

        with tracer.start_as_current_span(
            name=get_span_name(name), kind=SpanKind.CLIENT
        ) as span:
            try:

                result = wrapped(*args, **kwargs)

                # if caching is disabled, return result as langtrace will instrument the rest.
                if "cache_seed" in llm_config and llm_config.get("cache_seed") is None:
                    return result

                set_span_attributes(span, attributes)
                set_event_completion(span, [{"role": "assistant", "content": result}])
                # NOTE(review): assumes get_total_usage() returns exactly two
                # keys in (total_cost, model) order; a second model in the
                # usage dict would break this unpacking — verify upstream.
                total_cost, response_model = list(instance.get_total_usage().keys())
                set_span_attribute(
                    span, SpanAttributes.LLM_RESPONSE_MODEL, response_model
                )
                set_usage_attributes(
                    span, instance.get_total_usage().get(response_model)
                )

                return result

            except Exception as err:
                # Record the exception in the span
                span.record_exception(err)

                # Set the span status to indicate an error
                span.set_status(Status(StatusCode.ERROR, str(err)))

                # Reraise the exception to ensure it's not swallowed
                raise

    return traced_method
115+
116+
117+
def set_response_attributes(span, result):
    """Copy the chat summary (when present and non-empty) onto *span*."""
    chat_summary = getattr(result, "summary", None)
    if not chat_summary:
        return
    set_span_attribute(span, "autogen.chat.summary", chat_summary)
121+
122+
123+
def parse_agent(agent):
    """Flatten an Autogen agent into a JSON-serializable dict of key fields.

    Missing attributes become None; system_message and llm_config are
    stringified since they may hold non-serializable objects.
    """

    def field(attr, as_str=False):
        value = getattr(agent, attr, None)
        return str(value) if as_str else value

    return {
        "name": field("name"),
        "description": field("description"),
        "system_message": field("system_message", as_str=True),
        "silent": field("silent"),
        "llm_config": field("llm_config", as_str=True),
        "human_input_mode": field("human_input_mode"),
    }

src/langtrace_python_sdk/langtrace.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -53,6 +53,7 @@
5353
OpenAIInstrumentation,
5454
PineconeInstrumentation,
5555
QdrantInstrumentation,
56+
AutogenInstrumentation,
5657
VertexAIInstrumentation,
5758
WeaviateInstrumentation,
5859
)
@@ -141,6 +142,7 @@ def init(
141142
"google-cloud-aiplatform": VertexAIInstrumentation(),
142143
"google-generativeai": GeminiInstrumentation(),
143144
"mistralai": MistralInstrumentation(),
145+
"autogen": AutogenInstrumentation(),
144146
}
145147

146148
init_instrumentations(disable_instrumentations, all_instrumentations)

src/langtrace_python_sdk/utils/__init__.py

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22
from .sdk_version_checker import SDKVersionChecker
33
from opentelemetry.trace import Span
44
from langtrace.trace_attributes import SpanAttributes
5+
import inspect
56
import os
67

78

@@ -28,6 +29,19 @@ def set_event_prompt(span: Span, prompt):
2829
)
2930

3031

32+
def deduce_args_and_kwargs(func, *args, **kwargs):
    """Map a call's positional and keyword arguments onto *func*'s signature.

    Returns a dict of parameter name -> value for the call
    ``func(*args, **kwargs)``, with unsupplied parameters filled in from
    their declared defaults.

    Raises:
        TypeError: if *args*/*kwargs* do not bind to *func*'s signature.
    """
    bound_args = inspect.signature(func).bind(*args, **kwargs)
    bound_args.apply_defaults()
    # After apply_defaults() every key in `arguments` is a parameter of
    # func, so no re-filtering against the signature is needed.
    return dict(bound_args.arguments)
43+
44+
3145
def check_if_sdk_is_outdated():
3246
SDKVersionChecker().check()
3347
return

0 commit comments

Comments
 (0)