Skip to content

Commit 6737fbc

Browse files
authored
Merge pull request #334 from Scale3-Labs/rohit/S3EN-2778-release-main
Rohit/s3 en 2778 release main
2 parents 0ac2c2e + 76429a2 commit 6737fbc

File tree

10 files changed

+211
-14
lines changed

10 files changed

+211
-14
lines changed
Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,9 @@
1+
from examples.azureopenai_example.completion import chat_completion
from langtrace_python_sdk import with_langtrace_root_span, langtrace

# Initialise tracing once at import time so the example is instrumented.
langtrace.init()


class AzureOpenAIRunner:
    """Runs the Azure OpenAI example under a Langtrace root span."""

    @with_langtrace_root_span("AzureOpenAI")
    def run(self):
        # Delegate to the example's chat-completion helper.
        chat_completion()
Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,22 @@
1+
import os

from langchain_openai import AzureChatOpenAI

from langtrace_python_sdk.utils.with_root_span import with_langtrace_root_span

# Configured once at import time; all three Azure settings are required and
# read from the environment (KeyError if any is missing).
model = AzureChatOpenAI(
    azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],
    azure_deployment=os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"],
    openai_api_version=os.environ["AZURE_OPENAI_API_VERSION"],
)


@with_langtrace_root_span()
def chat_completion():
    """Translate a fixed English sentence to French and print the model reply."""
    system_turn = (
        "system",
        "You are a helpful assistant that translates English to French. Translate the user sentence.",
    )
    user_turn = ("human", "I love programming.")
    print(model.invoke([system_turn, user_turn]))

src/examples/langchain_example/__init__.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22
from langtrace_python_sdk import with_langtrace_root_span
33

44
from .groq_example import groq_basic, groq_streaming
5+
from .langgraph_example_tools import basic_graph_tools
56

67

78
class LangChainRunner:
@@ -10,6 +11,7 @@ def run(self):
1011
basic_app()
1112
rag()
1213
load_and_split()
14+
basic_graph_tools()
1315

1416

1517
class GroqRunner:
Lines changed: 151 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,151 @@
1+
from typing import Annotated
2+
3+
from langchain_anthropic import ChatAnthropic
4+
from langchain_core.messages import HumanMessage
5+
from langchain_core.pydantic_v1 import BaseModel
6+
from typing_extensions import TypedDict
7+
from langchain_core.pydantic_v1 import BaseModel, Field
8+
from langchain_core.tools import Tool
9+
from langgraph.checkpoint.memory import MemorySaver
10+
from langgraph.graph import StateGraph
11+
from langgraph.graph.message import add_messages
12+
from langgraph.prebuilt import ToolNode, tools_condition
13+
from langchain_core.messages import AIMessage, ToolMessage
14+
15+
from langtrace_python_sdk import langtrace
16+
17+
langtrace.init()
18+
19+
# Lookup table for the demo tool: maps n -> the n-th prime, for the few
# values this example supports.
primes = {998: 7901, 999: 7907, 1000: 7919}


# Argument schema for the GetPrime tool: a single integer input.
# (No docstring on purpose — a pydantic model docstring can surface in the
# generated tool schema.)
class PrimeInput(BaseModel):
    n: int = Field()
24+
25+
26+
def is_prime(n: int) -> bool:
    """Return True when *n* is a prime number, False otherwise."""
    # Rule out everything below 2 and even numbers other than 2.
    if n <= 1:
        return False
    if n > 2 and n % 2 == 0:
        return False
    # Trial division by odd candidates up to sqrt(n).
    limit = int(n**0.5) + 1
    return all(n % divisor for divisor in range(3, limit, 2))
33+
34+
35+
def get_prime(n: int, primes: dict = primes) -> str:
    """Look up the *n*-th prime in the table; returns the string 'None' for unknown n."""
    found = primes.get(int(n))
    return str(found)


async def aget_prime(n: int, primes: dict = primes) -> str:
    """Async variant of :func:`get_prime`, used as the tool's coroutine."""
    found = primes.get(int(n))
    return str(found)
41+
42+
43+
# Shared graph state: the running message list (merged with ``add_messages``)
# plus a flag telling the router to detour through the human node.
class State(TypedDict):
    messages: Annotated[list, add_messages]
    # Set by ``chatbot`` when the model asks for human escalation.
    ask_human: bool
47+
48+
49+
# Bound to the LLM as a tool; the docstring below IS the tool description the
# model reads, so its wording must stay stable.
class RequestAssistance(BaseModel):
    """Escalate the conversation to an expert. Use this if you are unable to assist directly or if the user requires support beyond your permissions.

    To use this function, relay the user's 'request' so the expert can provide the right guidance.
    """

    # The user's request, relayed verbatim to the expert.
    request: str
56+
57+
58+
# The chat model; the escalation schema is bound so the model can "call" it.
llm = ChatAnthropic(model="claude-3-haiku-20240307")
# We can bind the llm to a tool definition, a pydantic model, or a json schema
llm_with_tools = llm.bind_tools([RequestAssistance])

# Tools exposed to the graph's ToolNode; sync and async entry points both set.
tools = [
    Tool(
        name="GetPrime",
        func=get_prime,
        description="A tool that returns the `n`th prime number",
        args_schema=PrimeInput,
        coroutine=aget_prime,
    ),
]
70+
71+
72+
def chatbot(state: State):
    """Invoke the tool-bound LLM and flag whether it asked for human help."""
    reply = llm_with_tools.invoke(state["messages"])
    # Escalate only when the model's first tool call is RequestAssistance.
    wants_human = bool(
        reply.tool_calls
        and reply.tool_calls[0]["name"] == RequestAssistance.__name__
    )
    return {"messages": [reply], "ask_human": wants_human}
81+
82+
83+
# Start assembling the graph: the chatbot node plus a ToolNode wrapping the
# example tools. The remaining nodes/edges are added in basic_graph_tools().
graph_builder = StateGraph(State)
graph_builder.add_node("chatbot", chatbot)
graph_builder.add_node("tools", ToolNode(tools=tools))
87+
88+
89+
def create_response(response: str, ai_message: AIMessage):
    """Wrap *response* as a ToolMessage answering *ai_message*'s first tool call."""
    call_id = ai_message.tool_calls[0]["id"]
    return ToolMessage(content=response, tool_call_id=call_id)
94+
95+
96+
def human_node(state: State):
    """Resume after the human interrupt, adding a placeholder reply if needed."""
    last_message = state["messages"][-1]
    appended = []
    # Typically the user updates the state during the interrupt. If they did
    # not (the last message is not already a ToolMessage), include a
    # placeholder ToolMessage so the LLM can continue.
    if not isinstance(last_message, ToolMessage):
        appended = [create_response("No response from human.", last_message)]
    # Append the new messages and clear the escalation flag.
    return {"messages": appended, "ask_human": False}
111+
112+
113+
def select_next_node(state: State):
    """Route to the human node when escalation was requested; otherwise defer
    to LangGraph's standard tools_condition routing."""
    return "human" if state["ask_human"] else tools_condition(state)
118+
119+
120+
def basic_graph_tools():
    """Finish wiring the human-in-the-loop graph and stream one example run."""
    # Complete the module-level builder: human node, conditional router, edges.
    graph_builder.add_node("human", human_node)
    graph_builder.add_conditional_edges(
        "chatbot",
        select_next_node,
        {"human": "human", "tools": "tools", "__end__": "__end__"},
    )
    graph_builder.add_edge("tools", "chatbot")
    graph_builder.add_edge("human", "chatbot")
    graph_builder.set_entry_point("chatbot")

    # Compile with an in-memory checkpointer, pausing before the human node so
    # a person can inject a response during the interrupt.
    checkpointer = MemorySaver()
    graph = graph_builder.compile(
        checkpointer=checkpointer,
        interrupt_before=["human"],
    )

    thread_config = {"configurable": {"thread_id": "1"}}
    user_turn = (
        "user",
        "I'm learning LangGraph. Could you do some research on it for me?",
    )
    event_stream = graph.stream(
        {"messages": [user_turn]},
        thread_config,
        stream_mode="values",
    )
    for event in event_stream:
        if "messages" in event:
            # NOTE(review): evaluated and discarded — looks like a leftover
            # ``.pretty_print()`` call from the upstream LangGraph tutorial;
            # confirm whether the last message should be printed here.
            event["messages"][-1]

src/langtrace_python_sdk/instrumentation/anthropic/patch.py

Lines changed: 15 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,7 @@
1818
from langtrace.trace_attributes import Event, SpanAttributes, LLMSpanAttributes
1919
from langtrace_python_sdk.utils import set_span_attribute
2020
from langtrace_python_sdk.utils.silently_fail import silently_fail
21+
import json
2122

2223
from langtrace_python_sdk.utils.llm import (
2324
StreamWrapper,
@@ -99,15 +100,20 @@ def set_response_attributes(
99100
set_span_attribute(
100101
span, SpanAttributes.LLM_RESPONSE_MODEL, result.model
101102
)
102-
content_item = result.content[0]
103-
completion = [
104-
{
105-
"role": result.role or "assistant",
106-
"content": content_item.text,
107-
"type": content_item.type,
108-
}
109-
]
110-
set_event_completion(span, completion)
103+
if hasattr(result, "content") and result.content[0] is not None:
104+
content = result.content[0]
105+
typ = content.type
106+
role = result.role if result.role else "assistant"
107+
if typ == "tool_result" or typ == "tool_use":
108+
content = content.json() # type: ignore
109+
set_span_attribute(
110+
span, SpanAttributes.LLM_TOOL_RESULTS, json.dumps(content)
111+
)
112+
if typ == "text":
113+
content = result.content[0].text
114+
set_event_completion(
115+
span, [{"type": typ, role: role, content: content}]
116+
)
111117

112118
if (
113119
hasattr(result, "system_fingerprint")

src/langtrace_python_sdk/instrumentation/anthropic/types.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -68,7 +68,6 @@ def __init__(
6868

6969
class ContentItem:
7070
role: str
71-
content: str
7271
text: str
7372
type: str
7473

src/langtrace_python_sdk/utils/llm.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -126,7 +126,7 @@ def get_llm_request_attributes(kwargs, prompts=None, model=None, operation_name=
126126
tools = kwargs.get("tools", None)
127127
return {
128128
SpanAttributes.LLM_OPERATION_NAME: operation_name,
129-
SpanAttributes.LLM_REQUEST_MODEL: model or kwargs.get("model"),
129+
SpanAttributes.LLM_REQUEST_MODEL: model or kwargs.get("model") or "gpt-3.5-turbo",
130130
SpanAttributes.LLM_IS_STREAMING: kwargs.get("stream"),
131131
SpanAttributes.LLM_REQUEST_TEMPERATURE: kwargs.get("temperature"),
132132
SpanAttributes.LLM_TOP_K: top_k,
Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
__version__ = "2.3.5"
1+
__version__ = "2.3.7"

src/run_example.py

Lines changed: 9 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2,13 +2,14 @@
22

33
ENABLED_EXAMPLES = {
44
"anthropic": False,
5+
"azureopenai": True,
56
"chroma": False,
67
"cohere": False,
78
"fastapi": False,
8-
"langchain": False,
9+
"langchain": True,
910
"llamaindex": False,
1011
"hiveagent": False,
11-
"openai": True,
12+
"openai": False,
1213
"perplexity": False,
1314
"pinecone": False,
1415
"qdrant": False,
@@ -110,3 +111,9 @@
110111

111112
print(Fore.BLUE + "Running Mistral example" + Fore.RESET)
112113
MistralRunner().run()
114+
115+
if ENABLED_EXAMPLES["azureopenai"]:
116+
from examples.azureopenai_example import AzureOpenAIRunner
117+
118+
print(Fore.BLUE + "Running Azure OpenAI example" + Fore.RESET)
119+
AzureOpenAIRunner().run()

src/tests/anthropic/test_anthropic.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,7 @@ def test_anthropic(anthropic_client, exporter):
2727
"stream": False,
2828
"max_tokens": 1024,
2929
}
30+
3031
anthropic_client.messages.create(**kwargs)
3132
spans = exporter.get_finished_spans()
3233
completion_span = spans[-1]

0 commit comments

Comments
 (0)