Skip to content

Commit 510e4b8

Browse files
committed
Merge branch 'main' of github.com:Scale3-Labs/langtrace-python-sdk into release
2 parents 04fd825 + 6737fbc commit 510e4b8

File tree

25 files changed

+721
-242
lines changed

25 files changed

+721
-242
lines changed

.vscode/settings.json

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,11 @@
1-
21
{
32
"[python]": {
43
"editor.defaultFormatter": "ms-python.black-formatter",
54
},
65
"editor.formatOnSave": true,
6+
"python.testing.pytestArgs": [
7+
"src"
8+
],
9+
"python.testing.unittestEnabled": false,
10+
"python.testing.pytestEnabled": true,
711
}

mypy.ini

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,16 @@
1+
[mypy]
2+
strict = True
3+
disable_error_code = import-untyped
4+
disallow_untyped_calls = True # Disallow function calls without type annotations
5+
disallow_untyped_defs = True # Disallow defining functions without type annotations
6+
disallow_any_explicit = True # Disallow explicit use of `Any`
7+
disallow_any_generics = True # Disallow generic types without specific type parameters
8+
disallow_incomplete_defs = True # Disallow defining incomplete function signatures
9+
no_implicit_optional = True # Disallow implicitly Optional types
10+
warn_unused_configs = True # Warn about unused configurations
11+
warn_redundant_casts = True # Warn about unnecessary type casts
12+
warn_return_any = True # Warn if a function returns `Any`
13+
warn_unreachable = True # Warn about unreachable code
14+
# Ignore external modules or allow specific imports
15+
follow_imports = skip
16+
ignore_missing_imports = True

src/examples/anthropic_example/completion.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77

88
_ = load_dotenv(find_dotenv())
99

10-
langtrace.init()
10+
langtrace.init(write_spans_to_console=True)
1111

1212

1313
@with_langtrace_root_span("messages_create")
Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,9 @@
1+
from examples.azureopenai_example.completion import chat_completion
from langtrace_python_sdk import with_langtrace_root_span, langtrace

# Initialize tracing once at import time, matching the other example runners.
langtrace.init()


class AzureOpenAIRunner:
    """Example runner that drives the Azure OpenAI chat-completion demo
    under a single root span named "AzureOpenAI"."""

    @with_langtrace_root_span("AzureOpenAI")
    def run(self) -> None:
        chat_completion()
Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,22 @@
1+
import os

from langchain_openai import AzureChatOpenAI

from langtrace_python_sdk.utils.with_root_span import with_langtrace_root_span

# Module-level client so repeated calls reuse one configured model.
# Configuration comes from the environment; a KeyError here means the
# Azure OpenAI deployment variables are not set.
model = AzureChatOpenAI(
    azure_endpoint=os.environ['AZURE_OPENAI_ENDPOINT'],
    azure_deployment=os.environ['AZURE_OPENAI_DEPLOYMENT_NAME'],
    openai_api_version=os.environ['AZURE_OPENAI_API_VERSION'],
)


@with_langtrace_root_span()
def chat_completion():
    """Send one translation prompt through the Azure OpenAI chat model and
    print the raw result."""
    system_message = (
        "system",
        "You are a helpful assistant that translates English to French. Translate the user sentence.",
    )
    user_message = ("human", "I love programming.")
    result = model.invoke([system_message, user_message])
    print(result)

src/examples/langchain_example/__init__.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22
from langtrace_python_sdk import with_langtrace_root_span
33

44
from .groq_example import groq_basic, groq_streaming
5+
from .langgraph_example_tools import basic_graph_tools
56

67

78
class LangChainRunner:
@@ -10,6 +11,7 @@ def run(self):
1011
basic_app()
1112
rag()
1213
load_and_split()
14+
basic_graph_tools()
1315

1416

1517
class GroqRunner:
Lines changed: 151 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,151 @@
1+
from typing import Annotated
2+
3+
from langchain_anthropic import ChatAnthropic
4+
from langchain_core.messages import HumanMessage
5+
from langchain_core.pydantic_v1 import BaseModel
6+
from typing_extensions import TypedDict
7+
from langchain_core.pydantic_v1 import BaseModel, Field
8+
from langchain_core.tools import Tool
9+
from langgraph.checkpoint.memory import MemorySaver
10+
from langgraph.graph import StateGraph
11+
from langgraph.graph.message import add_messages
12+
from langgraph.prebuilt import ToolNode, tools_condition
13+
from langchain_core.messages import AIMessage, ToolMessage
14+
15+
from langtrace_python_sdk import langtrace
16+
17+
langtrace.init()
18+
19+
# Tiny lookup table of the 998th-1000th prime numbers, served by the
# GetPrime tool below.
primes = {998: 7901, 999: 7907, 1000: 7919}


class PrimeInput(BaseModel):
    # Argument schema for the GetPrime tool: `n` is the prime's ordinal index.
    n: int = Field()
24+
25+
26+
def is_prime(n: int) -> bool:
    """Return True iff *n* is prime, by trial division of odd candidates
    up to sqrt(n)."""
    # Reject n <= 1 and every even number other than 2 up front.
    if n <= 1 or (n % 2 == 0 and n > 2):
        return False
    # Prime iff no odd divisor up to sqrt(n) divides n evenly.
    return all(n % candidate != 0 for candidate in range(3, int(n**0.5) + 1, 2))
33+
34+
35+
def get_prime(n: int, primes: dict = primes) -> str:
    """Return the nth prime from the lookup table as a string.

    Missing indices yield the string "None" (str of dict.get's None).
    """
    # int(n) coerces the incoming index in case the caller passes a non-int.
    entry = primes.get(int(n))
    return str(entry)
37+
38+
39+
async def aget_prime(n: int, primes: dict = primes) -> str:
    """Async counterpart of get_prime (wired to the tool's `coroutine` slot)."""
    entry = primes.get(int(n))
    return str(entry)
41+
42+
43+
class State(TypedDict):
    """Graph state shared by all nodes."""

    # Conversation history; annotated with add_messages so updates are merged
    # into the running list instead of replacing it.
    messages: Annotated[list, add_messages]
    # This flag is new: set by chatbot() when the model asks to escalate,
    # and cleared again by human_node().
    ask_human: bool
47+
48+
49+
# NOTE(review): this docstring appears to be used as the tool description
# presented to the model (the class is passed to llm.bind_tools), so its
# wording is part of the prompt — do not edit casually.
class RequestAssistance(BaseModel):
    """Escalate the conversation to an expert. Use this if you are unable to assist directly or if the user requires support beyond your permissions.

    To use this function, relay the user's 'request' so the expert can provide the right guidance.
    """

    # The user's request, relayed verbatim for the human expert.
    request: str
56+
57+
58+
llm = ChatAnthropic(model="claude-3-haiku-20240307")
# We can bind the llm to a tool definition, a pydantic model, or a json schema
llm_with_tools = llm.bind_tools([RequestAssistance])
tools = [
    Tool(
        name="GetPrime",
        func=get_prime,
        description="A tool that returns the `n`th prime number",
        args_schema=PrimeInput,
        # aget_prime is the async implementation of the same lookup.
        coroutine=aget_prime,
    ),
]
70+
71+
72+
def chatbot(state: State):
    """LLM node: run the tool-bound model over the history.

    Sets ask_human when the model's first tool call is a RequestAssistance
    invocation, which the router uses to send the graph to the human node.
    """
    response = llm_with_tools.invoke(state["messages"])
    calls = response.tool_calls
    wants_human = bool(calls) and calls[0]["name"] == RequestAssistance.__name__
    return {"messages": [response], "ask_human": wants_human}
81+
82+
83+
# Start assembling the graph: the chatbot node talks to the LLM, the tools
# node executes any non-escalation tool calls it produces. The remaining
# nodes/edges are added in basic_graph_tools().
graph_builder = StateGraph(State)

graph_builder.add_node("chatbot", chatbot)
graph_builder.add_node("tools", ToolNode(tools=tools))
87+
88+
89+
def create_response(response: str, ai_message: AIMessage):
    """Package a plain-text answer as the ToolMessage replying to the first
    tool call on *ai_message*."""
    first_call_id = ai_message.tool_calls[0]["id"]
    return ToolMessage(content=response, tool_call_id=first_call_id)
94+
95+
96+
def human_node(state: State):
    """Human-in-the-loop node (the graph interrupts before entering it).

    If the user did not inject a ToolMessage while the graph was paused,
    append a placeholder reply so the LLM can continue; in all cases clear
    the ask_human flag.
    """
    last_message = state["messages"][-1]
    appended = []
    if not isinstance(last_message, ToolMessage):
        # Typically, the user will have updated the state during the
        # interrupt. If they chose not to, answer the pending tool call with
        # a placeholder so the conversation can proceed.
        appended.append(create_response("No response from human.", last_message))
    return {"messages": appended, "ask_human": False}
111+
112+
113+
def select_next_node(state: State):
    """Conditional router for the chatbot node: escalate to "human" when the
    model requested it, otherwise defer to the standard tools/END routing."""
    return "human" if state["ask_human"] else tools_condition(state)
118+
119+
120+
def basic_graph_tools():
    """Finish wiring the escalation graph, compile it with an interrupt
    before the human node, and stream one example conversation through it."""
    # Complete the topology started at module level.
    graph_builder.add_node("human", human_node)
    graph_builder.add_conditional_edges(
        "chatbot",
        select_next_node,
        {"human": "human", "tools": "tools", "__end__": "__end__"},
    )
    graph_builder.add_edge("tools", "chatbot")
    graph_builder.add_edge("human", "chatbot")
    graph_builder.set_entry_point("chatbot")

    # A checkpointer is required for the interrupt/resume flow.
    checkpointer = MemorySaver()
    graph = graph_builder.compile(
        checkpointer=checkpointer,
        interrupt_before=["human"],
    )

    thread_config = {"configurable": {"thread_id": "1"}}
    event_stream = graph.stream(
        {
            "messages": [
                (
                    "user",
                    "I'm learning LangGraph. Could you do some research on it for me?",
                )
            ]
        },
        thread_config,
        stream_mode="values",
    )
    for event in event_stream:
        if "messages" in event:
            # NOTE(review): this access is a no-op as written — it looks like
            # it was meant to display the latest message (e.g. via
            # .pretty_print()); confirm the intent.
            event["messages"][-1]

src/examples/openai_example/__init__.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,3 +20,4 @@ def run(self):
2020
chat_completion_example()
2121
embeddings_create_example()
2222
function_example()
23+
image_edit()

src/langtrace_python_sdk/instrumentation/anthropic/instrumentation.py

Lines changed: 10 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -16,38 +16,39 @@
1616

1717
import importlib.metadata
1818
import logging
19-
from typing import Collection
19+
from typing import Collection, Any
2020

2121
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
22+
from opentelemetry.trace import TracerProvider
2223
from opentelemetry.trace import get_tracer
2324
from wrapt import wrap_function_wrapper
24-
25+
from typing import Any
2526
from langtrace_python_sdk.instrumentation.anthropic.patch import messages_create
2627

2728
logging.basicConfig(level=logging.FATAL)
2829

2930

30-
class AnthropicInstrumentation(BaseInstrumentor):  # type: ignore[misc]
    """
    The AnthropicInstrumentation class represents the Anthropic instrumentation.
    """

    def instrumentation_dependencies(self) -> Collection[str]:
        """Return the package constraint this instrumentation supports."""
        return ["anthropic >= 0.19.1"]

    def _instrument(self, **kwargs: Any) -> None:
        """Wrap anthropic's Messages.create so each call emits a trace span.

        Note: ``**kwargs: Any`` annotates each keyword *value*; the previous
        ``dict[str, Any]`` incorrectly claimed every value is itself a dict.
        """
        tracer_provider: TracerProvider = kwargs.get("tracer_provider")  # type: ignore
        tracer = get_tracer(__name__, "", tracer_provider)
        version = importlib.metadata.version("anthropic")

        wrap_function_wrapper(
            "anthropic.resources.messages",
            "Messages.create",
            messages_create(version, tracer),
        )

    def _instrument_module(self, module_name: str) -> None:
        # Per-module instrumentation is not needed; the wrap above is global.
        pass

    def _uninstrument(self, **kwargs: Any) -> None:
        pass

0 commit comments

Comments (0)