Skip to content

Commit 31628a7

Browse files
committed
Merge branch 'development' of github.com:Scale3-Labs/langtrace-python-sdk into ali/s3en-2136-instrument-litellm
2 parents b3dbcfa + fc1b186 commit 31628a7

File tree

37 files changed

+1301
-465
lines changed

37 files changed

+1301
-465
lines changed

README.md

Lines changed: 21 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -232,23 +232,27 @@ prompt = get_prompt_from_registry(<Registry ID>, options={"prompt_version": 1, "
232232

233233
Langtrace automatically captures traces from the following vendors:
234234

235-
| Vendor | Type | Typescript SDK | Python SDK |
236-
| ------------ | --------------- | ------------------ | ------------------ |
237-
| OpenAI | LLM | :white_check_mark: | :white_check_mark: |
238-
| Anthropic | LLM | :white_check_mark: | :white_check_mark: |
239-
| Azure OpenAI | LLM | :white_check_mark: | :white_check_mark: |
240-
| Cohere | LLM | :white_check_mark: | :white_check_mark: |
241-
| Groq | LLM | :x: | :white_check_mark: |
242-
| Langchain | Framework | :x: | :white_check_mark: |
243-
| Langgraph | Framework | :x: | :white_check_mark: |
244-
| LlamaIndex | Framework | :white_check_mark: | :white_check_mark: |
245-
| DSPy | Framework | :x: | :white_check_mark: |
246-
| CrewAI | Framework | :x: | :white_check_mark: |
247-
| Ollama | Framework | :x: | :white_check_mark: |
248-
| Pinecone | Vector Database | :white_check_mark: | :white_check_mark: |
249-
| ChromaDB | Vector Database | :white_check_mark: | :white_check_mark: |
250-
| Weaviate | Vector Database | :white_check_mark: | :white_check_mark: |
251-
| QDrant | Vector Database | :x: | :white_check_mark: |
235+
| Vendor | Type | Typescript SDK | Python SDK |
236+
| ------------ | --------------- | ------------------ | ------------------------------- |
237+
| OpenAI | LLM | :white_check_mark: | :white_check_mark: |
238+
| Anthropic | LLM | :white_check_mark: | :white_check_mark: |
239+
| Azure OpenAI | LLM | :white_check_mark: | :white_check_mark: |
240+
| Cohere | LLM | :white_check_mark: | :white_check_mark: |
241+
| Groq | LLM | :x: | :white_check_mark: |
242+
| Perplexity | LLM | :white_check_mark: | :white_check_mark: |
243+
| Gemini | LLM | :x: | :white_check_mark: |
244+
| Langchain | Framework | :x: | :white_check_mark: |
245+
| LlamaIndex | Framework | :white_check_mark: | :white_check_mark: |
246+
| Langgraph | Framework | :x: | :white_check_mark: |
247+
| DSPy | Framework | :x: | :white_check_mark: |
248+
| CrewAI | Framework | :x: | :white_check_mark: |
249+
| Ollama | Framework | :x: | :white_check_mark: |
250+
| VertexAI | Framework | :x: | :white_check_mark: |
251+
| Pinecone | Vector Database | :white_check_mark: | :white_check_mark: |
252+
| ChromaDB | Vector Database | :white_check_mark: | :white_check_mark: |
253+
| QDrant | Vector Database | :white_check_mark: | :white_check_mark: |
254+
| Weaviate | Vector Database | :white_check_mark: | :white_check_mark: |
255+
| PGVector | Vector Database | :white_check_mark: | :white_check_mark: (SQLAlchemy) |
252256

253257
---
254258

pyproject.toml

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -50,7 +50,9 @@ dev = [
5050
"qdrant_client",
5151
"weaviate-client",
5252
"ollama",
53-
"groq"
53+
"groq",
54+
"google-generativeai",
55+
"google-cloud-aiplatform"
5456
]
5557

5658
test = [
Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
from .main import basic


class GeminiRunner:
    """Entry point the example harness instantiates to exercise the Gemini demos."""

    def run(self):
        # Delegate straight to the module-level demo driver.
        basic()
Lines changed: 62 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,62 @@
1+
# Gemini function-calling schema: a single tool bundle declaring three
# movie-discovery functions. Parameter schemas use the OpenAPI subset that
# google-generativeai accepts.

# The location argument is declared identically by every function; build each
# declaration from a fresh copy so the dicts stay independent objects.
_LOCATION_PROP = {
    "type": "string",
    "description": "The city and state, e.g. San Francisco, CA or a zip code e.g. 95616",
}

# Free-form movie-title argument shared by find_theaters and get_showtimes.
_MOVIE_PROP = {"type": "string", "description": "Any movie title"}

tools = [
    {
        "function_declarations": [
            {
                "name": "find_movies",
                "description": "find movie titles currently playing in theaters based on any description, genre, title words, etc.",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "location": dict(_LOCATION_PROP),
                        "description": {
                            "type": "string",
                            "description": "Any kind of description including category or genre, title words, attributes, etc.",
                        },
                    },
                    "required": ["description"],
                },
            },
            {
                "name": "find_theaters",
                "description": "find theaters based on location and optionally movie title which is currently playing in theaters",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "location": dict(_LOCATION_PROP),
                        "movie": dict(_MOVIE_PROP),
                    },
                    "required": ["location"],
                },
            },
            {
                "name": "get_showtimes",
                "description": "Find the start times for movies playing in a specific theater",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "location": dict(_LOCATION_PROP),
                        "movie": dict(_MOVIE_PROP),
                        "theater": {
                            "type": "string",
                            "description": "Name of the theater",
                        },
                        "date": {
                            "type": "string",
                            "description": "Date for requested showtime",
                        },
                    },
                    "required": ["location", "movie", "theater", "date"],
                },
            },
        ]
    }
]
Lines changed: 91 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,91 @@
1+
from langtrace_python_sdk import langtrace
2+
import google.generativeai as genai
3+
from dotenv import load_dotenv
4+
import os
5+
import asyncio
6+
import pathlib
7+
from .function_tools import tools
8+
9+
# Pull GEMINI_API_KEY (and any Langtrace settings) from a local .env file.
load_dotenv()

# Initialise tracing before configuring the Gemini client so its calls are
# instrumented; spans are exported immediately rather than batched or printed.
langtrace.init(batch=False, write_spans_to_console=False)
api_key = os.environ["GEMINI_API_KEY"]
genai.configure(api_key=api_key)
13+
14+
15+
async def async_demo():
    """Run a plain and a streaming async generation concurrently.

    Returns the two results in submission order, as produced by
    ``asyncio.gather`` (which wraps each coroutine in a task).
    """
    return await asyncio.gather(
        async_generate(),
        async_generate(stream=True),
    )
19+
20+
21+
def basic():
    """Exercise the synchronous and asynchronous Gemini demo paths once each."""
    # Synchronous: a plain completion, then a streamed completion with tools.
    generate()
    generate(stream=True, with_tools=True)

    # image_to_text() / audio_to_text() demos are intentionally not part of
    # the default run.
    asyncio.run(async_demo())
28+
29+
30+
def generate(stream=False, with_tools=False):
    """Run a synchronous Gemini completion and print the model's text.

    Args:
        stream: When True, iterate the response chunk by chunk.
        with_tools: When True, pass the movie function declarations so the
            model may answer with a function call instead of text.
    """
    model = genai.GenerativeModel(
        "gemini-1.5-pro", system_instruction="You are a cat. Your name is Neko."
    )

    response = model.generate_content(
        "Write a story about a AI and magic",
        stream=stream,
        tools=tools if with_tools else None,
    )
    if stream:
        for res in response:
            # BUG FIX: the quick accessor `res.text` raises ValueError when a
            # chunk carries a function_call part (tool use) instead of text,
            # which happens on the with_tools=True path. Assemble the text
            # from the parts instead; a function_call part has an empty
            # `text` field and is skipped.
            chunk_text = "".join(part.text for part in res.parts if part.text)
            if chunk_text:
                print(chunk_text)
    else:
        print(response.text)
46+
47+
48+
async def async_generate(stream=False):
    """Run an asynchronous Gemini completion and print the model's text.

    Args:
        stream: When True, print the story chunk by chunk as it arrives.
    """
    model = genai.GenerativeModel(
        "gemini-1.5-pro", system_instruction="You are a cat. Your name is Neko."
    )
    response = await model.generate_content_async(
        "Write a story about a AI and magic", stream=stream
    )
    if not stream:
        print(response.text)
        return
    async for chunk in response:
        if chunk.text:
            print(chunk.text)
61+
62+
63+
def image_to_text(
    stream=False, image_path="src/examples/gemini_example/jetpack.jpg"
):
    """Ask gemini-1.5-flash to describe a JPEG image and print the answer.

    Args:
        stream: When True, print the description chunk by chunk.
        image_path: Path of the JPEG to describe. Defaults to the bundled
            example image; the default is resolved relative to the process
            working directory (the repo root when run via the harness).
    """
    model = genai.GenerativeModel("gemini-1.5-flash")
    image1 = {
        "mime_type": "image/jpeg",
        "data": pathlib.Path(image_path).read_bytes(),
    }

    prompt = "Describe me this picture. What do you see in it."
    response = model.generate_content([prompt, image1], stream=stream)
    if stream:
        for res in response:
            print(res.text)
    else:
        print(response.text)
77+
78+
79+
# def audio_to_text(stream=False):
80+
# model = genai.GenerativeModel("gemini-1.5-flash")
81+
# audio = genai.upload_file(
82+
# pathlib.Path("src/examples/gemini_example/voice_note.mp3")
83+
# )
84+
85+
# prompt = "Summarize this voice recording."
86+
# response = model.generate_content([prompt, audio], stream=stream)
87+
# if stream:
88+
# for res in response:
89+
# print(res.text)
90+
# else:
91+
# print(response.text)

src/examples/openai_example/embeddings_create.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -16,5 +16,6 @@ def embeddings_create():
1616
result = client.embeddings.create(
1717
model="text-embedding-ada-002",
1818
input="Once upon a time, there was a pirate.",
19+
encoding_format="float",
1920
)
2021
return result

src/examples/openai_example/images_edit.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -23,8 +23,8 @@ def image_edit():
2323

2424
response = client.images.edit(
2525
model="dall-e-2",
26-
image=open("./resources/lounge_flamingo.png", "rb"),
27-
mask=open("./resources/mask.png", "rb"),
26+
image=open("src/examples/openai_example/resources/lounge_flamingo.png", "rb"),
27+
mask=open("src/examples/openai_example/resources/mask.png", "rb"),
2828
prompt="A sunlit indoor lounge area with a pool and duck standing in side with flamingo.",
2929
n=1,
3030
size="1024x1024",
Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
from .main import basic


class VertexAIRunner:
    """Entry point the example harness instantiates to exercise the Vertex AI demos."""

    def run(self):
        # Delegate straight to the module-level demo driver.
        basic()

0 commit comments

Comments
 (0)