Skip to content

Commit 8a3b715

Browse files
authored
Merge pull request #454 from Scale3-Labs/development
Release `3.3.23`: Support `google.genai`
2 parents 7789af4 + 55e822b commit 8a3b715

File tree

10 files changed

+209
-5
lines changed

10 files changed

+209
-5
lines changed

src/examples/gemini_example/main.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -20,16 +20,16 @@ async def async_demo():
2020

2121
def basic():
    """Run the currently enabled Gemini demos.

    The commented-out calls are demos that are deliberately disabled;
    uncomment one to exercise it.
    """
    generate()
    # generate(stream=True, with_tools=True)

    # image_to_text()
    # audio_to_text()
    # asyncio.run(async_demo())
2828

2929

3030
def generate(stream=False, with_tools=False):
3131
model = genai.GenerativeModel(
32-
"gemini-1.5-pro", system_instruction="You are a cat. Your name is Neko."
32+
"gemini-2.0-flash-exp", system_instruction="You are a cat. Your name is Neko."
3333
)
3434

3535
response = model.generate_content(
Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
from .main import generate_content, generate_content_streaming


class GoogleGenaiRunner:
    """Example entry point invoked by ``src/run_example.py``."""

    def run(self):
        # Toggle which demo runs; only the streaming variant is enabled here.
        # generate_content()
        generate_content_streaming()
Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,27 @@
1+
from google import genai
from dotenv import load_dotenv
import os
from langtrace_python_sdk import langtrace

# Load env vars and initialise tracing once at import time so both demos
# run instrumented.
load_dotenv()
langtrace.init(write_spans_to_console=False)


def _client():
    # Shared client factory; the key comes from the GEMINI_API_KEY env var.
    return genai.Client(api_key=os.getenv("GEMINI_API_KEY"))


def generate_content():
    """Single-shot generation demo.

    Only run this block for the Google AI API (not Vertex).
    """
    response = _client().models.generate_content(
        model="gemini-2.0-flash-exp", contents="What is your name?"
    )
    print(response.text)


def generate_content_streaming():
    """Streaming generation demo; chunks are drained and discarded."""
    stream = _client().models.generate_content_stream(
        model="gemini-2.0-flash-exp", contents="What is your name?"
    )
    for _chunk in stream:
        pass

src/langtrace_python_sdk/instrumentation/__init__.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@
2424
from .pymongo import PyMongoInstrumentation
2525
from .cerebras import CerebrasInstrumentation
2626
from .milvus import MilvusInstrumentation
27+
from .google_genai import GoogleGenaiInstrumentation
2728

2829
__all__ = [
2930
"AnthropicInstrumentation",
@@ -52,4 +53,5 @@
5253
"AWSBedrockInstrumentation",
5354
"CerebrasInstrumentation",
5455
"MilvusInstrumentation",
56+
"GoogleGenaiInstrumentation",
5557
]
Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
# Re-export the instrumentor as the package's public API.
from .instrumentation import GoogleGenaiInstrumentation

__all__ = ["GoogleGenaiInstrumentation"]
Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,30 @@
1+
from typing import Collection
from importlib_metadata import version as v
from wrapt import wrap_function_wrapper as _W
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
from opentelemetry.trace import get_tracer
from .patch import patch_google_genai, patch_google_genai_streaming


class GoogleGenaiInstrumentation(BaseInstrumentor):
    """OpenTelemetry instrumentor for the ``google.genai`` client.

    Wraps ``Models.generate_content`` and ``Models.generate_content_stream``
    with langtrace span-producing patches.
    """

    def instrumentation_dependencies(self) -> Collection[str]:
        # NOTE(review): the extra upper bound on google-generativeai looks
        # deliberate (keeps this from clashing with the legacy SDK) —
        # confirm the intent.
        return ["google-genai >= 0.1.0", "google-generativeai < 1.0.0"]

    def _instrument(self, **kwargs):
        provider = kwargs.get("tracer_provider")
        tracer = get_tracer(__name__, "", provider)
        sdk_version = v("google-genai")

        # Method path -> patch factory; both live on google.genai.models.Models.
        targets = (
            ("models.Models.generate_content", patch_google_genai),
            ("models.Models.generate_content_stream", patch_google_genai_streaming),
        )
        for method_path, patcher in targets:
            _W(
                module="google.genai",
                name=method_path,
                wrapper=patcher(tracer, sdk_version),
            )

    def _uninstrument(self, **kwargs):
        pass
Lines changed: 126 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,126 @@
1+
from langtrace_python_sdk.utils.llm import (
2+
get_langtrace_attributes,
3+
get_llm_request_attributes,
4+
set_span_attributes,
5+
set_usage_attributes,
6+
set_span_attribute,
7+
set_event_completion,
8+
)
9+
from langtrace_python_sdk.utils import handle_span_error
10+
11+
from opentelemetry.trace import Tracer, SpanKind
12+
from opentelemetry.sdk.trace import Span
13+
from langtrace.trace_attributes import SpanAttributes
14+
15+
from typing import Iterator
16+
17+
18+
def patch_google_genai(tracer: Tracer, version: str):
    """Build a wrapt wrapper for ``google.genai`` ``Models.generate_content``.

    Records request attributes before the call and response attributes after
    it; errors are attached to the span and re-raised. ``version`` is the
    installed ``google-genai`` package version.
    """

    def traced_method(wrapped, instance, args, kwargs):
        # ``contents`` may be passed positionally; never raise KeyError here,
        # since that would break the user's call before it is even made.
        contents = kwargs.get("contents", args[0] if args else None)
        prompt = [
            {
                "role": "user",
                "content": contents,
            }
        ]
        span_attributes = {
            **get_langtrace_attributes(
                service_provider="google_genai", version=version
            ),
            **get_llm_request_attributes(kwargs=kwargs, prompts=prompt),
        }
        with tracer.start_as_current_span(
            name="google.genai.generate_content",
            kind=SpanKind.CLIENT,
        ) as span:
            try:
                set_span_attributes(span, span_attributes)
                response = wrapped(*args, **kwargs)
                set_response_attributes(span, response)
                return response
            except Exception as error:
                handle_span_error(span, error)
                raise

    return traced_method
46+
47+
48+
def patch_google_genai_streaming(tracer: Tracer, version: str):
    """Build a wrapt wrapper for ``google.genai`` ``Models.generate_content_stream``.

    The stream is buffered inside the span so completion/usage attributes can
    be recorded; callers receive an iterator over the same chunks. Errors are
    attached to the span and re-raised.
    """

    def traced_method(wrapped, instance, args, kwargs):
        # Tolerate positionally-passed contents (same guard as patch_google_genai).
        contents = kwargs.get("contents", args[0] if args else None)
        prompt = [
            {
                "role": "user",
                "content": contents,
            }
        ]
        span_attributes = {
            **get_langtrace_attributes(
                service_provider="google_genai", version=version
            ),
            **get_llm_request_attributes(kwargs=kwargs, prompts=prompt),
        }
        with tracer.start_as_current_span(
            name="google.genai.generate_content_stream",
            kind=SpanKind.CLIENT,
        ) as span:
            try:
                set_span_attributes(span, span_attributes)
                response = wrapped(*args, **kwargs)
                # Buffer before extracting attributes: iterating the raw
                # generator for span data would exhaust it and hand the
                # caller an empty stream. This trades first-chunk latency
                # for a complete span and a usable return value.
                chunks = list(response)
                set_streaming_response_attributes(span, chunks)
                return iter(chunks)
            except Exception as error:
                handle_span_error(span, error)
                raise

    return traced_method
72+
73+
74+
def set_streaming_response_attributes(span: Span, response):
    """Record model, finish reasons, token usage and the accumulated
    completion text from an iterable of streaming chunks onto ``span``.

    Note: when ``response`` is a generator, iterating it here consumes it.
    """
    accum_completion = ""
    for chunk in response:
        set_span_attribute(
            span,
            SpanAttributes.LLM_RESPONSE_MODEL,
            chunk.model_version,
        )
        # Guard against a None candidates list on usage/finish-only chunks —
        # TODO confirm against the google-genai response schema.
        for candidate in chunk.candidates or []:
            set_span_attribute(
                span,
                SpanAttributes.LLM_RESPONSE_FINISH_REASON,
                candidate.finish_reason,
            )

            # Skip chunks with no content/parts or a None text instead of
            # raising (the unguarded index/concatenation crashed on those).
            parts = candidate.content.parts if candidate.content else None
            if parts and parts[0].text:
                accum_completion += parts[0].text

        if chunk.usage_metadata:
            set_usage_attributes(
                span,
                {
                    "input_tokens": chunk.usage_metadata.prompt_token_count,
                    "output_tokens": chunk.usage_metadata.candidates_token_count,
                },
            )
    set_event_completion(span, [{"role": "assistant", "content": accum_completion}])
101+
102+
103+
def set_response_attributes(span: Span, response):
    """Record finish reasons, completions, model version and token usage
    from a non-streaming ``generate_content`` response onto ``span``."""
    completions = []
    for candidate in response.candidates:
        set_span_attribute(
            span, SpanAttributes.LLM_RESPONSE_FINISH_REASON, candidate.finish_reason
        )
        content = candidate.content
        completions.append(
            {
                "role": content.role or "assistant",
                "content": [part.text for part in content.parts],
            }
        )

    set_span_attribute(span, SpanAttributes.LLM_RESPONSE_MODEL, response.model_version)
    set_event_completion(span, completions)
    if response.usage_metadata:
        set_usage_attributes(
            span,
            {
                "input_tokens": response.usage_metadata.prompt_token_count,
                "output_tokens": response.usage_metadata.candidates_token_count,
            },
        )

src/langtrace_python_sdk/langtrace.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -68,6 +68,7 @@
6868
PyMongoInstrumentation,
6969
CerebrasInstrumentation,
7070
MilvusInstrumentation,
71+
GoogleGenaiInstrumentation,
7172
)
7273
from opentelemetry.util.re import parse_env_headers
7374

@@ -301,6 +302,7 @@ def init(
301302
"vertexai": VertexAIInstrumentation(),
302303
"google-cloud-aiplatform": VertexAIInstrumentation(),
303304
"google-generativeai": GeminiInstrumentation(),
305+
"google-genai": GoogleGenaiInstrumentation(),
304306
"mistralai": MistralInstrumentation(),
305307
"boto3": AWSBedrockInstrumentation(),
306308
"autogen": AutogenInstrumentation(),
Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
# Bumped for the release adding google.genai support.
__version__ = "3.3.23"

src/run_example.py

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,8 +20,9 @@
2020
"vertexai": False,
2121
"gemini": False,
2222
"mistral": False,
23-
"awsbedrock": True,
23+
"awsbedrock": False,
2424
"cerebras": False,
25+
"google_genai": True,
2526
}
2627

2728
if ENABLED_EXAMPLES["anthropic"]:
@@ -137,3 +138,9 @@
137138

138139
print(Fore.BLUE + "Running Cerebras example" + Fore.RESET)
139140
CerebrasRunner().run()
141+
142+
if ENABLED_EXAMPLES["google_genai"]:
143+
from examples.google_genai_example import GoogleGenaiRunner
144+
145+
print(Fore.BLUE + "Running Google Genai example" + Fore.RESET)
146+
GoogleGenaiRunner().run()

0 commit comments

Comments
 (0)