Skip to content

Commit 03cea4e

Browse files
committed
Support google.genai
1 parent 1ed6af1 commit 03cea4e

File tree

10 files changed

+212
-5
lines changed

10 files changed

+212
-5
lines changed

src/examples/gemini_example/main.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -20,16 +20,16 @@ async def async_demo():
2020

2121
def basic():
2222
generate()
23-
generate(stream=True, with_tools=True)
23+
# generate(stream=True, with_tools=True)
2424

2525
# image_to_text()
2626
# audio_to_text()
27-
asyncio.run(async_demo())
27+
# asyncio.run(async_demo())
2828

2929

3030
def generate(stream=False, with_tools=False):
3131
model = genai.GenerativeModel(
32-
"gemini-1.5-pro", system_instruction="You are a cat. Your name is Neko."
32+
"gemini-2.0-flash-exp", system_instruction="You are a cat. Your name is Neko."
3333
)
3434

3535
response = model.generate_content(
Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
from .main import generate_content, generate_content_streaming
2+
3+
4+
class GoogleGenaiRunner:
    """Example runner for the google-genai integration (see run_example.py)."""

    def run(self):
        """Execute the streaming generate-content demo."""
        # generate_content()  # non-streaming variant, disabled by default
        generate_content_streaming()
Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,27 @@
1+
from google import genai
2+
from dotenv import load_dotenv
3+
import os
4+
from langtrace_python_sdk import langtrace
5+
6+
load_dotenv()
7+
langtrace.init(write_spans_to_console=False)
8+
9+
10+
def generate_content():
11+
# Only run this block for Google AI API
12+
client = genai.Client(api_key=os.getenv("GEMINI_API_KEY"))
13+
response = client.models.generate_content(
14+
model="gemini-2.0-flash-exp", contents="What is your name?"
15+
)
16+
17+
print(response.text)
18+
19+
20+
def generate_content_streaming():
    """Streaming demo: issue one request and drain the chunk stream.

    Chunks are intentionally discarded — the stream is consumed only so
    the request runs to completion for the instrumentation to record.
    """
    client = genai.Client(api_key=os.getenv("GEMINI_API_KEY"))
    stream = client.models.generate_content_stream(
        model="gemini-2.0-flash-exp",
        contents="What is your name?",
    )
    for _chunk in stream:
        pass

src/langtrace_python_sdk/instrumentation/__init__.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@
2424
from .pymongo import PyMongoInstrumentation
2525
from .cerebras import CerebrasInstrumentation
2626
from .milvus import MilvusInstrumentation
27+
from .google_genai import GoogleGenaiInstrumentation
2728

2829
__all__ = [
2930
"AnthropicInstrumentation",
@@ -52,4 +53,5 @@
5253
"AWSBedrockInstrumentation",
5354
"CerebrasInstrumentation",
5455
"MilvusInstrumentation",
56+
"GoogleGenaiInstrumentation",
5557
]
Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
from .instrumentation import GoogleGenaiInstrumentation
2+
3+
__all__ = ["GoogleGenaiInstrumentation"]
Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,30 @@
1+
from typing import Collection
2+
from importlib_metadata import version as v
3+
from wrapt import wrap_function_wrapper as _W
4+
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
5+
from opentelemetry.trace import get_tracer
6+
from .patch import patch_google_genai, patch_google_genai_streaming
7+
8+
9+
class GoogleGenaiInstrumentation(BaseInstrumentor):
    """OpenTelemetry instrumentor for the ``google-genai`` client.

    Wraps ``Models.generate_content`` and ``Models.generate_content_stream``
    so every call is traced by langtrace.
    """

    def instrumentation_dependencies(self) -> Collection[str]:
        return ["google-genai >= 0.1.0", "google-generativeai < 1.0.0"]

    def _instrument(self, **kwargs):
        tracer = get_tracer(__name__, "", kwargs.get("tracer_provider"))
        version = v("google-genai")

        # One (method path, patch factory) pair per wrapped entry point.
        targets = (
            ("models.Models.generate_content", patch_google_genai),
            ("models.Models.generate_content_stream", patch_google_genai_streaming),
        )
        for method_path, patcher in targets:
            _W(
                module="google.genai",
                name=method_path,
                wrapper=patcher(tracer, version),
            )

    def _uninstrument(self, **kwargs):
        # No unwrapping performed; the wrappers persist for the process lifetime.
        pass
Lines changed: 129 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,129 @@
1+
from langtrace_python_sdk.utils.llm import (
2+
get_langtrace_attributes,
3+
get_llm_request_attributes,
4+
set_span_attributes,
5+
set_usage_attributes,
6+
set_span_attribute,
7+
set_event_completion,
8+
)
9+
from langtrace_python_sdk.utils import handle_span_error
10+
11+
from opentelemetry.trace import Tracer, SpanKind
12+
from opentelemetry.sdk.trace import Span
13+
from langtrace.trace_attributes import SpanAttributes
14+
from google.genai.types import GenerateContentResponse
15+
16+
from typing import Iterator
17+
18+
19+
def patch_google_genai(tracer: Tracer, version: str):
    """Build a wrapt wrapper that traces ``Models.generate_content``."""

    def traced_method(wrapped, instance, args, kwargs):
        # The caller's request becomes a single user message for the span.
        messages = [{"role": "user", "content": kwargs["contents"]}]
        attributes = dict(
            get_langtrace_attributes(service_provider="google_genai", version=version)
        )
        attributes.update(get_llm_request_attributes(kwargs=kwargs, prompts=messages))

        with tracer.start_as_current_span(
            name="google.genai.generate_content",
            kind=SpanKind.CLIENT,
        ) as span:
            try:
                set_span_attributes(span, attributes)
                result = wrapped(*args, **kwargs)
                set_response_attributes(span, result)
            except Exception as error:
                handle_span_error(span, error)
                raise
            return result

    return traced_method
47+
48+
49+
def patch_google_genai_streaming(tracer: Tracer, version: str):
    """Build a wrapt wrapper that traces ``Models.generate_content_stream``.

    The wrapped call returns an iterator of response chunks. Chunks are
    re-yielded to the caller while attributes are recorded, and the span
    is ended when the stream is exhausted (or errors). The previous
    implementation consumed the iterator while recording attributes and
    then returned the already-exhausted iterator, so callers never saw
    any chunks; it also had no error handling, unlike the non-streaming
    wrapper.
    """

    def traced_method(wrapped, instance, args, kwargs):
        prompt = [
            {
                "role": "user",
                "content": kwargs["contents"],
            }
        ]
        span_attributes = {
            **get_langtrace_attributes(
                service_provider="google_genai", version=version
            ),
            **get_llm_request_attributes(kwargs=kwargs, prompts=prompt),
        }
        # Not a `with` block: the span must stay open while the caller
        # consumes the stream, so it is ended inside the generator below.
        span = tracer.start_span(
            name="google.genai.generate_content_stream",
            kind=SpanKind.CLIENT,
        )
        try:
            set_span_attributes(span, span_attributes)
            response = wrapped(*args, **kwargs)
        except Exception as error:
            handle_span_error(span, error)
            span.end()
            raise

        def _traced_stream(chunks):
            # Pass every chunk through to the caller while accumulating
            # the completion text and per-chunk attributes.
            accum_completion = ""
            try:
                for chunk in chunks:
                    set_span_attribute(
                        span,
                        SpanAttributes.LLM_RESPONSE_MODEL,
                        chunk.model_version,
                    )
                    for candidate in chunk.candidates:
                        set_span_attribute(
                            span,
                            SpanAttributes.LLM_RESPONSE_FINISH_REASON,
                            candidate.finish_reason,
                        )
                        accum_completion += candidate.content.parts[0].text
                    if chunk.usage_metadata:
                        set_usage_attributes(
                            span,
                            {
                                "input_tokens": chunk.usage_metadata.prompt_token_count,
                                "output_tokens": chunk.usage_metadata.candidates_token_count,
                            },
                        )
                    yield chunk
                set_event_completion(
                    span, [{"role": "assistant", "content": accum_completion}]
                )
            except Exception as error:
                handle_span_error(span, error)
                raise
            finally:
                span.end()

        return _traced_stream(response)

    return traced_method
73+
74+
75+
def set_streaming_response_attributes(
    span: Span, response: Iterator[GenerateContentResponse]
):
    """Record model, finish reason, usage, and completion from a chunk stream.

    NOTE: this fully consumes ``response``; the iterator cannot be
    replayed by the caller afterwards.
    """
    pieces = []
    for chunk in response:
        set_span_attribute(
            span,
            SpanAttributes.LLM_RESPONSE_MODEL,
            chunk.model_version,
        )
        for candidate in chunk.candidates:
            # Each chunk overwrites the finish reason; the last one wins.
            set_span_attribute(
                span,
                SpanAttributes.LLM_RESPONSE_FINISH_REASON,
                candidate.finish_reason,
            )
            pieces.append(candidate.content.parts[0].text)
        if chunk.usage_metadata:
            set_usage_attributes(
                span,
                {
                    "input_tokens": chunk.usage_metadata.prompt_token_count,
                    "output_tokens": chunk.usage_metadata.candidates_token_count,
                },
            )
    set_event_completion(span, [{"role": "assistant", "content": "".join(pieces)}])
104+
105+
106+
def set_response_attributes(span: Span, response: GenerateContentResponse):
    """Record finish reason, completions, model, and usage from a full response."""
    completions = []
    for candidate in response.candidates:
        # With multiple candidates the last finish reason overwrites earlier ones.
        set_span_attribute(
            span, SpanAttributes.LLM_RESPONSE_FINISH_REASON, candidate.finish_reason
        )
        content = candidate.content
        completions.append(
            {
                "role": content.role or "assistant",
                "content": [part.text for part in content.parts],
            }
        )

    set_span_attribute(span, SpanAttributes.LLM_RESPONSE_MODEL, response.model_version)
    set_event_completion(span, completions)

    usage = response.usage_metadata
    if usage:
        set_usage_attributes(
            span,
            {
                "input_tokens": usage.prompt_token_count,
                "output_tokens": usage.candidates_token_count,
            },
        )

src/langtrace_python_sdk/langtrace.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -68,6 +68,7 @@
6868
PyMongoInstrumentation,
6969
CerebrasInstrumentation,
7070
MilvusInstrumentation,
71+
GoogleGenaiInstrumentation,
7172
)
7273
from opentelemetry.util.re import parse_env_headers
7374

@@ -301,6 +302,7 @@ def init(
301302
"vertexai": VertexAIInstrumentation(),
302303
"google-cloud-aiplatform": VertexAIInstrumentation(),
303304
"google-generativeai": GeminiInstrumentation(),
305+
"google-genai": GoogleGenaiInstrumentation(),
304306
"mistralai": MistralInstrumentation(),
305307
"boto3": AWSBedrockInstrumentation(),
306308
"autogen": AutogenInstrumentation(),
Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
__version__ = "3.3.22"
1+
__version__ = "3.3.23"

src/run_example.py

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,8 +20,9 @@
2020
"vertexai": False,
2121
"gemini": False,
2222
"mistral": False,
23-
"awsbedrock": True,
23+
"awsbedrock": False,
2424
"cerebras": False,
25+
"google_genai": True,
2526
}
2627

2728
if ENABLED_EXAMPLES["anthropic"]:
@@ -137,3 +138,9 @@
137138

138139
print(Fore.BLUE + "Running Cerebras example" + Fore.RESET)
139140
CerebrasRunner().run()
141+
142+
if ENABLED_EXAMPLES["google_genai"]:
143+
from examples.google_genai_example import GoogleGenaiRunner
144+
145+
print(Fore.BLUE + "Running Google Genai example" + Fore.RESET)
146+
GoogleGenaiRunner().run()

0 commit comments

Comments (0)