import os
import asyncio

import google.genai as genai
from traceloop.sdk import Traceloop
from traceloop.sdk.decorators import workflow

# Initialize Traceloop instrumentation before any model calls are made so
# every workflow below is traced.
Traceloop.init(app_name="gemini_example")

# One shared client for the google-genai SDK. This replaces the deprecated
# module-level genai.configure(api_key=...) pattern from google-generativeai.
# NOTE(review): GENAI_API_KEY may be unset here, in which case the SDK falls
# back to its own environment lookup — confirm deployment sets the key.
client = genai.Client(api_key=os.environ.get("GENAI_API_KEY"))
1010
1111
@workflow("predict")
def predict_text() -> str:
    """Ideation example with a Large Language Model.

    Sends a single prompt through the google-genai client (new SDK style:
    the model is named per-call rather than via a GenerativeModel object).

    Returns:
        The model's text response: ten interview questions for the role
        of program manager.
    """
    response = client.models.generate_content(
        model="gemini-1.5-pro-002",
        contents="Give me ten interview questions for the role of program manager.",
    )

    return response.text
async def async_predict_text() -> str:
    """Async ideation example with a Large Language Model.

    Uses the google-genai async surface (client.aio) so the coroutine
    actually awaits the request. The migrated code called the synchronous
    client.models.generate_content from inside an async def — no await at
    all — which blocks the event loop for the whole network round-trip.

    Returns:
        The model's text response: ten interview questions for the role
        of program manager.
    """
    response = await client.aio.models.generate_content(
        model="gemini-1.5-pro-002",
        contents="Give me ten interview questions for the role of program manager.",
    )

    return response.text
3434
3535
@workflow("chat")
def chat() -> str:
    """Multi-turn chat example with a Large Language Model.

    Uses the SDK's chat session (client.chats.create / send_message), which
    keeps the conversation history internally — equivalent to the manual
    role/parts history replay, but without hand-building the transcript and
    without paying to re-send it incorrectly if the format drifts.

    Returns:
        The model's text answer to the second message ("What is the
        capital of France?"), produced with the first exchange as context.
    """
    # The session object accumulates user/model turns automatically.
    session = client.chats.create(model="gemini-1.5-pro-002")

    # First turn establishes context; its reply is kept by the session.
    session.send_message("Hello, how are you?")

    # Second turn is answered with the full conversation history attached.
    response = session.send_message("What is the capital of France?")

    return response.text
4659
4760
4861if __name__ == "__main__" :