diff --git a/agentops/llms/providers/gemini.py b/agentops/llms/providers/gemini.py
new file mode 100644
index 000000000..6ca96c3eb
--- /dev/null
+++ b/agentops/llms/providers/gemini.py
@@ -0,0 +1,194 @@
+from typing import Optional, Any
+
+from agentops.llms.providers.base import BaseProvider
+from agentops.event import LLMEvent, ErrorEvent
+from agentops.session import Session
+from agentops.helpers import get_ISO_time, check_call_stack_for_agent_id
+from agentops.log_config import logger
+from agentops.singleton import singleton
+
+
+@singleton
+class GeminiProvider(BaseProvider):
+    """Provider for Google's Gemini API.
+
+    This provider is automatically detected and initialized when agentops.init()
+    is called and the google.generativeai package is imported. No manual
+    initialization is required."""
+
+    original_generate_content = None
+    original_generate_content_async = None
+
+    def __init__(self, client=None):
+        """Initialize the Gemini provider.
+
+        Args:
+            client: Optional client instance. If not provided, will be set during override.
+        """
+        super().__init__(client)
+        self._provider_name = "Gemini"
+
+    def handle_response(self, response, kwargs, init_timestamp, session: Optional[Session] = None) -> Any:
+        """Handle responses from Gemini API for both sync and streaming modes.
+
+        Args:
+            response: The response from the Gemini API
+            kwargs: The keyword arguments passed to generate_content
+            init_timestamp: The timestamp when the request was initiated
+            session: Optional AgentOps session for recording events
+
+        Returns:
+            For sync responses: The original response object
+            For streaming responses: A generator yielding response chunks
+        """
+        llm_event = LLMEvent(init_timestamp=init_timestamp, params=kwargs)
+        if session is not None:
+            llm_event.session_id = session.session_id
+
+        accumulated_content = ""
+
+        def handle_stream_chunk(chunk):
+            nonlocal llm_event, accumulated_content
+            try:
+                if llm_event.returns is None:
+                    llm_event.returns = chunk
+                    llm_event.agent_id = check_call_stack_for_agent_id()
+                    llm_event.model = getattr(chunk, "model", None) or "gemini-1.5-flash"
+                    llm_event.prompt = kwargs.get("prompt", kwargs.get("contents", None)) or []
+
+                # Accumulate text from chunk
+                if hasattr(chunk, "text") and chunk.text:
+                    accumulated_content += chunk.text
+
+                # Extract token counts if available
+                if hasattr(chunk, "usage_metadata"):
+                    llm_event.prompt_tokens = getattr(chunk.usage_metadata, "prompt_token_count", None)
+                    llm_event.completion_tokens = getattr(chunk.usage_metadata, "candidates_token_count", None)
+
+                # If this is the last chunk
+                if hasattr(chunk, "finish_reason") and chunk.finish_reason:
+                    llm_event.completion = accumulated_content
+                    llm_event.end_timestamp = get_ISO_time()
+                    self._safe_record(session, llm_event)
+
+            except Exception as e:
+                self._safe_record(session, ErrorEvent(trigger_event=llm_event, exception=e))
+                logger.warning(
+                    f"Unable to parse chunk for Gemini LLM call. Error: {str(e)}\n"
+                    f"Response: {chunk}\n"
+                    f"Arguments: {kwargs}\n"
+                )
+
+        # For streaming responses
+        if kwargs.get("stream", False):
+
+            def generator():
+                for chunk in response:
+                    handle_stream_chunk(chunk)
+                    yield chunk
+
+            return generator()
+
+        # For synchronous responses
+        try:
+            llm_event.returns = response
+            llm_event.agent_id = check_call_stack_for_agent_id()
+            llm_event.prompt = kwargs.get("prompt", kwargs.get("contents", None)) or []
+            llm_event.completion = response.text
+            llm_event.model = getattr(response, "model", None) or "gemini-1.5-flash"
+
+            # Extract token counts from usage metadata if available
+            if hasattr(response, "usage_metadata"):
+                llm_event.prompt_tokens = getattr(response.usage_metadata, "prompt_token_count", None)
+                llm_event.completion_tokens = getattr(response.usage_metadata, "candidates_token_count", None)
+
+            llm_event.end_timestamp = get_ISO_time()
+            self._safe_record(session, llm_event)
+        except Exception as e:
+            self._safe_record(session, ErrorEvent(trigger_event=llm_event, exception=e))
+            logger.warning(
+                f"Unable to parse response for Gemini LLM call. Error: {str(e)}\n"
+                f"Response: {response}\n"
+                f"Arguments: {kwargs}\n"
+            )
+
+        return response
+
+    def override(self):
+        """Override Gemini's generate_content method to track LLM events."""
+        self._override_gemini_generate_content()
+        self._override_gemini_generate_content_async()
+
+    def _override_gemini_generate_content(self):
+        """Override synchronous generate_content method"""
+        import google.generativeai as genai
+
+        # Store original method if not already stored
+        if self.original_generate_content is None:
+            self.original_generate_content = genai.GenerativeModel.generate_content
+
+        provider = self  # Store provider instance for closure
+
+        def patched_function(model_self, *args, **kwargs):
+            init_timestamp = get_ISO_time()
+            session = kwargs.pop("session", None)
+
+            # Handle positional prompt argument
+            event_kwargs = kwargs.copy()
+            if args and len(args) > 0:
+                prompt = args[0]
+                if "contents" not in kwargs:
+                    kwargs["contents"] = prompt
+                event_kwargs["prompt"] = prompt
+                args = args[1:]
+
+            result = provider.original_generate_content(model_self, *args, **kwargs)
+            return provider.handle_response(result, event_kwargs, init_timestamp, session=session)
+
+        # Override the method at class level
+        genai.GenerativeModel.generate_content = patched_function
+
+    def _override_gemini_generate_content_async(self):
+        """Override asynchronous generate_content method"""
+        import google.generativeai as genai
+
+        # Store original async method if not already stored
+        if self.original_generate_content_async is None:
+            self.original_generate_content_async = genai.GenerativeModel.generate_content_async
+
+        provider = self  # Store provider instance for closure
+
+        async def patched_function(model_self, *args, **kwargs):
+            init_timestamp = get_ISO_time()
+            session = kwargs.pop("session", None)
+
+            # Handle positional prompt argument
+            event_kwargs = kwargs.copy()
+            if args and len(args) > 0:
+                prompt = args[0]
+                if "contents" not in kwargs:
+                    kwargs["contents"] = prompt
+                event_kwargs["prompt"] = prompt
+                args = args[1:]
+
+            result = await provider.original_generate_content_async(model_self, *args, **kwargs)
+            return provider.handle_response(result, event_kwargs, init_timestamp, session=session)
+
+        # Override the async method at class level
+        genai.GenerativeModel.generate_content_async = patched_function
+
+    def undo_override(self):
+        """Restore original Gemini methods.
+
+        Note:
+            This method is called automatically by AgentOps during cleanup.
+            Users should not call this method directly."""
+        import google.generativeai as genai
+
+        if self.original_generate_content is not None:
+            genai.GenerativeModel.generate_content = self.original_generate_content
+            self.original_generate_content = None
+
+        if self.original_generate_content_async is not None:
+            genai.GenerativeModel.generate_content_async = self.original_generate_content_async
+            self.original_generate_content_async = None
diff --git a/agentops/llms/tracker.py b/agentops/llms/tracker.py
index 3609354f5..648920963 100644
--- a/agentops/llms/tracker.py
+++ b/agentops/llms/tracker.py
@@ -16,6 +16,7 @@
from .providers.ai21 import AI21Provider
from .providers.llama_stack_client import LlamaStackClientProvider
from .providers.taskweaver import TaskWeaverProvider
+from .providers.gemini import GeminiProvider
original_func = {}
original_create = None
@@ -24,6 +25,9 @@
class LlmTracker:
SUPPORTED_APIS = {
+ "google.generativeai": {
+ "0.1.0": ("GenerativeModel.generate_content", "GenerativeModel.generate_content_stream"),
+ },
"litellm": {"1.3.1": ("openai_chat_completions.completion",)},
"openai": {
"1.0.0": (
@@ -210,6 +214,15 @@ def override_api(self):
else:
logger.warning(f"Only TaskWeaver>=0.0.1 supported. v{module_version} found.")
+ if api == "google.generativeai":
+ module_version = version(api)
+
+ if Version(module_version) >= parse("0.1.0"):
+ provider = GeminiProvider(self.client)
+ provider.override()
+ else:
+ logger.warning(f"Only google.generativeai>=0.1.0 supported. v{module_version} found.")
+
def stop_instrumenting(self):
OpenAiProvider(self.client).undo_override()
GroqProvider(self.client).undo_override()
@@ -221,3 +234,4 @@ def stop_instrumenting(self):
AI21Provider(self.client).undo_override()
LlamaStackClientProvider(self.client).undo_override()
TaskWeaverProvider(self.client).undo_override()
+        GeminiProvider(self.client).undo_override()
diff --git a/docs/images/external/deepmind/gemini-logo.png b/docs/images/external/deepmind/gemini-logo.png
new file mode 100644
index 000000000..61275eb39
Binary files /dev/null and b/docs/images/external/deepmind/gemini-logo.png differ
diff --git a/docs/mint.json b/docs/mint.json
index 3fa8f6633..7c4aa15f6 100644
--- a/docs/mint.json
+++ b/docs/mint.json
@@ -93,6 +93,7 @@
"v1/integrations/camel",
"v1/integrations/cohere",
"v1/integrations/crewai",
+ "v1/integrations/gemini",
"v1/integrations/groq",
"v1/integrations/langchain",
"v1/integrations/llama_stack",
diff --git a/docs/v1/examples/examples.mdx b/docs/v1/examples/examples.mdx
index dda198bee..cc7c9e5bb 100644
--- a/docs/v1/examples/examples.mdx
+++ b/docs/v1/examples/examples.mdx
@@ -57,6 +57,10 @@ mode: "wide"
Ultra-fast LLM inference with Groq Cloud
+  <Card title="Gemini" icon={<img src="/images/external/deepmind/gemini-logo.png" alt="Gemini" />} iconType="image" href="/v1/integrations/gemini">
+    Explore Google DeepMind's Gemini with observation via AgentOps
+  </Card>
+
} iconType="image" href="/v1/examples/langchain">
Jupyter Notebook with a sample LangChain integration
diff --git a/docs/v1/integrations/gemini.mdx b/docs/v1/integrations/gemini.mdx
new file mode 100644
index 000000000..0f643f346
--- /dev/null
+++ b/docs/v1/integrations/gemini.mdx
@@ -0,0 +1,118 @@
+---
+title: Gemini
+description: "Explore Google DeepMind's Gemini with observation via AgentOps"
+---
+
+import CodeTooltip from '/snippets/add-code-tooltip.mdx'
+import EnvTooltip from '/snippets/add-env-tooltip.mdx'
+
+[Gemini (Google Generative AI)](https://ai.google.dev/gemini-api/docs/quickstart) is Google DeepMind's family of multimodal generative models, available to developers through the Google Generative AI SDK.
+Explore the [Gemini API docs](https://ai.google.dev/docs) for more information.
+
+<Note>
+  `google-generativeai>=0.1.0` is currently supported.
+</Note>
+
+<Steps>
+  <Step title="Install the AgentOps SDK">
+    <CodeGroup>
+      ```bash pip
+      pip install agentops
+      ```
+      ```bash poetry
+      poetry add agentops
+      ```
+    </CodeGroup>
+  </Step>
+  <Step title="Install the Gemini SDK">
+    <Note>
+      `google-generativeai>=0.1.0` is required for Gemini integration.
+    </Note>
+    <CodeGroup>
+      ```bash pip
+      pip install google-generativeai
+      ```
+      ```bash poetry
+      poetry add google-generativeai
+      ```
+    </CodeGroup>
+  </Step>
+  <Step title="Add 3 lines of code">
+    <CodeTooltip/>
+    <CodeGroup>
+      ```python python
+      import google.generativeai as genai
+      import agentops
+
+      agentops.init()
+      model = genai.GenerativeModel("gemini-1.5-flash")
+      ...
+      # End of program (e.g. main.py)
+      agentops.end_session("Success") # Success|Fail|Indeterminate
+      ```
+    </CodeGroup>
+    <EnvTooltip />
+    <CodeGroup>
+      ```python .env
+      AGENTOPS_API_KEY=
+      GEMINI_API_KEY=
+      ```
+    </CodeGroup>
+    Read more about environment variables in [Advanced Configuration](/v1/usage/advanced-configuration)
+  </Step>
+  <Step title="Run your Agent">
+    Execute your program and visit [app.agentops.ai/drilldown](https://app.agentops.ai/drilldown) to observe your Agent! 🕵️
+    <Tip>
+      After your run, AgentOps prints a clickable url to console linking directly to your session in the Dashboard
+    </Tip>
+  </Step>
+</Steps>
+
+
+## Full Examples
+
+<CodeGroup>
+
+```python sync
+import google.generativeai as genai
+import agentops
+
+agentops.init()
+model = genai.GenerativeModel("gemini-1.5-flash")
+
+response = model.generate_content(
+    "Write a haiku about AI and humans working together"
+)
+
+print(response.text)
+agentops.end_session('Success')
+```
+
+```python stream
+import google.generativeai as genai
+import agentops
+
+agentops.init()
+model = genai.GenerativeModel("gemini-1.5-flash")
+
+response = model.generate_content(
+    "Write a haiku about AI and humans working together",
+    stream=True
+)
+
+for chunk in response:
+    print(chunk.text, end="")
+
+agentops.end_session('Success')
+```
+
+</CodeGroup>
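+
+The async method is instrumented too. A minimal sketch, assuming your installed `google-generativeai` version exposes `generate_content_async`:
+
+```python
+import asyncio
+
+import google.generativeai as genai
+import agentops
+
+agentops.init()
+model = genai.GenerativeModel("gemini-1.5-flash")
+
+async def main():
+    response = await model.generate_content_async(
+        "Write a haiku about AI and humans working together"
+    )
+    print(response.text)
+
+asyncio.run(main())
+agentops.end_session('Success')
+```
+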
+You can find more examples in the [Gemini Examples](/v1/examples/gemini_examples) section.
+
diff --git a/examples/anthropic_examples/anthropic-example-sync.py b/examples/anthropic_examples/anthropic-example-sync.py
new file mode 100644
index 000000000..b4060293c
--- /dev/null
+++ b/examples/anthropic_examples/anthropic-example-sync.py
@@ -0,0 +1,167 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+# # Anthropic Sync Example
+#
+# We are going to create a program called "Nier Storyteller". In short, it uses a fragment-based message system similar to NieR: Automata's to generate a one-sentence summary before creating a short story.
+#
+# Example:
+#
+# {A foolish doll} {died in a world} {of ended dreams.} turns into "In a forgotten land where sunlight barely touched the ground, a little doll wandered through the remains of shattered dreams. Its porcelain face, cracked and wea..."
+
+# First, we start by importing AgentOps and Anthropic.
+
+# In[ ]:
+
+
+get_ipython().run_line_magic('pip', 'install agentops')
+get_ipython().run_line_magic('pip', 'install anthropic')
+
+
+# Setup our generic default statements
+
+# In[4]:
+
+
+from anthropic import Anthropic, AsyncAnthropic
+import agentops
+from dotenv import load_dotenv
+import os
+import random
+
+
+# And set our API keys.
+
+# In[6]:
+
+
+load_dotenv()
+ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY") or "ANTHROPIC KEY HERE"
+AGENTOPS_API_KEY = os.getenv("AGENTOPS_API_KEY") or "AGENTOPS KEY HERE"
+
+
+# Now let's set up the Anthropic client and initialize an AgentOps session!
+
+# In[7]:
+
+
+client = Anthropic(api_key=ANTHROPIC_API_KEY)
+
+
+# In[ ]:
+
+
+agentops.init(AGENTOPS_API_KEY, default_tags=["anthropic-example"])
+
+# Remember that story we made earlier? As of writing, claude-3-5-sonnet-20240620 (the version we will be using) supports a 200k-token context window (roughly 150k words) and up to 8,192 output tokens. This is great because we can include a full example story in the prompt!
+#
+# The Messages API gives us two roles: user (the person speaking) and assistant (the AI itself). Assistant messages can also serve as reference material the LLM draws from.
+# Let's set a default story to use as our example!
+
+# In[10]:
+
+
+defaultstory = "In a forgotten land where sunlight barely touched the ground, a little doll wandered through the remains of shattered dreams. Its porcelain face, cracked and weathered, reflected the emptiness that hung in the air like a lingering fog. The doll's painted eyes, now chipped and dull, stared into the distance, searching for something—anything—that still held life. It had once belonged to a child who dreamt of endless adventures, of castles in the clouds and whispered secrets under starry skies. But those dreams had long since crumbled to dust, leaving behind nothing but a hollow world where even hope dared not tread. The doll, a relic of a life that had faded, trudged through the darkness, its tiny feet stumbling over broken wishes and forgotten stories. Each step took more effort than the last, as if the world itself pulled at the doll's limbs, weary and bitter. It reached a place where the ground fell away into an abyss of despair, the edge crumbling under its weight. The doll paused, teetering on the brink. It reached out, as though to catch a fading dream, but there was nothing left to hold onto. With a faint crack, its brittle body gave way, and the doll tumbled silently into the void. And so, in a world where dreams had died, the foolish little doll met its end. There were no tears, no mourning. Only the soft, empty echo of its fall, fading into the darkness, as the land of ended dreams swallowed the last trace of what once was."
+
+
+# We are almost done! Let's generate a one-sentence story summary by taking three random sentence fragments and connecting them!
+
+# In[11]:
+
+
+# Define the lists
+first = [
+    "An unremarkable soldier",
+    "A lone swordsman",
+    "A lone lancer",
+    "A lone pugilist",
+    "A dual-wielder",
+    "A weaponless soldier",
+    "A beautiful android",
+    "A small android",
+    "A double-crossing android",
+    "A weapon carrying android",
+]
+
+second = [
+    "felt despair at this cold world",
+    "held nothing back",
+    "gave it all",
+    "could not get up again",
+    "grimaced in anger",
+    "missed the chance of a lifetime",
+    "couldn't find a weakpoint",
+    "was overwhelmed",
+    "was totally outmatched",
+    "was distracted by a flower",
+    "hesitated to land the killing blow",
+    "was attacked from behind",
+    "fell to the ground",
+]
+
+third = [
+    "in a dark hole beneath a city",
+    "underground",
+    "at the enemy's lair",
+    "inside an empty ship",
+    "at a tower built by the gods",
+    "on a tower smiled upon by angels",
+    "inside a tall tower",
+    "at a peace-loving village",
+    "at a village of refugees",
+    "in the free skies",
+    "below dark skies",
+    "in a blood-soaked battlefield",
+]
+
+# Generate a random sentence
+generatedsentence = (
+    f"{random.choice(first)} {random.choice(second)} {random.choice(third)}."
+)
+
+
+# Now let's construct a streaming message, seeding the assistant with our example!
+
+# In[ ]:
+
+
+stream = client.messages.create(
+    max_tokens=2400,
+    model="claude-3-5-sonnet-20240620",
+    messages=[
+        {
+            "role": "user",
+            "content": "Create a story based on the three sentence fragments given to you, it has been combined into one below.",
+        },
+        {
+            "role": "assistant",
+            "content": "{A foolish doll} {died in a world} {of ended dreams.}",
+        },
+        {"role": "assistant", "content": defaultstory},
+        {
+            "role": "user",
+            "content": "Create a story based on the three sentence fragments given to you, it has been combined into one below.",
+        },
+        {"role": "assistant", "content": generatedsentence},
+    ],
+    stream=True,
+)
+
+response = ""
+for event in stream:
+ if event.type == "content_block_delta":
+ response += event.delta.text
+ elif event.type == "message_stop":
+ print(generatedsentence)
+ print(response)
+
+
+# We can observe the session in the AgentOps dashboard by going to the session URL provided above.
+#
+# Now we will end the session with a success message. We can also end the session with a failure or indeterminate status. By default, the session will be marked as indeterminate.
+
+# In[ ]:
+
+
+agentops.end_session("Success")
+
diff --git a/examples/gemini_examples/README.md b/examples/gemini_examples/README.md
new file mode 100644
index 000000000..fb0af7624
--- /dev/null
+++ b/examples/gemini_examples/README.md
@@ -0,0 +1,94 @@
+# Gemini Integration Examples
+
+This directory contains examples demonstrating how to use AgentOps with Google's Gemini API for tracking and monitoring LLM interactions.
+
+## Prerequisites
+
+- Python 3.7+
+- `agentops` package installed (`pip install -U agentops`)
+- `google-generativeai` package installed (`pip install -U "google-generativeai>=0.1.0"`)
+- A Gemini API key (get one at [Google AI Studio](https://ai.google.dev/tutorials/setup))
+- An AgentOps API key (get one at [AgentOps Dashboard](https://app.agentops.ai/settings/projects))
+
+## Environment Setup
+
+1. Install required packages:
+```bash
+pip install -U agentops google-generativeai
+```
+
+2. Set your API keys as environment variables:
+```bash
+export GEMINI_API_KEY='your-gemini-api-key'
+export AGENTOPS_API_KEY='your-agentops-api-key'
+```
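+
+Alternatively, you can keep the keys in a `.env` file and load them at runtime. A minimal sketch using `python-dotenv` (an optional extra, not required by the integration):
+
+```python
+import os
+
+from dotenv import load_dotenv
+import google.generativeai as genai
+
+load_dotenv()  # reads GEMINI_API_KEY and AGENTOPS_API_KEY from .env
+genai.configure(api_key=os.getenv("GEMINI_API_KEY"))
+# agentops.init() will pick up AGENTOPS_API_KEY from the environment
+```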
+
+## Examples
+
+### Synchronous and Streaming Example
+
+The [gemini_example_sync.ipynb](./gemini_example_sync.ipynb) notebook demonstrates:
+- Basic synchronous text generation
+- Streaming text generation with chunk handling
+- Automatic event tracking and token usage monitoring
+- Session management and statistics
+
+```python
+import os
+
+import google.generativeai as genai
+import agentops
+
+# Configure the Gemini API key
+genai.configure(api_key=os.getenv("GEMINI_API_KEY"))
+
+# Initialize AgentOps (provider detection is automatic)
+agentops.init()
+
+# Create Gemini model
+model = genai.GenerativeModel("gemini-1.5-flash")
+
+# Generate text (synchronous)
+response = model.generate_content("What are the three laws of robotics?")
+print(response.text)
+
+# Generate text (streaming)
+response = model.generate_content(
+    "Explain machine learning in simple terms.",
+    stream=True
+)
+for chunk in response:
+    print(chunk.text, end="")
+
+# End session and view stats
+agentops.end_session(
+    end_state="Success",
+    end_state_reason="Example completed successfully"
+)
+```
+
+To run the example:
+1. Make sure you have set up your environment variables
+2. Open and run the notebook: `jupyter notebook gemini_example_sync.ipynb`
+3. View your session in the AgentOps dashboard using the URL printed at the end
+
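+The async entry point is patched as well. A minimal sketch (assuming your installed `google-generativeai` version exposes `GenerativeModel.generate_content_async`):
+
+```python
+import asyncio
+
+import google.generativeai as genai
+import agentops
+
+agentops.init()
+model = genai.GenerativeModel("gemini-1.5-flash")
+
+async def main():
+    # The patched async method records an LLMEvent just like the sync path
+    response = await model.generate_content_async("Summarize the three laws of robotics in one sentence.")
+    print(response.text)
+
+asyncio.run(main())
+agentops.end_session(end_state="Success")
+```
+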
+## Features
+
+- **Automatic Provider Detection**: The Gemini provider is automatically detected and initialized when you call `agentops.init()`
+- **Zero Configuration**: No manual provider setup required - just import and use
+- **Comprehensive Event Tracking**: All LLM calls are automatically tracked and visible in your AgentOps dashboard
+- **Token Usage Monitoring**: Token counts are extracted from the Gemini API's usage metadata when available
+- **Error Handling**: Robust error handling for both synchronous and streaming responses
+- **Session Management**: Automatic session tracking with detailed statistics
+
+## Notes
+
+- The provider supports both synchronous and streaming text generation
+- All events are automatically tracked and can be viewed in the AgentOps dashboard
+- Token usage is extracted when available in the response metadata
+- Error events are automatically captured and logged
+- The provider is designed to work seamlessly with AgentOps' session management (see the sketch below)
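+
+Because the patched `generate_content` pops a `session` keyword argument before calling Gemini, you can attribute a call to a specific session when running several at once. A sketch (assuming the standard `agentops.start_session()` API):
+
+```python
+import google.generativeai as genai
+import agentops
+
+agentops.init(auto_start_session=False)
+session = agentops.start_session(tags=["gemini-example"])
+
+model = genai.GenerativeModel("gemini-1.5-flash")
+
+# `session` is intercepted by the AgentOps patch and never forwarded to Gemini
+response = model.generate_content("Hello, Gemini!", session=session)
+print(response.text)
+
+session.end_session(end_state="Success")
+```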
+
+## Additional Resources
+
+- [Gemini API Documentation](https://ai.google.dev/docs)
+- [AgentOps Documentation](https://docs.agentops.ai)
+- [Gemini Integration Guide](https://docs.agentops.ai/v1/integrations/gemini)
diff --git a/examples/gemini_examples/gemini_example.ipynb b/examples/gemini_examples/gemini_example.ipynb
new file mode 100644
index 000000000..3e85414ee
--- /dev/null
+++ b/examples/gemini_examples/gemini_example.ipynb
@@ -0,0 +1,130 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "580c85ac",
+ "metadata": {},
+ "source": [
+ "# Gemini API Example with AgentOps\n",
+ "\n",
+ "This notebook demonstrates how to use AgentOps with Google's Gemini API for both synchronous and streaming text generation."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d731924a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import google.generativeai as genai\n",
+ "import agentops\n",
+ "from dotenv import load_dotenv\n",
+ "import os"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "a94545c9",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "load_dotenv()\n",
+ "\n",
+ "GEMINI_API_KEY = os.getenv(\"GEMINI_API_KEY\") or \"your gemini api key\"\n",
+ "AGENTOPS_API_KEY = os.getenv(\"AGENTOPS_API_KEY\") or \"your agentops api key\"\n",
+ "\n",
+ "genai.configure(api_key=GEMINI_API_KEY)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d632fe48",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Initialize AgentOps and Gemini model\n",
+ "agentops.init()\n",
+ "model = genai.GenerativeModel(\"gemini-1.5-flash\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "3923b6b8",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Test synchronous generation\n",
+ "print(\"Testing synchronous generation:\")\n",
+ "response = model.generate_content(\n",
+ " \"What are the three laws of robotics?\",\n",
+ " # session=ao_client\n",
+ ")\n",
+ "print(response.text)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "da54e521",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Test streaming generation\n",
+ "print(\"\\nTesting streaming generation:\")\n",
+ "response = model.generate_content(\n",
+ " \"Explain the concept of machine learning in simple terms.\",\n",
+ " stream=True,\n",
+ " # session=ao_client\n",
+ ")\n",
+ "\n",
+ "for chunk in response:\n",
+ " print(chunk.text, end=\"\")\n",
+ "print() # Add newline after streaming output\n",
+ "\n",
+ "# Test another synchronous generation\n",
+ "print(\"\\nTesting another synchronous generation:\")\n",
+ "response = model.generate_content(\n",
+ " \"What is the difference between supervised and unsupervised learning?\",\n",
+ " # session=ao_client\n",
+ ")\n",
+ "print(response.text)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c6a674c0",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# End session and check stats\n",
+ "agentops.end_session(end_state=\"Success\")"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "ops",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.16"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/examples/openai_examples/openai_example_sync.py b/examples/openai_examples/openai_example_sync.py
new file mode 100644
index 000000000..c1a64227c
--- /dev/null
+++ b/examples/openai_examples/openai_example_sync.py
@@ -0,0 +1,107 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+# # OpenAI Sync Example
+#
+# We are going to create a simple chatbot that creates stories based on a prompt. The chatbot will use the gpt-4o-mini LLM to generate each story from the user's prompt.
+#
+# We will track the chatbot with AgentOps and see how it performs!
+
+# First let's install the required packages
+
+# In[ ]:
+
+
+get_ipython().run_line_magic('pip', 'install -U openai')
+get_ipython().run_line_magic('pip', 'install -U agentops')
+
+
+from openai import OpenAI
+import agentops
+import os
+from dotenv import load_dotenv
+
+
+
+# Next, we'll grab our API keys. You can use dotenv like below or however else you like to load environment variables
+
+# In[2]:
+
+
+load_dotenv()
+OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") or ""
+AGENTOPS_API_KEY = os.getenv("AGENTOPS_API_KEY") or ""
+
+
+# Next we initialize the AgentOps client.
+
+# In[ ]:
+
+
+agentops.init(AGENTOPS_API_KEY, default_tags=["openai-sync-example"])
+
+
+# And we are all set! Note the session URL above; we will use it to track the chatbot.
+#
+# Let's create a simple chatbot that generates stories.
+
+# In[4]:
+
+
+client = OpenAI(api_key=OPENAI_API_KEY)
+
+system_prompt = """
+You are a master storyteller, with the ability to create vivid and engaging stories.
+You have experience in writing for children and adults alike.
+You are given a prompt and you need to generate a story based on the prompt.
+"""
+
+user_prompt = "Write a story about a cyber-warrior trapped in the imperial time period."
+
+messages = [
+ {"role": "system", "content": system_prompt},
+ {"role": "user", "content": user_prompt},
+]
+
+
+# In[ ]:
+
+
+response = client.chat.completions.create(
+ model="gpt-4o-mini",
+ messages=messages,
+)
+
+print(response.choices[0].message.content)
+
+
+# The response is a string that contains the story. We can track this with AgentOps by navigating to the session URL and viewing the run.
+
+# ## Streaming Version
+# We will demonstrate the streaming version of the API.
+
+# In[ ]:
+
+
+stream = client.chat.completions.create(
+ model="gpt-4o-mini",
+ messages=messages,
+ stream=True,
+)
+
+for chunk in stream:
+    print(chunk.choices[0].delta.content or "", end="")
+
+
+# Note that the response is a generator that yields chunks of the story. We can track this with AgentOps by navigating to the session URL and viewing the run.
+
+# In[ ]:
+
+
+agentops.end_session(end_state="Success", end_state_reason="The story was generated successfully.")
+
+
+# We end the session with a success state and a success reason. This is useful if you want to track the success or failure of the chatbot. In that case you can set the end state to failure and provide a reason. By default the session will have an indeterminate end state.
+#
+# All done!
diff --git a/pyproject.toml b/pyproject.toml
index f21dd03c7..1e274b2f8 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -49,6 +49,7 @@ test = [
"groq",
"ollama",
"mistralai",
+ "google-generativeai>=0.1.0",
# ;;
# The below is a really hard dependency, that can be installed only between python >=3.10,<3.13.
# CI will fail because all tests will automatically pull this dependency group;