
Commit de3fa6d

Integration tests!

1 parent d28f2c4
9 files changed: +345 −7 lines changed
.github/workflows/tests-integration.yaml

Lines changed: 31 additions & 0 deletions

```yaml
name: Integration Tests

on:
  push:
    branches: [main]
    paths:
      - 'src/**'
      - 'tests/**'
  workflow_dispatch:

jobs:
  run-integration-tests:
    timeout-minutes: 30
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v4

      - uses: astral-sh/setup-uv@v6
        with:
          python-version: 3.13
          activate-environment: true

      - name: Install dependencies
        run: |
          uv sync --group tests --extra all

      - name: Run Integration tests (parallel with xdist)
        env:
          MISTRAL_API_KEY: ${{ secrets.MISTRAL_API_KEY }}
        run: pytest -v tests/integration -n auto
```
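To reproduce the workflow locally, the two steps above amount to `uv sync --group tests --extra all` followed by `pytest -v tests/integration -n auto`, with `MISTRAL_API_KEY` exported in the environment.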

README.md

Lines changed: 3 additions & 2 deletions

```diff
@@ -9,7 +9,8 @@
 # any-llm

 [![Docs](https://github.com/mozilla-ai/any-llm/actions/workflows/docs.yaml/badge.svg)](https://github.com/mozilla-ai/any-llm/actions/workflows/docs.yaml/)
-[![Tests](https://github.com/mozilla-ai/any-agent/actions/workflows/tests-unit.yaml/badge.svg)](https://github.com/mozilla-ai/any-agent/actions/workflows/tests-unit.yaml/)
+[![Unit Tests](https://github.com/mozilla-ai/any-llm/actions/workflows/tests-unit.yaml/badge.svg)](https://github.com/mozilla-ai/any-llm/actions/workflows/tests-unit.yaml/)
+[![Integration Tests](https://github.com/mozilla-ai/any-llm/actions/workflows/tests-integration.yaml/badge.svg)](https://github.com/mozilla-ai/any-llm/actions/workflows/tests-integration.yaml/)
 ![Python 3.11+](https://img.shields.io/badge/python-3.11%2B-blue.svg)

 A single interface to use and evaluate different llm providers.
@@ -39,7 +40,7 @@ While the OpenAI API has become the de facto standard for LLM provider interface
 `any-llm` fills the gap by providing a simple, well-maintained interface that:
 - **Leverages official provider SDKs** when available, reducing maintenance burden and ensuring compatibility
 - **Stays framework-agnostic** so it can be used across different projects and use cases
-- **Provides active maintenance** we support this in our product ([any-agent](https://github.com/mozilla-ai/any-agent)) so we're motivated to maintain it.
+- **Provides active maintenance** we support this in our product ([any-llm](https://github.com/mozilla-ai/any-llm)) so we're motivated to maintain it.



```
docs/providers.md

Lines changed: 2 additions & 0 deletions

```diff
@@ -3,6 +3,8 @@
 `any-llm` supports the following providers:

 - [OpenAI](https://platform.openai.com/docs/api-reference)
+- [Anthropic](https://docs.anthropic.com/en/home)
+- [Google](https://cloud.google.com/vertex-ai/docs)
 - [Mistral](https://docs.mistral.ai/)
 - [Ollama](https://github.com/ollama/ollama)
 - [DeepSeek](https://platform.deepseek.com/)
```

pyproject.toml

Lines changed: 6 additions & 1 deletion

```diff
@@ -18,7 +18,7 @@ dependencies = [
 [project.optional-dependencies]

 all = [
-    "any-llm-sdk[mistral,anthropic]"
+    "any-llm-sdk[mistral,anthropic,google]"
 ]

 mistral = [
@@ -29,6 +29,10 @@ anthropic = [
     "anthropic",
 ]

+google = [
+    "google-genai",
+]
+
 [project.urls]
 Documentation = "https://mozilla-ai.github.io/any-llm/"
 Issues = "https://github.com/mozilla-ai/any-llm/issues"
@@ -89,6 +93,7 @@ lint = [
 tests = [
     "pytest>=8,<9",
     "mktestdocs>=0.2.4",
+    "pytest-xdist>=3.6.1",
 ]

 # For completeness, but 'uv sync --group dev' currently installs the others too.
```
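The new `google` extra matches the install hint raised by the provider below: `pip install 'any-llm-sdk[google]'` pulls in `google-genai`, and the extra is now part of `all`. `pytest-xdist` backs the `-n auto` flag used by the integration workflow.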
src/any_llm/providers/google/__init__.py

Lines changed: 3 additions & 0 deletions

```python
from .google import GoogleProvider

__all__ = ["GoogleProvider"]
```
src/any_llm/providers/google/google.py

Lines changed: 265 additions & 0 deletions

```python
import os
import json
from typing import Any

try:
    from google import genai
    from google.genai import types
except ImportError:
    msg = "google-genai is not installed. Please install it with `pip install any-llm-sdk[google]`"
    raise ImportError(msg)

from openai.types.chat.chat_completion import ChatCompletion, Choice
from openai.types.completion_usage import CompletionUsage
from openai.types.chat.chat_completion_message import ChatCompletionMessage
from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall, Function
from any_llm.provider import Provider, ApiConfig

DEFAULT_TEMPERATURE = 0.7


def _convert_kwargs(kwargs: dict[str, Any]) -> dict[str, Any]:
    """Format the kwargs for Google GenAI."""
    kwargs = kwargs.copy()

    # Convert tools if present
    if "tools" in kwargs:
        kwargs["tools"] = _convert_tool_spec(kwargs["tools"])

    # Handle unsupported parameters
    unsupported_params = ["response_format", "parallel_tool_calls"]
    for param in unsupported_params:
        if param in kwargs:
            kwargs.pop(param)

    return kwargs


def _convert_tool_spec(openai_tools: list[dict[str, Any]]) -> list[types.Tool]:
    """Convert OpenAI tool specification to Google GenAI format."""
    function_declarations = []

    for tool in openai_tools:
        if tool.get("type") != "function":
            continue

        function = tool["function"]
        parameters_dict = {
            "type": "object",
            "properties": {
                param_name: {
                    "type": param_info.get("type", "string"),
                    "description": param_info.get("description", ""),
                    **({"enum": param_info["enum"]} if "enum" in param_info else {}),
                }
                for param_name, param_info in function["parameters"]["properties"].items()
            },
            "required": function["parameters"].get("required", []),
        }

        function_declarations.append(
            types.FunctionDeclaration(
                name=function["name"],
                description=function.get("description", ""),
                parameters=types.Schema(**parameters_dict),
            )
        )

    return [types.Tool(function_declarations=function_declarations)]
```
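A quick illustration of what `_convert_tool_spec` produces, using a hypothetical `get_weather` tool (not part of this diff):

```python
# Hypothetical OpenAI-format tool; any such dict with type == "function" works.
openai_tool = {
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Get the weather for a location",
        "parameters": {
            "type": "object",
            "properties": {
                "location": {"type": "string", "description": "City name"},
                "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
            },
            "required": ["location"],
        },
    },
}

tools = _convert_tool_spec([openai_tool])
# -> [types.Tool(function_declarations=[
#        types.FunctionDeclaration(
#            name="get_weather",
#            description="Get the weather for a location",
#            parameters=types.Schema(
#                type="object",
#                properties={"location": ..., "unit": ...},  # enum preserved on "unit"
#                required=["location"],
#            ),
#        )
#    ])]
```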
```python
def _convert_messages(messages: list[dict[str, Any]]) -> list[types.Content]:
    """Convert messages to Google GenAI format."""
    formatted_messages = []

    for message in messages:
        if message["role"] == "system":
            # System messages are treated as user messages in GenAI
            parts = [types.Part.from_text(text=message["content"])]
            formatted_messages.append(types.Content(role="user", parts=parts))
        elif message["role"] == "user":
            parts = [types.Part.from_text(text=message["content"])]
            formatted_messages.append(types.Content(role="user", parts=parts))
        elif message["role"] == "assistant":
            if "tool_calls" in message and message["tool_calls"]:
                # Handle function calls
                tool_call = message["tool_calls"][0]  # Assuming single function call for now
                function_call = tool_call["function"]

                parts = [
                    types.Part.from_function_call(
                        name=function_call["name"], args=json.loads(function_call["arguments"])
                    )
                ]
            else:
                # Handle regular text messages
                parts = [types.Part.from_text(text=message["content"])]

            formatted_messages.append(types.Content(role="model", parts=parts))
        elif message["role"] == "tool":
            # Convert tool result to function response
            try:
                content_json = json.loads(message["content"])
                part = types.Part.from_function_response(name=message.get("name", "unknown"), response=content_json)
                formatted_messages.append(types.Content(role="function", parts=[part]))
            except json.JSONDecodeError:
                # If not JSON, treat as text
                part = types.Part.from_function_response(
                    name=message.get("name", "unknown"), response={"result": message["content"]}
                )
                formatted_messages.append(types.Content(role="function", parts=[part]))

    return formatted_messages
```
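For reference, the role mapping this conversion implies: system and user turns both become `user` content, since GenAI has no separate system role here.

```python
msgs = _convert_messages([
    {"role": "system", "content": "You are terse."},
    {"role": "user", "content": "Hi"},
])
# -> [Content(role="user", parts=[Part(text="You are terse.")]),
#     Content(role="user", parts=[Part(text="Hi")])]
# Assistant turns map to role="model"; tool results map to role="function".
```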
```python
def _convert_response(response: Any) -> ChatCompletion:
    """Convert Google GenAI response to OpenAI ChatCompletion format."""
    # Check if the response contains function calls
    if (
        hasattr(response.candidates[0].content.parts[0], "function_call")
        and response.candidates[0].content.parts[0].function_call
    ):
        function_call = response.candidates[0].content.parts[0].function_call

        # Convert the function call arguments to a dictionary
        args_dict = {}
        if hasattr(function_call, "args") and function_call.args:
            for key, value in function_call.args.items():
                args_dict[key] = value

        tool_calls = [
            ChatCompletionMessageToolCall(
                id=f"call_{hash(function_call.name)}",
                type="function",
                function=Function(name=function_call.name, arguments=json.dumps(args_dict)),
            )
        ]

        message = ChatCompletionMessage(
            content=None,
            role="assistant",
            tool_calls=tool_calls,
        )

        finish_reason = "tool_calls"
    else:
        # Handle regular text response
        content = response.candidates[0].content.parts[0].text
        message = ChatCompletionMessage(
            content=content,
            role="assistant",
            tool_calls=None,
        )

        finish_reason = "stop"

    # Create the choice
    choice = Choice(
        finish_reason=finish_reason,  # type: ignore
        index=0,
        message=message,
    )

    # Create usage information (extract if available)
    usage = CompletionUsage(
        completion_tokens=getattr(response.usage_metadata, "candidates_token_count", 0)
        if hasattr(response, "usage_metadata")
        else 0,
        prompt_tokens=getattr(response.usage_metadata, "prompt_token_count", 0)
        if hasattr(response, "usage_metadata")
        else 0,
        total_tokens=getattr(response.usage_metadata, "total_token_count", 0)
        if hasattr(response, "usage_metadata")
        else 0,
    )

    # Build the final ChatCompletion object
    return ChatCompletion(
        id="google_genai_response",
        model="google/genai",
        object="chat.completion",
        created=0,
        choices=[choice],
        usage=usage,
    )


class GoogleProvider(Provider):
    def __init__(self, config: ApiConfig) -> None:
        """Initialize Google GenAI provider."""
        # Check if we should use Vertex AI or Gemini Developer API
        self.use_vertex_ai = os.getenv("GOOGLE_USE_VERTEX_AI", "false").lower() == "true"

        if self.use_vertex_ai:
            # Vertex AI configuration
            self.project_id = os.getenv("GOOGLE_PROJECT_ID")
            self.location = os.getenv("GOOGLE_REGION", "us-central1")

            if not self.project_id:
                msg = "GOOGLE_PROJECT_ID environment variable is required for Vertex AI"
                raise ValueError(msg)

            # Initialize client for Vertex AI
            self.client = genai.Client(vertexai=True, project=self.project_id, location=self.location)
        else:
            # Gemini Developer API configuration
            api_key = os.getenv("GEMINI_API_KEY") or os.getenv("GOOGLE_API_KEY")

            if not api_key:
                msg = "GEMINI_API_KEY or GOOGLE_API_KEY environment variable is required for Gemini Developer API"
                raise ValueError(msg)

            # Initialize client for Gemini Developer API
            self.client = genai.Client(api_key=api_key)

    def completion(
        self,
        model: str,
        messages: list[dict[str, Any]],
        **kwargs: Any,
    ) -> ChatCompletion:
        """Create a chat completion using Google GenAI."""
        kwargs = _convert_kwargs(kwargs)

        # Set the temperature if provided, otherwise use the default
        temperature = kwargs.get("temperature", DEFAULT_TEMPERATURE)

        # Convert messages to GenAI format
        formatted_messages = _convert_messages(messages)

        # Handle tools if provided
        tools = kwargs.get("tools")

        # Create generation config
        generation_config = types.GenerateContentConfig(
            temperature=temperature,
            tools=tools,
        )

        # Generate content using the client
        # For now, let's use a simple string-based approach
        content_text = ""

        if len(formatted_messages) == 1 and formatted_messages[0].role == "user":
            # Single user message
            parts = formatted_messages[0].parts
            if parts and hasattr(parts[0], "text"):
                content_text = parts[0].text or ""
            else:
                content_text = "Hello"  # fallback
        else:
            # Multiple messages - concatenate user messages for simplicity
            content_parts = []
            for msg in formatted_messages:
                if msg.role == "user" and msg.parts:
                    if hasattr(msg.parts[0], "text") and msg.parts[0].text:
                        content_parts.append(msg.parts[0].text)

            content_text = "\n".join(content_parts)
            if not content_text:
                content_text = "Hello"  # fallback

        response = self.client.models.generate_content(model=model, contents=content_text, config=generation_config)

        # Convert and return the response
        return _convert_response(response)
```
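A minimal usage sketch. The import path follows the package `__init__` above; `ApiConfig()` with no arguments and the model id `gemini-2.0-flash` are assumptions, not shown in this diff:

```python
import os

from any_llm.provider import ApiConfig
from any_llm.providers.google import GoogleProvider

os.environ.setdefault("GEMINI_API_KEY", "...")  # or GOOGLE_API_KEY

provider = GoogleProvider(ApiConfig())  # assumed: ApiConfig takes no required args
response = provider.completion(
    model="gemini-2.0-flash",  # hypothetical model id
    messages=[{"role": "user", "content": "Say hello"}],
)
print(response.choices[0].message.content)
```

Setting `GOOGLE_USE_VERTEX_AI=true` (with `GOOGLE_PROJECT_ID`, and optionally `GOOGLE_REGION`) routes the client through Vertex AI instead of the Gemini Developer API.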

src/any_llm/providers/openai/base.py

Lines changed: 0 additions & 4 deletions

```diff
@@ -39,10 +39,6 @@ def __init__(self, config: ApiConfig) -> None:

         # Get API key from environment if not provided in config
         api_key = config.api_key or os.getenv(self.ENV_API_KEY_NAME)
-        if api_key is None:
-            msg = f"No {self.PROVIDER_NAME} API key provided. Please provide it in the config or set the {self.ENV_API_KEY_NAME} environment variable."
-            raise ValueError(msg)
-
         client_kwargs["api_key"] = api_key

         # Create the OpenAI client
```
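With the eager check removed, a missing key is passed through to the client as `api_key=None`; presumably the underlying OpenAI SDK (or the first request) is left to raise its own error when no key can be resolved.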

tests/integration/__init__.py

Whitespace-only changes.
