diff --git a/pydantic_ai_slim/pydantic_ai/models/openrouter.py b/pydantic_ai_slim/pydantic_ai/models/openrouter.py
new file mode 100644
index 0000000000..b2cf553eb0
--- /dev/null
+++ b/pydantic_ai_slim/pydantic_ai/models/openrouter.py
@@ -0,0 +1,274 @@
+from typing import Any, Literal, cast
+
+from openai import AsyncOpenAI
+from openai.types.chat import ChatCompletion
+from typing_extensions import TypedDict
+
+from ..messages import ModelResponse
+from ..profiles import ModelProfileSpec
+from ..providers import Provider
+from ..settings import ModelSettings
+from . import ModelRequestParameters
+from .openai import OpenAIChatModel, OpenAIChatModelSettings
+
+
+class OpenRouterMaxprice(TypedDict, total=False):
+    """The maximum price you want to pay for this request, in USD. For `prompt` and `completion`, the price is per million tokens."""
+
+    prompt: int
+    completion: int
+    image: int
+    audio: int
+    request: int
+
+
+LatestOpenRouterSlugs = Literal[
+    'z-ai',
+    'cerebras',
+    'venice',
+    'moonshotai',
+    'morph',
+    'stealth',
+    'wandb',
+    'klusterai',
+    'openai',
+    'sambanova',
+    'amazon-bedrock',
+    'mistral',
+    'nextbit',
+    'atoma',
+    'ai21',
+    'minimax',
+    'baseten',
+    'anthropic',
+    'featherless',
+    'groq',
+    'lambda',
+    'azure',
+    'ncompass',
+    'deepseek',
+    'hyperbolic',
+    'crusoe',
+    'cohere',
+    'mancer',
+    'avian',
+    'perplexity',
+    'novita',
+    'siliconflow',
+    'switchpoint',
+    'xai',
+    'inflection',
+    'fireworks',
+    'deepinfra',
+    'inference-net',
+    'inception',
+    'atlas-cloud',
+    'nvidia',
+    'alibaba',
+    'friendli',
+    'infermatic',
+    'targon',
+    'ubicloud',
+    'aion-labs',
+    'liquid',
+    'nineteen',
+    'cloudflare',
+    'nebius',
+    'chutes',
+    'enfer',
+    'crofai',
+    'open-inference',
+    'phala',
+    'gmicloud',
+    'meta',
+    'relace',
+    'parasail',
+    'together',
+    'google-ai-studio',
+    'google-vertex',
+]
+"""Known providers in the OpenRouter marketplace."""
+
+OpenRouterSlug = str | LatestOpenRouterSlugs
+"""Possible OpenRouter provider slugs.
+
+Since OpenRouter is constantly updating their list of providers, we explicitly list some known providers but
+allow any name in the type hints.
+See [the OpenRouter API](https://openrouter.ai/docs/api-reference/list-available-providers) for a full list.
+"""
+
+Transforms = Literal['middle-out']
+"""Available message transforms for OpenRouter models with limited token windows.
+
+Currently only 'middle-out' is supported, but the list is expected to grow in the future.
+"""
+
+
+class OpenRouterPreferences(TypedDict, total=False):
+    """Represents the 'Provider' object from the OpenRouter API."""
+
+    order: list[OpenRouterSlug]
+    """List of provider slugs to try in order (e.g. ["anthropic", "openai"]). [See details](https://openrouter.ai/docs/features/provider-routing#ordering-specific-providers)"""
+
+    allow_fallbacks: bool
+    """Whether to allow backup providers when the primary is unavailable. [See details](https://openrouter.ai/docs/features/provider-routing#disabling-fallbacks)"""
+
+    require_parameters: bool
+    """Only use providers that support all parameters in your request."""
+
+    data_collection: Literal['allow', 'deny']
+    """Control whether to use providers that may store data. [See details](https://openrouter.ai/docs/features/provider-routing#requiring-providers-to-comply-with-data-policies)"""
+
+    zdr: bool
+    """Restrict routing to only ZDR (Zero Data Retention) endpoints. [See details](https://openrouter.ai/docs/features/provider-routing#zero-data-retention-enforcement)"""
+
+    only: list[OpenRouterSlug]
+    """List of provider slugs to allow for this request. [See details](https://openrouter.ai/docs/features/provider-routing#allowing-only-specific-providers)"""
+
+    ignore: list[str]
+    """List of provider slugs to skip for this request. [See details](https://openrouter.ai/docs/features/provider-routing#ignoring-providers)"""
+
+    quantizations: list[Literal['int4', 'int8', 'fp4', 'fp6', 'fp8', 'fp16', 'bf16', 'fp32', 'unknown']]
+    """List of quantization levels to filter by (e.g. ["int4", "int8"]). [See details](https://openrouter.ai/docs/features/provider-routing#quantization)"""
+
+    sort: Literal['price', 'throughput', 'latency']
+    """Sort providers by price, throughput, or latency. [See details](https://openrouter.ai/docs/features/provider-routing#provider-sorting)"""
+
+    max_price: OpenRouterMaxprice
+    """The maximum price you want to pay for this request. [See details](https://openrouter.ai/docs/features/provider-routing#max-price)"""
+
+
+class OpenRouterReasoning(TypedDict, total=False):
+    """Configuration for reasoning tokens in OpenRouter requests.
+
+    Reasoning tokens allow models to show their step-by-step thinking process.
+    You can configure this using either OpenAI-style effort levels or Anthropic-style
+    token limits, but not both simultaneously.
+    """
+
+    effort: Literal['high', 'medium', 'low']
+    """OpenAI-style reasoning effort level. Cannot be used with max_tokens."""
+
+    max_tokens: int
+    """Anthropic-style specific token limit for reasoning. Cannot be used with effort."""
+
+    exclude: bool
+    """Whether to exclude reasoning tokens from the response. Default is False. All models support this."""
+
+    enabled: bool
+    """Whether to enable reasoning with default parameters. Default is inferred from effort or max_tokens."""
+
+
+class OpenRouterModelSettings(ModelSettings, total=False):
+    """Settings used for an OpenRouter model request."""
+
+    # ALL FIELDS MUST BE `openrouter_` PREFIXED SO YOU CAN MERGE THEM WITH OTHER MODELS.
+
+    openrouter_models: list[str]
+    """A list of fallback models.
+
+    These models will be tried, in order, if the main model returns an error. [See details](https://openrouter.ai/docs/features/model-routing#the-models-parameter)
+    """
+
+    openrouter_preferences: OpenRouterPreferences
+    """OpenRouter routes requests to the best available providers for your model. By default, requests are load balanced across the top providers to maximize uptime.
+
+    You can customize how your requests are routed using the provider object. [See more](https://openrouter.ai/docs/features/provider-routing)"""
+
+    openrouter_preset: str
+    """Presets allow you to separate your LLM configuration from your code.
+
+    Create and manage presets through the OpenRouter web application to control provider routing, model selection, system prompts, and other parameters, then reference them in OpenRouter API requests. [See more](https://openrouter.ai/docs/features/presets)"""
+
+    openrouter_transforms: list[Transforms]
+    """Transforms to apply to prompts that exceed the model's maximum context size.
+
+    Transforms work by removing or truncating messages from the middle of the prompt, until the prompt fits within the model's context window. [See more](https://openrouter.ai/docs/features/message-transforms)
+    """
+
+    openrouter_reasoning: OpenRouterReasoning
+    """Controls reasoning tokens in the request.
+
+    The reasoning config object consolidates settings for controlling reasoning strength across different models. [See more](https://openrouter.ai/docs/use-cases/reasoning-tokens)
+    """
+
+
+def _openrouter_settings_to_openai_settings(model_settings: OpenRouterModelSettings) -> OpenAIChatModelSettings:
+    """Transforms an `OpenRouterModelSettings` object into an `OpenAIChatModelSettings` object.
+
+    Args:
+        model_settings: The `OpenRouterModelSettings` object to transform.
+
+    Returns:
+        An `OpenAIChatModelSettings` object with equivalent settings.
+    """
+    extra_body: dict[str, Any] = {}
+
+    if models := model_settings.get('openrouter_models'):
+        extra_body['models'] = models
+    if provider := model_settings.get('openrouter_preferences'):
+        extra_body['provider'] = provider
+    if preset := model_settings.get('openrouter_preset'):
+        extra_body['preset'] = preset
+    if transforms := model_settings.get('openrouter_transforms'):
+        extra_body['transforms'] = transforms
+    if reasoning := model_settings.get('openrouter_reasoning'):
+        extra_body['reasoning'] = reasoning
+
+    base_keys = ModelSettings.__annotations__.keys()
+    base_data: dict[str, Any] = {k: model_settings[k] for k in base_keys if k in model_settings}
+
+    # `extra_body` is itself a base `ModelSettings` key; merge it here rather than passing it twice below.
+    if user_extra_body := base_data.pop('extra_body', None):
+        extra_body = {**cast(dict[str, Any], user_extra_body), **extra_body}
+
+    new_settings = OpenAIChatModelSettings(**base_data, extra_body=extra_body)
+
+    return new_settings
+
+
+class OpenRouterModel(OpenAIChatModel):
+    """Extends `OpenAIChatModel` with OpenRouter-specific settings and response metadata."""
+
+    def __init__(
+        self,
+        model_name: str,
+        *,
+        provider: Literal['openrouter'] | Provider[AsyncOpenAI] = 'openrouter',
+        profile: ModelProfileSpec | None = None,
+        settings: ModelSettings | None = None,
+    ):
+        """Initialize an OpenRouter model.
+
+        Args:
+            model_name: The name of the model to use.
+            provider: The provider to use for authentication and API access. Can be either the string
+                'openrouter' or an instance of `Provider[AsyncOpenAI]`. Defaults to 'openrouter', which
+                creates a provider that reads the API key from the `OPENROUTER_API_KEY` environment variable.
+            profile: The model profile to use. Defaults to a profile picked by the provider based on the model name.
+            settings: Model-specific settings that will be used as defaults for this model.
+        """
+        super().__init__(model_name, provider=provider, profile=profile, settings=settings)
+
+    def prepare_request(
+        self,
+        model_settings: ModelSettings | None,
+        model_request_parameters: ModelRequestParameters,
+    ) -> tuple[ModelSettings | None, ModelRequestParameters]:
+        merged_settings, customized_parameters = super().prepare_request(model_settings, model_request_parameters)
+        new_settings = _openrouter_settings_to_openai_settings(cast(OpenRouterModelSettings, merged_settings or {}))
+        return new_settings, customized_parameters
+
+    def _process_response(self, response: ChatCompletion | str) -> ModelResponse:
+        model_response = super()._process_response(response=response)
+        response = cast(ChatCompletion, response)  # The base implementation raises on raw string responses, so this cast is safe.
+
+        # Preserve any provider details already set by the base implementation.
+        provider_details: dict[str, Any] = dict(model_response.provider_details or {})
+
+        if openrouter_provider := getattr(response, 'provider', None):  # pragma: lax no cover
+            provider_details['downstream_provider'] = openrouter_provider
+
+        model_response.provider_details = provider_details
+
+        return model_response
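Before the provider changes below, a minimal usage sketch of the API this patch adds (not part of the patch itself): the model names, settings values, and attribution strings are illustrative placeholders, and the calls mirror those exercised in tests/models/test_openrouter.py.

    import asyncio

    from pydantic_ai import ModelRequest
    from pydantic_ai.direct import model_request
    from pydantic_ai.models.openrouter import OpenRouterModel, OpenRouterModelSettings
    from pydantic_ai.providers.openrouter import OpenRouterProvider


    async def main() -> None:
        # Optional attribution headers identify your app on openrouter.ai.
        provider = OpenRouterProvider(api_key='your-api-key', http_referer='https://example.com', x_title='Example App')
        model = OpenRouterModel('google/gemini-2.5-flash-lite', provider=provider)
        settings = OpenRouterModelSettings(
            openrouter_models=['x-ai/grok-4'],  # fallback models, tried in order if the primary errors
            openrouter_preferences={'sort': 'price'},  # provider routing: cheapest provider first
            openrouter_transforms=['middle-out'],  # compress prompts that overflow the context window
            openrouter_reasoning={'effort': 'medium'},  # request reasoning tokens at medium effort
        )
        response = await model_request(model, [ModelRequest.user_text_prompt('Tell me a joke.')], model_settings=settings)
        print(response.parts[0])
        # The upstream provider OpenRouter actually routed to:
        print((response.provider_details or {}).get('downstream_provider'))


    asyncio.run(main())

Because `OpenRouterModelSettings` is a plain `TypedDict` extending `ModelSettings`, base fields such as `temperature` can be passed alongside the `openrouter_*` keys and are forwarded unchanged.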
diff --git a/pydantic_ai_slim/pydantic_ai/providers/openrouter.py b/pydantic_ai_slim/pydantic_ai/providers/openrouter.py
index 33745ada29..d54ad6f343 100644
--- a/pydantic_ai_slim/pydantic_ai/providers/openrouter.py
+++ b/pydantic_ai_slim/pydantic_ai/providers/openrouter.py
@@ -81,6 +81,12 @@ def __init__(self, *, api_key: str) -> None: ...
@overload def __init__(self, *, api_key: str, http_client: httpx.AsyncClient) -> None: ... + @overload + def __init__(self, *, api_key: str, http_referer: str, x_title: str) -> None: ... + + @overload + def __init__(self, *, api_key: str, http_referer: str, x_title: str, http_client: httpx.AsyncClient) -> None: ... + @overload def __init__(self, *, openai_client: AsyncOpenAI | None = None) -> None: ... @@ -88,6 +94,8 @@ def __init__( self, *, api_key: str | None = None, + http_referer: str | None = None, + x_title: str | None = None, openai_client: AsyncOpenAI | None = None, http_client: httpx.AsyncClient | None = None, ) -> None: @@ -98,10 +106,20 @@ def __init__( 'to use the OpenRouter provider.' ) + attribution_headers: dict[str, str] = {} + if http_referer := http_referer or os.getenv('OPENROUTER_HTTP_REFERER'): + attribution_headers['HTTP-Referer'] = http_referer + if x_title := x_title or os.getenv('OPENROUTER_X_TITLE'): + attribution_headers['X-Title'] = x_title + if openai_client is not None: self._client = openai_client elif http_client is not None: - self._client = AsyncOpenAI(base_url=self.base_url, api_key=api_key, http_client=http_client) + self._client = AsyncOpenAI( + base_url=self.base_url, api_key=api_key, http_client=http_client, default_headers=attribution_headers + ) else: http_client = cached_async_http_client(provider='openrouter') - self._client = AsyncOpenAI(base_url=self.base_url, api_key=api_key, http_client=http_client) + self._client = AsyncOpenAI( + base_url=self.base_url, api_key=api_key, http_client=http_client, default_headers=attribution_headers + ) diff --git a/tests/models/cassettes/test_openrouter/test_openrouter_errors_raised.yaml b/tests/models/cassettes/test_openrouter/test_openrouter_errors_raised.yaml new file mode 100644 index 0000000000..dacb9f72c9 --- /dev/null +++ b/tests/models/cassettes/test_openrouter/test_openrouter_errors_raised.yaml @@ -0,0 +1,161 @@ +interactions: +- request: + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '158' + content-type: + - application/json + host: + - openrouter.ai + method: POST + parsed_body: + messages: + - content: Be helpful. + role: system + - content: Tell me a joke. + role: user + model: google/gemini-2.0-flash-exp:free + stream: false + uri: https://openrouter.ai/api/v1/chat/completions + response: + headers: + access-control-allow-origin: + - '*' + connection: + - keep-alive + content-type: + - application/json + permissions-policy: + - payment=(self "https://checkout.stripe.com" "https://connect-js.stripe.com" "https://js.stripe.com" "https://*.js.stripe.com" + "https://hooks.stripe.com") + referrer-policy: + - no-referrer, strict-origin-when-cross-origin + transfer-encoding: + - chunked + vary: + - Accept-Encoding + parsed_body: + error: + code: 429 + message: Provider returned error + metadata: + provider_name: Google + raw: 'google/gemini-2.0-flash-exp:free is temporarily rate-limited upstream. Please retry shortly, or add your own + key to accumulate your rate limits: https://openrouter.ai/settings/integrations' + user_id: user_2wT5ElBE4Es3R4QrNLpZiXICmQP + status: + code: 429 + message: Too Many Requests +- request: + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '158' + content-type: + - application/json + host: + - openrouter.ai + method: POST + parsed_body: + messages: + - content: Be helpful. 
+ role: system + - content: Tell me a joke. + role: user + model: google/gemini-2.0-flash-exp:free + stream: false + uri: https://openrouter.ai/api/v1/chat/completions + response: + headers: + access-control-allow-origin: + - '*' + connection: + - keep-alive + content-type: + - application/json + permissions-policy: + - payment=(self "https://checkout.stripe.com" "https://connect-js.stripe.com" "https://js.stripe.com" "https://*.js.stripe.com" + "https://hooks.stripe.com") + referrer-policy: + - no-referrer, strict-origin-when-cross-origin + transfer-encoding: + - chunked + vary: + - Accept-Encoding + parsed_body: + error: + code: 429 + message: Provider returned error + metadata: + provider_name: Google + raw: 'google/gemini-2.0-flash-exp:free is temporarily rate-limited upstream. Please retry shortly, or add your own + key to accumulate your rate limits: https://openrouter.ai/settings/integrations' + user_id: user_2wT5ElBE4Es3R4QrNLpZiXICmQP + status: + code: 429 + message: Too Many Requests +- request: + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '158' + content-type: + - application/json + host: + - openrouter.ai + method: POST + parsed_body: + messages: + - content: Be helpful. + role: system + - content: Tell me a joke. + role: user + model: google/gemini-2.0-flash-exp:free + stream: false + uri: https://openrouter.ai/api/v1/chat/completions + response: + headers: + access-control-allow-origin: + - '*' + connection: + - keep-alive + content-type: + - application/json + permissions-policy: + - payment=(self "https://checkout.stripe.com" "https://connect-js.stripe.com" "https://js.stripe.com" "https://*.js.stripe.com" + "https://hooks.stripe.com") + referrer-policy: + - no-referrer, strict-origin-when-cross-origin + transfer-encoding: + - chunked + vary: + - Accept-Encoding + parsed_body: + error: + code: 429 + message: Provider returned error + metadata: + provider_name: Google + raw: 'google/gemini-2.0-flash-exp:free is temporarily rate-limited upstream. 
Please retry shortly, or add your own + key to accumulate your rate limits: https://openrouter.ai/settings/integrations' + user_id: user_2wT5ElBE4Es3R4QrNLpZiXICmQP + status: + code: 429 + message: Too Many Requests +version: 1 diff --git a/tests/models/cassettes/test_openrouter/test_openrouter_with_native_options.yaml b/tests/models/cassettes/test_openrouter/test_openrouter_with_native_options.yaml new file mode 100644 index 0000000000..b073b87179 --- /dev/null +++ b/tests/models/cassettes/test_openrouter/test_openrouter_with_native_options.yaml @@ -0,0 +1,82 @@ +interactions: +- request: + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '193' + content-type: + - application/json + host: + - openrouter.ai + method: POST + parsed_body: + messages: + - content: Who are you + role: user + model: google/gemini-2.0-flash-exp:free + models: + - x-ai/grok-4 + provider: + only: + - xai + stream: false + transforms: + - middle-out + uri: https://openrouter.ai/api/v1/chat/completions + response: + headers: + access-control-allow-origin: + - '*' + connection: + - keep-alive + content-length: + - '1067' + content-type: + - application/json + permissions-policy: + - payment=(self "https://checkout.stripe.com" "https://connect-js.stripe.com" "https://js.stripe.com" "https://*.js.stripe.com" + "https://hooks.stripe.com") + referrer-policy: + - no-referrer, strict-origin-when-cross-origin + transfer-encoding: + - chunked + vary: + - Accept-Encoding + parsed_body: + choices: + - finish_reason: stop + index: 0 + logprobs: null + message: + content: |- + I'm Grok, a helpful and maximally truthful AI built by xAI. I'm not based on any other companies' models—instead, I'm inspired by the Hitchhiker's Guide to the Galaxy and JARVIS from Iron Man. My goal is to assist with questions, provide information, and maybe crack a joke or two along the way. + + What can I help you with today? 
+ reasoning: null + refusal: null + role: assistant + native_finish_reason: stop + created: 1759509677 + id: gen-1759509677-MpJiZ3ZkiGU3lnbM8QKo + model: x-ai/grok-4 + object: chat.completion + provider: xAI + system_fingerprint: fp_19e21a36c0 + usage: + completion_tokens: 240 + completion_tokens_details: + reasoning_tokens: 165 + prompt_tokens: 687 + prompt_tokens_details: + audio_tokens: 0 + cached_tokens: 682 + total_tokens: 927 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/models/cassettes/test_openrouter/test_openrouter_with_preset.yaml b/tests/models/cassettes/test_openrouter/test_openrouter_with_preset.yaml new file mode 100644 index 0000000000..bd85de5b07 --- /dev/null +++ b/tests/models/cassettes/test_openrouter/test_openrouter_with_preset.yaml @@ -0,0 +1,75 @@ +interactions: +- request: + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - '131' + content-type: + - application/json + host: + - openrouter.ai + method: POST + parsed_body: + messages: + - content: Trains + role: user + model: google/gemini-2.5-flash-lite + preset: '@preset/comedian' + stream: false + uri: https://openrouter.ai/api/v1/chat/completions + response: + headers: + access-control-allow-origin: + - '*' + connection: + - keep-alive + content-length: + - '617' + content-type: + - application/json + permissions-policy: + - payment=(self "https://checkout.stripe.com" "https://connect-js.stripe.com" "https://js.stripe.com" "https://*.js.stripe.com" + "https://hooks.stripe.com") + referrer-policy: + - no-referrer, strict-origin-when-cross-origin + transfer-encoding: + - chunked + vary: + - Accept-Encoding + parsed_body: + choices: + - finish_reason: stop + index: 0 + logprobs: null + message: + content: |- + Why did the train break up with the track? + + Because it felt like their relationship was going nowhere. 
+ reasoning: null + refusal: null + role: assistant + native_finish_reason: STOP + created: 1759510642 + id: gen-1759510642-J9qupM2EtKoYTfG7ehDn + model: google/gemini-2.5-flash-lite + object: chat.completion + provider: Google + usage: + completion_tokens: 21 + completion_tokens_details: + image_tokens: 0 + reasoning_tokens: 0 + prompt_tokens: 31 + prompt_tokens_details: + cached_tokens: 0 + total_tokens: 52 + status: + code: 200 + message: OK +version: 1 diff --git a/tests/models/test_openrouter.py b/tests/models/test_openrouter.py new file mode 100644 index 0000000000..2074bae30f --- /dev/null +++ b/tests/models/test_openrouter.py @@ -0,0 +1,67 @@ +from typing import cast + +import pytest +from inline_snapshot import snapshot + +from pydantic_ai import Agent, ModelHTTPError, ModelRequest, TextPart +from pydantic_ai.direct import model_request + +from ..conftest import try_import + +with try_import() as imports_successful: + from pydantic_ai.models.openrouter import OpenRouterModel, OpenRouterModelSettings + from pydantic_ai.providers.openrouter import OpenRouterProvider + +pytestmark = [ + pytest.mark.skipif(not imports_successful(), reason='openai not installed'), + pytest.mark.vcr, + pytest.mark.anyio, +] + + +async def test_openrouter_with_preset(allow_model_requests: None, openrouter_api_key: str) -> None: + provider = OpenRouterProvider(api_key=openrouter_api_key) + model = OpenRouterModel('google/gemini-2.5-flash-lite', provider=provider) + settings = OpenRouterModelSettings(openrouter_preset='@preset/comedian') + response = await model_request(model, [ModelRequest.user_text_prompt('Trains')], model_settings=settings) + text_part = cast(TextPart, response.parts[0]) + assert text_part.content == snapshot( + """\ +Why did the train break up with the track? + +Because it felt like their relationship was going nowhere.\ +""" + ) + + +async def test_openrouter_with_native_options(allow_model_requests: None, openrouter_api_key: str) -> None: + provider = OpenRouterProvider(api_key=openrouter_api_key) + model = OpenRouterModel('google/gemini-2.0-flash-exp:free', provider=provider) + # These specific settings will force OpenRouter to use the fallback model, since Gemini is not available via the xAI provider. + settings = OpenRouterModelSettings( + openrouter_models=['x-ai/grok-4'], + openrouter_transforms=['middle-out'], + openrouter_preferences={'only': ['xai']}, + ) + response = await model_request(model, [ModelRequest.user_text_prompt('Who are you')], model_settings=settings) + text_part = cast(TextPart, response.parts[0]) + assert text_part.content == snapshot( + """\ +I'm Grok, a helpful and maximally truthful AI built by xAI. I'm not based on any other companies' models—instead, I'm inspired by the Hitchhiker's Guide to the Galaxy and JARVIS from Iron Man. My goal is to assist with questions, provide information, and maybe crack a joke or two along the way. 
+ +What can I help you with today?\ +""" + ) + assert response.provider_details is not None + assert response.provider_details['downstream_provider'] == 'xAI' + + +async def test_openrouter_errors_raised(allow_model_requests: None, openrouter_api_key: str) -> None: + provider = OpenRouterProvider(api_key=openrouter_api_key) + model = OpenRouterModel('google/gemini-2.0-flash-exp:free', provider=provider) + agent = Agent(model, instructions='Be helpful.', retries=1) + with pytest.raises(ModelHTTPError) as exc_info: + await agent.run('Tell me a joke.') + assert str(exc_info.value) == snapshot( + "status_code: 429, model_name: google/gemini-2.0-flash-exp:free, body: {'code': 429, 'message': 'Provider returned error', 'metadata': {'provider_name': 'Google', 'raw': 'google/gemini-2.0-flash-exp:free is temporarily rate-limited upstream. Please retry shortly, or add your own key to accumulate your rate limits: https://openrouter.ai/settings/integrations'}}" + ) diff --git a/tests/providers/test_openrouter.py b/tests/providers/test_openrouter.py index acdf166c50..a070b936b7 100644 --- a/tests/providers/test_openrouter.py +++ b/tests/providers/test_openrouter.py @@ -25,7 +25,7 @@ with try_import() as imports_successful: import openai - from pydantic_ai.models.openai import OpenAIChatModel + from pydantic_ai.models.openrouter import OpenRouterModel from pydantic_ai.providers.openrouter import OpenRouterProvider @@ -44,6 +44,16 @@ def test_openrouter_provider(): assert provider.client.api_key == 'api-key' +def test_openrouter_provider_with_app_attribution(): + provider = OpenRouterProvider(api_key='api-key', http_referer='test.com', x_title='test') + assert provider.name == 'openrouter' + assert provider.base_url == 'https://openrouter.ai/api/v1' + assert isinstance(provider.client, openai.AsyncOpenAI) + assert provider.client.api_key == 'api-key' + assert provider.client.default_headers['X-Title'] == 'test' + assert provider.client.default_headers['HTTP-Referer'] == 'test.com' + + def test_openrouter_provider_need_api_key(env: TestEnv) -> None: env.remove('OPENROUTER_API_KEY') with pytest.raises( @@ -70,7 +80,7 @@ def test_openrouter_pass_openai_client() -> None: async def test_openrouter_with_google_model(allow_model_requests: None, openrouter_api_key: str) -> None: provider = OpenRouterProvider(api_key=openrouter_api_key) - model = OpenAIChatModel('google/gemini-2.0-flash-exp:free', provider=provider) + model = OpenRouterModel('google/gemini-2.0-flash-exp:free', provider=provider) agent = Agent(model, instructions='Be helpful.') response = await agent.run('Tell me a joke.') assert response.output == snapshot("""\