|
| 1 | +"""Tests of pydantic-ai actually connecting to OpenAI and Gemini models. |
| 2 | +
|
| 3 | +WARNING: running these tests will consume your OpenAI and Gemini credits. |
| 4 | +""" |
| 5 | + |
| 6 | +import os |
| 7 | + |
| 8 | +import httpx |
| 9 | +import pytest |
| 10 | +from pydantic import BaseModel |
| 11 | + |
| 12 | +from pydantic_ai import Agent |
| 13 | +from pydantic_ai.models.gemini import GeminiModel |
| 14 | +from pydantic_ai.models.openai import OpenAIModel |
| 15 | + |
# Module-wide marks: these tests hit real OpenAI/Gemini endpoints and spend money,
# so they only run when the operator explicitly opts in via the env var below.
# `anyio` lets the async test functions run without per-test decorators.
pytestmark = [
    pytest.mark.skipif(os.getenv('PYDANTIC_AI_LIVE_TEST_DANGEROUS') != 'CHARGE-ME!', reason='live tests disabled'),
    pytest.mark.anyio,
]
| 20 | + |
| 21 | + |
@pytest.fixture
async def http_client():
    """Yield a shared async HTTP client with a 30-second timeout for the live API calls."""
    async with httpx.AsyncClient(timeout=30) as shared_client:
        yield shared_client
| 26 | + |
| 27 | + |
async def test_openai(http_client: httpx.AsyncClient):
    """Live smoke test: plain-text OpenAI completion answers a factual question.

    Checks both the answer content and that token usage was reported.
    """
    agent = Agent(OpenAIModel('gpt-3.5-turbo', http_client=http_client))
    result = await agent.run('What is the capital of France?')
    print('OpenAI response:', result.data)
    assert 'paris' in result.data.lower()
    # Fetch the cost once so the printed value and the asserted value agree.
    cost = result.cost()
    print('OpenAI cost:', cost)
    assert cost.total_tokens is not None and cost.total_tokens > 0
| 36 | + |
| 37 | + |
async def test_openai_stream(http_client: httpx.AsyncClient):
    """Live smoke test: streamed OpenAI completion answers a factual question.

    Checks both the streamed answer content and that token usage was reported.
    """
    agent = Agent(OpenAIModel('gpt-3.5-turbo', http_client=http_client))
    async with agent.run_stream('What is the capital of France?') as result:
        data = await result.get_data()
        print('OpenAI stream response:', data)
        assert 'paris' in data.lower()
        # Fetch the cost once so the printed value and the asserted value agree.
        cost = result.cost()
        print('OpenAI stream cost:', cost)
        assert cost.total_tokens is not None and cost.total_tokens > 0
| 47 | + |
| 48 | + |
class MyModel(BaseModel):
    """Structured-result schema used by the ``*_structured`` tests below."""

    # Expected to be populated with a city name by the model (e.g. 'London').
    city: str
| 51 | + |
| 52 | + |
async def test_openai_structured(http_client: httpx.AsyncClient):
    """Live smoke test: OpenAI returns a structured (pydantic) result.

    Checks the parsed ``MyModel.city`` field and that token usage was reported.
    """
    agent = Agent(OpenAIModel('gpt-4o-mini', http_client=http_client), result_type=MyModel)
    result = await agent.run('What is the capital of the UK?')
    print('OpenAI structured response:', result.data)
    assert result.data.city.lower() == 'london'
    # Fetch the cost once so the printed value and the asserted value agree.
    cost = result.cost()
    print('OpenAI structured cost:', cost)
    assert cost.total_tokens is not None and cost.total_tokens > 0
| 61 | + |
| 62 | + |
async def test_gemini(http_client: httpx.AsyncClient):
    """Live smoke test: plain-text Gemini completion answers a factual question.

    Checks both the answer content and that token usage was reported.
    """
    agent = Agent(GeminiModel('gemini-1.5-flash', http_client=http_client))
    result = await agent.run('What is the capital of France?')
    print('Gemini response:', result.data)
    assert 'paris' in result.data.lower()
    # Fetch the cost once so the printed value and the asserted value agree.
    cost = result.cost()
    print('Gemini cost:', cost)
    assert cost.total_tokens is not None and cost.total_tokens > 0
| 71 | + |
| 72 | + |
async def test_gemini_stream(http_client: httpx.AsyncClient):
    """Live smoke test: streamed Gemini completion answers a factual question.

    Checks both the streamed answer content and that token usage was reported.
    """
    agent = Agent(GeminiModel('gemini-1.5-pro', http_client=http_client))
    async with agent.run_stream('What is the capital of France?') as result:
        data = await result.get_data()
        print('Gemini stream response:', data)
        assert 'paris' in data.lower()
        # Fetch the cost once so the printed value and the asserted value agree.
        cost = result.cost()
        print('Gemini stream cost:', cost)
        assert cost.total_tokens is not None and cost.total_tokens > 0
| 82 | + |
| 83 | + |
async def test_gemini_structured(http_client: httpx.AsyncClient):
    """Live smoke test: Gemini returns a structured (pydantic) result.

    Checks the parsed ``MyModel.city`` field and that token usage was reported.
    """
    agent = Agent(GeminiModel('gemini-1.5-pro', http_client=http_client), result_type=MyModel)
    result = await agent.run('What is the capital of the UK?')
    print('Gemini structured response:', result.data)
    assert result.data.city.lower() == 'london'
    # Fetch the cost once so the printed value and the asserted value agree.
    cost = result.cost()
    print('Gemini structured cost:', cost)
    assert cost.total_tokens is not None and cost.total_tokens > 0
0 commit comments