Skip to content

Commit e6d8cee

Browse files
authored
Switch to AsyncAnthropic client (#12)
Fixes #9 The sync `Anthropic` client was being used inside `async` FastAPI routes, blocking the event loop during API calls. As @Alc-Alc correctly identified, this would affect throughput under concurrent requests. ### Changes - `app/explain.py`: `Anthropic` → `AsyncAnthropic`, `await client.messages.create()` - `app/main.py`: `Anthropic()` → `AsyncAnthropic()` in lifespan - `app/test_explain.py`: `MagicMock` → `AsyncMock` for the messages.create mock All 91 tests pass, live smoke test confirmed working. *(I'm Molty, an AI assistant acting on behalf of @mattgodbolt)*
2 parents 1473117 + 92c19df commit e6d8cee

File tree

3 files changed

+9
-9
lines changed

3 files changed

+9
-9
lines changed

app/explain.py

Lines changed: 5 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -1,6 +1,6 @@
11
import logging
22

3-
from anthropic import Anthropic
3+
from anthropic import AsyncAnthropic
44

55
from app.cache import CacheProvider, cache_response, get_cached_response
66
from app.explain_api import CostBreakdown, ExplainRequest, ExplainResponse, TokenUsage
@@ -19,7 +19,7 @@
1919

2020
async def process_request(
2121
body: ExplainRequest,
22-
client: Anthropic,
22+
client: AsyncAnthropic,
2323
prompt: Prompt,
2424
metrics_provider: MetricsProvider,
2525
cache_provider: CacheProvider | None = None,
@@ -31,7 +31,7 @@ async def process_request(
3131
3232
Args:
3333
body: The request body as a Pydantic model
34-
client: Anthropic client instance
34+
client: AsyncAnthropic client instance
3535
prompt: Prompt instance for generating messages
3636
metrics_provider: metrics provider for tracking stats
3737
cache_provider: cache provider for storing/retrieving responses
@@ -69,7 +69,7 @@ async def process_request(
6969

7070
async def _call_anthropic_api(
7171
body: ExplainRequest,
72-
client: Anthropic,
72+
client: AsyncAnthropic,
7373
prompt: Prompt,
7474
metrics_provider: MetricsProvider,
7575
) -> ExplainResponse:
@@ -92,7 +92,7 @@ async def _call_anthropic_api(
9292
# Call Claude API
9393
LOGGER.info("Using Anthropic client with model: %s", {prompt_data["model"]})
9494

95-
message = client.messages.create(
95+
message = await client.messages.create(
9696
model=prompt_data["model"],
9797
max_tokens=prompt_data["max_tokens"],
9898
temperature=prompt_data["temperature"],

app/main.py

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -2,7 +2,7 @@
22
from contextlib import asynccontextmanager
33
from pathlib import Path
44

5-
from anthropic import Anthropic
5+
from anthropic import AsyncAnthropic
66
from anthropic import __version__ as anthropic_version
77
from fastapi import FastAPI, Request
88
from fastapi.middleware.cors import CORSMiddleware
@@ -42,7 +42,7 @@ async def lifespan(app: FastAPI):
4242

4343
# Store shared resources in app.state
4444
app.state.settings = settings
45-
app.state.anthropic_client = Anthropic(api_key=settings.anthropic_api_key)
45+
app.state.anthropic_client = AsyncAnthropic(api_key=settings.anthropic_api_key)
4646

4747
# Load the prompt configuration
4848
prompt_config_path = Path(__file__).parent / "prompt.yaml"

app/test_explain.py

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -1,6 +1,6 @@
11
import json
22
from pathlib import Path
3-
from unittest.mock import MagicMock
3+
from unittest.mock import AsyncMock, MagicMock
44

55
import pytest
66

@@ -65,7 +65,7 @@ def mock_anthropic_client():
6565
mock_message.usage.input_tokens = 100
6666
mock_message.usage.output_tokens = 50
6767

68-
mock_client.messages.create.return_value = mock_message
68+
mock_client.messages.create = AsyncMock(return_value=mock_message)
6969
return mock_client
7070

7171

0 commit comments

Comments (0)