Skip to content

Commit 80fa92d

Browse files
authored
Add Anthropic API (#1202)
* update * update anthropic
1 parent 4212b2f commit 80fa92d

File tree

5 files changed

+813
-0
lines changed

5 files changed

+813
-0
lines changed

evalscope/constants.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -72,6 +72,8 @@ class EvalType:
7272
SERVICE = 'openai_api' # model service
7373
TEXT2IMAGE = 'text2image' # image generation service
7474
IMAGE_EDITING = 'image_editing' # image editing service
75+
OPENAI_API = 'openai_api'
76+
ANTHROPIC_API = 'anthropic_api'
7577

7678

7779
class OutputType:
Lines changed: 170 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,170 @@
1+
import os
2+
from anthropic import Anthropic, APIStatusError, BadRequestError, PermissionDeniedError
3+
from anthropic.types import Message
4+
from typing import Any, Dict, List, Optional, Tuple, Union
5+
6+
from evalscope.api.messages import ChatMessage
7+
from evalscope.api.model import ChatCompletionChoice, GenerateConfig, ModelAPI, ModelOutput
8+
from evalscope.api.tool import ToolChoice, ToolInfo
9+
from evalscope.utils import get_logger
10+
from evalscope.utils.function_utils import retry_call
11+
from .utils.anthropic import (
12+
anthropic_chat_messages,
13+
anthropic_chat_tool_choice,
14+
anthropic_chat_tools,
15+
anthropic_completion_params,
16+
anthropic_handle_bad_request,
17+
chat_choices_from_anthropic,
18+
collect_stream_response,
19+
model_output_from_anthropic,
20+
)
21+
22+
logger = get_logger()
23+
24+
25+
class AnthropicCompatibleAPI(ModelAPI):
    """Anthropic API compatible model implementation.

    This class provides a compatible interface for interacting with Anthropic's
    Claude models via their official API.

    Resolution order for credentials and endpoint:
        api_key:  explicit argument > ANTHROPIC_API_KEY > EVALSCOPE_API_KEY
        base_url: explicit argument > ANTHROPIC_BASE_URL > EVALSCOPE_BASE_URL
    """

    def __init__(
        self,
        model_name: str,
        base_url: Optional[str] = None,
        api_key: Optional[str] = None,
        config: Optional[GenerateConfig] = None,
        **model_args: Any,
    ) -> None:
        """Initialize the Anthropic client.

        Args:
            model_name: Name of the Claude model to evaluate.
            base_url: Optional API endpoint override.
            api_key: Optional API key; falls back to environment variables.
            config: Generation configuration. A fresh ``GenerateConfig`` is
                created per call when omitted (the previous
                ``config=GenerateConfig()`` default was a single mutable
                instance shared by every construction).
            **model_args: Extra keyword arguments forwarded to the
                ``Anthropic`` client constructor.
        """
        if config is None:
            config = GenerateConfig()
        super().__init__(
            model_name=model_name,
            base_url=base_url,
            api_key=api_key,
            config=config,
        )

        # Use service prefix to lookup api_key
        self.api_key = api_key or os.environ.get('ANTHROPIC_API_KEY', None) or os.environ.get('EVALSCOPE_API_KEY', None)
        assert self.api_key, f'API key for {model_name} not found. Set ANTHROPIC_API_KEY or EVALSCOPE_API_KEY.'

        # Use service prefix to lookup base_url (optional for Anthropic —
        # the SDK falls back to the public endpoint when unset)
        self.base_url = base_url or os.environ.get('ANTHROPIC_BASE_URL',
                                                   None) or os.environ.get('EVALSCOPE_BASE_URL', None)

        # Remove trailing slash from base_url if present
        if self.base_url:
            self.base_url = self.base_url.rstrip('/')
            # Anthropic SDK automatically appends /v1/messages, so we need to remove /v1 suffix
            # to avoid double /v1 in the URL (e.g., /v1/v1/messages)
            if self.base_url.endswith('/v1'):
                self.base_url = self.base_url[:-3]

        # Create Anthropic client; only pass base_url when explicitly configured
        client_kwargs: Dict[str, Any] = {
            'api_key': self.api_key,
            **model_args,
        }
        if self.base_url:
            client_kwargs['base_url'] = self.base_url

        self.client = Anthropic(**client_kwargs)

    def generate(
        self,
        input: List[ChatMessage],
        tools: List[ToolInfo],
        tool_choice: ToolChoice,
        config: GenerateConfig,
    ) -> ModelOutput:
        """Generate a response from the Anthropic API.

        Args:
            input: List of chat messages forming the conversation.
            tools: List of available tools for the model to use.
            tool_choice: How the model should choose which tool to use.
            config: Generation configuration parameters.

        Returns:
            ModelOutput containing the model's response.

        Raises:
            Exception: Re-raised by handle_bad_request() when a
                BadRequestError/PermissionDeniedError cannot be converted
                into a ModelOutput.
        """
        tools, tool_choice, config = self.resolve_tools(tools, tool_choice, config)

        # Build completion parameters (model name + sampling options)
        completion_params = self.completion_params(config)

        # Convert messages to Anthropic format (system prompt is separate)
        system_message, messages = anthropic_chat_messages(input)

        # Build request
        request: Dict[str, Any] = dict(
            messages=messages,
            **completion_params,
        )

        # Add system message if present
        if system_message:
            request['system'] = system_message

        # Add tools if present
        if len(tools) > 0:
            request['tools'] = anthropic_chat_tools(tools)
            request['tool_choice'] = anthropic_chat_tool_choice(tool_choice)

        # Handle streaming
        if config.stream:
            request['stream'] = True

        try:
            # Generate completion (with retries on transient failures)
            message = retry_call(
                self.client.messages.create,
                retries=config.retries,
                sleep_interval=config.retry_interval,
                **request,
            )

            # Streaming requests return an event stream rather than a
            # Message; collect it into a single Message first.
            if not isinstance(message, Message):
                message = collect_stream_response(message)

            self.on_response(message.model_dump())

            # Return output
            choices = self.chat_choices_from_message(message, tools)
            return model_output_from_anthropic(message, choices)

        except (BadRequestError, PermissionDeniedError) as ex:
            return self.handle_bad_request(ex)

    def resolve_tools(self, tools: List[ToolInfo], tool_choice: ToolChoice,
                      config: GenerateConfig) -> Tuple[List[ToolInfo], ToolChoice, GenerateConfig]:
        """Provides an opportunity for concrete classes to customize tool resolution."""
        return tools, tool_choice, config

    def completion_params(self, config: GenerateConfig) -> Dict[str, Any]:
        """Build Anthropic completion parameters from config."""
        return anthropic_completion_params(
            model=self.model_name,
            config=config,
        )

    def on_response(self, response: Dict[str, Any]) -> None:
        """Hook for subclasses to do custom response handling."""
        pass

    def chat_choices_from_message(self, message: Message, tools: List[ToolInfo]) -> List[ChatCompletionChoice]:
        """Hook for subclasses to do custom chat choice processing."""
        return chat_choices_from_anthropic(message, tools)

    def handle_bad_request(self, ex: APIStatusError) -> Union[ModelOutput, Exception]:
        """Hook for subclasses to do bad request handling.

        Raises the result when the helper maps the error to an Exception;
        otherwise returns it as a ModelOutput.
        """
        result = anthropic_handle_bad_request(self.model_name, ex)
        if isinstance(result, Exception):
            raise result
        return result

evalscope/models/model_apis.py

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,15 @@ def openai_api() -> type[ModelAPI]:
1818
return OpenAICompatibleAPI
1919

2020

21+
@register_model_api(name='anthropic_api')
def anthropic_api() -> type[ModelAPI]:
    """Return the Anthropic-compatible ModelAPI implementation class."""
    # Fail fast with a helpful message if the optional `anthropic` SDK is
    # missing, before importing anything that depends on it.
    check_import('anthropic', package='anthropic', raise_error=True, feature_name='anthropic_api')

    from .anthropic_compatible import AnthropicCompatibleAPI
    api_cls = AnthropicCompatibleAPI

    return api_cls
28+
29+
2130
@register_model_api(name='server')
2231
@deprecated(since='1.0.0', remove_in='1.1.0', alternative='openai_api')
2332
def server() -> type[ModelAPI]:

0 commit comments

Comments (0)