Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 3 additions & 2 deletions src/api/endpoints.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ async def create_message(request: ClaudeMessagesRequest, http_request: Request,
request_id = str(uuid.uuid4())

# Convert Claude request to OpenAI format
openai_request = convert_claude_to_openai(request, model_manager)
openai_request, tool_name_mapping = convert_claude_to_openai(request, model_manager)

# Check if client disconnected before processing
if await http_request.is_disconnected():
Expand All @@ -81,6 +81,7 @@ async def create_message(request: ClaudeMessagesRequest, http_request: Request,
http_request,
openai_client,
request_id,
tool_name_mapping,
),
media_type="text/event-stream",
headers={
Expand Down Expand Up @@ -108,7 +109,7 @@ async def create_message(request: ClaudeMessagesRequest, http_request: Request,
openai_request, request_id
)
claude_response = convert_openai_to_claude_response(
openai_response, request
openai_response, request, tool_name_mapping
)
return claude_response
except HTTPException:
Expand Down
163 changes: 141 additions & 22 deletions src/conversion/request_converter.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,12 +11,21 @@

def convert_claude_to_openai(
claude_request: ClaudeMessagesRequest, model_manager
) -> Dict[str, Any]:
"""Convert Claude API request format to OpenAI format."""
) -> tuple[Dict[str, Any], Dict[str, str]]:
"""Convert Claude API request format to OpenAI format.

Returns:
tuple: (openai_request, tool_name_mapping)
- openai_request: The converted OpenAI format request
- tool_name_mapping: Dict mapping sanitized tool names back to original names
"""

# Map model
openai_model = model_manager.map_claude_model_to_openai(claude_request.model)

# Initialize tool name mapping for reverse lookups
tool_name_mapping = {}

# Convert messages
openai_messages = []

Expand Down Expand Up @@ -93,23 +102,23 @@ def convert_claude_to_openai(
if claude_request.top_p is not None:
openai_request["top_p"] = claude_request.top_p

# Convert tools
# Convert tools based on configuration
if claude_request.tools:
openai_tools = []
for tool in claude_request.tools:
if tool.name and tool.name.strip():
openai_tools.append(
{
"type": Constants.TOOL_FUNCTION,
Constants.TOOL_FUNCTION: {
"name": tool.name,
"description": tool.description or "",
"parameters": tool.input_schema,
},
}
)
if openai_tools:
openai_request["tools"] = openai_tools
if config.tooling_api == "kosong":
# Use Kosong/Kimi tooling format
kimi_tools, kimi_mapping = convert_tools_to_kimi_format_with_mapping(claude_request.tools[:config.max_tools_limit])
if kimi_tools:
openai_request["tools"] = kimi_tools
tool_name_mapping.update(kimi_mapping)
else:
# Use standard OpenAI tooling format
openai_tools = convert_tools_to_openai_format(claude_request.tools[:config.max_tools_limit])
if openai_tools:
openai_request["tools"] = openai_tools
# For OpenAI format, no sanitization needed, so mapping is 1:1
for tool in claude_request.tools[:config.max_tools_limit]:
if tool.name and tool.name.strip():
tool_name_mapping[tool.name] = tool.name

# Convert tool choice
if claude_request.tool_choice:
Expand All @@ -126,14 +135,14 @@ def convert_claude_to_openai(
else:
openai_request["tool_choice"] = "auto"

return openai_request
return openai_request, tool_name_mapping


def convert_claude_user_message(msg: ClaudeMessage) -> Dict[str, Any]:
"""Convert Claude user message to OpenAI format."""
if msg.content is None:
return {"role": Constants.ROLE_USER, "content": ""}

if isinstance(msg.content, str):
return {"role": Constants.ROLE_USER, "content": msg.content}

Expand Down Expand Up @@ -172,20 +181,25 @@ def convert_claude_assistant_message(msg: ClaudeMessage) -> Dict[str, Any]:

if msg.content is None:
return {"role": Constants.ROLE_ASSISTANT, "content": None}

if isinstance(msg.content, str):
return {"role": Constants.ROLE_ASSISTANT, "content": msg.content}

for block in msg.content:
if block.type == Constants.CONTENT_TEXT:
text_parts.append(block.text)
elif block.type == Constants.CONTENT_TOOL_USE:
# Sanitize tool name if using Kimi tooling API
tool_name = block.name
if config.tooling_api == "kosong":
tool_name = sanitize_tool_name_for_kimi(block.name)

tool_calls.append(
{
"id": block.id,
"type": Constants.TOOL_FUNCTION,
Constants.TOOL_FUNCTION: {
"name": block.name,
"name": tool_name,
"arguments": json.dumps(block.input, ensure_ascii=False),
},
}
Expand Down Expand Up @@ -225,6 +239,111 @@ def convert_claude_tool_results(msg: ClaudeMessage) -> List[Dict[str, Any]]:
return tool_messages


def convert_tools_to_openai_format(tools):
    """Build an OpenAI-format tool list from Claude tool definitions.

    Entries with a missing or whitespace-only name are skipped; each
    remaining tool is wrapped in the standard OpenAI function schema.
    """
    return [
        {
            "type": Constants.TOOL_FUNCTION,
            Constants.TOOL_FUNCTION: {
                "name": tool.name,
                "description": tool.description or "",
                "parameters": tool.input_schema,
            },
        }
        for tool in tools
        if tool.name and tool.name.strip()
    ]


def sanitize_tool_name_for_kimi(name):
    """Normalize *name* so it satisfies Kimi's tool-name rules.

    Kimi accepts names that start with a letter and contain only
    letters, digits, underscores, and dashes.
    """
    import re

    # Map every disallowed character to '_', collapse runs of '_',
    # then drop underscores from both ends.
    cleaned = re.sub(r'_+', '_', re.sub(r'[^a-zA-Z0-9_-]', '_', name)).strip('_')

    # Nothing survived sanitization — fall back to a placeholder.
    if not cleaned:
        return 'unknown_tool'

    # Names must begin with a letter; prefix non-conforming ones.
    if not cleaned[0].isalpha():
        return 'tool_' + cleaned

    return cleaned


def convert_tools_to_kimi_format_with_mapping(tools):
    """Convert Claude tools to Kimi/Kosong format and return a name mapping.

    Tool names are sanitized to meet Kimi's naming rules. Builtin
    functions (names starting with "$") are passed through untouched and
    carry no description/parameters. Distinct original names that
    sanitize to the same string are disambiguated with a numeric suffix
    so the reverse mapping stays one-to-one.

    Returns:
        tuple: (kimi_tools, tool_name_mapping)
            - kimi_tools: List of tools in Kimi format
            - tool_name_mapping: Dict mapping sanitized names back to original names
    """
    kimi_tools = []
    tool_name_mapping = {}

    for tool in tools:
        # Skip tools with a missing or whitespace-only name.
        if not (tool.name and tool.name.strip()):
            continue
        logger.debug(f"Processing tool: {tool.name}")

        if tool.name.startswith("$"):
            # Kimi builtin function: sent verbatim, no sanitization,
            # and no description/parameters required.
            kimi_tools.append(
                {
                    "type": "builtin_function",
                    "function": {
                        "name": tool.name,
                    },
                }
            )
            tool_name_mapping[tool.name] = tool.name
        else:
            # Sanitize tool name for Kimi API compatibility.
            sanitized_name = sanitize_tool_name_for_kimi(tool.name)

            # BUG FIX: two different original names can sanitize to the
            # same string (e.g. "get.data" and "get#data"). Previously
            # the later tool overwrote the earlier one's mapping entry
            # and Kimi received duplicate tool names, so tool calls
            # could be routed back to the wrong original tool.
            # Disambiguate with a numeric suffix to keep the reverse
            # lookup one-to-one.
            if sanitized_name in tool_name_mapping and tool_name_mapping[sanitized_name] != tool.name:
                suffix = 2
                while f"{sanitized_name}_{suffix}" in tool_name_mapping:
                    suffix += 1
                sanitized_name = f"{sanitized_name}_{suffix}"
            logger.debug(f"Sanitized tool name: {tool.name} -> {sanitized_name}")

            # Store mapping for reverse lookup when converting responses.
            tool_name_mapping[sanitized_name] = tool.name

            # Custom tools use the standard OpenAI function format.
            kimi_tools.append(
                {
                    "type": Constants.TOOL_FUNCTION,
                    Constants.TOOL_FUNCTION: {
                        "name": sanitized_name,
                        "description": tool.description or "",
                        "parameters": tool.input_schema,
                    },
                }
            )

    logger.debug(f"Final kimi_tools: {json.dumps(kimi_tools, indent=2, ensure_ascii=False)}")
    logger.debug(f"Tool name mapping: {tool_name_mapping}")
    return kimi_tools, tool_name_mapping

def convert_tools_to_kimi_format(tools):
    """Legacy wrapper: convert Claude tools to Kimi/Kosong format.

    Kept for backward compatibility; the name mapping produced by
    convert_tools_to_kimi_format_with_mapping is discarded.
    """
    converted, _unused_mapping = convert_tools_to_kimi_format_with_mapping(tools)
    return converted


def parse_tool_result_content(content):
"""Parse and normalize tool result content into a string format."""
if content is None:
Expand Down
41 changes: 34 additions & 7 deletions src/conversion/response_converter.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,9 +6,15 @@


def convert_openai_to_claude_response(
openai_response: dict, original_request: ClaudeMessagesRequest
openai_response: dict, original_request: ClaudeMessagesRequest, tool_name_mapping: dict[str, str] = None
) -> dict:
"""Convert OpenAI response to Claude format."""
"""Convert OpenAI response to Claude format.

Args:
openai_response: The response from OpenAI API
original_request: The original Claude request
tool_name_mapping: Optional mapping from sanitized tool names back to original names
"""

# Extract response data
choices = openai_response.get("choices", [])
Expand Down Expand Up @@ -36,11 +42,17 @@ def convert_openai_to_claude_response(
except json.JSONDecodeError:
arguments = {"raw_arguments": function_data.get("arguments", "")}

# Get original tool name from mapping if available
sanitized_name = function_data.get("name", "")
original_name = sanitized_name
if tool_name_mapping and sanitized_name in tool_name_mapping:
original_name = tool_name_mapping[sanitized_name]

content_blocks.append(
{
"type": Constants.CONTENT_TOOL_USE,
"id": tool_call.get("id", f"tool_{uuid.uuid4()}"),
"name": function_data.get("name", ""),
"name": original_name,
"input": arguments,
}
)
Expand Down Expand Up @@ -79,7 +91,7 @@ def convert_openai_to_claude_response(


async def convert_openai_streaming_to_claude(
openai_stream, original_request: ClaudeMessagesRequest, logger
openai_stream, original_request: ClaudeMessagesRequest, logger, tool_name_mapping: dict[str, str] = None
):
"""Convert OpenAI streaming response to Claude streaming format."""

Expand Down Expand Up @@ -150,7 +162,12 @@ async def convert_openai_streaming_to_claude(
# Update function name and start content block if we have both id and name
function_data = tc_delta.get(Constants.TOOL_FUNCTION, {})
if function_data.get("name"):
tool_call["name"] = function_data["name"]
sanitized_name = function_data["name"]
# Convert sanitized name back to original if mapping exists
original_name = sanitized_name
if tool_name_mapping and sanitized_name in tool_name_mapping:
original_name = tool_name_mapping[sanitized_name]
tool_call["name"] = original_name

# Start content block when we have complete initial data
if (tool_call["id"] and tool_call["name"] and not tool_call["started"]):
Expand Down Expand Up @@ -220,8 +237,13 @@ async def convert_openai_streaming_to_claude_with_cancellation(
http_request: Request,
openai_client,
request_id: str,
tool_name_mapping: dict[str, str] = None,
):
"""Convert OpenAI streaming response to Claude streaming format with cancellation support."""
"""Convert OpenAI streaming response to Claude streaming format with cancellation support.

Args:
tool_name_mapping: Optional mapping from sanitized tool names back to original names
"""

message_id = f"msg_{uuid.uuid4().hex[:24]}"

Expand Down Expand Up @@ -309,7 +331,12 @@ async def convert_openai_streaming_to_claude_with_cancellation(
# Update function name and start content block if we have both id and name
function_data = tc_delta.get(Constants.TOOL_FUNCTION, {})
if function_data.get("name"):
tool_call["name"] = function_data["name"]
sanitized_name = function_data["name"]
# Convert sanitized name back to original if mapping exists
original_name = sanitized_name
if tool_name_mapping and sanitized_name in tool_name_mapping:
original_name = tool_name_mapping[sanitized_name]
tool_call["name"] = original_name

# Start content block when we have complete initial data
if (tool_call["id"] and tool_call["name"] and not tool_call["started"]):
Expand Down
24 changes: 22 additions & 2 deletions src/core/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,8 +20,24 @@ def __init__(self, api_key: str, base_url: str, timeout: int = 90, api_version:
"User-Agent": "claude-proxy/1.0.0"
}

# Merge custom headers with default headers
all_headers = {**default_headers, **self.custom_headers}
# Case-insensitive merge of custom headers with default headers
all_headers = {**default_headers}

# Create a mapping of lowercase header names to their original case
default_headers_lower = {name.lower(): name for name in default_headers.keys()}

# Add custom headers with case-insensitive override
for custom_name, custom_value in self.custom_headers.items():
custom_name_lower = custom_name.lower()

# If header exists in defaults (case-insensitive), replace it
if custom_name_lower in default_headers_lower:
original_name = default_headers_lower[custom_name_lower]
all_headers[original_name] = custom_value
else:
# For new headers, use proper HTTP capitalization
capitalized_name = '-'.join(word.capitalize() for word in custom_name.split('-'))
all_headers[capitalized_name] = custom_value

# Detect if using Azure and instantiate the appropriate client
if api_version:
Expand Down Expand Up @@ -50,6 +66,10 @@ async def create_chat_completion(self, request: Dict[str, Any], request_id: Opti
self.active_requests[request_id] = cancel_event

try:
# Add stream_options for Kimi API compatibility
if "stream_options" not in request:
request["stream_options"] = {"include_usage": True}

# Create task that can be cancelled
completion_task = asyncio.create_task(
self.client.chat.completions.create(**request)
Expand Down
4 changes: 4 additions & 0 deletions src/core/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,10 @@ def __init__(self):
self.big_model = os.environ.get("BIG_MODEL", "gpt-4o")
self.middle_model = os.environ.get("MIDDLE_MODEL", self.big_model)
self.small_model = os.environ.get("SMALL_MODEL", "gpt-4o-mini")

# Tooling API settings
self.tooling_api = os.environ.get("TOOLING_API", "openai").lower() # "openai" or "kosong"
self.max_tools_limit = int(os.environ.get("MAX_TOOLS_LIMIT", "100")) # Maximum tools to send to API

def validate_api_key(self):
"""Basic API key validation"""
Expand Down