diff --git a/docs/my-website/docs/providers/matterai.md b/docs/my-website/docs/providers/matterai.md
new file mode 100644
index 000000000000..fc4af6254875
--- /dev/null
+++ b/docs/my-website/docs/providers/matterai.md
@@ -0,0 +1,209 @@
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# MatterAI
+
+https://docs.matterai.so
+
+MatterAI offers SuperIntelligent large language models for general-purpose, coding, and research use cases. Its OpenAI-compatible API makes integration straightforward, enabling developers to build efficient and scalable AI applications.
+
+| Property | Details |
+| ------------------------- | --------------------------------------------------------------------------- |
+| Description | MatterAI offers powerful language models like `axon-base` and `axon-code`. |
+| Provider Route on LiteLLM | `matterai/` (add this prefix to the model name - e.g. `matterai/axon-base`) |
+| Provider Doc | [MatterAI ↗](https://docs.matterai.so) |
+| API Endpoint for Provider | https://api.matterai.so/v1 |
+| Supported Endpoints | `/chat/completions`, `/completions` |
+
+## Supported OpenAI Parameters
+
+MatterAI is fully OpenAI-compatible and supports the following parameters:
+
+```
+"stream",
+"stop",
+"temperature",
+"top_p",
+"max_tokens",
+"presence_penalty",
+"frequency_penalty",
+"logit_bias",
+"user",
+"response_format",
+"seed",
+"tools",
+"tool_choice",
+"parallel_tool_calls",
+"extra_headers"
+```
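+
+For example, `response_format` can be used to request structured JSON output. A minimal sketch, assuming the Axon models honor OpenAI-style JSON mode:
+
+```python
+from litellm import completion
+
+response = completion(
+    model="matterai/axon-base",
+    messages=[{"role": "user", "content": "Return a JSON object with keys 'city' and 'country' for Paris."}],
+    response_format={"type": "json_object"},  # assumes OpenAI-style JSON mode is supported
+)
+print(response.choices[0].message.content)
+```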
+
+## API Key Setup
+
+To use MatterAI, set your API key as an environment variable:
+
+```python
+import os
+
+os.environ["MATTERAI_API_KEY"] = "your-api-key"
+```
+
+## Usage
+
+<Tabs>
+<TabItem value="sdk" label="SDK">
+
+```python
+from litellm import completion
+import os
+
+os.environ['MATTERAI_API_KEY'] = "your-api-key"
+
+response = completion(
+ model="matterai/axon-base",
+ messages=[
+ {"role": "user", "content": "Hello from LiteLLM!"}
+ ],
+)
+print(response)
+```
+
+</TabItem>
+<TabItem value="proxy" label="PROXY">
+
+```yaml
+model_list:
+ - model_name: matterai-axon-base
+ litellm_params:
+ model: matterai/axon-base
+ api_key: os.environ/MATTERAI_API_KEY
+```
+
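+Start the LiteLLM proxy with this config, then call it with any OpenAI-compatible client. A minimal sketch (the proxy URL and virtual key below are illustrative):
+
+```python
+import openai
+
+client = openai.OpenAI(
+    api_key="sk-1234",              # illustrative LiteLLM proxy virtual key
+    base_url="http://0.0.0.0:4000"  # illustrative LiteLLM proxy URL
+)
+
+response = client.chat.completions.create(
+    model="matterai-axon-base",  # model_name from the config above
+    messages=[{"role": "user", "content": "Hello from LiteLLM!"}]
+)
+print(response)
+```
+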
+</TabItem>
+</Tabs>
+
+## Streaming
+
+```python
+from litellm import completion
+import os
+
+os.environ['MATTERAI_API_KEY'] = "your-api-key"
+
+response = completion(
+ model="matterai/axon-code",
+ messages=[
+ {"role": "user", "content": "Write a short story about a robot learning to code."}
+ ],
+ stream=True
+)
+
+for chunk in response:
+ print(chunk)
+```
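+
+To reassemble the streamed output into a single string, concatenate the content deltas instead of printing each chunk (a small sketch; the delta content can be `None` on the final chunk):
+
+```python
+full_text = ""
+for chunk in response:
+    full_text += chunk.choices[0].delta.content or ""
+print(full_text)
+```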
+
+## Advanced Usage
+
+### Custom Parameters
+
+```python
+from litellm import completion
+
+response = completion(
+ model="matterai/axon-base",
+ messages=[{"role": "user", "content": "Explain quantum computing"}],
+ temperature=0.7,
+ max_tokens=500,
+ top_p=0.9,
+ stop=["Human:", "AI:"]
+)
+```
+
+### Function Calling
+
+MatterAI supports OpenAI-compatible function calling:
+
+```python
+from litellm import completion
+
+functions = [
+ {
+ "name": "get_weather",
+ "description": "Get current weather information",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "location": {
+ "type": "string",
+ "description": "The city and state"
+ }
+ },
+ "required": ["location"]
+ }
+ }
+]
+
+response = completion(
+ model="matterai/axon-base",
+ messages=[{"role": "user", "content": "What's the weather in San Francisco?"}],
+ tools=[{"type": "function", "function": f} for f in functions],
+ tool_choice="auto"
+)
+```
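+
+If the model decides to call the function, the tool call comes back on the response message. A minimal sketch of reading it out (the `get_weather` handler is a hypothetical function you would implement):
+
+```python
+import json
+
+message = response.choices[0].message
+if message.tool_calls:
+    tool_call = message.tool_calls[0]
+    args = json.loads(tool_call.function.arguments)
+    print(f"Model requested {tool_call.function.name} with {args}")
+    # result = get_weather(**args)  # hypothetical handler you implement
+```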
+
+### Async Usage
+
+```python
+import asyncio
+from litellm import acompletion
+
+async def async_call():
+ response = await acompletion(
+ model="matterai/axon-base",
+ messages=[{"role": "user", "content": "Hello async world!"}]
+ )
+ return response
+
+# Run async function
+response = asyncio.run(async_call())
+print(response)
+```
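+
+Since `acompletion` is a coroutine, several requests can run concurrently with `asyncio.gather`. A minimal sketch:
+
+```python
+import asyncio
+from litellm import acompletion
+
+async def fan_out():
+    prompts = ["Summarize HTTP/2", "Summarize HTTP/3"]
+    tasks = [
+        acompletion(
+            model="matterai/axon-base",
+            messages=[{"role": "user", "content": p}],
+        )
+        for p in prompts
+    ]
+    return await asyncio.gather(*tasks)
+
+responses = asyncio.run(fan_out())
+for r in responses:
+    print(r.choices[0].message.content)
+```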
+
+## Available Models
+
+MatterAI offers models like `axon-base` and `axon-code`.
+
+Common model formats:
+
+- `matterai/axon-base`
+- `matterai/axon-code`
+
+## Benefits
+
+- **Powerful Models**: Access to advanced language models optimized for various tasks
+- **OpenAI Compatibility**: Seamless integration with existing OpenAI-compatible tools and workflows
+- **Scalable**: Built for efficient, high-throughput applications
+- **Developer-Friendly**: Simple API with comprehensive documentation
+
+## Error Handling
+
+MatterAI returns standard OpenAI-compatible error responses:
+
+```python
+from litellm import completion
+from litellm.exceptions import AuthenticationError, RateLimitError
+
+try:
+ response = completion(
+ model="matterai/axon-base",
+ messages=[{"role": "user", "content": "Hello"}]
+ )
+except AuthenticationError:
+ print("Invalid API key")
+except RateLimitError:
+ print("Rate limit exceeded")
+```
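+
+For transient failures such as rate limits, LiteLLM's built-in `num_retries` parameter retries the call automatically. A minimal sketch:
+
+```python
+from litellm import completion
+
+response = completion(
+    model="matterai/axon-base",
+    messages=[{"role": "user", "content": "Hello"}],
+    num_retries=3,  # retry automatically on retryable errors
+)
+```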
+
+## Support
+
+- Documentation: https://docs.matterai.so
+- Contact: support@matterai.so
diff --git a/docs/my-website/sidebars.js b/docs/my-website/sidebars.js
index 7acd92240c66..d64ec977c20d 100644
--- a/docs/my-website/sidebars.js
+++ b/docs/my-website/sidebars.js
@@ -522,6 +522,7 @@ const sidebars = {
"providers/oci",
"providers/datarobot",
"providers/ovhcloud",
+ "providers/matterai",
],
},
{
diff --git a/litellm/__init__.py b/litellm/__init__.py
index 273100f1c25d..cabd42eaf8fd 100644
--- a/litellm/__init__.py
+++ b/litellm/__init__.py
@@ -526,6 +526,7 @@ def identify(event_details):
ovhcloud_models: Set = set()
ovhcloud_embedding_models: Set = set()
lemonade_models: Set = set()
+matterai_models: Set = set()
+matterai_key: Optional[str] = None
def is_bedrock_pricing_only_model(key: str) -> bool:
@@ -750,6 +751,8 @@ def add_known_models():
ovhcloud_embedding_models.add(key)
elif value.get("litellm_provider") == "lemonade":
lemonade_models.add(key)
+ elif value.get("litellm_provider") == "matterai":
+ matterai_models.add(key)
add_known_models()
@@ -848,6 +851,7 @@ def add_known_models():
| wandb_models
| ovhcloud_models
| lemonade_models
+ | matterai_models
)
model_list_set = set(model_list)
@@ -933,6 +937,7 @@ def add_known_models():
"wandb": wandb_models,
"ovhcloud": ovhcloud_models | ovhcloud_embedding_models,
"lemonade": lemonade_models,
+ "matterai": matterai_models,
}
# mapping for those models which have larger equivalents
diff --git a/litellm/llms/matterai/__init__.py b/litellm/llms/matterai/__init__.py
new file mode 100644
index 000000000000..ddf54b7b5ddf
--- /dev/null
+++ b/litellm/llms/matterai/__init__.py
@@ -0,0 +1 @@
+# MatterAI Provider for LiteLLM
diff --git a/litellm/llms/matterai/chat/__init__.py b/litellm/llms/matterai/chat/__init__.py
new file mode 100644
index 000000000000..95d8dceac15e
--- /dev/null
+++ b/litellm/llms/matterai/chat/__init__.py
@@ -0,0 +1 @@
+# MatterAI Chat Module for LiteLLM
diff --git a/litellm/llms/matterai/chat/transformation.py b/litellm/llms/matterai/chat/transformation.py
new file mode 100644
index 000000000000..d76959ae923d
--- /dev/null
+++ b/litellm/llms/matterai/chat/transformation.py
@@ -0,0 +1,84 @@
+# MatterAI Chat Transformation for LiteLLM
+import httpx
+from typing import Any, List, Optional, Tuple, Union
+
+from litellm.llms.base_llm.chat.transformation import BaseLLMException
+from litellm.llms.openai.chat.gpt_transformation import OpenAIGPTConfig
+from litellm.llms.openai.common_utils import OpenAIError
+from litellm.secret_managers.main import get_secret_str
+from litellm.types.utils import ModelResponse
+
+
+class MatterAIChatConfig(OpenAIGPTConfig):
+ """
+ Configuration class for MatterAI chat completions.
+ Since MatterAI is OpenAI-compatible, we extend OpenAIGPTConfig.
+ """
+
+ def _get_openai_compatible_provider_info(
+ self, api_base: Optional[str], api_key: Optional[str]
+ ) -> Tuple[Optional[str], Optional[str]]:
+ """
+ Get API base and key for MatterAI provider.
+ """
+ api_base = api_base or "https://api.matterai.so/v1"
+ dynamic_api_key = api_key or get_secret_str("MATTERAI_API_KEY") or ""
+ return api_base, dynamic_api_key
+
+ def transform_response(
+ self,
+ model: str,
+ raw_response: httpx.Response,
+ model_response: ModelResponse,
+ logging_obj: Any,
+ request_data: dict,
+ messages: List,
+ optional_params: dict,
+ litellm_params: dict,
+ encoding: Any,
+ api_key: Optional[str] = None,
+ json_mode: Optional[bool] = None,
+ ) -> ModelResponse:
+ """
+ Transform MatterAI response to LiteLLM format.
+ Since MatterAI is OpenAI-compatible, we can use the standard OpenAI transformation.
+ """
+ # LOGGING
+ logging_obj.post_call(
+ input=messages,
+ api_key=api_key,
+ original_response=raw_response.text,
+ additional_args={"complete_input_dict": request_data},
+ )
+
+ # RESPONSE OBJECT
+ response_json = raw_response.json()
+
+ # Handle JSON mode if needed
+ if json_mode:
+ for choice in response_json["choices"]:
+ message = choice.get("message")
+ if message and message.get("tool_calls"):
+ # Convert tool calls to content for JSON mode
+ tool_calls = message.get("tool_calls", [])
+ if len(tool_calls) == 1:
+ message["content"] = tool_calls[0]["function"].get("arguments", "")
+ message["tool_calls"] = None
+
+ returned_response = ModelResponse(**response_json)
+
+ # Set model name with provider prefix
+ returned_response.model = f"matterai/{model}"
+
+ return returned_response
+
+ def get_error_class(
+ self, error_message: str, status_code: int, headers: Union[dict, httpx.Headers]
+ ) -> BaseLLMException:
+ """
+ Get the appropriate error class for MatterAI errors.
+ Since MatterAI is OpenAI-compatible, we use OpenAI error handling.
+ """
+ return OpenAIError(
+ status_code=status_code,
+ message=error_message,
+ headers=headers,
+ )
diff --git a/litellm/main.py b/litellm/main.py
index cfb0bef07976..3766b34acf9d 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -151,6 +151,7 @@
from .llms.bedrock.image.image_handler import BedrockImageGeneration
from .llms.bytez.chat.transformation import BytezChatConfig
from .llms.lemonade.chat.transformation import LemonadeChatConfig
+from .llms.matterai.chat.transformation import MatterAIChatConfig
from .llms.codestral.completion.handler import CodestralTextCompletion
from .llms.cohere.embed import handler as cohere_embed
from .llms.custom_httpx.aiohttp_handler import BaseLLMAIOHTTPHandler
@@ -270,6 +271,7 @@
oci_transformation = OCIChatConfig()
ovhcloud_transformation = OVHCloudChatConfig()
lemonade_transformation = LemonadeChatConfig()
+matterai_transformation = MatterAIChatConfig()
####### COMPLETION ENDPOINTS ################
@@ -631,18 +633,14 @@ def _handle_mock_potential_exceptions(
raise litellm.MockException(
status_code=getattr(mock_response, "status_code", 500), # type: ignore
message=getattr(mock_response, "text", str(mock_response)),
- llm_provider=getattr(
- mock_response, "llm_provider", custom_llm_provider or "openai"
- ), # type: ignore
+ llm_provider=getattr(mock_response, "llm_provider", custom_llm_provider or "openai"), # type: ignore
model=model, # type: ignore
request=httpx.Request(method="POST", url="https://api.openai.com/v1/"),
)
elif isinstance(mock_response, str) and mock_response == "litellm.RateLimitError":
raise litellm.RateLimitError(
message="this is a mock rate limit error",
- llm_provider=getattr(
- mock_response, "llm_provider", custom_llm_provider or "openai"
- ), # type: ignore
+ llm_provider=getattr(mock_response, "llm_provider", custom_llm_provider or "openai"), # type: ignore
model=model,
)
elif (
@@ -1101,7 +1099,6 @@ def completion( # type: ignore # noqa: PLR0915
prompt_id=prompt_id, non_default_params=non_default_params
)
):
-
(
model,
messages,
@@ -2073,7 +2070,6 @@ def completion( # type: ignore # noqa: PLR0915
try:
if use_base_llm_http_handler:
-
response = base_llm_http_handler.completion(
model=model,
messages=messages,
@@ -3500,7 +3496,6 @@ def completion( # type: ignore # noqa: PLR0915
)
raise e
elif custom_llm_provider == "gradient_ai":
-
api_base = litellm.api_base or api_base
response = base_llm_http_handler.completion(
model=model,
@@ -3577,7 +3572,6 @@ def completion( # type: ignore # noqa: PLR0915
pass
-
elif custom_llm_provider == "ovhcloud" or model in litellm.ovhcloud_models:
api_key = (
api_key
@@ -3612,6 +3606,40 @@ def completion( # type: ignore # noqa: PLR0915
provider_config=ovhcloud_transformation,
)
+ pass
+ elif custom_llm_provider == "matterai" or model in litellm.matterai_models:
+ api_key = (
+ api_key
+ or litellm.matterai_key
+ or get_secret_str("MATTERAI_API_KEY")
+ or litellm.api_key
+ )
+ api_base = (
+ api_base
+ or litellm.api_base
+ or get_secret_str("MATTERAI_API_BASE")
+ or "https://api.matterai.so/v1"
+ )
+
+ response = base_llm_http_handler.completion(
+ model=model,
+ messages=messages,
+ headers=headers,
+ model_response=model_response,
+ api_key=api_key,
+ api_base=api_base,
+ acompletion=acompletion,
+ logging_obj=logging,
+ optional_params=optional_params,
+ litellm_params=litellm_params,
+ timeout=timeout, # type: ignore
+ client=client,
+ custom_llm_provider=custom_llm_provider,
+ encoding=encoding,
+ stream=stream,
+ provider_config=matterai_transformation,
+ )
+
pass
elif custom_llm_provider == "custom":
@@ -4163,12 +4191,7 @@ def embedding( # noqa: PLR0915
api_base = api_base or litellm.api_base or get_secret("DATABRICKS_API_BASE") # type: ignore
# set API KEY
- api_key = (
- api_key
- or litellm.api_key
- or litellm.databricks_key
- or get_secret("DATABRICKS_API_KEY")
- ) # type: ignore
+ api_key = api_key or litellm.api_key or litellm.databricks_key or get_secret("DATABRICKS_API_KEY") # type: ignore
## EMBEDDING CALL
response = databricks_embedding.embedding(
@@ -4248,12 +4271,7 @@ def embedding( # noqa: PLR0915
headers=headers,
)
elif custom_llm_provider == "huggingface":
- api_key = (
- api_key
- or litellm.huggingface_key
- or get_secret("HUGGINGFACE_API_KEY")
- or litellm.api_key
- ) # type: ignore
+ api_key = api_key or litellm.huggingface_key or get_secret("HUGGINGFACE_API_KEY") or litellm.api_key # type: ignore
response = huggingface_embed.embedding(
model=model,
input=input,
@@ -4414,12 +4432,7 @@ def embedding( # noqa: PLR0915
api_key=api_key,
)
elif custom_llm_provider == "ollama":
- api_base = (
- litellm.api_base
- or api_base
- or get_secret_str("OLLAMA_API_BASE")
- or "http://localhost:11434"
- ) # type: ignore
+ api_base = litellm.api_base or api_base or get_secret_str("OLLAMA_API_BASE") or "http://localhost:11434" # type: ignore
if isinstance(input, str):
input = [input]
@@ -5171,6 +5184,7 @@ async def aadapter_completion(
except Exception as e:
raise e
+
async def aadapter_generate_content(
**kwargs,
) -> Union[Dict[str, Any], AsyncIterator[bytes]]:
diff --git a/litellm/types/utils.py b/litellm/types/utils.py
index 4e93e1675303..ed43252e4ab3 100644
--- a/litellm/types/utils.py
+++ b/litellm/types/utils.py
@@ -2462,6 +2462,7 @@ class LlmProviders(str, Enum):
WANDB = "wandb"
OVHCLOUD = "ovhcloud"
LEMONADE = "lemonade"
+ MATTERAI = "matterai"
# Create a set of all provider values for quick lookup
diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json
index 5b862b98a820..d56bbb9b11c8 100644
--- a/model_prices_and_context_window.json
+++ b/model_prices_and_context_window.json
@@ -22583,5 +22583,29 @@
"supports_tool_choice": true,
"supports_vision": true,
"supports_web_search": true
+ },
+ "matterai/axon-base": {
+ "input_cost_per_token": 5e-07,
+ "litellm_provider": "matterai",
+ "max_tokens": 256000,
+ "max_input_tokens": 256000,
+ "max_output_tokens": 16384,
+ "mode": "chat",
+ "output_cost_per_token": 1e-06,
+ "supports_function_calling": true,
+ "supports_response_schema": true,
+ "supports_tool_choice": true
+ },
+ "matterai/axon-code": {
+ "input_cost_per_token": 1e-06,
+ "litellm_provider": "matterai",
+ "max_tokens": 256000,
+ "max_input_tokens": 256000,
+ "max_output_tokens": 32768,
+ "mode": "chat",
+ "output_cost_per_token": 3e-06,
+ "supports_function_calling": true,
+ "supports_response_schema": true,
+ "supports_tool_choice": true
}
}
diff --git a/tests/test_litellm/llms/matterai/test_matterai.py b/tests/test_litellm/llms/matterai/test_matterai.py
new file mode 100644
index 000000000000..9d34cebce78b
--- /dev/null
+++ b/tests/test_litellm/llms/matterai/test_matterai.py
@@ -0,0 +1,344 @@
+import json
+import os
+import sys
+from unittest.mock import AsyncMock, patch
+from typing import Optional
+
+import httpx
+import pytest
+import respx
+from respx import MockRouter
+
+import litellm
+from litellm import Choices, Message, ModelResponse
+
+
+@pytest.mark.respx()
+def test_matterai_completion_basic(respx_mock):
+ """Test basic MatterAI completion functionality"""
+ litellm.disable_aiohttp_transport = True
+
+ mock_response = {
+ "id": "chatcmpl-123",
+ "object": "chat.completion",
+ "created": 1677652288,
+ "model": "axon-base",
+ "choices": [
+ {
+ "index": 0,
+ "message": {
+ "role": "assistant",
+ "content": "Hello! How can I help you today?"
+ },
+ "finish_reason": "stop"
+ }
+ ],
+ "usage": {
+ "prompt_tokens": 9,
+ "completion_tokens": 12,
+ "total_tokens": 21
+ }
+ }
+
+ respx_mock.post("https://api.matterai.so/v1/chat/completions").respond(
+ json=mock_response, status_code=200
+ )
+
+ response = litellm.completion(
+ model="matterai/axon-base",
+ messages=[{"role": "user", "content": "Hello"}],
+ api_key="test-key"
+ )
+
+ assert response.choices[0].message.content == "Hello! How can I help you today?"
+ assert response.model == "matterai/axon-base"
+ assert response.usage.total_tokens == 21
+
+
+@pytest.mark.respx()
+def test_matterai_completion_streaming(respx_mock):
+ """Test MatterAI streaming completion"""
+ litellm.disable_aiohttp_transport = True
+
+ mock_chunks = [
+ "data: " + json.dumps({
+ "id": "chatcmpl-123",
+ "object": "chat.completion.chunk",
+ "created": 1677652288,
+ "model": "axon-base",
+ "choices": [
+ {
+ "index": 0,
+ "delta": {"content": "Hello"},
+ "finish_reason": None
+ }
+ ]
+ }) + "\n\n",
+ "data: " + json.dumps({
+ "id": "chatcmpl-123",
+ "object": "chat.completion.chunk",
+ "created": 1677652288,
+ "model": "axon-base",
+ "choices": [
+ {
+ "index": 0,
+ "delta": {"content": "!"},
+ "finish_reason": "stop"
+ }
+ ]
+ }) + "\n\n",
+ "data: [DONE]\n\n"
+ ]
+
+ respx_mock.post("https://api.matterai.so/v1/chat/completions").respond(
+ status_code=200,
+ headers={"content-type": "text/plain"},
+ content="".join(mock_chunks)
+ )
+
+ response = litellm.completion(
+ model="matterai/axon-base",
+ messages=[{"role": "user", "content": "Hello"}],
+ api_key="test-key",
+ stream=True
+ )
+
+ chunks = list(response)
+ assert len(chunks) >= 2
+ assert chunks[0].choices[0].delta.content == "Hello"
+
+
+@pytest.mark.respx()
+def test_matterai_models_endpoint(respx_mock):
+ """Test MatterAI models listing"""
+ litellm.disable_aiohttp_transport = True
+
+ mock_response = {
+ "object": "list",
+ "data": [
+ {
+ "id": "axon-base",
+ "object": "model",
+ "created": 1677610602,
+ "owned_by": "matterai"
+ },
+ {
+ "id": "axon-code",
+ "object": "model",
+ "created": 1677610602,
+ "owned_by": "matterai"
+ }
+ ]
+ }
+
+ respx_mock.post("https://api.matterai.so/v1/chat/completions").respond(
+ json={
+ "id": "chatcmpl-123",
+ "object": "chat.completion",
+ "created": 1677652288,
+ "model": "axon-base",
+ "choices": [{
+ "index": 0,
+ "message": {
+ "role": "assistant",
+ "content": "Test response"
+ },
+ "finish_reason": "stop"
+ }],
+ "usage": {
+ "prompt_tokens": 5,
+ "completion_tokens": 10,
+ "total_tokens": 15
+ }
+ },
+ status_code=200
+ )
+
+    # litellm does not expose a models() listing helper, so instead verify the
+    # provider route is wired up end-to-end with a basic completion call
+    response = litellm.completion(
+        model="matterai/axon-base",
+        messages=[{"role": "user", "content": "test"}],
+        api_key="test-key"
+    )
+
+    assert response.choices[0].message.content == "Test response"
+
+
+@pytest.mark.respx()
+def test_matterai_authentication_error(respx_mock):
+ """Test MatterAI authentication error handling"""
+ litellm.disable_aiohttp_transport = True
+
+ mock_error = {
+ "error": {
+ "message": "Invalid API key provided",
+ "type": "invalid_request_error",
+ "param": None,
+ "code": "invalid_api_key"
+ }
+ }
+
+ respx_mock.post("https://api.matterai.so/v1/chat/completions").respond(
+ json=mock_error, status_code=401
+ )
+
+ with pytest.raises(litellm.APIConnectionError) as exc_info:
+ litellm.completion(
+ model="matterai/axon-base",
+ messages=[{"role": "user", "content": "test"}],
+ api_key="invalid-key"
+ )
+
+ # Verify the error contains the expected authentication error message
+ assert "Invalid API key provided" in str(exc_info.value)
+
+
+@pytest.mark.respx()
+def test_matterai_provider_detection(respx_mock):
+ """Test that MatterAI provider is properly detected from model name"""
+ from litellm.utils import get_llm_provider
+
+ model, provider, dynamic_api_key, api_base = get_llm_provider(
+ model="matterai/axon-base"
+ )
+
+ assert provider == "matterai"
+ assert model == "axon-base"
+
+
+@pytest.mark.respx()
+def test_matterai_with_optional_params(respx_mock):
+ """Test MatterAI with optional parameters like temperature, max_tokens"""
+ litellm.disable_aiohttp_transport = True
+
+ mock_response = {
+ "id": "chatcmpl-123",
+ "object": "chat.completion",
+ "created": 1677652288,
+ "model": "axon-base",
+ "choices": [
+ {
+ "index": 0,
+ "message": {
+ "role": "assistant",
+ "content": "This is a test response with custom parameters."
+ },
+ "finish_reason": "stop"
+ }
+ ],
+ "usage": {
+ "prompt_tokens": 15,
+ "completion_tokens": 20,
+ "total_tokens": 35
+ }
+ }
+
+ request_mock = respx_mock.post("https://api.matterai.so/v1/chat/completions").respond(
+ json=mock_response, status_code=200
+ )
+
+ response = litellm.completion(
+ model="matterai/axon-base",
+ messages=[{"role": "user", "content": "Hello with params"}],
+ api_key="test-key",
+ temperature=0.7,
+ max_tokens=100,
+ top_p=0.9
+ )
+
+ assert response.choices[0].message.content == "This is a test response with custom parameters."
+
+ # Verify the request was made with correct parameters
+ assert request_mock.called
+ request_data = request_mock.calls[0].request.content
+ parsed_data = json.loads(request_data)
+ assert parsed_data["temperature"] == 0.7
+ assert parsed_data["max_tokens"] == 100
+ assert parsed_data["top_p"] == 0.9
+
+
+@pytest.mark.respx()
+def test_matterai_headers_authentication(respx_mock):
+ """Test that MatterAI request includes proper authorization headers"""
+ litellm.disable_aiohttp_transport = True
+
+ mock_response = {
+ "id": "chatcmpl-123",
+ "object": "chat.completion",
+ "created": 1677652288,
+ "model": "axon-base",
+ "choices": [
+ {
+ "index": 0,
+ "message": {
+ "role": "assistant",
+ "content": "Test response"
+ },
+ "finish_reason": "stop"
+ }
+ ],
+ "usage": {
+ "prompt_tokens": 5,
+ "completion_tokens": 10,
+ "total_tokens": 15
+ }
+ }
+
+ request_mock = respx_mock.post("https://api.matterai.so/v1/chat/completions").respond(
+ json=mock_response, status_code=200
+ )
+
+ response = litellm.completion(
+ model="matterai/axon-base",
+ messages=[{"role": "user", "content": "Test auth"}],
+ api_key="test-api-key-123"
+ )
+
+ assert response.choices[0].message.content == "Test response"
+
+ # Verify authorization header was set correctly
+ assert request_mock.called
+ request_headers = request_mock.calls[0].request.headers
+ assert "authorization" in request_headers
+ assert request_headers["authorization"] == "Bearer test-api-key-123"
+
+
+@pytest.mark.asyncio
+@pytest.mark.respx()
+async def test_matterai_async_completion(respx_mock):
+ """Test MatterAI async completion"""
+ litellm.disable_aiohttp_transport = True
+
+ mock_response = {
+ "id": "chatcmpl-123",
+ "object": "chat.completion",
+ "created": 1677652288,
+ "model": "axon-base",
+ "choices": [
+ {
+ "index": 0,
+ "message": {
+ "role": "assistant",
+ "content": "Async response from MatterAI"
+ },
+ "finish_reason": "stop"
+ }
+ ],
+ "usage": {
+ "prompt_tokens": 8,
+ "completion_tokens": 15,
+ "total_tokens": 23
+ }
+ }
+
+ respx_mock.post("https://api.matterai.so/v1/chat/completions").respond(
+ json=mock_response, status_code=200
+ )
+
+ response = await litellm.acompletion(
+ model="matterai/axon-base",
+ messages=[{"role": "user", "content": "Async test"}],
+ api_key="test-key"
+ )
+
+ assert response.choices[0].message.content == "Async response from MatterAI"
+ assert response.usage.total_tokens == 23
diff --git a/ui/litellm-dashboard/public/assets/logos/matterai.svg b/ui/litellm-dashboard/public/assets/logos/matterai.svg
new file mode 100644
index 000000000000..488f7c96a177
--- /dev/null
+++ b/ui/litellm-dashboard/public/assets/logos/matterai.svg
@@ -0,0 +1,11 @@
+
diff --git a/ui/litellm-dashboard/src/components/add_model/provider_specific_fields.tsx b/ui/litellm-dashboard/src/components/add_model/provider_specific_fields.tsx
index 6741e43af500..60eb4bf98144 100644
--- a/ui/litellm-dashboard/src/components/add_model/provider_specific_fields.tsx
+++ b/ui/litellm-dashboard/src/components/add_model/provider_specific_fields.tsx
@@ -520,6 +520,12 @@ const PROVIDER_CREDENTIAL_FIELDS: Record =
placeholder: "https://1234567890.snowflakecomputing.com/api/v2/cortex/inference:complete",
tooltip: "Enter the full endpoint with path here. Example: https://1234567890.snowflakecomputing.com/api/v2/cortex/inference:complete",
required: true
+ }],
+ [Providers.MatterAI]: [{
+ key: "api_key",
+ label: "API Key",
+ type: "password",
+ required: true
}]
};
diff --git a/ui/litellm-dashboard/src/components/provider_info_helpers.tsx b/ui/litellm-dashboard/src/components/provider_info_helpers.tsx
index 7f854d9df157..f7e153f0a77a 100644
--- a/ui/litellm-dashboard/src/components/provider_info_helpers.tsx
+++ b/ui/litellm-dashboard/src/components/provider_info_helpers.tsx
@@ -40,7 +40,8 @@ export enum Providers {
Vertex_AI = "Vertex AI (Anthropic, Gemini, etc.)",
VolcEngine = "VolcEngine",
Voyage = "Voyage AI",
- xAI = "xAI",
+ xAI = "xAI",
+ MatterAI = "MatterAI",
}
export const provider_map: Record = {
@@ -82,6 +83,7 @@ export const provider_map: Record = {
VolcEngine: "volcengine",
DeepInfra: "deepinfra",
Hosted_Vllm: "hosted_vllm",
+ MatterAI: "matterai",
};
const asset_logos_folder = '/ui/assets/logos/';
@@ -124,7 +126,8 @@ export const providerLogoMap: Record = {
[Providers.Voyage]: `${asset_logos_folder}voyage.webp`,
[Providers.JinaAI]: `${asset_logos_folder}jina.png`,
[Providers.VolcEngine]: `${asset_logos_folder}volcengine.png`,
- [Providers.DeepInfra]: `${asset_logos_folder}deepinfra.png`
+ [Providers.DeepInfra]: `${asset_logos_folder}deepinfra.png`,
+ [Providers.MatterAI]: `${asset_logos_folder}matterai.svg`
};
export const getProviderLogoAndName = (providerValue: string): { logo: string, displayName: string } => {