209 changes: 209 additions & 0 deletions docs/my-website/docs/providers/matterai.md
@@ -0,0 +1,209 @@
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';

# MatterAI

https://docs.matterai.so

MatterAI offers large language models for general-purpose, coding, and research use cases. Its OpenAI-compatible API makes integration straightforward, so developers can build efficient, scalable AI applications.

| Property | Details |
| ------------------------- | --------------------------------------------------------------------------- |
| Description | MatterAI offers powerful language models like `axon-base` and `axon-code`. |
| Provider Route on LiteLLM | `matterai/` (add this prefix to the model name - e.g. `matterai/axon-base`) |
| Provider Doc | [MatterAI ↗](https://docs.matterai.so) |
| API Endpoint for Provider | https://api.matterai.so/v1 |
| Supported Endpoints | `/chat/completions`, `/completions` |
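
Both endpoints are reachable through LiteLLM; for the legacy `/completions` route, LiteLLM's `text_completion` helper applies (a minimal sketch, assuming `axon-base` accepts plain-prompt requests):

```python
from litellm import text_completion
import os

os.environ["MATTERAI_API_KEY"] = "your-api-key"

# Legacy text-completion call against the /completions endpoint
response = text_completion(
    model="matterai/axon-base",
    prompt="Once upon a time",
)
print(response)
```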

## Supported OpenAI Parameters

MatterAI is fully OpenAI-compatible and supports the following parameters:

- `stream`
- `stop`
- `temperature`
- `top_p`
- `max_tokens`
- `presence_penalty`
- `frequency_penalty`
- `logit_bias`
- `user`
- `response_format`
- `seed`
- `tools`
- `tool_choice`
- `parallel_tool_calls`
- `extra_headers`
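
For example, `response_format` and `seed` pass straight through, just as with the OpenAI SDK (a sketch; whether the model honors them depends on MatterAI's feature support):

```python
from litellm import completion

response = completion(
    model="matterai/axon-base",
    messages=[{"role": "user", "content": "Return a JSON object with a 'city' key."}],
    response_format={"type": "json_object"},  # request valid JSON output
    seed=42,  # best-effort reproducible sampling
)
print(response.choices[0].message.content)
```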

## API Key Setup

To use MatterAI, set your API key as an environment variable:

```python
import os

os.environ["MATTERAI_API_KEY"] = "your-api-key"
```

## Usage

<Tabs>
<TabItem value="sdk" label="SDK">

```python
from litellm import completion
import os

os.environ['MATTERAI_API_KEY'] = "your-api-key"

response = completion(
model="matterai/axon-base",
messages=[
{"role": "user", "content": "Hello from LiteLLM!"}
],
)
print(response)
```

</TabItem>
<TabItem value="proxy" label="Proxy">

```yaml
model_list:
- model_name: matterai-axon-base
litellm_params:
model: matterai/axon-base
api_key: os.environ/MATTERAI_API_KEY
```
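
Start the proxy with `litellm --config /path/to/config.yaml`, then call it with any OpenAI client (a sketch, assuming the proxy's default port 4000 and an `sk-1234` master key):

```python
from openai import OpenAI

# Point the standard OpenAI client at the LiteLLM proxy
client = OpenAI(api_key="sk-1234", base_url="http://0.0.0.0:4000")

response = client.chat.completions.create(
    model="matterai-axon-base",  # the model_name from config.yaml
    messages=[{"role": "user", "content": "Hello from the proxy!"}],
)
print(response.choices[0].message.content)
```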

</TabItem>
</Tabs>

## Streaming

```python
from litellm import completion
import os

os.environ['MATTERAI_API_KEY'] = "your-api-key"

response = completion(
model="matterai/axon-code",
messages=[
{"role": "user", "content": "Write a short story about a robot learning to code."}
],
stream=True
)

for chunk in response:
    # Each chunk follows the OpenAI streaming delta format;
    # the incremental text is in chunk.choices[0].delta.content
    print(chunk)
```

## Advanced Usage

### Custom Parameters

```python
from litellm import completion

response = completion(
model="matterai/axon-base",
messages=[{"role": "user", "content": "Explain quantum computing"}],
temperature=0.7,
max_tokens=500,
top_p=0.9,
stop=["Human:", "AI:"]
)
```

### Function Calling

MatterAI supports OpenAI-compatible function calling:

```python
from litellm import completion

functions = [
{
"name": "get_weather",
"description": "Get current weather information",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city and state"
}
},
"required": ["location"]
}
}
]

response = completion(
model="matterai/axon-base",
messages=[{"role": "user", "content": "What's the weather in San Francisco?"}],
tools=[{"type": "function", "function": f} for f in functions],
tool_choice="auto"
)
```
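
When the model elects to call the function, the tool call comes back on the response message. A minimal sketch of reading it, assuming the standard OpenAI response shape that LiteLLM normalizes to:

```python
import json

message = response.choices[0].message
if message.tool_calls:
    for tool_call in message.tool_calls:
        # Arguments arrive as a JSON string, per the OpenAI format
        args = json.loads(tool_call.function.arguments)
        print(tool_call.function.name, args)
```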

### Async Usage

```python
import asyncio
from litellm import acompletion

async def async_call():
response = await acompletion(
model="matterai/axon-base",
messages=[{"role": "user", "content": "Hello async world!"}]
)
return response

# Run async function
response = asyncio.run(async_call())
print(response)
```
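
Since `acompletion` is a coroutine, multiple requests can run concurrently with `asyncio.gather` (a sketch):

```python
import asyncio
from litellm import acompletion

async def ask(prompt: str):
    return await acompletion(
        model="matterai/axon-base",
        messages=[{"role": "user", "content": prompt}],
    )

async def main():
    # Issue both requests concurrently rather than sequentially
    responses = await asyncio.gather(
        ask("Summarize HTTP/2 in one sentence."),
        ask("Summarize HTTP/3 in one sentence."),
    )
    for r in responses:
        print(r.choices[0].message.content)

asyncio.run(main())
```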

## Available Models

MatterAI offers models like `axon-base` and `axon-code`.

Model names as passed to LiteLLM:

- `matterai/axon-base`
- `matterai/axon-code`

## Benefits

- **Powerful Models**: Access to advanced language models optimized for various tasks
- **OpenAI Compatibility**: Seamless integration with existing OpenAI-compatible tools and workflows
- **Scalable**: Built for efficient, high-throughput applications
- **Developer-Friendly**: Simple API with comprehensive documentation

## Error Handling

MatterAI returns standard OpenAI-compatible error responses:

```python
from litellm import completion
from litellm.exceptions import AuthenticationError, RateLimitError

try:
response = completion(
model="matterai/axon-base",
messages=[{"role": "user", "content": "Hello"}]
)
except AuthenticationError:
print("Invalid API key")
except RateLimitError:
print("Rate limit exceeded")
```
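
For transient failures, LiteLLM can also retry automatically via `num_retries` (a sketch; tune the count to your rate limits):

```python
response = completion(
    model="matterai/axon-base",
    messages=[{"role": "user", "content": "Hello"}],
    num_retries=3,  # retry transient errors up to 3 times
)
```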

## Support

- Documentation: https://docs.matterai.so
- Contact: [email protected]
1 change: 1 addition & 0 deletions docs/my-website/sidebars.js
@@ -522,6 +522,7 @@ const sidebars = {
"providers/oci",
"providers/datarobot",
"providers/ovhcloud",
"providers/matterai",
],
},
{
5 changes: 5 additions & 0 deletions litellm/__init__.py
@@ -526,6 +526,7 @@ def identify(event_details):
ovhcloud_models: Set = set()
ovhcloud_embedding_models: Set = set()
lemonade_models: Set = set()
matterai_models: Set = set()


def is_bedrock_pricing_only_model(key: str) -> bool:
@@ -750,6 +751,8 @@ def add_known_models():
ovhcloud_embedding_models.add(key)
elif value.get("litellm_provider") == "lemonade":
lemonade_models.add(key)
elif value.get("litellm_provider") == "matterai":
matterai_models.add(key)


add_known_models()
@@ -848,6 +851,7 @@ def add_known_models():
| wandb_models
| ovhcloud_models
| lemonade_models
| matterai_models
)

model_list_set = set(model_list)
@@ -933,6 +937,7 @@ def add_known_models():
"wandb": wandb_models,
"ovhcloud": ovhcloud_models | ovhcloud_embedding_models,
"lemonade": lemonade_models,
"matterai": matterai_models,
}

# mapping for those models which have larger equivalents
1 change: 1 addition & 0 deletions litellm/llms/matterai/__init__.py
@@ -0,0 +1 @@
# MatterAI Provider for LiteLLM
1 change: 1 addition & 0 deletions litellm/llms/matterai/chat/__init__.py
@@ -0,0 +1 @@
# MatterAI Chat Module for LiteLLM
84 changes: 84 additions & 0 deletions litellm/llms/matterai/chat/transformation.py
@@ -0,0 +1,84 @@
# MatterAI Chat Transformation for LiteLLM
from typing import Any, List, Optional, Tuple, Union

import httpx

from litellm.llms.base_llm.chat.transformation import BaseLLMException
from litellm.llms.openai.chat.gpt_transformation import OpenAIGPTConfig
from litellm.llms.openai.common_utils import OpenAIError
from litellm.secret_managers.main import get_secret_str
from litellm.types.utils import ModelResponse


class MatterAIChatConfig(OpenAIGPTConfig):
"""
Configuration class for MatterAI chat completions.
Since MatterAI is OpenAI-compatible, we extend OpenAIGPTConfig.
"""

def _get_openai_compatible_provider_info(
self, api_base: Optional[str], api_key: Optional[str]
) -> Tuple[Optional[str], Optional[str]]:
"""
Get API base and key for MatterAI provider.
"""
api_base = api_base or "https://api.matterai.so/v1"
dynamic_api_key = api_key or get_secret_str("MATTERAI_API_KEY") or ""
return api_base, dynamic_api_key

def transform_response(
self,
model: str,
raw_response: httpx.Response,
model_response: ModelResponse,
logging_obj: Any,
request_data: dict,
messages: List,
optional_params: dict,
litellm_params: dict,
encoding: Any,
api_key: Optional[str] = None,
json_mode: Optional[bool] = None,
) -> ModelResponse:
"""
Transform MatterAI response to LiteLLM format.
Since MatterAI is OpenAI-compatible, we can use the standard OpenAI transformation.
"""
# LOGGING
logging_obj.post_call(
input=messages,
api_key=api_key,
original_response=raw_response.text,
additional_args={"complete_input_dict": request_data},
)

# RESPONSE OBJECT
response_json = raw_response.json()

# Handle JSON mode if needed
if json_mode:
for choice in response_json["choices"]:
message = choice.get("message")
if message and message.get("tool_calls"):
# Convert tool calls to content for JSON mode
tool_calls = message.get("tool_calls", [])
if len(tool_calls) == 1:
message["content"] = tool_calls[0]["function"].get("arguments", "")
message["tool_calls"] = None

returned_response = ModelResponse(**response_json)

# Set model name with provider prefix
returned_response.model = f"matterai/{model}"

return returned_response

def get_error_class(
self, error_message: str, status_code: int, headers: Union[dict, httpx.Headers]
) -> BaseLLMException:
"""
Get the appropriate error class for MatterAI errors.
Since MatterAI is OpenAI-compatible, we use OpenAI error handling.
"""
return OpenAIError(
status_code=status_code,
message=error_message,
headers=headers,
)