Skip to content

Commit a2ce20b

Browse files
committed
fix: remove openai provider
1 parent a050cc5 commit a2ce20b

File tree

9 files changed

+28
-212
lines changed

9 files changed

+28
-212
lines changed

README.md

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -1,5 +1,5 @@
11
<div align="center">
2-
<img height=200 src="https://github.com/SynaLinks/synalinks/blob/main/img/synalinks.png?raw=true">
2+
<img height=200 src="https://github.com/SynaLinks/synalinks/blob/main/img/synalinks.svg?raw=true">
33
</div>
44

55
<div align="center">
@@ -88,7 +88,7 @@ Building robust LM apps is hard. Synalinks simplifies it with:
8888
- **Constrained structured outputs** (JSON) for correctness
8989
- **Automatic async & parallel execution** by default
9090
- **Metrics, rewards & evaluations** built-in
91-
- **Native integrations**: OpenAI, Ollama, Anthropic, Mistral, Azure, Groq, Gemini, XAI
91+
- **Native integrations**: Ollama, Anthropic, Mistral, Azure, Groq, Gemini
9292
- **Embeddable fast knowledge base support**: based on DuckDB
9393
- **API-ready**: Deploy with FastAPI or FastMCP
9494
- **KerasTuner compatibility** for hyperparameter search

coverage-badge.svg

Lines changed: 1 addition & 1 deletion
Loading

docs/Deployment/Building a REST API.md

Lines changed: 0 additions & 184 deletions
This file was deleted.

img/synalinks.png

-51.1 KB
Binary file not shown.

img/synalinks.svg

Lines changed: 13 additions & 0 deletions
Loading

mkdocs.yml

Lines changed: 0 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -194,7 +194,5 @@ nav:
194194
- Synalinks API/Utilities/More plotting utilities.md
195195
- Synalinks API/Utilities/NLP utilities.md
196196
- Synalinks API/Config.md
197-
- Deployment:
198-
- Deployment/Building a REST API.md
199197
- Observability:
200198
- Observability/MLflow.md

synalinks/src/language_models/language_model.py

Lines changed: 8 additions & 19 deletions
Original file line number | Diff line number | Diff line change
@@ -16,7 +16,7 @@
1616

1717
litellm.drop_params = True
1818
litellm.disable_aiohttp_transport = True
19-
19+
litellm.drop_params = True
2020

2121
@synalinks_export(
2222
[
@@ -38,19 +38,6 @@ class LanguageModel(SynalinksSaveable):
3838
3939
For the complete list of models, please refer to the providers documentation.
4040
41-
**Using OpenAI models**
42-
43-
```python
44-
import synalinks
45-
import os
46-
47-
os.environ["OPENAI_API_KEY"] = "your-api-key"
48-
49-
language_model = synalinks.LanguageModel(
50-
model="openai/gpt-4o-mini",
51-
)
52-
```
53-
5441
**Using Groq models**
5542
5643
```python
@@ -150,13 +137,13 @@ class LanguageModel(SynalinksSaveable):
150137
import synalinks
151138
import os
152139
153-
os.environ["OPENAI_API_KEY"] = "your-api-key"
140+
os.environ["GEMINI_API_KEY"] = "your-api-key"
154141
os.environ["ANTHROPIC_API_KEY"] = "your-api-key"
155142
156143
language_model = synalinks.LanguageModel(
157144
model="anthropic/claude-3-sonnet-20240229",
158145
fallback=synalinks.LanguageModel(
159-
model="openai/gpt-4o-mini",
146+
model="gemini/gemini-3-flash-preview",
160147
)
161148
)
162149
```
@@ -228,6 +215,7 @@ async def __call__(self, messages, schema=None, streaming=False, **kwargs):
228215
json_instance = {}
229216
input_kwargs = copy.deepcopy(kwargs)
230217
schema = copy.deepcopy(schema)
218+
provider = self.model.split("/")[0]
231219

232220
# Handle reasoning_effort parameter - just forward to litellm if supported
233221
reasoning_effort = kwargs.pop("reasoning_effort", "none")
@@ -282,7 +270,7 @@ async def __call__(self, messages, schema=None, streaming=False, **kwargs):
282270
},
283271
}
284272
)
285-
elif self.model.startswith("openai") or self.model.startswith("azure"):
273+
elif self.model.startswith("azure"):
286274
# Use constrained structured output for openai
287275
# OpenAI require the field "additionalProperties"
288276
# Also OpenAI disallow the field "description" in $ref
@@ -356,8 +344,9 @@ async def __call__(self, messages, schema=None, streaming=False, **kwargs):
356344
if streaming:
357345
kwargs.update({"stream": True})
358346
# Enable prompt caching for the system instructions (that only change during training not inference)
359-
system_message_with_cache_control = {**formatted_messages[0], "cache_control": {"type": "ephemeral"}}
360-
formatted_messages[0] = system_message_with_cache_control
347+
if provider in ("gemini", "anthropic"):
348+
system_message_with_cache_control = {**formatted_messages[0], "cache_control": {"type": "ephemeral"}}
349+
formatted_messages[0] = system_message_with_cache_control
361350
for i in range(self.retry):
362351
try:
363352
response_str = ""

synalinks/src/modules/knowledge/retrieve_knowledge_test.py

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -63,7 +63,7 @@ async def test_retrieve_knowledge(self, mock_completion):
6363
]
6464
await knowledge_base.update(docs)
6565

66-
language_model = LanguageModel(model="openai/gpt-4o-mini")
66+
language_model = LanguageModel(model="ollama/mistral")
6767

6868
retrieve_module = RetrieveKnowledge(
6969
knowledge_base=knowledge_base,
@@ -85,7 +85,7 @@ async def test_retrieve_knowledge_none_input(self):
8585
data_models=[Document],
8686
)
8787

88-
language_model = LanguageModel(model="openai/gpt-4o-mini")
88+
language_model = LanguageModel(model="ollama/mistral")
8989

9090
retrieve_module = RetrieveKnowledge(
9191
knowledge_base=knowledge_base,
@@ -103,7 +103,7 @@ def test_retrieve_knowledge_default_instructions(self):
103103
data_models=[Document],
104104
)
105105

106-
language_model = LanguageModel(model="openai/gpt-4o-mini")
106+
language_model = LanguageModel(model="ollama/mistral")
107107

108108
retrieve_module = RetrieveKnowledge(
109109
knowledge_base=knowledge_base,

synalinks/src/modules/ttc/chain_of_thought_test.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -60,7 +60,7 @@ class Query(DataModel):
6060
class Answer(DataModel):
6161
answer: str = Field(description="The correct answer")
6262

63-
language_model = LanguageModel(model="openai/gpt-4o")
63+
language_model = LanguageModel(model="ollama/mistral")
6464

6565
x0 = Input(data_model=Query)
6666
x1 = await ChainOfThought(

0 commit comments

Comments (0)