
Commit abd3ce4

lesebiamemilio authored and committed
chore: remove openai dependency from providers (llamastack#3398)
# What does this PR do?
The openai package is already a dependency of the llama-stack project itself, so let the project dictate which openai version we need and avoid potential breakage from unsatisfiable dependency resolution.

Signed-off-by: Sébastien Han <[email protected]>
1 parent 4a7c8bd commit abd3ce4
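
To make the rationale concrete, here is a minimal sketch (not the actual llama-stack build code) of how per-provider pip_packages are typically unioned on top of the core dependency set; CORE_DEPENDENCIES and collect_pip_packages are hypothetical names for illustration. Once "openai" is pinned centrally in pyproject.toml, a provider-level duplicate can only either agree with that pin or conflict with it, so dropping the duplicates removes the conflict case:

from llama_stack.providers.datatypes import ProviderSpec

# Pinned once, in pyproject.toml (hypothetical stand-in for the real build logic).
CORE_DEPENDENCIES = {"openai>=1.100.0"}

def collect_pip_packages(providers: list[ProviderSpec]) -> set[str]:
    """Union of provider extras on top of the core dependency set."""
    deps = set(CORE_DEPENDENCIES)
    for spec in providers:
        # e.g. ["litellm"] for gemini/groq/sambanova; no longer includes "openai"
        deps.update(spec.pip_packages)
    return deps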

File tree

5 files changed: +13 -23 lines changed


llama_stack/providers/registry/batches.py

Lines changed: 1 addition & 1 deletion
@@ -13,7 +13,7 @@ def available_providers() -> list[ProviderSpec]:
         InlineProviderSpec(
             api=Api.batches,
             provider_type="inline::reference",
-            pip_packages=["openai"],
+            pip_packages=[],
             module="llama_stack.providers.inline.batches.reference",
             config_class="llama_stack.providers.inline.batches.reference.config.ReferenceBatchesImplConfig",
             api_dependencies=[

llama_stack/providers/registry/inference.py

Lines changed: 8 additions & 12 deletions
@@ -75,7 +75,7 @@ def available_providers() -> list[ProviderSpec]:
             api=Api.inference,
             adapter=AdapterSpec(
                 adapter_type="vllm",
-                pip_packages=["openai"],
+                pip_packages=[],
                 module="llama_stack.providers.remote.inference.vllm",
                 config_class="llama_stack.providers.remote.inference.vllm.VLLMInferenceAdapterConfig",
                 description="Remote vLLM inference provider for connecting to vLLM servers.",
@@ -151,9 +151,7 @@ def available_providers() -> list[ProviderSpec]:
             api=Api.inference,
             adapter=AdapterSpec(
                 adapter_type="databricks",
-                pip_packages=[
-                    "openai",
-                ],
+                pip_packages=[],
                 module="llama_stack.providers.remote.inference.databricks",
                 config_class="llama_stack.providers.remote.inference.databricks.DatabricksImplConfig",
                 description="Databricks inference provider for running models on Databricks' unified analytics platform.",
@@ -163,9 +161,7 @@ def available_providers() -> list[ProviderSpec]:
             api=Api.inference,
             adapter=AdapterSpec(
                 adapter_type="nvidia",
-                pip_packages=[
-                    "openai",
-                ],
+                pip_packages=[],
                 module="llama_stack.providers.remote.inference.nvidia",
                 config_class="llama_stack.providers.remote.inference.nvidia.NVIDIAConfig",
                 description="NVIDIA inference provider for accessing NVIDIA NIM models and AI services.",
@@ -175,7 +171,7 @@ def available_providers() -> list[ProviderSpec]:
             api=Api.inference,
             adapter=AdapterSpec(
                 adapter_type="runpod",
-                pip_packages=["openai"],
+                pip_packages=[],
                 module="llama_stack.providers.remote.inference.runpod",
                 config_class="llama_stack.providers.remote.inference.runpod.RunpodImplConfig",
                 description="RunPod inference provider for running models on RunPod's cloud GPU platform.",
@@ -207,7 +203,7 @@ def available_providers() -> list[ProviderSpec]:
             api=Api.inference,
             adapter=AdapterSpec(
                 adapter_type="gemini",
-                pip_packages=["litellm", "openai"],
+                pip_packages=["litellm"],
                 module="llama_stack.providers.remote.inference.gemini",
                 config_class="llama_stack.providers.remote.inference.gemini.GeminiConfig",
                 provider_data_validator="llama_stack.providers.remote.inference.gemini.config.GeminiProviderDataValidator",
@@ -218,7 +214,7 @@ def available_providers() -> list[ProviderSpec]:
             api=Api.inference,
             adapter=AdapterSpec(
                 adapter_type="vertexai",
-                pip_packages=["litellm", "google-cloud-aiplatform", "openai"],
+                pip_packages=["litellm", "google-cloud-aiplatform"],
                 module="llama_stack.providers.remote.inference.vertexai",
                 config_class="llama_stack.providers.remote.inference.vertexai.VertexAIConfig",
                 provider_data_validator="llama_stack.providers.remote.inference.vertexai.config.VertexAIProviderDataValidator",
@@ -248,7 +244,7 @@ def available_providers() -> list[ProviderSpec]:
             api=Api.inference,
             adapter=AdapterSpec(
                 adapter_type="groq",
-                pip_packages=["litellm", "openai"],
+                pip_packages=["litellm"],
                 module="llama_stack.providers.remote.inference.groq",
                 config_class="llama_stack.providers.remote.inference.groq.GroqConfig",
                 provider_data_validator="llama_stack.providers.remote.inference.groq.config.GroqProviderDataValidator",
@@ -270,7 +266,7 @@ def available_providers() -> list[ProviderSpec]:
             api=Api.inference,
             adapter=AdapterSpec(
                 adapter_type="sambanova",
-                pip_packages=["litellm", "openai"],
+                pip_packages=["litellm"],
                 module="llama_stack.providers.remote.inference.sambanova",
                 config_class="llama_stack.providers.remote.inference.sambanova.SambaNovaImplConfig",
                 provider_data_validator="llama_stack.providers.remote.inference.sambanova.config.SambaNovaProviderDataValidator",
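
These adapters can drop "openai" from pip_packages because the package is now guaranteed by llama-stack's own install. A minimal hypothetical sketch of that pattern (ExampleOpenAICompatAdapter is illustrative, not a provider from this repo): an adapter imports the openai client unconditionally and points it at an OpenAI-compatible endpoint, as vLLM or RunPod expose.

from openai import AsyncOpenAI

class ExampleOpenAICompatAdapter:
    """Talks to any OpenAI-compatible inference server; needs no extra pip_packages."""

    def __init__(self, base_url: str, api_key: str) -> None:
        # "openai" resolves via the core llama-stack dependency, not a provider pin.
        self._client = AsyncOpenAI(base_url=base_url, api_key=api_key)

    async def chat(self, model: str, messages: list[dict]) -> str:
        resp = await self._client.chat.completions.create(model=model, messages=messages)
        return resp.choices[0].message.content or ""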

llama_stack/providers/registry/scoring.py

Lines changed: 1 addition & 1 deletion
@@ -38,7 +38,7 @@ def available_providers() -> list[ProviderSpec]:
         InlineProviderSpec(
             api=Api.scoring,
             provider_type="inline::braintrust",
-            pip_packages=["autoevals", "openai"],
+            pip_packages=["autoevals"],
             module="llama_stack.providers.inline.scoring.braintrust",
             config_class="llama_stack.providers.inline.scoring.braintrust.BraintrustScoringConfig",
             api_dependencies=[

pyproject.toml

Lines changed: 1 addition & 3 deletions
@@ -32,7 +32,7 @@ dependencies = [
     "jinja2>=3.1.6",
     "jsonschema",
     "llama-stack-client>=0.2.21",
-    "openai>=1.99.6",
+    "openai>=1.100.0", # for expires_after support
     "prompt-toolkit",
     "python-dotenv",
     "python-jose[cryptography]",
@@ -80,7 +80,6 @@ dev = [
 unit = [
     "sqlite-vec",
     "ollama",
-    "openai",
     "aiosqlite",
     "aiohttp",
     "psycopg2-binary>=2.9.0",
@@ -105,7 +104,6 @@ unit = [
 # separately. If you are using "uv" to execute your tests, you can use the "--group" flag to specify extra
 # dependencies.
 test = [
-    "openai>=1.100.0", # for expires_after support
     "aiosqlite",
     "aiohttp",
     "torch>=2.6.0",

uv.lock

Lines changed: 2 additions & 6 deletions
Some generated files are not rendered by default.
