@@ -75,7 +75,7 @@ def available_providers() -> list[ProviderSpec]:
7575 api = Api .inference ,
7676 adapter = AdapterSpec (
7777 adapter_type = "vllm" ,
78- pip_packages = ["openai" ],
78+ pip_packages = [],
7979 module = "llama_stack.providers.remote.inference.vllm" ,
8080 config_class = "llama_stack.providers.remote.inference.vllm.VLLMInferenceAdapterConfig" ,
8181 description = "Remote vLLM inference provider for connecting to vLLM servers." ,
@@ -151,9 +151,7 @@ def available_providers() -> list[ProviderSpec]:
151151 api = Api .inference ,
152152 adapter = AdapterSpec (
153153 adapter_type = "databricks" ,
154- pip_packages = [
155- "openai" ,
156- ],
154+ pip_packages = [],
157155 module = "llama_stack.providers.remote.inference.databricks" ,
158156 config_class = "llama_stack.providers.remote.inference.databricks.DatabricksImplConfig" ,
159157 description = "Databricks inference provider for running models on Databricks' unified analytics platform." ,
@@ -163,9 +161,7 @@ def available_providers() -> list[ProviderSpec]:
163161 api = Api .inference ,
164162 adapter = AdapterSpec (
165163 adapter_type = "nvidia" ,
166- pip_packages = [
167- "openai" ,
168- ],
164+ pip_packages = [],
169165 module = "llama_stack.providers.remote.inference.nvidia" ,
170166 config_class = "llama_stack.providers.remote.inference.nvidia.NVIDIAConfig" ,
171167 description = "NVIDIA inference provider for accessing NVIDIA NIM models and AI services." ,
@@ -175,7 +171,7 @@ def available_providers() -> list[ProviderSpec]:
175171 api = Api .inference ,
176172 adapter = AdapterSpec (
177173 adapter_type = "runpod" ,
178- pip_packages = ["openai" ],
174+ pip_packages = [],
179175 module = "llama_stack.providers.remote.inference.runpod" ,
180176 config_class = "llama_stack.providers.remote.inference.runpod.RunpodImplConfig" ,
181177 description = "RunPod inference provider for running models on RunPod's cloud GPU platform." ,
@@ -207,7 +203,7 @@ def available_providers() -> list[ProviderSpec]:
207203 api = Api .inference ,
208204 adapter = AdapterSpec (
209205 adapter_type = "gemini" ,
210- pip_packages = ["litellm" , "openai" ],
206+ pip_packages = ["litellm" ],
211207 module = "llama_stack.providers.remote.inference.gemini" ,
212208 config_class = "llama_stack.providers.remote.inference.gemini.GeminiConfig" ,
213209 provider_data_validator = "llama_stack.providers.remote.inference.gemini.config.GeminiProviderDataValidator" ,
@@ -218,7 +214,7 @@ def available_providers() -> list[ProviderSpec]:
218214 api = Api .inference ,
219215 adapter = AdapterSpec (
220216 adapter_type = "vertexai" ,
221- pip_packages = ["litellm" , "google-cloud-aiplatform" , "openai" ],
217+ pip_packages = ["litellm" , "google-cloud-aiplatform" ],
222218 module = "llama_stack.providers.remote.inference.vertexai" ,
223219 config_class = "llama_stack.providers.remote.inference.vertexai.VertexAIConfig" ,
224220 provider_data_validator = "llama_stack.providers.remote.inference.vertexai.config.VertexAIProviderDataValidator" ,
@@ -248,7 +244,7 @@ def available_providers() -> list[ProviderSpec]:
248244 api = Api .inference ,
249245 adapter = AdapterSpec (
250246 adapter_type = "groq" ,
251- pip_packages = ["litellm" , "openai" ],
247+ pip_packages = ["litellm" ],
252248 module = "llama_stack.providers.remote.inference.groq" ,
253249 config_class = "llama_stack.providers.remote.inference.groq.GroqConfig" ,
254250 provider_data_validator = "llama_stack.providers.remote.inference.groq.config.GroqProviderDataValidator" ,
@@ -270,7 +266,7 @@ def available_providers() -> list[ProviderSpec]:
270266 api = Api .inference ,
271267 adapter = AdapterSpec (
272268 adapter_type = "sambanova" ,
273- pip_packages = ["litellm" , "openai" ],
269+ pip_packages = ["litellm" ],
274270 module = "llama_stack.providers.remote.inference.sambanova" ,
275271 config_class = "llama_stack.providers.remote.inference.sambanova.SambaNovaImplConfig" ,
276272 provider_data_validator = "llama_stack.providers.remote.inference.sambanova.config.SambaNovaProviderDataValidator" ,