We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent a600a2c · commit 78bf0fb — Copy full SHA for 78bf0fb
ci/L0_backend_vllm/vllm_backend/vllm_backend_test.py
@@ -167,12 +167,11 @@ def test_exclude_input_in_output_true(self):
167
def test_ensemble_model(self):
168
# Test to ensure that ensemble models are supported in vllm container.
169
# If ensemble support not present, triton will error out at model loading stage.
170
-
+
171
# Before loading ensemble model, the dependency model is loaded.
172
self.triton_client.load_model(self.vllm_model_name)
173
self.assertTrue(self.triton_client.is_model_ready(self.vllm_model_name))
174
175
176
self.triton_client.load_model(self.enseble_model_name)
177
self.assertTrue(self.triton_client.is_model_ready(self.enseble_model_name))
178
0 commit comments