fix: Adding ensemble support for vllm container #68
Changes from 6 commits
.gitignore

```diff
@@ -50,6 +50,7 @@ coverage.xml
 .hypothesis/
 .pytest_cache/
 cover/
+*.out

 # Translations
 *.mo
```
ensemble_model config.pbtxt (new file):

```
# Copyright 2023-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#  * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#  * Neither the name of NVIDIA CORPORATION nor the names of its
#    contributors may be used to endorse or promote products derived
#    from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

name: "ensemble_model"
platform: "ensemble"
max_batch_size: 1
input [
  {
    name: "text_input"
    data_type: TYPE_STRING
    dims: [ -1 ]
  }
]
output [
  {
    name: "text_output"
    data_type: TYPE_STRING
    dims: [ -1 ]
  }
]
ensemble_scheduling {
  step [
    {
      model_name: "vllm_opt"
      model_version: -1
      input_map {
        key: "text_input"
        value: "text_input"
      }
      output_map {
        key: "text_output"
        value: "text_output"
      }
    }
  ]
}
```
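For reference, a minimal sketch of how a client might query this ensemble once it is loaded. This is not part of the PR: it assumes a Triton server at localhost:8001 (as in the tests below) and that the backing vLLM model is decoupled, hence the gRPC streaming API; the prompt string is illustrative.

```python
import queue
from functools import partial

import numpy as np
import tritonclient.grpc as grpcclient


def callback(responses, result, error):
    # Collect each streamed response (or error) from the server.
    responses.put(error if error is not None else result)


responses = queue.Queue()
client = grpcclient.InferenceServerClient(url="localhost:8001")

# Full input shape is [batch, sequence]; the config above sets max_batch_size: 1.
text_input = grpcclient.InferInput("text_input", [1, 1], "BYTES")
text_input.set_data_from_numpy(np.array([[b"Hello, my name is"]], dtype=np.object_))

client.start_stream(callback=partial(callback, responses))
client.async_stream_infer(model_name="ensemble_model", inputs=[text_input])
client.stop_stream()  # Blocks until in-flight requests have completed.

result = responses.get()
if isinstance(result, Exception):
    raise result
print(result.as_numpy("text_output"))
```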
Test file changes:

```diff
@@ -48,6 +48,7 @@ def setUp(self):
         self.triton_client = grpcclient.InferenceServerClient(url="localhost:8001")
         self.vllm_model_name = "vllm_opt"
         self.python_model_name = "add_sub"
+        self.ensemble_model_name = "ensemble_model"
         self.vllm_load_test = "vllm_load_test"

     def test_vllm_triton_backend(self):
@@ -163,6 +164,17 @@ def test_exclude_input_in_output_true(self):
             expected_output=expected_output,
         )

+    def test_ensemble_model(self):
+        # Load both vllm and add_sub models
+        self.triton_client.load_model(self.vllm_load_test)
+        self.assertTrue(self.triton_client.is_model_ready(self.vllm_load_test))
+        self.triton_client.load_model(self.python_model_name)
+        self.assertTrue(self.triton_client.is_model_ready(self.python_model_name))
```
Review comment:

For example:

```python
# Load both vllm and add_sub models
self.triton_client.load_model(self.vllm_load_test)
self.assertTrue(self.triton_client.is_model_ready(self.vllm_load_test))
self.triton_client.load_model(self.python_model_name)
self.assertTrue(self.triton_client.is_model_ready(self.python_model_name))

# Test to ensure that ensemble models are supported in the vllm container.
# If ensemble support is not enabled, Triton will fail to load the ensemble.
self.triton_client.load_model(self.ensemble_model_name)
self.assertTrue(self.triton_client.is_model_ready(self.ensemble_model_name))
```

Also use the vllm_load_test model inside the ensemble instead of vllm_opt, for the same reason.
Reply:

The tests document some expected behavior around vllm_opt staying alive for the duration of the test (vllm_backend/ci/L0_backend_vllm/vllm_backend/test.sh, lines 53 to 57 in 78bf0fb):

```sh
# `vllm_opt` model will be loaded on server start and stay loaded throughout
# unittesting. To test vllm model load/unload we use a dedicated
# `vllm_load_test`. To ensure that vllm's memory profiler will not error out
# on `vllm_load_test` load, we reduce "gpu_memory_utilization" for `vllm_opt`,
# so that at least 60% of GPU memory was available for other models.
```

So I think it's best not to mess with vllm_opt in a new test and to make use of vllm_load_test instead.
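For context, gpu_memory_utilization is one of the vLLM engine arguments that the backend reads from a model's model.json; a hypothetical example (the field values here are illustrative, not taken from this PR):

```json
{
    "model": "facebook/opt-125m",
    "disable_log_requests": true,
    "gpu_memory_utilization": 0.4
}
```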