Skip to content

Commit 3d84b39

Browse files
removed testing multiple guided decoding backends to save time
Signed-off-by: Govind Ramnarayan <105831528+govind-ramnarayan@users.noreply.github.com>
1 parent af359e2 commit 3d84b39

File tree

1 file changed

+3
-3
lines changed

1 file changed

+3
-3
lines changed

tests/unittest/_torch/auto_deploy/unit/singlegpu/test_ad_guided_decoding_regex.py

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -13,22 +13,22 @@
1313
# See the License for the specific language governing permissions and
1414
# limitations under the License.
1515

16-
import pytest
1716
from _model_test_utils import get_small_model_config
1817
from build_and_run_ad import ExperimentConfig, main
1918

2019
from tensorrt_llm.llmapi import GuidedDecodingParams
2120

2221

23-
@pytest.mark.parametrize("guided_decoding_backend", ["xgrammar", "llguidance"])
24-
def test_ad_guided_decoding_regex_e2e(guided_decoding_backend: str):
22+
def test_ad_guided_decoding_regex_e2e():
2523
"""Test guided decoding with regex pattern validation using the build_and_run_ad main()."""
2624
test_case = {
2725
"prompt": "What is the capital of France?",
2826
"regex": r"I don't know, I am a randomly initialized model|Paris",
2927
"valid_responses": ["I don't know, I am a randomly initialized model", "Paris"],
3028
}
3129

30+
guided_decoding_backend = "xgrammar"
31+
3232
experiment_config = get_small_model_config("meta-llama/Meta-Llama-3.1-8B-Instruct")
3333

3434
# DemoLLM runtime does not support guided decoding. Need to set runtime to trtllm.

0 commit comments

Comments (0)