From 6f75c3ea3c5d1e28702f23402f2ed61c40f4b4d0 Mon Sep 17 00:00:00 2001 From: Jack Zhang Date: Fri, 15 Nov 2024 08:58:42 -0800 Subject: [PATCH] Bump runner memory for llama3_2 torchtune test_model --- .ci/scripts/gather_test_models.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.ci/scripts/gather_test_models.py b/.ci/scripts/gather_test_models.py index 078561c9d85..7cc1d83a096 100755 --- a/.ci/scripts/gather_test_models.py +++ b/.ci/scripts/gather_test_models.py @@ -25,7 +25,7 @@ "resnet50": "linux.12xlarge", "llava": "linux.12xlarge", "llama3_2_vision_encoder": "linux.12xlarge", - "llama3_2_text_decoder": "linux.12xlarge", + "llama3_2_text_decoder": "linux.24xlarge", # This one causes timeout on smaller runner, the root cause is unclear (T161064121) "dl3": "linux.12xlarge", "emformer_join": "linux.12xlarge", @@ -88,7 +88,7 @@ def model_should_run_on_event(model: str, event: str) -> bool: We put higher priority and fast models to pull request and rest to push. """ if event == "pull_request": - return model in ["mv3", "vit"] + return model in ["mv3", "vit", "llama3_2_text_decoder"] elif event == "push": # These are super slow. Only run it periodically return model not in ["dl3", "edsr", "emformer_predict"]