Skip to content

Commit 8963eff

Browse files
22quinn and jingyu
authored and committed
[BugFix] [Spec Decode] Remove LlamaForCausalLMEagle3 to fix CI (vllm-project#22611)
Signed-off-by: 22quinn <[email protected]> Signed-off-by: jingyu <[email protected]>
1 parent 60190cf commit 8963eff

File tree

4 files changed

+32
-27
lines changed

4 files changed

+32
-27
lines changed

tests/models/registry.py

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -525,10 +525,11 @@ def check_available_online(
525525
trust_remote_code=True,
526526
speculative_model="yuhuili/EAGLE3-LLaMA3.1-Instruct-8B",
527527
tokenizer="meta-llama/Llama-3.1-8B-Instruct"),
528-
"LlamaForCausalLMEagle3": _HfExamplesInfo("AngelSlim/Qwen3-8B_eagle3", # noqa: E501
529-
trust_remote_code=True,
530-
speculative_model="AngelSlim/Qwen3-8B_eagle3",
531-
tokenizer="Qwen/Qwen3-8B"),
528+
# TODO: Re-enable this once tests/models/test_initialization.py is fixed, see PR #22333 #22611 # noqa: E501
529+
# "LlamaForCausalLMEagle3": _HfExamplesInfo("AngelSlim/Qwen3-8B_eagle3", # noqa: E501
530+
# trust_remote_code=True,
531+
# speculative_model="AngelSlim/Qwen3-8B_eagle3", # noqa: E501
532+
# tokenizer="Qwen/Qwen3-8B"),
532533
"EagleLlama4ForCausalLM": _HfExamplesInfo(
533534
"morgendave/EAGLE-Llama-4-Scout-17B-16E-Instruct",
534535
trust_remote_code=True,

tests/v1/e2e/test_spec_decode.py

Lines changed: 24 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -125,27 +125,30 @@ def test_ngram_correctness(
125125
cleanup_dist_env_and_memory()
126126

127127

128-
@pytest.mark.parametrize(["model_setup", "mm_enabled"], [
129-
(("eagle3", "Qwen/Qwen3-8B", "AngelSlim/Qwen3-8B_eagle3", 1), False),
130-
(("eagle", "meta-llama/Llama-3.1-8B-Instruct",
131-
"yuhuili/EAGLE-LLaMA3.1-Instruct-8B", 1), False),
132-
(("eagle3", "meta-llama/Llama-3.1-8B-Instruct",
133-
"yuhuili/EAGLE3-LLaMA3.1-Instruct-8B", 1), False),
134-
pytest.param(
135-
("eagle", "meta-llama/Llama-4-Scout-17B-16E-Instruct",
136-
"morgendave/EAGLE-Llama-4-Scout-17B-16E-Instruct", 4),
137-
False,
138-
marks=pytest.mark.skip(reason="Skipping due to CI OOM issues")),
139-
pytest.param(
140-
("eagle", "meta-llama/Llama-4-Scout-17B-16E-Instruct",
141-
"morgendave/EAGLE-Llama-4-Scout-17B-16E-Instruct", 4),
142-
True,
143-
marks=pytest.mark.skip(reason="Skipping due to CI OOM issues")),
144-
],
145-
ids=[
146-
"qwen3_eagle3", "llama3_eagle", "llama3_eagle3",
147-
"llama4_eagle", "llama4_eagle_mm"
148-
])
128+
@pytest.mark.parametrize(
129+
["model_setup", "mm_enabled"],
130+
[
131+
# TODO: Re-enable this once tests/models/test_initialization.py is fixed, see PR #22333 #22611 # noqa: E501
132+
# (("eagle3", "Qwen/Qwen3-8B", "AngelSlim/Qwen3-8B_eagle3", 1), False),
133+
(("eagle", "meta-llama/Llama-3.1-8B-Instruct",
134+
"yuhuili/EAGLE-LLaMA3.1-Instruct-8B", 1), False),
135+
(("eagle3", "meta-llama/Llama-3.1-8B-Instruct",
136+
"yuhuili/EAGLE3-LLaMA3.1-Instruct-8B", 1), False),
137+
pytest.param(
138+
("eagle", "meta-llama/Llama-4-Scout-17B-16E-Instruct",
139+
"morgendave/EAGLE-Llama-4-Scout-17B-16E-Instruct", 4),
140+
False,
141+
marks=pytest.mark.skip(reason="Skipping due to CI OOM issues")),
142+
pytest.param(
143+
("eagle", "meta-llama/Llama-4-Scout-17B-16E-Instruct",
144+
"morgendave/EAGLE-Llama-4-Scout-17B-16E-Instruct", 4),
145+
True,
146+
marks=pytest.mark.skip(reason="Skipping due to CI OOM issues")),
147+
],
148+
ids=[
149+
"qwen3_eagle3", "llama3_eagle", "llama3_eagle3", "llama4_eagle",
150+
"llama4_eagle_mm"
151+
])
149152
@pytest.mark.parametrize("attn_backend",
150153
get_attn_backend_list_based_on_platform())
151154
def test_eagle_correctness(

vllm/model_executor/models/registry.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -259,7 +259,8 @@
259259
"EagleLlama4ForCausalLM": ("llama4_eagle", "EagleLlama4ForCausalLM"),
260260
"EagleMiniCPMForCausalLM": ("minicpm_eagle", "EagleMiniCPMForCausalLM"),
261261
"Eagle3LlamaForCausalLM": ("llama_eagle3", "Eagle3LlamaForCausalLM"),
262-
"LlamaForCausalLMEagle3": ("llama_eagle3", "Eagle3LlamaForCausalLM"),
262+
# TODO: Re-enable this once tests/models/test_initialization.py is fixed, see PR #22333 #22611 # noqa: E501
263+
# "LlamaForCausalLMEagle3": ("llama_eagle3", "Eagle3LlamaForCausalLM"),
263264
"DeepSeekMTPModel": ("deepseek_mtp", "DeepSeekMTP"),
264265
"Glm4MoeMTPModel": ("glm4_moe_mtp", "Glm4MoeMTP"),
265266
"MedusaModel": ("medusa", "Medusa"),

vllm/transformers_utils/configs/eagle.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,7 @@ def __init__(self,
4545

4646
# Eagle model name should follow naming convention of
4747
# LlamaForCausalLM -> EagleLlamaForCausalLM
48-
# LlamaForCausalLM -> Eagle3LlamaForCausalLM / LlamaForCausalLMEagle3
48+
# LlamaForCausalLM -> Eagle3LlamaForCausalLM
4949
if method == "eagle":
5050
assert self.model is not None, \
5151
"model should not be None when method is eagle"

0 commit comments

Comments (0)