# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import gc
import random
from typing import Optional, Union

import pytest
import torch

from vllm import LLM, SamplingParams
from vllm.config import CompilationConfig, CompilationLevel
from vllm.forward_context import get_forward_context
from vllm.model_executor.models.gemma3n import Gemma3nForConditionalGeneration
from vllm.model_executor.models.registry import ModelRegistry
from vllm.model_executor.models.utils import extract_layer_index
from vllm.sequence import IntermediateTensors

from ...utils import fork_new_process_for_each_test


class TestGemma3nForConditionalGeneration(Gemma3nForConditionalGeneration):
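    """Test-only subclass that wraps forward() with extra assertions.

    It checks that fast-prefill attention metadata is attached only to
    the cross-decoder (KV-sharing) layers, then deliberately scrambles
    every hidden state except those at the logits indices. Because the
    sampler only reads the logits indices, the generated outputs should
    be unaffected if fast prefill is wired up correctly.
    """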

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        intermediate_tensors: Optional[IntermediateTensors] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> Union[torch.Tensor, IntermediateTensors]:
        hidden_states = self.model(input_ids, positions, intermediate_tensors,
                                   inputs_embeds, **kwargs)
        attn_metadata = get_forward_context().attn_metadata
        # attn_metadata is None during dummy runs
        if (attn_metadata is not None
                and self.cache_config.kv_sharing_fast_prefill):
            assert isinstance(attn_metadata, dict)  # true in V1
            # Gemma3n-E2B has 30 layers, the last 20 of which are
            # cross-decoder layers. Check that only those layers were
            # given fast-prefill attention metadata.
            for layer_name, metadata in attn_metadata.items():
                layer_idx = extract_layer_index(layer_name)
                if layer_idx >= 20:
                    assert hasattr(metadata, 'logits_indices_padded')
                    assert hasattr(metadata, 'num_logits_indices')
                else:
                    assert not hasattr(metadata, 'logits_indices_padded')
                    assert not hasattr(metadata, 'num_logits_indices')

            # The last layer is a KV-sharing layer
            layer_attn_metadata = attn_metadata[
                self.model.language_model.layers[-1].self_attn.attn.layer_name]
            logits_indices_padded = layer_attn_metadata.logits_indices_padded
            assert logits_indices_padded is not None
            num_logits_indices = layer_attn_metadata.num_logits_indices
            assert num_logits_indices > 0
            # Reset the hidden states to random values, keeping valid
            # values only at the logits indices. Because the logits
            # indices are the only positions used for output token
            # sampling, this must still produce the same outputs.
            logits_hs = hidden_states[logits_indices_padded]
            hidden_states = torch.randn_like(hidden_states)
            gen_indices = logits_indices_padded[:num_logits_indices]
            hidden_states[gen_indices] = logits_hs[:num_logits_indices]

        return hidden_states


@pytest.fixture
def test_prompts():
    """
    Adapted from tests/v1/e2e/test_spec_decode.py
    """
    prompt_types = ["repeat", "sentence"]
    # Using more prompts increases the chance of a numerics mismatch,
    # since matmul numerics can depend on the batch dimension
    num_prompts = 10
    prompts = []

    random.seed(0)
    random_prompt_type_choices = random.choices(prompt_types, k=num_prompts)

    for kind in random_prompt_type_choices:
        word_choices = ["test", "temp", "hello", "where"]
        word = random.choice(word_choices)
        if kind == "repeat":
            prompt = f"please repeat the word '{word}' 10 times."
        elif kind == "sentence":
            prompt = ("please give a ten-word sentence that "
                      f"uses the word {word} at least once.")
        else:
            raise ValueError(f"Unknown prompt type: {kind}")
        prompts.append(prompt)

    return prompts


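# The test below runs the same prompts twice with greedy sampling:
# first as a baseline, then with kv_sharing_fast_prefill=True. Since
# temperature=0.0 makes decoding deterministic, the two runs are
# expected to produce identical outputs.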
@fork_new_process_for_each_test
@pytest.mark.parametrize("enforce_eager", [True, False])
def test_kv_sharing_fast_prefill(
    monkeypatch: pytest.MonkeyPatch,
    enforce_eager: bool,
    test_prompts: list[str],
):
    # Swap in the subclass above so its forward() assertions run
    ModelRegistry.register_model("Gemma3nForConditionalGeneration",
                                 TestGemma3nForConditionalGeneration)
    sampling_params = SamplingParams(temperature=0.0, max_tokens=100)
    compilation_config = CompilationConfig(
        # This lets the vLLM compilation backend handle allocating and
        # managing the buffers used by cudagraph
        cudagraph_copy_inputs=True,
        level=CompilationLevel.PIECEWISE
        if not enforce_eager else CompilationLevel.NO_COMPILATION)

    with monkeypatch.context() as m:
        m.setenv("VLLM_USE_V1", "1")

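        # Baseline: run the model without fast prefill enabled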
        llm = LLM(
            model="google/gemma-3n-E2B-it",
            enforce_eager=enforce_eager,
            compilation_config=compilation_config,
        )
        ref_responses = llm.generate(test_prompts, sampling_params)

        # Free the baseline engine before starting the second one
        del llm
        gc.collect()
        torch.cuda.empty_cache()

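        # Same model and config, but with KV-sharing fast prefill on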
        llm = LLM(model="google/gemma-3n-E2B-it",
                  enforce_eager=enforce_eager,
                  compilation_config=compilation_config,
                  kv_sharing_fast_prefill=True)
        optimized_responses = llm.generate(test_prompts, sampling_params)

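        # With greedy sampling, every optimized response must match its
        # baseline counterpart exactly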
        misses = 0

        for ref_response, optimized_response in zip(ref_responses,
                                                    optimized_responses):
            if (ref_response.outputs[0].text !=
                    optimized_response.outputs[0].text):
                misses += 1

        assert misses == 0