Skip to content

Commit 5329e53

Browse files
committed
raise NotImplementedError for num_reqs > 1 and run precommit
Signed-off-by: HonestDeng <2958906959@qq.com>
1 parent 77e8d8c commit 5329e53

File tree

4 files changed

+9
-5
lines changed

4 files changed

+9
-5
lines changed

tests/e2e/offline_inference/test_mammoth_moda2.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,9 @@
4848

4949
_EXAMPLE_DIR = Path(__file__).resolve().parents[3] / "examples" / "offline_inference" / "mammothmodal2_preview"
5050
_STAGE_CONFIGS_DIR = Path(__file__).resolve().parents[3] / "vllm_omni" / "model_executor" / "stage_configs"
51-
MODEL_PATH = os.environ.get("MAMMOTHMODA2_MODEL_PATH", str(Path(__file__).resolve().parents[3] / "MammothModa2-Preview"))
51+
MODEL_PATH = os.environ.get(
52+
"MAMMOTHMODA2_MODEL_PATH", str(Path(__file__).resolve().parents[3] / "MammothModa2-Preview")
53+
)
5254
T2I_STAGE_CONFIG = os.environ.get("MAMMOTHMODA2_T2I_STAGE_CONFIG", str(_STAGE_CONFIGS_DIR / "mammoth_moda2.yaml"))
5355
SUMMARIZE_STAGE_CONFIG = os.environ.get(
5456
"MAMMOTHMODA2_SUMMARIZE_STAGE_CONFIG", str(_STAGE_CONFIGS_DIR / "mammoth_moda2_ar.yaml")

vllm_omni/__init__.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,6 @@
2323
# Register custom configs (AutoConfig, AutoTokenizer) as early as possible.
2424
from vllm_omni.transformers_utils import configs as _configs # noqa: F401, E402
2525

26-
2726
from .config import OmniModelConfig
2827
from .entrypoints.async_omni import AsyncOmni
2928

vllm_omni/diffusion/models/mammoth_moda2/mammoth_moda2_dit.py

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -11,8 +11,8 @@
1111
from vllm.config import VllmConfig
1212
from vllm.model_executor.models.utils import AutoWeightsLoader, WeightsMapper
1313

14-
from vllm_omni.transformers_utils.configs.mammoth_moda2 import Mammothmoda2Config
1514
from vllm_omni.model_executor.models.output_templates import OmniOutput
15+
from vllm_omni.transformers_utils.configs.mammoth_moda2 import Mammothmoda2Config
1616

1717
from .mammothmoda2_dit_model import SimpleQFormerImageRefiner, Transformer2DModel
1818
from .rope_real import RotaryPosEmbedReal
@@ -93,9 +93,12 @@ def _reinit_caption_embedder(self, in_features: int) -> None:
9393
)
9494

9595
def get_dummy_runtime_additional_information(self, num_reqs: int) -> list[dict[str, object]]:
96-
num_reqs = 1 # TODO: support num_reqs > 1
9796
if num_reqs <= 0:
9897
raise ValueError(f"num_reqs must be positive, got {num_reqs}")
98+
if num_reqs > 1:
99+
raise NotImplementedError(
100+
f"get_dummy_runtime_additional_information does not support num_reqs > 1, got {num_reqs}"
101+
)
99102
text_prompt_embeds = torch.zeros((1, self._llm_hidden_size), dtype=torch.float32)
100103
image_prompt_embeds = torch.zeros((1, self._llm_hidden_size), dtype=torch.float32)
101104
negative_prompt_embeds = torch.zeros((0, self._llm_hidden_size), dtype=torch.float32)

vllm_omni/model_executor/models/mammoth_moda2/mammoth_moda2.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -408,7 +408,7 @@ def _make_empty_intermediate_tensors(
408408
self.norm = PPMissingLayer()
409409

410410
@property
411-
def model(self) -> "MammothModa2Qwen2ForCausalLM":
411+
def model(self) -> MammothModa2Qwen2ForCausalLM:
412412
return self
413413

414414
def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor:

0 commit comments

Comments (0)