diff --git a/optimum/exporters/openvino/model_configs.py b/optimum/exporters/openvino/model_configs.py
index 544735cbe4..612584d221 100644
--- a/optimum/exporters/openvino/model_configs.py
+++ b/optimum/exporters/openvino/model_configs.py
@@ -4645,6 +4645,39 @@ def generate_dummy_inputs(self, framework: str = "pt", **kwargs):
 
         return dummy_inputs
 
+class HunyuanDummyPastKeyValuesGenerator(GemmaDummyPastKeyValuesGenerator):
+    def __init__(
+        self,
+        task: str,
+        normalized_config: NormalizedTextConfig,
+        batch_size: int = DEFAULT_DUMMY_SHAPES["batch_size"],
+        sequence_length: int = DEFAULT_DUMMY_SHAPES["sequence_length"],
+        random_batch_size_range: Optional[Tuple[int, int]] = None,
+        random_sequence_length_range: Optional[Tuple[int, int]] = None,
+        **kwargs,
+    ):
+        super().__init__(
+            task=task,
+            normalized_config=normalized_config,
+            batch_size=batch_size,
+            sequence_length=sequence_length,
+            random_batch_size_range=random_batch_size_range,
+            random_sequence_length_range=random_sequence_length_range,
+        )
+        self.head_dim = normalized_config.attention_head_dim
+
+@register_in_tasks_manager("hunyuan_v1_dense", *["text-generation", "text-generation-with-past"], library_name="transformers")
+class HunyuanOpenVINOConfig(TextDecoderWithPositionIdsOnnxConfig):
+    MIN_TRANSFORMERS_VERSION = "4.55.0.dev0"
+
+    DUMMY_INPUT_GENERATOR_CLASSES = (DummyTextInputGenerator, HunyuanDummyPastKeyValuesGenerator)
+    DUMMY_PKV_GENERATOR_CLASS = HunyuanDummyPastKeyValuesGenerator
+    NORMALIZED_CONFIG_CLASS = NormalizedTextConfig
+
+    def patch_model_for_export(
+        self, model: Union["PreTrainedModel", "TFPreTrainedModel"], model_kwargs: Optional[Dict[str, Any]] = None
+    ) -> "ModelPatcher":
+        return OVDecoderModelPatcher(self, model, model_kwargs=model_kwargs)
 
 @register_in_tasks_manager(
     "gpt2",
diff --git a/tests/openvino/test_decoder.py b/tests/openvino/test_decoder.py
index b4da85e759..04465d48de 100644
--- a/tests/openvino/test_decoder.py
+++ b/tests/openvino/test_decoder.py
@@ -135,6 +135,9 @@ class OVModelForCausalLMIntegrationTest(unittest.TestCase):
 
     if is_transformers_version(">=", "4.55.0"):
         SUPPORTED_ARCHITECTURES += ("gpt_oss", "gpt_oss_mxfp4")
+
+    if is_transformers_version(">=", "4.56.0"):
+        SUPPORTED_ARCHITECTURES += ("hunyuan_v1_dense",)
 
     GENERATION_LENGTH = 100
     REMOTE_CODE_MODELS = (
@@ -224,6 +227,7 @@ class OVModelForCausalLMIntegrationTest(unittest.TestCase):
         "mamba": 0,
         "falcon-mamba": 0,
         "arcee": 2,
+        "hunyuan_v1_dense": 2,
     }
 
     # TODO: remove gptq/awq from here
diff --git a/tests/openvino/test_export.py b/tests/openvino/test_export.py
index 31f20d8ca3..bf4aa68dc3 100644
--- a/tests/openvino/test_export.py
+++ b/tests/openvino/test_export.py
@@ -102,6 +102,9 @@ class ExportModelTest(unittest.TestCase):
         SUPPORTED_ARCHITECTURES.update({"qwen3": OVModelForFeatureExtraction})
 
     GENERATIVE_MODELS = ("pix2struct", "t5", "bart", "gpt2", "whisper", "llava", "speecht5")
+
+    if is_transformers_version(">=", "4.56"):
+        SUPPORTED_ARCHITECTURES.update({"hunyuan_v1_dense": OVModelForCausalLM})
 
     def _openvino_export(
         self,
diff --git a/tests/openvino/test_exporters_cli.py b/tests/openvino/test_exporters_cli.py
index bcd0435ce7..0c3c48e421 100644
--- a/tests/openvino/test_exporters_cli.py
+++ b/tests/openvino/test_exporters_cli.py
@@ -141,6 +141,7 @@ class OVCLIExportTestCase(unittest.TestCase):
         "mamba": 2,
         "falcon-mamba": 2,
         "qwen3": 2,
+        "hunyuan_v1_dense": 2,
     }
 
     TOKENIZER_CHAT_TEMPLATE_TESTS_MODELS = {
diff --git a/tests/openvino/utils_tests.py b/tests/openvino/utils_tests.py
index 8c37e434b0..b9c21265ab 100644
--- a/tests/openvino/utils_tests.py
+++ b/tests/openvino/utils_tests.py
@@ -199,6 +199,7 @@
     "sana": "katuni4ka/tiny-random-sana",
     "sana-sprint": "katuni4ka/tiny-random-sana-sprint",
     "ltx-video": "katuni4ka/tiny-random-ltx-video",
+    "hunyuan_v1_dense": "snake7gun/tiny-random-hunyuan",
 }
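
For context, a minimal usage sketch (not part of the diff) of how the newly registered "hunyuan_v1_dense" architecture could be exercised once this change lands, assuming transformers >= 4.56 is installed and that the tiny test checkpoint registered in utils_tests.py (snake7gun/tiny-random-hunyuan) ships a usable tokenizer:

# Usage sketch: exercises the export path this patch adds.
# Assumes transformers >= 4.56 and a tokenizer alongside the tiny checkpoint.
from transformers import AutoTokenizer

from optimum.intel import OVModelForCausalLM

model_id = "snake7gun/tiny-random-hunyuan"  # tiny test checkpoint from utils_tests.py
tokenizer = AutoTokenizer.from_pretrained(model_id)

# export=True converts the PyTorch checkpoint to OpenVINO IR on the fly;
# the "hunyuan_v1_dense" model type now resolves to HunyuanOpenVINOConfig.
model = OVModelForCausalLM.from_pretrained(model_id, export=True)

inputs = tokenizer("Hello, my name is", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=10)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))

The same path is what the new test entries cover: test_exporters_cli.py checks the optimum-cli export with the expected number of generated models, and test_decoder.py runs the exported model against the PyTorch reference.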