
Commit ddc12b1

fix: mobilenet_v2 tests

JingyaHuang authored and dacorvo committed
1 parent f154a5b · commit ddc12b1

File tree: 7 files changed (+8 additions, -48 deletions)


optimum/exporters/neuron/__main__.py

Lines changed: 1 addition & 1 deletion

@@ -138,7 +138,7 @@ def get_input_shapes(task: str, args: argparse.Namespace) -> dict[str, int]:
 def get_neuron_config_class(task: str, model_id: str) -> NeuronExportConfig:
     config = AutoConfig.from_pretrained(model_id)

-    model_type = config.model_type.replace("_", "-")
+    model_type = config.model_type
     if config.is_encoder_decoder:
         model_type = model_type + "-encoder"

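Why the rewrite can be dropped: transformers itself reports MobileNetV2 with an underscored model type, so the raw config.model_type now matches the registration key updated in model_configs.py below. A minimal sketch, assuming the transformers library and the tiny test checkpoint referenced elsewhere in this commit:

# Hedged sketch: confirm what transformers reports for the MobileNetV2 test checkpoint.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("hf-internal-testing/tiny-random-MobileNetV2Model")
print(config.model_type)                    # "mobilenet_v2"
print(config.model_type.replace("_", "-"))  # "mobilenet-v2", the old, mismatching key
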
optimum/exporters/neuron/model_configs.py

Lines changed: 2 additions & 4 deletions

@@ -452,11 +452,9 @@ class LevitNeuronConfig(ViTNeuronConfig):
     pass


-@register_in_tasks_manager(
-    "mobilenet-v2", *["feature-extraction", "image-classification", "semantic-segmentation", "image-segmentation"]
-)
+@register_in_tasks_manager("mobilenet_v2", *["feature-extraction", "image-classification", "semantic-segmentation"])
 class MobileNetV2NeuronConfig(ViTNeuronConfig):
-    MODEL_TYPE = "mobilenet-v2"
+    MODEL_TYPE = "mobilenet_v2"
     pass

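For illustration, a simplified stand-in registry (not optimum's actual register_in_tasks_manager / TasksManager machinery) showing why the registration key, MODEL_TYPE, and config.model_type must all agree on "mobilenet_v2" once the hyphen rewrite is gone:

# Illustrative stand-in for a tasks registry keyed by (model_type, task); the real
# TasksManager is more involved, but the key-matching concern is the same.
_NEURON_CONFIGS: dict[tuple[str, str], type] = {}

def register(model_type: str, *tasks: str):
    def wrapper(cls):
        for task in tasks:
            _NEURON_CONFIGS[(model_type, task)] = cls
        return cls
    return wrapper

@register("mobilenet_v2", "feature-extraction", "image-classification", "semantic-segmentation")
class MobileNetV2NeuronConfig:
    MODEL_TYPE = "mobilenet_v2"

# With the underscored registration, only the underscored key resolves:
assert ("mobilenet_v2", "image-classification") in _NEURON_CONFIGS
assert ("mobilenet-v2", "image-classification") not in _NEURON_CONFIGS
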
optimum/neuron/modeling_traced.py

Lines changed: 0 additions & 1 deletion

@@ -492,7 +492,6 @@ def _neuron_config_init(cls, config: "PretrainedConfig") -> "NeuronDefaultConfig":
         task = neuron_config.get("task", None) or TasksManager.infer_task_from_model(cls.auto_model_class)
         task = TasksManager.map_from_synonym(task)
         model_type = neuron_config.get("model_type", None) or config.model_type
-        model_type = model_type.replace("_", "-")
         neuron_config_constructor = TasksManager.get_exporter_config_constructor(
             model_type=model_type,
             exporter="neuron",

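A minimal, self-contained sketch (plain Python, no optimum imports; the registered-key set is illustrative) of the effect of the deleted line when a traced model is reloaded:

# Before this commit, the model type recorded in an exported model's config was
# rewritten to hyphens before the exporter-config lookup; now the stored value is
# used as-is, matching the underscored registration above.
stored_model_type = "mobilenet_v2"                    # as recorded at export time
registered_keys = {"mobilenet_v2", "levit", "vit"}    # illustrative subset of registered types

old_lookup_key = stored_model_type.replace("_", "-")  # pre-commit behaviour
new_lookup_key = stored_model_type                    # post-commit behaviour

assert old_lookup_key not in registered_keys
assert new_lookup_key in registered_keys
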
optimum/neuron/utils/argument_utils.py

Lines changed: 1 addition & 1 deletion

@@ -302,7 +302,7 @@ def store_compilation_config(
     original_model_type = getattr(config, "export_model_type", None) or getattr(
         config, "model_type", None
     )  # prioritize sentence_transformers to transformers
-    neuron_model_type = str(model_type).replace("_", "-") if model_type is not None else model_type
+    neuron_model_type = str(model_type) if model_type is not None else model_type
     if original_model_type is None:
         update_func(
             "model_type", neuron_model_type

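To show the observable effect on the stored config, a hedged sketch that uses a bare PretrainedConfig as a stand-in for the config object handled by store_compilation_config, and a plain setattr in place of update_func:

# Hedged sketch: the model type written into the exported model's config now keeps
# its underscores; before this commit it would have been stored as "mobilenet-v2".
from transformers import PretrainedConfig

config = PretrainedConfig()
model_type = "mobilenet_v2"
neuron_model_type = str(model_type) if model_type is not None else model_type
setattr(config, "model_type", neuron_model_type)  # stand-in for the update_func call above
assert config.model_type == "mobilenet_v2"
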
tests/exporters/exporters_utils.py

Lines changed: 2 additions & 2 deletions

@@ -50,7 +50,7 @@
     "hubert": "hf-internal-testing/tiny-random-HubertModel",
     "levit": "hf-internal-testing/tiny-random-LevitModel",
     "mobilebert": "hf-internal-testing/tiny-random-MobileBertModel",
-    "mobilenet-v2": "hf-internal-testing/tiny-random-MobileNetV2Model",
+    "mobilenet_v2": "hf-internal-testing/tiny-random-MobileNetV2Model",
     # "mobilevit": "hf-internal-testing/tiny-random-mobilevit", # blocked since neuron sdk 2.23: timeout
     "modernbert": "hf-internal-testing/tiny-random-ModernBertModel",
     "mpnet": "hf-internal-testing/tiny-random-MPNetModel",
@@ -80,7 +80,7 @@
     "clip": "sentence-transformers/clip-ViT-B-32",
 }

-WEIGHTS_NEFF_SEPARATION_UNSUPPORTED_ARCH = ["camembert", "roberta", "mobilenet-v2"]
+WEIGHTS_NEFF_SEPARATION_UNSUPPORTED_ARCH = ["camembert", "roberta", "mobilenet_v2"]

 # Diffusers

tests/inference/inference_utils.py

Lines changed: 1 addition & 1 deletion

@@ -48,7 +48,7 @@
     "gpt2": "hf-internal-testing/tiny-random-gpt2",
     "levit": "hf-internal-testing/tiny-random-LevitModel",
     "mobilebert": "hf-internal-testing/tiny-random-MobileBertModel",
-    "mobilenet-v2": "hf-internal-testing/tiny-random-MobileNetV2Model",
+    "mobilenet_v2": "hf-internal-testing/tiny-random-MobileNetV2Model",
     "mobilevit": "hf-internal-testing/tiny-random-mobilevit",
     "modernbert": "hf-internal-testing/tiny-random-ModernBertModel",
     "mpnet": "hf-internal-testing/tiny-random-MPNetModel",

tests/inference/transformers/test_modeling.py

Lines changed: 1 addition & 38 deletions

@@ -483,14 +483,6 @@ def _validate_outputs(self, model_arch, suffix, batch_size):
             f"Inference results between pytorch model and neuron model of {model_arch} not close enough."
         )

-    def test_load_vanilla_transformers_which_is_not_supported(self):
-        with self.assertRaises(Exception) as context:
-            _ = NeuronModelForMaskedLM.from_pretrained(
-                "hf-internal-testing/tiny-random-t5", from_transformers=True, **self.STATIC_INPUTS_SHAPES
-            )
-
-        self.assertIn("Unrecognized configuration class", str(context.exception))
-
     @parameterized.expand(SUPPORTED_ARCHITECTURES, skip_on_empty=True)
     def test_compare_to_transformers_non_dyn_bs(self, model_arch):
         model_args = {
@@ -603,14 +595,6 @@ def _validate_outputs(self, model_arch, suffix, batch_size):
         if not result_close_end_logits:
             warnings.warn(f"End logits between pytorch model and neuron model of {model_arch} not close enough.")

-    def test_load_vanilla_transformers_which_is_not_supported(self):
-        with self.assertRaises(Exception) as context:
-            _ = NeuronModelForQuestionAnswering.from_pretrained(
-                "hf-internal-testing/tiny-random-t5", from_transformers=True, **self.STATIC_INPUTS_SHAPES
-            )
-
-        assert ("doesn't support" in str(context.exception)) or ("is not supported" in str(context.exception))
-
     def test_compare_to_transformers_dyn_bs(self):
         model_arch = "albert"
         # Neuron model with dynamic batching
@@ -719,14 +703,6 @@ def _validate_outputs(self, model_arch, suffix, batch_size):
             f"Inference results between pytorch model and neuron model of {model_arch} not close enough."
         )

-    def test_load_vanilla_transformers_which_is_not_supported(self):
-        with self.assertRaises(Exception) as context:
-            _ = NeuronModelForSequenceClassification.from_pretrained(
-                "hf-internal-testing/tiny-random-t5", from_transformers=True, **self.STATIC_INPUTS_SHAPES
-            )
-
-        assert ("doesn't support" in str(context.exception)) or ("is not supported" in str(context.exception))
-
     @parameterized.expand(SUPPORTED_ARCHITECTURES, skip_on_empty=True)
     def test_compare_to_transformers_non_dyn_bs(self, model_arch):
         model_args = {
@@ -831,14 +807,6 @@ def _validate_outputs(self, model_arch, suffix, batch_size):
             f"Inference results between pytorch model and neuron model of {model_arch} not close enough."
         )

-    def test_load_vanilla_transformers_which_is_not_supported(self):
-        with self.assertRaises(Exception) as context:
-            _ = NeuronModelForTokenClassification.from_pretrained(
-                "hf-internal-testing/tiny-random-t5", from_transformers=True, **self.STATIC_INPUTS_SHAPES
-            )
-
-        assert ("doesn't support" in str(context.exception)) or ("is not supported" in str(context.exception))
-
     @parameterized.expand(SUPPORTED_ARCHITECTURES, skip_on_empty=True)
     def test_compare_to_transformers_non_dyn_bs(self, model_arch):
         model_args = {
@@ -987,7 +955,7 @@ class NeuronModelForImageClassificationIntegrationTest(NeuronModelTestMixin):
         "cvt",
         "deit",
         "levit",
-        "mobilenet-v2",
+        "mobilenet_v2",
         "mobilevit",
         "swin",
         "vit",
@@ -1046,11 +1014,6 @@ def test_compare_to_transformers_non_dyn_bs(self, model_arch):
             "model_arch": model_arch,
             "dynamic_batch_size": False,
         }
-        # REMOVEME: convnextv2 contains a bug in the GRN layer, which is used in the convnextv2 model, but the bug has
-        # been fixed in the transformers library on newer versions. For more info see:
-        # https://github.com/huggingface/transformers/issues/38015
-        if model_arch == "convnextv2" and transformers_version.startswith("4.51"):
-            self.skipTest("convnextv2 contains a bug in this version of transformers.")
         self._setup(model_args)
         self._validate_outputs(model_arch, "_dyn_bs_false", batch_size=1)
