Skip to content

Commit 08e6b3c

Browse files
sijunhe and LemonNoel authored
Use tiny-random model for prompt tests to speed up CI (#4190)
* use small models * change test_modeling Co-authored-by: Noel <[email protected]>
1 parent ec30226 commit 08e6b3c

File tree

4 files changed

+16
-16
lines changed

4 files changed

+16
-16
lines changed

tests/prompt/test_prompt_model.py

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -30,8 +30,12 @@
3030
class PromptModelTest(unittest.TestCase):
3131
@classmethod
3232
def setUpClass(cls):
33-
cls.tokenizer = AutoTokenizer.from_pretrained("__internal_testing__/ernie")
34-
cls.model = AutoModelForMaskedLM.from_pretrained("__internal_testing__/ernie")
33+
cls.tokenizer = AutoTokenizer.from_pretrained("__internal_testing__/tiny-random-ernie")
34+
cls.model = AutoModelForMaskedLM.from_pretrained("__internal_testing__/tiny-random-ernie")
35+
cls.num_labels = 2
36+
cls.seq_cls_model = AutoModelForSequenceClassification.from_pretrained(
37+
"__internal_testing__/tiny-random-ernie", num_labels=cls.num_labels
38+
)
3539

3640
cls.template = AutoTemplate.create_from(
3741
prompt="{'soft'}{'text': 'text'}{'mask'}", tokenizer=cls.tokenizer, max_length=512, model=cls.model
@@ -71,36 +75,32 @@ def test_sequence_classification_with_labels(self):
7175
self.assertEqual(model_outputs.hidden_states.shape[0], len(examples))
7276

7377
def test_efl_no_labels(self):
74-
num_labels = 2
75-
model = AutoModelForSequenceClassification.from_pretrained("__internal_testing__/ernie", num_labels=num_labels)
76-
prompt_model = PromptModelForSequenceClassification(model, self.template, verbalizer=None)
78+
prompt_model = PromptModelForSequenceClassification(self.seq_cls_model, self.template, verbalizer=None)
7779
examples = [{"text": "百度飞桨深度学习框架"}, {"text": "这是一个测试"}]
7880
encoded_examples = [self.template(i) for i in examples]
7981
logits, hidden_states = prompt_model(**self.data_collator(encoded_examples))
8082
self.assertEqual(logits.shape[0], len(examples))
81-
self.assertEqual(logits.shape[1], num_labels)
83+
self.assertEqual(logits.shape[1], self.num_labels)
8284
self.assertEqual(hidden_states.shape[0], len(examples))
8385

8486
model_outputs = prompt_model(**self.data_collator(encoded_examples), return_dict=True)
8587
self.assertIsNone(model_outputs.loss)
8688
self.assertEqual(model_outputs.logits.shape[0], len(examples))
87-
self.assertEqual(model_outputs.logits.shape[1], num_labels)
89+
self.assertEqual(model_outputs.logits.shape[1], self.num_labels)
8890
self.assertEqual(model_outputs.hidden_states.shape[0], len(examples))
8991

9092
def test_efl_with_labels(self):
91-
num_labels = 2
92-
model = AutoModelForSequenceClassification.from_pretrained("__internal_testing__/ernie", num_labels=num_labels)
93-
prompt_model = PromptModelForSequenceClassification(model, self.template, verbalizer=None)
93+
prompt_model = PromptModelForSequenceClassification(self.seq_cls_model, self.template, verbalizer=None)
9494
examples = [{"text": "百度飞桨深度学习框架", "labels": 0}, {"text": "这是一个测试", "labels": 1}]
9595
encoded_examples = [self.template(i) for i in examples]
9696
loss, logits, hidden_states = prompt_model(**self.data_collator(encoded_examples))
9797
self.assertIsNotNone(loss)
9898
self.assertEqual(logits.shape[0], len(examples))
99-
self.assertEqual(logits.shape[1], num_labels)
99+
self.assertEqual(logits.shape[1], self.num_labels)
100100
self.assertEqual(hidden_states.shape[0], len(examples))
101101

102102
model_outputs = prompt_model(**self.data_collator(encoded_examples), return_dict=True)
103103
self.assertIsNotNone(model_outputs.loss)
104104
self.assertEqual(model_outputs.logits.shape[0], len(examples))
105-
self.assertEqual(model_outputs.logits.shape[1], num_labels)
105+
self.assertEqual(model_outputs.logits.shape[1], self.num_labels)
106106
self.assertEqual(model_outputs.hidden_states.shape[0], len(examples))

tests/prompt/test_template.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -25,8 +25,8 @@
2525
class TemplateTest(unittest.TestCase):
2626
@classmethod
2727
def setUpClass(cls):
28-
cls.tokenizer = AutoTokenizer.from_pretrained("ernie-3.0-nano-zh")
29-
cls.model = AutoModelForMaskedLM.from_pretrained("ernie-3.0-nano-zh")
28+
cls.tokenizer = AutoTokenizer.from_pretrained("__internal_testing__/tiny-random-ernie")
29+
cls.model = AutoModelForMaskedLM.from_pretrained("__internal_testing__/tiny-random-ernie")
3030
cls.example = {"text_a": "天气晴朗", "text_b": "下雪了", "choices": ["不", "很"], "labels": 0}
3131
cls.max_length = 20
3232
cls.tokenizer.add_special_tokens({"additional_special_tokens": ["[O-MASK]"]})

tests/transformers/bert/test_modeling.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -590,7 +590,7 @@ def test_auto_model(self):
590590
class BertModelIntegrationTest(ModelTesterPretrainedMixin, unittest.TestCase):
591591
base_model_class = BertModel
592592
hf_remote_test_model_path = "PaddleCI/tiny-random-bert"
593-
paddlehub_remote_test_model_path = "__internal_testing__/bert"
593+
paddlehub_remote_test_model_path = "__internal_testing__/tiny-random-bert"
594594

595595
@slow
596596
def test_inference_no_attention(self):

tests/transformers/ernie/test_modeling.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -439,7 +439,7 @@ def test_model_from_pretrained(self):
439439
class ErnieModelIntegrationTest(unittest.TestCase, ModelTesterPretrainedMixin):
440440
base_model_class = ErniePretrainedModel
441441
hf_remote_test_model_path = "PaddleCI/tiny-random-ernie"
442-
paddlehub_remote_test_model_path = "__internal_testing__/ernie"
442+
paddlehub_remote_test_model_path = "__internal_testing__/tiny-random-ernie"
443443

444444
@slow
445445
def test_inference_no_attention(self):

0 commit comments

Comments (0)