Skip to content

Commit 0d80c42

Browse files
committed
style
1 parent 9f9bf0b commit 0d80c42

File tree

2 files changed: +41 additions, −5 deletions

_unittests/ut_tasks/try_tasks.py

Lines changed: 38 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -139,8 +139,9 @@ def test_text_generation_phi4_mini(self):
139139
import torch
140140
from transformers import RobertaTokenizer, T5ForConditionalGeneration
141141

142-
tokenizer = RobertaTokenizer.from_pretrained("microsoft/Phi-4-mini-instruct")
143-
model = T5ForConditionalGeneration.from_pretrained("microsoft/Phi-4-mini-instruct")
142+
model_id = "microsoft/Phi-4-mini-instruct"
143+
tokenizer = RobertaTokenizer.from_pretrained(model_id)
144+
model = T5ForConditionalGeneration.from_pretrained(model_id)
144145

145146
text = "def greet(user): print(f'hello <extra_id_0>!')"
146147
input_ids = tokenizer(text, return_tensors="pt").input_ids
@@ -158,6 +159,41 @@ def test_text_generation_phi4_mini(self):
158159
)
159160
print(tokenizer.decode(generated_ids[0], skip_special_tokens=True))
160161

162+
@never_test()
163+
def test_text_generation_phi3_mini(self):
164+
# clear&&NEVERTEST=1 python _unittests/ut_tasks/try_tasks.py -k phi3_mini
165+
166+
from transformers import Phi3ForCausalLM, AutoTokenizer
167+
168+
model_id = "microsoft/Phi-3-mini-4k-instruct"
169+
tokenizer = AutoTokenizer.from_pretrained(model_id)
170+
model = Phi3ForCausalLM.from_pretrained(model_id)
171+
172+
messages = [
173+
{
174+
"role": "system",
175+
"content": (
176+
"You are a helpful digital assistant. Please provide safe, "
177+
"ethical and accurate information to the user."
178+
),
179+
},
180+
{
181+
"role": "user",
182+
"content": (
183+
"Can you provide ways to eat combinations of bananas and dragonfruits?"
184+
),
185+
},
186+
]
187+
inputs = tokenizer.apply_chat_template(
188+
messages, add_generation_prompt=True, return_tensors="pt"
189+
)
190+
191+
# simply generate a single sequence
192+
print()
193+
with steal_forward(model):
194+
generated_ids = model.generate(inputs, max_length=100)
195+
print(tokenizer.decode(generated_ids[0], skip_special_tokens=True))
196+
161197
@never_test()
162198
@unittest.skip(
163199
reason="AttributeError: 'Phi4MMModel' object has no attribute "

onnx_diagnostic/tasks/image_text_to_text.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -183,9 +183,9 @@ def _get_inputs_gemma3(
183183
)
184184
dummies = {k: v for k, v in dummies.items() if k in shapes}
185185
expected = {"input_ids", "token_type_ids", "position_ids", "cache_position"}
186-
assert expected & set(dummies), (
187-
f"Unable to find expected inputs {expected} in loaded inputs {set(dummies)}"
188-
)
186+
assert expected & set(
187+
dummies
188+
), f"Unable to find expected inputs {expected} in loaded inputs {set(dummies)}"
189189

190190
inputs = dict(
191191
input_ids=input_ids,

Comments (0)