Skip to content

Commit 597ed1a

Browse files
authored
Various test fixes for AMD (#39978)
* Add AMD expectation in internvl * Add AMD expectation to llama * Added bnb decorator for a llava test that requires bnb * Added AMD expectation for mistral3 * Style
1 parent 6121e9e commit 597ed1a

File tree

4 files changed

+19
-19
lines changed

4 files changed

+19
-19
lines changed

tests/models/internvl/test_modeling_internvl.py

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -652,10 +652,11 @@ def test_llama_small_model_integration_forward(self):
652652

653653
expected_logits_all = Expectations(
654654
{
655-
("xpu", 3): torch.tensor([-9.8750, -0.5703, 1.4297, -10.3125, -10.3125], dtype=torch.float16),
656-
("cuda", 7): torch.tensor([-9.8750, -0.4861, 1.4648, -10.3359, -10.3359], dtype=torch.float16),
657-
("cuda", 8): torch.tensor([-9.8906, -0.4995, 1.4473, -10.3359, -10.3438], dtype=torch.float16),
658-
("rocm", (9, 5)): torch.tensor([ -9.8906, -0.4976, 1.4502, -10.3359, -10.3438], dtype=torch.float16),
655+
("xpu", 3): [-9.8750, -0.5703, 1.4297, -10.3125, -10.3125],
656+
("cuda", 7): [-9.8750, -0.4861, 1.4648, -10.3359, -10.3359],
657+
("cuda", 8): [-9.8906, -0.4995, 1.4473, -10.3359, -10.3438],
658+
("rocm", (9, 4)): [ -9.8750, -0.4885, 1.4668, -10.3359, -10.3359],
659+
("rocm", (9, 5)): [ -9.8906, -0.4976, 1.4502, -10.3359, -10.3438],
659660
}
660661
) # fmt: skip
661662
expected_logits = torch.tensor(expected_logits_all.get_expectation(), dtype=torch.float16)

tests/models/llama/test_modeling_llama.py

Lines changed: 12 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -151,14 +151,16 @@ def test_model_7b_logits_bf16(self):
151151
{
152152
("xpu", 3): torch.tensor([[-6.5208, -4.1218, -4.9377, -3.2536, 0.8127, -2.9811, 1.2918, -3.3848]]),
153153
("cuda", 7): torch.tensor([[-6.5061, -4.1147, -4.9669, -3.2038, 0.8069, -2.9694, 1.2864, -3.3786]]),
154-
("cuda", 8): torch.tensor([[-6.5208, -4.1218, -4.9377, -3.2536, 0.8127, -2.9811, 1.2918, -3.3848]])
155-
})
154+
("cuda", 8): torch.tensor([[-6.5208, -4.1218, -4.9377, -3.2536, 0.8127, -2.9811, 1.2918, -3.3848]]),
155+
("rocm", (9, 4)): torch.tensor([[-6.5094, -4.1329, -4.9754, -3.5042, 0.8082, -2.9443, 1.2830, -3.3539]]),
156+
})
156157

157-
expected_mean = expected_means.get_expectation()
158+
expected_mean = expected_means.get_expectation().to(torch_device)
159+
actual_mean = out.logits.float().mean(-1)
158160
self.assertTrue(
159161
torch.allclose(
160-
expected_mean.to(torch_device),
161-
out.logits.float().mean(-1),
162+
expected_mean,
163+
actual_mean,
162164
atol=1e-2,
163165
rtol=1e-2
164166
)
@@ -169,18 +171,13 @@ def test_model_7b_logits_bf16(self):
169171
{
170172
("xpu", 3): torch.tensor([[-12.5625, -7.1250, -0.6289, -7.8750, -6.9688, -7.8125, -6.5000, -7.4375, -7.6562, -6.9688, -6.0312, -7.0312, -1.8203, 1.8750, -8.5000]]),
171173
("cuda", 7): torch.tensor([[-12.5000, -7.0625, -0.6289, -7.8750, -6.9688, -7.8125, -6.4688, -7.4375, -7.6875, -6.9375, -6.0312, -7.0000, -1.8594, 1.8438, -8.5000]]),
172-
("cuda", 8): torch.tensor([[-12.5625, -7.1250, -0.6289, -7.8750, -6.9688, -7.8125, -6.5000, -7.4375, -7.6562, -6.9688, -6.0312, -7.0312, -1.8203, 1.8750, -8.5000]])
174+
("cuda", 8): torch.tensor([[-12.5625, -7.1250, -0.6289, -7.8750, -6.9688, -7.8125, -6.5000, -7.4375, -7.6562, -6.9688, -6.0312, -7.0312, -1.8203, 1.8750, -8.5000]]),
175+
("rocm", (9, 4)): torch.tensor([[-12.5000, -7.0625, -0.6289, -7.8750, -6.9688, -7.8125, -6.5000, -7.4375, -7.6562, -6.9375, -6.0312, -7.0312, -1.8594, 1.8438, -8.5000]])
173176
})
174177
# fmt: on
175-
expected_slice = expected_slices.get_expectation()
176-
self.assertTrue(
177-
torch.allclose(
178-
expected_slice.to(torch_device),
179-
out.logits[0, 0, :15].float(),
180-
atol=1e-2,
181-
rtol=1e-2,
182-
)
183-
)
178+
expected_slice = expected_slices.get_expectation().to(torch_device)
179+
actual_slice = out.logits[0, 0, :15].float()
180+
self.assertTrue(torch.allclose(expected_slice, actual_slice, atol=1e-2, rtol=1e-2))
184181

185182
@slow
186183
def test_model_7b_logits(self):

tests/models/llava/test_modeling_llava.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -476,6 +476,7 @@ def test_small_model_integration_test_llama_batched_regression(self):
476476
@slow
477477
@require_torch
478478
@require_vision
479+
@require_bitsandbytes
479480
def test_batched_generation(self):
480481
model = LlavaForConditionalGeneration.from_pretrained("llava-hf/llava-1.5-7b-hf", load_in_4bit=True)
481482

tests/models/mistral3/test_modeling_mistral3.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -317,6 +317,7 @@ def test_mistral3_integration_generate(self):
317317
{
318318
("xpu", 3): "The image features two cats resting on a pink blanket. The cat on the left is a kitten",
319319
("cuda", 8): 'The image features two cats lying on a pink surface, which appears to be a couch or a bed',
320+
("rocm", (9, 4)): "The image features two cats lying on a pink surface, which appears to be a couch or a bed",
320321
("rocm", (9, 5)): "The image features two tabby cats lying on a pink surface, which appears to be a cushion or"
321322
}
322323
) # fmt: skip

0 commit comments

Comments (0)