
Commit 661d5b7

Add test_export for dpt and zoedepth

Signed-off-by: Phillip Kuznetsov <[email protected]>

1 parent 3e28a48

2 files changed: +49, -0 lines

tests/models/dpt/test_modeling_dpt.py

Lines changed: 22 additions & 0 deletions
@@ -18,6 +18,7 @@

 from transformers import DPTConfig
 from transformers.file_utils import is_torch_available, is_vision_available
+from transformers.pytorch_utils import is_torch_greater_or_equal_than_2_4
 from transformers.testing_utils import require_torch, require_vision, slow, torch_device

 from ...test_configuration_common import ConfigTester
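
For context, is_torch_greater_or_equal_than_2_4 is a module-level boolean flag in transformers.pytorch_utils; an equivalent standalone guard might look like this sketch, assuming only torch and packaging are installed:

# Sketch of an equivalent version guard (transformers computes its flag by
# comparing the parsed base version of torch against a threshold).
import torch
from packaging import version

is_torch_greater_or_equal_than_2_4 = version.parse(
    version.parse(torch.__version__).base_version
) >= version.parse("2.4")
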
@@ -410,3 +411,24 @@ def test_post_processing_depth_estimation(self):
         ).squeeze()
         self.assertTrue(output_enlarged.shape == expected_shape)
         self.assertTrue(torch.allclose(predicted_depth_l, output_enlarged, rtol=1e-3))
+
+    def test_export(self):
+        for strict in [True, False]:
+            with self.subTest(strict=strict):
+                if not is_torch_greater_or_equal_than_2_4:
+                    self.skipTest(reason="This test requires torch >= 2.4 to run.")
+                model = DPTForSemanticSegmentation.from_pretrained("Intel/dpt-large-ade").to(torch_device).eval()
+                image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large-ade")
+                image = prepare_img()
+                inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
+
+                exported_program = torch.export.export(
+                    model,
+                    args=(inputs["pixel_values"],),
+                    strict=strict,
+                )
+                with torch.no_grad():
+                    eager_outputs = model(**inputs)
+                    exported_outputs = exported_program.module().forward(inputs["pixel_values"])
+                self.assertEqual(eager_outputs.logits.shape, exported_outputs.logits.shape)
+                self.assertTrue(torch.allclose(eager_outputs.logits, exported_outputs.logits, atol=1e-4))
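
For reference, a minimal standalone sketch (not part of the commit) of the path this test exercises, extended with serialization: exporting the same "Intel/dpt-large-ade" checkpoint and round-tripping the resulting ExportedProgram through torch.export.save/load. The 384x384 dummy input and the archive filename are assumptions, not taken from the commit.

# Sketch only: assumes torch >= 2.4, Hub access, and a 384x384 input size.
import torch
from transformers import DPTForSemanticSegmentation

model = DPTForSemanticSegmentation.from_pretrained("Intel/dpt-large-ade").eval()
example = torch.randn(1, 3, 384, 384)  # hypothetical dummy pixel_values

exported_program = torch.export.export(model, args=(example,))

# An ExportedProgram can be serialized to a .pt2 archive and reloaded later.
torch.export.save(exported_program, "dpt_large_ade.pt2")
reloaded = torch.export.load("dpt_large_ade.pt2")

with torch.no_grad():
    logits = reloaded.module()(example).logits
print(logits.shape)  # same shape as the eager model's logits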

tests/models/zoedepth/test_modeling_zoedepth.py

Lines changed: 27 additions & 0 deletions
@@ -20,6 +20,7 @@

 from transformers import Dinov2Config, ZoeDepthConfig
 from transformers.file_utils import is_torch_available, is_vision_available
+from transformers.pytorch_utils import is_torch_greater_or_equal_than_2_4
 from transformers.testing_utils import require_torch, require_vision, slow, torch_device

 from ...test_configuration_common import ConfigTester
@@ -354,3 +355,29 @@ def test_inference_depth_estimation_post_processing_pad_flip(self):
         model = ZoeDepthForDepthEstimation.from_pretrained("Intel/zoedepth-nyu-kitti").to(torch_device)

         self.check_post_processing_test(image_processor, images, model, pad_input=True, flip_aug=True)
+
+    def test_export(self):
+        self.skipTest(
+            reason="This test fails because the beit backbone of ZoeDepth is not compatible with torch.export"
+        )
+        for strict in [True, False]:
+            with self.subTest(strict=strict):
+                if not is_torch_greater_or_equal_than_2_4:
+                    self.skipTest(reason="This test requires torch >= 2.4 to run.")
+                model = ZoeDepthForDepthEstimation.from_pretrained("Intel/zoedepth-nyu").to(torch_device).eval()
+                image_processor = ZoeDepthImageProcessor.from_pretrained("Intel/zoedepth-nyu")
+                image = prepare_img()
+                inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
+
+                exported_program = torch.export.export(
+                    model,
+                    args=(inputs["pixel_values"],),
+                    strict=strict,
+                )
+                with torch.no_grad():
+                    eager_outputs = model(**inputs)
+                    exported_outputs = exported_program.module().forward(inputs["pixel_values"])
+                self.assertEqual(eager_outputs.predicted_depth.shape, exported_outputs.predicted_depth.shape)
+                self.assertTrue(
+                    torch.allclose(eager_outputs.predicted_depth, exported_outputs.predicted_depth, atol=1e-4)
+                )
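
As background for the `for strict in [True, False]` loop in both tests: in torch 2.4, strict=True traces the model with TorchDynamo, while strict=False (non-strict mode) traces through the Python interpreter and accepts some constructs Dynamo rejects. A toy sketch of the contrast (the TinyNet module is a hypothetical stand-in, not from the commit):

# Toy contrast of the two tracing modes; both should match eager outputs.
import torch


class TinyNet(torch.nn.Module):  # hypothetical stand-in for DPT/ZoeDepth
    def __init__(self):
        super().__init__()
        self.proj = torch.nn.Linear(8, 4)

    def forward(self, x):
        return torch.relu(self.proj(x))


model = TinyNet().eval()
example = (torch.randn(2, 8),)
for strict in [True, False]:
    exported = torch.export.export(model, args=example, strict=strict)
    torch.testing.assert_close(exported.module()(*example), model(*example))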
