|
29 | 29 | make_bounding_boxes, |
30 | 30 | make_detection_masks, |
31 | 31 | make_image, |
| 32 | + make_image_cvcuda, |
32 | 33 | make_image_pil, |
33 | 34 | make_image_tensor, |
34 | 35 | make_keypoints, |
|
51 | 52 | from torchvision.transforms.v2 import functional as F |
52 | 53 | from torchvision.transforms.v2._utils import check_type, is_pure_tensor |
53 | 54 | from torchvision.transforms.v2.functional._geometry import _get_perspective_coeffs, _parallelogram_to_bounding_boxes |
| 55 | +from torchvision.transforms.v2.functional._type_conversion import _import_cvcuda_modules |
54 | 56 | from torchvision.transforms.v2.functional._utils import _get_kernel, _register_kernel_internal |
55 | 57 |
|
| 58 | +try: |
| 59 | + _import_cvcuda_modules() |
| 60 | + CVCUDA_AVAILABLE = True |
| 61 | +except ImportError: |
| 62 | + CVCUDA_AVAILABLE = False |
| 63 | +CUDA_AVAILABLE = torch.cuda.is_available() |
| 64 | + |
56 | 65 |
|
57 | 66 | # turns all warnings into errors for this module |
58 | 67 | pytestmark = [pytest.mark.filterwarnings("error")] |
@@ -6733,6 +6742,125 @@ def test_functional_error(self): |
6733 | 6742 | F.pil_to_tensor(object()) |
6734 | 6743 |
|
6735 | 6744 |
|
| 6745 | +@pytest.mark.skipif(not CVCUDA_AVAILABLE, reason="test requires CVCUDA") |
| 6746 | +@pytest.mark.skipif(not CUDA_AVAILABLE, reason="test requires CUDA") |
| 6747 | +class TestToCVCUDATensor: |
| 6748 | +    """Tests for the to_cvcuda_tensor function, following the patterns of TestToPil.""" |
| 6749 | + |
| 6750 | + @pytest.mark.parametrize("dtype", [torch.uint8, torch.uint16, torch.float32, torch.float64]) |
| 6751 | + def test_1_channel_to_cvcuda_tensor(self, dtype): |
| 6752 | + # Create tensor on CPU first, then move to CUDA to avoid CUDA context issues |
| 6753 | + if dtype in (torch.uint8, torch.uint16): |
| 6754 | + img_data = torch.randint(0, 256, (1, 4, 4), dtype=dtype) |
| 6755 | + else: |
| 6756 | + img_data = torch.rand(1, 4, 4, dtype=dtype) |
| 6757 | + img_data = img_data.cuda() |
| 6758 | + cvcuda_img = F.to_cvcuda_tensor(img_data) |
| 6759 | + assert cvcuda_img is not None |
| 6760 | + |
| 6761 | + @pytest.mark.parametrize("dtype", [torch.uint8, torch.uint16, torch.float32, torch.float64]) |
| 6762 | + def test_3_channel_to_cvcuda_tensor(self, dtype): |
| 6763 | + # Create tensor on CPU first, then move to CUDA to avoid CUDA context issues |
| 6764 | + if dtype in (torch.uint8, torch.uint16): |
| 6765 | + img_data = torch.randint(0, 256, (3, 4, 4), dtype=dtype) |
| 6766 | + else: |
| 6767 | + img_data = torch.rand(3, 4, 4, dtype=dtype) |
| 6768 | + img_data = img_data.cuda() |
| 6769 | + cvcuda_img = F.to_cvcuda_tensor(img_data) |
| 6770 | + assert cvcuda_img is not None |
| 6771 | + |
| 6772 | + def test_invalid_input_type(self): |
| 6773 | + with pytest.raises(TypeError, match=r"pic should be `torch.Tensor`"): |
| 6774 | + F.to_cvcuda_tensor("invalid_input") |
| 6775 | + |
| 6776 | + def test_invalid_dimensions(self): |
| 6778 | +        # Test 1D tensor (too few dimensions) |
| 6778 | + # Create tensor on CPU first, then move to CUDA to avoid CUDA context issues |
| 6779 | + with pytest.raises(ValueError, match=r"pic should be 3 or 4 dimensional"): |
| 6780 | + img_data = torch.randint(0, 256, (4,), dtype=torch.uint8) |
| 6781 | + img_data = img_data.cuda() |
| 6782 | + F.to_cvcuda_tensor(img_data) |
| 6783 | + |
| 6784 | +        # Test 2D tensor (not supported) |
| 6785 | + with pytest.raises(ValueError, match=r"pic should be 3 or 4 dimensional"): |
| 6786 | + img_data = torch.randint(0, 256, (4, 4), dtype=torch.uint8) |
| 6787 | + img_data = img_data.cuda() |
| 6788 | + F.to_cvcuda_tensor(img_data) |
| 6789 | + |
| 6790 | +        # Test 5D tensor (too many dimensions) |
| 6791 | + with pytest.raises(ValueError, match=r"pic should be 3 or 4 dimensional"): |
| 6792 | + img_data = torch.randint(0, 256, (1, 1, 3, 4, 4), dtype=torch.uint8) |
| 6793 | + img_data = img_data.cuda() |
| 6794 | + F.to_cvcuda_tensor(img_data) |
| 6795 | + |
| 6796 | + @pytest.mark.parametrize("num_channels", [1, 3]) |
| 6797 | + @pytest.mark.parametrize("dtype", [torch.uint8, torch.uint16, torch.float32, torch.float64]) |
| 6798 | + def test_round_trip(self, num_channels, dtype): |
| 6799 | + # Setup: Create a tensor in CHW format (PyTorch standard) |
| 6800 | + # Create tensor on CPU first, then move to CUDA to avoid CUDA context issues |
| 6801 | + if dtype in (torch.uint8, torch.uint16): |
| 6802 | + original_tensor = torch.randint(0, 256, (num_channels, 4, 4), dtype=dtype) |
| 6803 | + else: |
| 6804 | + original_tensor = torch.rand(num_channels, 4, 4, dtype=dtype) |
| 6805 | + original_tensor = original_tensor.cuda() |
| 6806 | + |
| 6807 | + # Execute: Convert to CV-CUDA and back to tensor |
| 6808 | + # CHW -> (to_cvcuda_tensor) -> CV-CUDA NHWC -> (cvcuda_to_tensor) -> NCHW |
| 6809 | + cvcuda_tensor = F.to_cvcuda_tensor(original_tensor) |
| 6810 | + result_tensor = F.cvcuda_to_tensor(cvcuda_tensor) |
| 6811 | + |
| 6812 | + # Remove batch dimension that was added during conversion since original was unbatched |
| 6813 | + result_tensor = result_tensor.squeeze(0) |
| 6814 | + |
| 6815 | + # Assert: The round-trip conversion preserves the original tensor exactly |
| 6816 | + torch.testing.assert_close(result_tensor, original_tensor, rtol=0, atol=0) |
| 6817 | + |
| 6818 | + @pytest.mark.parametrize("num_channels", [1, 3]) |
| 6819 | + @pytest.mark.parametrize("dtype", [torch.uint8, torch.uint16, torch.float32, torch.float64]) |
| 6820 | + @pytest.mark.parametrize("batch_size", [1, 2, 4]) |
| 6821 | + def test_round_trip_batched(self, num_channels, dtype, batch_size): |
| 6822 | + # Setup: Create a batched tensor in NCHW format |
| 6823 | + # Create tensor on CPU first, then move to CUDA to avoid CUDA context issues |
| 6824 | + if dtype in (torch.uint8, torch.uint16): |
| 6825 | + original_tensor = torch.randint(0, 256, (batch_size, num_channels, 4, 4), dtype=dtype) |
| 6826 | + else: |
| 6827 | + original_tensor = torch.rand(batch_size, num_channels, 4, 4, dtype=dtype) |
| 6828 | + original_tensor = original_tensor.cuda() |
| 6829 | + |
| 6830 | + # Execute: Convert to CV-CUDA and back to tensor |
| 6831 | + # NCHW -> (to_cvcuda_tensor) -> CV-CUDA NHWC -> (cvcuda_to_tensor) -> NCHW |
| 6832 | + cvcuda_tensor = F.to_cvcuda_tensor(original_tensor) |
| 6833 | + result_tensor = F.cvcuda_to_tensor(cvcuda_tensor) |
| 6834 | + |
| 6835 | + # Assert: The round-trip conversion preserves the original batched tensor exactly |
| 6836 | + torch.testing.assert_close(result_tensor, original_tensor, rtol=0, atol=0) |
| 6837 | + # Also verify batch size is preserved |
| 6838 | + assert result_tensor.shape[0] == batch_size |
| 6839 | + |
| 6840 | + |
| 6841 | +@pytest.mark.skipif(not CVCUDA_AVAILABLE, reason="test requires CVCUDA") |
| 6842 | +@pytest.mark.skipif(not CUDA_AVAILABLE, reason="test requires CUDA") |
| 6843 | +class TestCVCUDAToTensor: |
| 6844 | + @pytest.mark.parametrize("color_space", ["RGB", "GRAY"]) |
| 6845 | + @pytest.mark.parametrize( |
| 6846 | + "fn", |
| 6847 | + [F.cvcuda_to_tensor, transform_cls_to_functional(transforms.CVCUDAToTensor)], |
| 6848 | + ) |
| 6849 | + def test_functional_and_transform(self, color_space, fn): |
| 6850 | + input = make_image_cvcuda(color_space=color_space) |
| 6851 | + |
| 6852 | + output = fn(input) |
| 6853 | + |
| 6854 | + assert isinstance(output, torch.Tensor) |
| 6855 | + # Convert input to tensor to compare sizes |
| 6856 | + input_tensor = F.cvcuda_to_tensor(input) |
| 6857 | + assert F.get_size(output) == F.get_size(input_tensor) |
| 6858 | + |
| 6859 | + def test_functional_error(self): |
| 6860 | + with pytest.raises(TypeError, match="cvcuda_img should be `cvcuda.Tensor`"): |
| 6861 | + F.cvcuda_to_tensor(object()) |
| 6862 | + |
| 6863 | + |
6736 | 6864 | class TestLambda: |
6737 | 6865 | @pytest.mark.parametrize("input", [object(), torch.empty(()), np.empty(()), "string", 1, 0.0]) |
6738 | 6866 | @pytest.mark.parametrize("types", [(), (torch.Tensor, np.ndarray)]) |
|
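For reference, a minimal sketch of the round trip these tests exercise, assuming CV-CUDA is installed and a CUDA device is available (variable names are illustrative):

```python
import torch
from torchvision.transforms.v2 import functional as F

# Build the image on the CPU first, then move it to the GPU,
# mirroring the pattern used throughout the tests above.
img = torch.randint(0, 256, (3, 4, 4), dtype=torch.uint8).cuda()  # CHW

cv_img = F.to_cvcuda_tensor(img)   # CHW -> batched NHWC cvcuda.Tensor
back = F.cvcuda_to_tensor(cv_img)  # NHWC -> NCHW torch.Tensor
back = back.squeeze(0)             # drop the batch dim added during conversion

# The round trip preserves values exactly.
torch.testing.assert_close(back, img, rtol=0, atol=0)
```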