diff --git a/test/test_tv_tensors.py b/test/test_tv_tensors.py
index ed75ae35ecd..a8e59ab7531 100644
--- a/test/test_tv_tensors.py
+++ b/test/test_tv_tensors.py
@@ -162,6 +162,11 @@ def test_force_subclass_with_metadata(return_type):
     if return_type == "TVTensor":
         assert bbox.format, bbox.canvas_size == (format, canvas_size)
 
+    if torch.cuda.is_available():
+        bbox = bbox.pin_memory()
+        if return_type == "TVTensor":
+            assert bbox.format, bbox.canvas_size == (format, canvas_size)
+
     assert not bbox.requires_grad
     bbox.requires_grad_(True)
     if return_type == "TVTensor":
diff --git a/torchvision/tv_tensors/_torch_function_helpers.py b/torchvision/tv_tensors/_torch_function_helpers.py
index e6ea5fddf35..66812fb5ca6 100644
--- a/torchvision/tv_tensors/_torch_function_helpers.py
+++ b/torchvision/tv_tensors/_torch_function_helpers.py
@@ -69,4 +69,10 @@ def _must_return_subclass():
 
 
 # For those ops we always want to preserve the original subclass instead of returning a pure Tensor
-_FORCE_TORCHFUNCTION_SUBCLASS = {torch.Tensor.clone, torch.Tensor.to, torch.Tensor.detach, torch.Tensor.requires_grad_}
+_FORCE_TORCHFUNCTION_SUBCLASS = {
+    torch.Tensor.clone,
+    torch.Tensor.to,
+    torch.Tensor.detach,
+    torch.Tensor.requires_grad_,
+    torch.Tensor.pin_memory,
+}
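
The sketch below illustrates what adding `torch.Tensor.pin_memory` to `_FORCE_TORCHFUNCTION_SUBCLASS` enables: pinning a TV tensor now keeps the subclass and its metadata instead of decaying to a plain `torch.Tensor`. This is a minimal usage example modeled on the test above, not part of the patch; it assumes a CUDA-capable runtime, since `pin_memory()` needs an accelerator backend.

```python
# Minimal sketch (not part of the patch): with torch.Tensor.pin_memory in
# _FORCE_TORCHFUNCTION_SUBCLASS, pin_memory() on a TVTensor preserves the
# subclass and its metadata (format, canvas_size).
import torch
from torchvision import tv_tensors

if torch.cuda.is_available():  # pin_memory requires an available accelerator
    bbox = tv_tensors.BoundingBoxes(
        [[0, 0, 5, 5]], format="XYXY", canvas_size=(32, 32)
    )
    pinned = bbox.pin_memory()

    # The result is still a BoundingBoxes with its metadata intact.
    assert isinstance(pinned, tv_tensors.BoundingBoxes)
    assert pinned.format == tv_tensors.BoundingBoxFormat.XYXY
    assert pinned.canvas_size == (32, 32)
```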