12 changes: 2 additions & 10 deletions py/torch_tensorrt/dynamo/conversion/aten_ops_converters.py
@@ -10,7 +10,7 @@
from torch.fx.node import Argument, Node, Target
from torch_tensorrt import ENABLED_FEATURES
from torch_tensorrt._features import needs_not_tensorrt_rtx
from torch_tensorrt._utils import is_tensorrt_version_supported, is_thor
from torch_tensorrt._utils import is_tensorrt_version_supported
from torch_tensorrt.dynamo._settings import CompilationSettings
from torch_tensorrt.dynamo._SourceIR import SourceIR
from torch_tensorrt.dynamo.conversion import impl
@@ -429,7 +429,7 @@ def index_nonbool_validator(
node: Node, settings: Optional[CompilationSettings] = None
) -> bool:
# for thor and tensorrt_rtx, we don't support boolean indices, due to nonzero op not supported
if is_thor() or ENABLED_FEATURES.tensorrt_rtx:
if ENABLED_FEATURES.tensorrt_rtx:
index = node.args[1]
for ind in index:
if ind is not None:
@@ -3621,18 +3621,10 @@ def aten_ops_full(
)


def nonzero_validator(
node: Node, settings: Optional[CompilationSettings] = None
) -> bool:
return not is_thor()


# currently nonzero is not supported for tensorrt_rtx
# TODO: lan to add the nonzero support once tensorrt_rtx team has added the support
# TODO: apbose to remove the capability validator once thor bug resolve in NGC
@dynamo_tensorrt_converter(
torch.ops.aten.nonzero.default,
capability_validator=nonzero_validator,
supports_dynamic_shapes=True,
requires_output_allocator=True,
)
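For context, converters registered through `dynamo_tensorrt_converter` can be gated at partitioning time by a `capability_validator` callback; with the Thor-specific check removed, the remaining gate for nonzero-based lowering is the `tensorrt_rtx` feature flag. Below is a minimal sketch of that pattern, assuming the validator signature shown in this file; the `my_nonzero_validator` name is hypothetical and the decorator usage is shown only as a comment mirroring the registration above.

```python
from typing import Optional

from torch.fx.node import Node
from torch_tensorrt import ENABLED_FEATURES
from torch_tensorrt.dynamo._settings import CompilationSettings


# Hypothetical validator: reject conversion when the active build
# (tensorrt_rtx) does not support the underlying nonzero op.
def my_nonzero_validator(
    node: Node, settings: Optional[CompilationSettings] = None
) -> bool:
    return not ENABLED_FEATURES.tensorrt_rtx


# Sketch of attaching the validator to a converter registration;
# the keyword arguments mirror the nonzero registration in this diff.
# @dynamo_tensorrt_converter(
#     torch.ops.aten.nonzero.default,
#     capability_validator=my_nonzero_validator,
#     supports_dynamic_shapes=True,
#     requires_output_allocator=True,
# )
# def aten_ops_nonzero(ctx, target, args, kwargs, name): ...
```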
6 changes: 3 additions & 3 deletions tests/py/dynamo/conversion/test_arange_aten.py
@@ -5,14 +5,14 @@
import torch_tensorrt
from parameterized import parameterized
from torch.testing._internal.common_utils import run_tests
from torch_tensorrt._utils import is_tegra_platform, is_thor
from torch_tensorrt._utils import is_tegra_platform

from .harness import DispatchTestCase


@unittest.skipIf(
is_thor() or is_tegra_platform(),
"Skipped on Thor and Tegra platforms",
is_tegra_platform(),
"Skipped on Tegra platforms",
)
class TestArangeConverter(DispatchTestCase):
@parameterized.expand(
6 changes: 3 additions & 3 deletions tests/py/dynamo/conversion/test_cumsum_aten.py
@@ -5,14 +5,14 @@
import torch_tensorrt
from parameterized import parameterized
from torch.testing._internal.common_utils import run_tests
from torch_tensorrt._utils import is_tegra_platform, is_thor
from torch_tensorrt._utils import is_tegra_platform

from .harness import DispatchTestCase


@unittest.skipIf(
is_thor() or is_tegra_platform(),
"Skipped on Thor and Tegra platforms",
is_tegra_platform(),
"Skipped on Tegra platforms",
)
class TestCumsumConverter(DispatchTestCase):
@parameterized.expand(
22 changes: 9 additions & 13 deletions tests/py/dynamo/conversion/test_index_aten.py
@@ -6,7 +6,7 @@
from parameterized import parameterized
from torch.testing._internal.common_utils import run_tests
from torch_tensorrt import ENABLED_FEATURES, Input
from torch_tensorrt._utils import is_tegra_platform, is_thor
from torch_tensorrt._utils import is_tegra_platform

from .harness import DispatchTestCase

@@ -114,8 +114,8 @@ def forward(self, input):
]
)
@unittest.skipIf(
is_thor() or ENABLED_FEATURES.tensorrt_rtx,
"Skipped on Thor or tensorrt_rtx due to nonzero not supported",
ENABLED_FEATURES.tensorrt_rtx,
"Skipped on tensorrt_rtx due to nonzero not supported",
)
def test_index_constant_bool_mask(self, _, index, input):
class TestModule(torch.nn.Module):
@@ -149,8 +149,8 @@ def forward(self, x, index0):
)

@unittest.skipIf(
is_thor() or ENABLED_FEATURES.tensorrt_rtx,
"Skipped on Thor or tensorrt_rtx due to nonzero not supported",
ENABLED_FEATURES.tensorrt_rtx,
"Skipped on tensorrt_rtx due to nonzero not supported",
)
def test_index_zero_two_dim_ITensor_mask(self):
class TestModule(nn.Module):
@@ -163,10 +163,6 @@ def forward(self, x, index0):
index0 = torch.tensor([True, False])
self.run_test(TestModule(), [input, index0], enable_passes=True)

@unittest.skipIf(
is_thor(),
"Skipped on Thor due to nonzero not supported",
)
def test_index_zero_index_three_dim_ITensor(self):
class TestModule(nn.Module):
def forward(self, x, index0):
@@ -180,8 +176,8 @@ def forward(self, x, index0):
self.run_test(TestModule(), [input, index0])

@unittest.skipIf(
is_thor() or ENABLED_FEATURES.tensorrt_rtx,
"Skipped on Thor or tensorrt_rtx due to nonzero not supported",
ENABLED_FEATURES.tensorrt_rtx,
"Skipped on tensorrt_rtx due to nonzero not supported",
)
def test_index_zero_index_three_dim_mask_ITensor(self):
class TestModule(nn.Module):
@@ -252,8 +248,8 @@ def forward(self, input):


@unittest.skipIf(
torch_tensorrt.ENABLED_FEATURES.tensorrt_rtx or is_thor() or is_tegra_platform(),
"nonzero is not supported for tensorrt_rtx",
torch_tensorrt.ENABLED_FEATURES.tensorrt_rtx or is_tegra_platform(),
"nonzero is not supported for tensorrt_rtx or Tegra platforms",
)
class TestIndexDynamicInputNonDynamicIndexConverter(DispatchTestCase):
def test_index_input_non_dynamic_index_dynamic(self):
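The test files above all follow the same gating pattern: platform- or build-specific limitations are expressed as `unittest.skipIf` guards on the test class or method rather than branches inside the test body. A minimal sketch of that pattern, under the same assumptions as the tests in this diff (the class and test names here are placeholders):

```python
import unittest

import torch
from torch_tensorrt import ENABLED_FEATURES
from torch_tensorrt._utils import is_tegra_platform


@unittest.skipIf(
    ENABLED_FEATURES.tensorrt_rtx or is_tegra_platform(),
    "nonzero is not supported for tensorrt_rtx or Tegra platforms",
)
class ExampleBoolIndexTest(unittest.TestCase):
    # Placeholder test: boolean-mask indexing lowers through aten.nonzero,
    # so it is skipped wherever nonzero is unavailable.
    def test_bool_mask_index(self):
        x = torch.randn(4, 4)
        mask = torch.tensor([True, False, True, False])
        self.assertEqual(x[mask].shape[0], int(mask.sum()))


if __name__ == "__main__":
    unittest.main()
```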
6 changes: 3 additions & 3 deletions tests/py/dynamo/conversion/test_nonzero_aten.py
@@ -6,14 +6,14 @@
from parameterized import parameterized
from torch.testing._internal.common_utils import run_tests
from torch_tensorrt import Input
from torch_tensorrt._utils import is_tegra_platform, is_thor
from torch_tensorrt._utils import is_tegra_platform

from .harness import DispatchTestCase


@unittest.skipIf(
torch_tensorrt.ENABLED_FEATURES.tensorrt_rtx or is_thor() or is_tegra_platform(),
"nonzero is not supported for tensorrt_rtx",
torch_tensorrt.ENABLED_FEATURES.tensorrt_rtx or is_tegra_platform(),
"nonzero is not supported for tensorrt_rtx or Tegra platforms",
)
class TestNonZeroConverter(DispatchTestCase):
@parameterized.expand(
5 changes: 0 additions & 5 deletions tests/py/dynamo/conversion/test_sym_size.py
@@ -4,15 +4,10 @@
import torch.nn as nn
from parameterized import parameterized
from torch.testing._internal.common_utils import run_tests
from torch_tensorrt._utils import is_thor

from .harness import DispatchTestCase


@unittest.skipIf(
is_thor(),
"Skipped on Thor",
)
class TestSymSizeConverter(DispatchTestCase):
@parameterized.expand(
[
19 changes: 14 additions & 5 deletions tests/py/dynamo/models/test_export_kwargs_serde.py
@@ -75,7 +75,8 @@ def forward(self, x, b=5, c=None, d=None):
)

# Save the module
trt_ep_path = os.path.join(tempfile.gettempdir(), "compiled.ep")
tmp_dir = tempfile.mkdtemp(prefix="test_custom_model")
trt_ep_path = os.path.join(tmp_dir, "compiled.ep")
torchtrt.save(trt_gm, trt_ep_path, retrace=False)
# Clean up model env
torch._dynamo.reset()
@@ -137,7 +138,8 @@ def forward(self, x, b=5, c=None, d=None):
)

# Save the module
trt_ep_path = os.path.join(tempfile.gettempdir(), "compiled.ep")
tmp_dir = tempfile.mkdtemp(prefix="test_custom_model_with_dynamo_trace")
trt_ep_path = os.path.join(tmp_dir, "compiled.ep")
torchtrt.save(trt_gm, trt_ep_path, retrace=False)
# Clean up model env
torch._dynamo.reset()
@@ -208,7 +210,8 @@ def forward(self, x, b=5, c=None, d=None):
)

# Save the module
trt_ep_path = os.path.join(tempfile.gettempdir(), "compiled.ep")
tmp_dir = tempfile.mkdtemp(prefix="test_custom_model_with_dynamo_trace_dynamic")
trt_ep_path = os.path.join(tmp_dir, "compiled.ep")
torchtrt.save(trt_gm, trt_ep_path, retrace=False)
# Clean up model env
torch._dynamo.reset()
@@ -298,7 +301,10 @@ def forward(self, x, b=None, c=None, d=None, e=[]):
msg=f"CustomKwargs Module TRT outputs don't match with the original model. Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}",
)
# Save the module
trt_ep_path = os.path.join(tempfile.gettempdir(), "compiled.ep")
tmp_dir = tempfile.mkdtemp(
prefix="test_custom_model_with_dynamo_trace_kwarg_dynamic"
)
trt_ep_path = os.path.join(tmp_dir, "compiled.ep")
torchtrt.save(trt_gm, trt_ep_path, retrace=False)
# Clean up model env
torch._dynamo.reset()
@@ -388,7 +394,10 @@ def forward(self, x, b=None, c=None, d=None, e=[]):
msg=f"CustomKwargs Module TRT outputs don't match with the original model. Cosine sim score: {cos_sim} Threshold: {COSINE_THRESHOLD}",
)
# Save the module
trt_ep_path = os.path.join(tempfile.gettempdir(), "compiled.ep")
tmp_dir = tempfile.mkdtemp(
prefix="test_custom_model_with_dynamo_trace_kwarg_dynamic"
)
trt_ep_path = os.path.join(tmp_dir, "compiled.ep")
torchtrt.save(trt_gm, trt_ep_path, retrace=False)
# Clean up model env
torch._dynamo.reset()
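The serialization tests above stop sharing a single `compiled.ep` under `tempfile.gettempdir()` and instead create a fresh directory per test via `tempfile.mkdtemp(prefix=...)`, so repeated or concurrent runs cannot clobber each other's artifacts. A minimal sketch of the pattern; the prefix and file name are illustrative:

```python
import os
import tempfile

# Each test derives its own scratch directory; mkdtemp() guarantees a
# unique, freshly created path, unlike a fixed file under gettempdir().
tmp_dir = tempfile.mkdtemp(prefix="test_example_model")
trt_ep_path = os.path.join(tmp_dir, "compiled.ep")

# ... compile the model, then persist it, e.g.:
# torchtrt.save(trt_gm, trt_ep_path, retrace=False)
```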
32 changes: 30 additions & 2 deletions tests/py/dynamo/models/test_export_serde.py
@@ -17,8 +17,6 @@
if importlib.util.find_spec("torchvision"):
import torchvision.models as models

trt_ep_path = os.path.join(tempfile.gettempdir(), "trt.ep")


@pytest.mark.unit
@pytest.mark.critical
@@ -27,6 +25,8 @@ def test_base_full_compile(ir):
This tests export serde functionality on a base model
which is fully TRT convertible
"""
tmp_dir = tempfile.mkdtemp(prefix="test_base_full_compile")
trt_ep_path = os.path.join(tmp_dir, "trt.ep")

class MyModule(torch.nn.Module):
def __init__(self):
@@ -82,6 +82,9 @@ def test_base_full_compile_multiple_outputs(ir):
with multiple outputs which is fully TRT convertible
"""

tmp_dir = tempfile.mkdtemp(prefix="test_base_full_compile_multiple_outputs")
trt_ep_path = os.path.join(tmp_dir, "trt.ep")

class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
@@ -141,6 +144,8 @@ def test_no_compile(ir):
This tests export serde functionality on a model
which won't convert to TRT because of min_block_size=5 constraint
"""
tmp_dir = tempfile.mkdtemp(prefix="test_no_compile")
trt_ep_path = os.path.join(tmp_dir, "trt.ep")

class MyModule(torch.nn.Module):
def __init__(self):
@@ -202,6 +207,9 @@ def test_hybrid_relu_fallback(ir):
fallback
"""

tmp_dir = tempfile.mkdtemp(prefix="test_hybrid_relu_fallback")
trt_ep_path = os.path.join(tmp_dir, "trt.ep")

class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
@@ -262,6 +270,9 @@ def test_resnet18(ir):
"""
This tests export save and load functionality on Resnet18 model
"""
tmp_dir = tempfile.mkdtemp(prefix="test_resnet18")
trt_ep_path = os.path.join(tmp_dir, "trt.ep")

model = models.resnet18().eval().cuda()
input = torch.randn((1, 3, 224, 224)).to("cuda")

@@ -307,6 +318,9 @@ def test_resnet18_cpu_offload(ir):
"""
This tests export save and load functionality on Resnet18 model
"""
tmp_dir = tempfile.mkdtemp(prefix="test_resnet18_cpu_offload")
trt_ep_path = os.path.join(tmp_dir, "trt.ep")

model = models.resnet18().eval().cuda()
input = torch.randn((1, 3, 224, 224)).to("cuda")

@@ -359,6 +373,9 @@ def test_resnet18_dynamic(ir):
"""
This tests export save and load functionality on Resnet18 model
"""
tmp_dir = tempfile.mkdtemp(prefix="test_resnet18_dynamic")
trt_ep_path = os.path.join(tmp_dir, "trt.ep")

model = models.resnet18().eval().cuda()
input = torch.randn((1, 3, 224, 224)).to("cuda")

@@ -399,6 +416,9 @@ def test_resnet18_torch_exec_ops_serde(ir):
"""
This tests export save and load functionality on Resnet18 model
"""
tmp_dir = tempfile.mkdtemp(prefix="test_resnet18_torch_exec_ops_serde")
trt_ep_path = os.path.join(tmp_dir, "trt.ep")

model = models.resnet18().eval().cuda()
input = torch.randn((1, 3, 224, 224)).to("cuda")

@@ -432,6 +452,9 @@ def test_hybrid_conv_fallback(ir):
model where a conv (a weighted layer) has been forced to fallback to Pytorch.
"""

tmp_dir = tempfile.mkdtemp(prefix="test_hybrid_conv_fallback")
trt_ep_path = os.path.join(tmp_dir, "trt.ep")

class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
@@ -493,6 +516,9 @@ def test_hybrid_conv_fallback_cpu_offload(ir):
model where a conv (a weighted layer) has been forced to fallback to Pytorch.
"""

tmp_dir = tempfile.mkdtemp(prefix="test_hybrid_conv_fallback_cpu_offload")
trt_ep_path = os.path.join(tmp_dir, "trt.ep")

class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
@@ -555,6 +581,8 @@ def test_arange_export(ir):
Here the arange output is a static constant (which is registered as input to the graph)
in the exporter.
"""
tmp_dir = tempfile.mkdtemp(prefix="test_arange_export")
trt_ep_path = os.path.join(tmp_dir, "trt.ep")

class MyModule(torch.nn.Module):
def __init__(self):
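These tests exercise the export serde round trip: compile, save with `torchtrt.save(..., retrace=False)`, reload, and compare outputs against the compiled module. A hedged sketch of that flow, assuming `torch_tensorrt.dynamo.compile` and `torch.export.load` as the compile and reload entry points; argument names and tolerances are illustrative, not the exact test code:

```python
import os
import tempfile

import torch
import torch_tensorrt as torchtrt
import torchvision.models as models

model = models.resnet18().eval().cuda()
inputs = [torch.randn((1, 3, 224, 224)).cuda()]

# Export and compile through Torch-TensorRT's dynamo path (assumed API).
exp_program = torch.export.export(model, tuple(inputs))
trt_gm = torchtrt.dynamo.compile(exp_program, inputs=inputs)

# Per-test scratch directory, matching the pattern introduced in this diff.
tmp_dir = tempfile.mkdtemp(prefix="test_resnet18_roundtrip")
trt_ep_path = os.path.join(tmp_dir, "trt.ep")
torchtrt.save(trt_gm, trt_ep_path, retrace=False)

# Reload the serialized ExportedProgram and check it matches the live module.
reloaded = torch.export.load(trt_ep_path).module()
torch.testing.assert_close(reloaded(*inputs), trt_gm(*inputs), rtol=1e-2, atol=1e-2)
```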
10 changes: 8 additions & 2 deletions tests/py/dynamo/models/test_model_refit.py
@@ -532,7 +532,10 @@ def test_refit_one_engine_bert_with_weightmap():
)
@pytest.mark.unit
def test_refit_one_engine_inline_runtime_with_weightmap():
trt_ep_path = os.path.join(tempfile.gettempdir(), "compiled.ep")
tmp_dir = tempfile.mkdtemp(
prefix="test_refit_one_engine_inline_runtime_with_weightmap"
)
trt_ep_path = os.path.join(tmp_dir, "compiled.ep")
model = models.resnet18(pretrained=False).eval().to("cuda")
model2 = models.resnet18(pretrained=True).eval().to("cuda")
inputs = [torch.randn((1, 3, 224, 224)).to("cuda")]
@@ -889,7 +892,10 @@ def test_refit_one_engine_bert_without_weightmap():
)
@pytest.mark.unit
def test_refit_one_engine_inline_runtime_without_weightmap():
trt_ep_path = os.path.join(tempfile.gettempdir(), "compiled.ep")
tmp_dir = tempfile.mkdtemp(
prefix="test_refit_one_engine_inline_runtime_without_weightmap"
)
trt_ep_path = os.path.join(tmp_dir, "compiled.ep")
model = models.resnet18(pretrained=True).eval().to("cuda")
model2 = models.resnet18(pretrained=False).eval().to("cuda")
inputs = [torch.randn((1, 3, 224, 224)).to("cuda")]