Conversation
There are some changes that do not conform to Python style guidelines:
--- /home/runner/work/TensorRT/TensorRT/examples/dynamo/save_dynamic_shapes_both_methods.py 2026-02-12 07:44:49.746067+00:00
+++ /home/runner/work/TensorRT/TensorRT/examples/dynamo/save_dynamic_shapes_both_methods.py 2026-02-12 07:45:24.467335+00:00
@@ -21,10 +21,11 @@
import tempfile
import torch
import torch.nn as nn
import torch_tensorrt
+
# %%
# Define a simple model
class SimpleModel(nn.Module):
def __init__(self):
@@ -60,11 +61,14 @@
# Compile with TensorRT
trt_module_method1 = torch_tensorrt.dynamo.compile(
exp_program,
inputs=[
torch_tensorrt.Input(
- min_shape=(1, 10), opt_shape=(8, 10), max_shape=(32, 10), dtype=torch.float32
+ min_shape=(1, 10),
+ opt_shape=(8, 10),
+ max_shape=(32, 10),
+ dtype=torch.float32,
)
],
enabled_precisions={torch.float32},
min_block_size=1,
)
@@ -154,11 +158,12 @@
print("\n" + "=" * 60)
print("Summary")
print("=" * 60)
-print("""
+print(
+ """
Method 1 (Explicit torch.export.Dim):
✓ More control over dimension naming
✓ Familiar to torch.export users
✗ Requires specifying dynamic_shapes twice (export and save)
✗ More verbose
@@ -170,11 +175,12 @@
✓ RECOMMENDED for most use cases
✗ Less control over Dim naming (auto-generated)
**Recommendation**: Use Method 2 (torch_tensorrt.Input) unless you need
fine-grained control over dimension names for specific torch.export use cases.
-""")
+"""
+)
# %%
# Multiple Dynamic Dimensions Example
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
--- /home/runner/work/TensorRT/TensorRT/examples/dynamo/save_dynamic_shapes_example.py 2026-02-12 07:44:49.746067+00:00
+++ /home/runner/work/TensorRT/TensorRT/examples/dynamo/save_dynamic_shapes_example.py 2026-02-12 07:45:24.468461+00:00
@@ -20,10 +20,11 @@
import tempfile
import torch
import torch.nn as nn
import torch_tensorrt
+
# %%
# Define a simple model that we'll compile with dynamic batch size
class MyModel(nn.Module):
def __init__(self):
--- /home/runner/work/TensorRT/TensorRT/py/torch_tensorrt/dynamo/utils.py 2026-02-12 07:44:49.765067+00:00
+++ /home/runner/work/TensorRT/TensorRT/py/torch_tensorrt/dynamo/utils.py 2026-02-12 07:45:27.776410+00:00
@@ -806,13 +806,13 @@
Copy the metadata from anchor node to the replacement node. This should be used
if the anchor node is replaced with only a single replacement node i.e one-one replacement.
"""
for match_and_replacement in match_and_replacements:
anchor_node = match_and_replacement.nodes_map[match_and_replacement.anchor]
- assert len(match_and_replacement.replacements) == 1, (
- "Found more than 1 replacements for the anchor node."
- )
+ assert (
+ len(match_and_replacement.replacements) == 1
+ ), "Found more than 1 replacements for the anchor node."
replacement_node = match_and_replacement.replacements[0]
replacement_node.meta = anchor_node.meta
def flatten_nodes(nodes: Any) -> List[torch.fx.node.Node]:
--- /home/runner/work/TensorRT/TensorRT/tests/py/dynamo/models/test_reexport.py 2026-02-12 07:44:49.789067+00:00
+++ /home/runner/work/TensorRT/TensorRT/tests/py/dynamo/models/test_reexport.py 2026-02-12 07:45:33.790813+00:00
@@ -1312,13 +1312,11 @@
return self.linear(x)
model = SimpleModel().eval().cuda()
# Static Input (single shape, not min/opt/max)
- compile_inputs = [
- torchtrt.Input(shape=(4, 10), dtype=torch.float32, name="x")
- ]
+ compile_inputs = [torchtrt.Input(shape=(4, 10), dtype=torch.float32, name="x")]
compile_spec = {
"inputs": compile_inputs,
"ir": ir,
"min_block_size": 1,There was a problem hiding this comment.
force-pushed from c4a5c3d to be928f9
force-pushed from be928f9 to ae76517
force-pushed from ae76517 to 4bcca50
"min_block_size": 1,|
|
||
| # Replace the pytorch submodule node (call_module) with the inlined subgraph output | ||
| gm_node.replace_all_uses_with(submodule_output) | ||
| # Special handling when submodule returns multiple outputs (tuple) |
I am not too sure about this, but it addresses some test cases.
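The tuple case flagged in the snippet's last comment line usually means rewiring the getitem users of the original call_module node. A hedged sketch of that pattern, reusing the names gm_node and submodule_output from the snippet; the helper itself is illustrative, not this PR's code:

import operator

import torch.fx


def replace_with_inlined_output(gm_node: torch.fx.Node, submodule_output) -> None:
    if isinstance(submodule_output, (tuple, list)):
        # Tuple case: every consumer of the call_module node is a getitem;
        # point each one at the matching element of the inlined outputs.
        for user in list(gm_node.users):
            if user.op == "call_function" and user.target == operator.getitem:
                user.replace_all_uses_with(submodule_output[user.args[1]])
    else:
        # Single-output case: a direct one-to-one replacement suffices.
        gm_node.replace_all_uses_with(submodule_output)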
…the metadata to use in the case of reexport. Also removes the need to access the real tensorrt engine during reexport
force-pushed from 4bcca50 to fb44e84
Description
Adds functionality to store shape expressions for each compiled subgraph in the metadata pickle. At re-serialization time, these shape expressions are applied to the input FakeTensors to describe the output shapes symbolically.
Fixes N/A
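A toy sketch of the mechanism described above, using sympy expressions as stand-ins for the stored shape-expression objects (an assumption; the PR's actual representation may differ, though PyTorch's symbolic shapes are sympy-based):

import sympy

# Stored at compile time: the subgraph's output shape as expressions over
# input symbols, e.g. a Linear(10, 10) layer preserves the symbolic batch dim s0.
s0 = sympy.Symbol("s0", positive=True, integer=True)
stored_output_shape = (s0, 10)


def bind(shape, symbol_map):
    # Substitute the input FakeTensor's symbols into each stored expression.
    return tuple(
        d.xreplace(symbol_map) if isinstance(d, sympy.Expr) else d for d in shape
    )


# At re-serialization time, s0 is bound to whatever symbol the input FakeTensor
# carries, so the output shape is described without loading the real TRT engine.
print(bind(stored_output_shape, {s0: sympy.Symbol("batch")}))  # (batch, 10)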