From 8a3316055e52648ef1c8109026e2247b552eef9f Mon Sep 17 00:00:00 2001
From: cehongwang
Date: Fri, 22 Aug 2025 20:54:31 +0000
Subject: [PATCH 1/2] Added the dynamic check in the validator

---
 .../dynamo/conversion/aten_ops_converters.py | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/py/torch_tensorrt/dynamo/conversion/aten_ops_converters.py b/py/torch_tensorrt/dynamo/conversion/aten_ops_converters.py
index abf721198b..2d52878325 100644
--- a/py/torch_tensorrt/dynamo/conversion/aten_ops_converters.py
+++ b/py/torch_tensorrt/dynamo/conversion/aten_ops_converters.py
@@ -23,6 +23,7 @@
     get_positive_dim,
     is_only_operator_on_placeholder,
 )
+from torch_tensorrt.dynamo.utils import DYNAMIC_DIM
 
 _LOGGER: logging.Logger = logging.getLogger(__name__)
 
@@ -2694,6 +2695,13 @@ def sort_validator(node: Node, settings: Optional[CompilationSettings] = None) -
 
 
 def topk_sort_validator(k: int) -> bool:
+
+    # topk layer supports dynamic k value but we cannot determine supported dynamic topk value at
+    # compile time.
+    if k == DYNAMIC_DIM:
+        _LOGGER.debug("k value cannot be dynamic!")
+        return False
+
     if k > 3840:
         _LOGGER.debug(
             f"Currently only topk values up to 3840 are supported, got k={k}."
@@ -3160,7 +3168,9 @@ def aten_ops_upsample_bicubic2d(
 
 
 @dynamo_tensorrt_converter(
-    torch.ops.aten.topk.default, capability_validator=topk_validator
+    torch.ops.aten.topk.default,
+    capability_validator=topk_validator,
+    supports_dynamic_shapes=True,
 )
 @enforce_tensor_types(
     {

From a300a3460aa24f58e866208a18e14646db1d28fa Mon Sep 17 00:00:00 2001
From: cehongwang
Date: Mon, 25 Aug 2025 18:19:28 +0000
Subject: [PATCH 2/2] rewrote the comment

---
 py/torch_tensorrt/dynamo/conversion/aten_ops_converters.py | 6 ++++--
 py/torch_tensorrt/dynamo/conversion/impl/topk.py           | 4 ----
 2 files changed, 4 insertions(+), 6 deletions(-)

diff --git a/py/torch_tensorrt/dynamo/conversion/aten_ops_converters.py b/py/torch_tensorrt/dynamo/conversion/aten_ops_converters.py
index 2d52878325..f1a86e3f45 100644
--- a/py/torch_tensorrt/dynamo/conversion/aten_ops_converters.py
+++ b/py/torch_tensorrt/dynamo/conversion/aten_ops_converters.py
@@ -2699,12 +2699,14 @@ def topk_sort_validator(k: int) -> bool:
     # topk layer supports dynamic k value but we cannot determine supported dynamic topk value at
     # compile time.
     if k == DYNAMIC_DIM:
-        _LOGGER.debug("k value cannot be dynamic!")
+        _LOGGER.debug(
+            "[top_k validator] Converter does not support k being a dynamic value. Therefore, aten::topk will run in PyTorch"
+        )
         return False
 
     if k > 3840:
         _LOGGER.debug(
-            f"Currently only topk values up to 3840 are supported, got k={k}."
+            f"[top_k validator] Currently only topk values up to 3840 are supported, got k={k}. Therefore, aten::topk will run in PyTorch"
         )
         return False
     return True
diff --git a/py/torch_tensorrt/dynamo/conversion/impl/topk.py b/py/torch_tensorrt/dynamo/conversion/impl/topk.py
index 053a46ce2b..638cbf599e 100644
--- a/py/torch_tensorrt/dynamo/conversion/impl/topk.py
+++ b/py/torch_tensorrt/dynamo/conversion/impl/topk.py
@@ -209,10 +209,6 @@ def topk(
         get_axes_for_reduce_op(get_positive_dim(dim, len(input.shape))),
     )
 
-    # topk layer supports dynamic k value but we cannot dertermin supported dynamic topk value at
-    # compile time.
-    assert k != DYNAMIC_DIM, "k value cannot be dynamic!"
-
     # TensorRT ITopKLayer does not have a sorted flag, it is always returning the sorted topk elements
     # so here no matter sorted is True or False the returned the topk Tensor object is always sorted
     set_layer_name(topk_layer, target, f"{name}_topk", source_ir)
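
Notes on exercising the change (not part of the patch series):

The fallback decision added by these patches is easy to check in isolation.
Below is a minimal, self-contained sketch of the validator's decision table;
DYNAMIC_DIM is inlined with its assumed value of -1 (the sentinel that
torch_tensorrt.dynamo.utils uses for dynamic dimensions) so the snippet runs
without a TensorRT install:

    import logging

    _LOGGER = logging.getLogger(__name__)
    DYNAMIC_DIM = -1  # assumed value of torch_tensorrt.dynamo.utils.DYNAMIC_DIM

    def topk_sort_validator(k: int) -> bool:
        # A dynamic k is rejected: a supported upper bound for k cannot be
        # verified at compile time, so aten::topk is left to PyTorch.
        if k == DYNAMIC_DIM:
            _LOGGER.debug("[top_k validator] k is dynamic, falling back to PyTorch")
            return False
        # TensorRT's topk layer only supports k values up to 3840.
        if k > 3840:
            _LOGGER.debug(f"[top_k validator] k={k} exceeds 3840, falling back to PyTorch")
            return False
        return True

    assert topk_sort_validator(8)                # static, in-range k converts
    assert not topk_sort_validator(4096)         # k above the TensorRT limit
    assert not topk_sort_validator(DYNAMIC_DIM)  # dynamic k falls back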
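
For an end-to-end check of the supports_dynamic_shapes=True path, a sketch
along these lines should compile torch.topk with a dynamic batch dimension
while k stays static (the module name and shapes are illustrative, and a
CUDA-enabled Torch-TensorRT build is assumed):

    import torch
    import torch_tensorrt

    class TopKModule(torch.nn.Module):
        def forward(self, x):
            # k is a static Python int, so the validator accepts the node
            # even though the first dimension of x is dynamic.
            return torch.topk(x, k=8, dim=1)

    # Dynamic batch dimension: anywhere from 1 to 32 rows at runtime.
    inputs = [
        torch_tensorrt.Input(
            min_shape=(1, 64),
            opt_shape=(8, 64),
            max_shape=(32, 64),
            dtype=torch.float32,
        )
    ]
    trt_module = torch_tensorrt.compile(
        TopKModule().eval().cuda(), ir="dynamo", inputs=inputs
    )
    values, indices = trt_module(torch.randn(4, 64).cuda())

A k that is itself dynamic (e.g. derived from a data-dependent shape) would
instead trip the new validator, and graph partitioning would keep aten::topk
running in PyTorch.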