Commit 95674ca

Removed redundant comments
Signed-off-by: greg-kwasniewski1 <[email protected]>
1 parent ddabeb8 commit 95674ca

2 files changed: +0 additions, −8 deletions

tensorrt_llm/_torch/auto_deploy/transformations/library/sharding.py

Lines changed: 0 additions & 1 deletion
@@ -718,7 +718,6 @@ def detect_column_row_shard(
 
     if sharding_config.use_sharding_from_factory and sharding_config.predefined_config is not None:
         ad_logger.info("Using TP sharding from config")
-        # sharding_config.simple_shard_attention_layers()
         detect_tp_sharding_from_factory_config(gm, sharding_config)
         return
 

tensorrt_llm/_torch/auto_deploy/transformations/transform.py

Lines changed: 0 additions & 7 deletions
@@ -112,13 +112,6 @@ def __call__(self, cm: CachedSequenceInterface) -> nn.Module:
         # run BMM sharding across ranks
         detect_dp_bmm_shard(egm, sharding_config)
 
-        # print detected transformations
-        ad_logger.info("\n\nTP sharding:")
-        for tp_transform in sharding_config.tp_transforms:
-            ad_logger.info(
-                f"{tp_transform.target_node} {tp_transform.split_dim} {tp_transform.dist_op}"
-            )
-
         sharding_transform_executor(egm, sharding_config)
 
         # let's run a shape propagation pass to update the graph with correct meta values for
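For context: the block deleted from transform.py only logged the detected TP transforms before sharding_transform_executor ran; no transformation logic changes. Below is a minimal, self-contained sketch of the equivalent logging, using Python's standard logging module and a hypothetical TPTransform stand-in for the entries of sharding_config.tp_transforms. Everything not visible in the diff above is an assumption for illustration, not the actual TensorRT-LLM API.

import logging
from dataclasses import dataclass

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("tp_sharding_demo")  # stand-in for ad_logger


@dataclass
class TPTransform:
    # hypothetical stand-in for an entry of sharding_config.tp_transforms
    target_node: str
    split_dim: int
    dist_op: str


def log_tp_transforms(tp_transforms):
    # mirrors the deleted debug block: one info line per detected transform
    logger.info("\n\nTP sharding:")
    for tp_transform in tp_transforms:
        logger.info(
            f"{tp_transform.target_node} {tp_transform.split_dim} {tp_transform.dist_op}"
        )


if __name__ == "__main__":
    # example data only, for illustration
    log_tp_transforms([TPTransform("linear_qkv", 0, "all_gather")])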
