
Commit a891808

Removed redundant comments
Signed-off-by: greg-kwasniewski1 <[email protected]>
1 parent 5eb4a8d commit a891808

2 files changed: 0 additions, 8 deletions


tensorrt_llm/_torch/auto_deploy/transformations/library/sharding.py

Lines changed: 0 additions & 1 deletion
@@ -718,7 +718,6 @@ def detect_column_row_shard(
 
     if sharding_config.use_sharding_from_factory and sharding_config.predefined_config is not None:
         ad_logger.info("Using TP sharding from config")
-        # sharding_config.simple_shard_attention_layers()
         detect_tp_sharding_from_factory_config(gm, sharding_config)
         return
 

tensorrt_llm/_torch/auto_deploy/transformations/transform.py

Lines changed: 0 additions & 7 deletions
@@ -131,13 +131,6 @@ def __call__(self, cm: CachedSequenceInterface) -> nn.Module:
         # run BMM sharding across ranks
         detect_dp_bmm_shard(egm, sharding_config)
 
-        # print detected transformations
-        ad_logger.info("\n\nTP sharding:")
-        for tp_transform in sharding_config.tp_transforms:
-            ad_logger.info(
-                f"{tp_transform.target_node} {tp_transform.split_dim} {tp_transform.dist_op}"
-            )
-
         sharding_transform_executor(egm, sharding_config)
 
         # let's run a shape propagation pass to update the graph with correct meta values for
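
If the per-transform logging removed above is still useful when debugging, it can be invoked on demand instead of running on every call. A minimal sketch of such a standalone helper, reusing the field names from the removed lines (target_node, split_dim, dist_op); the helper itself and the ad_logger import path are assumptions, not part of this commit:

from tensorrt_llm._torch.auto_deploy.utils.logger import ad_logger  # assumed import path


def log_tp_transforms(sharding_config) -> None:
    """Hypothetical debugging helper: log the detected TP sharding transforms."""
    ad_logger.info("TP sharding:")
    # Same fields as the removed logging loop: target node, split dimension,
    # and the distributed op chosen for each tensor-parallel transform.
    for tp_transform in sharding_config.tp_transforms:
        ad_logger.info(
            f"{tp_transform.target_node} {tp_transform.split_dim} {tp_transform.dist_op}"
        )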
