Commit a02c00b

peaceh-nv and claude committed
[TRTLLM-11289][fix] Fix pre-commit formatting violations
Apply ruff format/lint fixes:
- Convert multi-line docstrings to single-line where appropriate (D200)
- Remove f-string prefix on strings without placeholders (F541)
- Remove unused import
- Use consistent double-quote docstrings instead of single-quotes
- Fix indentation in docstrings

Signed-off-by: Peace He <peaceh@nvidia.com>
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
1 parent 2f2647d commit a02c00b
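For context, here is a minimal before/after sketch of the two ruff rules the message names: D200 (a one-line docstring spread over several lines) and F541 (an f-string with no placeholders). The functions are hypothetical illustrations, not code from this diff.

# Before: D200 flags the docstring, F541 flags the f-string.
def greet_before():
    """
    Return a greeting.
    """
    return f"hello"

# After: the docstring collapses to one line and the inert f-prefix is dropped.
def greet_after():
    """Return a greeting."""
    return "hello"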

File tree

5 files changed: +64 -108 lines changed

tensorrt_llm/_torch/custom_ops/cute_dsl_custom_ops.py

Lines changed: 5 additions & 7 deletions

@@ -331,8 +331,6 @@ def get_dense_gemm_approximate_cta_nums(
         STATE_SIZE as CLUSTER_TOPK_STATE_SIZE
     from ..cute_dsl_kernels.blackwell.top_k.single_pass_multi_cta_radix_topk_cluster import (
         SinglePassMultiCTARadixTopKClusterKernel, _query_max_cluster_size)
-    from ..cute_dsl_kernels.blackwell.dense_gemm_persistent import \
-        PersistentDenseGemmKernel
     from ..cute_dsl_kernels.blackwell.utils import make_ptr
 
 class CuteDSLNVFP4BlackwellRunner(TunableRunner):
@@ -700,7 +698,7 @@ def forward(
             max_active_clusters,
             stream,
             swap_ab,
-            options=f"--opt-level 2 --enable-tvm-ffi"
+            options="--opt-level 2 --enable-tvm-ffi"
             if self.use_tvm_ffi else "--opt-level 2",
         )
 
@@ -2413,7 +2411,7 @@ def forward(
             c_cute_tensor,
             max_active_clusters=max_active_clusters,
             stream=stream,
-            options=f"--opt-level 2 --enable-tvm-ffi"
+            options="--opt-level 2 --enable-tvm-ffi"
             if self.use_tvm_ffi else "--opt-level 2",
         )
         self.__class__.kernel_cache[cache_key] = compiled_gemm
@@ -2727,7 +2725,7 @@ def forward(
             c_cute_tensor,
             max_active_clusters=max_active_clusters,
             stream=stream,
-            options=f"--opt-level 2 --enable-tvm-ffi"
+            options="--opt-level 2 --enable-tvm-ffi"
             if self.use_tvm_ffi else "--opt-level 2",
         )
         self.__class__.kernel_cache[cache_key] = compiled_gemm
@@ -4402,7 +4400,7 @@ def forward(
             a_stride_batch,
             max_active_clusters=max_active_clusters,
             stream=stream,
-            options=f"--opt-level 2 --enable-tvm-ffi"
+            options="--opt-level 2 --enable-tvm-ffi"
             if self.use_tvm_ffi else "--opt-level 2",
         )
         self.__class__.kernel_cache[cache_key] = compiled_gemm
@@ -4681,7 +4679,7 @@ def forward(
             c_cute_tensor,
             max_active_clusters=max_active_clusters,
             stream=stream,
-            options=f"--opt-level 2 --enable-tvm-ffi"
+            options="--opt-level 2 --enable-tvm-ffi"
             if self.use_tvm_ffi else "--opt-level 2",
         )
         self.__class__.kernel_cache[cache_key] = compiled_gemm
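Every hunk in this file touches the same pattern: the compile options string differs only by one flag, so a conditional expression selects between two literals, and since neither literal interpolates anything, the f-prefix that ruff removed was dead weight. A standalone sketch with a hypothetical helper name:

# Hypothetical helper reproducing the pattern from the hunks above.
def compile_options(use_tvm_ffi: bool) -> str:
    # Neither branch interpolates values, so plain strings suffice (F541).
    return ("--opt-level 2 --enable-tvm-ffi"
            if use_tvm_ffi else "--opt-level 2")

assert compile_options(True) == "--opt-level 2 --enable-tvm-ffi"
assert compile_options(False) == "--opt-level 2"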

tensorrt_llm/_torch/model_config.py

Lines changed: 1 addition & 1 deletion

@@ -296,7 +296,7 @@ def load_modelopt_quant_config(quant_config_file, checkpoint_dir,
             json_extended_quant_configs = json.load(fm)
         except Exception:
             logger.info(
-                f"No quant_cfg.json found for layer quant info, using hf_quant_config.json."
+                "No quant_cfg.json found for layer quant info, using hf_quant_config.json."
             )
         json_quant_configs.update(json_extended_quant_configs)
         # kv_cache_quant_algo is global regardless of MIXED_PRECISION

tensorrt_llm/_torch/models/modeling_deepseekv3.py

Lines changed: 2 additions & 3 deletions

@@ -1817,7 +1817,7 @@ def __init__(self, model_config: ModelConfig[PretrainedConfig]):
         # at the end of __init__.
         if model_config.mapping.has_cp_helix():
             print(
-                f"[DeepseekV3ForCausalLM::__init__] Repurposing KVP ranks to TP while keeping other details the same."
+                "[DeepseekV3ForCausalLM::__init__] Repurposing KVP ranks to TP while keeping other details the same."
             )
             self.mapping_with_cp = copy.deepcopy(model_config.mapping)
             # Repurpose KVP ranks to TP while keeping other details the same.
@@ -1880,8 +1880,7 @@ def __init__(self, model_config: ModelConfig[PretrainedConfig]):
         # Undo any manipulations done to mapping.
         if self.mapping_with_cp is not None:
             print(
-                f"[DeepseekV3ForCausalLM::__init__] Restoring original mapping."
-            )
+                "[DeepseekV3ForCausalLM::__init__] Restoring original mapping.")
             model_config._frozen = False
             model_config.mapping = self.mapping_with_cp
             model_config._frozen = True
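The unchanged context around the second hunk shows a freeze/unfreeze dance: the config rejects attribute writes while _frozen is True, so the code flips the flag around a deliberate mutation. A self-contained sketch of that pattern, assuming a simple _frozen guard (SimpleConfig is a hypothetical stand-in, not the real ModelConfig):

class SimpleConfig:
    """Toy config that blocks attribute writes while frozen."""

    def __init__(self):
        object.__setattr__(self, "_frozen", False)
        self.mapping = None
        self._frozen = True

    def __setattr__(self, name, value):
        # Allow toggling the flag itself; block everything else when frozen.
        if getattr(self, "_frozen", False) and name != "_frozen":
            raise AttributeError(f"config is frozen; cannot set {name!r}")
        object.__setattr__(self, name, value)

cfg = SimpleConfig()
cfg._frozen = False        # unfreeze before the deliberate write...
cfg.mapping = "restored"
cfg._frozen = True         # ...then freeze again, as the diff context does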
