Commit e1a1aea

cyyever authored and pytorchmergebot committed
[1/N] Use key in dict for existence checks (pytorch#167035)
This PR uses `key in dict` expressions for existence checks of dict elements in Python code. This operation is more efficient than `key in dict.keys()`.

Pull Request resolved: pytorch#167035
Approved by: https://github.com/janeyx99
1 parent 943227f · commit e1a1aea

39 files changed: +50 -52 lines
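As background on the idiom this commit applies throughout (this sketch is not part of the diff): a `dict` supports membership tests and iteration directly, so going through `.keys()` first only adds the construction of a view object. A minimal standalone illustration of the equivalence and the efficiency claim, with timings that are indicative only:

```python
import timeit

d = {f"key{i}": i for i in range(1000)}

# Membership: both forms test the same thing; the bare dict avoids
# materializing a keys view on every check.
assert ("key5" in d) == ("key5" in d.keys())

# Iteration: iterating a dict yields its keys, so .keys() is redundant.
assert list(d) == list(d.keys())

# Rough comparison; absolute numbers vary by machine and interpreter.
print(timeit.timeit(lambda: "key5" in d, number=1_000_000))
print(timeit.timeit(lambda: "key5" in d.keys(), number=1_000_000))  # typically a bit slower
```

Every hunk below is one of these two patterns: a membership test (`x in d.keys()` becomes `x in d`) or a key iteration (`for k in d.keys():` becomes `for k in d:`).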

torch/_dynamo/backends/registry.py

Lines changed: 1 addition & 1 deletion
@@ -146,7 +146,7 @@ def list_backends(exclude_tags=("debug", "experimental")) -> list[str]:  # type:
 
     backends = [
         name
-        for name in _BACKENDS.keys()
+        for name in _BACKENDS
         if name not in _COMPILER_FNS
         or not exclude_tags_set.intersection(_COMPILER_FNS[name]._tags)  # type: ignore[attr-defined]
     ]

torch/_dynamo/output_graph.py

Lines changed: 1 addition & 1 deletion
@@ -2587,7 +2587,7 @@ def update_used_symbols(
                 real_script_obj
             ):
                 flat_dict = dict(real_script_obj.__obj_flatten__())  # type: ignore[attr-defined]
-                for attr in flat_dict.keys():
+                for attr in flat_dict:
                     fake_attr_val = getattr(
                         fake_script_obj.wrapped_obj, attr
                     )

torch/_export/converter.py

Lines changed: 1 addition & 1 deletion
@@ -443,7 +443,7 @@ def __init__(
         self.blocks_to_lifted_attrs = blocks_to_lifted_attrs
 
         # Populate methods for the standard operators.
-        for k in kind_to_standard_operators.keys():
+        for k in kind_to_standard_operators:
             handler_func_name = ir_name_to_func_name(k)
             # Create an indirect function call:
             # convert_<namespace>_<opname> --> lambda node: _convert_standard_operator(node)

torch/_guards.py

Lines changed: 1 addition & 1 deletion
@@ -904,7 +904,7 @@ def patch(**kwargs: Any) -> Generator[None, None, None]:
     prior = {}
     ctx = TracingContext.get()
 
-    for key in kwargs.keys():
+    for key in kwargs:
         # KeyError on invalid entry
         prior[key] = getattr(ctx, key)
     for key, val in kwargs.items():

torch/_inductor/augmented_graph_helper.py

Lines changed: 1 addition & 1 deletion
@@ -164,7 +164,7 @@ def transfer_erased_node_deps(self, erased_to_new: dict[fx.Node, fx.Node]) -> None
             self.extra_uses[new_node].add(updated_use)
 
         # Clean up erased nodes
-        for old_node in erased_merge_sets.keys():
+        for old_node in erased_merge_sets:
             self.extra_deps[old_node].clear()
             self.extra_uses[old_node].clear()
             del self.merge_sets[old_node]

torch/_inductor/bounds.py

Lines changed: 1 addition & 1 deletion
@@ -86,7 +86,7 @@ def swap_submodules(
         self, submodules: dict[str, Callable[..., Any]]
     ) -> dict[str, Callable[..., ValueRanges[Expr]]]:
         result: dict[str, Callable[..., ValueRanges[Expr]]] = {}
-        for key in submodules.keys():
+        for key in submodules:
             if key == "get_index":
                 result[key] = self.get_index
             elif "masked_subblock" in key:

torch/_inductor/codecache.py

Lines changed: 1 addition & 1 deletion
@@ -1681,7 +1681,7 @@ def set(
 
         if config.aot_inductor.emit_multi_arch_kernel:
             bin_type_to_ext = {"cubin": ".fatbin", "spv": ".spv"}
-            assert bin_type in bin_type_to_ext.keys(), (
+            assert bin_type in bin_type_to_ext, (
                 "multi_arch_kernel_binary only supported in CUDA/XPU"
             )
             base_path, _ = os.path.splitext(bin_path)

torch/_inductor/codegen/cpp_wrapper_gpu.py

Lines changed: 2 additions & 2 deletions
@@ -337,7 +337,7 @@ def process_args_for_input_shape(arg, arg_type, arg_signature=None):
             elif (
                 isinstance(arg_type, type(SymbolicCallArg))
                 and arg_signature is not None
-                and arg_signature in signature2dtype.keys()
+                and arg_signature in signature2dtype
             ) or arg_type in (sympy.Integer, int, sympy.Float, float):
                 write_dummy_scalar_ivalue(arg_name)
             elif arg_signature and arg_signature.startswith("tensordesc<"):
@@ -719,7 +719,7 @@ def process_args(arg, arg_type, arg_signature=None):
             elif (
                 isinstance(arg_type, type(SymbolicCallArg))
                 and arg_signature is not None
-                and arg_signature in signature2dtype.keys()
+                and arg_signature in signature2dtype
             ):
                 code.writeline(
                     f"{signature2dtype[arg_signature]} {var_name} = {cexpr(arg)};"

torch/_inductor/codegen/triton_combo_kernel.py

Lines changed: 1 addition & 1 deletion
@@ -699,7 +699,7 @@ def get_block_args(self) -> list[ConstexprArg]:
             block_names[f"{tree.prefix.upper()}BLOCK"] = tree.prefix
         self.block_args = list(block_names.keys())
 
-        return [ConstexprArg(x) for x in block_names.keys()]
+        return [ConstexprArg(x) for x in block_names]
 
     def add_numel_to_args(
         self, argdefs: list[ArgName], signature: list[Any]

torch/_inductor/cpu_vec_isa.py

Lines changed: 1 addition & 1 deletion
@@ -430,7 +430,7 @@ def get_isa_from_cpu_capability(
         "avx2": "avx2",
         "avx512": "avx512",
     }
-    if capability in capability_to_isa_str.keys():
+    if capability in capability_to_isa_str:
         # pyrefly: ignore [index-error]
         isa_str = capability_to_isa_str[capability]
         if isa_str == "INVALID_VEC_ISA":
