
Commit 80ec2ab

cyyever authored and pytorchmergebot committed
[8/N] Fix unused loop variables in tests (pytorch#166921)
This PR continues to fix or remove unused loop variables in tests.

Pull Request resolved: pytorch#166921
Approved by: https://github.com/mlazos
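The changes follow the standard Python conventions for loop variables that the body never reads: prefix them with an underscore, or drop the extra binding entirely. A minimal sketch of the patterns this series fixes (the names below are illustrative, not taken from the PR):

import itertools

pairs = [("a", 1), ("b", 2)]

# Pattern 1: prefix an intentionally unused variable with "_"
# so linters stop flagging it as unused.
for _key, value in pairs:
    print(value)

# Pattern 2: drop enumerate() when the index is never read.
for item in ["x", "y"]:  # instead of: for i, item in enumerate(...)
    print(item)

# Pattern 3: remove a whole dimension from itertools.product()
# when the loop body ignores the values it yields.
for left, right in itertools.product([1, 2], [3, 4]):
    print(left + right)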
1 parent c724f00 commit 80ec2ab


5 files changed: +9 −16 lines


test/quantization/core/test_workflow_ops.py

Lines changed: 2 additions & 2 deletions
@@ -368,8 +368,8 @@ def _test_forward_per_tensor_cachemask_impl(self, device):
         float_types = (torch.float32, torch.float16, torch.float64, torch.bfloat16)
         torch_types = (torch.qint8, torch.quint8)
         Xs = (torch.randn(4, 8, device=device), torch.randn(4, 16, device=device)[:, ::2])
-        tensor_qparam = (True, False)
-        for float_type, torch_type, X, tensor_qparams in itertools.product(float_types, torch_types, Xs, tensor_qparam):
+        tensor_qparams = (True, False)
+        for float_type, torch_type, X, tensor_qparam in itertools.product(float_types, torch_types, Xs, tensor_qparams):
             # pick the scale + zp so that some values get clipped
             X = X.to(float_type)
             obs = torch.ao.quantization.MinMaxObserver(torch_type)
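This hunk only untangles a singular/plural mix-up: the tuple being iterated now carries the plural name and the loop variable the singular one. A small illustration of the convention (illustrative names):

import itertools

tensor_qparams = (True, False)   # plural: the collection
dtypes = ("float32", "float16")

# singular: one element drawn from the collection per iteration
for dtype, tensor_qparam in itertools.product(dtypes, tensor_qparams):
    print(dtype, tensor_qparam)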

test/quantization/fx/test_quantize_fx.py

Lines changed: 1 addition & 1 deletion
@@ -8807,7 +8807,7 @@ def forward(self, indices, offsets):
 
         # check it works in None and static qconfig
         for qconfig in [None, default_qconfig]:
-            qconfig_dict = {"": default_qconfig}
+            qconfig_dict = {"": qconfig}
             m = M().eval()
             m = prepare_fx(model, qconfig_dict, example_inputs=example_inputs)
             self.checkGraphModuleNodes(m, expected_node_occurrence={
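This one is more than a style fix: the loop variable was supposed to be used, so with the old code both iterations built the same dict and the None-qconfig case was never actually exercised. A stripped-down sketch of the bug pattern (illustrative values, not the test's real objects):

default_qconfig = "static"

# Buggy: the body ignores the loop variable, so both iterations
# are identical and None is silently never tested.
for qconfig in [None, default_qconfig]:
    qconfig_dict = {"": default_qconfig}

# Fixed: each iteration uses its own qconfig.
for qconfig in [None, default_qconfig]:
    qconfig_dict = {"": qconfig}
    print(qconfig_dict)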

test/test_datapipe.py

Lines changed: 1 addition & 1 deletion
@@ -1136,7 +1136,7 @@ def test_fork_iterdatapipe(self):
             )
             break
         with warnings.catch_warnings(record=True) as wa:
-            for i, (n1, n2) in enumerate(zip(dp1, dp2)):
+            for n1, n2 in zip(dp1, dp2):
                 output1.append(n1)
                 output2.append(n2)
         self.assertEqual(len(wa), 1)

test/test_jit_fuser_te.py

Lines changed: 4 additions & 11 deletions
@@ -1682,11 +1682,8 @@ def apply(fn):
         ]
         dtypes = ["int", "float", "bool"]
         values = {"int": [10, 3], "float": [12.34, 2.78], "bool": [True, False]}
-        devices = self.devices
-        for dtype_x, dtype_y, op, device in product(
-            dtypes, dtypes, binary_ops, devices
-        ):
-            code = ir_template.format(**locals())
+        for dtype_x, dtype_y, op in product(dtypes, dtypes, binary_ops):
+            code = ir_template.format(dtype_x=dtype_x, dtype_y=dtype_y, op=op)
 
             # Interpret the graph
             try:
@@ -1701,9 +1698,7 @@ def apply(fn):
             try:
                 k = torch._C._te.TensorExprKernel(graph)
             except Exception as e:
-                raise RuntimeError(
-                    " ".join(["Compilation failed:", device, str(code)])
-                ) from e
+                raise RuntimeError(" ".join(["Compilation failed:", str(code)])) from e
 
             # Run the graph
             for x, y in product(values[dtype_x], values[dtype_y]):
@@ -1713,9 +1708,7 @@ def apply(fn):
                     self.assertEqual(ref, res)
                 except Exception as e:
                     raise RuntimeError(
-                        " ".join(
-                            ["Failed at runtime:", device, str(x), str(y), str(code)]
-                        )
+                        " ".join(["Failed at runtime:", str(x), str(y), str(code)])
                     ) from e
 
     def test_matmul(self):
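Besides dropping the unused device dimension from the product, the first hunk replaces ir_template.format(**locals()) with explicit keyword arguments. A minimal sketch of why the explicit form is safer (the template string here is hypothetical):

ir_template = "graph({dtype_x}, {dtype_y}): {op}"
dtype_x, dtype_y, op = "int", "float", "aten::add"

# format(**locals()) passes every local name into the template,
# which obscures the template's real inputs and can pick up
# unrelated variables by accident.
code = ir_template.format(**locals())

# Explicit keywords document exactly what the template consumes.
code = ir_template.format(dtype_x=dtype_x, dtype_y=dtype_y, op=op)
print(code)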

torch/_export/serde/serialize.py

Lines changed: 1 addition & 1 deletion
@@ -617,7 +617,7 @@ def get_triton_kernel_and_cache_entry(node: torch.fx.Node):
             return actual_kernel, matching_entries[0][1]
 
     if is_autotuner:
-        for sig_key, cache_entry in matching_entries:
+        for _sig_key, cache_entry in matching_entries:
             entry_metadata = cache_entry.metadata
             # pyrefly: ignore [missing-attribute]
             for config in kernel.configs:
