
Commit 8d4926e

pytorchmergebot authored and committed
Fix unused variables in test/torch.py (pytorch#143399)
Pull Request resolved: pytorch#143399
Approved by: https://github.com/albanD
1 parent 863e6e4 commit 8d4926e
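
All of the edits below apply one of two mechanical cleanups to variables that are assigned but never read: either the binding is dropped and the call is kept only for its side effect (typically inside an assertRaises/assertWarns block, or just to exercise the code path), or the name is kept but prefixed with an underscore when the object has to stay alive for the test to keep its meaning. A minimal sketch of both patterns, written against plain torch APIs purely for illustration (the function names here are not from the PR):

    import weakref

    import torch

    def drop_the_binding():
        x = torch.ones(2, 3)
        # Before: y = x.sum()  -- `y` was never read.
        # After: keep the call for coverage, discard the result.
        x.sum()

    def keep_an_underscored_binding():
        a = torch.ones(2)
        # The weakref object itself must stay referenced, so the assignment
        # is kept and only renamed to signal it is intentionally unused.
        _wa = weakref.ref(a)
        del a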

File tree

1 file changed (+19, −30 lines)


test/test_torch.py

Lines changed: 19 additions & 30 deletions
@@ -118,7 +118,7 @@ def test_dataloader_vitals(self):
         inps = torch.arange(10 * 5, dtype=torch.float32).view(10, 5)
         tgts = torch.arange(10 * 5, dtype=torch.float32).view(10, 5)
         dataset = torch.utils.data.TensorDataset(inps, tgts)
-        loader = torch.utils.data.DataLoader(dataset, batch_size=2)
+        torch.utils.data.DataLoader(dataset, batch_size=2)
         self.assertIn('Dataloader.enabled\t\t True', torch.read_vitals())

     # FIXME: document or deprecate whatever this is
@@ -392,7 +392,7 @@ def test_module_share_memory(self):
         # Test fix for issue #80733
         # See https://github.com/pytorch/pytorch/issues/80733
         model = torch.nn.Linear(3, 1)
-        model_cuda = model.to('cuda')
+        _model_cuda = model.to('cuda')
         model.share_memory()

     @dtypes(torch.float32, torch.complex64)
@@ -644,8 +644,8 @@ def test_scalar_check(self, device):
         self.assertEqual((1,), torch.masked_select(zero_d_bool, one_d_bool).shape)
         self.assertEqual((1,), torch.masked_select(one_d_bool, zero_d_bool).shape)

-        zero_d_uint8 = torch.tensor(1, dtype=torch.uint8, device=device)
-        one_d_uint8 = torch.tensor([1], dtype=torch.uint8, device=device)
+        torch.tensor(1, dtype=torch.uint8, device=device)
+        torch.tensor([1], dtype=torch.uint8, device=device)

         # mode
         self.assertEqual([(), ()], [x.shape for x in torch.mode(zero_d, dim=0, keepdim=True)])
@@ -955,7 +955,7 @@ def test_dtypetensor_warnings(self, device):
         t = torch.cuda.FloatTensor([0])

         with self.assertWarnsOnceRegex(UserWarning, msg):
-            t = torch.cuda.DoubleTensor([0])
+            torch.cuda.DoubleTensor([0])

     def test_set_default_tensor_type_warnings(self, device):
         msg = '.*is deprecated as of PyTorch 2.1, please use torch.set_default_dtype().*'
@@ -1007,7 +1007,7 @@ def test_conv_transposed_large(self, device):
                              stride=2, padding=2, output_padding=1).to(device)

         x = torch.rand([1, 64, 8, 128, 172]).to(device)
-        y = conv(x)
+        conv(x)

     def test_is_set_to(self, device):
         t1 = torch.empty(3, 4, 9, 10, device=device)
@@ -1222,7 +1222,6 @@ def test_case_info(fn_name, config):
             return f'function "{fn_name}" with config "{"" if config is None else config}"'

         # Create processes to test each combination of test cases and config settings
-        processes = []
         for fn_name, arg_sizes in test_cases:
             for config, is_config_deterministic in test_configs:
                 env = os.environ.copy()
@@ -2614,7 +2613,6 @@ def test_cdist_same_inputs(self, device):
         x = torch.randn(sizex, device=device, dtype=torch.float)
         dist_grad = torch.randn((1, 27, 27), device=device, dtype=torch.float)
         y = x.clone()
-        eps = 1e-6
         x.requires_grad = True
         d = torch.cdist(x, y)
         d.backward(dist_grad)
@@ -3172,7 +3170,7 @@ def test_copy_all_dtypes_and_devices(self, device):
         from copy import copy
         for dt in all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16):
             x = torch.tensor([1, 2, 3, 4], dtype=dt, device=device)
-            x_clone = x.clone()
+            _x_clone = x.clone()
             y = copy(x)
             y.fill_(1)
             # copy is a shallow copy, only copies the tensor view,
@@ -3994,10 +3992,6 @@ def test_masked_scatter_large_tensor(self, device):
     # FIXME: find a test suite for the masked select operator
     @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
     def test_masked_select(self, device, dtype):
-        if device == 'cpu':
-            warn = 'masked_select received a mask with dtype torch.uint8,'
-        else:
-            warn = 'indexing with dtype torch.uint8 is now deprecated, pl'
         for maskType in integral_types_and(torch.bool):
             num_src = 10
             src = torch.tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=dtype, device=device)
@@ -4506,9 +4500,7 @@ def test_index_copy_mem_overlap(self, device):
     @onlyNativeDeviceTypes
     def test_index_fill_mem_overlap(self, device):
         x = torch.rand((1,), device=device).expand((6,))
-        y = torch.rand((6,), device=device)
         ind = torch.tensor([2, 1, 0], device=device)
-        value = torch.rand((3,), device=device)

         with self.assertWarnsRegex(UserWarning, "index_fill_ on expanded tensors"):
             x.index_fill_(0, ind, 1.0)
@@ -4871,7 +4863,7 @@ def test_helper(x, memory_format):

             sparse = x.to_sparse()
             with self.assertRaises(RuntimeError):
-                z = torch.empty_like(sparse, memory_format=torch.preserve_format)
+                torch.empty_like(sparse, memory_format=torch.preserve_format)

         test_helper(torch.randn(4, 3, 8, 8, device=device), torch.channels_last)
         test_helper(torch.randn(4, 3, 8, 8, 8, device=device), torch.channels_last_3d)
@@ -5277,7 +5269,7 @@ def test_lazy_clone_view_materialize(self, device, dtype):
     def test_lazy_clone_binary_op_no_materialize(self, device, dtype):
         t = torch.tensor([[0, 1], [2, 3]], device=device, dtype=dtype)
         clone = t._lazy_clone()
-        res = t + clone
+        t + clone
         self.assertTrue(torch._C._is_cow_tensor(t))
         self.assertTrue(torch._C._is_cow_tensor(clone))

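The hunk above is one place where the right-hand side is deliberately kept as a bare expression: the point of test_lazy_clone_binary_op_no_materialize is that a read-only binary op on copy-on-write tensors does not force a copy, so evaluating `t + clone` is the thing under test even though its result (`res`) was never read. A condensed sketch of that intent, using the private helpers visible in the hunk (other test plumbing omitted, so treat it as illustrative rather than the exact test):

    import torch

    t = torch.tensor([[0, 1], [2, 3]])
    clone = t._lazy_clone()            # copy-on-write clone; no data copied yet
    t + clone                          # read-only op; result discarded, the read is the test
    assert torch._C._is_cow_tensor(t)      # still COW: the add did not materialize a copy
    assert torch._C._is_cow_tensor(clone)
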
@@ -5404,7 +5396,6 @@ def make_prob_dist(shape, is_contiguous):
         sample_indices = torch.multinomial(prob_dist, n_sample, True)
         for sample_index in sample_indices:
             self.assertNotEqual(sample_index, zero_prob_idx, msg="sampled an index with zero probability")
-        s_dim = sample_indices.dim()
         self.assertEqual(sample_indices.dim(), 1, msg="wrong number of dimensions")
         self.assertEqual(prob_dist.dim(), 1, msg="wrong number of prob_dist dimensions")
         self.assertEqual(sample_indices.size(0), n_sample, msg="wrong number of samples")
@@ -6614,9 +6605,9 @@ def test_advancedindex_mixed_devices_error(self, devices) -> None:
         def test(x: torch.Tensor, ia: torch.Tensor, ib: torch.Tensor) -> None:
             # test getitem
             with self.assertRaisesRegex(RuntimeError, fr"indices should be either .* \({x.device}\)"):
-                value = x[:, ia, None, ib, 0]
+                x[:, ia, None, ib, 0]
             with self.assertRaisesRegex(RuntimeError, fr"indices should be either .* \({x.device}\)"):
-                value = x[ib]
+                x[ib]

         cpu = torch.device('cpu')
         for device in devices:
@@ -7181,7 +7172,6 @@ def test_tensor_set_errors(self):
     # NOTE: test_equal will be deprecated in favor of torch.testing.assert_close
     # once torch.testing is out of beta
     def test_equal(self):
-        devices = [torch.cpu, torch.cuda]
         for device in ["cpu", "cuda"]:
             if device == "cuda" and not torch.cuda.is_available():
                 continue
@@ -8520,7 +8510,7 @@ def test_error_msg_type_translation(self):
         weight = torch.nn.Parameter(torch.zeros(1, 1, 1, 3, dtype=torch.double))
         model = torch.nn.Conv2d(1, 1, (1, 3), stride=1, padding=0, bias=False)
         model.weight = weight
-        out = model(input)
+        model(input)

     def test_apply(self):
         x = torch.arange(1, 6)
@@ -8696,7 +8686,7 @@ def test_has_internal_overlap(self):
         self.assertEqual(torch._debug_has_internal_overlap(c), OVERLAP_TOO_HARD)

     def test_allow_tensor_metadata_change(self):
-        a = torch.ones(2, 3)
+        torch.ones(2, 3)
         # Metadata changes are allowed on view tensors that are created from detach().

     def test_memory_format(self):
@@ -8991,7 +8981,6 @@ def test_add_meta_scalar(self):
         self.assertEqual(y.size(), x.size())

     def test_normal_shape(self):
-        warned = False
         for device in get_all_device_types():
             tensor1 = torch.rand(1, device=device)
             tensor4 = torch.rand(4, device=device)
@@ -9131,7 +9120,7 @@ def test_dot_data_use(self):
         weight = torch.zeros(1, 1, 1, 3, dtype=torch.complex64)
         model = torch.nn.Conv2d(1, 1, (1, 3), stride=1, padding=0, bias=False)
         model.weight.data = weight
-        out = model(input)
+        model(input)

     def test_empty_storage_view(self):
         # we should be able to "modify" slices of a 0-element
@@ -9934,7 +9923,7 @@ def __new__(cls, x, *args, **kwargs):
                 return super().__new__(cls, x, *args, **kwargs)

         x = torch.ones(5)
-        test_tensor = TestTensor(x)
+        TestTensor(x)

     def test_storage_base_new(self):

@@ -9946,7 +9935,7 @@ def __new__(cls, x, *args, **kwargs):
                 return super().__new__(cls, x, *args, **kwargs)

         x = torch.UntypedStorage(5)
-        test_storage = TestStorage(x)
+        TestStorage(x)

     def test_pyobj_preserved(self):
         x = torch.empty(2)
@@ -10547,7 +10536,7 @@ def test_tensor_fix_weakref_no_leak(self):
         def callback(w):
             nonlocal called
             called = True
-        wa = weakref.ref(a, callback)
+        _wa = weakref.ref(a, callback)
         a._fix_weakref()
         del a

@@ -10564,7 +10553,7 @@ def test_storage_fix_weakref_no_leak(self):
         def callback(w):
             nonlocal called
             called = True
-        wa = weakref.ref(a, callback)
+        _wa = weakref.ref(a, callback)
         a._fix_weakref()
         del a

@@ -10745,7 +10734,7 @@ def test_swap_basic(self):
         with self.assertRaisesRegex(RuntimeError, "AccumulateGrad node that was poisoned by swap_tensors"):
             out.sum().backward()

-        wr = weakref.ref(t1)
+        _wr = weakref.ref(t1)
         with self.assertRaisesRegex(RuntimeError, "has weakref"):
             torch.utils.swap_tensors(t1, t2)
