Skip to content

Commit 23e8c66

Browse files
author
Samantha Andow
authored
Fix cuda tests, skip mac unexpected successes (#864)
* a terrible hack that makes cuda work again
* add mac test skips
* reinstall with cuda pytorch — idk why this is needed
* add higher tolerance for conv_transpose3d in vmap exhaustive
* try without pip
* use only conda, xfail vjpvmap clamp
1 parent eb12598 commit 23e8c66

File tree

3 files changed

+11
-11
lines changed

3 files changed

+11
-11
lines changed

.circleci/unittest/linux/scripts/install.sh

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -53,11 +53,9 @@ gcc --version
5353
pip install expecttest
5454

5555
if [ "${CU_VERSION:-}" == cpu ] ; then
56-
# conda install -y pytorch torchvision cpuonly -c pytorch-nightly
57-
# use pip to install pytorch as conda can frequently pick older release
58-
pip install torch torchvision -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html --pre
56+
conda install -y pytorch torchvision cpuonly -c pytorch-nightly
5957
PYTORCH_VERSION="$(python -c "import torch; print(torch.__version__)")" python setup.py develop bdist_wheel -d $WHEELS_FOLDER
6058
else
61-
pip install torch torchvision -f https://download.pytorch.org/whl/nightly/cu102/torch_nightly.html --pre
59+
conda install -y pytorch cudatoolkit=10.2 -c pytorch-nightly
6260
PYTORCH_VERSION="$(python -c "import torch; print(torch.__version__)")" python setup.py develop bdist_wheel -d $WHEELS_FOLDER
6361
fi

test/test_ops.py

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -398,7 +398,7 @@ def wrapped_fn(*args, **kwargs):
398398
# BUG: runs and produces numerical differences
399399
skip('nn.functional.max_unpool1d'), # fails everywhere except on mac
400400
skip('nn.functional.max_unpool2d'), # fails everywhere except on windows
401-
xfail('nn.functional.max_unpool3d'),
401+
skip('nn.functional.max_unpool3d'), # fails everywhere except on mac
402402
}))
403403
@opsToleranceOverride('TestOperators', 'test_jvp', (
404404
tol1('nn.functional.conv_transpose3d',
@@ -709,9 +709,9 @@ def test_vmapvjp(self, device, dtype, op):
709709
xfail('double'), # required rank 4 tensor to use channels_last format
710710
711711
# BUG: runs and produces numerical differences
712-
xfail('nn.functional.max_unpool1d', device_type='cpu'),
713-
xfail('nn.functional.max_unpool2d'),
714-
xfail('nn.functional.max_unpool3d'),
712+
skip('nn.functional.max_unpool1d', device_type='cpu'), # fails everywhere except on mac
713+
skip('nn.functional.max_unpool2d'), # fails everywhere except on mac
714+
skip('nn.functional.max_unpool3d'), # fails everywhere except on mac
715715
716716
xfail('put'), # calls put_ during vmap with only vmaps over other, not self
717717
})
@@ -781,9 +781,8 @@ def test_vmapjvp(self, device, dtype, op):
781781
xfail('double'), # required rank 4 tensor to use channels_last format
782782

783783
skip('nn.functional.max_unpool1d'), # Flaky, seems to sometimes his max_unpool2d
784-
# BUG: runs and produces numerical differences
785-
xfail('nn.functional.max_unpool2d'),
786-
xfail('nn.functional.max_unpool3d'),
784+
skip('nn.functional.max_unpool2d'), # fails everywhere except on mac
785+
skip('nn.functional.max_unpool3d'), # fails everywhere except on mac
787786

788787
xfail('put'), # calls put_ during vmap with only vmaps over other, not self
789788
xfail('nn.functional.prelu'), # Call Tensor.as_strided
@@ -1051,6 +1050,7 @@ def test():
10511050
xfail('pca_lowrank', ''),
10521051
xfail('nn.functional.feature_alpha_dropout', 'without_train'),
10531052
xfail('nn.functional.feature_alpha_dropout', 'with_train'),
1053+
xfail('clamp'),
10541054
# something weird happening with channels_last
10551055
xfail('bfloat16'),
10561056
xfail('double'),

test/test_vmap.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3135,6 +3135,8 @@ class TestVmapOperatorsOpInfo(TestCase):
31353135
@opsToleranceOverride('TestVmapOperatorsOpInfo', 'test_vmap_exhaustive', (
31363136
tol1('linalg.det',
31373137
{torch.float32: tol(atol=1e-04, rtol=1e-04)}, device_type='cuda'),
3138+
tol1('nn.functional.conv_transpose3d',
3139+
{torch.float32: tol(atol=1.5e-04, rtol=1e-04)}, device_type='cuda'),
31383140
))
31393141
@toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04)})
31403142
@skipOps('TestVmapOperatorsOpInfo', 'test_vmap_exhaustive', vmap_fail)

0 commit comments

Comments (0)