@@ -664,6 +664,8 @@ TEST_F(AtenXlaTensorTest, TestReflectionPad1dRank3) {
664
664
}
665
665
666
666
TEST_F (AtenXlaTensorTest, TestReflectionPad1dBackward) {
667
+ GTEST_SKIP () << " failing due to PyTorch upstream changes. "
668
+ << " See: https://github.com/pytorch/xla/issues/9651." ;
667
669
std::vector<int64_t > pad{2 , 2 };
668
670
auto testfn = [&](const std::vector<torch::Tensor>& inputs) -> torch::Tensor {
669
671
return torch::reflection_pad1d (inputs[0 ], pad);
@@ -709,6 +711,8 @@ TEST_F(AtenXlaTensorTest, TestReflectionPad2dRank4) {
709
711
}
710
712
711
713
TEST_F (AtenXlaTensorTest, TestReflectionPad2dBackward) {
714
+ GTEST_SKIP () << " failing due to PyTorch upstream changes. "
715
+ << " See: https://github.com/pytorch/xla/issues/9651." ;
712
716
std::vector<int64_t > pad{2 , 3 , 1 , 2 };
713
717
auto testfn = [&](const std::vector<torch::Tensor>& inputs) -> torch::Tensor {
714
718
return torch::reflection_pad2d (inputs[0 ], pad);
@@ -754,6 +758,8 @@ TEST_F(AtenXlaTensorTest, TestReflectionPad3dRank4) {
754
758
}
755
759
756
760
TEST_F (AtenXlaTensorTest, TestReflectionPad3dBackward) {
761
+ GTEST_SKIP () << " failing due to PyTorch upstream changes. "
762
+ << " See: https://github.com/pytorch/xla/issues/9651." ;
757
763
std::vector<int64_t > pad{1 , 1 , 1 , 1 , 1 , 1 };
758
764
auto testfn = [&](const std::vector<torch::Tensor>& inputs) -> torch::Tensor {
759
765
return torch::reflection_pad3d (inputs[0 ], pad);
@@ -801,6 +807,8 @@ TEST_F(AtenXlaTensorTest, TestReplicationPad1dZeroPad) {
801
807
}
802
808
803
809
TEST_F (AtenXlaTensorTest, TestReplicationPad1dBackward) {
810
+ GTEST_SKIP () << " failing due to PyTorch upstream changes. "
811
+ << " See: https://github.com/pytorch/xla/issues/9651." ;
804
812
std::vector<int64_t > pad{2 , 3 };
805
813
auto testfn = [&](const std::vector<torch::Tensor>& inputs) -> torch::Tensor {
806
814
return torch::replication_pad1d (inputs[0 ], pad);
@@ -848,6 +856,8 @@ TEST_F(AtenXlaTensorTest, TestReplicationPad2dZeroPad) {
848
856
}
849
857
850
858
TEST_F (AtenXlaTensorTest, TestReplicationPad2dBackward) {
859
+ GTEST_SKIP () << " failing due to PyTorch upstream changes. "
860
+ << " See: https://github.com/pytorch/xla/issues/9651." ;
851
861
std::vector<int64_t > pad{2 , 3 , 1 , 1 };
852
862
auto testfn = [&](const std::vector<torch::Tensor>& inputs) -> torch::Tensor {
853
863
return torch::replication_pad2d (inputs[0 ], pad);
@@ -895,6 +905,8 @@ TEST_F(AtenXlaTensorTest, TestReplicationPad3dZeroPad) {
895
905
}
896
906
897
907
TEST_F (AtenXlaTensorTest, TestReplicationPad3dBackward) {
908
+ GTEST_SKIP () << " failing due to PyTorch upstream changes. "
909
+ << " See: https://github.com/pytorch/xla/issues/9651." ;
898
910
std::vector<int64_t > pad{2 , 3 , 1 , 1 , 1 , 1 };
899
911
auto testfn = [&](const std::vector<torch::Tensor>& inputs) -> torch::Tensor {
900
912
return torch::replication_pad3d (inputs[0 ], pad);
@@ -1131,6 +1143,8 @@ TEST_F(AtenXlaTensorTest, TestAsStridedMultipleDimMismatch) {
1131
1143
}
1132
1144
1133
1145
TEST_F (AtenXlaTensorTest, TestAvgPool2DBackward) {
1146
+ GTEST_SKIP () << " failing due to PyTorch upstream changes. "
1147
+ << " See: https://github.com/pytorch/xla/issues/9651." ;
1134
1148
int kernel_size = 2 ;
1135
1149
for (int stride = 1 ; stride <= 2 ; ++stride) {
1136
1150
for (int padding = 0 ; padding <= 1 ; ++padding) {
@@ -1161,6 +1175,8 @@ TEST_F(AtenXlaTensorTest, TestAvgPool2DBackward) {
1161
1175
}
1162
1176
1163
1177
TEST_F (AtenXlaTensorTest, TestAvgPool3DBackward) {
1178
+ GTEST_SKIP () << " failing due to PyTorch upstream changes. "
1179
+ << " See: https://github.com/pytorch/xla/issues/9651." ;
1164
1180
int kernel_size = 2 ;
1165
1181
for (int stride = 1 ; stride <= 2 ; ++stride) {
1166
1182
for (int padding = 0 ; padding <= 1 ; ++padding) {
@@ -1192,6 +1208,8 @@ TEST_F(AtenXlaTensorTest, TestAvgPool3DBackward) {
1192
1208
}
1193
1209
1194
1210
TEST_F (AtenXlaTensorTest, TestAvgPool2DNoBatchBackward) {
1211
+ GTEST_SKIP () << " failing due to PyTorch upstream changes. "
1212
+ << " See: https://github.com/pytorch/xla/issues/9651." ;
1195
1213
int kernel_size = 2 ;
1196
1214
for (int stride = 1 ; stride <= 2 ; ++stride) {
1197
1215
for (int padding = 0 ; padding <= 1 ; ++padding) {
@@ -1222,6 +1240,8 @@ TEST_F(AtenXlaTensorTest, TestAvgPool2DNoBatchBackward) {
1222
1240
}
1223
1241
1224
1242
TEST_F (AtenXlaTensorTest, TestAvgPool3DNoBatchBackward) {
1243
+ GTEST_SKIP () << " failing due to PyTorch upstream changes. "
1244
+ << " See: https://github.com/pytorch/xla/issues/9651." ;
1225
1245
int kernel_size = 2 ;
1226
1246
for (int stride = 1 ; stride <= 2 ; ++stride) {
1227
1247
for (int padding = 0 ; padding <= 1 ; ++padding) {
@@ -1253,6 +1273,8 @@ TEST_F(AtenXlaTensorTest, TestAvgPool3DNoBatchBackward) {
1253
1273
}
1254
1274
1255
1275
TEST_F (AtenXlaTensorTest, TestAdaptiveAvgPool3DNoBatchBackward) {
1276
+ GTEST_SKIP () << " failing due to PyTorch upstream changes. "
1277
+ << " See: https://github.com/pytorch/xla/issues/9651." ;
1256
1278
for (int64_t output_size : {7 , 4 }) {
1257
1279
auto testfn =
1258
1280
[&](const std::vector<torch::Tensor>& inputs) -> torch::Tensor {
@@ -1273,6 +1295,8 @@ TEST_F(AtenXlaTensorTest, TestAdaptiveAvgPool3DNoBatchBackward) {
1273
1295
}
1274
1296
1275
1297
TEST_F (AtenXlaTensorTest, TestAdaptiveAvgPool3DBackward) {
1298
+ GTEST_SKIP () << " failing due to PyTorch upstream changes. "
1299
+ << " See: https://github.com/pytorch/xla/issues/9651." ;
1276
1300
for (int64_t output_size : {7 , 4 }) {
1277
1301
auto testfn =
1278
1302
[&](const std::vector<torch::Tensor>& inputs) -> torch::Tensor {
@@ -1293,6 +1317,8 @@ TEST_F(AtenXlaTensorTest, TestAdaptiveAvgPool3DBackward) {
1293
1317
}
1294
1318
1295
1319
TEST_F (AtenXlaTensorTest, TestAdaptiveAvgPool2DBackward) {
1320
+ GTEST_SKIP () << " failing due to PyTorch upstream changes. "
1321
+ << " See: https://github.com/pytorch/xla/issues/9651." ;
1296
1322
for (int64_t output_size : {7 , 8 }) {
1297
1323
auto testfn =
1298
1324
[&](const std::vector<torch::Tensor>& inputs) -> torch::Tensor {
@@ -1312,6 +1338,8 @@ TEST_F(AtenXlaTensorTest, TestAdaptiveAvgPool2DBackward) {
1312
1338
}
1313
1339
1314
1340
TEST_F (AtenXlaTensorTest, TestAdaptiveAvgPool2DNoBatchBackward) {
1341
+ GTEST_SKIP () << " failing due to PyTorch upstream changes. "
1342
+ << " See: https://github.com/pytorch/xla/issues/9651." ;
1315
1343
for (int64_t output_size : {7 , 8 }) {
1316
1344
auto testfn =
1317
1345
[&](const std::vector<torch::Tensor>& inputs) -> torch::Tensor {
@@ -1329,6 +1357,8 @@ TEST_F(AtenXlaTensorTest, TestAdaptiveAvgPool2DNoBatchBackward) {
1329
1357
}
1330
1358
1331
1359
TEST_F (AtenXlaTensorTest, TestConv3DBackward) {
1360
+ GTEST_SKIP () << " failing due to PyTorch upstream changes. "
1361
+ << " See: https://github.com/pytorch/xla/issues/9651." ;
1332
1362
int in_channels = 4 ;
1333
1363
int out_channels = 8 ;
1334
1364
int kernel_size = 5 ;
0 commit comments