@@ -26,15 +26,22 @@ def stable_softmax(x):
26
26
27
27
28
28
class TestSoftmaxOp (OpTest ):
29
+ def get_x_shape (self ):
30
+ return [10 , 10 ]
31
+
29
32
def setUp (self ):
30
33
self .op_type = "softmax"
31
34
self .use_cudnn = False
32
35
self .use_mkldnn = False
33
36
self .dtype = np .float32
34
37
self .init_kernel_type ()
38
+ self .shape = self .get_x_shape ()
39
+
40
+ x = np .random .uniform (0.1 , 1 , self .shape ).astype (self .dtype )
41
+ out = np .apply_along_axis (stable_softmax , 1 ,
42
+ x .reshape ([- 1 , self .shape [- 1 ]]))
43
+ out = out .reshape (self .shape )
35
44
36
- x = np .random .uniform (0.1 , 1 , [10 , 10 ]).astype (self .dtype )
37
- out = np .apply_along_axis (stable_softmax , 1 , x )
38
45
self .inputs = {'X' : OpTest .np_dtype_to_fluid_dtype (x )}
39
46
self .outputs = {'Out' : out }
40
47
self .attrs = {
@@ -63,13 +70,25 @@ def test_check_grad(self):
63
70
self .check_grad (["X" ], "Out" , max_relative_error = 0.01 )
64
71
65
72
73
class TestSoftmaxOp2(TestSoftmaxOp):
    """Same softmax check as the base test, but with a 4-D input shape."""

    def get_x_shape(self):
        # Higher-rank input; the base setUp flattens all but the last axis
        # before applying stable_softmax row-wise.
        return list((2, 3, 4, 5))
77
+
66
78
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxCUDNNOp(TestSoftmaxOp):
    """Softmax test with the ``use_cudnn`` flag enabled.

    Skipped entirely when the core library was built without CUDA.
    """

    def init_kernel_type(self):
        # Base setUp defaults use_cudnn to False, then calls this hook;
        # flipping it here routes the op through the cuDNN path.
        self.use_cudnn = True
71
83
72
84
85
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxCUDNNOp2(TestSoftmaxCUDNNOp):
    """4-D input variant of the cuDNN-flagged softmax test."""

    def get_x_shape(self):
        # setUp collapses the leading axes, so softmax still runs over
        # the final dimension of this shape.
        return list((2, 3, 4, 5))
91
+
73
92
@unittest .skipIf (not core .is_compiled_with_cuda (),
74
93
"core is not compiled with CUDA" )
75
94
class TestSoftmaxFP16Op (TestSoftmaxOp ):
@@ -83,6 +102,13 @@ def test_check_output(self):
83
102
self .check_output_with_place (place , atol = 1e-3 )
84
103
85
104
105
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxFP16Op2(TestSoftmaxFP16Op):
    """4-D input variant of the FP16 softmax test (CUDA builds only)."""

    def get_x_shape(self):
        # Same higher-rank shape used by the other *Op2 variants.
        return list((2, 3, 4, 5))
110
+
111
+
86
112
@unittest .skipIf (not core .is_compiled_with_cuda (),
87
113
"core is not compiled with CUDA" )
88
114
class TestSoftmaxFP16CUDNNOp (TestSoftmaxOp ):
@@ -97,10 +123,22 @@ def test_check_output(self):
97
123
self .check_output_with_place (place , atol = 1e-3 )
98
124
99
125
126
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftmaxFP16CUDNNOp2(TestSoftmaxFP16CUDNNOp):
    """4-D input variant of the FP16 + cuDNN softmax test."""

    def get_x_shape(self):
        # Mirrors the shape override used by the other *Op2 subclasses.
        return list((2, 3, 4, 5))
132
+
100
133
class TestSoftmaxMKLDNNOp(TestSoftmaxOp):
    """Softmax test with the ``use_mkldnn`` flag enabled."""

    def init_kernel_type(self):
        # Base setUp initializes use_mkldnn to False and then invokes
        # this hook; enabling it here selects the MKLDNN kernel attr.
        self.use_mkldnn = True
103
136
104
137
138
class TestSoftmaxMKLDNNOp2(TestSoftmaxMKLDNNOp):
    """4-D input variant of the MKLDNN-flagged softmax test."""

    def get_x_shape(self):
        # Same 4-D shape as the other *Op2 variants; setUp reshapes it
        # to 2-D before computing the reference softmax.
        return list((2, 3, 4, 5))
142
+
105
143
# Standard unittest entry point so this test file can be run as a script.
if __name__ == "__main__":
    unittest.main()
0 commit comments