@@ -187,99 +187,126 @@ def __set_tensor__(name, data=None):
 
 
 class TestBatchNormOpInference(OpTest):
+    def setUp(self):
+        self.dtype = np.float32
+
     def __assert_close(self, tensor, np_array, msg, atol=1e-4):
         self.assertTrue(np.allclose(np.array(tensor), np_array, atol=atol), msg)
 
-    def test_inference(self):
-        def test_with_place(place, data_layout, dtype, shape):
-            epsilon = 0.00001
-            if len(shape) == 2:
-                x_shape = shape
-                c = x_shape[1]
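+    # run the batch_norm op in inference mode for one place/layout/dtype
+    # combination and compare its output against the numpy reference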
+    def check_with_place(self, place, data_layout, dtype, shape):
+        epsilon = 0.00001
+        if len(shape) == 2:
+            x_shape = shape
+            c = x_shape[1]
+        else:
+            n, h, w, c = shape[0], shape[1], shape[2], shape[3]
+            if data_layout == "NHWC":
+                x_shape = [n, h, w, c]
+            elif data_layout == "NCHW":
+                x_shape = [n, c, h, w]
             else:
-                n, h, w, c = shape[0], shape[1], shape[2], shape[3]
-                if data_layout == "NHWC":
-                    x_shape = [n, h, w, c]
-                elif data_layout == "NCHW":
-                    x_shape = [n, c, h, w]
-                else:
-                    raise ValueError("Unknown data layout.")
-            scale_shape = [c]
-
-            x_val = np.random.random_sample(x_shape).astype(dtype)
-            scale_val = np.random.random_sample(scale_shape).astype(dtype)
-            bias_val = np.random.random_sample(scale_shape).astype(dtype)
-
-            mean = np.zeros(scale_shape).astype(dtype)
-            variance = np.ones(scale_shape).astype(dtype)
-
-            y_out = _reference_testing(x_val, scale_val, bias_val, mean,
-                                       variance, epsilon,
-                                       data_layout).astype(dtype)
+                raise ValueError("Unknown data layout.")
+        scale_shape = [c]
 
-            scope = core.Scope()
+        x_val = np.random.random_sample(x_shape).astype(dtype)
+        scale_val = np.random.random_sample(scale_shape).astype(dtype)
+        bias_val = np.random.random_sample(scale_shape).astype(dtype)
 
-            # create input
-            x_tensor = create_or_get_tensor(
-                scope, "x_val", OpTest.np_dtype_to_fluid_dtype(x_val), place)
-            scale_tensor = create_or_get_tensor(
-                scope, "scale_val",
-                OpTest.np_dtype_to_fluid_dtype(scale_val), place)
-            bias_tensor = create_or_get_tensor(
-                scope, "bias_val",
-                OpTest.np_dtype_to_fluid_dtype(bias_val), place)
-            mean_tensor = create_or_get_tensor(
-                scope, "mean", OpTest.np_dtype_to_fluid_dtype(mean), place)
-            variance_tensor = create_or_get_tensor(
-                scope, "variance",
-                OpTest.np_dtype_to_fluid_dtype(variance), place)
+        mean = np.zeros(scale_shape).astype(dtype)
+        variance = np.ones(scale_shape).astype(dtype)
 
-            # create output
-            y_tensor = create_or_get_tensor(scope, "y_out", None, place)
-            saved_mean_tensor = create_or_get_tensor(scope, "saved_mean", None,
-                                                     place)
-            saved_variance_tensor = create_or_get_tensor(
-                scope, "saved_variance", None, place)
-            mean_out_tensor = mean_tensor
-            variance_out_tensor = variance_tensor
+        y_out = _reference_testing(x_val, scale_val, bias_val, mean, variance,
+                                   epsilon, data_layout).astype(dtype)
+
+        scope = core.Scope()
+
+        # create input
+        x_tensor = create_or_get_tensor(scope, "x_val",
+                                        OpTest.np_dtype_to_fluid_dtype(x_val),
+                                        place)
+        scale_tensor = create_or_get_tensor(
+            scope, "scale_val",
+            OpTest.np_dtype_to_fluid_dtype(scale_val), place)
+        bias_tensor = create_or_get_tensor(
+            scope, "bias_val", OpTest.np_dtype_to_fluid_dtype(bias_val), place)
+        mean_tensor = create_or_get_tensor(scope, "mean",
+                                           OpTest.np_dtype_to_fluid_dtype(mean),
+                                           place)
+        variance_tensor = create_or_get_tensor(
+            scope, "variance", OpTest.np_dtype_to_fluid_dtype(variance), place)
+
+        # create output
+        y_tensor = create_or_get_tensor(scope, "y_out", None, place)
+        saved_mean_tensor = create_or_get_tensor(scope, "saved_mean", None,
+                                                 place)
+        saved_variance_tensor = create_or_get_tensor(scope, "saved_variance",
+                                                     None, place)
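+        # MeanOut/VarianceOut are bound to the same scope variables as
+        # Mean/Variance below, so the output tensors alias the input ones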
+        mean_out_tensor = mean_tensor
+        variance_out_tensor = variance_tensor
+
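+        # build the batch_norm op in inference mode; with is_test=True it
+        # normalizes with the given mean/variance instead of batch statistics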
+        batch_norm_op = Operator(
+            "batch_norm",
+            # inputs
+            X="x_val",
+            Scale="scale_val",
+            Bias="bias_val",
+            Mean="mean",
+            Variance="variance",
+            # outputs
+            Y="y_out",
+            MeanOut="mean",
+            VarianceOut="variance",
+            SavedMean="saved_mean",
+            SavedVariance="saved_variance",
+            # attrs
+            is_test=True,
+            data_layout=data_layout,
+            epsilon=epsilon)
+
+        batch_norm_op.run(scope, place)
+
+        # check inference result
+        self.__assert_close(y_tensor, y_out,
+                            "inference output are different at " + str(place) +
+                            ", " + data_layout + ", " + str(np.dtype(dtype)))
+
+    def test_check_output(self):
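+        # float32 runs on CPU and, when the build has CUDA support, also on GPU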
+        places = [core.CPUPlace()]
+        if core.is_compiled_with_cuda() and core.op_support_gpu("batch_norm"):
+            places.append(core.CUDAPlace(0))
 
-            batch_norm_op = Operator(
-                "batch_norm",
-                # inputs
-                X="x_val",
-                Scale="scale_val",
-                Bias="bias_val",
-                Mean="mean",
-                Variance="variance",
-                # outputs
-                Y="y_out",
-                MeanOut="mean",
-                VarianceOut="variance",
-                SavedMean="saved_mean",
-                SavedVariance="saved_variance",
-                # attrs
-                is_test=True,
-                data_layout=data_layout,
-                epsilon=epsilon)
+        for place in places:
+            for data_format in ["NCHW", "NHWC"]:
+                self.check_with_place(place, data_format, self.dtype,
+                                      [2, 3, 4, 5])
+                self.check_with_place(place, data_format, self.dtype, [2, 3])
 
-            batch_norm_op.run(scope, place)
 
-            # check inference result
-            self.__assert_close(
-                y_tensor, y_out, "inference output are different at " +
-                str(place) + ", " + data_layout + ", " + str(np.dtype(dtype)))
+class TestFP16BatchNormOpInference(TestBatchNormOpInference):
+    def setUp(self):
+        self.dtype = np.float16
 
-        places = [core.CPUPlace()]
+    def test_check_output(self):
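+        # the CPU kernel has no float16 support, so only test on CUDA places
+        # that support float16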
+        places = []
         if core.is_compiled_with_cuda() and core.op_support_gpu("batch_norm"):
             place = core.CUDAPlace(0)
-            if self.dtype != np.float16 or core.is_float16_supported(place):
+            if core.is_float16_supported(place):
                 places.append(place)
 
         for place in places:
             for data_format in ["NCHW", "NHWC"]:
-                for dtype in [np.float32, np.float16]:
-                    test_with_place(place, data_format, dtype, [2, 3, 4, 5])
-                    test_with_place(place, data_format, dtype, [2, 3])
+                self.check_with_place(place, data_format, self.dtype,
+                                      [2, 3, 4, 5])
+                self.check_with_place(place, data_format, self.dtype, [2, 3])
 
 
 class TestBatchNormOpTraining(OpTest):