@@ -229,13 +229,13 @@ def check_network_convergence(self,
         if batch_size is not None:
             batch_size *= fluid.core.get_cuda_device_count()
         begin = time.time()
-        first_loss, = exe.run([loss.name], feed_dict=feed_dict)
+        first_loss, = exe.run([loss.name], feed=feed_dict)
         first_loss = numpy.array(first_loss)

         for i in xrange(iter):
-            exe.run([], feed_dict=feed_dict)
+            exe.run([], feed=feed_dict)

-        last_loss, = exe.run([loss.name], feed_dict=feed_dict)
+        last_loss, = exe.run([loss.name], feed=feed_dict)
         end = time.time()

         if batch_size is not None:
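The change in this hunk is only a keyword rename on the executor's run() call: the fetch list stays positional and the input data moves from feed_dict= to feed=. A minimal sketch of the new calling convention, assuming exe, loss, and feed_dict are the objects built earlier in check_network_convergence (they are placeholders from this test, not a complete API description):

    import numpy

    # run() returns the fetched variables; feed data is now passed via `feed=`
    first_loss, = exe.run([loss.name], feed=feed_dict)
    first_loss = numpy.array(first_loss)

    # a fetch-free training step accepts the same keyword
    exe.run([], feed=feed_dict)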
@@ -277,11 +277,10 @@ def test_simple_fc(self):
                                       "label": label})

     def test_simple_fc_parallel_accuracy(self):
         single_first_loss, single_last_loss = self.check_network_convergence(
             simple_fc_net, seed=0, use_parallel_executor=False)
         parallel_first_loss, parallel_last_loss = self.check_network_convergence(
             simple_fc_net, seed=0, use_parallel_executor=True)
-        print("FUCK")
         print('single_first_loss=', single_first_loss)
         print('single_last_loss=', single_last_loss)
         print('parallel_first_loss=', parallel_first_loss)
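Since this test exists to check that the parallel executor converges like the single-device baseline, the printed losses could also be turned into assertions. A sketch of that idea, assuming check_network_convergence returns first/last losses as numpy-convertible values; the 1e-6 delta is illustrative, not taken from the test:

        # compare the single-device and parallel runs started from the same seed
        self.assertAlmostEqual(
            numpy.mean(single_first_loss), numpy.mean(parallel_first_loss), delta=1e-6)
        self.assertAlmostEqual(
            numpy.mean(single_last_loss), numpy.mean(parallel_last_loss), delta=1e-6)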
@@ -515,10 +514,10 @@ def test_parallel_testing(self):
             share_vars_from=train_exe)

         for i in xrange(5):
-            test_loss, = test_exe.run([loss.name], feed_dict=feed_dict)
+            test_loss, = test_exe.run([loss.name], feed=feed_dict)
             test_loss = numpy.array(test_loss)

-            train_loss, = train_exe.run([loss.name], feed_dict=feed_dict)
+            train_loss, = train_exe.run([loss.name], feed=feed_dict)
             train_loss = numpy.array(train_loss)
             self.assertTrue(
                 numpy.allclose(
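For context, test_exe in this hunk reuses the parameters trained by train_exe via share_vars_from, so both executors should report near-identical losses on the same feed. A sketch of that setup; only share_vars_from appears in the hunk above, and the other constructor keywords (use_cuda, loss_name, main_program) are assumptions for illustration:

        train_exe = fluid.ParallelExecutor(use_cuda=True, loss_name=loss.name)
        test_exe = fluid.ParallelExecutor(
            use_cuda=True,
            main_program=test_program,
            share_vars_from=train_exe)  # share parameters with the training executor

        test_loss, = test_exe.run([loss.name], feed=feed_dict)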
@@ -668,5 +667,5 @@ def test_all(self):
         for i in xrange(10):
             cur_batch = next(data)
             print map(numpy.array,
-                      pe.run(feed_dict=feeder.feed(cur_batch),
+                      pe.run(feed=feeder.feed(cur_batch),
                              fetch_list=[avg_cost.name]))[0]
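In this last hunk the feed comes from a DataFeeder rather than a hand-built dict; feeder.feed(cur_batch) packs a reader mini-batch into the name-to-tensor mapping that feed= expects. A sketch of how such a feeder is typically wired up (the feed_list contents and place are assumptions, not shown in the hunk):

    place = fluid.CUDAPlace(0)
    feeder = fluid.DataFeeder(feed_list=[image, label], place=place)

    # each element of cur_batch is one sample; feed() batches them into tensors
    pe.run(feed=feeder.feed(cur_batch), fetch_list=[avg_cost.name])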