11import numpy as np
2- import theano as _th
2+ import theano as th
33
44from kaggle_utils import multiclass_log_loss
55from examples .utils import make_progressbar
def validate(dataset_x, dataset_y, model, epoch, batch_size):
    """Evaluate `model` on a dataset and report the average multiclass log-loss.

    Args:
        dataset_x: 2-D numpy array of input features, one row per sample.
        dataset_y: 1-D numpy array of class targets aligned with `dataset_x`.
        model: object exposing `forward(inputs)` returning per-class
            predictions for a minibatch.  # assumes 9-class output — TODO confirm
        epoch: epoch number, used only for the progress/printout labels.
        batch_size: number of samples per minibatch.

    Returns:
        The average log-loss over the dataset (also printed).  Returning the
        value is backward-compatible — previously the function only printed it.
    """
    progress = make_progressbar('Testing epoch #{}'.format(epoch), len(dataset_x))
    progress.start()

    logloss = 0.
    # Ceil-divide so the final (possibly smaller) minibatch is included.
    for j in range((dataset_x.shape[0] + batch_size - 1) // batch_size):
        # Note: numpy slice indexing clips at the array end, so the last
        # minibatch is automatically shorter when the dataset size is not a
        # multiple of batch_size -- no manual resizing/copying needed.
        mini_batch_input = dataset_x[j * batch_size : (j + 1) * batch_size].astype(th.config.floatX)
        mini_batch_targets = dataset_y[j * batch_size : (j + 1) * batch_size].astype(th.config.floatX)

        mini_batch_prediction = model.forward(mini_batch_input)

        # normalize=False: accumulate the summed loss; divide once at the end.
        logloss += multiclass_log_loss(mini_batch_targets, mini_batch_prediction, normalize=False)

        # len(mini_batch_input) handles the short final batch correctly.
        progress.update(j * batch_size + len(mini_batch_input))

    progress.finish()
    avg_logloss = logloss / dataset_x.shape[0]
    print("Epoch #{}, Logloss: {:.5f}".format(epoch, avg_logloss))
    return avg_logloss
0 commit comments