Skip to content

Commit 35e6abd

Browse files
committed
Change iter_parameters back and port unittests code to Python3
1 parent e849362 commit 35e6abd

File tree

195 files changed

+383
-371
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

195 files changed

+383
-371
lines changed

python/paddle/fluid/framework.py

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -963,9 +963,9 @@ def _var_recursive(self, name):
963963
raise ValueError("Var {0} is not found recursively".format(name))
964964

965965
def all_parameters(self):
966-
return list(self._iter_parameters())
966+
return list(self.iter_parameters())
967967

968-
def _iter_parameters(self):
968+
def iter_parameters(self):
969969
return (item[1] for item in list(self.vars.items())
970970
if isinstance(item[1], Parameter))
971971

@@ -1199,7 +1199,7 @@ def _copy_param_info_from(self, other):
11991199
if not isinstance(other, Block):
12001200
raise TypeError(
12011201
"_copy_param_info_from should be invoked with Block")
1202-
for p in other._iter_parameters():
1202+
for p in other.iter_parameters():
12031203
assert isinstance(p, Parameter)
12041204
v = self.vars.get(p.name, None)
12051205
if v is None:

python/paddle/fluid/tests/test_beam_search_decoder.py

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -155,7 +155,7 @@ def train_loop(main_program):
155155
]
156156
feeder = fluid.DataFeeder(feed_list, place)
157157

158-
for pass_id in xrange(1):
158+
for pass_id in range(1):
159159
for batch_id, data in enumerate(train_reader()):
160160
outs = exe.run(main_program,
161161
feed=feeder.feed(data),
@@ -204,8 +204,8 @@ def decode_main(use_cuda):
204204
]
205205
feeder = fluid.DataFeeder(feed_list, place)
206206

207-
data = train_reader().next()
208-
feed_dict = feeder.feed(map(lambda x: [x[0]], data))
207+
data = next(train_reader())
208+
feed_dict = feeder.feed([[x[0]] for x in data])
209209
feed_dict['init_ids'] = init_ids
210210
feed_dict['init_scores'] = init_scores
211211

@@ -214,7 +214,7 @@ def decode_main(use_cuda):
214214
feed=feed_dict,
215215
fetch_list=[translation_ids, translation_scores],
216216
return_numpy=False)
217-
print result_ids.lod()
217+
print(result_ids.lod())
218218

219219

220220
class TestBeamSearchDecoder(unittest.TestCase):

python/paddle/fluid/tests/unittests/dist_se_resnext.py

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -301,15 +301,15 @@ def run_trainer(self, place, endpoints, trainer_id, trainers, is_dist=True):
301301
trainer_id=trainer_id)
302302

303303
feed_var_list = [
304-
var for var in trainer_prog.global_block().vars.itervalues()
304+
var for var in trainer_prog.global_block().vars.values()
305305
if var.is_data
306306
]
307307

308308
feeder = fluid.DataFeeder(feed_var_list, place)
309309
reader_generator = train_reader()
310310
first_loss, = exe.run(fetch_list=[avg_cost.name])
311311
print(first_loss)
312-
for i in xrange(5):
312+
for i in range(5):
313313
loss, = exe.run(fetch_list=[avg_cost.name])
314314
last_loss, = exe.run(fetch_list=[avg_cost.name])
315315
print(last_loss)

python/paddle/fluid/tests/unittests/op_test.py

Lines changed: 13 additions & 11 deletions
Original file line number | Diff line number | Diff line change
@@ -25,14 +25,16 @@
2525
from paddle.fluid.op import Operator
2626
from paddle.fluid.executor import Executor
2727
from paddle.fluid.framework import Program, OpProtoHolder, Variable
28-
from testsuite import create_op, set_input, append_input_output, append_loss_ops
28+
from .testsuite import create_op, set_input, append_input_output, append_loss_ops
29+
from functools import reduce
30+
from six.moves import zip
2931

3032

3133
def randomize_probability(batch_size, class_num, dtype='float32'):
3234
prob = np.random.uniform(
3335
0.1, 1.0, size=(batch_size, class_num)).astype(dtype)
3436
prob_sum = prob.sum(axis=1)
35-
for i in xrange(len(prob)):
37+
for i in range(len(prob)):
3638
prob[i] /= prob_sum[i]
3739
return prob
3840

@@ -86,7 +88,7 @@ def __set_elem__(tensor, i, e):
8688

8789
# we only compute gradient of one element each time.
8890
# we use a for loop to compute the gradient of every element.
89-
for i in xrange(tensor_size):
91+
for i in range(tensor_size):
9092
if in_place:
9193
set_input(scope, op, inputs, place)
9294

@@ -139,7 +141,7 @@ def infer_dtype(numpy_dict):
139141
assert isinstance(
140142
numpy_dict,
141143
dict), "self.inputs, self.outputs must be numpy_dict"
142-
for var_name, var_value in numpy_dict.iteritems():
144+
for var_name, var_value in numpy_dict.items():
143145
if isinstance(var_value, (np.ndarray, np.generic)):
144146
self.try_call_once(var_value.dtype)
145147
elif isinstance(var_value, (list, tuple)):
@@ -197,7 +199,7 @@ def _append_ops(self, block):
197199

198200
def _get_io_vars(self, block, numpy_inputs):
199201
inputs = {}
200-
for name, value in numpy_inputs.iteritems():
202+
for name, value in numpy_inputs.items():
201203
if isinstance(value, list):
202204
var_list = [
203205
block.var(sub_name) for sub_name, sub_value in value
@@ -240,7 +242,7 @@ def _calc_output(self, place, parallel=False):
240242
# if the fetch_list is customized by user, we use it directly.
241243
# if not, fill the fetch_list by the user configured outputs in test.
242244
if len(fetch_list) == 0:
243-
for var_name, var in outputs.iteritems():
245+
for var_name, var in outputs.items():
244246
if isinstance(var, list):
245247
for v in var:
246248
fetch_list.append(v)
@@ -252,7 +254,7 @@ def _calc_output(self, place, parallel=False):
252254
fetch_list.append(str(out_name))
253255
# fetch_list = map(block.var, fetch_list)
254256
if not isinstance(fetch_list[0], fluid.framework.Variable):
255-
fetch_list = map(block.var, fetch_list)
257+
fetch_list = list(map(block.var, fetch_list))
256258
outs = executor.run(program,
257259
feed=feed_map,
258260
fetch_list=fetch_list,
@@ -334,7 +336,7 @@ def check_output_customized(self, checker):
334336
def __assert_is_close(self, numeric_grads, analytic_grads, names,
335337
max_relative_error, msg_prefix):
336338

337-
for a, b, name in itertools.izip(numeric_grads, analytic_grads, names):
339+
for a, b, name in zip(numeric_grads, analytic_grads, names):
338340
abs_a = np.abs(a)
339341
abs_a[abs_a < 1e-3] = 1
340342

@@ -460,6 +462,6 @@ def _get_gradient(self,
460462
use_cuda=use_cuda, loss_name=loss.name, main_program=program)
461463
else:
462464
executor = Executor(place)
463-
return map(np.array,
464-
executor.run(prog, feed_dict, fetch_list,
465-
return_numpy=False))
465+
return list(
466+
map(np.array,
467+
executor.run(prog, feed_dict, fetch_list, return_numpy=False)))

python/paddle/fluid/tests/unittests/test_accuracy_op.py

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -14,7 +14,7 @@
1414

1515
import unittest
1616
import numpy as np
17-
from op_test import OpTest
17+
from .op_test import OpTest
1818

1919

2020
class TestAccuracyOp(OpTest):
@@ -26,7 +26,7 @@ def setUp(self):
2626
label = np.random.randint(0, 2, (n, 1))
2727
self.inputs = {'Out': infer, 'Indices': indices, "Label": label}
2828
num_correct = 0
29-
for rowid in xrange(n):
29+
for rowid in range(n):
3030
for ele in indices[rowid]:
3131
if ele == label[rowid]:
3232
num_correct += 1

python/paddle/fluid/tests/unittests/test_activation_mkldnn_op.py

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -15,9 +15,9 @@
1515
import unittest
1616
import numpy as np
1717
import paddle.fluid.core as core
18-
from op_test import OpTest
18+
from .op_test import OpTest
1919
from scipy.special import expit
20-
from test_activation_op import TestRelu, TestTanh, TestSqrt, TestAbs
20+
from .test_activation_op import TestRelu, TestTanh, TestSqrt, TestAbs
2121

2222

2323
class TestMKLDNNReluDim2(TestRelu):

python/paddle/fluid/tests/unittests/test_activation_op.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -15,7 +15,7 @@
1515
import unittest
1616
import numpy as np
1717
import paddle.fluid.core as core
18-
from op_test import OpTest
18+
from .op_test import OpTest
1919
from scipy.special import expit
2020

2121

python/paddle/fluid/tests/unittests/test_adadelta_op.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -14,7 +14,7 @@
1414

1515
import unittest
1616
import numpy as np
17-
from op_test import OpTest
17+
from .op_test import OpTest
1818

1919

2020
class TestAdadeltaOp1(OpTest):

python/paddle/fluid/tests/unittests/test_adagrad_op.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@
1616
import numpy as np
1717
import paddle.fluid.core as core
1818
from paddle.fluid.op import Operator
19-
from op_test import OpTest
19+
from .op_test import OpTest
2020
import math
2121

2222

python/paddle/fluid/tests/unittests/test_adam_op.py

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -14,7 +14,7 @@
1414

1515
import unittest
1616
import numpy as np
17-
from op_test import OpTest
17+
from .op_test import OpTest
1818
from paddle.fluid import core
1919
from paddle.fluid.op import Operator
2020

@@ -273,7 +273,7 @@ def check_with_place(self, place):
273273
self.setup(scope, place)
274274

275275
op_args = dict()
276-
for key, np_array in self.dense_inputs.iteritems():
276+
for key, np_array in self.dense_inputs.items():
277277
var = scope.var(key).get_tensor()
278278
var.set(np_array, place)
279279
op_args[key] = key
@@ -290,7 +290,7 @@ def check_with_place(self, place):
290290
adam_op = Operator("adam", **op_args)
291291
adam_op.run(scope, place)
292292

293-
for key, np_array in self.outputs.iteritems():
293+
for key, np_array in self.outputs.items():
294294
out_var = scope.var(key).get_tensor()
295295
actual = np.array(out_var)
296296
actual = actual.reshape([actual.size])

0 commit comments

Comments (0)