
Commit b805751

Merge pull request #13223 from velconia/open_python35_CI
Open python35 ci
2 parents 34e467d + 8059445

6 files changed (+43, -30 lines)


paddle/fluid/operators/fake_quantize_op.cu

Lines changed: 2 additions & 1 deletion
@@ -119,7 +119,8 @@ struct FindRangeAbsMaxFunctor<platform::CUDADeviceContext, T> {
                   const framework::Tensor& last_scale,
                   const framework::Tensor& iter, const int window_size,
                   framework::Tensor* scales_arr, framework::Tensor* out_scale) {
-    auto& gpu_place = boost::get<platform::CUDAPlace>(ctx.GetPlace());
+    const auto gpu_place = boost::get<platform::CUDAPlace>(ctx.GetPlace());
+
     T* scale_arr = scales_arr->mutable_data<T>(gpu_place);
     T* out_scale_data = out_scale->mutable_data<T>(gpu_place);

paddle/scripts/paddle_build.sh

Lines changed: 2 additions & 0 deletions
@@ -115,6 +115,7 @@ function cmake_gen() {
         -DWITH_FLUID_ONLY=${WITH_FLUID_ONLY:-OFF}
         -DCMAKE_EXPORT_COMPILE_COMMANDS=ON
         -DWITH_CONTRIB=${WITH_CONTRIB:-ON}
+        -DWITH_INFERENCE=${WITH_INFERENCE:-ON}
         -DWITH_ANAKIN=${WITH_ANAKIN:-OFF}
         -DPY_VERSION=${PY_VERSION:-2.7}
     ========================================
@@ -144,6 +145,7 @@ EOF
         -DWITH_FLUID_ONLY=${WITH_FLUID_ONLY:-OFF} \
         -DCMAKE_EXPORT_COMPILE_COMMANDS=ON \
         -DWITH_CONTRIB=${WITH_CONTRIB:-ON} \
+        -DWITH_INFERENCE=${WITH_INFERENCE:-ON} \
         -DWITH_ANAKIN=${WITH_ANAKIN:-OFF} \
         -DPY_VERSION=${PY_VERSION:-2.7}
 }

python/paddle/fluid/tests/unittests/dist_transformer.py

Lines changed: 20 additions & 14 deletions
@@ -36,6 +36,7 @@
 import paddle.fluid.layers as layers
 from paddle.fluid import core
 from test_dist_base import TestDistRunnerBase, runtime_main
+import paddle.compat as cpt
 from paddle.compat import long_type
 
 import hashlib
@@ -315,8 +316,9 @@ def pad_batch_data(insts,
     """
     return_list = []
     max_len = max(len(inst) for inst in insts)
-    num_token = reduce(lambda x, y: x + y,
-                       [len(inst) for inst in insts]) if return_num_token else 0
+    num_token = six.moves.reduce(
+        lambda x, y: x + y,
+        [len(inst) for inst in insts]) if return_num_token else 0
     # Any token included in dict can be used to pad, since the paddings' loss
     # will be masked out by weights and make no effect on parameter gradients.
     inst_data = np.array(
@@ -328,7 +330,7 @@ def pad_batch_data(insts,
         return_list += [inst_weight.astype("float32").reshape([-1, 1])]
     else:  # position data
         inst_pos = np.array([
-            range(1, len(inst) + 1) + [0] * (max_len - len(inst))
+            list(range(1, len(inst) + 1)) + [0] * (max_len - len(inst))
             for inst in insts
         ])
         return_list += [inst_pos.astype("int64").reshape([-1, 1])]
@@ -385,10 +387,11 @@ def prepare_batch_input(insts, data_input_names, src_pad_idx, trg_pad_idx,
         return_num_token=True)
 
     data_input_dict = dict(
-        zip(data_input_names, [
-            src_word, src_pos, src_slf_attn_bias, trg_word, trg_pos,
-            trg_slf_attn_bias, trg_src_attn_bias, lbl_word, lbl_weight
-        ]))
+        list(
+            zip(data_input_names, [
+                src_word, src_pos, src_slf_attn_bias, trg_word, trg_pos,
+                trg_slf_attn_bias, trg_src_attn_bias, lbl_word, lbl_weight
+            ])))
     return data_input_dict, np.asarray([num_token], dtype="float32")
 
 
@@ -561,7 +564,7 @@ def train_loop(exe, train_progm, dev_count, sum_cost, avg_cost, lr_scheduler,
         np.log(TrainTaskConfig.label_smooth_eps / (
             ModelHyperParams.trg_vocab_size - 1) + 1e-20))
     init = False
-    for pass_id in xrange(TrainTaskConfig.pass_num):
+    for pass_id in six.moves.xrange(TrainTaskConfig.pass_num):
         pass_start_time = time.time()
         for batch_id, data in enumerate(train_data()):
             if batch_id >= 5:
@@ -587,11 +590,11 @@ def train_loop(exe, train_progm, dev_count, sum_cost, avg_cost, lr_scheduler,
                 ModelHyperParams.eos_idx, ModelHyperParams.n_head,
                 ModelHyperParams.d_model)
             total_num_token += num_token
-            feed_kv_pairs = data_input_dict.items()
+            feed_kv_pairs = list(data_input_dict.items())
             if TrainTaskConfig.local:
-                feed_kv_pairs += {
+                feed_kv_pairs += list({
                     lr_scheduler.learning_rate.name: lr_rate
-                }.items()
+                }.items())
             feed_list.append(dict(feed_kv_pairs))
 
             if not init:
@@ -873,6 +876,7 @@ def _load_lines(self, fpattern, tar_fname):
 
             f = tarfile.open(fpaths[0], "r")
             for line in f.extractfile(tar_fname):
+                line = cpt.to_text(line)
                 fields = line.strip("\n").split(self._field_delimiter)
                 if (not self._only_src and len(fields) == 2) or (
                         self._only_src and len(fields) == 1):
@@ -882,8 +886,9 @@ def _load_lines(self, fpattern, tar_fname):
             if not os.path.isfile(fpath):
                 raise IOError("Invalid file: %s" % fpath)
 
-            with open(fpath, "r") as f:
+            with open(fpath, "rb") as f:
                 for line in f:
+                    line = cpt.to_text(line)
                     fields = line.strip("\n").split(self._field_delimiter)
                     if (not self._only_src and len(fields) == 2) or (
                             self._only_src and len(fields) == 1):
@@ -892,8 +897,9 @@ def _load_lines(self, fpattern, tar_fname):
     @staticmethod
     def load_dict(dict_path, reverse=False):
         word_dict = {}
-        with open(dict_path, "r") as fdict:
+        with open(dict_path, "rb") as fdict:
             for idx, line in enumerate(fdict):
+                line = cpt.to_text(line)
                 if reverse:
                     word_dict[idx] = line.strip("\n")
                 else:
@@ -1034,7 +1040,7 @@ def __combine_heads(x):
     # size of the input as the output dimension size.
     return layers.reshape(
         x=trans_x,
-        shape=map(int, [0, 0, trans_x.shape[2] * trans_x.shape[3]]))
+        shape=list(map(int, [0, 0, trans_x.shape[2] * trans_x.shape[3]])))
 
 def scaled_dot_product_attention(q, k, v, attn_bias, d_model, dropout_rate):
     """

python/paddle/fluid/tests/unittests/test_prelu_op.py

Lines changed: 13 additions & 9 deletions
@@ -16,6 +16,7 @@
 
 import unittest
 import numpy as np
+import six
 from op_test import OpTest
 
 
@@ -62,17 +63,20 @@ def test_check_grad_3_ignore_alpha(self):
 
 
 # TODO(minqiyang): Resume these test cases after fixing Python3 CI job issues
-# class TestCase1(PReluTest):
-#     def initTestCase(self):
-#         self.attrs = {'mode': "all"}
+if six.PY2:
 
-# class TestCase2(PReluTest):
-#     def initTestCase(self):
-#         self.attrs = {'mode': "channel"}
+    class TestCase1(PReluTest):
+        def initTestCase(self):
+            self.attrs = {'mode': "all"}
+
+    class TestCase2(PReluTest):
+        def initTestCase(self):
+            self.attrs = {'mode': "channel"}
+
+    class TestCase3(PReluTest):
+        def initTestCase(self):
+            self.attrs = {'mode': "element"}
 
-# class TestCase3(PReluTest):
-#     def initTestCase(self):
-#         self.attrs = {'mode': "element"}
 
 
 if __name__ == "__main__":
     unittest.main()
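Note: guarding the class definitions with if six.PY2 means unittest never even sees these cases on Python 3, since the classes are simply not created there. A minimal sketch of the pattern, with a hypothetical base test standing in for PReluTest:

import unittest
import six

class PReluLikeTest(unittest.TestCase):
    # Hypothetical stand-in for the PReluTest base class above.
    attrs = {'mode': "all"}

    def test_mode_present(self):
        self.assertIn('mode', self.attrs)

if six.PY2:
    # Subclasses defined inside this block do not exist under Python 3,
    # so the test runner never collects or runs them there.
    class ChannelModeCase(PReluLikeTest):
        attrs = {'mode': "channel"}

if __name__ == "__main__":
    unittest.main()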

python/paddle/fluid/transpiler/details/program_utils.py

Lines changed: 1 addition & 1 deletion
@@ -153,7 +153,7 @@ def block_to_code(block, block_idx):
 
     indent += 1
     # sort all vars
-    all_vars = sorted(block.vars.iteritems(), key=lambda x: x[0])
+    all_vars = sorted(six.iteritems(block.vars), key=lambda x: x[0])
     for var in all_vars:
         print("{}{}".format(get_indent_space(indent), variable_to_code(var[1])))
 
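Note: dict.iteritems() was removed in Python 3; six.iteritems(d) dispatches to d.iteritems() on Python 2 and d.items() on Python 3, so the same call works on both. A minimal sketch with an illustrative dict standing in for block.vars:

import six

block_vars = {"fc_0.w_0": "<Variable>", "fc_0.b_0": "<Variable>"}

# six.iteritems works on plain dicts under both interpreters.
all_vars = sorted(six.iteritems(block_vars), key=lambda x: x[0])
for name, var in all_vars:
    print(name, var)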

python/paddle/fluid/transpiler/distribute_transpiler.py

Lines changed: 5 additions & 5 deletions
@@ -300,7 +300,7 @@ def transpile(self,
         input_deps = grad_name_to_send_dummy_out.values()
         program.global_block().append_op(
             type="send_barrier",
-            inputs={"X": input_deps},
+            inputs={"X": list(input_deps)},
             outputs={"Out": send_barrier_out},
             attrs={
                 "endpoints": pserver_endpoints,
@@ -401,7 +401,7 @@ def _get_trainer_startup_program(self, recv_vars, eplist):
 
         Args:
             recv_vars (list): Variable list to recv for current trainer_id
-            eplist (list): A list of strings indicating 
+            eplist (list): A list of strings indicating
 
         Returns:
             Program: trainer side startup program.
@@ -455,7 +455,7 @@ def _get_trainer_startup_program(self, recv_vars, eplist):
             if len(splited_var) <= 1:
                 continue
             # NOTE: if enable memory optimization, origin vars maybe removed.
-            if startup_program.global_block().vars.has_key(varname):
+            if varname in startup_program.global_block().vars:
                 orig_param = startup_program.global_block().vars[varname]
             else:
                 origin_param_var = self.origin_program.global_block().vars[
@@ -690,7 +690,7 @@ def get_pserver_programs(self, endpoint):
 
         Args:
             endpoint (str): current pserver endpoint.
-        
+
         Returns:
             tuple: (main_program, startup_program), of type "Program"
         """
@@ -713,7 +713,7 @@ def get_startup_program(self,
             endpoint (str): current pserver endpoint.
             pserver_program (Program): deprecated, call get_pserver_program first.
            startup_program (Program): deprecated, should pass startup_program
-                when initalizing 
+                when initalizing
 
         Returns:
             Program: parameter server side startup program.
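Note: besides trailing-whitespace cleanups, the substantive changes here are that dict.values() returns a view on Python 3 (so it is wrapped in list() before being used as an op input) and that dict.has_key() was removed (membership is tested with in). A minimal sketch under those assumptions, with illustrative names in place of the transpiler's real structures:

grad_name_to_send_dummy_out = {"w@GRAD": "dummy_0", "b@GRAD": "dummy_1"}

# dict.values() is a lazy view on Python 3; materialize it when the
# consumer expects a concrete list of dependencies.
input_deps = list(grad_name_to_send_dummy_out.values())

block_vars = {"fc_0.w_0": object()}
varname = "fc_0.w_0"

# dict.has_key() no longer exists on Python 3; use the `in` operator.
if varname in block_vars:
    orig_param = block_vars[varname]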
