Skip to content

Commit efc094f

Browse files
authored
Merge pull request #8543 from reyoung/feature/enhance_layer_generator
Enhance layer_function_generator
2 parents ed1a053 + 777a281 commit efc094f

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

48 files changed

+82
-75
lines changed

.gitignore

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,6 @@ third_party/
2727
cmake-build-*
2828

2929
# generated while compiling
30-
python/paddle/v2/fluid/core.so
3130
paddle/pybind/pybind.h
3231
CMakeFiles
3332
cmake_install.cmake

python/paddle/fluid/.gitignore

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1 +1,2 @@
11
proto
2+
core.so

python/paddle/fluid/layers/layer_function_generator.py

Lines changed: 10 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -130,7 +130,7 @@ def generate_layer_fn(op_type):
130130
o_name = not_intermediate_outputs[0].name
131131
intermediate_output_names = [output.name for output in intermediate_outputs]
132132

133-
def infer_and_check_dtype(op_proto, **kwargs):
133+
def infer_and_check_dtype(op_proto, *args, **kwargs):
134134
"""
135135
This function performs the sanity check for dtype and
136136
instance type.
@@ -141,6 +141,10 @@ def infer_and_check_dtype(op_proto, **kwargs):
141141
val = kwargs.pop(name, [])
142142
if not isinstance(val, list) and not isinstance(val, tuple):
143143
val = [val]
144+
if len(val) == 0:
145+
val = [args[0]]
146+
args = args[1:]
147+
144148
for each in val:
145149
if not isinstance(each, Variable):
146150
raise ValueError("input of {0} must be variable".format(
@@ -155,17 +159,20 @@ def infer_and_check_dtype(op_proto, **kwargs):
155159

156160
return dtype
157161

158-
def func(**kwargs):
162+
def func(*args, **kwargs):
159163
helper = LayerHelper(op_type, **kwargs)
160164

161-
dtype = infer_and_check_dtype(op_proto, **kwargs)
165+
dtype = infer_and_check_dtype(op_proto, *args, **kwargs)
162166

163167
inputs = dict()
164168
for ipt in op_proto.inputs:
165169
name = _convert_(ipt.name)
166170
val = kwargs.pop(name, [])
167171
if not isinstance(val, list) and not isinstance(val, tuple):
168172
val = [val]
173+
if len(val) == 0 and len(args) != 0:
174+
val = args[0]
175+
args = args[1:]
169176
inputs[ipt.name] = val
170177

171178
outputs = dict()

python/paddle/fluid/layers/tensor.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -160,8 +160,8 @@ def sums(input, out=None):
160160
a0 = layers.array_read(array=tmp, i=i)
161161
i = layers.increment(x=i)
162162
a1 = layers.array_read(array=tmp, i=i)
163-
mean_a0 = layers.mean(x=a0)
164-
mean_a1 = layers.mean(x=a1)
163+
mean_a0 = layers.mean(a0)
164+
mean_a1 = layers.mean(a1)
165165
a_sum = layers.sums(input=[mean_a0, mean_a1])
166166
"""
167167
helper = LayerHelper('sum', **locals())

python/paddle/fluid/tests/book/notest_rnn_encoder_decoer.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -147,7 +147,7 @@ def seq_to_seq_net():
147147
label = fluid.layers.data(
148148
name='label_sequence', shape=[1], dtype='int64', lod_level=1)
149149
cost = fluid.layers.cross_entropy(input=prediction, label=label)
150-
avg_cost = fluid.layers.mean(x=cost)
150+
avg_cost = fluid.layers.mean(cost)
151151

152152
return avg_cost, prediction
153153

python/paddle/fluid/tests/book/test_fit_a_line.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,7 @@ def train(use_cuda, save_dirname):
2929
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
3030

3131
cost = fluid.layers.square_error_cost(input=y_predict, label=y)
32-
avg_cost = fluid.layers.mean(x=cost)
32+
avg_cost = fluid.layers.mean(cost)
3333

3434
sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
3535
sgd_optimizer.minimize(avg_cost)

python/paddle/fluid/tests/book/test_image_classification.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -110,7 +110,7 @@ def train(net_type, use_cuda, save_dirname):
110110

111111
predict = fluid.layers.fc(input=net, size=classdim, act='softmax')
112112
cost = fluid.layers.cross_entropy(input=predict, label=label)
113-
avg_cost = fluid.layers.mean(x=cost)
113+
avg_cost = fluid.layers.mean(cost)
114114
acc = fluid.layers.accuracy(input=predict, label=label)
115115

116116
# Test program

python/paddle/fluid/tests/book/test_label_semantic_roles.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -164,7 +164,7 @@ def train(use_cuda, save_dirname=None):
164164
label=target,
165165
param_attr=fluid.ParamAttr(
166166
name='crfw', learning_rate=mix_hidden_lr))
167-
avg_cost = fluid.layers.mean(x=crf_cost)
167+
avg_cost = fluid.layers.mean(crf_cost)
168168

169169
# TODO(qiao)
170170
# check other optimizers and check why out will be NAN

python/paddle/fluid/tests/book/test_machine_translation.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -178,7 +178,7 @@ def train_main(use_cuda, is_sparse):
178178
label = pd.data(
179179
name="target_language_next_word", shape=[1], dtype='int64', lod_level=1)
180180
cost = pd.cross_entropy(input=rnn_out, label=label)
181-
avg_cost = pd.mean(x=cost)
181+
avg_cost = pd.mean(cost)
182182

183183
optimizer = fluid.optimizer.Adagrad(learning_rate=1e-4)
184184
optimizer.minimize(avg_cost)

python/paddle/fluid/tests/book/test_recognize_digits.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,7 @@ def parse_arg():
4848
def loss_net(hidden, label):
4949
prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
5050
loss = fluid.layers.cross_entropy(input=prediction, label=label)
51-
avg_loss = fluid.layers.mean(x=loss)
51+
avg_loss = fluid.layers.mean(loss)
5252
acc = fluid.layers.accuracy(input=prediction, label=label)
5353
return prediction, avg_loss, acc
5454

@@ -101,8 +101,8 @@ def train(nn_type, use_cuda, parallel, save_dirname, save_param_filename):
101101

102102
avg_loss, acc = pd()
103103
# get mean loss and acc through every devices.
104-
avg_loss = fluid.layers.mean(x=avg_loss)
105-
acc = fluid.layers.mean(x=acc)
104+
avg_loss = fluid.layers.mean(avg_loss)
105+
acc = fluid.layers.mean(acc)
106106
else:
107107
prediction, avg_loss, acc = net_conf(img, label)
108108

0 commit comments

Comments
 (0)