
Commit 61d6db5

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into fix-7555
2 parents 41028f5 + 29603cf

29 files changed: +997, -129 lines

doc/faq/local/index_cn.rst

Lines changed: 46 additions & 0 deletions
@@ -211,3 +211,49 @@ decoder_inputs = paddle.layer.fc(
 * The number of elements in the list equals the number of output layers in the network;
 * Each element in the list is the output matrix of one layer, of type numpy ndarray;
 * The height of each layer's output matrix equals the number of samples for non-sequence input, or the total number of elements in the input sequences for sequence input; the width equals the layer's size in the configuration;
+
+6. How to obtain the output of a specific layer during training
+----------------------------------------------------------------
+
+In the event_handler, you can call :code:`event.gm.getLayerOutputs("layer_name")` to obtain the output of the layer named :code:`layer_name` in the model configuration for the current
+mini-batch forward pass. The returned values are all of type :code:`numpy.ndarray`; they can be used, for example, to compute custom evaluation metrics. Example code:
+
+.. code-block:: python
+
+    def score_diff(right_score, left_score):
+        return np.average(np.abs(right_score - left_score))
+
+    def event_handler(event):
+        if isinstance(event, paddle.event.EndIteration):
+            if event.batch_id % 25 == 0:
+                diff = score_diff(
+                    event.gm.getLayerOutputs("right_score")["right_score"][
+                        "value"],
+                    event.gm.getLayerOutputs("left_score")["left_score"][
+                        "value"])
+                logger.info(("Pass %d Batch %d : Cost %.6f, "
+                             "average absolute diff scores: %.6f") %
+                            (event.pass_id, event.batch_id, event.cost, diff))
+
+Note: this method cannot access the contents of the steps inside :code:`paddle.layer.recurrent_group`, but it can access the output of :code:`paddle.layer.recurrent_group` itself.
+
+7. How to obtain parameter weights and gradients during training
+-----------------------------------------------------------------
+
+In some situations, inspecting the weights (parameters) of the current mini-batch helps you observe concrete values during training and makes it easier to diagnose and quickly locate problems.
+You can print their values in the :code:`event_handler` (note: use :code:`paddle.event.EndForwardBackward` to make sure the values are also available when training on GPU).
+Example code:
+
+.. code-block:: python
+
+    ...
+    parameters = paddle.parameters.create(cost)
+    ...
+    def event_handler(event):
+        if isinstance(event, paddle.event.EndForwardBackward):
+            if event.batch_id % 25 == 0:
+                for p in parameters.keys():
+                    logger.info("Param %s, Grad %s",
+                                parameters.get(p), parameters.get_grad(p))
+
+Note: both "obtaining a layer's output during training" and "obtaining parameter weights and gradients during training" copy data from C++ to numpy, which affects training performance. Do not use them in performance-sensitive training scenarios.

paddle/framework/executor.cc

Lines changed: 11 additions & 0 deletions
@@ -23,6 +23,7 @@ limitations under the License. */
 #include "paddle/framework/op_registry.h"
 #include "paddle/platform/place.h"
 
+DECLARE_bool(do_memory_benchmark);
 DEFINE_bool(check_nan_inf, false,
             "Checking whether operator produce NAN/INF or not. It will be "
             "extremely slow so please use this flag wisely.");
@@ -117,6 +118,10 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id,
     auto op = paddle::framework::OpRegistry::CreateOp(*op_desc);
     VLOG(3) << op->DebugStringEx(local_scope);
     op->Run(*local_scope, place_);
+    if (FLAGS_do_memory_benchmark) {
+      VLOG(2) << "Memory used after operator " + op->Type() + " running: "
+              << memory::memory_usage(place_);
+    }
     if (FLAGS_check_nan_inf) {
       for (auto& vname : op->OutputVars(true)) {
         auto* var = local_scope->FindVar(vname);
@@ -130,6 +135,12 @@ void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id,
   if (create_vars && create_local_scope) {
     scope->DeleteScope(local_scope);
   }
+  if (FLAGS_do_memory_benchmark) {
+    VLOG(2) << "-------------------------------------------------------";
+    VLOG(2) << "Memory used after deleting local scope: "
+            << memory::memory_usage(place_);
+    VLOG(2) << "-------------------------------------------------------";
+  }
 }
 
 }  // namespace framework

paddle/framework/scope.cc

Lines changed: 10 additions & 2 deletions
@@ -20,6 +20,10 @@ limitations under the License. */
 #include "paddle/framework/threadpool.h"
 #include "paddle/string/printf.h"
 
+DEFINE_bool(do_memory_benchmark, false,
+            "Doing memory benchmark. It will make deleting scope synchronized, "
+            "and add some memory usage logs");
+
 namespace paddle {
 namespace framework {
 
@@ -88,8 +92,12 @@ void Scope::DeleteScope(Scope* scope) {
   auto it = std::find(this->kids_.begin(), this->kids_.end(), scope);
   PADDLE_ENFORCE(it != this->kids_.end(), "Cannot find %p as kid scope", scope);
   this->kids_.erase(it);
-  // Make delete async.
-  Async([scope] { delete scope; });
+  // When making memory benchmark on Fluid, we have to delete scope sync.
+  if (FLAGS_do_memory_benchmark) {
+    delete scope;
+  } else {
+    Async([scope] { delete scope; });
+  }
 }
 
 void Scope::Rename(const std::string& origin_name,

paddle/operators/compare_op.cc

Lines changed: 5 additions & 6 deletions
@@ -39,6 +39,11 @@ N-dim tensor. X and Y could be any type. The each element of the Out tensor is
 calculated by %s
 )DOC",
                               comment.type, comment.equation));
+    AddAttr<int>("axis",
+                 "(int, default -1). The start dimension index "
+                 "for broadcasting Y onto X.")
+        .SetDefault(-1)
+        .EqualGreaterThan(-1);
   }
 };
 
@@ -95,11 +100,5 @@ REGISTER_LOGICAL_OP(less_than, "Out = X < Y");
 REGISTER_LOGICAL_KERNEL(less_than, CPU, paddle::operators::LessThanFunctor);
 REGISTER_LOGICAL_OP(less_equal, "Out = X <= Y");
 REGISTER_LOGICAL_KERNEL(less_equal, CPU, paddle::operators::LessEqualFunctor);
-REGISTER_LOGICAL_OP(greater_than, "Out = X > Y");
-REGISTER_LOGICAL_KERNEL(greater_than, CPU,
-                        paddle::operators::GreaterThanFunctor);
-REGISTER_LOGICAL_OP(greater_equal, "Out = X >= Y");
-REGISTER_LOGICAL_KERNEL(greater_equal, CPU,
-                        paddle::operators::GreaterEqualFunctor);
 REGISTER_LOGICAL_OP(equal, "Out = X == Y");
 REGISTER_LOGICAL_KERNEL(equal, CPU, paddle::operators::EqualFunctor);
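The new axis attribute follows the broadcast rule of Paddle's elementwise operators: Y is aligned with the dimensions of X starting at index axis (trailing alignment when axis is -1). A minimal numpy sketch of the intended semantics, as an illustration only rather than the operator's actual implementation:

import numpy as np

# X has shape (2, 3, 4, 5); Y has shape (3, 4); axis=1 aligns Y with
# dims 1..2 of X, so Y is broadcast over dims 0 and 3.
x = np.random.rand(2, 3, 4, 5)
y = np.random.rand(3, 4)

out = x < y.reshape(1, 3, 4, 1)   # emulates less_than(X, Y, axis=1)
print(out.shape, out.dtype)       # (2, 3, 4, 5) bool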

paddle/operators/compare_op.cu

Lines changed: 0 additions & 4 deletions
@@ -16,8 +16,4 @@ limitations under the License. */
 
 REGISTER_LOGICAL_KERNEL(less_than, CUDA, paddle::operators::LessThanFunctor);
 REGISTER_LOGICAL_KERNEL(less_equal, CUDA, paddle::operators::LessEqualFunctor);
-REGISTER_LOGICAL_KERNEL(greater_than, CUDA,
-                        paddle::operators::GreaterThanFunctor);
-REGISTER_LOGICAL_KERNEL(greater_equal, CUDA,
-                        paddle::operators::GreaterEqualFunctor);
 REGISTER_LOGICAL_KERNEL(equal, CUDA, paddle::operators::EqualFunctor);

paddle/operators/compare_op.h

Lines changed: 2 additions & 20 deletions
@@ -16,6 +16,7 @@ limitations under the License. */
 #include <math.h>
 #include <type_traits>
 #include "paddle/framework/op_registry.h"
+#include "paddle/operators/elementwise_op_function.h"
 #include "paddle/platform/transform.h"
 
 namespace paddle {
@@ -33,18 +34,6 @@ struct LessEqualFunctor {
   HOSTDEVICE bool operator()(const T& a, const T& b) const { return a <= b; }
 };
 
-template <typename T>
-struct GreaterThanFunctor {
-  using ELEM_TYPE = T;
-  HOSTDEVICE bool operator()(const T& a, const T& b) const { return a > b; }
-};
-
-template <typename T>
-struct GreaterEqualFunctor {
-  using ELEM_TYPE = T;
-  HOSTDEVICE bool operator()(const T& a, const T& b) const { return a >= b; }
-};
-
 template <typename T>
 struct EqualFunctor {
   using ELEM_TYPE = T;
@@ -65,14 +54,7 @@ class CompareOpKernel
  public:
   void Compute(const framework::ExecutionContext& context) const override {
     using T = typename Functor::ELEM_TYPE;
-    auto* x = context.Input<framework::Tensor>("X");
-    auto* y = context.Input<framework::Tensor>("Y");
-    auto* out = context.Output<framework::Tensor>("Out");
-    Functor binary_func;
-    platform::Transform<DeviceContext> trans;
-    trans(context.template device_context<DeviceContext>(), x->data<T>(),
-          x->data<T>() + x->numel(), y->data<T>(),
-          out->mutable_data<bool>(context.GetPlace()), binary_func);
+    ElementwiseComputeEx<Functor, DeviceContext, T, bool>(context);
   }
 };

paddle/operators/elementwise_op_function.h

Lines changed: 8 additions & 6 deletions
@@ -176,14 +176,15 @@ class MidWiseTransformIterator<T, platform::CUDADeviceContext>
 };
 #endif
 
-template <typename Functor, typename T, typename DeviceContext>
+template <typename Functor, typename T, typename DeviceContext,
+          typename OutType = T>
 class TransformFunctor {
  public:
   TransformFunctor(const framework::Tensor* x, const framework::Tensor* y,
                    framework::Tensor* z, const DeviceContext& ctx, Functor func)
       : x_(x->data<T>()),
         y_(y->data<T>()),
-        z_(z->mutable_data<T>(ctx.GetPlace())),
+        z_(z->mutable_data<OutType>(ctx.GetPlace())),
         nx_(x->numel()),
         ctx_(ctx),
         func_(func) {}
@@ -208,7 +209,7 @@ class TransformFunctor {
  private:
   const T* x_;
   const T* y_;
-  T* z_;
+  OutType* z_;
   int64_t nx_;
   const DeviceContext& ctx_;
   Functor func_;
@@ -364,15 +365,16 @@ void ElementwiseGradCompute(const framework::ExecutionContext& ctx) {
   }
 }
 
-template <typename Functor, typename DeviceContext, typename T>
+template <typename Functor, typename DeviceContext, typename T,
+          typename OutType = T>
 void ElementwiseComputeEx(const framework::ExecutionContext& ctx) {
   using Tensor = framework::Tensor;
 
   auto* x = ctx.Input<Tensor>("X");
   auto* y = ctx.Input<Tensor>("Y");
   auto* z = ctx.Output<Tensor>("Out");
-  z->mutable_data<T>(ctx.GetPlace());
-  TransformFunctor<Functor, T, DeviceContext> functor(
+  z->mutable_data<OutType>(ctx.GetPlace());
+  TransformFunctor<Functor, T, DeviceContext, OutType> functor(
       x, y, z, ctx.template device_context<DeviceContext>(), Functor());
 
   auto x_dims = x->dims();
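The OutType template parameter (defaulting to T) exists because comparison kernels read elements of type T but must write bool results, so the output buffer cannot share the input element type. The same typing issue, sketched in numpy terms as an illustration rather than the C++ code itself:

import numpy as np

x = np.array([1.0, 5.0, 3.0])   # inputs have element type float (T)
y = np.array([2.0, 2.0, 2.0])
out = np.less(x, y)             # output is bool (OutType), not float
print(out, out.dtype)           # [ True False False] bool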

python/paddle/v2/dataset/__init__.py

Lines changed: 14 additions & 2 deletions
@@ -24,11 +24,23 @@
 import uci_housing
 import sentiment
 import wmt14
+import wmt16
 import mq2007
 import flowers
 import voc2012
 
 __all__ = [
-    'mnist', 'imikolov', 'imdb', 'cifar', 'movielens', 'conll05', 'sentiment'
-    'uci_housing', 'wmt14', 'mq2007', 'flowers', 'voc2012'
+    'mnist',
+    'imikolov',
+    'imdb',
+    'cifar',
+    'movielens',
+    'conll05',
+    'sentiment',
+    'uci_housing',
+    'wmt14',
+    'wmt16',
+    'mq2007',
+    'flowers',
+    'voc2012',
 ]
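Listing one name per line (with a trailing comma on every entry) also guards against a classic Python pitfall that the old one-line list fell into after 'sentiment': adjacent string literals concatenate implicitly, so a missing comma silently merges two entries instead of raising an error. A quick illustration:

names = ['sentiment' 'uci_housing']   # note: no comma between the literals
print(names)          # ['sentimentuci_housing'] -- one element, not two
print(len(names))     # 1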

python/paddle/v2/dataset/common.py

Lines changed: 15 additions & 6 deletions
@@ -25,8 +25,12 @@
 import cPickle as pickle
 
 __all__ = [
-    'DATA_HOME', 'download', 'md5file', 'split', 'cluster_files_reader',
-    'convert'
+    'DATA_HOME',
+    'download',
+    'md5file',
+    'split',
+    'cluster_files_reader',
+    'convert',
 ]
 
 DATA_HOME = os.path.expanduser('~/.cache/paddle/dataset')
@@ -58,12 +62,15 @@ def md5file(fname):
     return hash_md5.hexdigest()
 
 
-def download(url, module_name, md5sum):
+def download(url, module_name, md5sum, save_name=None):
     dirname = os.path.join(DATA_HOME, module_name)
     if not os.path.exists(dirname):
        os.makedirs(dirname)
 
-    filename = os.path.join(dirname, url.split('/')[-1])
+    filename = os.path.join(dirname,
+                            url.split('/')[-1]
+                            if save_name is None else save_name)
+
     retry = 0
     retry_limit = 3
     while not (os.path.exists(filename) and md5file(filename) == md5sum):
@@ -196,9 +203,11 @@ def convert(output_path, reader, line_count, name_prefix):
     Convert data from reader to recordio format files.
 
     :param output_path: directory in which output files will be saved.
-    :param reader: a data reader, from which the convert program will read data instances.
+    :param reader: a data reader, from which the convert program will read
+                   data instances.
     :param name_prefix: the name prefix of generated files.
-    :param max_lines_to_shuffle: the max lines numbers to shuffle before writing.
+    :param max_lines_to_shuffle: the max lines numbers to shuffle before
+                                 writing.
     """
 
     assert line_count >= 1
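A small sketch of how the new save_name parameter changes behavior. The URL and md5 checksum below are placeholders for illustration only; a real call needs a reachable URL and its actual md5:

from paddle.v2.dataset.common import download

# Default: the file keeps the basename of the URL, i.e. it is stored as
# ~/.cache/paddle/dataset/demo/data.tar.gz
download("http://example.com/data.tar.gz", "demo", "0123456789abcdef")

# With save_name, the same download is stored as
# ~/.cache/paddle/dataset/demo/train.tar.gz
download("http://example.com/data.tar.gz", "demo", "0123456789abcdef",
         save_name="train.tar.gz")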
Lines changed: 66 additions & 0 deletions
@@ -0,0 +1,66 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import paddle.v2.dataset.wmt16
+import unittest
+
+
+class TestWMT16(unittest.TestCase):
+    def checkout_one_sample(self, sample):
+        # train data has 3 fields: source language word indices,
+        # target language word indices, and target next word indices.
+        self.assertEqual(len(sample), 3)
+
+        # test start mark and end mark in source word indices.
+        self.assertEqual(sample[0][0], 0)
+        self.assertEqual(sample[0][-1], 1)
+
+        # test start mark in target word indices
+        self.assertEqual(sample[1][0], 0)
+
+        # test end mark in target next word indices
+        self.assertEqual(sample[2][-1], 1)
+
+    def test_train(self):
+        for idx, sample in enumerate(
+                paddle.v2.dataset.wmt16.train(
+                    src_dict_size=100000, trg_dict_size=100000)()):
+            if idx >= 10: break
+            self.checkout_one_sample(sample)
+
+    def test_test(self):
+        for idx, sample in enumerate(
+                paddle.v2.dataset.wmt16.test(
+                    src_dict_size=1000, trg_dict_size=1000)()):
+            if idx >= 10: break
+            self.checkout_one_sample(sample)
+
+    def test_val(self):
+        for idx, sample in enumerate(
+                paddle.v2.dataset.wmt16.validation(
+                    src_dict_size=1000, trg_dict_size=1000)()):
+            if idx >= 10: break
+            self.checkout_one_sample(sample)
+
+    def test_get_dict(self):
+        dict_size = 1000
+        word_dict = paddle.v2.dataset.wmt16.get_dict("en", dict_size, True)
+        self.assertEqual(len(word_dict), dict_size)
+        self.assertEqual(word_dict[0], "<s>")
+        self.assertEqual(word_dict[1], "<e>")
+        self.assertEqual(word_dict[2], "<unk>")
+
+
+if __name__ == "__main__":
+    unittest.main()
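Beyond the unit test, a minimal reader loop for the new wmt16 dataset might look like the sketch below. The call signatures are taken from the test above; the dictionary sizes are arbitrary, and the first call will download the dataset:

import paddle.v2.dataset.wmt16 as wmt16

# Each sample is (source ids, target ids, target next-word ids); ids 0 and 1
# are the start/end marks the test above checks for.
for src_ids, trg_ids, trg_next_ids in wmt16.train(
        src_dict_size=1000, trg_dict_size=1000)():
    print(len(src_ids), len(trg_ids), len(trg_next_ids))
    break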
