Skip to content

Commit a5734f7

Browse files
committed
Merge branch 'develop' into stride
2 parents d369577 + d94e1f5 commit a5734f7

File tree

20 files changed

+346
-65
lines changed

20 files changed

+346
-65
lines changed

doc/templates/conf.py.cn.in

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -55,6 +55,7 @@ extensions = [
5555
'sphinx.ext.napoleon',
5656
'sphinx.ext.graphviz'
5757
]
58+
mathjax_path="https://cdn.bootcss.com/mathjax/2.7.0/MathJax.js"
5859
table_styling_embed_css = True
5960

6061
autodoc_member_order = 'bysource'

paddle/gserver/layers/AgentLayer.cpp

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,8 @@ void AgentLayer::forward(PassType passType) {
4242
// get Arguments from real layers
4343
if (numSamples_ > 0 && numSamples_ < realHeight) {
4444
if (realOutput.ids) {
45-
output_.ids->subVecFrom(*realOutput.ids, 0, numSamples_);
45+
output_.ids =
46+
IVector::create(realOutput.ids->getData(), numSamples_, useGpu_);
4647
} else {
4748
output_.subArgFrom(
4849
realOutput, /* offset */ 0, numSamples_, getSize(), useGpu_);

paddle/gserver/layers/ExpandConvBaseLayer.cpp

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -107,6 +107,10 @@ void ExpandConvBaseLayer::expandOneFrame(MatrixPtr image,
107107
int channel = isDeconv_ ? numFilters_ : channels_[inIdx];
108108

109109
resetExpandInput(subK_[inIdx] * groups_[inIdx], subN_[inIdx]);
110+
111+
CHECK_EQ(image->getWidth(),
112+
static_cast<size_t>(imgSizeH_[inIdx] * imgSizeW_[inIdx] * channel));
113+
110114
real *imgData = image->getData() + startIdx * image->getWidth();
111115
MatrixPtr imageTmp =
112116
Matrix::create(imgData,

paddle/gserver/layers/HierarchicalSigmoidLayer.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@ namespace paddle {
3636
* | |- 5
3737
* |
3838
* |-*- 0
39-
* |- 1
39+
* |- 1
4040
* @endcode
4141
*
4242
* where * indicates an internal node, and each leaf node represents a class.

paddle/scripts/docker/README.md

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -94,7 +94,7 @@ docker build -t paddle:dev --build-arg UBUNTU_MIRROR=mirror://mirrors.ubuntu.com
9494
Given the development image `paddle:dev`, the following command builds PaddlePaddle from the source tree on the development computer (host):
9595

9696
```bash
97-
docker run -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_AVX=ON" -e "WITH_TEST=OFF" -e "RUN_TEST=OFF" paddle:dev
97+
docker run --rm -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_AVX=ON" -e "WITH_TEST=OFF" -e "RUN_TEST=OFF" paddle:dev
9898
```
9999

100100
This command mounts the source directory on the host into `/paddle` in the container, so the default entry point of `paddle:dev`, `build.sh`, could build the source code with possible local changes. When it writes to `/paddle/build` in the container, it actually writes to `$PWD/build` on the host.
@@ -110,7 +110,7 @@ Users can specify the following Docker build arguments with either "ON" or "OFF"
110110
- `WITH_AVX`: ***Required***. Setting it to "OFF" prevents generating AVX instructions. If you don't know what AVX is, you might want to set "ON".
111111
- `WITH_TEST`: ***Optional, default OFF***. Build unit test binaries. Once you've built the unit tests, you can run these tests manually by the following command:
112112
```bash
113-
docker run -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_AVX=ON" paddle:dev sh -c "cd /paddle/build; make coverall"
113+
docker run --rm -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_AVX=ON" paddle:dev sh -c "cd /paddle/build; make coverall"
114114
```
115115
- `RUN_TEST`: ***Optional, default OFF***. Run unit tests after building. You can't run unit tests without building them first.
116116

@@ -129,7 +129,7 @@ This production image is minimal -- it includes binary `paddle`, the shared libr
129129
Again the development happens on the host. Suppose that we have a simple application program in `a.py`, we can test and run it using the production image:
130130

131131
```bash
132-
docker run -it -v $PWD:/work paddle /work/a.py
132+
docker run --rm -it -v $PWD:/work paddle /work/a.py
133133
```
134134

135135
But this works only if all dependencies of `a.py` are in the production image. If this is not the case, we need to build a new Docker image from the production image with more dependencies installed.

python/CMakeLists.txt

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,8 +24,9 @@ add_custom_target(paddle_python ALL DEPENDS
2424
${OUTPUT_DIR}/.timestamp)
2525

2626
add_subdirectory(paddle/trainer_config_helpers/tests)
27-
add_subdirectory(paddle/v2/reader/tests)
2827
add_subdirectory(paddle/v2/tests)
28+
add_subdirectory(paddle/v2/reader/tests)
29+
add_subdirectory(paddle/v2/plot/tests)
2930

3031
install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/dist/
3132
DESTINATION opt/paddle/share/wheels

python/paddle/v2/__init__.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,19 +21,22 @@
2121
import topology
2222
import data_feeder
2323
import networks
24+
import evaluator
2425
from . import dataset
2526
from . import reader
27+
from . import plot
2628
import attr
2729
import pooling
2830
import inference
2931
import networks
3032
import py_paddle.swig_paddle as api
3133
import minibatch
34+
import plot
3235

3336
__all__ = [
3437
'optimizer', 'layer', 'activation', 'parameters', 'init', 'trainer',
3538
'event', 'data_type', 'attr', 'pooling', 'data_feeder', 'dataset', 'reader',
36-
'topology', 'networks', 'infer'
39+
'topology', 'networks', 'infer', 'plot', 'evaluator'
3740
]
3841

3942

python/paddle/v2/config_base.py

Lines changed: 54 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -65,13 +65,27 @@ class Layer(object):
6565
def __init__(self, name=None, parent_layers=None):
6666
assert isinstance(parent_layers, dict)
6767
self.name = name
68-
self.__contex__ = {}
68+
self.__context__ = {}
6969
self.__parent_layers__ = parent_layers
70+
self.__children_layers__ = [] # used for evaluator.
71+
72+
def append_child(self, layer, parent_names):
73+
self.__children_layers__.append((layer, parent_names))
7074

7175
def to_proto(self, context):
7276
"""
7377
function to set proto attribute
7478
"""
79+
self.__context__ = context
80+
81+
# short cut if myself is parsed before.
82+
if self.context_name() in context:
83+
if self.use_context_name():
84+
return context[self.context_name()]
85+
else:
86+
return context[self.name]
87+
88+
# parse parent before myself
7589
kwargs = dict()
7690
for layer_name in self.__parent_layers__:
7791
if not isinstance(self.__parent_layers__[layer_name],
@@ -83,12 +97,29 @@ def to_proto(self, context):
8397
self.__parent_layers__[layer_name])
8498
kwargs[layer_name] = v1_layer
8599

100+
# parse myself.
101+
ret_val = self.to_proto_impl(**kwargs)
102+
103+
if self.context_name() is not None and \
104+
self.context_name() not in context:
105+
context[self.context_name()] = ret_val
106+
107+
# parse children.
108+
for layer, pnames in self.__children_layers__:
109+
drop = False
110+
111+
# child will only be parsed if all parents are in context.
112+
for pname in pnames:
113+
if pname not in context:
114+
drop = True
115+
break
116+
if drop:
117+
continue
118+
layer.to_proto(context=context)
119+
86120
if self.context_name() is None:
87-
return self.to_proto_impl(**kwargs)
88-
elif self.context_name() not in context:
89-
context[self.context_name()] = self.to_proto_impl(**kwargs)
90-
self.__contex__ = context
91-
if self.use_context_name():
121+
return ret_val
122+
elif self.use_context_name():
92123
return context[self.context_name()]
93124
else:
94125
return context[self.name]
@@ -113,10 +144,13 @@ def calculate_size(self):
113144
this layer is called.
114145
:return:
115146
"""
116-
return self.__contex__[self.context_name()].size
147+
return self.__context__[self.context_name()].size
117148

118149

119-
def __convert_to_v2__(method_name, parent_names, is_default_name=True):
150+
def __convert_to_v2__(method_name,
151+
parent_names,
152+
is_default_name=True,
153+
attach_parent=False):
120154
if is_default_name:
121155
wrapper = wrap_name_default(name_prefix=method_name)
122156
else:
@@ -129,9 +163,20 @@ def __init__(self, **kwargs):
129163
parent_layers = dict()
130164
other_kwargs = dict()
131165
for pname in parent_names:
132-
if kwargs.has_key(pname):
166+
if pname in kwargs:
133167
parent_layers[pname] = kwargs[pname]
134168

169+
if attach_parent:
170+
pnames = [x.context_name() for x in parent_layers.values()]
171+
172+
for pname in parent_layers:
173+
layers = kwargs[pname]
174+
if not isinstance(layers, collections.Sequence):
175+
layers = [layers]
176+
177+
for layer in layers:
178+
layer.append_child(self, pnames)
179+
135180
for key in kwargs.keys():
136181
if key not in parent_names:
137182
other_kwargs[key] = kwargs[key]

python/paddle/v2/dataset/wmt14.py

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,8 +15,10 @@
1515
wmt14 dataset
1616
"""
1717
import tarfile
18+
import gzip
1819

1920
from paddle.v2.dataset.common import download
21+
from paddle.v2.parameters import Parameters
2022

2123
__all__ = ['train', 'test', 'build_dict']
2224

@@ -25,6 +27,9 @@
2527
# this is a small set of data for testing. The original data is too large and will be added later.
2628
URL_TRAIN = 'http://paddlepaddle.cdn.bcebos.com/demo/wmt_shrinked_data/wmt14.tgz'
2729
MD5_TRAIN = 'a755315dd01c2c35bde29a744ede23a6'
30+
# this is the pretrained model, whose bleu = 26.92
31+
URL_MODEL = 'http://paddlepaddle.bj.bcebos.com/demo/wmt_14/wmt14_model.tar.gz'
32+
MD5_MODEL = '6b097d23e15654608c6f74923e975535'
2833

2934
START = "<s>"
3035
END = "<e>"
@@ -103,5 +108,13 @@ def test(dict_size):
103108
download(URL_TRAIN, 'wmt14', MD5_TRAIN), 'test/test', dict_size)
104109

105110

111+
def model():
112+
tar_file = download(URL_MODEL, 'wmt14', MD5_MODEL)
113+
with gzip.open(tar_file, 'r') as f:
114+
parameters = Parameters.from_tar(f)
115+
return parameters
116+
117+
106118
def fetch():
107119
download(URL_TRAIN, 'wmt14', MD5_TRAIN)
120+
download(URL_MODEL, 'wmt14', MD5_MODEL)

python/paddle/v2/evaluator.py

Lines changed: 47 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,47 @@
1+
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
15+
import paddle.trainer_config_helpers.evaluators as evs
16+
import inspect
17+
from config_base import __convert_to_v2__
18+
19+
__all__ = []
20+
21+
22+
def initialize():
23+
def convert_to_new_name(nm):
24+
return nm[:-len("_evaluator")]
25+
26+
for __ev_name__ in filter(lambda x: x.endswith('_evaluator'), evs.__all__):
27+
__ev__ = getattr(evs, __ev_name__)
28+
if hasattr(__ev__, 'argspec'):
29+
argspec = __ev__.argspec
30+
else:
31+
argspec = inspect.getargspec(__ev__)
32+
parent_names = filter(lambda x: x in ['input', 'label', 'weight'],
33+
argspec.args)
34+
v2_ev = __convert_to_v2__(
35+
__ev_name__,
36+
parent_names=parent_names,
37+
is_default_name='name' in argspec.args,
38+
attach_parent=True)
39+
40+
__new_name__ = convert_to_new_name(__ev_name__)
41+
42+
globals()[__new_name__] = v2_ev
43+
globals()[__new_name__].__name__ = __new_name__
44+
__all__.append(__new_name__)
45+
46+
47+
initialize()

0 commit comments

Comments
 (0)