Skip to content

Commit 3569466

Browse files
author
yi.wu
committed
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into dockerreadme
2 parents 5d0f9d3 + 4db0471 commit 3569466

File tree

23 files changed

+505
-229
lines changed

23 files changed

+505
-229
lines changed

.dockerignore

Lines changed: 0 additions & 1 deletion
This file was deleted.

.dockerignore

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,15 @@
1+
*.DS_Store
2+
build/
3+
*.user
4+
.vscode
5+
.idea
6+
.project
7+
.cproject
8+
.pydevproject
9+
Makefile
10+
.test_env/
11+
third_party/
12+
*~
13+
bazel-*
14+
15+
!build/*.deb

Dockerfile

Lines changed: 1 addition & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -3,20 +3,17 @@
33
FROM nvidia/cuda:7.5-cudnn5-devel-ubuntu14.04
44
MAINTAINER PaddlePaddle Authors <[email protected]>
55

6-
ARG DEBIAN_FRONTEND=noninteractive
76
ARG UBUNTU_MIRROR
87
RUN /bin/bash -c 'if [[ -n ${UBUNTU_MIRROR} ]]; then sed -i 's#http://archive.ubuntu.com/ubuntu#${UBUNTU_MIRROR}#g' /etc/apt/sources.list; fi'
98

109
# ENV variables
1110
ARG BUILD_WOBOQ
12-
ARG BUILD_AND_INSTALL
1311
ARG WITH_GPU
1412
ARG WITH_AVX
1513
ARG WITH_DOC
1614
ARG WITH_STYLE_CHECK
1715

1816
ENV BUILD_WOBOQ=${BUILD_WOBOQ:-OFF}
19-
ENV BUILD_AND_INSTALL=${BUILD_AND_INSTALL:-OFF}
2017
ENV WITH_GPU=${WITH_AVX:-OFF}
2118
ENV WITH_AVX=${WITH_AVX:-ON}
2219
ENV WITH_DOC=${WITH_DOC:-OFF}
@@ -31,7 +28,7 @@ RUN apt-get update && \
3128
apt-get install -y wget unzip tar xz-utils bzip2 gzip coreutils && \
3229
apt-get install -y curl sed grep graphviz libjpeg-dev zlib1g-dev && \
3330
apt-get install -y python-numpy python-matplotlib gcc g++ gfortran && \
34-
apt-get install -y automake locales clang-format-3.8 && \
31+
apt-get install -y automake locales clang-format-3.8 swig && \
3532
apt-get clean -y
3633

3734
# git credential to skip password typing
@@ -51,8 +48,6 @@ RUN curl -sSL https://cmake.org/files/v3.4/cmake-3.4.1.tar.gz | tar -xz && \
5148
cd cmake-3.4.1 && ./bootstrap && make -j `nproc` && make install && \
5249
cd .. && rm -rf cmake-3.4.1
5350

54-
RUN apt-get install -y swig
55-
5651
VOLUME ["/usr/share/nginx/html/data", "/usr/share/nginx/html/paddle"]
5752

5853
# Configure OpenSSH server. c.f. https://docs.docker.com/engine/examples/running_ssh_service

README.md

Lines changed: 24 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -2,8 +2,8 @@
22

33

44
[![Build Status](https://travis-ci.org/PaddlePaddle/Paddle.svg?branch=develop)](https://travis-ci.org/PaddlePaddle/Paddle)
5-
[![Documentation Status](https://img.shields.io/badge/docs-latest-brightgreen.svg?style=flat)](http://www.paddlepaddle.org/)
6-
[![Documentation Status](https://img.shields.io/badge/中文文档-最新-brightgreen.svg)](http://www.paddlepaddle.org/cn/index.html)
5+
[![Documentation Status](https://img.shields.io/badge/docs-latest-brightgreen.svg?style=flat)](http://www.paddlepaddle.org/develop/doc/)
6+
[![Documentation Status](https://img.shields.io/badge/中文文档-最新-brightgreen.svg)](http://www.paddlepaddle.org/doc_cn/)
77
[![Coverage Status](https://coveralls.io/repos/github/PaddlePaddle/Paddle/badge.svg?branch=develop)](https://coveralls.io/github/PaddlePaddle/Paddle?branch=develop)
88
[![Release](https://img.shields.io/github/release/PaddlePaddle/Paddle.svg)](https://github.com/PaddlePaddle/Paddle/releases)
99
[![License](https://img.shields.io/badge/license-Apache%202-blue.svg)](LICENSE)
@@ -59,36 +59,36 @@ Please refer to our [release announcement](https://github.com/PaddlePaddle/Paddl
5959
the capability of PaddlePaddle to make a huge impact for your product.
6060

6161
## Installation
62-
Check out the [Install Guide](http://paddlepaddle.org/doc/build/) to install from
63-
pre-built packages (**docker image**, **deb package**) or
64-
directly build on **Linux** and **Mac OS X** from the source code.
62+
63+
It is recommended to check out the
64+
[Docker installation guide](http://www.paddlepaddle.org/develop/doc/getstarted/build_and_install/docker_install_en.html)
65+
before looking into the
66+
[build from source guide](http://www.paddlepaddle.org/develop/doc/getstarted/build_and_install/build_from_source_en.html)
6567

6668
## Documentation
67-
Both [English Docs](http://paddlepaddle.org/doc/) and [Chinese Docs](http://paddlepaddle.org/doc_cn/) are provided for our users and developers.
6869

69-
- [Quick Start](http://paddlepaddle.org/doc/demo/quick_start/index_en) <br>
70-
You can follow the quick start tutorial to learn how to use PaddlePaddle
71-
step-by-step.
70+
We provide [English](http://www.paddlepaddle.org/develop/doc/) and
71+
[Chinese](http://www.paddlepaddle.org/doc_cn/) documentation.
72+
73+
- [Deep Learning 101](http://book.paddlepaddle.org/index.en.html)
74+
75+
You might want to start from this online interactive book that can run in Jupyter Notebook.
76+
77+
- [Distributed Training](http://www.paddlepaddle.org/develop/doc/howto/usage/cluster/cluster_train_en.html)
78+
79+
You can run distributed training jobs on MPI clusters.
80+
81+
- [Distributed Training on Kubernetes](http://www.paddlepaddle.org/develop/doc/howto/usage/k8s/k8s_en.html)
7282

73-
- [Example and Demo](http://paddlepaddle.org/doc/demo/) <br>
74-
We provide five demos, including: image classification, sentiment analysis,
75-
sequence to sequence model, recommendation, semantic role labeling.
83+
You can also run distributed training jobs on Kubernetes clusters.
7684

77-
- [Distributed Training](http://paddlepaddle.org/doc/cluster) <br>
78-
This system supports training deep learning models on multiple machines
79-
with data parallelism.
85+
- [Python API](http://www.paddlepaddle.org/develop/doc/api/index_en.html)
8086

81-
- [Python API](http://paddlepaddle.org/doc/ui/) <br>
82-
PaddlePaddle supports using either Python interface or C++ to build your
83-
system. We also use SWIG to wrap C++ source code to create a user friendly
84-
interface for Python. You can also use SWIG to create interface for your
85-
favorite programming language.
87+
Our new API enables much shorter programs.
8688

87-
- [How to Contribute](http://paddlepaddle.org/doc/build/contribute_to_paddle.html) <br>
88-
We sincerely appreciate your interest and contributions. If you would like to
89-
contribute, please read the contribution guide.
89+
- [How to Contribute](http://www.paddlepaddle.org/develop/doc/howto/dev/contribute_to_paddle_en.html)
9090

91-
- [Source Code Documents](http://paddlepaddle.org/doc/source/) <br>
91+
We appreciate your contributions!
9292

9393
## Ask Questions
9494

cmake/FindSphinx.cmake

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -72,7 +72,7 @@ function( Sphinx_add_target target_name builder conf cache source destination )
7272
${source}
7373
${destination}
7474
COMMENT "Generating sphinx documentation: ${builder}"
75-
COMMAND cd ${destination} && ln -s ./index_*.html index.html
75+
COMMAND cd ${destination} && ln -sf ./index_*.html index.html
7676
)
7777

7878
set_property(

paddle/gserver/dataproviders/DataProvider.h

Lines changed: 0 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -164,15 +164,6 @@ class DataBatch {
164164
argu.value = value;
165165
data_.push_back(argu);
166166
}
167-
/**
168-
* @brief Append user defined data
169-
* @param[in] ptr user defined data
170-
*/
171-
void appendUserDefinedPtr(UserDefinedVectorPtr ptr) {
172-
Argument argu;
173-
argu.udp = ptr;
174-
data_.push_back(argu);
175-
}
176167

177168
/*
178169
* @brief Append argument

paddle/gserver/layers/CostLayer.cpp

Lines changed: 53 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -192,6 +192,59 @@ void SumOfSquaresCostLayer::backwardImp(Matrix& output,
192192
outputG.sumOfSquaresBp(output, *label.value);
193193
}
194194

195+
//
196+
// class SmoothL1CostLayer
197+
//
198+
199+
REGISTER_LAYER(smooth_l1, SmoothL1CostLayer);
200+
201+
bool SmoothL1CostLayer::init(const LayerMap& layerMap,
202+
const ParameterMap& parameterMap) {
203+
return CostLayer::init(layerMap, parameterMap);
204+
}
205+
206+
void SmoothL1CostLayer::forwardImp(Matrix& output,
207+
Argument& label,
208+
Matrix& target) {
209+
MatrixPtr targetCpu, outputCpu, labelCpu;
210+
if (useGpu_) {
211+
targetCpu =
212+
Matrix::create(target.getHeight(), target.getWidth(), false, false);
213+
outputCpu =
214+
Matrix::create(output.getHeight(), output.getWidth(), false, false);
215+
labelCpu = Matrix::create(
216+
label.value->getHeight(), label.value->getWidth(), false, false);
217+
targetCpu->copyFrom(target);
218+
outputCpu->copyFrom(output);
219+
labelCpu->copyFrom(*label.value);
220+
targetCpu->smoothL1(*outputCpu, *(labelCpu));
221+
target.copyFrom(*targetCpu);
222+
} else {
223+
target.smoothL1(output, *label.value);
224+
}
225+
}
226+
227+
void SmoothL1CostLayer::backwardImp(Matrix& output,
228+
Argument& label,
229+
Matrix& outputG) {
230+
MatrixPtr outputGCpu, outputCpu, labelCpu;
231+
if (useGpu_) {
232+
outputGCpu =
233+
Matrix::create(outputG.getHeight(), outputG.getWidth(), false, false);
234+
outputCpu =
235+
Matrix::create(output.getHeight(), output.getWidth(), false, false);
236+
labelCpu = Matrix::create(
237+
label.value->getHeight(), label.value->getWidth(), false, false);
238+
outputGCpu->copyFrom(outputG);
239+
outputCpu->copyFrom(output);
240+
labelCpu->copyFrom(*label.value);
241+
outputGCpu->smoothL1Bp(*outputCpu, *labelCpu);
242+
outputG.copyFrom(*outputGCpu);
243+
} else {
244+
outputG.smoothL1Bp(output, *label.value);
245+
}
246+
}
247+
195248
//
196249
// class RankingCost
197250
//

paddle/gserver/layers/CostLayer.h

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -159,6 +159,29 @@ class SumOfSquaresCostLayer : public CostLayer {
159159
Matrix& outputGrad) override;
160160
};
161161

162+
/**
163+
* This cost layer computes the smooth L1 loss for real-valued regression
164+
* tasks.
165+
* \f[
* L =
*   \begin{cases}
*   0.5 \, (output - label)^2, & \text{if } |output - label| < 1 \\
*   |output - label| - 0.5,    & \text{otherwise}
*   \end{cases}
* \f]
170+
*/
171+
class SmoothL1CostLayer : public CostLayer {
172+
public:
173+
explicit SmoothL1CostLayer(const LayerConfig& config) : CostLayer(config) {}
174+
175+
bool init(const LayerMap& layerMap,
176+
const ParameterMap& parameterMap) override;
177+
178+
void forwardImp(Matrix& output, Argument& label, Matrix& cost) override;
179+
180+
void backwardImp(Matrix& outputValue,
181+
Argument& label,
182+
Matrix& outputGrad) override;
183+
};
184+
162185
/**
163186
* A cost layer for learning to rank (LTR) task. This layer contains at least
164187
* three inputs.

paddle/gserver/layers/SequencePoolLayer.cpp

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -56,17 +56,16 @@ void SequencePoolLayer::forward(PassType passType) {
5656
CHECK_EQ(newBatchSize_, starts->getSize() - 1);
5757

5858
resetOutput(newBatchSize_, dim);
59-
if (type_) {
60-
CHECK(input.subSequenceStartPositions)
61-
<< "when trans_type = seq, input must hasSubseq";
62-
}
59+
6360
/* If type_ = kNonSeq, both seq has or not has sub-seq degrade to a non-seq,
6461
* thus, in this case, output_ has no sequenceStartPositions.
6562
* If type_ = kSeq, seq has sub-seq degrades to a seq, thus, only in this
6663
* case, we should compute the new sequenceStartPositions.
6764
*/
6865
if (type_) {
69-
output_.degradeSequence(input, useGpu_);
66+
CHECK(input.subSequenceStartPositions)
67+
<< "when trans_type = seq, input must hasSubseq";
68+
output_.degradeSequence(input);
7069
}
7170
}
7271

paddle/gserver/tests/test_LayerGrad.cpp

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1602,6 +1602,20 @@ TEST(Layer, PadLayer) {
16021602
}
16031603
}
16041604

1605+
TEST(Layer, smooth_l1) {
1606+
TestConfig config;
1607+
config.layerConfig.set_type("smooth_l1");
1608+
1609+
config.inputDefs.push_back({INPUT_DATA, "layer_0", 1, 0});
1610+
config.inputDefs.push_back({INPUT_DATA_TARGET, "layer_1", 1, 0});
1611+
config.layerConfig.add_inputs();
1612+
config.layerConfig.add_inputs();
1613+
1614+
for (auto useGpu : {false, true}) {
1615+
testLayerGrad(config, "smooth_l1", 100, false, useGpu, false, 2.0);
1616+
}
1617+
}
1618+
16051619
int main(int argc, char** argv) {
16061620
testing::InitGoogleTest(&argc, argv);
16071621
initMain(argc, argv);

0 commit comments

Comments
 (0)