Commit 94e38bb

Merge branch 'develop' of github.com:baidu/Paddle into memory.set_input

2 parents: bd4ec1b + 53090e3

File tree: 6 files changed (+13 / -11 lines)

cmake/FindSphinx.cmake

Lines changed: 1 addition & 1 deletion

@@ -72,7 +72,7 @@ function( Sphinx_add_target target_name builder conf cache source destination )
     ${source}
     ${destination}
     COMMENT "Generating sphinx documentation: ${builder}"
-    COMMAND cd ${destination} && ln -s ./index_*.html index.html
+    COMMAND cd ${destination} && ln -sf ./index_*.html index.html
     )

   set_property(
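
The one-character change above swaps `ln -s` for `ln -sf`: without `-f`, re-running the documentation target fails as soon as index.html already exists, while `-f` replaces the existing link. A minimal Python sketch of the same force-symlink behaviour (illustrative only, not part of the commit):

import os

def force_symlink(src, dst):
    # Mirror `ln -sf`: remove any existing file or link at dst first,
    # so a repeated doc build stays idempotent instead of erroring out.
    if os.path.islink(dst) or os.path.exists(dst):
        os.remove(dst)
    os.symlink(src, dst)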

paddle/gserver/layers/SequencePoolLayer.cpp

Lines changed: 4 additions & 5 deletions
@@ -56,17 +56,16 @@ void SequencePoolLayer::forward(PassType passType) {
   CHECK_EQ(newBatchSize_, starts->getSize() - 1);

   resetOutput(newBatchSize_, dim);
-  if (type_) {
-    CHECK(input.subSequenceStartPositions)
-        << "when trans_type = seq, input must hasSubseq";
-  }
+
   /* If type_ = kNonSeq, both seq has or not has sub-seq degrade to a non-seq,
    * thus, in this case, output_ has no sequenceStartPositions.
    * If type_ = kSeq, seq has sub-seq degrades to a seq, thus, only in this
    * case, we should compute the new sequenceStartPositions.
    */
   if (type_) {
-    output_.degradeSequence(input, useGpu_);
+    CHECK(input.subSequenceStartPositions)
+        << "when trans_type = seq, input must hasSubseq";
+    output_.degradeSequence(input);
   }
 }
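
The comment in the hunk explains the two pooling modes: with trans_type = non-seq, pooling collapses each top-level sequence, so no sequence information survives; with trans_type = seq, pooling collapses each sub-sequence, so the output is still a plain sequence and the input must carry sub-sequence boundaries, which is why the CHECK moves inside the `if (type_)` branch. A toy numpy sketch of the idea (illustrative only, not Paddle's implementation; mean pooling and the toy shapes are assumptions):

import numpy as np

def seq_pool(x, starts):
    # Pool each frame [starts[i], starts[i+1]) of x down to one row.
    return np.stack([x[a:b].mean(axis=0) for a, b in zip(starts, starts[1:])])

x = np.arange(12.0).reshape(6, 2)   # 6 time steps, dim 2
# non-seq: pool the two top-level sequences [0,4) and [4,6) -> 2 rows,
# and the result no longer has sequenceStartPositions.
non_seq_out = seq_pool(x, [0, 4, 6])
# seq: pool the sub-sequences [0,2), [2,4), [4,6) -> 3 rows, which still
# form sequences; degradeSequence computes their new start positions.
seq_out = seq_pool(x, [0, 2, 4, 6])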

paddle/parameter/Argument.cpp

Lines changed: 1 addition & 1 deletion
@@ -583,7 +583,7 @@ void Argument::checkSubset() const {
   }
 }

-void Argument::degradeSequence(const Argument& input, bool useGpu) {
+void Argument::degradeSequence(const Argument& input) {
   CHECK_EQ(input.hasSubseq(), 1UL);
   size_t numSequences = input.getNumSequences();
   size_t numSubSequences = input.getNumSubSequences();
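
The useGpu parameter is dropped from degradeSequence here and at its declaration in Argument.h below; the function only manipulates start-position indices. A small Python sketch of what "degrading" computes (not the C++ implementation), assuming both position lists end with the total length:

def degrade_sequence(seq_starts, sub_seq_starts):
    # For each top-level sequence start, find the index of the first
    # sub-sequence beginning there; those indices become the start
    # positions of the degraded output (one element per sub-sequence).
    new_starts = []
    sub_idx = 0
    for start in seq_starts:
        while sub_seq_starts[sub_idx] < start:
            sub_idx += 1
        new_starts.append(sub_idx)
    return new_starts

# two sequences [0,5) and [5,8); the first holds sub-sequences [0,2),[2,5)
assert degrade_sequence([0, 5, 8], [0, 2, 5, 8]) == [0, 2, 3]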

paddle/parameter/Argument.h

Lines changed: 1 addition & 1 deletion
@@ -296,7 +296,7 @@ struct Argument {
   /*
     sequence has sub-sequence degrades to a sequence.
   */
-  void degradeSequence(const Argument& input, bool useGpu);
+  void degradeSequence(const Argument& input);

   /**
    * @brief getValueString will return the argument's output in string. There

python/paddle/v2/dataset/wmt14.py

Lines changed: 1 addition & 1 deletion
@@ -23,7 +23,7 @@
 URL_DEV_TEST = 'http://www-lium.univ-lemans.fr/~schwenk/cslm_joint_paper/data/dev+test.tgz'
 MD5_DEV_TEST = '7d7897317ddd8ba0ae5c5fa7248d3ff5'
 # this is a small set of data for test. The original data is too large and will be add later.
-URL_TRAIN = 'http://paddlepaddle.bj.bcebos.com/demo/wmt_shrinked_data/wmt14.tgz'
+URL_TRAIN = 'http://paddlepaddle.cdn.bcebos.com/demo/wmt_shrinked_data/wmt14.tgz'
 MD5_TRAIN = 'a755315dd01c2c35bde29a744ede23a6'

 START = "<s>"
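
Only the download host changes here (bj -> cdn on bcebos.com); the archive and its MD5 stay the same, so previously cached copies remain valid. A rough sketch of how such a URL/MD5 pair is typically consumed (the `fetch` helper is hypothetical, not the paddle.v2.dataset downloader):

import hashlib
import os
import urllib.request

def fetch(url, expected_md5, path):
    # Reuse a cached file when present; always verify the checksum, so
    # a swapped mirror cannot silently hand back corrupted data.
    if not os.path.exists(path):
        urllib.request.urlretrieve(url, path)
    with open(path, 'rb') as f:
        actual = hashlib.md5(f.read()).hexdigest()
    assert actual == expected_md5, 'MD5 mismatch for %s' % url
    return path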

python/paddle/v2/tests/test_layer.py

Lines changed: 5 additions & 2 deletions
@@ -22,7 +22,9 @@

 pixel = layer.data(name='pixel', type=data_type.dense_vector(128))
 label = layer.data(name='label', type=data_type.integer_value(10))
-weight = layer.data(name='weight', type=data_type.dense_vector(10))
+weight = layer.data(name='weight', type=data_type.dense_vector(1))
+combine_weight = layer.data(
+    name='weight_combine', type=data_type.dense_vector(10))
 score = layer.data(name='score', type=data_type.dense_vector(1))

 hidden = layer.fc(input=pixel,

@@ -81,7 +83,8 @@ def test_aggregate_layer(self):
 class MathLayerTest(unittest.TestCase):
     def test_math_layer(self):
         addto = layer.addto(input=[pixel, pixel])
-        linear_comb = layer.linear_comb(weights=weight, vectors=hidden, size=10)
+        linear_comb = layer.linear_comb(
+            weights=combine_weight, vectors=hidden, size=10)
         interpolation = layer.interpolation(
             input=[hidden, hidden], weight=score)
         bilinear = layer.bilinear_interp(input=conv, out_size_x=4, out_size_y=4)
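
The test previously reused a 10-dimensional `weight` for two different roles; it now keeps `weight` as a scalar (dense_vector(1)) and adds a dedicated 10-dimensional `combine_weight` for linear_comb. In Paddle's linear_comb, an M-dimensional weight vector combines an input viewed as M vectors of size N into an output of size N; a rough numpy sketch of that contract (shapes assumed from the test, where size=10):

import numpy as np

M, N = 10, 10              # combine_weight dim and linear_comb size
w = np.random.rand(M)      # plays the role of combine_weight
V = np.random.rand(M, N)   # hidden's activation, viewed as M vectors of size N
out = w @ V                # weighted sum of the M vectors -> size N
assert out.shape == (N,)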
