Skip to content

Commit a6ad9a1

Browse files
author
xuwei06
committed
Fix unittest
Change-Id: Ic80845c892c96c37a0df0ddc433fe1aeaa5a9d1c
1 parent bf6f690 commit a6ad9a1

File tree

4 files changed

+72
-15
lines changed

4 files changed

+72
-15
lines changed

paddle/gserver/layers/CostLayer.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -605,7 +605,7 @@ class SumCostLayer : public Layer {
605605
int batchSize = input->getHeight();
606606
int size = 1;
607607
resizeOutput(batchSize, size);
608-
output_.value->sumRows(*input);
608+
output_.value->sumRows(*input, /* scaleSum= */1, /* scaleDest= */0);
609609
}
610610

611611
virtual void backward(const UpdateCallback& callback = nullptr) {

paddle/math/BaseMatrix.cu

Lines changed: 49 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1473,6 +1473,21 @@ int BaseMatrixT<real>::applyRow(Agg agg, Saver sv, BaseMatrixT& b) {
14731473
return 0;
14741474
}
14751475

1476+
template<>
1477+
template <class Agg>
1478+
int BaseMatrixT<real>::applyRow(
1479+
Agg agg, real scaleDest, real scaleAgg, BaseMatrixT& b) {
1480+
if (scaleDest != 0) {
1481+
applyRow(agg, base::binary::add2(scaleDest, scaleAgg), b);
1482+
} else {
1483+
applyRow(agg, base::binary::second(), b);
1484+
if (scaleAgg != 1) {
1485+
mulScalar(scaleAgg);
1486+
}
1487+
}
1488+
return 0;
1489+
}
1490+
14761491
template<>
14771492
template <class Agg, class Op, class Saver>
14781493
int BaseMatrixT<real>::applyRow(Agg agg, Op op, Saver sv,
@@ -1490,6 +1505,21 @@ int BaseMatrixT<real>::applyRow(Agg agg, Op op, Saver sv,
14901505
return 0;
14911506
}
14921507

1508+
template<>
1509+
template <class Agg, class Op>
1510+
int BaseMatrixT<real>::applyRow(Agg agg, Op op, real scaleDest, real scaleAgg,
1511+
BaseMatrixT& b, BaseMatrixT& c) {
1512+
if (scaleDest != 0) {
1513+
applyRow(agg, op, base::binary::add2(scaleDest, scaleAgg), b, c);
1514+
} else {
1515+
applyRow(agg, op, base::binary::second(), b, c);
1516+
if (scaleAgg != 1) {
1517+
mulScalar(scaleAgg);
1518+
}
1519+
}
1520+
return 0;
1521+
}
1522+
14931523
template<>
14941524
template <class Agg>
14951525
int BaseMatrixT<real>::applyCol(Agg agg, BaseMatrixT& b) {
@@ -1518,9 +1548,24 @@ int BaseMatrixT<real>::applyCol(Agg agg, Saver sv, BaseMatrixT& b) {
15181548
return 0;
15191549
}
15201550

1551+
template<>
1552+
template <class Agg>
1553+
int BaseMatrixT<real>::applyCol(
1554+
Agg agg, real scaleDest, real scaleAgg, BaseMatrixT& b) {
1555+
if (scaleDest != 0) {
1556+
applyCol(agg, base::binary::add2(scaleDest, scaleAgg), b);
1557+
} else {
1558+
applyCol(agg, base::binary::second(), b);
1559+
if (scaleAgg != 1) {
1560+
mulScalar(scaleAgg);
1561+
}
1562+
}
1563+
return 0;
1564+
}
1565+
15211566
template<>
15221567
void BaseMatrixT<real>::sumRows(BaseMatrixT& b, real scaleSum, real scaleDest) {
1523-
applyRow(aggregate::sum(), base::binary::add2(scaleDest, scaleSum), b);
1568+
applyRow(aggregate::sum(), scaleDest, scaleSum, b);
15241569
}
15251570

15261571
template<>
@@ -1550,21 +1595,21 @@ void BaseMatrixT<real>::minCols(BaseMatrixT& b) {
15501595

15511596
template<>
15521597
void BaseMatrixT<real>::sumCols(BaseMatrixT& b, real scaleSum, real scaleDest) {
1553-
applyCol(aggregate::sum(), base::binary::add2(scaleDest, scaleSum), b);
1598+
applyCol(aggregate::sum(), scaleDest, scaleSum, b);
15541599
}
15551600

15561601
template<>
15571602
void BaseMatrixT<real>::sumOfSquaredDiffs(
15581603
BaseMatrixT& b, BaseMatrixT& c, real scaleSum, real scaleDest) {
15591604
applyRow(aggregate::sum(), base::binary::squaredDiff(),
1560-
base::binary::add2(scaleDest, scaleSum), b, c);
1605+
scaleDest, scaleSum, b, c);
15611606
}
15621607

15631608
template<>
15641609
void BaseMatrixT<real>::sumOfProducts(
15651610
BaseMatrixT& b, BaseMatrixT& c, real scaleSum, real scaleDest) {
15661611
applyRow(aggregate::sum(), base::binary::mul(),
1567-
base::binary::add2(scaleDest, scaleSum), b, c);
1612+
scaleDest, scaleSum, b, c);
15681613
}
15691614

15701615
template class BaseMatrixT<real>;

paddle/math/BaseMatrix.h

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -317,6 +317,11 @@ class BaseMatrixT {
317317
template <class Agg, class Op, class Saver>
318318
int applyRow(Agg agg, Op op, Saver sv, BaseMatrixT& b, BaseMatrixT& c);
319319

320+
// Same as the above with the special handling of sv=add2(scaleDest, scaleAgg)
321+
template <class Agg, class Op>
322+
int applyRow(Agg agg, Op op, real scaleDest, real scaleAgg,
323+
BaseMatrixT& b, BaseMatrixT& c);
324+
320325
/**
321326
* an aggregate expression that applies to each row of matrix b.
322327
*
@@ -329,6 +334,10 @@ class BaseMatrixT {
329334
template <class Agg, class Saver>
330335
int applyRow(Agg agg, Saver sv, BaseMatrixT& b);
331336

337+
// Same as the above with the special handling of sv=add2(scaleDest, scaleAgg)
338+
template <class Agg>
339+
int applyRow(Agg agg, real scaleDest, real scaleAgg, BaseMatrixT& b);
340+
332341
/**
333342
* an aggregate expression that applies to each column of matrix b.
334343
*
@@ -352,6 +361,10 @@ class BaseMatrixT {
352361
template <class Agg, class Saver>
353362
int applyCol(Agg agg, Saver sv, BaseMatrixT& b);
354363

364+
// Same as the above with the special handling of sv=add2(scaleDest, scaleAgg)
365+
template <class Agg>
366+
int applyCol(Agg agg, real scaleDest, real scaleAgg, BaseMatrixT& b);
367+
355368
bool useGpu() const { return useGpu_; }
356369

357370
const T* rowBuf(size_t row) const { return data_ + width_ * row; }

python/paddle/trainer_config_helpers/layers.py

Lines changed: 9 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,6 @@
2929
import pickle
3030
import copy
3131

32-
<<<<<<< 0ba0f02c685e52b14632f6b9bfca4321494505c7
3332
__all__ = [
3433
"full_matrix_projection",
3534
"AggregateLevel",
@@ -1456,11 +1455,11 @@ def bilinear_interp_layer(input,
14561455
.. code-block:: python
14571456
14581457
bilinear = bilinear_interp_layer(input=layer1, out_size_x=64, out_size_y=64)
1459-
1458+
14601459
:param input: A input layer.
14611460
:type input: LayerOutput.
14621461
:param out_size_x: bilinear interpolation output width.
1463-
:type out_size_x: int|None
1462+
:type out_size_x: int|None
14641463
:param out_size_y: bilinear interpolation output height.
14651464
:type out_size_y: int|None
14661465
:param name: The layer's name, which cannot be specified.
@@ -1772,11 +1771,11 @@ def img_conv_layer(input,
17721771
The details of convolution layer, please refer UFLDL's `convolution
17731772
<http://ufldl.stanford.edu/tutorial/supervised/
17741773
FeatureExtractionUsingConvolution/>`_ .
1775-
1776-
Convolution Transpose (deconv) layer for image. Paddle only support square
1774+
1775+
Convolution Transpose (deconv) layer for image. Paddle only supports square
17771776
input currently and thus input image's width equals height.
17781777
1779-
The details of convolution transpose layer,
1778+
The details of convolution transpose layer,
17801779
please refer to the following explanation and references therein
17811780
<http://datascience.stackexchange.com/questions/6107/
17821781
what-are-deconvolutional-layers/>`_ .
@@ -4422,7 +4421,7 @@ def cross_entropy(input, label, name=None, coeff=1.0, layer_attr=None):
44224421
44234422
.. code-block:: python
44244423
4425-
cost = cross_entropy(input=input_layer,
4424+
cost = cross_entropy(input=input_layer,
44264425
label=label_layer)
44274426
44284427
:param input: The first input layer.
@@ -4462,7 +4461,7 @@ def cross_entropy_with_selfnorm(input,
44624461
44634462
.. code-block:: python
44644463
4465-
cost = cross_entropy_with_selfnorm(input=input_layer,
4464+
cost = cross_entropy_with_selfnorm(input=input_layer,
44664465
label=label_layer)
44674466
44684467
:param input: The first input layer.
@@ -4532,7 +4531,7 @@ def huber_cost(input, label, name=None, coeff=1.0, layer_attr=None):
45324531
45334532
.. code-block:: python
45344533
4535-
cost = huber_cost(input=input_layer,
4534+
cost = huber_cost(input=input_layer,
45364535
label=label_layer)
45374536
45384537
:param input: The first input layer.
@@ -4572,7 +4571,7 @@ def multi_binary_label_cross_entropy(input,
45724571
45734572
.. code-block:: python
45744573
4575-
cost = multi_binary_label_cross_entropy(input=input_layer,
4574+
cost = multi_binary_label_cross_entropy(input=input_layer,
45764575
label=label_layer)
45774576
45784577
:param input: The first input layer.

0 commit comments

Comments
 (0)