
Commit 3beafff

Merge pull request #8415 from emailweixu/print_op
Make print_op able to show the value of bool tensor
2 parents: fcadb45 + 004df46

File tree: 4 files changed (+25 −24 lines)

paddle/fluid/operators/elementwise_op_function.h

Lines changed: 0 additions & 1 deletion
@@ -314,7 +314,6 @@ EIGEN_FUNCTOR(Div, EIGEN_DIV);
 template <typename DeviceContext, typename T, typename functor,
           typename broadcastfunctor, typename broadcast2functor>
 void ElementwiseGradCompute(const framework::ExecutionContext& ctx,
-
                             const framework::Tensor* x,
                             const framework::Tensor* y,
                             const framework::Tensor* out,

paddle/fluid/operators/print_op.cc

Lines changed: 9 additions & 7 deletions
@@ -46,7 +46,7 @@ struct Formater {
   }
 
  private:
-  void PrintMessage() { CLOG << std::time(nullptr) << "\t" << message; }
+  void PrintMessage() { CLOG << std::time(nullptr) << "\t" << message << "\t"; }
   void PrintName() {
     if (!name.empty()) {
       CLOG << "Tensor[" << name << "]" << std::endl;
@@ -85,15 +85,16 @@ struct Formater {
     // print float
     if (dtype.hash_code() == typeid(float).hash_code()) {
       Display<float>(size);
-    }
-    if (dtype.hash_code() == typeid(double).hash_code()) {
+    } else if (dtype.hash_code() == typeid(double).hash_code()) {
       Display<double>(size);
-    }
-    if (dtype.hash_code() == typeid(int).hash_code()) {
+    } else if (dtype.hash_code() == typeid(int).hash_code()) {
       Display<int>(size);
-    }
-    if (dtype.hash_code() == typeid(int64_t).hash_code()) {
+    } else if (dtype.hash_code() == typeid(int64_t).hash_code()) {
       Display<int64_t>(size);
+    } else if (dtype.hash_code() == typeid(bool).hash_code()) {
+      Display<bool>(size);
+    } else {
+      CLOG << "\tdata: unprintable type: " << dtype.name() << std::endl;
     }
   }
 
@@ -182,6 +183,7 @@ class TensorPrintOp : public framework::OperatorBase {
     }
 
     Formater formater;
+    formater.message = Attr<std::string>("message");
     if (Attr<bool>("print_tensor_name")) {
       formater.name = printed_var_name;
     }
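
With the else-if chain and the forwarded message attribute above, bool tensors are now routed to Display<bool> instead of matching no branch. A minimal, hypothetical sketch of exercising this from Python (layer names and defaults assumed from the v2 fluid API of this period, not taken from the diff):

import paddle.v2.fluid as fluid

x = fluid.layers.data(name='x', shape=[2], dtype='float32')
y = fluid.layers.data(name='y', shape=[2], dtype='float32')
# less_than produces a bool tensor; before this commit its values matched
# none of the float/double/int/int64 branches and were never printed.
cond = fluid.layers.less_than(x=x, y=y)
fluid.layers.Print(cond, message="x < y:")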

python/paddle/v2/fluid/layers/control_flow.py

Lines changed: 1 addition & 1 deletion
@@ -174,7 +174,7 @@ def Print(input,
         print_tensor_type (bool): Print the tensor type.
         print_tensor_shape (bool): Print the tensor shape.
         print_tensor_lod (bool): Print the tensor lod.
-        print_phase (bool): Which phase to displace, including 'forward',
+        print_phase (str): Which phase to displace, including 'forward',
             'backward' and 'both'. If set to 'backward' or 'both', will
             print the gradients of input tensor.
 
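
With print_phase now documented as a string, a hypothetical call would pass one of 'forward', 'backward' or 'both' (variable and argument values assumed, not part of the diff):

import paddle.v2.fluid as fluid

cost = fluid.layers.data(name='cost', shape=[1], dtype='float32')
# 'forward' prints only the tensor itself; 'backward' or 'both' would also
# print its gradient during the backward pass.
fluid.layers.Print(cost, message="cost:", print_phase='forward')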

python/paddle/v2/fluid/layers/nn.py

Lines changed: 15 additions & 15 deletions (most of the changed lines below only strip trailing whitespace from docstrings; the substantive edit is the keep_dim documentation fix)
@@ -1579,7 +1579,7 @@ def layer_norm(input,
     """
     **Layer Normalization**
 
-    Assume feature vectors exist on dimensions
+    Assume feature vectors exist on dimensions
     :attr:`begin_norm_axis ... rank(input)` and calculate the moment statistics
     along these dimensions for each feature vector :math:`a` with size
     :math:`H`, then normalize each feature vector using the corresponding
@@ -1600,13 +1600,13 @@ def layer_norm(input,
 
     Args:
         input(Variable): The input tensor variable.
-        scale(bool): Whether to learn the adaptive gain :math:`g` after
+        scale(bool): Whether to learn the adaptive gain :math:`g` after
             normalization.
-        shift(bool): Whether to learn the adaptive bias :math:`b` after
+        shift(bool): Whether to learn the adaptive bias :math:`b` after
             normalization.
-        begin_norm_axis(bool): The normalization will be performed along
+        begin_norm_axis(bool): The normalization will be performed along
             dimensions from :attr:`begin_norm_axis` to :attr:`rank(input)`.
-        epsilon(float): The small value added to the variance to prevent
+        epsilon(float): The small value added to the variance to prevent
             division by zero.
         param_attr(ParamAttr|None): The parameter attribute for the learnable
             gain :math:`g`.
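
For reference, a small usage sketch of the arguments documented above (shapes and defaults assumed; not part of this commit):

import paddle.v2.fluid as fluid

data = fluid.layers.data(name='data', shape=[32, 32], dtype='float32')
# Normalize each sample over dimensions begin_norm_axis .. rank(input),
# learning the gain g and bias b since scale and shift default to True.
out = fluid.layers.layer_norm(input=data, begin_norm_axis=1)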
@@ -2070,7 +2070,7 @@ def reduce_sum(input, dim=None, keep_dim=False, name=None):
             Tensor variable with a single element, otherwise must be in the
             range :math:`[-rank(input), rank(input))`. If :math:`dim < 0`,
             the dimension to reduce is :math:`rank + dim`.
-        keep_dim (bool): Whether to reserve the reduced dimension in the
+        keep_dim (bool|False): Whether to reserve the reduced dimension in the
             output Tensor. The result tensor will have one fewer dimension
             than the :attr:`input` unless :attr:`keep_dim` is true.
         name(str|None): A name for this layer(optional). If set None, the layer
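
A brief illustration of the keep_dim flag described above (shapes assumed; fill_constant is used only to keep the sketch self-contained):

import paddle.v2.fluid as fluid

ones = fluid.layers.fill_constant(shape=[3, 4], dtype='float32', value=1.0)
s0 = fluid.layers.reduce_sum(ones, dim=1)                 # result shape: [3]
s1 = fluid.layers.reduce_sum(ones, dim=1, keep_dim=True)  # result shape: [3, 1]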
@@ -3098,33 +3098,33 @@ def multiplex(inputs, index):
 def softmax_with_cross_entropy(logits, label, soft_label=False):
     """
     **Softmax With Cross Entropy Operator.**
-
+
     Cross entropy loss with softmax is used as the output layer extensively. This
     operator computes the softmax normalized values for each row of the input
     tensor, after which cross-entropy loss is computed. This provides a more
     numerically stable gradient.
-
+
     Because this operator performs a softmax on logits internally, it expects
     unscaled logits. This operator should not be used with the output of
     softmax operator since that would produce incorrect results.
-
+
     When the attribute soft_label is set false, this operators expects mutually
     exclusive hard labels, each sample in a batch is in exactly one class with a
     probability of 1.0. Each sample in the batch will have a single label.
-
+
     The equation is as follows:
-
+
     1) Hard label (one-hot label, so every sample has exactly one class)
-
+
     .. math::
 
         loss_j = -\\text{logit}_{label_j} +
         \\log\\left(\\sum_{i=0}^{K}\\exp(\\text{logit}_i)\\right), j = 1,..., K
-
+
     2) Soft label (each sample can have a distribution over all classes)
 
     .. math::
-
+
         loss_j = -\\sum_{i=0}^{K}\\text{label}_i
         \\left(\\text{logit}_i - \\log\\left(\\sum_{i=0}^{K}
         \\exp(\\text{logit}_i)\\right)\\right), j = 1,...,K
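
A hypothetical hard-label use of the operator documented above (layer names and sizes assumed):

import paddle.v2.fluid as fluid

feat = fluid.layers.data(name='feat', shape=[128], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
logits = fluid.layers.fc(input=feat, size=10)  # unscaled logits, no softmax applied
loss = fluid.layers.softmax_with_cross_entropy(logits=logits, label=label)
avg_loss = fluid.layers.mean(x=loss)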
@@ -3169,7 +3169,7 @@ def smooth_l1(x, y, inside_weight=None, outside_weight=None, sigma=None):
     The operator takes the first dimension of X and Y as batch size.
     For each instance, it computes the smooth l1 loss element by element first
     and then sums all the losses. So the shape of Out is [batch_size, 1].
-
+
     Args:
         x (Variable): A tensor with rank at least 2. The input value of smooth
             l1 loss op with shape [batch_size, dim1, ..., dimN].
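
A minimal sketch matching the shape convention above (variable names assumed):

import paddle.v2.fluid as fluid

pred = fluid.layers.data(name='pred', shape=[4], dtype='float32')
target = fluid.layers.data(name='target', shape=[4], dtype='float32')
l1_loss = fluid.layers.smooth_l1(x=pred, y=target)  # output shape: [batch_size, 1]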
