Skip to content

Commit 937d4d7

Browse files
committed
refine error message
1 parent 9df3eef commit 937d4d7

File tree

9 files changed

+21
-21
lines changed

9 files changed

+21
-21
lines changed

paddle/cinn/runtime/cuda/cuda_util.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1841,7 +1841,7 @@ void cinn_call_cholesky_nvgpu(void *v_args,
18411841
PADDLE_ENFORCE_EQ(host_info[i],
18421842
0,
18431843
::common::errors::PreconditionNotMet(
1844-
"Cholesky decomposition fail, please check the %d"
1844+
"Cholesky decomposition fail, please check the %d "
18451845
"th input matrix.",
18461846
i + 1));
18471847
}

paddle/fluid/framework/ir/layer_norm_fuse_pass.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -57,7 +57,7 @@ bool validateReduceOpAttrs(const Node* node,
5757
EXPECT_TRUE(
5858
!PADDLE_GET_CONST(bool, op->GetAttr("reduce_all")),
5959
::paddle::string::Sprintf(
60-
"The LayerNorm fusion %s"
60+
"The LayerNorm fusion %s "
6161
"reduction must have \'reduce_all\' attribute set to false.",
6262
name));
6363
}

paddle/fluid/inference/tensorrt/op_teller.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1420,7 +1420,7 @@ struct SimpleOpTypeSetTeller : public Teller {
14201420
#endif
14211421
if (dtype != -1 && dtype != 2 && dtype != 3 && dtype != 5 && dtype != 6) {
14221422
VLOG(3)
1423-
<< "the fill_any_like only supports int32/int64/float32/float64 by"
1423+
<< "the fill_any_like only supports int32/int64/float32/float64 by "
14241424
"trt8.4 below";
14251425
return false;
14261426
}

paddle/fluid/operators/fused/fused_adam_op.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -115,7 +115,7 @@ class FusedAdamOpMaker : public framework::OpProtoAndCheckerMaker {
115115
.SetDefault(0);
116116
AddAttr<bool>("use_adamw",
117117
"(bool, default False) "
118-
"Whether to use AdamW"
118+
"Whether to use AdamW. "
119119
"True for decoupled weight decay")
120120
.SetDefault(false);
121121
AddAttr<bool>("multi_precision",

paddle/fluid/operators/fused/fused_attention_op.cc

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -124,9 +124,9 @@ class FusedAttentionOp : public framework::OperatorWithKernel {
124124
PADDLE_ENFORCE_EQ(y_dim.size(),
125125
2,
126126
common::errors::InvalidArgument(
127-
"The dimensions of qkv_weight must be 2 if enable"
128-
"transpose_qkv_wb: (dim_embed, 3 * dim_embed),"
129-
"but received dimensions of"
127+
"The dimensions of qkv_weight must be 2 if enable "
128+
"transpose_qkv_wb: (dim_embed, 3 * dim_embed), "
129+
"but received dimensions of "
130130
"Input is [%d]",
131131
y_dim.size()));
132132
PADDLE_ENFORCE_GT(num_heads,
@@ -159,7 +159,7 @@ class FusedAttentionOp : public framework::OperatorWithKernel {
159159
PADDLE_ENFORCE_EQ(y_dim.size(),
160160
4,
161161
common::errors::InvalidArgument(
162-
"The dimensions of qkv_weight must be 4 if not"
162+
"The dimensions of qkv_weight must be 4 if not "
163163
"enable transpose_qkv_wb: (3, num_head, dim_head, "
164164
"dim_embed), but received [%d]",
165165
y_dim.size()));
@@ -186,8 +186,8 @@ class FusedAttentionOp : public framework::OperatorWithKernel {
186186
x_dim.size(),
187187
3,
188188
common::errors::InvalidArgument("The dimensions of x must be 3"
189-
"(batch_size, seq_len, dim_embed),"
190-
"but received dimensions of"
189+
"(batch_size, seq_len, dim_embed), "
190+
"but received dimensions of "
191191
"Input is [%d]",
192192
x_dim.size()));
193193

@@ -431,7 +431,7 @@ class FusedAttentionOpMaker : public framework::OpProtoAndCheckerMaker {
431431
"attn_dropout_implementation",
432432
"[\"downgrade_in_infer\"|\"upscale_in_train\"]"
433433
"There are two kinds of ways to implement dropout"
434-
"(the mask below is a tensor have the same shape with input"
434+
"(the mask below is a tensor have the same shape with input, "
435435
"the value of mask is 0 or 1, the ratio of 0 is dropout_rate)"
436436
"1. downgrade_in_infer(default), downgrade the outcome at inference "
437437
"time"

paddle/fluid/operators/fused/fused_conv2d_op.cc

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -53,13 +53,13 @@ TODO: Documentation of conv2d op.
5353
protected:
5454
void Apply() {
5555
AddInput("Bias",
56-
"(Tensor) Bias to be added to each output of filter application."
57-
"The format of output tensor is X (one-dimensional) of size equal"
56+
"(Tensor) Bias to be added to each output of filter application. "
57+
"The format of output tensor is X (one-dimensional) of size equal "
5858
"to the number of output channels. Only used with MKL-DNN.")
5959
.AsDispensable();
6060
AddInput("ResidualData",
6161
"(Tensor) Tensor with residual data "
62-
"to which convolution output will be added."
62+
"to which convolution output will be added. "
6363
"Used with fuse_residual_connection fusion.")
6464
.AsDispensable();
6565
AddAttr<std::string>("fuse_activation",

paddle/fluid/operators/fused/fused_multi_transformer_int8_op.cc

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -72,16 +72,16 @@ class FusedMultiTransformerINT8Op : public framework::OperatorWithKernel {
7272
x_dim.size(),
7373
3,
7474
common::errors::InvalidArgument("The dimensions of x must be 3"
75-
"(batch_size, seq_len, dim_embed),"
76-
"but received dimensions of"
75+
"(batch_size, seq_len, dim_embed), "
76+
"but received dimensions of "
7777
"Input is [%d]",
7878
x_dim.size()));
7979
PADDLE_ENFORCE_EQ(
8080
y_dim.size(),
8181
4,
8282
common::errors::InvalidArgument("The dimensions of qkv_weight must be 4"
83-
"(3, num_head, dim_head, dim_embed),"
84-
"but received dimensions of"
83+
"(3, num_head, dim_head, dim_embed), "
84+
"but received dimensions of "
8585
"Input is [%d]",
8686
y_dim.size()));
8787
PADDLE_ENFORCE_EQ(

paddle/fluid/operators/fused/multi_gru_op.cc

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -64,7 +64,7 @@ void MultiGRUOp::InferShape(framework::InferShapeContext* ctx) const {
6464
wx_dims[i][0],
6565
x_mat_dims[1],
6666
common::errors::InvalidArgument(
67-
"The first dimension of flattened WeightX #%d"
67+
"The first dimension of flattened WeightX #%d "
6868
"should equal to last dimension of flattened input X, but "
6969
"received fattened WeightX dimension is:%d, flattened X dimension "
7070
"is:%d",
@@ -205,7 +205,7 @@ void MultiGRUOpMaker::Make() {
205205
"Number of stacked GRU layers.")
206206
.SetDefault(1);
207207
AddAttr<bool>("origin_mode",
208-
"bool"
208+
"bool "
209209
"use origin mode in article https://arxiv.org/abs/1412.3555")
210210
.SetDefault(false);
211211
AddAttr<std::string>(

paddle/fluid/operators/sequence_ops/sequence_mask_op.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -50,7 +50,7 @@ class SequenceMaskOpMaker : public framework::OpProtoAndCheckerMaker {
5050
AddInput("X", "The input tensor of sequence_mask op.");
5151
AddOutput("Y", "The output mask of sequence_mask op.");
5252
AddInput("MaxLenTensor",
53-
"Max length tensor"
53+
"Max length tensor "
5454
"have higher priority than maxlen attribute")
5555
.AsDispensable();
5656
AddAttr<int>("maxlen",

0 commit comments

Comments (0)