Commit c8dbf0d

fix typos (PaddlePaddle#76353)
* fix typos
* fix typos
1 parent 494d73f commit c8dbf0d

7 files changed: +8 lines, -12 lines

paddle/fluid/framework/ir/layer_norm_fuse_pass.cc

Lines changed: 1 addition & 1 deletion
@@ -72,7 +72,7 @@ bool validateReduceOpAttrs(const Node* node,
   }
   for (size_t i = 1; i < dims.size(); ++i) {
     if (1 != dims[i] - dims[i - 1]) {
-      LOG(WARNING) << "The LayerNorm dim of mean must be continuous";
+      LOG(WARNING) << "The LayerNorm dim of mean must be continuous";
       return false;
     }
   }
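For context, the warning above fires inside a check that the reduce op's dims form a consecutive run, e.g. {1, 2, 3} passes while {1, 3} does not. A minimal standalone sketch of that check, with an illustrative name that is not the pass's actual API:

#include <cstddef>
#include <vector>

// Returns true when every dim follows its predecessor by exactly 1,
// mirroring the loop shown in the hunk above.
bool DimsAreContinuous(const std::vector<int>& dims) {
  for (std::size_t i = 1; i < dims.size(); ++i) {
    if (1 != dims[i] - dims[i - 1]) return false;  // gap in the run
  }
  return true;
}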

paddle/fluid/framework/ir/preln_residual_bias_fuse_pass.cc

Lines changed: 1 addition & 1 deletion
@@ -217,7 +217,7 @@ int PrelnResidualBiasFusePass::ApplyPattern(ir::Graph *graph,
   // elementwise1_out. This will lead to two or more PrelnResidualBias
   // patterns is found near elementwise1_out, and these patterns will interact
   // on each other, so we make below check to ensure only one
-  // PrelnResidualBias pattern is delalted with.
+  // PrelnResidualBias pattern is dealt with.
   for (auto op : elementwise1_out->inputs) {
     if (op->Name() == "fused_bias_dropout_residual_layer_norm") return;
   }
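The loop in this hunk is a de-duplication guard: when several overlapping PrelnResidualBias matches share elementwise1_out, the pass returns early if one of them has already been rewritten into fused_bias_dropout_residual_layer_norm. A simplified sketch of that guard using stand-in types rather than Paddle's real ir::Node API:

#include <string>
#include <vector>

// Stand-in for a graph node; illustrative only.
struct FakeNode {
  std::string name;
  std::vector<FakeNode*> inputs;
};

// Mirrors the early return above: report true when the fused op already
// appears among the output variable's connected ops.
bool AlreadyFused(const FakeNode& elementwise1_out) {
  for (const FakeNode* op : elementwise1_out.inputs) {
    if (op->name == "fused_bias_dropout_residual_layer_norm") return true;
  }
  return false;
}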

paddle/fluid/framework/ir/preln_skip_layernorm_fuse_pass.cc

Lines changed: 1 addition & 2 deletions
@@ -117,8 +117,7 @@ void PrelnSkipLayerNormFusePass::ApplyImpl(ir::Graph *graph) const {
            "use_varseqlen, preln_embedding_eltwise_layernorm_fuse_pass, "
            "trt_multihead_matmul_fuse_pass, "
            "set pos_id, set mask_id, with_dynamic_shape. Stop this pass, "
-           "please "
-           "reconfig.";
+           "please reconfig.";
     return;
   }
   int found_subgraph_count = 0;
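The deleted and added message fragments are equivalent at compile time, since adjacent C++ string literals are concatenated; the change only joins the pieces for readability. A small self-contained illustration (not Paddle code):

#include <cassert>
#include <cstring>

int main() {
  // Adjacent string literals concatenate at translation time, so the
  // two-fragment form and the joined form yield the same bytes.
  const char* split = "please " "reconfig.";
  const char* joined = "please reconfig.";
  assert(std::strcmp(split, joined) == 0);
  return 0;
}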

paddle/fluid/framework/ir/seq_concat_fc_fuse_pass.cc

Lines changed: 2 additions & 2 deletions
@@ -37,8 +37,8 @@ PDNode* BuildSeqExpandConcatPattern(PDPattern* pattern) {
   // concat output

   // So the following variables will be removed:
-  // sequence-expand output
-  // sequence-expand output
+  // sequence_expand output
+  // sequence_expand output

   // Three operators
   auto* sequence_expand0 = pattern->NewNode(

paddle/fluid/framework/ir/trt_cross_multihead_matmul_fuse_pass.cc

Lines changed: 2 additions & 2 deletions
@@ -311,11 +311,11 @@ int TrtCrossMultiHeadMatmulFusePass::BuildCrossFusion(
   int hidden_out = wq_tensor->dims()[1];
   int head_size = hidden_out / head_number;
   if (abs(scale_attr - 1.0f / sqrt(static_cast<float>(head_size))) > 1e-5) {
-    VLOG(3) << "scale of muilthead matmul do not fit the requirement of "
+    VLOG(3) << "scale of multihead matmul do not fit the requirement of "
               "flash attention plugin, Stop fusing.";
     return;
   }
-  VLOG(5) << "trt cross attention get wq_tensor name = " << mul0_w->Name()
+  VLOG(5) << "trt cross attention wq_tensor name = " << mul0_w->Name()
           << "trt cross attention wk_tensor name = " << mul1_w->Name()
           << "trt cross attention wv_tensor name = " << mul2_w->Name();
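The guard shown here rejects fusion unless the graph's scale attribute matches 1/sqrt(head_size), the softmax scaling that the flash-attention plugin assumes. A hedged standalone restatement of the tolerance check (the function name is illustrative, not the pass's API):

#include <cmath>

// True when scale_attr is within 1e-5 of the expected attention scale
// 1/sqrt(head_size); the fusion pass bails out otherwise.
bool ScaleFitsFlashAttention(float scale_attr, int head_size) {
  float expected = 1.0f / std::sqrt(static_cast<float>(head_size));
  return std::fabs(scale_attr - expected) <= 1e-5f;
}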

paddle/fluid/framework/ir/trt_multihead_matmul_fuse_pass.cc

Lines changed: 0 additions & 3 deletions
@@ -62,9 +62,6 @@ static int BuildFusion(Graph* graph, const std::string& name_scope) {
                          Node* scale,
                          Node* scale_out) {
     auto scale_attr = PADDLE_GET_CONST(float, scale->Op()->GetAttr("scale"));
-    // auto scale_bias = PADDLE_GET_CONST(float, scale->Op()->GetAttr("bias"));
-    // bool after_scale =
-    //     PADDLE_GET_CONST(bool, scale->Op()->GetAttr("bias_after_scale"));

     // create multihead
     OpDesc multihead_op_desc(mul0->Op()->Block());

paddle/fluid/framework/ir/trt_qk_multihead_matmul_fuse_pass.cc

Lines changed: 1 addition & 1 deletion
@@ -228,7 +228,7 @@ PDNode* TrtQKMultiHeadMatmulPattern::operator()() {
       .LinksTo({matmul_qk_out_var});
   scale->LinksFrom({matmul_qk_out_var}).LinksTo({scale_out_var});
   softmax_qk->LinksFrom({scale_out_var}).LinksTo({softmax_qk_out_var});
-  // V path
+  // V path
   mul2->LinksFrom({input1, mul2_w_var}).LinksTo({mul2_out_var});
   elementwise2->LinksFrom({mul2_out_var, elementwise2_w})
       .LinksTo({elementwise2_out});
