Skip to content

Commit e8f6488

Browse files
fix typo word (#22765)
1 parent fd94507 commit e8f6488

File tree

152 files changed

+519
-520
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

152 files changed

+519
-520
lines changed

paddle/fluid/framework/data_set.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -126,7 +126,7 @@ class Dataset {
126126
virtual void DestroyPreLoadReaders() = 0;
127127
// set preload thread num
128128
virtual void SetPreLoadThreadNum(int thread_num) = 0;
129-
// seperate train thread and dataset thread
129+
// separate train thread and dataset thread
130130
virtual void DynamicAdjustChannelNum(int channel_num) = 0;
131131
virtual void DynamicAdjustReadersNum(int thread_num) = 0;
132132
// set fleet send sleep seconds

paddle/fluid/framework/details/build_strategy.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -132,10 +132,10 @@ struct BuildStrategy {
132132
// The picture is here:
133133
// https://github.com/PaddlePaddle/Paddle/pull/17263#discussion_r285411396
134134
bool use_hierarchical_allreduce_{false};
135-
// Nccl ranks in a node when use hierarchical allreduce, it's setted to gpu
135+
// Nccl ranks in a node when use hierarchical allreduce, it's set to gpu
136136
// cards' number in most cases.
137137
size_t hierarchical_allreduce_inter_nranks_{0};
138-
// Nccl ranks bewteen nodes when use hierarchical allreduce, it's setted to
138+
// Nccl ranks between nodes when use hierarchical allreduce, it's set to
139139
// nodes number.
140140
size_t hierarchical_allreduce_exter_nranks_{0};
141141

paddle/fluid/framework/ir/conv_elementwise_add2_act_fuse_pass.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@ namespace ir {
3333
GET_IR_NODE(act_op); \
3434
GET_IR_NODE(act_out);
3535

36-
// Inherient the basic infomation from `base_desc`, and modify some fields.
36+
// Inherit the basic information from `base_desc`, and modify some fields.
3737
framework::proto::OpDesc PrepareOpDesc(
3838
const framework::proto::OpDesc& base_desc, const std::string& bias,
3939
const std::string& bias1, const std::string& activation,

paddle/fluid/framework/ir/conv_elementwise_add_act_fuse_pass.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@ namespace ir {
3131
GET_IR_NODE(act_op); \
3232
GET_IR_NODE(act_out);
3333

34-
// Inherient the basic infomation from `base_desc`, and modify some fields.
34+
// Inherit the basic information from `base_desc`, and modify some fields.
3535
framework::proto::OpDesc PrepareOpDesc(
3636
const framework::proto::OpDesc& base_desc, const std::string& bias,
3737
const std::string& activation, const std::string& output) {

paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_optimizer_op_pass.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -382,7 +382,7 @@ const VarDesc *FuseOptimizerOpPass::GetVarDescFromVarsInfo(
382382
const std::string &var_name) const {
383383
auto grad_iter = vars_info.find(var_name);
384384
PADDLE_ENFORCE_EQ(grad_iter != vars_info.end(), true,
385-
"The gradient varibale %s is not found.", var_name);
385+
"The gradient variable %s is not found.", var_name);
386386
PADDLE_ENFORCE_EQ(!grad_iter->second.empty(), true,
387387
"The gradient var node %s is not found.", var_name);
388388
PADDLE_ENFORCE_NOT_NULL(grad_iter->second.front()->Var(),

paddle/fluid/framework/ir/graph_pattern_detector.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -131,7 +131,7 @@ bool GraphPatternDetector::MarkPDNodesInGraph(const ir::Graph &graph) {
131131
}
132132

133133
// The intermediate Nodes can only link to the nodes inside the pattern, or this
134-
// subgraph will be droped.
134+
// subgraph will be dropped.
135135
void GraphPatternDetector::ValidateByNodeRole(
136136
std::vector<GraphPatternDetector::subgraph_t> *subgraphs) {
137137
std::vector<GraphPatternDetector::subgraph_t> result;

paddle/fluid/framework/ir/multi_batch_merge_pass.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -179,7 +179,7 @@ void BatchMergePass::ApplyImpl(ir::Graph* graph) const {
179179
ir::Node* var = nullptr;
180180
auto updated_var = UpdateGradVarDesc(in_node->Var(), i, grad_names,
181181
bn_vars_need_rename);
182-
// should be initialized by startup, how to initilize tensor in the
182+
// should be initialized by startup, how to initialize tensor in the
183183
// scope?
184184
if (node->Name() == "batch_norm" &&
185185
bn_vars_need_rename.find(in_node->Name()) !=

paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_pass.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1041,7 +1041,7 @@ void DistSSAGraphBuilder::InsertPostprocessOps(ir::Graph *result) const {
10411041
// There are 4 conditions:
10421042
// 1. GPU && Reduce: Reduce gradient then broadcast gradient to other GPUS.
10431043
// Need to broadcast received parameters to other GPU.
1044-
// 2. GPU && AllReduce: AllReduce all graident to each GPU. Need to
1044+
// 2. GPU && AllReduce: AllReduce all gradient to each GPU. Need to
10451045
// broadcast received parameters to other GPU.
10461046
// 3. CPU && AllReduce: AllReduce all gradient to each thread. Need to
10471047
// broadcast received parameters to other scope.

paddle/fluid/framework/op_desc.cc

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -80,7 +80,7 @@ class CompileTimeInferShapeContext : public InferShapeContext {
8080
PADDLE_ENFORCE_EQ(
8181
in_var_names.size(), out_var_names.size(),
8282
platform::errors::PreconditionNotMet(
83-
"Op [%s]: Input var number shoule be equal with output var number",
83+
"Op [%s]: Input var number should be equal with output var number",
8484
op_.Type()));
8585

8686
for (size_t i = 0; i < in_var_names.size(); ++i) {
@@ -663,7 +663,7 @@ void OpDesc::Flush() {
663663

664664
void OpDesc::CheckAttrs() {
665665
PADDLE_ENFORCE(!Type().empty(),
666-
"CheckAttr() can not be called before type is setted.");
666+
"CheckAttr() can not be called before type is set.");
667667
auto *checker = OpInfoMap::Instance().Get(Type()).Checker();
668668
if (checker == nullptr) {
669669
// checker is not configured. That operator could be generated by Paddle,
@@ -706,7 +706,7 @@ void OpDesc::InferShape(const BlockDesc &block) const {
706706
void OpDesc::InferVarType(BlockDesc *block) const {
707707
// There are a few places that var type can be set.
708708
// When VarDesc is created, default set to LOD_TENSOR.
709-
// When output variable is created, default is defaut set to LOD_TENSOR.
709+
// When output variable is created, default is default set to LOD_TENSOR.
710710
// We limit here to be the only place that operator defines its customized
711711
// var type inference. Hence, we don't do any "default" setting here.
712712
auto &info = OpInfoMap::Instance().Get(this->Type());

paddle/fluid/framework/operator.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -648,7 +648,7 @@ class RuntimeInferShapeContext : public InferShapeContext {
648648
PADDLE_ENFORCE_EQ(
649649
in_var_list.size(), out_var_list.size(),
650650
platform::errors::PreconditionNotMet(
651-
"Op [%s]: Input var size should be equal with ouput var size",
651+
"Op [%s]: Input var size should be equal with output var size",
652652
op_.Type()));
653653

654654
auto& out_var_names = op_.Outputs(out);

0 commit comments

Comments
 (0)