Skip to content

Commit 7244b2a

Browse files
Fix typos (#22799)
1 parent e8f6488 commit 7244b2a

File tree

73 files changed

+446
-189
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

73 files changed

+446
-189
lines changed

cmake/configure.cmake

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,7 @@ if(WIN32)
4848
SET(CMAKE_C_RESPONSE_FILE_LINK_FLAG "@")
4949
SET(CMAKE_CXX_RESPONSE_FILE_LINK_FLAG "@")
5050

51-
# set defination for the dll export
51+
# set definition for the dll export
5252
if (NOT MSVC)
5353
message(FATAL "Windows build only support msvc. Which was binded by the nvcc compiler of NVIDIA.")
5454
endif(NOT MSVC)

cmake/third_party.cmake

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -174,7 +174,7 @@ if(${CMAKE_VERSION} VERSION_GREATER "3.5.2")
174174
set(SHALLOW_CLONE "GIT_SHALLOW TRUE") # adds --depth=1 arg to git clone of External_Projects
175175
endif()
176176

177-
########################### include third_party accoring to flags ###############################
177+
########################### include third_party according to flags ###############################
178178
include(external/zlib) # download, build, install zlib
179179
include(external/gflags) # download, build, install gflags
180180
include(external/glog) # download, build, install glog

paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_pass.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -857,7 +857,7 @@ int DistSSAGraphBuilder::CreateRPCOp(ir::Graph *result, ir::Node *node) const {
857857
op_dev_id = GetVarDeviceID(node->inputs[0]->Name());
858858
PADDLE_ENFORCE(!ir::IsControlDepVar(*node->inputs[0]),
859859
"This hack no longer holds, please fix.");
860-
// the variable name which contains .block means it was splited by
860+
// the variable name which contains .block means it was split by
861861
// split_byref op
862862
if (strategy_.reduce_ ==
863863
details::BuildStrategy::ReduceStrategy::kAllReduce &&

paddle/fluid/framework/operator.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -981,7 +981,7 @@ void OperatorWithKernel::RunImpl(const Scope& scope,
981981
}
982982

983983
if (!transfered_inplace_vars.empty()) {
984-
// there is inplace variable has been transfered.
984+
// there is inplace variable has been transferred.
985985
TransferInplaceVarsBack(scope, transfered_inplace_vars, *transfer_scope);
986986
}
987987
if (FLAGS_enable_unused_var_check) {

paddle/fluid/framework/operator.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -517,7 +517,8 @@ class OperatorWithKernel : public OperatorBase {
517517
RuntimeContext* runtime_ctx) const;
518518

519519
/**
520-
* Transfer data from scope to a transfered scope. If there is no data need to
520+
* Transfer data from scope to a transferred scope. If there is no data need
521+
* to
521522
 * be transferred, it returns nullptr.
522523
*
523524
* * transfered_inplace_vars is a output vector.

paddle/fluid/framework/parallel_executor.cc

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -87,18 +87,18 @@ class ParallelExecutorPrivate {
8787
inline bool HasGarbageCollectors() const { return !gcs_.empty(); }
8888

8989
/**
90-
* NOTE(zengjinle): the feeded variables of users should not be reused,
91-
* because users may feed them into another network. Changing the feeded
90+
* NOTE(zengjinle): the fed variables of users should not be reused,
91+
* because users may feed them into another network. Changing the fed
9292
* variables that users can visit may cause calculation wrong, which is
9393
 * a very subtle bug when training networks. However, these variables
9494
* can be garbage collected.
9595
*
9696
* ParallelExecutor provides 2 methods to feed variables:
9797
*
98-
* - FeedTensorsIntoLocalScopes: this method would share memory of feeded
98+
* - FeedTensorsIntoLocalScopes: this method would share memory of fed
9999
* variables, so we have to skip these.
100100
*
101-
* - FeedAndSplitTensorIntoLocalScopes: this method would copy data of feeded
101+
* - FeedAndSplitTensorIntoLocalScopes: this method would copy data of fed
102102
* variables, so we do not need to skip
103103
* them.
104104
*/

paddle/fluid/framework/reader.h

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -53,10 +53,10 @@ class ReaderBase {
5353
// they are readers just before read op.
5454
std::unordered_set<ReaderBase*> GetEndPoints();
5555

56-
// Returns the shapes of the feeded variables
56+
// Returns the shapes of the fed variables
5757
const std::vector<DDim>& Shapes() const { return shapes_; }
5858

59-
// Returns the dtypes of the feeded variables
59+
// Returns the dtypes of the fed variables
6060
const std::vector<proto::VarType::Type>& VarTypes() const {
6161
return var_types_;
6262
}
@@ -80,13 +80,13 @@ class ReaderBase {
8080

8181
mutable std::mutex mu_;
8282

83-
// The shapes of the feeded variables.
83+
// The shapes of the fed variables.
8484
std::vector<DDim> shapes_;
8585

86-
// The dtypes of the feeded variables.
86+
// The dtypes of the fed variables.
8787
std::vector<proto::VarType::Type> var_types_;
8888

89-
// Whether to check the shape and dtype of feeded variables.
89+
// Whether to check the shape and dtype of fed variables.
9090
// For Backward compatibility, variables created by old API fluid.layers.data
9191
// doesn't check shape but fluid.data checks.
9292
std::vector<bool> need_check_feed_;

paddle/fluid/imperative/tests/test_prepare_op.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -210,7 +210,7 @@ TEST(test_prepare_op, test_prepare_data_same_place) {
210210
attr_map);
211211
framework::RuntimeContext ctx = PrepareRuntimeContext(ins, outs);
212212

213-
// test if it never transfered on GPU place
213+
// test if it never transferred on GPU place
214214
PreparedOp prepared_op = PreparedOp::Prepare(
215215
ins, outs, dynamic_cast<framework::OperatorWithKernel&>(*op), cpu_place,
216216
&attr_map);

paddle/fluid/inference/CMakeLists.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@
1414
#
1515

1616
if(WITH_TESTING)
17-
include(tests/test.cmake) # some generic cmake funtion for inference
17+
include(tests/test.cmake) # some generic cmake function for inference
1818
endif()
1919

2020
# TODO(panyx0718): Should this be called paddle_fluid_inference_api_internal?

paddle/fluid/inference/api/demo_ci/README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@ There are several demos:
1212
- Each line contains a single record
1313
- Each record's format is
1414
```
15-
<space splitted floats as data>\t<space splitted ints as shape>
15+
<space split floats as data>\t<space split ints as shape>
1616
```
1717
1818
To build and execute the demos, simply run

0 commit comments

Comments (0)