
Commit f8c16f4

[CodeStyle][Typos][C-[32-38]] Fix typos (compatiblity,compability,compitable,Compitable,compatable,compling,comple,complition,complext,compsite) (#69847)
1 parent 30defc0 commit f8c16f4

21 files changed (+51 additions, -61 deletions)

_typos.toml

Lines changed: 0 additions & 10 deletions
@@ -61,16 +61,6 @@ cann = 'cann'
 vart = 'vart'
 checkings = 'checkings'
 childs = 'childs'
-compability = 'compability'
-compatiblity = 'compatiblity'
-Compitable = 'Compitable'
-compatable = 'compatable'
-compitable = 'compitable'
-compling = 'compling'
-comple = 'comple'
-complition = 'complition'
-complext = 'complext'
-compsite = 'compsite'
 comsume = 'comsume'
 Continer = 'Continer'
 contenst = 'contenst'

paddle/cinn/ir/ir.cc

Lines changed: 2 additions & 2 deletions
@@ -602,7 +602,7 @@ Expr Store::Make(Expr tensor, Expr value, const std::vector<Expr> &indices) {
   node->tensor = tensor;
   node->value = value;
   node->indices =
-      utils::GetCompitableStoreLoadIndices(tensor.as_tensor_ref(), indices);
+      utils::GetCompatibleStoreLoadIndices(tensor.as_tensor_ref(), indices);

   if (tensor->type() != Void()) {
     node->set_type(
@@ -904,7 +904,7 @@ Expr Load::Make(Expr tensor, const std::vector<Expr> &origin_indices) {
       true,
       ::common::errors::InvalidArgument("The tensor type is not valid. "
                                         "A valid tensor type is required."));
-  const auto indices = utils::GetCompitableStoreLoadIndices(
+  const auto indices = utils::GetCompatibleStoreLoadIndices(
       tensor.as_tensor_ref(), origin_indices);
   PADDLE_ENFORCE_EQ(
       !indices.empty(),

paddle/cinn/ir/ir_utils.h

Lines changed: 2 additions & 2 deletions
@@ -19,7 +19,7 @@
 namespace cinn::ir::utils {

 // FIXME(Aurelius84): Return [Expr(1)] for 0D Tensor as the shape.
-static inline std::vector<Expr> GetCompitableShape(
+static inline std::vector<Expr> GetCompatibleShape(
     const std::vector<Expr>& shape) {
   return shape.empty() ? std::vector<Expr>({Expr(1)}) : shape;
 }
@@ -32,7 +32,7 @@ static inline bool MaybeZeroRankTensor(const Tensor& tensor) {
 }

 // FIXME(Aurelius84): Return [Expr(0)] for 0D Tensor as the indices.
-static inline std::vector<Expr> GetCompitableStoreLoadIndices(
+static inline std::vector<Expr> GetCompatibleStoreLoadIndices(
     const Tensor& tensor, const std::vector<Expr>& indices) {
   const bool should_fill_zero = indices.empty() && MaybeZeroRankTensor(tensor);
   return should_fill_zero ? std::vector<Expr>({Expr(0)}) : indices;
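The two renamed helpers above implement CINN's 0-D tensor convention: an empty shape is normalized to [Expr(1)] and empty store/load indices to [Expr(0)]. A minimal standalone sketch of that convention follows; it is an illustration, not CINN code: `int` stands in for cinn::ir::Expr, and the MaybeZeroRankTensor check is reduced to a boolean flag.

#include <cassert>
#include <vector>

using Expr = int;  // stand-in for cinn::ir::Expr

// Empty shape (a 0-D tensor) is normalized to {1}.
std::vector<Expr> GetCompatibleShape(const std::vector<Expr>& shape) {
  return shape.empty() ? std::vector<Expr>{Expr(1)} : shape;
}

// Empty indices on a possibly zero-rank tensor are normalized to {0}.
std::vector<Expr> GetCompatibleStoreLoadIndices(
    bool maybe_zero_rank, const std::vector<Expr>& indices) {
  const bool should_fill_zero = indices.empty() && maybe_zero_rank;
  return should_fill_zero ? std::vector<Expr>{Expr(0)} : indices;
}

int main() {
  const std::vector<Expr> nd = {4, 8};
  assert(GetCompatibleShape({}).size() == 1);   // 0-D shape -> {1}
  assert(GetCompatibleShape({}).front() == 1);
  assert(GetCompatibleStoreLoadIndices(true, {}).front() == 0);  // -> {0}
  assert(GetCompatibleShape(nd).size() == 2);   // N-D shapes pass through
  return 0;
}

This is why the Store::Make and Load::Make call sites in this commit can pass user-supplied indices straight through: the helper fills in the 0-D case uniformly.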

paddle/cinn/ir/stmt.cc

Lines changed: 1 addition & 1 deletion
@@ -80,7 +80,7 @@ Store _Store_::Make(Expr tensor, Expr value, const std::vector<Expr> &indices) {
   ref->set_tensor(tensor);
   ref->set_value(value);
   ref->set_indices(
-      utils::GetCompitableStoreLoadIndices(tensor.as_tensor_ref(), indices));
+      utils::GetCompatibleStoreLoadIndices(tensor.as_tensor_ref(), indices));

   if (tensor->type() != Void()) {
     ref->set_type(

paddle/cinn/ir/tensor.cc

Lines changed: 6 additions & 6 deletions
@@ -51,7 +51,7 @@ Tensor _Tensor_::Make(const std::string &name,
                           "Required tensor name shall not be empty."));
   auto n = make_shared<_Tensor_>();
   n->name = name;
-  n->shape = utils::GetCompitableShape(shape);
+  n->shape = utils::GetCompatibleShape(shape);
   n->domain = domain;
   n->reduce_axis = reduce_axis;
   n->set_type(dtype);
@@ -71,7 +71,7 @@ Tensor _Tensor_::Make(const std::string &name,
                           "Required tensor name shall not be empty."));
   auto n = make_shared<_Tensor_>();
   n->name = name;
-  n->shape = utils::GetCompitableShape(shape);
+  n->shape = utils::GetCompatibleShape(shape);
   n->domain = domain;
   n->reduce_axis = reduce_axis;
   n->operation = PlaceholderOp::Make(n->name, n->shape, Float(32));
@@ -178,14 +178,14 @@ Expr Tensor::operator()(const std::vector<Expr> &indices) const {
       ::common::errors::PreconditionNotMet(
           "Required tensor shall not be tuple type."));
   auto *node = operator->();
-  const auto compitable_indices =
-      utils::GetCompitableStoreLoadIndices(*this, indices);
+  const auto compatible_indices =
+      utils::GetCompatibleStoreLoadIndices(*this, indices);

-  PADDLE_ENFORCE_EQ(compitable_indices.size(),
+  PADDLE_ENFORCE_EQ(compatible_indices.size(),
                     ndims(),
                     ::common::errors::PreconditionNotMet(
                         "number of indices not match the dimension"));
-  return Load::Make(*this, compitable_indices);
+  return Load::Make(*this, compatible_indices);
 }

 Expr _Tensor_::inline_expanded(const std::vector<Expr> &indices) {

paddle/cinn/lang/placeholder.cc

Lines changed: 2 additions & 2 deletions
@@ -32,7 +32,7 @@ ir::Tensor CreatePlaceHolder(const std::vector<int> &shape,
     expr_shape.push_back(Expr(s));
   }
   return CreatePlaceHolder(
-      ir::utils::GetCompitableShape(expr_shape), type, name);
+      ir::utils::GetCompatibleShape(expr_shape), type, name);
 }

 ir::Tensor CreatePlaceHolder(const std::vector<ir::Dim> &shape,
@@ -75,7 +75,7 @@ ir::Tensor CreatePlaceHolder(const std::vector<ir::Dim> &shape,
 ir::Tensor CreatePlaceHolder(const std::vector<Expr> &origin_shape,
                              Type type,
                              const std::string &name) {
-  const auto shape = ir::utils::GetCompitableShape(origin_shape);
+  const auto shape = ir::utils::GetCompatibleShape(origin_shape);
   if (type.is_float(32)) {
     return Placeholder<float>(name, shape);
   } else if (type.is_float(64)) {

paddle/cinn/operator_fusion/pir_graph_analyzing/dim_relation.cc

Lines changed: 1 addition & 1 deletion
@@ -109,7 +109,7 @@ static DimUsageRelation CreateOpRelativenessForBroadcast(pir::Operation* op) {
 static DimUsageRelation CreateOpRelativenessForReduce(pir::Operation* op) {
   const auto& reduce_axis_idx = GetReduceAxisIdx(op);
   DimUsageRelation res;
-  const size_t input_rank = GetCompitableRank(op->operand_source(0));
+  const size_t input_rank = GetCompatibleRank(op->operand_source(0));
   int out_idx = 0;
   bool keep_dim = GetReduceOpKeepDims(op);
   for (size_t i = 0; i < input_rank; i++) {

paddle/cinn/operator_fusion/pir_graph_analyzing/fusion_iters.cc

Lines changed: 1 addition & 1 deletion
@@ -134,7 +134,7 @@ FusionItersSignature FusionItersManager::GetItersSignature(pir::Operation* op) {
   if (axes.reduce_size > 0) {
     PADDLE_ENFORCE_LE(
         axes.reduce_size,
-        GetCompitableRank(op->operand(0).source()),
+        GetCompatibleRank(op->operand(0).source()),
         ::common::errors::InvalidArgument("The number of reduce_axis should be "
                                           "no more than output value ranks."));
   }

paddle/cinn/operator_fusion/pir_graph_analyzing/shardable_axes_base.cc

Lines changed: 16 additions & 16 deletions
@@ -77,11 +77,11 @@ ShardableAxesSignature CreateDefaultSignature(pir::Operation* op) {
   ShardableAxesSignature result = ShardableAxesSignature();
   for (int i = 0; i < op->num_operands(); ++i) {
     result.inputs.emplace_back(
-        CreateNewNamesWithRank(GetCompitableRank(op->operand_source(i))));
+        CreateNewNamesWithRank(GetCompatibleRank(op->operand_source(i))));
   }
   for (int i = 0; i < op->num_results(); ++i) {
     result.outputs.emplace_back(
-        CreateNewNamesWithRank(GetCompitableRank(op->result(i))));
+        CreateNewNamesWithRank(GetCompatibleRank(op->result(i))));
   }
   return result;
 }
@@ -109,7 +109,7 @@ ShardableAxesSignature CreateSignatureForReduce(pir::Operation* reduce_op) {
       1,
       ::common::errors::PreconditionNotMet(
           "Required reduce_op->num_results() shall be equal 1."));
-  const size_t input_rank = GetCompitableRank(reduce_op->operand_source(0));
+  const size_t input_rank = GetCompatibleRank(reduce_op->operand_source(0));
   auto input_axes = CreateNewNamesWithRank(input_rank);

   const std::vector<int64_t> reduce_axis_idx = GetReduceAxisIdx(reduce_op);
@@ -152,20 +152,20 @@ ShardableAxesSignature CreateSignatureForReduce(pir::Operation* reduce_op) {
 ShardableAxesSignature CreateSignatureForElementWise(pir::Operation* op) {
   ShardableAxesSignature result = ShardableAxesSignature();

-  int64_t rank = GetCompitableRank(op->result(0));
+  int64_t rank = GetCompatibleRank(op->result(0));
   auto same_axes = CreateNewNamesWithRank(rank);

   for (int i = 0; i < op->num_operands(); ++i) {
     PADDLE_ENFORCE_EQ(rank,
-                      GetCompitableRank(op->operand_source(i)),
+                      GetCompatibleRank(op->operand_source(i)),
                       ::common::errors::PreconditionNotMet(
                           "Required all inputs rank shall be equal output in "
                           "elementwise op."));
     result.inputs.emplace_back(same_axes);
   }
   for (int i = 0; i < op->num_results(); ++i) {
     PADDLE_ENFORCE_EQ(rank,
-                      GetCompitableRank(op->result(i)),
+                      GetCompatibleRank(op->result(i)),
                       ::common::errors::PreconditionNotMet(
                           "Required all outputs rank shall be equal each other "
                           "in elementwise op."));
@@ -188,7 +188,7 @@ ShardableAxesSignature CreateSignatureForTranspose(pir::Operation* op) {
           "Required transpose_op->num_results() shall be equal 1."));

   const auto input_axes =
-      CreateNewNamesWithRank(GetCompitableRank(op->operand_source(0)));
+      CreateNewNamesWithRank(GetCompatibleRank(op->operand_source(0)));

   std::vector<int32_t> perm =
       GetInt32ArrayAttributeData(op->attributes().at("perm"));
@@ -224,7 +224,7 @@ ShardableAxesSignature CreateSignatureForSlice(
           "Required slice_op->num_results() shall be equal 1."));

   const auto input_axes =
-      CreateNewNamesWithRank(GetCompitableRank(op->operand_source(0)));
+      CreateNewNamesWithRank(GetCompatibleRank(op->operand_source(0)));

   const auto [slice_axis, keepdim] = GetSliceAxis(op);
   const auto output_axes = [&]() -> decltype(auto) {
@@ -266,8 +266,8 @@ ShardableAxesSignature CreateSignatureForBroadcast(
           "Required broad_cast_value is not empty."));

   const auto& [input_value, output_value] = broad_cast_value.value();
-  const int input_rank = GetCompitableRank(input_value);
-  const int output_rank = GetCompitableRank(output_value);
+  const int input_rank = GetCompatibleRank(input_value);
+  const int output_rank = GetCompatibleRank(output_value);
   PADDLE_ENFORCE_GE(
       output_rank,
       input_rank,
@@ -278,7 +278,7 @@ ShardableAxesSignature CreateSignatureForBroadcast(
   // output.
   for (int i = 0; i < op->num_operands(); ++i) {
     result.inputs.emplace_back(
-        CreateNewNamesWithRank(GetCompitableRank(op->operand_source(i))));
+        CreateNewNamesWithRank(GetCompatibleRank(op->operand_source(i))));
   }

   // Create output axes. Compare axis one by one, from back to front.
@@ -309,8 +309,8 @@ ShardableAxesSignature CreateSignatureForReshape(
     pir::ShapeConstraintIRAnalysis* shape_analysis) {
   const auto input_value = op->operand_source(0);
   const auto output_value = op->result(0);
-  const auto input_rank = GetCompitableRank(op->operand_source(0));
-  const auto output_rank = GetCompitableRank(op->result(0));
+  const auto input_rank = GetCompatibleRank(op->operand_source(0));
+  const auto output_rank = GetCompatibleRank(op->result(0));
   const auto in_shape = GetDimExprsFromValue(input_value);
   const auto out_shape = GetDimExprsFromValue(output_value);

@@ -320,7 +320,7 @@ ShardableAxesSignature CreateSignatureForReshape(

   if (op->name() == "pd_op.reshape" && op->num_operands() == 2) {
     result.inputs.emplace_back(
-        CreateNewNamesWithRank(GetCompitableRank(op->operand_source(1))));
+        CreateNewNamesWithRank(GetCompatibleRank(op->operand_source(1))));
   }

   if (GetRank(input_value) == 0 || GetRank(output_value) == 0) {
@@ -387,7 +387,7 @@ ShardableAxesSignature CreateSignatureForReshape(

 ShardableAxesSignature CreateSignatureForConcat(
     pir::Operation* op, ShardableAxesInfoManager* axes_manager) {
-  size_t rank = GetCompitableRank(op->result(0));
+  size_t rank = GetCompatibleRank(op->result(0));
   const auto same_axes = CreateNewNamesWithRank(rank - 1);

   const auto axis_attr =
@@ -406,7 +406,7 @@ ShardableAxesSignature CreateSignatureForConcat(
   ShardableAxesSignature result = ShardableAxesSignature();
   for (int i = 0; i < op->num_operands(); ++i) {
     PADDLE_ENFORCE_EQ(rank,
-                      GetCompitableRank(op->operand_source(i)),
+                      GetCompatibleRank(op->operand_source(i)),
                       ::common::errors::PreconditionNotMet(
                           "Required all inputs rank shall be equal output in "
                           "concat op."));

paddle/cinn/operator_fusion/utils.cc

Lines changed: 1 addition & 1 deletion
@@ -55,7 +55,7 @@ std::vector<int32_t> GetInt32ArrayAttributeData(
 }

 std::vector<int64_t> GetReduceAxisIdx(pir::Operation* reduce_op) {
-  const size_t input_rank = GetCompitableRank(reduce_op->operand_source(0));
+  const size_t input_rank = GetCompatibleRank(reduce_op->operand_source(0));
   const auto& attr_val = reduce_op->attributes().at("axis");
   PADDLE_ENFORCE_EQ(attr_val.isa<::pir::ArrayAttribute>(),
                     true,
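GetCompatibleRank (renamed here from GetCompitableRank; its definition is not part of this diff) supplies the input rank that GetReduceAxisIdx resolves the op's axis attribute against. The standalone sketch below shows the presumed behavior as an assumption, not the real implementation: a 0-D value is treated as rank 1, consistent with the shape convention above, and negative axes wrap around the input rank.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Assumption: a 0-D value is treated as rank 1, mirroring how
// GetCompatibleShape treats an empty shape.
size_t GetCompatibleRank(size_t raw_rank) {
  return raw_rank == 0 ? 1 : raw_rank;
}

// Assumption: negative axis attributes wrap around the input rank,
// e.g. axis -1 on a rank-3 input becomes axis 2.
std::vector<int64_t> NormalizeReduceAxes(const std::vector<int64_t>& axes,
                                         size_t input_rank) {
  std::vector<int64_t> result;
  result.reserve(axes.size());
  for (int64_t axis : axes) {
    result.push_back(axis < 0 ? axis + static_cast<int64_t>(input_rank)
                              : axis);
  }
  return result;
}

int main() {
  const size_t rank = GetCompatibleRank(3);
  for (int64_t axis : NormalizeReduceAxes({-1, 0}, rank)) {
    std::cout << axis << ' ';  // prints: 2 0
  }
  std::cout << '\n';
  return 0;
}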
