[mlir] Strip away lambdas (NFC) #143280
Conversation
We don't need lambdas here.
@llvm/pr-subscribers-mlir-affine @llvm/pr-subscribers-mlir-sparse @llvm/pr-subscribers-mlir-linalg

Author: Kazu Hirata (kazutakahirata)

Changes: We don't need lambdas here.

Full diff: https://github.com/llvm/llvm-project/pull/143280.diff

4 Files Affected:
diff --git a/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp b/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp
index fe53d03249369..01cc500148385 100644
--- a/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp
+++ b/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp
@@ -402,9 +402,7 @@ isVectorizableLoopBodyWithOpCond(AffineForOp loop,
return !VectorType::isValidElementType(type);
}))
return true;
- return llvm::any_of(op.getResultTypes(), [](Type type) {
- return !VectorType::isValidElementType(type);
- });
+ return !llvm::all_of(op.getResultTypes(), VectorType::isValidElementType);
});
SmallVector<NestedMatch, 8> opsMatched;
types.match(forOp, &opsMatched);
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
index c5b62227777a7..6b43006c4528a 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -820,9 +820,8 @@ tensorExtractVectorizationPrecondition(Operation *op, bool vectorizeNDExtract) {
return failure();
}
- if (llvm::any_of(extractOp->getResultTypes(), [](Type type) {
- return !VectorType::isValidElementType(type);
- })) {
+ if (!llvm::all_of(extractOp->getResultTypes(),
+ VectorType::isValidElementType)) {
return failure();
}
@@ -2163,14 +2162,12 @@ static LogicalResult vectorizeLinalgOpPrecondition(
})) {
continue;
}
- if (llvm::any_of(innerOp.getOperandTypes(), [](Type type) {
- return !VectorType::isValidElementType(type);
- })) {
+ if (!llvm::all_of(innerOp.getOperandTypes(),
+ VectorType::isValidElementType)) {
return failure();
}
- if (llvm::any_of(innerOp.getResultTypes(), [](Type type) {
- return !VectorType::isValidElementType(type);
- })) {
+ if (!llvm::all_of(innerOp.getResultTypes(),
+ VectorType::isValidElementType)) {
return failure();
}
}
diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
index 2196199816292..34ae83b25c397 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -799,8 +799,7 @@ LogicalResult SparseTensorEncodingAttr::verify(
"before singleton level";
auto *curCOOEnd = std::find_if_not(it, lvlTypes.end(), isSingletonLT);
- if (!std::all_of(it, curCOOEnd,
- [](LevelType i) { return isSingletonLT(i); }))
+ if (!std::all_of(it, curCOOEnd, isSingletonLT))
return emitError() << "expected all singleton lvlTypes "
"following a singleton level";
// We can potentially support mixed SoA/AoS singleton levels.
@@ -833,8 +832,7 @@ LogicalResult SparseTensorEncodingAttr::verify(
it != std::end(lvlTypes)) {
if (it != lvlTypes.end() - 1)
return emitError() << "expected n_out_of_m to be the last level type";
- if (!std::all_of(lvlTypes.begin(), it,
- [](LevelType i) { return isDenseLT(i); }))
+ if (!std::all_of(lvlTypes.begin(), it, isDenseLT))
return emitError() << "expected all dense lvlTypes "
"before a n_out_of_m level";
if (dimToLvl && (dimToLvl.getNumDims() != dimToLvl.getNumResults())) {
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
index 6e67377ddb6e8..04242cad9ecb6 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
@@ -1061,8 +1061,7 @@ void DimOp::getCanonicalizationPatterns(RewritePatternSet &results,
void EmptyOp::build(OpBuilder &builder, OperationState &result,
ArrayRef<int64_t> staticShape, Type elementType,
Attribute encoding) {
- assert(all_of(staticShape,
- [](int64_t sz) { return !ShapedType::isDynamic(sz); }) &&
+ assert(none_of(staticShape, ShapedType::isDynamic) &&
"expected only static sizes");
build(builder, result, staticShape, elementType, ValueRange{}, encoding);
}
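The whole change is one mechanical pattern: when the predicate already exists as a named function (the static member VectorType::isValidElementType, or free functions like isSingletonLT, isDenseLT, and ShapedType::isDynamic), a lambda that merely negates it can be folded away via De Morgan's law: any_of(r, !p) == !all_of(r, p), and all_of(r, !p) == none_of(r, p). A minimal standalone sketch of both directions, using the std::ranges equivalents of the range-based llvm::any_of/all_of/none_of from llvm/ADT/STLExtras.h (isValid is a hypothetical stand-in predicate, not an MLIR API):

```cpp
#include <algorithm>
#include <cassert>
#include <vector>

// Hypothetical stand-in for a named predicate such as
// VectorType::isValidElementType.
static bool isValid(int x) { return x >= 0; }

int main() {
  std::vector<int> xs = {1, 2, -3};

  // Before: a lambda that only re-negates the named predicate.
  bool anyInvalidBefore =
      std::ranges::any_of(xs, [](int x) { return !isValid(x); });

  // After: De Morgan lets the predicate be passed directly:
  // any_of(r, !p) == !all_of(r, p).
  bool anyInvalidAfter = !std::ranges::all_of(xs, isValid);
  assert(anyInvalidBefore == anyInvalidAfter);

  // The EmptyOp::build change uses the other direction of the same law:
  // all_of(r, !p) == none_of(r, p).
  assert(std::ranges::all_of(xs, [](int x) { return !isValid(x); }) ==
         std::ranges::none_of(xs, isValid));
  return 0;
}
```

One caveat: this rewrite only works when the name resolves to a single function. An overloaded function or a function template still needs a lambda (or an explicit cast) to select the right overload, which is presumably why other lambdas in these files remain.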