Skip to content

Conversation

@kazutakahirata
Copy link
Contributor

We don't need lambdas here.

We don't need lambdas here.
@llvmbot
Copy link
Member

llvmbot commented Jun 7, 2025

@llvm/pr-subscribers-mlir-sparse
@llvm/pr-subscribers-mlir

@llvm/pr-subscribers-mlir-affine

Author: Kazu Hirata (kazutakahirata)

Changes

We don't need lambdas here: single-call lambdas that merely wrap a named predicate (`VectorType::isValidElementType`, `isSingletonLT`, `isDenseLT`, `ShapedType::isDynamic`) are replaced by passing the predicate directly, inverting `any_of(..., !pred)` into `!all_of(..., pred)` (or `none_of`) where needed.


Full diff: https://github.com/llvm/llvm-project/pull/143280.diff

4 Files Affected:

  • (modified) mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp (+1-3)
  • (modified) mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp (+6-9)
  • (modified) mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp (+2-4)
  • (modified) mlir/lib/Dialect/Tensor/IR/TensorOps.cpp (+1-2)
diff --git a/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp b/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp
index fe53d03249369..01cc500148385 100644
--- a/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp
+++ b/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp
@@ -402,9 +402,7 @@ isVectorizableLoopBodyWithOpCond(AffineForOp loop,
           return !VectorType::isValidElementType(type);
         }))
       return true;
-    return llvm::any_of(op.getResultTypes(), [](Type type) {
-      return !VectorType::isValidElementType(type);
-    });
+    return !llvm::all_of(op.getResultTypes(), VectorType::isValidElementType);
   });
   SmallVector<NestedMatch, 8> opsMatched;
   types.match(forOp, &opsMatched);
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
index c5b62227777a7..6b43006c4528a 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -820,9 +820,8 @@ tensorExtractVectorizationPrecondition(Operation *op, bool vectorizeNDExtract) {
       return failure();
   }
 
-  if (llvm::any_of(extractOp->getResultTypes(), [](Type type) {
-        return !VectorType::isValidElementType(type);
-      })) {
+  if (!llvm::all_of(extractOp->getResultTypes(),
+                    VectorType::isValidElementType)) {
     return failure();
   }
 
@@ -2163,14 +2162,12 @@ static LogicalResult vectorizeLinalgOpPrecondition(
             })) {
       continue;
     }
-    if (llvm::any_of(innerOp.getOperandTypes(), [](Type type) {
-          return !VectorType::isValidElementType(type);
-        })) {
+    if (!llvm::all_of(innerOp.getOperandTypes(),
+                      VectorType::isValidElementType)) {
       return failure();
     }
-    if (llvm::any_of(innerOp.getResultTypes(), [](Type type) {
-          return !VectorType::isValidElementType(type);
-        })) {
+    if (!llvm::all_of(innerOp.getResultTypes(),
+                      VectorType::isValidElementType)) {
       return failure();
     }
   }
diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
index 2196199816292..34ae83b25c397 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -799,8 +799,7 @@ LogicalResult SparseTensorEncodingAttr::verify(
                             "before singleton level";
 
     auto *curCOOEnd = std::find_if_not(it, lvlTypes.end(), isSingletonLT);
-    if (!std::all_of(it, curCOOEnd,
-                     [](LevelType i) { return isSingletonLT(i); }))
+    if (!std::all_of(it, curCOOEnd, isSingletonLT))
       return emitError() << "expected all singleton lvlTypes "
                             "following a singleton level";
     // We can potentially support mixed SoA/AoS singleton levels.
@@ -833,8 +832,7 @@ LogicalResult SparseTensorEncodingAttr::verify(
       it != std::end(lvlTypes)) {
     if (it != lvlTypes.end() - 1)
       return emitError() << "expected n_out_of_m to be the last level type";
-    if (!std::all_of(lvlTypes.begin(), it,
-                     [](LevelType i) { return isDenseLT(i); }))
+    if (!std::all_of(lvlTypes.begin(), it, isDenseLT))
       return emitError() << "expected all dense lvlTypes "
                             "before a n_out_of_m level";
     if (dimToLvl && (dimToLvl.getNumDims() != dimToLvl.getNumResults())) {
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
index 6e67377ddb6e8..04242cad9ecb6 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
@@ -1061,8 +1061,7 @@ void DimOp::getCanonicalizationPatterns(RewritePatternSet &results,
 void EmptyOp::build(OpBuilder &builder, OperationState &result,
                     ArrayRef<int64_t> staticShape, Type elementType,
                     Attribute encoding) {
-  assert(all_of(staticShape,
-                [](int64_t sz) { return !ShapedType::isDynamic(sz); }) &&
+  assert(none_of(staticShape, ShapedType::isDynamic) &&
          "expected only static sizes");
   build(builder, result, staticShape, elementType, ValueRange{}, encoding);
 }

@llvmbot
Copy link
Member

llvmbot commented Jun 7, 2025

@llvm/pr-subscribers-mlir-linalg

Author: Kazu Hirata (kazutakahirata)

Changes

We don't need lambdas here: single-call lambdas that merely wrap a named predicate (`VectorType::isValidElementType`, `isSingletonLT`, `isDenseLT`, `ShapedType::isDynamic`) are replaced by passing the predicate directly, inverting `any_of(..., !pred)` into `!all_of(..., pred)` (or `none_of`) where needed.


Full diff: https://github.com/llvm/llvm-project/pull/143280.diff

4 Files Affected:

  • (modified) mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp (+1-3)
  • (modified) mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp (+6-9)
  • (modified) mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp (+2-4)
  • (modified) mlir/lib/Dialect/Tensor/IR/TensorOps.cpp (+1-2)
diff --git a/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp b/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp
index fe53d03249369..01cc500148385 100644
--- a/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp
+++ b/mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp
@@ -402,9 +402,7 @@ isVectorizableLoopBodyWithOpCond(AffineForOp loop,
           return !VectorType::isValidElementType(type);
         }))
       return true;
-    return llvm::any_of(op.getResultTypes(), [](Type type) {
-      return !VectorType::isValidElementType(type);
-    });
+    return !llvm::all_of(op.getResultTypes(), VectorType::isValidElementType);
   });
   SmallVector<NestedMatch, 8> opsMatched;
   types.match(forOp, &opsMatched);
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
index c5b62227777a7..6b43006c4528a 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -820,9 +820,8 @@ tensorExtractVectorizationPrecondition(Operation *op, bool vectorizeNDExtract) {
       return failure();
   }
 
-  if (llvm::any_of(extractOp->getResultTypes(), [](Type type) {
-        return !VectorType::isValidElementType(type);
-      })) {
+  if (!llvm::all_of(extractOp->getResultTypes(),
+                    VectorType::isValidElementType)) {
     return failure();
   }
 
@@ -2163,14 +2162,12 @@ static LogicalResult vectorizeLinalgOpPrecondition(
             })) {
       continue;
     }
-    if (llvm::any_of(innerOp.getOperandTypes(), [](Type type) {
-          return !VectorType::isValidElementType(type);
-        })) {
+    if (!llvm::all_of(innerOp.getOperandTypes(),
+                      VectorType::isValidElementType)) {
       return failure();
     }
-    if (llvm::any_of(innerOp.getResultTypes(), [](Type type) {
-          return !VectorType::isValidElementType(type);
-        })) {
+    if (!llvm::all_of(innerOp.getResultTypes(),
+                      VectorType::isValidElementType)) {
       return failure();
     }
   }
diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
index 2196199816292..34ae83b25c397 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -799,8 +799,7 @@ LogicalResult SparseTensorEncodingAttr::verify(
                             "before singleton level";
 
     auto *curCOOEnd = std::find_if_not(it, lvlTypes.end(), isSingletonLT);
-    if (!std::all_of(it, curCOOEnd,
-                     [](LevelType i) { return isSingletonLT(i); }))
+    if (!std::all_of(it, curCOOEnd, isSingletonLT))
       return emitError() << "expected all singleton lvlTypes "
                             "following a singleton level";
     // We can potentially support mixed SoA/AoS singleton levels.
@@ -833,8 +832,7 @@ LogicalResult SparseTensorEncodingAttr::verify(
       it != std::end(lvlTypes)) {
     if (it != lvlTypes.end() - 1)
       return emitError() << "expected n_out_of_m to be the last level type";
-    if (!std::all_of(lvlTypes.begin(), it,
-                     [](LevelType i) { return isDenseLT(i); }))
+    if (!std::all_of(lvlTypes.begin(), it, isDenseLT))
       return emitError() << "expected all dense lvlTypes "
                             "before a n_out_of_m level";
     if (dimToLvl && (dimToLvl.getNumDims() != dimToLvl.getNumResults())) {
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
index 6e67377ddb6e8..04242cad9ecb6 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
@@ -1061,8 +1061,7 @@ void DimOp::getCanonicalizationPatterns(RewritePatternSet &results,
 void EmptyOp::build(OpBuilder &builder, OperationState &result,
                     ArrayRef<int64_t> staticShape, Type elementType,
                     Attribute encoding) {
-  assert(all_of(staticShape,
-                [](int64_t sz) { return !ShapedType::isDynamic(sz); }) &&
+  assert(none_of(staticShape, ShapedType::isDynamic) &&
          "expected only static sizes");
   build(builder, result, staticShape, elementType, ValueRange{}, encoding);
 }

@kazutakahirata kazutakahirata merged commit 1cf1c21 into llvm:main Jun 8, 2025
13 checks passed
@kazutakahirata kazutakahirata deleted the cleanup_20250607_lambdas_mlir branch June 8, 2025 08:34
tomtor pushed a commit to tomtor/llvm-project that referenced this pull request Jun 14, 2025
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment

Projects

None yet

Development

Successfully merging this pull request may close these issues.

4 participants