Skip to content

Commit 1549a0c

Browse files
[mlir][SCF] Remove scf-bufferize pass (#113840)
The dialect conversion-based bufferization passes were migrated to One-Shot Bufferize about two years ago. To clean up the code base, this commit removes the `scf-bufferize` pass, one of the few remaining parts of the old infrastructure. Most bufferization passes have already been removed. Note for LLVM integration: if you depend on this pass, migrate to One-Shot Bufferize or copy the pass into your codebase.
1 parent b46a048 commit 1549a0c

File tree

7 files changed

+32
-83
lines changed

7 files changed

+32
-83
lines changed

mlir/docs/Bufferization.md

Lines changed: 1 addition & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -579,7 +579,6 @@ The code, slightly simplified and annotated, is reproduced here:
579579
// Partial bufferization passes.
580580
pm.addPass(createTensorConstantBufferizePass());
581581
pm.addNestedPass<func::FuncOp>(createTCPBufferizePass()); // Bufferizes the downstream `tcp` dialect.
582-
pm.addNestedPass<func::FuncOp>(createSCFBufferizePass());
583582
pm.addNestedPass<func::FuncOp>(createLinalgBufferizePass());
584583
pm.addNestedPass<func::FuncOp>(createTensorBufferizePass());
585584
pm.addPass(createFuncBufferizePass());
@@ -596,7 +595,7 @@ must be module passes because they make changes to the top-level module.
596595

597596
The bulk of the bufferization work is done by the function passes. Most of these
598597
passes are provided as part of the upstream MLIR distribution and bufferize
599-
their respective dialects (e.g. `scf-bufferize` bufferizes the `scf` dialect).
598+
their respective dialects (e.g. `abc-bufferize` bufferizes the `abc` dialect).
600599
The `tcp-bufferize` pass is an exception -- it is a partial bufferization pass
601600
used to bufferize the downstream `tcp` dialect, and fits in perfectly with all
602601
the other passes provided upstream.
@@ -694,20 +693,6 @@ which helps with this in general.
694693

695694
### Other partial bufferization examples
696695

697-
- `scf-bufferize`
698-
([code](https://github.com/llvm/llvm-project/blob/bc8acf2ce8ad6e8c9b1d97b2e02d3f4ad26e1d9d/mlir/lib/Dialect/SCF/Transforms/Bufferize.cpp#L1),
699-
[test](https://github.com/llvm/llvm-project/blob/bc8acf2ce8ad6e8c9b1d97b2e02d3f4ad26e1d9d/mlir/test/Dialect/SCF/bufferize.mlir#L1))
700-
701-
- Bufferizes ops from the `scf` dialect.
702-
- This is an example of how to bufferize ops that implement
703-
`RegionBranchOpInterface` (that is, they use regions to represent
704-
control flow).
705-
- The bulk of the work is done by
706-
`lib/Dialect/SCF/Transforms/StructuralTypeConversions.cpp`
707-
([code](https://github.com/llvm/llvm-project/blob/daaaed6bb89044ac58a23f1bb1ccdd12342a5a58/mlir/lib/Dialect/SCF/Transforms/StructuralTypeConversions.cpp#L1)),
708-
which is well-commented and covers how to correctly convert ops that
709-
contain regions.
710-
711696
- `func-bufferize`
712697
([code](https://github.com/llvm/llvm-project/blob/2f5715dc78328215d51d5664c72c632a6dac1046/mlir/lib/Dialect/Func/Transforms/FuncBufferize.cpp#L1),
713698
[test](https://github.com/llvm/llvm-project/blob/2f5715dc78328215d51d5664c72c632a6dac1046/mlir/test/Dialect/Func/func-bufferize.mlir#L1))

mlir/include/mlir/Dialect/SCF/Transforms/Passes.h

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -20,9 +20,6 @@ namespace mlir {
2020
#define GEN_PASS_DECL
2121
#include "mlir/Dialect/SCF/Transforms/Passes.h.inc"
2222

23-
/// Creates a pass that bufferizes the SCF dialect.
24-
std::unique_ptr<Pass> createSCFBufferizePass();
25-
2623
/// Creates a pass that specializes for loop for unrolling and
2724
/// vectorization.
2825
std::unique_ptr<Pass> createForLoopSpecializationPass();

mlir/include/mlir/Dialect/SCF/Transforms/Passes.td

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -11,13 +11,6 @@
1111

1212
include "mlir/Pass/PassBase.td"
1313

14-
def SCFBufferize : Pass<"scf-bufferize"> {
15-
let summary = "Bufferize the scf dialect.";
16-
let constructor = "mlir::createSCFBufferizePass()";
17-
let dependentDialects = ["bufferization::BufferizationDialect",
18-
"memref::MemRefDialect"];
19-
}
20-
2114
// Note: Making these canonicalization patterns would require a dependency
2215
// of the SCF dialect on the Affine/Tensor/MemRef dialects or vice versa.
2316
def SCFForLoopCanonicalization

mlir/lib/Dialect/SCF/Transforms/BufferizableOpInterfaceImpl.cpp

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -649,7 +649,8 @@ struct ForOpInterface
649649
if (failed(bufferizableOp.resolveTensorOpOperandConflicts(rewriter, state)))
650650
return failure();
651651

652-
if (!state.getOptions().enforceAliasingInvariants)
652+
if (!state.getOptions().enforceAliasingInvariants ||
653+
state.getOptions().copyBeforeWrite)
653654
return success();
654655

655656
// According to the `getAliasing...` implementations, a bufferized OpResult
@@ -889,7 +890,8 @@ struct WhileOpInterface
889890
if (failed(bufferizableOp.resolveTensorOpOperandConflicts(rewriter, state)))
890891
return failure();
891892

892-
if (!state.getOptions().enforceAliasingInvariants)
893+
if (!state.getOptions().enforceAliasingInvariants ||
894+
state.getOptions().copyBeforeWrite)
893895
return success();
894896

895897
// According to the `getAliasing...` implementations, a bufferized OpResult

mlir/lib/Dialect/SCF/Transforms/Bufferize.cpp

Lines changed: 0 additions & 47 deletions
This file was deleted.

mlir/lib/Dialect/SCF/Transforms/CMakeLists.txt

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,6 @@
11
add_mlir_dialect_library(MLIRSCFTransforms
22
BufferDeallocationOpInterfaceImpl.cpp
33
BufferizableOpInterfaceImpl.cpp
4-
Bufferize.cpp
54
ForallToFor.cpp
65
ForallToParallel.cpp
76
ForToWhile.cpp

mlir/test/Dialect/SCF/bufferize.mlir

Lines changed: 27 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
// RUN: mlir-opt %s -scf-bufferize | FileCheck %s
1+
// RUN: mlir-opt %s -one-shot-bufferize="dialect-filter=scf,bufferization copy-before-write unknown-type-conversion=identity-layout-map" -split-input-file | FileCheck %s
22

33
// CHECK-LABEL: func @if(
44
// CHECK-SAME: %[[PRED:.*]]: i1,
@@ -23,15 +23,21 @@ func.func @if(%pred: i1, %true_val: tensor<?xf32>, %false_val: tensor<?xf32>) ->
2323
return %0 : tensor<?xf32>
2424
}
2525

26+
// -----
27+
2628
// CHECK-LABEL: func @for(
2729
// CHECK-SAME: %[[TENSOR:.*]]: tensor<f32>,
2830
// CHECK-SAME: %[[LB:.*]]: index, %[[UB:.*]]: index,
2931
// CHECK-SAME: %[[STEP:.*]]: index) -> tensor<f32> {
3032
// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : memref<f32>
31-
// CHECK: %[[RESULT_MEMREF:.*]] = scf.for %[[VAL_6:.*]] = %[[LB]] to %[[UB]] step %[[STEP]] iter_args(%[[ITER:.*]] = %[[MEMREF]]) -> (memref<f32>) {
33+
// Note: scf.for iter_args always bufferize to a memory write. This could be
34+
// optimized by analyzing the loop body.
35+
// CHECK: %[[MEMREF_COPY:.*]] = memref.alloc()
36+
// CHECK: memref.copy %[[MEMREF]], %[[MEMREF_COPY]]
37+
// CHECK: %[[RESULT_MEMREF:.*]] = scf.for %{{.*}} = %[[LB]] to %[[UB]] step %[[STEP]] iter_args(%[[ITER:.*]] = %[[MEMREF_COPY]]) -> (memref<f32>) {
3238
// CHECK: scf.yield %[[ITER]] : memref<f32>
3339
// CHECK: } {some_attr}
34-
// CHECK: %[[VAL_8:.*]] = bufferization.to_tensor %[[VAL_9:.*]] : memref<f32>
40+
// CHECK: %[[VAL_8:.*]] = bufferization.to_tensor %[[RESULT_MEMREF]] : memref<f32>
3541
// CHECK: return %[[VAL_8]] : tensor<f32>
3642
// CHECK: }
3743
func.func @for(%arg0: tensor<f32>, %lb: index, %ub: index, %step: index) -> tensor<f32> {
@@ -41,6 +47,8 @@ func.func @for(%arg0: tensor<f32>, %lb: index, %ub: index, %step: index) -> tens
4147
return %ret : tensor<f32>
4248
}
4349

50+
// -----
51+
4452
// Check whether this converts at all.
4553
//
4654
// It would previously fail altogether.
@@ -57,17 +65,23 @@ func.func @if_correct_recursive_legalization_behavior(%pred: i1, %tensor: tensor
5765
return %0 : tensor<f32>
5866
}
5967

68+
// -----
69+
6070
// CHECK-LABEL: func @for_correct_recursive_legalization_behavior(
6171
// CHECK-SAME: %[[TENSOR:.*]]: tensor<f32>,
6272
// CHECK-SAME: %[[INDEX:.*]]: index) -> tensor<f32> {
6373
// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : memref<f32>
64-
// CHECK: %[[RESULT:.*]] = scf.for %[[IV:.*]] = %[[INDEX]] to %[[INDEX]] step %[[INDEX]] iter_args(%[[MEMREF_ITER:.*]] = %[[MEMREF]]) -> (memref<f32>) {
74+
// Note: scf.for iter_args always bufferize to a memory write. This could be
75+
// optimized by analyzing the loop body.
76+
// CHECK: %[[MEMREF_COPY:.*]] = memref.alloc()
77+
// CHECK: memref.copy %[[MEMREF]], %[[MEMREF_COPY]]
78+
// CHECK: %[[RESULT:.*]] = scf.for %{{.*}} = %[[INDEX]] to %[[INDEX]] step %[[INDEX]] iter_args(%[[MEMREF_ITER:.*]] = %[[MEMREF_COPY]]) -> (memref<f32>) {
6579
// CHECK: %[[TENSOR_ITER:.*]] = bufferization.to_tensor %[[MEMREF_ITER]] : memref<f32>
6680
// CHECK: %[[TENSOR_MUNGED:.*]] = "test.munge_tensor"(%[[TENSOR_ITER]]) : (tensor<f32>) -> tensor<f32>
6781
// CHECK: %[[MEMREF_MUNGED:.*]] = bufferization.to_memref %[[TENSOR_MUNGED]] : memref<f32>
6882
// CHECK: scf.yield %[[MEMREF_MUNGED]] : memref<f32>
6983
// CHECK: }
70-
// CHECK: %[[TENSOR:.*]] = bufferization.to_tensor %[[RESULT:.*]] : memref<f32>
84+
// CHECK: %[[TENSOR:.*]] = bufferization.to_tensor %[[RESULT]] : memref<f32>
7185
// CHECK: return %[[TENSOR]] : tensor<f32>
7286
// CHECK: }
7387
func.func @for_correct_recursive_legalization_behavior(%arg0: tensor<f32>, %index: index) -> tensor<f32> {
@@ -78,11 +92,17 @@ func.func @for_correct_recursive_legalization_behavior(%arg0: tensor<f32>, %inde
7892
return %ret : tensor<f32>
7993
}
8094

95+
// -----
96+
8197
// CHECK-LABEL: func @bufferize_while(
8298
// CHECK-SAME: %[[ARG0:.*]]: i64, %[[ARG1:.*]]: i64, %[[ARG2:.*]]: tensor<f32>
8399
// CHECK: %[[M:.*]] = bufferization.to_memref %[[ARG2]] : memref<f32>
84-
// CHECK: %[[RES1:.*]]:3 = scf.while (%{{.*}} = %[[ARG0]], %{{.*}} = %[[M]]) : (i64, memref<f32>) -> (i64, i64, memref<f32>)
85-
// CHECK: scf.condition(%{{.*}}) %{{.*}}, %{{.*}}, %{{.*}} : i64, i64, memref<f32>
100+
// Note: scf.while iter_args always bufferize to a memory write. This could be
101+
// optimized by analyzing the loop body.
102+
// CHECK: %[[MEMREF_COPY:.*]] = memref.alloc()
103+
// CHECK: memref.copy %[[M]], %[[MEMREF_COPY]]
104+
// CHECK: %[[RES1:.*]]:3 = scf.while (%{{.*}} = %[[ARG0]], %[[ITER:.*]] = %[[MEMREF_COPY]]) : (i64, memref<f32>) -> (i64, i64, memref<f32>)
105+
// CHECK: scf.condition(%{{.*}}) %{{.*}}, %{{.*}}, %[[ITER]] : i64, i64, memref<f32>
86106
// CHECK: ^bb0(%{{.*}}: i64, %{{.*}}: i64, %{{.*}}: memref<f32>):
87107
// CHECK: scf.yield %{{.*}}, %{{.*}} : i64, memref<f32>
88108
// CHECK: %[[RES2:.*]] = bufferization.to_tensor %[[RES1]]#2 : memref<f32>

0 commit comments

Comments
 (0)