Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion mlir/include/mlir/Transforms/Passes.h
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ class GreedyRewriteConfig;

#define GEN_PASS_DECL_CANONICALIZER
#define GEN_PASS_DECL_CONTROLFLOWSINK
#define GEN_PASS_DECL_CSEPASS
#define GEN_PASS_DECL_CSE
#define GEN_PASS_DECL_INLINER
#define GEN_PASS_DECL_LOOPINVARIANTCODEMOTION
#define GEN_PASS_DECL_MEM2REG
Expand Down
58 changes: 54 additions & 4 deletions mlir/test/Transforms/cse.mlir
Original file line number Diff line number Diff line change
@@ -1,7 +1,4 @@
// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline='builtin.module(func.func(cse))' | FileCheck %s

// CHECK-DAG: #[[$MAP:.*]] = affine_map<(d0) -> (d0 mod 2)>
#map0 = affine_map<(d0) -> (d0 mod 2)>
// RUN: mlir-opt -allow-unregistered-dialect %s -pass-pipeline='builtin.module(func.func(cse))' -split-input-file | FileCheck %s

// CHECK-LABEL: @simple_constant
func.func @simple_constant() -> (i32, i32) {
Expand All @@ -13,6 +10,11 @@ func.func @simple_constant() -> (i32, i32) {
return %0, %1 : i32, i32
}

// -----

// CHECK: #[[$MAP:.*]] = affine_map<(d0) -> (d0 mod 2)>
#map0 = affine_map<(d0) -> (d0 mod 2)>

// CHECK-LABEL: @basic
func.func @basic() -> (index, index) {
// CHECK: %[[VAR_c0:[0-9a-zA-Z_]+]] = arith.constant 0 : index
Expand All @@ -27,6 +29,8 @@ func.func @basic() -> (index, index) {
return %0, %1 : index, index
}

// -----

// CHECK-LABEL: @many
func.func @many(f32, f32) -> (f32) {
^bb0(%a : f32, %b : f32):
Expand All @@ -52,6 +56,8 @@ func.func @many(f32, f32) -> (f32) {
return %l : f32
}

// -----

/// Check that operations are not eliminated if they have different operands.
// CHECK-LABEL: @different_ops
func.func @different_ops() -> (i32, i32) {
Expand All @@ -64,6 +70,8 @@ func.func @different_ops() -> (i32, i32) {
return %0, %1 : i32, i32
}

// -----

/// Check that operations are not eliminated if they have different result
/// types.
// CHECK-LABEL: @different_results
Expand All @@ -77,6 +85,8 @@ func.func @different_results(%arg0: tensor<*xf32>) -> (tensor<?x?xf32>, tensor<4
return %0, %1 : tensor<?x?xf32>, tensor<4x?xf32>
}

// -----

/// Check that operations are not eliminated if they have different attributes.
// CHECK-LABEL: @different_attributes
func.func @different_attributes(index, index) -> (i1, i1, i1) {
Expand All @@ -93,6 +103,8 @@ func.func @different_attributes(index, index) -> (i1, i1, i1) {
return %0, %1, %2 : i1, i1, i1
}

// -----

/// Check that operations with side effects are not eliminated.
// CHECK-LABEL: @side_effect
func.func @side_effect() -> (memref<2x1xf32>, memref<2x1xf32>) {
Expand All @@ -106,6 +118,8 @@ func.func @side_effect() -> (memref<2x1xf32>, memref<2x1xf32>) {
return %0, %1 : memref<2x1xf32>, memref<2x1xf32>
}

// -----

/// Check that operation definitions are properly propagated down the dominance
/// tree.
// CHECK-LABEL: @down_propagate_for
Expand All @@ -122,6 +136,8 @@ func.func @down_propagate_for() {
return
}

// -----

// CHECK-LABEL: @down_propagate
func.func @down_propagate() -> i32 {
// CHECK-NEXT: %[[VAR_c1_i32:[0-9a-zA-Z_]+]] = arith.constant 1 : i32
Expand All @@ -142,6 +158,8 @@ func.func @down_propagate() -> i32 {
return %arg : i32
}

// -----

/// Check that operation definitions are NOT propagated up the dominance tree.
// CHECK-LABEL: @up_propagate_for
func.func @up_propagate_for() -> i32 {
Expand All @@ -159,6 +177,8 @@ func.func @up_propagate_for() -> i32 {
return %1 : i32
}

// -----

// CHECK-LABEL: func @up_propagate
func.func @up_propagate() -> i32 {
// CHECK-NEXT: %[[VAR_c0_i32:[0-9a-zA-Z_]+]] = arith.constant 0 : i32
Expand Down Expand Up @@ -188,6 +208,8 @@ func.func @up_propagate() -> i32 {
return %add : i32
}

// -----

/// The same test as above except that we are testing on a cfg embedded within
/// an operation region.
// CHECK-LABEL: func @up_propagate_region
Expand Down Expand Up @@ -221,6 +243,8 @@ func.func @up_propagate_region() -> i32 {
return %0 : i32
}

// -----

/// This test checks that nested regions that are isolated from above are
/// properly handled.
// CHECK-LABEL: @nested_isolated
Expand Down Expand Up @@ -248,6 +272,8 @@ func.func @nested_isolated() -> i32 {
return %0 : i32
}

// -----

/// This test is checking that CSE gracefully handles values in graph regions
/// where the use occurs before the def, and one of the defs could be CSE'd with
/// the other.
Expand All @@ -269,6 +295,8 @@ func.func @use_before_def() {
return
}

// -----

/// This test checks that CSE removes duplicated read ops that immediately
/// follow one another.
// CHECK-LABEL: @remove_direct_duplicated_read_op
Expand All @@ -281,6 +309,8 @@ func.func @remove_direct_duplicated_read_op() -> i32 {
return %2 : i32
}

// -----

/// This test checks that CSE removes multiple duplicated read ops that
/// immediately follow one another.
// CHECK-LABEL: @remove_multiple_duplicated_read_op
Expand All @@ -300,6 +330,8 @@ func.func @remove_multiple_duplicated_read_op() -> i64 {
return %6 : i64
}

// -----

/// This test checks that CSE does not remove duplicated read ops that
/// have a write op in between them.
// CHECK-LABEL: @dont_remove_duplicated_read_op_with_sideeffecting
Expand All @@ -314,6 +346,8 @@ func.func @dont_remove_duplicated_read_op_with_sideeffecting() -> i32 {
return %2 : i32
}

// -----

// Check that an operation with a single region can CSE.
func.func @cse_single_block_ops(%a : tensor<?x?xf32>, %b : tensor<?x?xf32>)
-> (tensor<?x?xf32>, tensor<?x?xf32>) {
Expand All @@ -332,6 +366,8 @@ func.func @cse_single_block_ops(%a : tensor<?x?xf32>, %b : tensor<?x?xf32>)
// CHECK-NOT: test.cse_of_single_block_op
// CHECK: return %[[OP]], %[[OP]]

// -----

// Operations with a different number of bbArgs don't CSE.
func.func @no_cse_varied_bbargs(%a : tensor<?x?xf32>, %b : tensor<?x?xf32>)
-> (tensor<?x?xf32>, tensor<?x?xf32>) {
Expand All @@ -350,6 +386,8 @@ func.func @no_cse_varied_bbargs(%a : tensor<?x?xf32>, %b : tensor<?x?xf32>)
// CHECK: %[[OP1:.+]] = test.cse_of_single_block_op
// CHECK: return %[[OP0]], %[[OP1]]

// -----

// Operations with different regions don't CSE.
func.func @no_cse_region_difference_simple(%a : tensor<?x?xf32>, %b : tensor<?x?xf32>)
-> (tensor<?x?xf32>, tensor<?x?xf32>) {
Expand All @@ -368,6 +406,8 @@ func.func @no_cse_region_difference_simple(%a : tensor<?x?xf32>, %b : tensor<?x?
// CHECK: %[[OP1:.+]] = test.cse_of_single_block_op
// CHECK: return %[[OP0]], %[[OP1]]

// -----

// Operations with identical regions containing multiple statements CSE.
func.func @cse_single_block_ops_identical_bodies(%a : tensor<?x?xf32>, %b : tensor<?x?xf32>, %c : f32, %d : i1)
-> (tensor<?x?xf32>, tensor<?x?xf32>) {
Expand All @@ -392,6 +432,8 @@ func.func @cse_single_block_ops_identical_bodies(%a : tensor<?x?xf32>, %b : tens
// CHECK-NOT: test.cse_of_single_block_op
// CHECK: return %[[OP]], %[[OP]]

// -----

// Operations with non-identical regions don't CSE.
func.func @no_cse_single_block_ops_different_bodies(%a : tensor<?x?xf32>, %b : tensor<?x?xf32>, %c : f32, %d : i1)
-> (tensor<?x?xf32>, tensor<?x?xf32>) {
Expand All @@ -416,6 +458,8 @@ func.func @no_cse_single_block_ops_different_bodies(%a : tensor<?x?xf32>, %b : t
// CHECK: %[[OP1:.+]] = test.cse_of_single_block_op
// CHECK: return %[[OP0]], %[[OP1]]

// -----

func.func @failing_issue_59135(%arg0: tensor<2x2xi1>, %arg1: f32, %arg2 : tensor<2xi1>) -> (tensor<2xi1>, tensor<2xi1>) {
%false_2 = arith.constant false
%true_5 = arith.constant true
Expand All @@ -438,6 +482,8 @@ func.func @failing_issue_59135(%arg0: tensor<2x2xi1>, %arg1: f32, %arg2 : tensor
// CHECK: test.region_yield %[[TRUE]]
// CHECK: return %[[OP]], %[[OP]]

// -----

func.func @cse_multiple_regions(%c: i1, %t: tensor<5xf32>) -> (tensor<5xf32>, tensor<5xf32>) {
%r1 = scf.if %c -> (tensor<5xf32>) {
%0 = tensor.empty() : tensor<5xf32>
Expand All @@ -463,6 +509,8 @@ func.func @cse_multiple_regions(%c: i1, %t: tensor<5xf32>) -> (tensor<5xf32>, te
// CHECK-NOT: scf.if
// CHECK: return %[[if]], %[[if]]

// -----

// CHECK-LABEL: @cse_recursive_effects_success
func.func @cse_recursive_effects_success() -> (i32, i32, i32) {
// CHECK-NEXT: %[[READ_VALUE:.*]] = "test.op_with_memread"() : () -> i32
Expand Down Expand Up @@ -492,6 +540,8 @@ func.func @cse_recursive_effects_success() -> (i32, i32, i32) {
return %0, %2, %1 : i32, i32, i32
}

// -----

// CHECK-LABEL: @cse_recursive_effects_failure
func.func @cse_recursive_effects_failure() -> (i32, i32, i32) {
// CHECK-NEXT: %[[READ_VALUE:.*]] = "test.op_with_memread"() : () -> i32
Expand Down
Loading