Skip to content

Commit ee74d37

Browse files
committed
[mlir][sparse] Make three integration tests run with the codegen path.
Reviewed By: aartbik. Differential Revision: https://reviews.llvm.org/D138233
1 parent 96b3bf4 commit ee74d37

File tree

3 files changed

+48
-23
lines changed

3 files changed

+48
-23
lines changed

mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex32.mlir

Lines changed: 12 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,15 @@
1-
// RUN: mlir-opt %s --sparse-compiler | \
2-
// RUN: mlir-cpu-runner \
3-
// RUN: -e entry -entry-point-result=void \
4-
// RUN: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \
5-
// RUN: FileCheck %s
1+
// DEFINE: %{option} = enable-runtime-library=true
2+
// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \
3+
// DEFINE: mlir-cpu-runner \
4+
// DEFINE: -e entry -entry-point-result=void \
5+
// DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \
6+
// DEFINE: FileCheck %s
7+
//
8+
// RUN: %{command}
9+
//
10+
// Do the same run, but now with direct IR generation.
11+
// REDEFINE: %{option} = enable-runtime-library=false
12+
// RUN: %{command}
613

714
#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
815

mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex64.mlir

Lines changed: 12 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,15 @@
1-
// RUN: mlir-opt %s --sparse-compiler | \
2-
// RUN: mlir-cpu-runner \
3-
// RUN: -e entry -entry-point-result=void \
4-
// RUN: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \
5-
// RUN: FileCheck %s
1+
// DEFINE: %{option} = enable-runtime-library=true
2+
// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \
3+
// DEFINE: mlir-cpu-runner \
4+
// DEFINE: -e entry -entry-point-result=void \
5+
// DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \
6+
// DEFINE: FileCheck %s
7+
//
8+
// RUN: %{command}
9+
//
10+
// Do the same run, but now with direct IR generation.
11+
// REDEFINE: %{option} = enable-runtime-library=false
12+
// RUN: %{command}
613

714
#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
815

mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_re_im.mlir

Lines changed: 24 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,15 @@
1-
// RUN: mlir-opt %s --sparse-compiler | \
2-
// RUN: mlir-cpu-runner \
3-
// RUN: -e entry -entry-point-result=void \
4-
// RUN: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \
5-
// RUN: FileCheck %s
1+
// DEFINE: %{option} = enable-runtime-library=true
2+
// DEFINE: %{command} = mlir-opt %s --sparse-compiler=%{option} | \
3+
// DEFINE: mlir-cpu-runner \
4+
// DEFINE: -e entry -entry-point-result=void \
5+
// DEFINE: -shared-libs=%mlir_lib_dir/libmlir_c_runner_utils%shlibext | \
6+
// DEFINE: FileCheck %s
7+
//
8+
// RUN: %{command}
9+
//
10+
// Do the same run, but now with direct IR generation.
11+
// REDEFINE: %{option} = enable-runtime-library=false
12+
// RUN: %{command}
613

714
#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
815

@@ -49,12 +56,14 @@ module {
4956
func.func @dump(%arg0: tensor<?xf32, #SparseVector>) {
5057
%c0 = arith.constant 0 : index
5158
%d0 = arith.constant -1.0 : f32
59+
%n = sparse_tensor.number_of_entries %arg0 : tensor<?xf32, #SparseVector>
60+
vector.print %n : index
5261
%values = sparse_tensor.values %arg0 : tensor<?xf32, #SparseVector> to memref<?xf32>
53-
%0 = vector.transfer_read %values[%c0], %d0: memref<?xf32>, vector<4xf32>
54-
vector.print %0 : vector<4xf32>
62+
%0 = vector.transfer_read %values[%c0], %d0: memref<?xf32>, vector<3xf32>
63+
vector.print %0 : vector<3xf32>
5564
%indices = sparse_tensor.indices %arg0 { dimension = 0 : index } : tensor<?xf32, #SparseVector> to memref<?xindex>
56-
%1 = vector.transfer_read %indices[%c0], %c0: memref<?xindex>, vector<4xindex>
57-
vector.print %1 : vector<4xindex>
65+
%1 = vector.transfer_read %indices[%c0], %c0: memref<?xindex>, vector<3xindex>
66+
vector.print %1 : vector<3xindex>
5867
return
5968
}
6069

@@ -76,10 +85,12 @@ module {
7685
//
7786
// Verify the results.
7887
//
79-
// CHECK: ( 5.13, 3, 5, -1 )
80-
// CHECK-NEXT: ( 0, 20, 31, 0 )
81-
// CHECK-NEXT: ( 2, 4, 6, -1 )
82-
// CHECK-NEXT: ( 0, 20, 31, 0 )
88+
// CHECK: 3
89+
// CHECK-NEXT: ( 5.13, 3, 5 )
90+
// CHECK-NEXT: ( 0, 20, 31 )
91+
// CHECK-NEXT: 3
92+
// CHECK-NEXT: ( 2, 4, 6 )
93+
// CHECK-NEXT: ( 0, 20, 31 )
8394
//
8495
call @dump(%0) : (tensor<?xf32, #SparseVector>) -> ()
8596
call @dump(%1) : (tensor<?xf32, #SparseVector>) -> ()

0 commit comments

Comments (0)