Skip to content

Commit 685ba66

Browse files
[Test] add more PlaidML tests (#511)
1 parent dca5cec commit 685ba66

File tree

6 files changed

+255
-2
lines changed

6 files changed

+255
-2
lines changed

test/PlaidML/CppEdsl.ConvI8.mlir

Lines changed: 47 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,47 @@
// RUN: %python_executable %imex_runner -i %s --pass-pipeline-file=%p/linalg-to-cpu.pp \
// RUN: --runner mlir-cpu-runner -e main \
// RUN: --shared-libs=%mlir_runner_utils,%mlir_c_runner_utils \
// RUN: --entry-point-result=void | FileCheck %s
// RUN: %gpu_skip || %python_executable %imex_runner -i %s --pass-pipeline-file=%p/linalg-to-llvm.pp \
// RUN: --runner mlir-cpu-runner -e main \
// RUN: --entry-point-result=void \
// RUN: --shared-libs=%mlir_runner_utils,%mlir_c_runner_utils,%sycl_runtime | FileCheck %s
// Identity map over an NHWC tensor.
#map0 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
// Convolution input access: d4/d5 slide the 3x3 window over H/W, d6 is Cin.
#map1 = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1 + d4, d2 + d5, d6)>
// Filter access: (kh, kw, cin, cout).
#map2 = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d4, d5, d6, d3)>
// Output access: plain NHWC.
#map3 = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1, d2, d3)>
module @convolution {
  // 3x3 stride-1 "same" i8 convolution: 1x224x224x3 input, 3x3x3x32 filter.
  func.func @test(%arg0: tensor<1x224x224x3xi8>, %arg1: tensor<3x3x3x32xi8>) -> tensor<1x224x224x32xi8> {
    %c0_i8 = arith.constant 0 : i8
    %0 = tensor.empty() : tensor<1x224x224x3xi8>
    // Identity copy of the input, mirroring the PlaidML-generated IR.
    %1 = linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%arg0 : tensor<1x224x224x3xi8>) outs(%0 : tensor<1x224x224x3xi8>) {
    ^bb0(%arg2: i8, %arg3: i8):
      linalg.yield %arg2 : i8
    } -> tensor<1x224x224x3xi8>
    %c0_i8_0 = arith.constant 0 : i8
    // Zero-pad H and W by one on each side: 224x224 -> 226x226.
    %2 = tensor.pad %1 low[0, 1, 1, 0] high[0, 1, 1, 0] {
    ^bb0(%arg2: index, %arg3: index, %arg4: index, %arg5: index):
      tensor.yield %c0_i8_0 : i8
    } : tensor<1x224x224x3xi8> to tensor<1x226x226x3xi8>
    %3 = tensor.empty() : tensor<1x224x224x32xi8>
    %4 = linalg.fill ins(%c0_i8 : i8) outs(%3 : tensor<1x224x224x32xi8>) -> tensor<1x224x224x32xi8>
    // Multiply-accumulate over the 3x3 window and the 3 input channels.
    %5 = linalg.generic {indexing_maps = [#map1, #map2, #map3], iterator_types = ["parallel", "parallel", "parallel", "parallel", "reduction", "reduction", "reduction"]} ins(%2, %arg1 : tensor<1x226x226x3xi8>, tensor<3x3x3x32xi8>) outs(%4 : tensor<1x224x224x32xi8>) attrs = {iterator_ranges = [1, 224, 224, 32, 3, 3, 3]} {
    ^bb0(%arg2: i8, %arg3: i8, %arg4: i8):
      %6 = arith.muli %arg2, %arg3 : i8
      %7 = arith.addi %arg4, %6 : i8
      linalg.yield %7 : i8
    } -> tensor<1x224x224x32xi8>
    return %5 : tensor<1x224x224x32xi8>
  }
  // Entry point: all-ones activations and filter, print the result.
  func.func @main() {
    %0 = arith.constant dense<1> : tensor<1x224x224x3xi8>
    %1 = arith.constant dense<1> : tensor<3x3x3x32xi8>
    %2 = call @test(%0, %1) : (tensor<1x224x224x3xi8>, tensor<3x3x3x32xi8>) -> tensor<1x224x224x32xi8>
    %unranked = tensor.cast %2 : tensor<1x224x224x32xi8> to tensor<*xi8>
    call @printMemrefI8(%unranked) : (tensor<*xi8>) -> ()
    return
  }
  // CHECK: Unranked Memref base@ = {{(0x)?[-9a-f]*}}
  // An empty CHECK-NEXT pattern is rejected by FileCheck ("found empty check
  // string"), so require at least a non-empty data line.
  // NOTE(review): with all-ones input and filter the corner outputs should be
  // 2*2*3 = 12 per channel — consider pinning exact values once
  // printMemrefI8's output format is confirmed.
  // CHECK-NEXT: {{.+}}
  func.func private @printMemrefI8(tensor<*xi8>)
}

test/PlaidML/CppEdsl.Convolution.mlir

Lines changed: 47 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,47 @@
// RUN: %python_executable %imex_runner -i %s --pass-pipeline-file=%p/linalg-to-cpu.pp \
// RUN: --runner mlir-cpu-runner -e main \
// RUN: --shared-libs=%mlir_runner_utils,%mlir_c_runner_utils \
// RUN: --entry-point-result=void | FileCheck %s
// RUN: %gpu_skip || %python_executable %imex_runner -i %s --pass-pipeline-file=%p/linalg-to-llvm.pp \
// RUN: --runner mlir-cpu-runner -e main \
// RUN: --entry-point-result=void \
// RUN: --shared-libs=%mlir_runner_utils,%mlir_c_runner_utils,%sycl_runtime | FileCheck %s
// Identity map over an NHWC tensor.
#ident = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
// Convolution input access: d4/d5 slide the 3x3 window over H/W, d6 is Cin.
#conv_in = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1 + d4, d2 + d5, d6)>
// Filter access: (kh, kw, cin, cout).
#conv_flt = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d4, d5, d6, d3)>
// Output access: plain NHWC.
#conv_out = affine_map<(d0, d1, d2, d3, d4, d5, d6) -> (d0, d1, d2, d3)>
module @convolution {
  // 3x3 stride-1 "same" f32 convolution: 1x56x56x64 input, 3x3x64x64 filter.
  func.func @test(%input: tensor<1x56x56x64xf32>, %filter: tensor<3x3x64x64xf32>) -> tensor<1x56x56x64xf32> {
    %zero = arith.constant 0.000000e+00 : f32
    %copy_buf = tensor.empty() : tensor<1x56x56x64xf32>
    // Identity copy of the input, mirroring the PlaidML-generated IR.
    %copy = linalg.generic {indexing_maps = [#ident, #ident], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%input : tensor<1x56x56x64xf32>) outs(%copy_buf : tensor<1x56x56x64xf32>) {
    ^bb0(%in: f32, %out: f32):
      linalg.yield %in : f32
    } -> tensor<1x56x56x64xf32>
    %pad_val = arith.constant 0.000000e+00 : f32
    // Zero-pad H and W by one on each side: 56x56 -> 58x58.
    %padded = tensor.pad %copy low[0, 1, 1, 0] high[0, 1, 1, 0] {
    ^bb0(%i0: index, %i1: index, %i2: index, %i3: index):
      tensor.yield %pad_val : f32
    } : tensor<1x56x56x64xf32> to tensor<1x58x58x64xf32>
    %acc_buf = tensor.empty() : tensor<1x56x56x64xf32>
    %acc = linalg.fill ins(%zero : f32) outs(%acc_buf : tensor<1x56x56x64xf32>) -> tensor<1x56x56x64xf32>
    // Multiply-accumulate over the 3x3 window and the 64 input channels.
    %result = linalg.generic {indexing_maps = [#conv_in, #conv_flt, #conv_out], iterator_types = ["parallel", "parallel", "parallel", "parallel", "reduction", "reduction", "reduction"]} ins(%padded, %filter : tensor<1x58x58x64xf32>, tensor<3x3x64x64xf32>) outs(%acc : tensor<1x56x56x64xf32>) attrs = {iterator_ranges = [1, 56, 56, 64, 3, 3, 64]} {
    ^bb0(%a: f32, %b: f32, %c: f32):
      %prod = arith.mulf %a, %b : f32
      %sum = arith.addf %c, %prod : f32
      linalg.yield %sum : f32
    } -> tensor<1x56x56x64xf32>
    return %result : tensor<1x56x56x64xf32>
  }
  // Entry point: all-ones activations, all-0.5 filter, print the result.
  func.func @main() {
    %act = arith.constant dense<1.0> : tensor<1x56x56x64xf32>
    %wts = arith.constant dense<0.5> : tensor<3x3x64x64xf32>
    %out = call @test(%act, %wts) : (tensor<1x56x56x64xf32>, tensor<3x3x64x64xf32>) -> tensor<1x56x56x64xf32>
    %unranked = tensor.cast %out : tensor<1x56x56x64xf32> to tensor<*xf32>
    call @printMemrefF32(%unranked) : (tensor<*xf32>) -> ()
    // Corner outputs: 2x2 window x 64 channels x (1.0 * 0.5) = 128.
    // CHECK: Unranked Memref base@ = {{(0x)?[-9a-f]*}}
    // CHECK-NEXT: [128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128
    return
  }
  func.func private @printMemrefF32(%ptr : tensor<*xf32>)
}

test/PlaidML/CppEdsl.DefractLong.mlir

Lines changed: 48 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,48 @@
// RUN: %python_executable %imex_runner -i %s --pass-pipeline-file=%p/linalg-to-cpu.pp \
// RUN: --runner mlir-cpu-runner -e main \
// RUN: --shared-libs=%mlir_runner_utils \
// RUN: --entry-point-result=void | FileCheck %s
// RUN: %gpu_skip || %python_executable %imex_runner -i %s --pass-pipeline-file=%p/linalg-to-llvm.pp \
// RUN: --runner mlir-cpu-runner -e main \
// RUN: --entry-point-result=void \
// RUN: --shared-libs=%mlir_runner_utils,%mlir_c_runner_utils,%sycl_runtime | FileCheck %s
// Identity map over an NHWC tensor.
#map0 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>
#map1 = affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10) -> (d1, -d0 + d3 + d8 + 1, -d0 + d4 + d5 + d9, d10)>
#map2 = affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10) -> (d0 * 4 + d2 * 2 + d7 - d8 * 2, d0 * 4 + d2 - d4 + d7 - d9 * 2 + 3, d6, d10)>
#map3 = affine_map<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10) -> (d1, d2 + d3 * 2 + 1, d4 + d5 * 2, d6)>
// Constraint set restricting the iteration space of the defract contraction.
#set = affine_set<(d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10) : (d2 + d4 + d7 + (d9 - 1) * 2 + 1 >= 0, -d4 - d5 * 2 + 4 >= 0)>
module @defract_long {
  // NOTE(review): both RUN lines pipe into FileCheck, but this file contains
  // no CHECK directives (FileCheck errors on an empty check file), and @main
  // takes tensor arguments, so mlir-cpu-runner cannot invoke it as an entry
  // point. The test is excluded only for the GPU pipeline in lit.local.cfg;
  // a zero-argument driver plus CHECK lines are needed before it can run.
  func.func @main(%arg0: tensor<1x3x3x1xf32>, %arg1: tensor<1x3x3x1xf32>) -> tensor<1x5x5x1xf32> {
    %cst = arith.constant 0.000000e+00 : f32
    %0 = tensor.empty() : tensor<1x3x3x1xf32>
    // Identity copy of the first operand.
    %1 = linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%arg0 : tensor<1x3x3x1xf32>) outs(%0 : tensor<1x3x3x1xf32>) {
    ^bb0(%arg2: f32, %arg3: f32):
      linalg.yield %arg2 : f32
    } -> tensor<1x3x3x1xf32>
    %cst_0 = arith.constant 0.000000e+00 : f32
    // Zero-pad W by one on each side: 1x3x3x1 -> 1x3x5x1.
    %2 = tensor.pad %1 low[0, 0, 1, 0] high[0, 0, 1, 0] {
    ^bb0(%arg2: index, %arg3: index, %arg4: index, %arg5: index):
      tensor.yield %cst_0 : f32
    } : tensor<1x3x3x1xf32> to tensor<1x3x5x1xf32>
    %3 = tensor.empty() : tensor<1x3x3x1xf32>
    // Identity copy of the second operand.
    %4 = linalg.generic {indexing_maps = [#map0, #map0], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%arg1 : tensor<1x3x3x1xf32>) outs(%3 : tensor<1x3x3x1xf32>) {
    ^bb0(%arg2: f32, %arg3: f32):
      linalg.yield %arg2 : f32
    } -> tensor<1x3x3x1xf32>
    %cst_1 = arith.constant 0.000000e+00 : f32
    // Zero-pad H at the high end only: 1x3x3x1 -> 1x4x3x1.
    %5 = tensor.pad %4 low[0, 0, 0, 0] high[0, 1, 0, 0] {
    ^bb0(%arg2: index, %arg3: index, %arg4: index, %arg5: index):
      tensor.yield %cst_1 : f32
    } : tensor<1x3x3x1xf32> to tensor<1x4x3x1xf32>
    %6 = tensor.empty() : tensor<1x5x5x1xf32>
    %7 = linalg.fill ins(%cst : f32) outs(%6 : tensor<1x5x5x1xf32>) -> tensor<1x5x5x1xf32>
    // Constrained multiply-accumulate over the 11-d iteration space.
    %8 = linalg.generic {indexing_maps = [#map1, #map2, #map3], iterator_types = ["reduction", "parallel", "parallel", "parallel", "parallel", "parallel", "parallel", "reduction", "reduction", "reduction", "reduction"]} ins(%2, %5 : tensor<1x3x5x1xf32>, tensor<1x4x3x1xf32>) outs(%7 : tensor<1x5x5x1xf32>) attrs = {constraints = #set, iterator_ranges = [1, 1, 1, 2, 2, 3, 1, 1, 1, 2, 1]} {
    ^bb0(%arg2: f32, %arg3: f32, %arg4: f32):
      %9 = arith.mulf %arg2, %arg3 : f32
      %10 = arith.addf %arg4, %9 : f32
      linalg.yield %10 : f32
    } -> tensor<1x5x5x1xf32>
    return %8 : tensor<1x5x5x1xf32>
  }
}

test/PlaidML/CppEdsl.DoubleDot.mlir

Lines changed: 46 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,46 @@
// RUN: %python_executable %imex_runner -i %s --pass-pipeline-file=%p/linalg-to-cpu.pp \
// RUN: --runner mlir-cpu-runner -e main \
// RUN: --shared-libs=%mlir_runner_utils \
// RUN: --entry-point-result=void | FileCheck %s
// RUN: %gpu_skip || %python_executable %imex_runner -i %s --pass-pipeline-file=%p/linalg-to-llvm.pp \
// RUN: --runner mlir-cpu-runner -e main \
// RUN: --entry-point-result=void \
// RUN: --shared-libs=%mlir_runner_utils,%mlir_c_runner_utils,%sycl_runtime | FileCheck %s
// Matmul LHS access: (i, k).
#lhs = affine_map<(d0, d1, d2) -> (d0, d2)>
// Matmul RHS access: (k, j).
#rhs = affine_map<(d0, d1, d2) -> (d2, d1)>
// Matmul result access: (i, j).
#res = affine_map<(d0, d1, d2) -> (d0, d1)>
module @double_dot {
  // Chained matrix product: (10x20 . 20x30) . 30x40 -> 10x40.
  func.func @test(%a: tensor<10x20xf32>, %b: tensor<20x30xf32>, %c: tensor<30x40xf32>) -> tensor<10x40xf32> {
    %zero = arith.constant 0.000000e+00 : f32
    %ab_buf = tensor.empty() : tensor<10x30xf32>
    %ab_init = linalg.fill ins(%zero : f32) outs(%ab_buf : tensor<10x30xf32>) -> tensor<10x30xf32>
    // ab = a . b
    %ab = linalg.generic {indexing_maps = [#lhs, #rhs, #res], iterator_types = ["parallel", "parallel", "reduction"]} ins(%a, %b : tensor<10x20xf32>, tensor<20x30xf32>) outs(%ab_init : tensor<10x30xf32>) attrs = {iterator_ranges = [10, 30, 20]} {
    ^bb0(%x: f32, %y: f32, %acc: f32):
      %p = arith.mulf %x, %y : f32
      %s = arith.addf %acc, %p : f32
      linalg.yield %s : f32
    } -> tensor<10x30xf32>
    %abc_buf = tensor.empty() : tensor<10x40xf32>
    %abc_init = linalg.fill ins(%zero : f32) outs(%abc_buf : tensor<10x40xf32>) -> tensor<10x40xf32>
    // result = ab . c
    %abc = linalg.generic {indexing_maps = [#lhs, #rhs, #res], iterator_types = ["parallel", "parallel", "reduction"]} ins(%ab, %c : tensor<10x30xf32>, tensor<30x40xf32>) outs(%abc_init : tensor<10x40xf32>) attrs = {iterator_ranges = [10, 40, 30]} {
    ^bb0(%x: f32, %y: f32, %acc: f32):
      %p = arith.mulf %x, %y : f32
      %s = arith.addf %acc, %p : f32
      linalg.yield %s : f32
    } -> tensor<10x40xf32>
    return %abc : tensor<10x40xf32>
  }
  // Entry point: constant operands, print the chained product.
  func.func @main() {
    %a = arith.constant dense<1.0> : tensor<10x20xf32>
    %b = arith.constant dense<2.0> : tensor<20x30xf32>
    %c = arith.constant dense<3.0> : tensor<30x40xf32>
    %r = call @test(%a, %b, %c) : (tensor<10x20xf32>, tensor<20x30xf32>, tensor<30x40xf32>) -> tensor<10x40xf32>
    %unranked = tensor.cast %r : tensor<10x40xf32> to tensor<*xf32>
    call @printMemrefF32(%unranked) : (tensor<*xf32>) -> ()
    // Every element: (20 * 1 * 2) summed over 30 with weight 3 = 3600.
    // CHECK: Unranked Memref base@ = {{(0x)?[-9a-f]*}}
    // CHECK-NEXT: [3600, 3600, 3600, 3600, 3600, 3600, 3600, 3600, 3600, 3600, 3600, 3600, 3600, 3600, 3600, 3600, 3600
    return
  }

  func.func private @printMemrefF32(%ptr : tensor<*xf32>)
}

test/PlaidML/CppEdsl.DupOut.mlir

Lines changed: 65 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,65 @@
// RUN: %python_executable %imex_runner -i %s --pass-pipeline-file=%p/linalg-to-cpu.pp \
// RUN: --runner mlir-cpu-runner -e main \
// RUN: --shared-libs=%mlir_runner_utils \
// RUN: --entry-point-result=void | FileCheck %s
// RUN: %gpu_skip || %python_executable %imex_runner -i %s --pass-pipeline-file=%p/linalg-to-llvm.pp \
// RUN: --runner mlir-cpu-runner -e main \
// RUN: --entry-point-result=void \
// RUN: --shared-libs=%mlir_runner_utils,%mlir_c_runner_utils,%sycl_runtime | FileCheck %s
// Matmul LHS access: (i, k).
#lhs = affine_map<(d0, d1, d2) -> (d0, d2)>
// Matmul RHS access: (k, j).
#rhs = affine_map<(d0, d1, d2) -> (d2, d1)>
// Matmul result access: (i, j).
#res = affine_map<(d0, d1, d2) -> (d0, d1)>
// 2-d identity map for the copy generics.
#ident = affine_map<(d0, d1) -> (d0, d1)>
module @dup_out {
  // Chained matmul whose result is returned three times — once directly and
  // twice through identity-copy generics — to exercise duplicated outputs.
  func.func @test(%a: tensor<10x20xf32>, %b: tensor<20x30xf32>, %c: tensor<30x40xf32>) -> (tensor<10x40xf32>, tensor<10x40xf32>, tensor<10x40xf32>) {
    %zero = arith.constant 0.000000e+00 : f32
    %ab_buf = tensor.empty() : tensor<10x30xf32>
    %ab_init = linalg.fill ins(%zero : f32) outs(%ab_buf : tensor<10x30xf32>) -> tensor<10x30xf32>
    // ab = a . b
    %ab = linalg.generic {indexing_maps = [#lhs, #rhs, #res], iterator_types = ["parallel", "parallel", "reduction"]} ins(%a, %b : tensor<10x20xf32>, tensor<20x30xf32>) outs(%ab_init : tensor<10x30xf32>) attrs = {iterator_ranges = [10, 30, 20]} {
    ^bb0(%x: f32, %y: f32, %acc: f32):
      %p = arith.mulf %x, %y : f32
      %s = arith.addf %acc, %p : f32
      linalg.yield %s : f32
    } -> tensor<10x30xf32>
    %abc_buf = tensor.empty() : tensor<10x40xf32>
    %abc_init = linalg.fill ins(%zero : f32) outs(%abc_buf : tensor<10x40xf32>) -> tensor<10x40xf32>
    // abc = ab . c
    %abc = linalg.generic {indexing_maps = [#lhs, #rhs, #res], iterator_types = ["parallel", "parallel", "reduction"]} ins(%ab, %c : tensor<10x30xf32>, tensor<30x40xf32>) outs(%abc_init : tensor<10x40xf32>) attrs = {iterator_ranges = [10, 40, 30]} {
    ^bb0(%x: f32, %y: f32, %acc: f32):
      %p = arith.mulf %x, %y : f32
      %s = arith.addf %acc, %p : f32
      linalg.yield %s : f32
    } -> tensor<10x40xf32>
    // First identity copy of the product.
    %cp1_buf = tensor.empty() : tensor<10x40xf32>
    %cp1 = linalg.generic {indexing_maps = [#ident, #ident], iterator_types = ["parallel", "parallel"]} ins(%abc : tensor<10x40xf32>) outs(%cp1_buf : tensor<10x40xf32>) {
    ^bb0(%v: f32, %o: f32):
      linalg.yield %v : f32
    } -> tensor<10x40xf32>
    // Second identity copy of the product.
    %cp2_buf = tensor.empty() : tensor<10x40xf32>
    %cp2 = linalg.generic {indexing_maps = [#ident, #ident], iterator_types = ["parallel", "parallel"]} ins(%abc : tensor<10x40xf32>) outs(%cp2_buf : tensor<10x40xf32>) {
    ^bb0(%v: f32, %o: f32):
      linalg.yield %v : f32
    } -> tensor<10x40xf32>
    return %abc, %cp1, %cp2 : tensor<10x40xf32>, tensor<10x40xf32>, tensor<10x40xf32>
  }
  // Entry point: constant operands; all three results must print identically.
  func.func @main() {
    %a = arith.constant dense<1.0> : tensor<10x20xf32>
    %b = arith.constant dense<2.0> : tensor<20x30xf32>
    %c = arith.constant dense<3.0> : tensor<30x40xf32>
    %r:3 = call @test(%a, %b, %c) : (tensor<10x20xf32>, tensor<20x30xf32>, tensor<30x40xf32>) -> (tensor<10x40xf32>, tensor<10x40xf32>, tensor<10x40xf32>)
    %u0 = tensor.cast %r#0 : tensor<10x40xf32> to tensor<*xf32>
    call @printMemrefF32(%u0) : (tensor<*xf32>) -> ()
    // Every element: (20 * 1 * 2) summed over 30 with weight 3 = 3600.
    // CHECK: Unranked Memref base@ = {{(0x)?[-9a-f]*}}
    // CHECK-NEXT: [3600, 3600, 3600, 3600, 3600, 3600, 3600, 3600, 3600, 3600, 3600, 3600, 3600, 3600,
    %u1 = tensor.cast %r#1 : tensor<10x40xf32> to tensor<*xf32>
    call @printMemrefF32(%u1) : (tensor<*xf32>) -> ()
    // CHECK: Unranked Memref base@ = {{(0x)?[-9a-f]*}}
    // CHECK-NEXT: [3600, 3600, 3600, 3600, 3600, 3600, 3600, 3600, 3600, 3600, 3600, 3600, 3600, 3600,
    %u2 = tensor.cast %r#2 : tensor<10x40xf32> to tensor<*xf32>
    call @printMemrefF32(%u2) : (tensor<*xf32>) -> ()
    // CHECK: Unranked Memref base@ = {{(0x)?[-9a-f]*}}
    // CHECK-NEXT: [3600, 3600, 3600, 3600, 3600, 3600, 3600, 3600, 3600, 3600, 3600, 3600, 3600, 3600,
    return
  }

  func.func private @printMemrefF32(%ptr : tensor<*xf32>)
}

test/PlaidML/lit.local.cfg

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,6 @@ local_excludes = [
1414
'OpTest.LogicalOr_int32.mlir',
1515
'OpTest.MnistCnn.mlir',
1616
'OpTest.SinH.mlir',
17-
'OpTest.Sum.mlir',
1817
'OpTest.Tan.mlir',
1918
'OpTest.GEMV_INT8.mlir',
2019
'OpTest.GEMM_INT8.mlir',
@@ -27,7 +26,8 @@ local_excludes = [
2726
'OpTest.LogicalOr_int32.mlir',
2827
'OpTest.MnistCnn.mlir',
2928
'OpTest.SinH.mlir',
30-
'OpTest.Tan.mlir',
29+
'CppEdsl.ConvI8.mlir',
30+
'CppEdsl.DefractLong.mlir'
3131
]
3232
config.excludes.update(local_excludes)
3333

0 commit comments

Comments
 (0)