|
1 | | -// RUN: iree-opt --split-input-file --pass-pipeline="builtin.module(util.func(iree-dispatch-creation-set-encoding))" %s | FileCheck %s |
| 1 | +// RUN: iree-opt --split-input-file --pass-pipeline="builtin.module(util.func(iree-dispatch-creation-set-encoding))" %s | FileCheck %s --check-prefixes=CHECK-ALL,CHECK |
| 2 | +// RUN: iree-opt --split-input-file --pass-pipeline="builtin.module(util.func(iree-dispatch-creation-set-encoding{encoding-option=matmulk}))" %s | FileCheck %s --check-prefixes=CHECK-ALL,MATMULK |
2 | 3 |
|
// Basic static-shape f32 matmul: the set-encoding pass should wrap the LHS,
// RHS, and init operands in iree_encoding.set_encoding ops and unset the
// encoding on the result (behavior pinned by the CHECK-ALL/MATMULK lines).
3 | 4 | util.func public @matmul_f32f32f32(%arg0 : tensor<100x250xf32>, %arg1 : tensor<250x500xf32>,
4 | 5 | %arg2 : tensor<100x500xf32>) -> tensor<100x500xf32> {
5 | 6 | %0 = linalg.matmul ins(%arg0, %arg1 : tensor<100x250xf32>, tensor<250x500xf32>)
6 | 7 | outs(%arg2 : tensor<100x500xf32>) -> tensor<100x500xf32>
7 | 8 | util.return %0 : tensor<100x500xf32>
8 | 9 | }
9 | | -// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2) -> (d0, d2)> |
10 | | -// CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2) -> (d2, d1)> |
11 | | -// CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1, d2) -> (d0, d1)> |
12 | | -// CHECK-DAG: #[[LHS_ENCODING:.+]] = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#[[MAP1]], #[[MAP2]], #[[MAP3]]], iteration_sizes = [100, 500, 250]> |
13 | | -// CHECK-DAG: #[[RHS_ENCODING:.+]] = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#[[MAP1]], #[[MAP2]], #[[MAP3]]], iteration_sizes = [100, 500, 250]> |
14 | | -// CHECK-DAG: #[[OUT_ENCODING:.+]] = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#[[MAP1]], #[[MAP2]], #[[MAP3]]], iteration_sizes = [100, 500, 250]> |
15 | | -// CHECK: util.func public @matmul_f32f32f32( |
16 | | -// CHECK-SAME: %[[ARG0:.+]]: tensor<100x250xf32> |
17 | | -// CHECK-SAME: %[[ARG1:.+]]: tensor<250x500xf32> |
18 | | -// CHECK-SAME: %[[ARG2:.+]]: tensor<100x500xf32> |
19 | | -// CHECK: %[[LHS:.+]] = iree_encoding.set_encoding %[[ARG0]] |
20 | | -// CHECK-SAME: tensor<100x250xf32, #[[LHS_ENCODING]]> |
21 | | -// CHECK: %[[RHS:.+]] = iree_encoding.set_encoding %[[ARG1]] |
22 | | -// CHECK-SAME: tensor<250x500xf32, #[[RHS_ENCODING]]> |
23 | | -// CHECK: %[[OUTS:.+]] = iree_encoding.set_encoding %[[ARG2]] |
24 | | -// CHECK-SAME: tensor<100x500xf32, #[[OUT_ENCODING]]> |
25 | | -// CHECK: %[[MATMUL:.+]] = linalg.matmul |
26 | | -// CHECK-SAME: ins(%[[LHS]], %[[RHS]] : |
27 | | -// CHECK-SAME: outs(%[[OUTS]] : |
28 | | -// CHECK: %[[RESULT:.+]] = iree_encoding.unset_encoding %[[MATMUL]] : tensor<100x500xf32, #[[OUT_ENCODING]]> -> tensor<100x500xf32> |
29 | | -// CHECK: util.return %[[RESULT]] |
| 10 | +// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2) -> (d0, d2)> |
| 11 | +// CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2) -> (d2, d1)> |
| 12 | +// CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1, d2) -> (d0, d1)> |
| 13 | +// CHECK-DAG: #[[LHS_ENCODING:.+]] = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#[[MAP1]], #[[MAP2]], #[[MAP3]]], iteration_sizes = [100, 500, 250]> |
| 14 | +// CHECK-DAG: #[[RHS_ENCODING:.+]] = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#[[MAP1]], #[[MAP2]], #[[MAP3]]], iteration_sizes = [100, 500, 250]> |
| 15 | +// CHECK-DAG: #[[OUT_ENCODING:.+]] = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#[[MAP1]], #[[MAP2]], #[[MAP3]]], iteration_sizes = [100, 500, 250]> |
| 16 | +// MATMULK-DAG: #[[LHS_ENCODING:.+]] = #iree_encoding.matmul_k<k_dims = [1]> |
| 17 | +// MATMULK-DAG: #[[RHS_ENCODING:.+]] = #iree_encoding.matmul_k<k_dims = [0]> |
| 18 | +// MATMULK-DAG: #[[OUT_ENCODING:.+]] = #iree_encoding.matmul_k<k_dims = []> |
| 19 | +// CHECK-ALL: util.func public @matmul_f32f32f32( |
| 20 | +// CHECK-ALL-SAME: %[[ARG0:.+]]: tensor<100x250xf32> |
| 21 | +// CHECK-ALL-SAME: %[[ARG1:.+]]: tensor<250x500xf32> |
| 22 | +// CHECK-ALL-SAME: %[[ARG2:.+]]: tensor<100x500xf32> |
| 23 | +// CHECK-ALL: %[[LHS:.+]] = iree_encoding.set_encoding %[[ARG0]] |
| 24 | +// CHECK-ALL-SAME: tensor<100x250xf32, #[[LHS_ENCODING]]> |
| 25 | +// CHECK-ALL: %[[RHS:.+]] = iree_encoding.set_encoding %[[ARG1]] |
| 26 | +// CHECK-ALL-SAME: tensor<250x500xf32, #[[RHS_ENCODING]]> |
| 27 | +// CHECK-ALL: %[[OUTS:.+]] = iree_encoding.set_encoding %[[ARG2]] |
| 28 | +// CHECK-ALL-SAME: tensor<100x500xf32, #[[OUT_ENCODING]]> |
| 29 | +// CHECK-ALL: %[[MATMUL:.+]] = linalg.matmul |
| 30 | +// CHECK-ALL-SAME: ins(%[[LHS]], %[[RHS]] : |
| 31 | +// CHECK-ALL-SAME: outs(%[[OUTS]] : |
| 32 | +// CHECK-ALL: %[[RESULT:.+]] = iree_encoding.unset_encoding %[[MATMUL]] : tensor<100x500xf32, #[[OUT_ENCODING]]> -> tensor<100x500xf32> |
| 33 | +// CHECK-ALL: util.return %[[RESULT]] |
30 | 34 |
|
31 | 35 | // ----- |
32 | 36 |
|
@@ -72,27 +76,30 @@ util.func public @matmul_f32f32f32_parallel_reduce_parallel(%arg0 : tensor<32x12 |
72 | 76 | } -> tensor<4096x32xf32> |
73 | 77 | util.return %0 : tensor<4096x32xf32> |
74 | 78 | } |
75 | | -// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2) -> (d0, d1)> |
76 | | -// CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2) -> (d1, d2)> |
77 | | -// CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1, d2) -> (d2, d0)> |
78 | | -// CHECK-DAG: #[[LHS_ENCODING:.+]] = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#[[MAP1]], #[[MAP2]], #[[MAP3]]], iteration_sizes = [32, 128, 4096]> |
79 | | -// CHECK-DAG: #[[RHS_ENCODING:.+]] = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#[[MAP1]], #[[MAP2]], #[[MAP3]]], iteration_sizes = [32, 128, 4096]> |
80 | | -// CHECK-DAG: #[[OUT_ENCODING:.+]] = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#[[MAP1]], #[[MAP2]], #[[MAP3]]], iteration_sizes = [32, 128, 4096]> |
81 | | -// CHECK: util.func public @matmul_f32f32f32_parallel_reduce_parallel( |
82 | | -// CHECK-SAME: %[[ARG0:.+]]: tensor<32x128xf32> |
83 | | -// CHECK-SAME: %[[ARG1:.+]]: tensor<128x4096xf32> |
84 | | -// CHECK-SAME: %[[ARG2:.+]]: tensor<4096x32xf32> |
85 | | -// CHECK: %[[LHS:.+]] = iree_encoding.set_encoding %[[ARG0]] |
86 | | -// CHECK-SAME: tensor<32x128xf32, #[[LHS_ENCODING]]> |
87 | | -// CHECK: %[[RHS:.+]] = iree_encoding.set_encoding %[[ARG1]] |
88 | | -// CHECK-SAME: tensor<128x4096xf32, #[[RHS_ENCODING]]> |
89 | | -// CHECK: %[[OUTS:.+]] = iree_encoding.set_encoding %[[ARG2]] |
90 | | -// CHECK-SAME: tensor<4096x32xf32, #[[OUT_ENCODING]]> |
91 | | -// CHECK: %[[MATMUL:.+]] = linalg.generic |
92 | | -// CHECK-SAME: ins(%[[LHS]], %[[RHS]] : |
93 | | -// CHECK-SAME: outs(%[[OUTS]] : |
94 | | -// CHECK: %[[RESULT:.+]] = iree_encoding.unset_encoding %[[MATMUL]] : tensor<4096x32xf32, #[[OUT_ENCODING]]> -> tensor<4096x32xf32> |
95 | | -// CHECK: util.return %[[RESULT]] |
| 79 | +// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1, d2) -> (d0, d1)> |
| 80 | +// CHECK-DAG: #[[MAP2:.+]] = affine_map<(d0, d1, d2) -> (d1, d2)> |
| 81 | +// CHECK-DAG: #[[MAP3:.+]] = affine_map<(d0, d1, d2) -> (d2, d0)> |
| 82 | +// CHECK-DAG: #[[LHS_ENCODING:.+]] = #iree_encoding.encoding<operand_index = 0 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#[[MAP1]], #[[MAP2]], #[[MAP3]]], iteration_sizes = [32, 128, 4096]> |
| 83 | +// CHECK-DAG: #[[RHS_ENCODING:.+]] = #iree_encoding.encoding<operand_index = 1 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#[[MAP1]], #[[MAP2]], #[[MAP3]]], iteration_sizes = [32, 128, 4096]> |
| 84 | +// CHECK-DAG: #[[OUT_ENCODING:.+]] = #iree_encoding.encoding<operand_index = 2 : index, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#[[MAP1]], #[[MAP2]], #[[MAP3]]], iteration_sizes = [32, 128, 4096]> |
| 85 | +// MATMULK-DAG: #[[LHS_ENCODING:.+]] = #iree_encoding.matmul_k<k_dims = [1]> |
| 86 | +// MATMULK-DAG: #[[RHS_ENCODING:.+]] = #iree_encoding.matmul_k<k_dims = [0]> |
| 87 | +// MATMULK-DAG: #[[OUT_ENCODING:.+]] = #iree_encoding.matmul_k<k_dims = []> |
| 88 | +// CHECK-ALL: util.func public @matmul_f32f32f32_parallel_reduce_parallel( |
| 89 | +// CHECK-ALL-SAME: %[[ARG0:.+]]: tensor<32x128xf32> |
| 90 | +// CHECK-ALL-SAME: %[[ARG1:.+]]: tensor<128x4096xf32> |
| 91 | +// CHECK-ALL-SAME: %[[ARG2:.+]]: tensor<4096x32xf32> |
| 92 | +// CHECK-ALL: %[[LHS:.+]] = iree_encoding.set_encoding %[[ARG0]] |
| 93 | +// CHECK-ALL-SAME: tensor<32x128xf32, #[[LHS_ENCODING]]> |
| 94 | +// CHECK-ALL: %[[RHS:.+]] = iree_encoding.set_encoding %[[ARG1]] |
| 95 | +// CHECK-ALL-SAME: tensor<128x4096xf32, #[[RHS_ENCODING]]> |
| 96 | +// CHECK-ALL: %[[OUTS:.+]] = iree_encoding.set_encoding %[[ARG2]] |
| 97 | +// CHECK-ALL-SAME: tensor<4096x32xf32, #[[OUT_ENCODING]]> |
| 98 | +// CHECK-ALL: %[[MATMUL:.+]] = linalg.generic |
| 99 | +// CHECK-ALL-SAME: ins(%[[LHS]], %[[RHS]] : |
| 100 | +// CHECK-ALL-SAME: outs(%[[OUTS]] : |
| 101 | +// CHECK-ALL: %[[RESULT:.+]] = iree_encoding.unset_encoding %[[MATMUL]] : tensor<4096x32xf32, #[[OUT_ENCODING]]> -> tensor<4096x32xf32> |
| 102 | +// CHECK-ALL: util.return %[[RESULT]] |
96 | 103 |
|
97 | 104 | // ----- |
98 | 105 |
|
|
0 commit comments