
Commit b5dc6d1

Signed-off-by: MaheshRavishankar <[email protected]>
1 parent 190f535 · commit b5dc6d1

6 files changed, +16 −16 lines changed

.github/workflows/pkgci_test_sharktank.yml

Lines changed: 1 addition & 1 deletion
@@ -60,7 +60,7 @@ jobs:
         uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
         with:
           repository: iree-org/iree-test-suites
-          ref: fece13306aff1d8ef33858dccfdb10eaf8b036c2
+          ref: 82b17caf4fe59ae9bef33b5bf039a9883428f601
           path: iree-test-suites
           lfs: true
       - name: Install Sharktank models test suite requirements

compiler/src/iree/compiler/Codegen/Common/test/block_dynamic_dims.mlir

Lines changed: 2 additions & 2 deletions
@@ -66,11 +66,11 @@ func.func @block_attention_dims() {
 // CHECK-DAG: %[[C16:.+]] = arith.constant 16 : index
 // CHECK-DAG: %[[M:.+]] = flow.dispatch.workload.ordinal %{{.+}}, 0 : index
 // CHECK-DAG: %[[K2:.+]] = flow.dispatch.workload.ordinal %{{.+}}, 1 : index
-// CHECK-DAG: %[[M_DYNAMIC:.+]] = arith.divui %[[M]], %[[C16]]
+// CHECK-DAG: %[[M_DYNAMIC:.+]] = arith.divsi %[[M]], %[[C16]]
 // CHECK: %[[Q_BINDING:.+]] = hal.interface.binding.subspan
 // CHECK-SAME: binding(0)
 // CHECK-SAME: !flow.dispatch.tensor<readonly:tensor<4x?x16x32x128xf16>>{%[[M_DYNAMIC]]}
-// CHECK: %[[K2_DYNAMIC:.+]] = arith.divui %[[K2]], %[[C32]]
+// CHECK: %[[K2_DYNAMIC:.+]] = arith.divsi %[[K2]], %[[C32]]
 // CHECK: %[[K_BINDING:.+]] = hal.interface.binding.subspan
 // CHECK-SAME: binding(1)
 // CHECK-SAME: !flow.dispatch.tensor<readonly:tensor<4x?x32x32x128xf16>>{%[[K2_DYNAMIC]]}
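These checks now expect arith.divsi rather than arith.divui: the blocked dynamic extents are index values known to be non-negative, so signed and unsigned division agree, and the signed form lines up with the signed-arithmetic flags introduced elsewhere in this commit. A minimal sketch of the computation being matched, with placeholder SSA names (not the test's own values):

  // A dynamic dim %m (a non-negative workload value) is blocked by 16;
  // divsi and divui coincide for non-negative operands, and divsi folds
  // with the signed analyses used downstream.
  %c16 = arith.constant 16 : index
  %m = flow.dispatch.workload.ordinal %m_in, 0 : index
  %m_blocks = arith.divsi %m, %c16 : index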

compiler/src/iree/compiler/Codegen/Common/test/propagate_reshapes_by_expansion.mlir

Lines changed: 2 additions & 2 deletions
@@ -57,7 +57,7 @@ func.func @fold_expand_into_loads_dynamic() -> tensor<2x?x16x32xf32> {
 // CHECK-LABEL: func @fold_expand_into_loads_dynamic()
 // CHECK-DAG: %[[C16:.+]] = arith.constant 16 : index
 // CHECK-DAG: %[[CONST:.+]] = hal.interface.constant.load
-// CHECK: %[[SHAPE:.+]] = arith.divui %[[CONST]], %[[C16]]
+// CHECK: %[[SHAPE:.+]] = arith.divsi %[[CONST]], %[[C16]]
 // CHECK: %[[SUBSPAN:.+]] = hal.interface.binding.subspan
 // CHECK-SAME: !flow.dispatch.tensor<readonly:tensor<2x?x16x32xf32>>{%[[SHAPE]]}
 // CHECK: %[[LOAD:.+]] = flow.dispatch.tensor.load %[[SUBSPAN]]

@@ -81,7 +81,7 @@ func.func @fold_collapse_into_stores_dynamic(%arg0 : tensor<2x?x32xf32>) {
 // CHECK-LABEL: func @fold_collapse_into_stores_dynamic(
 // CHECK-DAG: %[[C2:.+]] = arith.constant 2 : index
 // CHECK: %[[CONST:.+]] = hal.interface.constant.load
-// CHECK: %[[SHAPE:.+]] = arith.divui %[[CONST]], %[[C2]]
+// CHECK: %[[SHAPE:.+]] = arith.divsi %[[CONST]], %[[C2]]
 // CHECK: %[[SUBSPAN:.+]] = hal.interface.binding.subspan
 // CHECK-SAME: !flow.dispatch.tensor<writeonly:tensor<2x?x32xf32>>{%[[SHAPE]]}
 // CHECK: flow.dispatch.tensor.store %{{.+}}, %[[SUBSPAN]]

compiler/src/iree/compiler/Codegen/LLVMGPU/test/nvvm_extract_address_computation.mlir

Lines changed: 1 addition & 1 deletion
@@ -44,7 +44,7 @@
 // CHECK-DAG: %[[TID_Y_EXT:.*]] = llvm.sext %[[TID_Y]] : i32 to i64
 // CHECK-DAG: %[[LANEID:.*]] = nvvm.read.ptx.sreg.laneid range <i32, 0, 32> : i32
 // CHECK-DAG: %[[LANEID_EXT:.*]] = llvm.sext %[[LANEID]] : i32 to i64
-// CHECK-DAG: %[[TID_Y_IDX:.*]] = llvm.mul %[[TID_Y_EXT]], %[[C64]] : i64
+// CHECK-DAG: %[[TID_Y_IDX:.*]] = llvm.mul %[[TID_Y_EXT]], %[[C64]] overflow<nsw> : i64
 //
 // Match the loop invariant math on the special registers.
 // CHECK: %[[GRP_IDX:.*]] = llvm.add %[[TID_Y_IDX]], %[[LANEID_EXT]] : i64
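The new overflow<nsw> flag marks the multiply as free of signed overflow, which lets LLVM's address-computation passes reassociate and strength-reduce the indexing math. A minimal sketch of the pattern these checks match, with placeholder SSA names rather than the test's own values:

  // Sign-extend the 32-bit thread id, scale it by the row stride (64),
  // and add the lane id. overflow<nsw> asserts the signed multiply
  // cannot wrap, so later passes may fold the address arithmetic freely.
  %c64 = llvm.mlir.constant(64 : i64) : i64
  %tid_ext = llvm.sext %tid : i32 to i64
  %lane_ext = llvm.sext %lane : i32 to i64
  %row_off = llvm.mul %tid_ext, %c64 overflow<nsw> : i64
  %grp_idx = llvm.add %row_off, %lane_ext : i64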

compiler/src/iree/compiler/Codegen/SPIRV/test/pipeline_matvec.mlir

Lines changed: 3 additions & 3 deletions
@@ -199,11 +199,11 @@ hal.executable @i4_dequant_matvec_f16_subgroup_64 {
 
 // Load the quantized weight and get 4xi4 out of it. Ensure that the offset
 // calculation avoids excessive scaling down in computing the element offset.
-// CHECK: spirv.IMul %{{.*}}, %[[C64]] : i32
+// CHECK: spirv.IMul %{{.*}}, %[[C64]] {no_signed_wrap} : i32
 // CHECK: spirv.IAdd %{{.*}}, %[[STREAMBINDING]] : i32
-// CHECK: spirv.IMul %{{.*}}, %[[C5504]] : i32
+// CHECK: spirv.IMul %{{.*}}, %[[C5504]] {no_signed_wrap} : i32
 // CHECK: spirv.IAdd %{{.*}}, %{{.*}} : i32
-// CHECK: spirv.IMul %[[WIDX]], %[[C2]] : i32
+// CHECK: spirv.IMul %[[WIDX]], %[[C2]] {no_signed_wrap} : i32
 // CHECK: spirv.IAdd %{{.*}}, %{{.*}} : i32
 // CHECK: %[[OFFSET:.+]] = spirv.SDiv %{{.*}}, %[[C4]] : i32
 // CHECK: %[[ACCESS:.+]] = spirv.AccessChain %[[RADDR]][{{.*}}, %[[OFFSET]]] : !spirv.ptr<!spirv.struct<(!spirv.rtarray<i32, stride=4> [0])>, StorageBuffer>, i32, i32
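{no_signed_wrap} is the SPIR-V counterpart of LLVM's nsw: it attaches no-signed-wrap semantics to spirv.IMul, so the element-offset arithmetic feeding the final spirv.SDiv can be treated as overflow-free. A reduced sketch of the offset chain these checks verify, with illustrative operands:

  // Scale the per-thread index, accumulate the base offset, and divide
  // the element offset down to an i32 word index into the storage buffer.
  %scaled = spirv.IMul %widx, %c2 {no_signed_wrap} : i32
  %sum = spirv.IAdd %scaled, %base : i32
  %offset = spirv.SDiv %sum, %c4 : i32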

compiler/src/iree/compiler/DispatchCreation/test/attention_fuse_by_expansion.mlir

Lines changed: 7 additions & 7 deletions
@@ -197,17 +197,17 @@ util.func public @attention_dynamic(%arg0: tensor<?x?x?xf16>, %arg1: tensor<?x?x
 // CHECK-DAG: %[[D1:.+]] = tensor.dim %[[ARG0]], %[[C1]]
 // CHECK-DAG: %[[D2:.+]] = tensor.dim %[[ARG0]], %[[C2]]
 // CHECK-DAG: %[[D4:.+]] = tensor.dim %[[ARG2]], %[[C2]]
-// CHECK-DAG: %[[SPLIT0:.+]] = arith.divui %[[D0]]
+// CHECK-DAG: %[[SPLIT0:.+]] = arith.divsi %[[D0]]
 // CHECK-DAG: %[[EMPTY:.+]] = tensor.empty(%[[SPLIT0]], %[[D1]], %[[D4]]) : tensor<2x?x?x?xf16>
 // CHECK-DAG: %[[QUERY:.+]] = tensor.expand_shape %[[ARG0]] {{\[}}[0, 1], [2], [3]{{\]}} output_shape [2, %[[SPLIT0]], %[[D1]], %[[D2]]]
 // CHECK-DAG: %[[D5:.+]] = tensor.dim %[[ARG1]], %[[C0]]
 // CHECK-DAG: %[[D6:.+]] = tensor.dim %[[ARG1]], %[[C1]]
 // CHECK-DAG: %[[D7:.+]] = tensor.dim %[[ARG1]], %[[C2]]
-// CHECK-DAG: %[[SPLIT1:.+]] = arith.divui %[[D5]], %[[C2]]
+// CHECK-DAG: %[[SPLIT1:.+]] = arith.divsi %[[D5]], %[[C2]]
 // CHECK-DAG: %[[KEY:.+]] = tensor.expand_shape %[[ARG1]] {{\[}}[0, 1], [2], [3]{{\]}} output_shape [2, %[[SPLIT1]], %[[D6]], %[[D7]]]
 // CHECK-DAG: %[[D8:.+]] = tensor.dim %[[ARG2]], %[[C0]]
 // CHECK-DAG: %[[D9:.+]] = tensor.dim %[[ARG2]], %[[C1]]
-// CHECK-DAG: %[[SPLIT2:.+]] = arith.divui %[[D8]], %[[C2]]
+// CHECK-DAG: %[[SPLIT2:.+]] = arith.divsi %[[D8]], %[[C2]]
 // CHECK-DAG: %[[CACHE:.+]] = tensor.expand_shape %[[ARG2]] {{\[}}[0, 1], [2], [3]{{\]}} output_shape [2, %[[SPLIT2]], %[[D9]], %[[D4]]]
 // CHECK: %[[ATTENTION:.+]] = iree_linalg_ext.attention
 // CHECK-SAME: indexing_maps =

@@ -262,22 +262,22 @@ util.func public @attention_dynamic_masked(%arg0: tensor<?x?x?xf16>, %arg1: tens
 // CHECK-DAG: %[[D1:.+]] = tensor.dim %[[ARG0]], %[[C1]]
 // CHECK-DAG: %[[D2:.+]] = tensor.dim %[[ARG0]], %[[C2]]
 // CHECK-DAG: %[[D4:.+]] = tensor.dim %[[ARG2]], %[[C2]]
-// CHECK-DAG: %[[SPLIT0:.+]] = arith.divui %[[D0]]
+// CHECK-DAG: %[[SPLIT0:.+]] = arith.divsi %[[D0]]
 // CHECK-DAG: %[[EMPTY:.+]] = tensor.empty(%[[SPLIT0]], %[[D1]], %[[D4]]) : tensor<2x?x?x?xf16>
 // CHECK-DAG: %[[QUERY:.+]] = tensor.expand_shape %[[ARG0]] {{\[}}[0, 1], [2], [3]{{\]}} output_shape [2, %[[SPLIT0]], %[[D1]], %[[D2]]]
 // CHECK-DAG: %[[D5:.+]] = tensor.dim %[[ARG1]], %[[C0]]
 // CHECK-DAG: %[[D6:.+]] = tensor.dim %[[ARG1]], %[[C1]]
 // CHECK-DAG: %[[D7:.+]] = tensor.dim %[[ARG1]], %[[C2]]
-// CHECK-DAG: %[[SPLIT1:.+]] = arith.divui %[[D5]], %[[C2]]
+// CHECK-DAG: %[[SPLIT1:.+]] = arith.divsi %[[D5]], %[[C2]]
 // CHECK-DAG: %[[KEY:.+]] = tensor.expand_shape %[[ARG1]] {{\[}}[0, 1], [2], [3]{{\]}} output_shape [2, %[[SPLIT1]], %[[D6]], %[[D7]]]
 // CHECK-DAG: %[[D8:.+]] = tensor.dim %[[ARG2]], %[[C0]]
 // CHECK-DAG: %[[D9:.+]] = tensor.dim %[[ARG2]], %[[C1]]
-// CHECK-DAG: %[[SPLIT2:.+]] = arith.divui %[[D8]], %[[C2]]
+// CHECK-DAG: %[[SPLIT2:.+]] = arith.divsi %[[D8]], %[[C2]]
 // CHECK-DAG: %[[CACHE:.+]] = tensor.expand_shape %[[ARG2]] {{\[}}[0, 1], [2], [3]{{\]}} output_shape [2, %[[SPLIT2]], %[[D9]], %[[D4]]]
 // CHECK-DAG: %[[D10:.+]] = tensor.dim %[[ARG4]], %[[C0]]
 // CHECK-DAG: %[[D11:.+]] = tensor.dim %[[ARG4]], %[[C1]]
 // CHECK-DAG: %[[D12:.+]] = tensor.dim %[[ARG4]], %[[C2]]
-// CHECK-DAG: %[[SPLIT3:.+]] = arith.divui %[[D10]], %[[C2]]
+// CHECK-DAG: %[[SPLIT3:.+]] = arith.divsi %[[D10]], %[[C2]]
 // CHECK-DAG: %[[MASK:.+]] = tensor.expand_shape %[[ARG4]] {{\[}}[0, 1], [2], [3]{{\]}} output_shape [2, %[[SPLIT3]], %[[D11]], %[[D12]]]
 // CHECK: %[[ATTENTION:.+]] = iree_linalg_ext.attention
 // CHECK-SAME: indexing_maps =
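Each tensor.expand_shape here pairs with the arith.divsi just above it: the outermost dynamic dimension is split into a static factor of 2 times the signed quotient, which then appears as the dynamic extent in output_shape. A standalone sketch of that pairing, with illustrative types and names:

  // Split dim 0 of a dynamically shaped tensor into 2 x (d0 / 2),
  // leaving the remaining dimensions intact.
  %c0 = arith.constant 0 : index
  %c1 = arith.constant 1 : index
  %c2 = arith.constant 2 : index
  %d0 = tensor.dim %q_in, %c0 : tensor<?x?x?xf16>
  %d1 = tensor.dim %q_in, %c1 : tensor<?x?x?xf16>
  %d2 = tensor.dim %q_in, %c2 : tensor<?x?x?xf16>
  %split = arith.divsi %d0, %c2 : index
  %q = tensor.expand_shape %q_in [[0, 1], [2], [3]]
      output_shape [2, %split, %d1, %d2]
      : tensor<?x?x?xf16> into tensor<2x?x?x?xf16>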
