// RUN: iree-opt --pass-pipeline="builtin.module(func.func(iree-codegen-materialize-device-encoding))" --split-input-file %s | FileCheck %s

//----------------------------------------------------------------------------//
// Test suite using the generic encoding resolvers defined in the Encoding
// dialect.
//----------------------------------------------------------------------------//

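// The identity resolver is expected to materialize the encodings by simply
// dropping them: every #iree_encoding.encoding attribute disappears while the
// shapes, offsets, sizes, and strides stay the same.
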
#pipeline_layout = #hal.pipeline.layout<constants = 3, bindings = [
  #hal.pipeline.binding<storage_buffer>,
  #hal.pipeline.binding<storage_buffer>,
  #hal.pipeline.binding<storage_buffer>
]>
#map = affine_map<(d0, d1, d2) -> (d0, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d0, d1)>
#encoding_lhs = #iree_encoding.encoding<operand_index = 0, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]>
#encoding_rhs = #iree_encoding.encoding<operand_index = 1, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]>
#encoding_result = #iree_encoding.encoding<operand_index = 2, op_type = matmul, element_types = [f32, f32, f32], user_indexing_maps = [#map, #map1, #map2], iteration_sizes = [?, ?, ?]>
func.func @matmul_lowering_f32f32f32_identity_resolver() attributes {
  hal.executable.target = #hal.executable.target<"llvm-cpu", "whatever", {iree.encoding.resolver = #iree_encoding.identity_resolver<>}>
} {
  %c0 = arith.constant 0 : index
  %M = hal.interface.constant.load layout(#pipeline_layout) ordinal(0) : index
  %N = hal.interface.constant.load layout(#pipeline_layout) ordinal(1) : index
  %K = hal.interface.constant.load layout(#pipeline_layout) ordinal(2) : index
  %0 = hal.interface.binding.subspan layout(#pipeline_layout) binding(0) alignment(64) offset(%c0)
      : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding_lhs>>{%M, %K}
  %1 = hal.interface.binding.subspan layout(#pipeline_layout) binding(1) alignment(64) offset(%c0)
      : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding_rhs>>{%K, %N}
  %2 = hal.interface.binding.subspan layout(#pipeline_layout) binding(2) alignment(64) offset(%c0)
      : !iree_tensor_ext.dispatch.tensor<readwrite:tensor<?x?xf32, #encoding_result>>{%M, %N}
  %3 = iree_tensor_ext.dispatch.tensor.load %0, offsets = [0, 0], sizes = [%M, %K], strides = [1, 1]
      : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding_lhs>>{%M, %K}
      -> tensor<?x?xf32, #encoding_lhs>
  %4 = iree_tensor_ext.dispatch.tensor.load %1, offsets = [0, 0], sizes = [%K, %N], strides = [1, 1]
      : !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32, #encoding_rhs>>{%K, %N}
      -> tensor<?x?xf32, #encoding_rhs>
  %5 = iree_tensor_ext.dispatch.tensor.load %2, offsets = [0, 0], sizes = [%M, %N], strides = [1, 1]
      : !iree_tensor_ext.dispatch.tensor<readwrite:tensor<?x?xf32, #encoding_result>>{%M, %N}
      -> tensor<?x?xf32, #encoding_result>
  %6 = linalg.matmul
      ins(%3, %4 : tensor<?x?xf32, #encoding_lhs>,
                   tensor<?x?xf32, #encoding_rhs>)
      outs(%5 : tensor<?x?xf32, #encoding_result>)
      -> tensor<?x?xf32, #encoding_result>
  iree_tensor_ext.dispatch.tensor.store %6, %2, offsets = [0, 0], sizes = [%M, %N], strides = [1, 1]
      : tensor<?x?xf32, #encoding_result>
      -> !iree_tensor_ext.dispatch.tensor<readwrite:tensor<?x?xf32, #encoding_result>>{%M, %N}
  return
}
// CHECK-LABEL: func @matmul_lowering_f32f32f32_identity_resolver()
// CHECK-DAG:     %[[C0:.+]] = arith.constant 0 : index
// CHECK-DAG:     %[[M:.+]] = hal.interface.constant.load layout(#pipeline_layout) ordinal(0)
// CHECK-DAG:     %[[N:.+]] = hal.interface.constant.load layout(#pipeline_layout) ordinal(1)
// CHECK-DAG:     %[[K:.+]] = hal.interface.constant.load layout(#pipeline_layout) ordinal(2)
// CHECK:         %[[LHS_BINDING:.+]] = hal.interface.binding.subspan layout({{.+}}) binding(0)
// CHECK-SAME:      !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%[[M]], %[[K]]}
// CHECK:         %[[RHS_BINDING:.+]] = hal.interface.binding.subspan layout({{.+}}) binding(1)
// CHECK-SAME:      !iree_tensor_ext.dispatch.tensor<readonly:tensor<?x?xf32>>{%[[K]], %[[N]]}
// CHECK:         %[[OUTS_BINDING:.+]] = hal.interface.binding.subspan layout({{.+}}) binding(2)
// CHECK-SAME:      !iree_tensor_ext.dispatch.tensor<readwrite:tensor<?x?xf32>>{%[[M]], %[[N]]}
// CHECK:         %[[LHS:.+]] = iree_tensor_ext.dispatch.tensor.load %[[LHS_BINDING]]
// CHECK-SAME:      offsets = [0, 0], sizes = [%[[M]], %[[K]]], strides = [1, 1]
// CHECK:         %[[RHS:.+]] = iree_tensor_ext.dispatch.tensor.load %[[RHS_BINDING]]
// CHECK-SAME:      offsets = [0, 0], sizes = [%[[K]], %[[N]]], strides = [1, 1]
// CHECK:         %[[OUTS:.+]] = iree_tensor_ext.dispatch.tensor.load %[[OUTS_BINDING]]
// CHECK-SAME:      offsets = [0, 0], sizes = [%[[M]], %[[N]]], strides = [1, 1]
// CHECK:         %[[RES:.+]] = linalg.matmul
// CHECK-SAME:      ins(%[[LHS]], %[[RHS]] :
// CHECK-SAME:      outs(%[[OUTS]] :
// CHECK:         iree_tensor_ext.dispatch.tensor.store %[[RES]], %[[OUTS_BINDING]]
// CHECK-SAME:      offsets = [0, 0], sizes = [%[[M]], %[[N]]], strides = [1, 1]

// -----

//----------------------------------------------------------------------------//
// Test suite using the CPU encoding resolvers.
//----------------------------------------------------------------------------//