From 3960e0fecb54fd76f86f505865dfe6814e439387 Mon Sep 17 00:00:00 2001 From: Momchil Velikov Date: Mon, 19 May 2025 14:50:45 +0000 Subject: [PATCH 1/5] [MLIR] Integration tests for lowering vector.contract to SVE FEAT_I8MM --- .../CPU/ArmSVE/contraction-smmla-4x8x4.mlir | 117 +++++++++++++ .../ArmSVE/contraction-smmla-8x8x8-vs2.mlir | 159 ++++++++++++++++++ .../CPU/ArmSVE/contraction-summla-4x8x4.mlir | 118 +++++++++++++ .../CPU/ArmSVE/contraction-ummla-4x8x4.mlir | 119 +++++++++++++ .../CPU/ArmSVE/contraction-usmmla-4x8x4.mlir | 117 +++++++++++++ 5 files changed, 630 insertions(+) create mode 100644 mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-smmla-4x8x4.mlir create mode 100644 mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-smmla-8x8x8-vs2.mlir create mode 100644 mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-summla-4x8x4.mlir create mode 100644 mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-ummla-4x8x4.mlir create mode 100644 mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-usmmla-4x8x4.mlir diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-smmla-4x8x4.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-smmla-4x8x4.mlir new file mode 100644 index 0000000000000..88534dd2aab1e --- /dev/null +++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-smmla-4x8x4.mlir @@ -0,0 +1,117 @@ +// REQUIRES: arm-emulator + +// DEFINE: %{compile} = mlir-opt %s \ +// DEFINE: --convert-vector-to-scf --convert-scf-to-cf --convert-vector-to-llvm='enable-arm-sve enable-arm-i8mm' \ +// DEFINE: --expand-strided-metadata --convert-to-llvm --finalize-memref-to-llvm --reconcile-unrealized-casts \ +// DEFINE: -o %t + +// DEFINE: %{entry_point} = main + +// DEFINE: %{run} = %mcr_aarch64_cmd %t -e %{entry_point} -entry-point-result=void --march=aarch64 --mattr="+sve,+i8mm" \ +// DEFINE: -shared-libs=%mlir_runner_utils,%mlir_c_runner_utils,%native_mlir_arm_runner_utils + +// RUN: rm -f %t && %{compile} && %{run} | FileCheck %s + +#packed_maps = [ + affine_map<(d0, d1, d2) -> (d0, d2)>, + affine_map<(d0, d1, d2) -> (d1, d2)>, + affine_map<(d0, d1, d2) -> (d0, d1)> +] + +func.func private @setArmVLBits(%bits : i32) + +func.func @main() { + %c128 = arith.constant 128 : i32 + func.call @setArmVLBits(%c128) : (i32) -> () + + %c0 = arith.constant 0 : index + %c0_i32 = arith.constant 0 : i32 + %c0_i8 = arith.constant 0 : i8 + +// Accumulator test data + %acc_cst = arith.constant dense<[[-44, 20, 44, -46], + [ -8, 25, -34, 26], + [-20, -36, -3, 39], + [-48, -31, -25, -21]]> : vector<4x4xi32> + %acc_m = memref.alloca() : memref<4x4xi32> + vector.transfer_write %acc_cst, %acc_m[%c0, %c0] : vector<4x4xi32>, memref<4x4xi32> + + %acc_m1 = memref.collapse_shape %acc_m [[0, 1]] : memref<4x4xi32> into memref<16xi32> + %acc_flat = vector.transfer_read %acc_m1[%c0], %c0_i32 {in_bounds = [true]} : memref<16xi32>, vector<[16]xi32> + %acc = vector.shape_cast %acc_flat : vector<[16]xi32> to vector<4x[4]xi32> + + vector.print str "ACC:\n" + %acc0 = vector.extract %acc[0] : vector<[4]xi32> from vector<4x[4]xi32> + %acc1 = vector.extract %acc[1] : vector<[4]xi32> from vector<4x[4]xi32> + %acc2 = vector.extract %acc[2] : vector<[4]xi32> from vector<4x[4]xi32> + %acc3 = vector.extract %acc[3] : vector<[4]xi32> from vector<4x[4]xi32> + vector.print %acc0 : vector<[4]xi32> + vector.print %acc1 : vector<[4]xi32> + vector.print %acc2 : vector<[4]xi32> + vector.print %acc3 : vector<[4]xi32> + + // LHS test data + %lhs_cst = arith.constant 
dense<[[-35, -27, -36, -31, 23, -34, -8, -33], + [-20, 17, -32, -47, 37, 22, -7, -21], + [ -7, -35, 20, -4, 39, 46, -23, 40], + [ 40, 27, 37, 43, 38, -6, 37, 49]]> : vector<4x8xi8> + + %lhs_m = memref.alloca() : memref<4x8xi8> + vector.transfer_write %lhs_cst, %lhs_m[%c0, %c0] : vector<4x8xi8>, memref<4x8xi8> + %lhs = vector.transfer_read %lhs_m[%c0, %c0], %c0_i8 : memref<4x8xi8>, vector<4x8xi8> + + vector.print str "LHS:\n" + %lhs0 = vector.extract %lhs[0] : vector<8xi8> from vector<4x8xi8> + %lhs1 = vector.extract %lhs[1] : vector<8xi8> from vector<4x8xi8> + %lhs2 = vector.extract %lhs[2] : vector<8xi8> from vector<4x8xi8> + %lhs3 = vector.extract %lhs[3] : vector<8xi8> from vector<4x8xi8> + vector.print %lhs0 : vector<8xi8> + vector.print %lhs1 : vector<8xi8> + vector.print %lhs2 : vector<8xi8> + vector.print %lhs3 : vector<8xi8> + + // RHS test data + %rhs_cst = arith.constant dense<[[-17, -50, -1, 48, -13, 22, 39, 33], + [-35, -24, 37, -32, 33, 30, -11, -17], + [-28, 31, 3, -44, -15, -27, 22, 35], + [-23, 39, 48, 26, -23, 32, -39, -38]]> : vector<4x8xi8> + + %rhs_m = memref.alloca() : memref<4x8xi8> + vector.transfer_write %rhs_cst, %rhs_m[%c0, %c0] : vector<4x8xi8>, memref<4x8xi8> + + %rhs_m1 = memref.collapse_shape %rhs_m [[0, 1]] : memref<4x8xi8> into memref<32xi8> + %rhs_flat = vector.transfer_read %rhs_m1[%c0], %c0_i8 {in_bounds = [true]} : memref<32xi8>, vector<[32]xi8> + + vector.print str "RHS:\n" + %rhs0 = vector.scalable.extract %rhs_flat[0] : vector<[16]xi8> from vector<[32]xi8> + %rhs1 = vector.scalable.extract %rhs_flat[16] : vector<[16]xi8> from vector<[32]xi8> + vector.print %rhs0 : vector<[16]xi8> + vector.print %rhs1 : vector<[16]xi8> + + %rhs = vector.shape_cast %rhs_flat : vector<[32]xi8> to vector<[4]x8xi8> + + // Matrix multiplication + %0 = arith.extsi %lhs : vector<4x8xi8> to vector<4x8xi32> + %1 = arith.extsi %rhs : vector<[4]x8xi8> to vector<[4]x8xi32> + %2 = vector.contract {indexing_maps = #packed_maps, + iterator_types = ["parallel", "parallel", "reduction"], + kind = #vector.kind} %0, %1, %acc + : vector<4x8xi32>, vector<[4]x8xi32> into vector<4x[4]xi32> + + // Display the result of the multiplication + vector.print str "Result:\n" + %u0 = vector.extract %2[0] : vector<[4]xi32> from vector<4x[4]xi32> + %u1 = vector.extract %2[1] : vector<[4]xi32> from vector<4x[4]xi32> + %u2 = vector.extract %2[2] : vector<[4]xi32> from vector<4x[4]xi32> + %u3 = vector.extract %2[3] : vector<[4]xi32> from vector<4x[4]xi32> + vector.print %u0 : vector<[4]xi32> + vector.print %u1 : vector<[4]xi32> + vector.print %u2 : vector<[4]xi32> + vector.print %u3 : vector<[4]xi32> + +// CHECK: ( -1999, 1941, 685, -2879 ) +// CHECK: ( -3705, 2952, 987, -685 ) +// CHECK: ( 2565, 4157, -1589, -357 ) +// CHECK: ( 2383, -2252, 32, -1365 ) + return +} diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-smmla-8x8x8-vs2.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-smmla-8x8x8-vs2.mlir new file mode 100644 index 0000000000000..ce57be91fa540 --- /dev/null +++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-smmla-8x8x8-vs2.mlir @@ -0,0 +1,159 @@ +// REQUIRES: arm-emulator + +// DEFINE: %{compile} = mlir-opt %s \ +// DEFINE: --convert-vector-to-scf --convert-scf-to-cf --convert-vector-to-llvm='enable-arm-sve enable-arm-i8mm' \ +// DEFINE: --expand-strided-metadata --convert-to-llvm --finalize-memref-to-llvm --reconcile-unrealized-casts \ +// DEFINE: -o %t + +// DEFINE: %{entry_point} = main + +// DEFINE: %{run} = %mcr_aarch64_cmd %t -e 
%{entry_point} -entry-point-result=void --march=aarch64 --mattr="+sve,+i8mm" \ +// DEFINE: -shared-libs=%mlir_runner_utils,%mlir_c_runner_utils,%native_mlir_arm_runner_utils + +// RUN: rm -f %t && %{compile} && %{run} | FileCheck %s + +#packed_maps = [ + affine_map<(d0, d1, d2) -> (d0, d2)>, + affine_map<(d0, d1, d2) -> (d1, d2)>, + affine_map<(d0, d1, d2) -> (d0, d1)> +] + +func.func private @setArmVLBits(%bits : i32) + +func.func @main() { + %c256 = arith.constant 256 : i32 + func.call @setArmVLBits(%c256) : (i32) -> () + + %c0 = arith.constant 0 : index + %c0_i32 = arith.constant 0 : i32 + %c0_i8 = arith.constant 0 : i8 + + + // Accumulator test data + %acc_cst = arith.constant dense<[[-44, 20, 44, -46, -8, 25, -34, 26], + [-20, -36, -3, 39, -48, -31, -25, -21], + [-35, -27, -36, -31, 23, -34, -8, -33], + [-20, 17, -32, -47, 37, 22, -7, -21], + [ -7, -35, 20, -4, 39, 46, -23, 40], + [ 40, 27, 37, 43, 38, -6, 37, 49], + [-17, -50, -1, 48, -13, 22, 39, 33], + [-35, -24, 37, -32, 33, 30, -11, -17]]> : vector<8x8xi32> + %acc_m = memref.alloca() : memref<8x8xi32> + vector.transfer_write %acc_cst, %acc_m[%c0, %c0] : vector<8x8xi32>, memref<8x8xi32> + + %acc_m1 = memref.collapse_shape %acc_m [[0, 1]] : memref<8x8xi32> into memref<64xi32> + %acc_flat = vector.transfer_read %acc_m1[%c0], %c0_i32 {in_bounds = [true]} : memref<64xi32>, vector<[32]xi32> + %acc = vector.shape_cast %acc_flat : vector<[32]xi32> to vector<8x[4]xi32> + + vector.print str "ACC:\n" + %acc0 = vector.extract %acc[0] : vector<[4]xi32> from vector<8x[4]xi32> + %acc1 = vector.extract %acc[1] : vector<[4]xi32> from vector<8x[4]xi32> + %acc2 = vector.extract %acc[2] : vector<[4]xi32> from vector<8x[4]xi32> + %acc3 = vector.extract %acc[3] : vector<[4]xi32> from vector<8x[4]xi32> + %acc4 = vector.extract %acc[4] : vector<[4]xi32> from vector<8x[4]xi32> + %acc5 = vector.extract %acc[5] : vector<[4]xi32> from vector<8x[4]xi32> + %acc6 = vector.extract %acc[6] : vector<[4]xi32> from vector<8x[4]xi32> + %acc7 = vector.extract %acc[7] : vector<[4]xi32> from vector<8x[4]xi32> + vector.print %acc0 : vector<[4]xi32> + vector.print %acc1 : vector<[4]xi32> + vector.print %acc2 : vector<[4]xi32> + vector.print %acc3 : vector<[4]xi32> + vector.print %acc4 : vector<[4]xi32> + vector.print %acc5 : vector<[4]xi32> + vector.print %acc6 : vector<[4]xi32> + vector.print %acc7 : vector<[4]xi32> + + // LHS test data + %lhs_cst = arith.constant dense<[[-28, 31, 3, -44, -15, -27, 22, 35], + [-23, 39, 48, 26, -23, 32, -39, -38], + [ -3, 9, 43, -30, -32, 39, 41, -39], + [-13, -21, -25, 27, 47, -36, -11, -11], + [ -4, -20, 36, 11, 13, -23, 24, -13], + [-20, 30, -5, 1, 42, -37, -22, 35], + [-22, 38, -4, 44, 25, -31, 23, -39], + [-45, -4, -31, -24, 14, -41, -47, 22]]> : vector<8x8xi8> + + %lhs_m = memref.alloca() : memref<8x8xi8> + vector.transfer_write %lhs_cst, %lhs_m[%c0, %c0] : vector<8x8xi8>, memref<8x8xi8> + %lhs = vector.transfer_read %lhs_m[%c0, %c0], %c0_i8 : memref<8x8xi8>, vector<8x8xi8> + + vector.print str "LHS:\n" + %lhs0 = vector.extract %lhs[0] : vector<8xi8> from vector<8x8xi8> + %lhs1 = vector.extract %lhs[1] : vector<8xi8> from vector<8x8xi8> + %lhs2 = vector.extract %lhs[2] : vector<8xi8> from vector<8x8xi8> + %lhs3 = vector.extract %lhs[3] : vector<8xi8> from vector<8x8xi8> + %lhs4 = vector.extract %lhs[4] : vector<8xi8> from vector<8x8xi8> + %lhs5 = vector.extract %lhs[5] : vector<8xi8> from vector<8x8xi8> + %lhs6 = vector.extract %lhs[6] : vector<8xi8> from vector<8x8xi8> + %lhs7 = vector.extract %lhs[7] : vector<8xi8> from 
vector<8x8xi8>
+  vector.print %lhs0 : vector<8xi8>
+  vector.print %lhs1 : vector<8xi8>
+  vector.print %lhs2 : vector<8xi8>
+  vector.print %lhs3 : vector<8xi8>
+  vector.print %lhs4 : vector<8xi8>
+  vector.print %lhs5 : vector<8xi8>
+  vector.print %lhs6 : vector<8xi8>
+  vector.print %lhs7 : vector<8xi8>
+
+  // RHS test data
+  %rhs_cst = arith.constant dense<[[-40, -11, -36,  36,  -1,  20,  14, -32],
+                                   [ 46, -45, -48, -46, -24,  31, -36,  22],
+                                   [  2,  36,  45, -29, -37, -49, -20, -35],
+                                   [ -6,  23,  23,  15,  20,   4,  -8,  -2],
+                                   [-35,  -6,  16,  49, -50,   9, -44,  13],
+                                   [ 24,   1,  -4, -44,  41,  15, -43,  44],
+                                   [ 44,   0, -10,  41,  22,  44, -40,   0],
+                                   [-33,  19,  27,  22,  38, -17,  23,  -9]]> : vector<8x8xi8>
+
+  %rhs_m = memref.alloca() : memref<8x8xi8>
+  vector.transfer_write %rhs_cst, %rhs_m[%c0, %c0] : vector<8x8xi8>, memref<8x8xi8>
+
+  %rhs_m1 = memref.collapse_shape %rhs_m [[0, 1]] : memref<8x8xi8> into memref<64xi8>
+  %rhs_flat = vector.transfer_read %rhs_m1[%c0], %c0_i8 {in_bounds = [true]} : memref<64xi8>, vector<[32]xi8>
+
+  vector.print str "RHS:\n"
+  %rhs0 = vector.scalable.extract %rhs_flat[ 0] : vector<[16]xi8> from vector<[32]xi8>
+  %rhs1 = vector.scalable.extract %rhs_flat[16] : vector<[16]xi8> from vector<[32]xi8>
+  vector.print %rhs0 : vector<[16]xi8>
+  vector.print %rhs1 : vector<[16]xi8>
+
+  %rhs = vector.shape_cast %rhs_flat : vector<[32]xi8> to vector<[4]x8xi8>
+
+  // Matrix multiplication
+  %0 = arith.extsi %lhs : vector<8x8xi8> to vector<8x8xi32>
+  %1 = arith.extsi %rhs : vector<[4]x8xi8> to vector<[4]x8xi32>
+  %2 = vector.contract {indexing_maps = #packed_maps,
+                        iterator_types = ["parallel", "parallel", "reduction"],
+                        kind = #vector.kind} %0, %1, %acc
+    : vector<8x8xi32>, vector<[4]x8xi32> into vector<8x[4]xi32>
+
+  // Display the result of the multiplication
+  vector.print str "Result:\n"
+  %u0 = vector.extract %2[0] : vector<[4]xi32> from vector<8x[4]xi32>
+  %u1 = vector.extract %2[1] : vector<[4]xi32> from vector<8x[4]xi32>
+  %u2 = vector.extract %2[2] : vector<[4]xi32> from vector<8x[4]xi32>
+  %u3 = vector.extract %2[3] : vector<[4]xi32> from vector<8x[4]xi32>
+  %u4 = vector.extract %2[4] : vector<[4]xi32> from vector<8x[4]xi32>
+  %u5 = vector.extract %2[5] : vector<[4]xi32> from vector<8x[4]xi32>
+  %u6 = vector.extract %2[6] : vector<[4]xi32> from vector<8x[4]xi32>
+  %u7 = vector.extract %2[7] : vector<[4]xi32> from vector<8x[4]xi32>
+  vector.print %u0 : vector<[4]xi32>
+  vector.print %u1 : vector<[4]xi32>
+  vector.print %u2 : vector<[4]xi32>
+  vector.print %u3 : vector<[4]xi32>
+  vector.print %u4 : vector<[4]xi32>
+  vector.print %u5 : vector<[4]xi32>
+  vector.print %u6 : vector<[4]xi32>
+  vector.print %u7 : vector<[4]xi32>
+
+
+// CHECK: ( -2294, -1282, 2728, -410, -1328, 882, -5498, 732 )
+// CHECK: ( 1012, -4237, 4154, 2624, 5225, -2338, 2011, 1374 )
+// CHECK: ( -8, -1611, 2905, -1, -1068, -3155, -2428, 153 )
+// CHECK: ( 2034, -1768, -2092, 284, -792, -23, 668, 2172 )
+// CHECK: ( -248, -3728, 1214, 555, -668, -2114, -1794, 2560 )
+// CHECK: ( -1484, -2642, 297, 1551, -483, 3173, -576, 2570 )
+// CHECK: ( 3098, -7851, 1366, 1892, -427, -4533, -819, 4698 )
+// CHECK: ( -135, 1247, 765, -479, 1245, 3074, -2281, -23 )
+  return
+}
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-summla-4x8x4.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-summla-4x8x4.mlir
new file mode 100644
index 0000000000000..f1f311ddb0c18
--- /dev/null
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-summla-4x8x4.mlir
@@ -0,0 +1,118 @@
+// REQUIRES: 
arm-emulator + +// DEFINE: %{compile} = mlir-opt %s \ +// DEFINE: --convert-vector-to-scf --convert-scf-to-cf --convert-vector-to-llvm='enable-arm-sve enable-arm-i8mm' \ +// DEFINE: --expand-strided-metadata --convert-to-llvm --finalize-memref-to-llvm --reconcile-unrealized-casts \ +// DEFINE: -o %t + +// DEFINE: %{entry_point} = main + +// DEFINE: %{run} = %mcr_aarch64_cmd %t -e %{entry_point} -entry-point-result=void --march=aarch64 --mattr="+sve,+i8mm" \ +// DEFINE: -shared-libs=%mlir_runner_utils,%mlir_c_runner_utils,%native_mlir_arm_runner_utils + +// RUN: rm -f %t && %{compile} && %{run} | FileCheck %s + +#packed_maps = [ + affine_map<(d0, d1, d2) -> (d0, d2)>, + affine_map<(d0, d1, d2) -> (d1, d2)>, + affine_map<(d0, d1, d2) -> (d0, d1)> +] + +func.func private @setArmVLBits(%bits : i32) + +func.func @main() { + %c128 = arith.constant 128 : i32 + func.call @setArmVLBits(%c128) : (i32) -> () + + %c0 = arith.constant 0 : index + %c0_i32 = arith.constant 0 : i32 + %c0_i8 = arith.constant 0 : i8 + +// Accumulator test data + %acc_cst = arith.constant dense<[[-44, 20, 44, -46], + [ -8, 25, -34, 26], + [-20, -36, -3, 39], + [-48, -31, -25, -21]]> : vector<4x4xi32> + %acc_m = memref.alloca() : memref<4x4xi32> + vector.transfer_write %acc_cst, %acc_m[%c0, %c0] : vector<4x4xi32>, memref<4x4xi32> + + %acc_m1 = memref.collapse_shape %acc_m [[0, 1]] : memref<4x4xi32> into memref<16xi32> + %acc_flat = vector.transfer_read %acc_m1[%c0], %c0_i32 {in_bounds = [true]} : memref<16xi32>, vector<[16]xi32> + %acc = vector.shape_cast %acc_flat : vector<[16]xi32> to vector<4x[4]xi32> + + vector.print str "ACC:\n" + %acc0 = vector.extract %acc[0] : vector<[4]xi32> from vector<4x[4]xi32> + %acc1 = vector.extract %acc[1] : vector<[4]xi32> from vector<4x[4]xi32> + %acc2 = vector.extract %acc[2] : vector<[4]xi32> from vector<4x[4]xi32> + %acc3 = vector.extract %acc[3] : vector<[4]xi32> from vector<4x[4]xi32> + vector.print %acc0 : vector<[4]xi32> + vector.print %acc1 : vector<[4]xi32> + vector.print %acc2 : vector<[4]xi32> + vector.print %acc3 : vector<[4]xi32> + + // LHS test data + %lhs_cst = arith.constant dense<[[-35, -27, -36, -31, 23, -34, -8, -33], + [-20, 17, -32, -47, 37, 22, -7, -21], + [ -7, -35, 20, -4, 39, 46, -23, 40], + [ 40, 27, 37, 43, 38, -6, 37, 49]]> : vector<4x8xi8> + + %lhs_m = memref.alloca() : memref<4x8xi8> + vector.transfer_write %lhs_cst, %lhs_m[%c0, %c0] : vector<4x8xi8>, memref<4x8xi8> + %lhs = vector.transfer_read %lhs_m[%c0, %c0], %c0_i8 : memref<4x8xi8>, vector<4x8xi8> + + vector.print str "LHS:\n" + %lhs0 = vector.extract %lhs[0] : vector<8xi8> from vector<4x8xi8> + %lhs1 = vector.extract %lhs[1] : vector<8xi8> from vector<4x8xi8> + %lhs2 = vector.extract %lhs[2] : vector<8xi8> from vector<4x8xi8> + %lhs3 = vector.extract %lhs[3] : vector<8xi8> from vector<4x8xi8> + vector.print %lhs0 : vector<8xi8> + vector.print %lhs1 : vector<8xi8> + vector.print %lhs2 : vector<8xi8> + vector.print %lhs3 : vector<8xi8> + + // RHS test data + %rhs_cst = arith.constant dense<[[125, 171, 138, 187, 108, 175, 82, 99], + [221, 25, 164, 97, 156, 221, 218, 177], + [171, 160, 219, 191, 144, 45, 161, 210], + [223, 165, 123, 99, 108, 86, 37, 92]]> : vector<4x8xi8> + + %rhs_m = memref.alloca() : memref<4x8xi8> + vector.transfer_write %rhs_cst, %rhs_m[%c0, %c0] : vector<4x8xi8>, memref<4x8xi8> + + %rhs_m1 = memref.collapse_shape %rhs_m [[0, 1]] : memref<4x8xi8> into memref<32xi8> + %rhs_flat = vector.transfer_read %rhs_m1[%c0], %c0_i8 {in_bounds = [true]} : memref<32xi8>, vector<[32]xi8> + + vector.print 
str "RHS:\n" + %rhs0 = vector.scalable.extract %rhs_flat[0] : vector<[16]xi8> from vector<[32]xi8> + %rhs1 = vector.scalable.extract %rhs_flat[16] : vector<[16]xi8> from vector<[32]xi8> + vector.print %rhs0 : vector<[16]xi8> + vector.print %rhs1 : vector<[16]xi8> + + %rhs = vector.shape_cast %rhs_flat : vector<[32]xi8> to vector<[4]x8xi8> + + // Matrix multiplication + %0 = arith.extsi %lhs : vector<4x8xi8> to vector<4x8xi32> + %1 = arith.extui %rhs : vector<[4]x8xi8> to vector<[4]x8xi32> + %2 = vector.contract {indexing_maps = #packed_maps, + iterator_types = ["parallel", "parallel", "reduction"], + kind = #vector.kind} %0, %1, %acc + : vector<4x8xi32>, vector<[4]x8xi32> into vector<4x[4]xi32> + + // Display the result of the multiplication + vector.print str "Result:\n" + %u0 = vector.extract %2[0] : vector<[4]xi32> from vector<4x[4]xi32> + %u1 = vector.extract %2[1] : vector<[4]xi32> from vector<4x[4]xi32> + %u2 = vector.extract %2[2] : vector<[4]xi32> from vector<4x[4]xi32> + %u3 = vector.extract %2[3] : vector<[4]xi32> from vector<4x[4]xi32> + vector.print %u0 : vector<[4]xi32> + vector.print %u1 : vector<[4]xi32> + vector.print %u2 : vector<[4]xi32> + vector.print %u3 : vector<[4]xi32> + +// CHECK: ( -27190, -28812, -30502, -23575 ) +// CHECK: ( -7613, -8386, -15938, -6521 ) +// CHECK: ( 9468, 18750, 9199, 5764 ) +// CHECK: ( 33655, 41064, 48900, 31627 ) + return +} + diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-ummla-4x8x4.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-ummla-4x8x4.mlir new file mode 100644 index 0000000000000..7af0b2c3f1054 --- /dev/null +++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-ummla-4x8x4.mlir @@ -0,0 +1,119 @@ +// REQUIRES: arm-emulator + +// DEFINE: %{compile} = mlir-opt %s \ +// DEFINE: --convert-vector-to-scf --convert-scf-to-cf --convert-vector-to-llvm='enable-arm-sve enable-arm-i8mm' \ +// DEFINE: --expand-strided-metadata --convert-to-llvm --finalize-memref-to-llvm --reconcile-unrealized-casts \ +// DEFINE: -o %t + +// DEFINE: %{entry_point} = main + +// DEFINE: %{run} = %mcr_aarch64_cmd %t -e %{entry_point} -entry-point-result=void --march=aarch64 --mattr="+sve,+i8mm" \ +// DEFINE: -shared-libs=%mlir_runner_utils,%mlir_c_runner_utils,%native_mlir_arm_runner_utils + +// RUN: rm -f %t && %{compile} && %{run} | FileCheck %s + +#packed_maps = [ + affine_map<(d0, d1, d2) -> (d0, d2)>, + affine_map<(d0, d1, d2) -> (d1, d2)>, + affine_map<(d0, d1, d2) -> (d0, d1)> +] + +func.func private @setArmVLBits(%bits : i32) + +func.func @main() { + + %c128 = arith.constant 128 : i32 + func.call @setArmVLBits(%c128) : (i32) -> () + + %c0 = arith.constant 0 : index + %c0_i32 = arith.constant 0 : i32 + %c0_i8 = arith.constant 0 : i8 + + +// Accumulator test data + %acc_cst = arith.constant dense<[[16, 16, 48, 40], + [40, 24, 35, 12], + [33, 24, 29, 19], + [28, 13, 33, 18]]> : vector<4x4xi32> + %acc_m = memref.alloca() : memref<4x4xi32> + vector.transfer_write %acc_cst, %acc_m[%c0, %c0] : vector<4x4xi32>, memref<4x4xi32> + + %acc_m1 = memref.collapse_shape %acc_m [[0, 1]] : memref<4x4xi32> into memref<16xi32> + %acc_flat = vector.transfer_read %acc_m1[%c0], %c0_i32 {in_bounds = [true]} : memref<16xi32>, vector<[16]xi32> + %acc = vector.shape_cast %acc_flat : vector<[16]xi32> to vector<4x[4]xi32> + + vector.print str "ACC:\n" + %acc0 = vector.extract %acc[0] : vector<[4]xi32> from vector<4x[4]xi32> + %acc1 = vector.extract %acc[1] : vector<[4]xi32> from vector<4x[4]xi32> + %acc2 = vector.extract %acc[2] : 
vector<[4]xi32> from vector<4x[4]xi32> + %acc3 = vector.extract %acc[3] : vector<[4]xi32> from vector<4x[4]xi32> + vector.print %acc0 : vector<[4]xi32> + vector.print %acc1 : vector<[4]xi32> + vector.print %acc2 : vector<[4]xi32> + vector.print %acc3 : vector<[4]xi32> + + // LHS test data + %lhs_cst = arith.constant dense<[[35, 42, 37, 49, 36, 36, 23, 33], + [39, 34, 33, 45, 43, 10, 44, 47], + [18, 35, 29, 25, 36, 33, 28, 29], + [26, 49, 43, 32, 27, 16, 45, 33]]> : vector<4x8xi8> + + %lhs_m = memref.alloca() : memref<4x8xi8> + vector.transfer_write %lhs_cst, %lhs_m[%c0, %c0] : vector<4x8xi8>, memref<4x8xi8> + %lhs = vector.transfer_read %lhs_m[%c0, %c0], %c0_i8 : memref<4x8xi8>, vector<4x8xi8> + + vector.print str "LHS:\n" + %lhs0 = vector.extract %lhs[0] : vector<8xi8> from vector<4x8xi8> + %lhs1 = vector.extract %lhs[1] : vector<8xi8> from vector<4x8xi8> + %lhs2 = vector.extract %lhs[2] : vector<8xi8> from vector<4x8xi8> + %lhs3 = vector.extract %lhs[3] : vector<8xi8> from vector<4x8xi8> + vector.print %lhs0 : vector<8xi8> + vector.print %lhs1 : vector<8xi8> + vector.print %lhs2 : vector<8xi8> + vector.print %lhs3 : vector<8xi8> + + // RHS test data + %rhs_cst = arith.constant dense<[[18, 31, 37, 35, 44, 22, 37, 28], + [21, 22, 49, 39, 30, 28, 35, 37], + [21, 47, 39, 35, 23, 43, 24, 49], + [49, 49, 40, 32, 37, 20, 47, 40]]> : vector<4x8xi8> + + %rhs_m = memref.alloca() : memref<4x8xi8> + vector.transfer_write %rhs_cst, %rhs_m[%c0, %c0] : vector<4x8xi8>, memref<4x8xi8> + + %rhs_m1 = memref.collapse_shape %rhs_m [[0, 1]] : memref<4x8xi8> into memref<32xi8> + %rhs_flat = vector.transfer_read %rhs_m1[%c0], %c0_i8 {in_bounds = [true]} : memref<32xi8>, vector<[32]xi8> + + vector.print str "RHS:\n" + %rhs0 = vector.scalable.extract %rhs_flat[0] : vector<[16]xi8> from vector<[32]xi8> + %rhs1 = vector.scalable.extract %rhs_flat[16] : vector<[16]xi8> from vector<[32]xi8> + vector.print %rhs0 : vector<[16]xi8> + vector.print %rhs1 : vector<[16]xi8> + + %rhs = vector.shape_cast %rhs_flat : vector<[32]xi8> to vector<[4]x8xi8> + + // Matrix multiplication + %0 = arith.extui %lhs : vector<4x8xi8> to vector<4x8xi32> + %1 = arith.extui %rhs : vector<[4]x8xi8> to vector<[4]x8xi32> + %2 = vector.contract {indexing_maps = #packed_maps, + iterator_types = ["parallel", "parallel", "reduction"], + kind = #vector.kind} %0, %1, %acc + : vector<4x8xi32>, vector<[4]x8xi32> into vector<4x[4]xi32> + + // Display the result of the multiplication + vector.print str "Result:\n" + %u0 = vector.extract %2[0] : vector<[4]xi32> from vector<4x[4]xi32> + %u1 = vector.extract %2[1] : vector<[4]xi32> from vector<4x[4]xi32> + %u2 = vector.extract %2[2] : vector<[4]xi32> from vector<4x[4]xi32> + %u3 = vector.extract %2[3] : vector<[4]xi32> from vector<4x[4]xi32> + vector.print %u0 : vector<[4]xi32> + vector.print %u1 : vector<[4]xi32> + vector.print %u2 : vector<[4]xi32> + vector.print %u3 : vector<[4]xi32> + +// CHECK: ( 9183, 9513, 10460, 11314 ) +// CHECK: ( 9648, 9812, 10092, 12088 ) +// CHECK: ( 7548, 7625, 8398, 9044 ) +// CHECK: ( 8855, 9046, 9685, 11191 ) + return +} diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-usmmla-4x8x4.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-usmmla-4x8x4.mlir new file mode 100644 index 0000000000000..a25a51dd7018c --- /dev/null +++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-usmmla-4x8x4.mlir @@ -0,0 +1,117 @@ +// REQUIRES: arm-emulator + +// DEFINE: %{compile} = mlir-opt %s \ +// DEFINE: --convert-vector-to-scf 
--convert-scf-to-cf --convert-vector-to-llvm='enable-arm-sve enable-arm-i8mm' \ +// DEFINE: --expand-strided-metadata --convert-to-llvm --finalize-memref-to-llvm --reconcile-unrealized-casts \ +// DEFINE: -o %t + +// DEFINE: %{entry_point} = main + +// DEFINE: %{run} = %mcr_aarch64_cmd %t -e %{entry_point} -entry-point-result=void --march=aarch64 --mattr="+sve,+i8mm" \ +// DEFINE: -shared-libs=%mlir_runner_utils,%mlir_c_runner_utils,%native_mlir_arm_runner_utils + +// RUN: rm -f %t && %{compile} && %{run} | FileCheck %s + +#packed_maps = [ + affine_map<(d0, d1, d2) -> (d0, d2)>, + affine_map<(d0, d1, d2) -> (d1, d2)>, + affine_map<(d0, d1, d2) -> (d0, d1)> +] + +func.func private @setArmVLBits(%bits : i32) + +func.func @main() { + %c128 = arith.constant 128 : i32 + func.call @setArmVLBits(%c128) : (i32) -> () + + %c0 = arith.constant 0 : index + %c0_i32 = arith.constant 0 : i32 + %c0_i8 = arith.constant 0 : i8 + +// Accumulator test data + %acc_cst = arith.constant dense<[[-44, 20, 44, -46], + [ -8, 25, -34, 26], + [-20, -36, -3, 39], + [-48, -31, -25, -21]]> : vector<4x4xi32> + %acc_m = memref.alloca() : memref<4x4xi32> + vector.transfer_write %acc_cst, %acc_m[%c0, %c0] : vector<4x4xi32>, memref<4x4xi32> + + %acc_m1 = memref.collapse_shape %acc_m [[0, 1]] : memref<4x4xi32> into memref<16xi32> + %acc_flat = vector.transfer_read %acc_m1[%c0], %c0_i32 {in_bounds = [true]} : memref<16xi32>, vector<[16]xi32> + %acc = vector.shape_cast %acc_flat : vector<[16]xi32> to vector<4x[4]xi32> + + vector.print str "ACC:\n" + %acc0 = vector.extract %acc[0] : vector<[4]xi32> from vector<4x[4]xi32> + %acc1 = vector.extract %acc[1] : vector<[4]xi32> from vector<4x[4]xi32> + %acc2 = vector.extract %acc[2] : vector<[4]xi32> from vector<4x[4]xi32> + %acc3 = vector.extract %acc[3] : vector<[4]xi32> from vector<4x[4]xi32> + vector.print %acc0 : vector<[4]xi32> + vector.print %acc1 : vector<[4]xi32> + vector.print %acc2 : vector<[4]xi32> + vector.print %acc3 : vector<[4]xi32> + + // LHS test data + %lhs_cst = arith.constant dense<[[153, 161, 24, 157, 211, 154, 52, 27], + [168, 77, 136, 124, 249, 28, 13, 122], + [ 97, 82, 181, 39, 53, 25, 80, 240], + [184, 227, 106, 165, 126, 113, 121, 228]]> : vector<4x8xi8> + + %lhs_m = memref.alloca() : memref<4x8xi8> + vector.transfer_write %lhs_cst, %lhs_m[%c0, %c0] : vector<4x8xi8>, memref<4x8xi8> + %lhs = vector.transfer_read %lhs_m[%c0, %c0], %c0_i8 : memref<4x8xi8>, vector<4x8xi8> + + vector.print str "LHS:\n" + %lhs0 = vector.extract %lhs[0] : vector<8xi8> from vector<4x8xi8> + %lhs1 = vector.extract %lhs[1] : vector<8xi8> from vector<4x8xi8> + %lhs2 = vector.extract %lhs[2] : vector<8xi8> from vector<4x8xi8> + %lhs3 = vector.extract %lhs[3] : vector<8xi8> from vector<4x8xi8> + vector.print %lhs0 : vector<8xi8> + vector.print %lhs1 : vector<8xi8> + vector.print %lhs2 : vector<8xi8> + vector.print %lhs3 : vector<8xi8> + + // RHS test data + %rhs_cst = arith.constant dense<[[ 40, 27, 37, 43, 38, -6, 37, 49], + [-17, -50, -1, 48, -13, 22, 39, 33], + [-35, -24, 37, -32, 33, 30, -11, -17], + [-28, 31, 3, -44, -15, -27, 22, 35]]> : vector<4x8xi8> + + %rhs_m = memref.alloca() : memref<4x8xi8> + vector.transfer_write %rhs_cst, %rhs_m[%c0, %c0] : vector<4x8xi8>, memref<4x8xi8> + + %rhs_m1 = memref.collapse_shape %rhs_m [[0, 1]] : memref<4x8xi8> into memref<32xi8> + %rhs_flat = vector.transfer_read %rhs_m1[%c0], %c0_i8 {in_bounds = [true]} : memref<32xi8>, vector<[32]xi8> + + vector.print str "RHS:\n" + %rhs0 = vector.scalable.extract %rhs_flat[0] : vector<[16]xi8> from 
vector<[32]xi8> + %rhs1 = vector.scalable.extract %rhs_flat[16] : vector<[16]xi8> from vector<[32]xi8> + vector.print %rhs0 : vector<[16]xi8> + vector.print %rhs1 : vector<[16]xi8> + + %rhs = vector.shape_cast %rhs_flat : vector<[32]xi8> to vector<[4]x8xi8> + + // Matrix multiplication + %0 = arith.extui %lhs : vector<4x8xi8> to vector<4x8xi32> + %1 = arith.extsi %rhs : vector<[4]x8xi8> to vector<[4]x8xi32> + %2 = vector.contract {indexing_maps = #packed_maps, + iterator_types = ["parallel", "parallel", "reduction"], + kind = #vector.kind} %0, %1, %acc + : vector<4x8xi32>, vector<[4]x8xi32> into vector<4x[4]xi32> + + // Display the result of the multiplication + vector.print str "Result:\n" + %u0 = vector.extract %2[0] : vector<[4]xi32> from vector<4x[4]xi32> + %u1 = vector.extract %2[1] : vector<[4]xi32> from vector<4x[4]xi32> + %u2 = vector.extract %2[2] : vector<[4]xi32> from vector<4x[4]xi32> + %u3 = vector.extract %2[3] : vector<[4]xi32> from vector<4x[4]xi32> + vector.print %u0 : vector<[4]xi32> + vector.print %u1 : vector<[4]xi32> + vector.print %u2 : vector<[4]xi32> + vector.print %u3 : vector<[4]xi32> + + // CHECK: ( 28403, 445, -2759, -11409 ) + // CHECK: ( 34908, 1047, 142, -7274 ) + // CHECK: ( 31032, 6807, -2378, 7382 ) + // CHECK: ( 44217, 6396, -10930, 623 ) + return +} From 915fbf938354589f701c76291811b432d2149148 Mon Sep 17 00:00:00 2001 From: Momchil Velikov Date: Wed, 4 Jun 2025 17:00:17 +0000 Subject: [PATCH 2/5] [fixup] Check we are generating the expected number and kind of LLVM intrinsics --- .../CPU/ArmSVE/contraction-smmla-4x8x4.mlir | 99 ++++++++++--------- .../ArmSVE/contraction-smmla-8x8x8-vs2.mlir | 6 +- .../CPU/ArmSVE/contraction-summla-4x8x4.mlir | 6 +- .../CPU/ArmSVE/contraction-ummla-4x8x4.mlir | 7 +- .../CPU/ArmSVE/contraction-usmmla-4x8x4.mlir | 14 +-- 5 files changed, 70 insertions(+), 62 deletions(-) diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-smmla-4x8x4.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-smmla-4x8x4.mlir index 88534dd2aab1e..7f7d2c3d69b36 100644 --- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-smmla-4x8x4.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-smmla-4x8x4.mlir @@ -10,7 +10,7 @@ // DEFINE: %{run} = %mcr_aarch64_cmd %t -e %{entry_point} -entry-point-result=void --march=aarch64 --mattr="+sve,+i8mm" \ // DEFINE: -shared-libs=%mlir_runner_utils,%mlir_c_runner_utils,%native_mlir_arm_runner_utils -// RUN: rm -f %t && %{compile} && %{run} | FileCheck %s +// RUN: rm -f %t && %{compile} && FileCheck %s --input-file=%t -check-prefix CHECK-IR && %{run} | FileCheck %s #packed_maps = [ affine_map<(d0, d1, d2) -> (d0, d2)>, @@ -20,6 +20,45 @@ func.func private @setArmVLBits(%bits : i32) +func.func private @prepareAccTestData(%in: vector<4x4xi32>) -> vector<4x[4]xi32> { + %c0 = arith.constant 0 : index + %c0_i32 = arith.constant 0 : i32 + + %mem = memref.alloca() : memref<4x4xi32> + vector.transfer_write %in, %mem[%c0, %c0] : vector<4x4xi32>, memref<4x4xi32> + + %flat_mem = memref.collapse_shape %mem [[0, 1]] : memref<4x4xi32> into memref<16xi32> + %flat_vec = vector.transfer_read %flat_mem[%c0], %c0_i32 {in_bounds = [true]} : memref<16xi32>, vector<[16]xi32> + %out = vector.shape_cast %flat_vec : vector<[16]xi32> to vector<4x[4]xi32> + + return %out : vector<4x[4]xi32> +} + +func.func private @prepareLHSTestData(%in: vector<4x8xi8>) -> vector<4x8xi8> { + %c0 = arith.constant 0 : index + %c0_i8 = arith.constant 0 : i8 + + %mem = memref.alloca() : 
memref<4x8xi8> + vector.transfer_write %in, %mem[%c0, %c0] : vector<4x8xi8>, memref<4x8xi8> + + %out = vector.transfer_read %mem[%c0, %c0], %c0_i8 : memref<4x8xi8>, vector<4x8xi8> + + return %out : vector<4x8xi8> +} + +func.func private @prepareRHSTestData(%in: vector<4x8xi8>) -> vector<[32]xi8> { + %c0 = arith.constant 0 : index + %c0_i8 = arith.constant 0 : i8 + + %mem = memref.alloca() : memref<4x8xi8> + vector.transfer_write %in, %mem[%c0, %c0] : vector<4x8xi8>, memref<4x8xi8> + + %flat_mem = memref.collapse_shape %mem [[0, 1]] : memref<4x8xi8> into memref<32xi8> + %flat_vec = vector.transfer_read %flat_mem[%c0], %c0_i8 {in_bounds = [true]} : memref<32xi8>, vector<[32]xi8> + + return %flat_vec : vector<[32]xi8> +} + func.func @main() { %c128 = arith.constant 128 : i32 func.call @setArmVLBits(%c128) : (i32) -> () @@ -28,68 +67,32 @@ func.func @main() { %c0_i32 = arith.constant 0 : i32 %c0_i8 = arith.constant 0 : i8 -// Accumulator test data + // Accumulator test data %acc_cst = arith.constant dense<[[-44, 20, 44, -46], [ -8, 25, -34, 26], [-20, -36, -3, 39], [-48, -31, -25, -21]]> : vector<4x4xi32> - %acc_m = memref.alloca() : memref<4x4xi32> - vector.transfer_write %acc_cst, %acc_m[%c0, %c0] : vector<4x4xi32>, memref<4x4xi32> - - %acc_m1 = memref.collapse_shape %acc_m [[0, 1]] : memref<4x4xi32> into memref<16xi32> - %acc_flat = vector.transfer_read %acc_m1[%c0], %c0_i32 {in_bounds = [true]} : memref<16xi32>, vector<[16]xi32> - %acc = vector.shape_cast %acc_flat : vector<[16]xi32> to vector<4x[4]xi32> - - vector.print str "ACC:\n" - %acc0 = vector.extract %acc[0] : vector<[4]xi32> from vector<4x[4]xi32> - %acc1 = vector.extract %acc[1] : vector<[4]xi32> from vector<4x[4]xi32> - %acc2 = vector.extract %acc[2] : vector<[4]xi32> from vector<4x[4]xi32> - %acc3 = vector.extract %acc[3] : vector<[4]xi32> from vector<4x[4]xi32> - vector.print %acc0 : vector<[4]xi32> - vector.print %acc1 : vector<[4]xi32> - vector.print %acc2 : vector<[4]xi32> - vector.print %acc3 : vector<[4]xi32> + + %acc = func.call @prepareAccTestData(%acc_cst) : (vector<4x4xi32>) -> vector<4x[4]xi32> // LHS test data %lhs_cst = arith.constant dense<[[-35, -27, -36, -31, 23, -34, -8, -33], - [-20, 17, -32, -47, 37, 22, -7, -21], - [ -7, -35, 20, -4, 39, 46, -23, 40], - [ 40, 27, 37, 43, 38, -6, 37, 49]]> : vector<4x8xi8> - - %lhs_m = memref.alloca() : memref<4x8xi8> - vector.transfer_write %lhs_cst, %lhs_m[%c0, %c0] : vector<4x8xi8>, memref<4x8xi8> - %lhs = vector.transfer_read %lhs_m[%c0, %c0], %c0_i8 : memref<4x8xi8>, vector<4x8xi8> - - vector.print str "LHS:\n" - %lhs0 = vector.extract %lhs[0] : vector<8xi8> from vector<4x8xi8> - %lhs1 = vector.extract %lhs[1] : vector<8xi8> from vector<4x8xi8> - %lhs2 = vector.extract %lhs[2] : vector<8xi8> from vector<4x8xi8> - %lhs3 = vector.extract %lhs[3] : vector<8xi8> from vector<4x8xi8> - vector.print %lhs0 : vector<8xi8> - vector.print %lhs1 : vector<8xi8> - vector.print %lhs2 : vector<8xi8> - vector.print %lhs3 : vector<8xi8> + [-20, 17, -32, -47, 37, 22, -7, -21], + [ -7, -35, 20, -4, 39, 46, -23, 40], + [ 40, 27, 37, 43, 38, -6, 37, 49]]> : vector<4x8xi8> + + %lhs = func.call @prepareLHSTestData(%lhs_cst) : (vector<4x8xi8>) -> vector<4x8xi8> // RHS test data %rhs_cst = arith.constant dense<[[-17, -50, -1, 48, -13, 22, 39, 33], [-35, -24, 37, -32, 33, 30, -11, -17], [-28, 31, 3, -44, -15, -27, 22, 35], [-23, 39, 48, 26, -23, 32, -39, -38]]> : vector<4x8xi8> - - %rhs_m = memref.alloca() : memref<4x8xi8> - vector.transfer_write %rhs_cst, %rhs_m[%c0, %c0] : vector<4x8xi8>, 
memref<4x8xi8> - - %rhs_m1 = memref.collapse_shape %rhs_m [[0, 1]] : memref<4x8xi8> into memref<32xi8> - %rhs_flat = vector.transfer_read %rhs_m1[%c0], %c0_i8 {in_bounds = [true]} : memref<32xi8>, vector<[32]xi8> - - vector.print str "RHS:\n" - %rhs0 = vector.scalable.extract %rhs_flat[0] : vector<[16]xi8> from vector<[32]xi8> - %rhs1 = vector.scalable.extract %rhs_flat[16] : vector<[16]xi8> from vector<[32]xi8> - vector.print %rhs0 : vector<[16]xi8> - vector.print %rhs1 : vector<[16]xi8> - + %rhs_flat = func.call @prepareRHSTestData(%rhs_cst) : (vector<4x8xi8>) -> vector<[32]xi8> %rhs = vector.shape_cast %rhs_flat : vector<[32]xi8> to vector<[4]x8xi8> +// CHECK-IR-COUNT-4: arm_sve.intr.smmla + // Matrix multiplication %0 = arith.extsi %lhs : vector<4x8xi8> to vector<4x8xi32> %1 = arith.extsi %rhs : vector<[4]x8xi8> to vector<[4]x8xi32> diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-smmla-8x8x8-vs2.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-smmla-8x8x8-vs2.mlir index ce57be91fa540..4941ba43769a3 100644 --- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-smmla-8x8x8-vs2.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-smmla-8x8x8-vs2.mlir @@ -10,7 +10,7 @@ // DEFINE: %{run} = %mcr_aarch64_cmd %t -e %{entry_point} -entry-point-result=void --march=aarch64 --mattr="+sve,+i8mm" \ // DEFINE: -shared-libs=%mlir_runner_utils,%mlir_c_runner_utils,%native_mlir_arm_runner_utils -// RUN: rm -f %t && %{compile} && %{run} | FileCheck %s +// RUN: rm -f %t && %{compile} && FileCheck %s --input-file=%t -check-prefix CHECK-IR && %{run} | FileCheck %s #packed_maps = [ affine_map<(d0, d1, d2) -> (d0, d2)>, @@ -28,7 +28,6 @@ func.func @main() { %c0_i32 = arith.constant 0 : i32 %c0_i8 = arith.constant 0 : i8 - // Accumulator test data %acc_cst = arith.constant dense<[[-44, 20, 44, -46, -8, 25, -34, 26], [-20, -36, -3, 39, -48, -31, -25, -21], @@ -119,6 +118,8 @@ func.func @main() { %rhs = vector.shape_cast %rhs_flat : vector<[32]xi8> to vector<[4]x8xi8> +// CHECK-IR-COUNT-8: arm_sve.intr.smmla + // Matrix multiplication %0 = arith.extsi %lhs : vector<8x8xi8> to vector<8x8xi32> %1 = arith.extsi %rhs : vector<[4]x8xi8> to vector<[4]x8xi32> @@ -146,7 +147,6 @@ func.func @main() { vector.print %u6 : vector<[4]xi32> vector.print %u7 : vector<[4]xi32> - // CHECK: ( -2294, -1282, 2728, -410, -1328, 882, -5498, 732 ) // CHECK: ( 1012, -4237, 4154, 2624, 5225, -2338, 2011, 1374 ) // CHECK: ( -8, -1611, 2905, -1, -1068, -3155, -2428, 153 ) diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-summla-4x8x4.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-summla-4x8x4.mlir index f1f311ddb0c18..e5621dc6171d6 100644 --- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-summla-4x8x4.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-summla-4x8x4.mlir @@ -10,7 +10,7 @@ // DEFINE: %{run} = %mcr_aarch64_cmd %t -e %{entry_point} -entry-point-result=void --march=aarch64 --mattr="+sve,+i8mm" \ // DEFINE: -shared-libs=%mlir_runner_utils,%mlir_c_runner_utils,%native_mlir_arm_runner_utils -// RUN: rm -f %t && %{compile} && %{run} | FileCheck %s +// RUN: rm -f %t && %{compile} && FileCheck %s --input-file=%t -check-prefix CHECK-IR && %{run} | FileCheck %s #packed_maps = [ affine_map<(d0, d1, d2) -> (d0, d2)>, @@ -28,7 +28,7 @@ func.func @main() { %c0_i32 = arith.constant 0 : i32 %c0_i8 = arith.constant 0 : i8 -// Accumulator test data + // Accumulator test data %acc_cst = 
arith.constant dense<[[-44, 20, 44, -46], [ -8, 25, -34, 26], [-20, -36, -3, 39], @@ -90,6 +90,8 @@ func.func @main() { %rhs = vector.shape_cast %rhs_flat : vector<[32]xi8> to vector<[4]x8xi8> +// CHECK-IR-COUNT-4: arm_sve.intr.usmmla + // Matrix multiplication %0 = arith.extsi %lhs : vector<4x8xi8> to vector<4x8xi32> %1 = arith.extui %rhs : vector<[4]x8xi8> to vector<[4]x8xi32> diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-ummla-4x8x4.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-ummla-4x8x4.mlir index 7af0b2c3f1054..60b3271a77b70 100644 --- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-ummla-4x8x4.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-ummla-4x8x4.mlir @@ -10,7 +10,7 @@ // DEFINE: %{run} = %mcr_aarch64_cmd %t -e %{entry_point} -entry-point-result=void --march=aarch64 --mattr="+sve,+i8mm" \ // DEFINE: -shared-libs=%mlir_runner_utils,%mlir_c_runner_utils,%native_mlir_arm_runner_utils -// RUN: rm -f %t && %{compile} && %{run} | FileCheck %s +// RUN: rm -f %t && %{compile} && FileCheck %s --input-file=%t -check-prefix CHECK-IR && %{run} | FileCheck %s #packed_maps = [ affine_map<(d0, d1, d2) -> (d0, d2)>, @@ -29,8 +29,7 @@ func.func @main() { %c0_i32 = arith.constant 0 : i32 %c0_i8 = arith.constant 0 : i8 - -// Accumulator test data + // Accumulator test data %acc_cst = arith.constant dense<[[16, 16, 48, 40], [40, 24, 35, 12], [33, 24, 29, 19], @@ -92,6 +91,8 @@ func.func @main() { %rhs = vector.shape_cast %rhs_flat : vector<[32]xi8> to vector<[4]x8xi8> +// CHECK-IR-COUNT-4: arm_sve.intr.ummla + // Matrix multiplication %0 = arith.extui %lhs : vector<4x8xi8> to vector<4x8xi32> %1 = arith.extui %rhs : vector<[4]x8xi8> to vector<[4]x8xi32> diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-usmmla-4x8x4.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-usmmla-4x8x4.mlir index a25a51dd7018c..319539b5cfc54 100644 --- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-usmmla-4x8x4.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-usmmla-4x8x4.mlir @@ -10,7 +10,7 @@ // DEFINE: %{run} = %mcr_aarch64_cmd %t -e %{entry_point} -entry-point-result=void --march=aarch64 --mattr="+sve,+i8mm" \ // DEFINE: -shared-libs=%mlir_runner_utils,%mlir_c_runner_utils,%native_mlir_arm_runner_utils -// RUN: rm -f %t && %{compile} && %{run} | FileCheck %s +// RUN: rm -f %t && %{compile} && FileCheck %s --input-file=%t -check-prefix CHECK-IR && %{run} | FileCheck %s #packed_maps = [ affine_map<(d0, d1, d2) -> (d0, d2)>, @@ -28,7 +28,7 @@ func.func @main() { %c0_i32 = arith.constant 0 : i32 %c0_i8 = arith.constant 0 : i8 -// Accumulator test data + // Accumulator test data %acc_cst = arith.constant dense<[[-44, 20, 44, -46], [ -8, 25, -34, 26], [-20, -36, -3, 39], @@ -90,6 +90,8 @@ func.func @main() { %rhs = vector.shape_cast %rhs_flat : vector<[32]xi8> to vector<[4]x8xi8> +// CHECK-IR-COUNT-4: arm_sve.intr.usmmla + // Matrix multiplication %0 = arith.extui %lhs : vector<4x8xi8> to vector<4x8xi32> %1 = arith.extsi %rhs : vector<[4]x8xi8> to vector<[4]x8xi32> @@ -109,9 +111,9 @@ func.func @main() { vector.print %u2 : vector<[4]xi32> vector.print %u3 : vector<[4]xi32> - // CHECK: ( 28403, 445, -2759, -11409 ) - // CHECK: ( 34908, 1047, 142, -7274 ) - // CHECK: ( 31032, 6807, -2378, 7382 ) - // CHECK: ( 44217, 6396, -10930, 623 ) +// CHECK: ( 28403, 445, -2759, -11409 ) +// CHECK: ( 34908, 1047, 142, -7274 ) +// CHECK: ( 31032, 6807, -2378, 7382 ) +// CHECK: ( 
44217, 6396, -10930, 623 ) return } From 245b6bcf74d18778023556f58731be7eff391d38 Mon Sep 17 00:00:00 2001 From: Momchil Velikov Date: Thu, 12 Jun 2025 14:22:33 +0000 Subject: [PATCH 3/5] [fixup] Refactor a test, remove redundant test files --- .../CPU/ArmSVE/contraction-smmla-4x8x4.mlir | 120 ------ .../ArmSVE/contraction-smmla-8x8x8-vs2.mlir | 159 -------- .../CPU/ArmSVE/contraction-summla-4x8x4.mlir | 120 ------ .../CPU/ArmSVE/contraction-ummla-4x8x4.mlir | 120 ------ .../CPU/ArmSVE/contraction-usmmla-4x8x4.mlir | 119 ------ .../CPU/ArmSVE/vector-contract-i8mm.mlir | 375 ++++++++++++++++++ 6 files changed, 375 insertions(+), 638 deletions(-) delete mode 100644 mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-smmla-4x8x4.mlir delete mode 100644 mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-smmla-8x8x8-vs2.mlir delete mode 100644 mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-summla-4x8x4.mlir delete mode 100644 mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-ummla-4x8x4.mlir delete mode 100644 mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-usmmla-4x8x4.mlir create mode 100644 mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/vector-contract-i8mm.mlir diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-smmla-4x8x4.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-smmla-4x8x4.mlir deleted file mode 100644 index 7f7d2c3d69b36..0000000000000 --- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-smmla-4x8x4.mlir +++ /dev/null @@ -1,120 +0,0 @@ -// REQUIRES: arm-emulator - -// DEFINE: %{compile} = mlir-opt %s \ -// DEFINE: --convert-vector-to-scf --convert-scf-to-cf --convert-vector-to-llvm='enable-arm-sve enable-arm-i8mm' \ -// DEFINE: --expand-strided-metadata --convert-to-llvm --finalize-memref-to-llvm --reconcile-unrealized-casts \ -// DEFINE: -o %t - -// DEFINE: %{entry_point} = main - -// DEFINE: %{run} = %mcr_aarch64_cmd %t -e %{entry_point} -entry-point-result=void --march=aarch64 --mattr="+sve,+i8mm" \ -// DEFINE: -shared-libs=%mlir_runner_utils,%mlir_c_runner_utils,%native_mlir_arm_runner_utils - -// RUN: rm -f %t && %{compile} && FileCheck %s --input-file=%t -check-prefix CHECK-IR && %{run} | FileCheck %s - -#packed_maps = [ - affine_map<(d0, d1, d2) -> (d0, d2)>, - affine_map<(d0, d1, d2) -> (d1, d2)>, - affine_map<(d0, d1, d2) -> (d0, d1)> -] - -func.func private @setArmVLBits(%bits : i32) - -func.func private @prepareAccTestData(%in: vector<4x4xi32>) -> vector<4x[4]xi32> { - %c0 = arith.constant 0 : index - %c0_i32 = arith.constant 0 : i32 - - %mem = memref.alloca() : memref<4x4xi32> - vector.transfer_write %in, %mem[%c0, %c0] : vector<4x4xi32>, memref<4x4xi32> - - %flat_mem = memref.collapse_shape %mem [[0, 1]] : memref<4x4xi32> into memref<16xi32> - %flat_vec = vector.transfer_read %flat_mem[%c0], %c0_i32 {in_bounds = [true]} : memref<16xi32>, vector<[16]xi32> - %out = vector.shape_cast %flat_vec : vector<[16]xi32> to vector<4x[4]xi32> - - return %out : vector<4x[4]xi32> -} - -func.func private @prepareLHSTestData(%in: vector<4x8xi8>) -> vector<4x8xi8> { - %c0 = arith.constant 0 : index - %c0_i8 = arith.constant 0 : i8 - - %mem = memref.alloca() : memref<4x8xi8> - vector.transfer_write %in, %mem[%c0, %c0] : vector<4x8xi8>, memref<4x8xi8> - - %out = vector.transfer_read %mem[%c0, %c0], %c0_i8 : memref<4x8xi8>, vector<4x8xi8> - - return %out : vector<4x8xi8> -} - -func.func private @prepareRHSTestData(%in: vector<4x8xi8>) -> vector<[32]xi8> { - %c0 = arith.constant 0 : 
index - %c0_i8 = arith.constant 0 : i8 - - %mem = memref.alloca() : memref<4x8xi8> - vector.transfer_write %in, %mem[%c0, %c0] : vector<4x8xi8>, memref<4x8xi8> - - %flat_mem = memref.collapse_shape %mem [[0, 1]] : memref<4x8xi8> into memref<32xi8> - %flat_vec = vector.transfer_read %flat_mem[%c0], %c0_i8 {in_bounds = [true]} : memref<32xi8>, vector<[32]xi8> - - return %flat_vec : vector<[32]xi8> -} - -func.func @main() { - %c128 = arith.constant 128 : i32 - func.call @setArmVLBits(%c128) : (i32) -> () - - %c0 = arith.constant 0 : index - %c0_i32 = arith.constant 0 : i32 - %c0_i8 = arith.constant 0 : i8 - - // Accumulator test data - %acc_cst = arith.constant dense<[[-44, 20, 44, -46], - [ -8, 25, -34, 26], - [-20, -36, -3, 39], - [-48, -31, -25, -21]]> : vector<4x4xi32> - - %acc = func.call @prepareAccTestData(%acc_cst) : (vector<4x4xi32>) -> vector<4x[4]xi32> - - // LHS test data - %lhs_cst = arith.constant dense<[[-35, -27, -36, -31, 23, -34, -8, -33], - [-20, 17, -32, -47, 37, 22, -7, -21], - [ -7, -35, 20, -4, 39, 46, -23, 40], - [ 40, 27, 37, 43, 38, -6, 37, 49]]> : vector<4x8xi8> - - %lhs = func.call @prepareLHSTestData(%lhs_cst) : (vector<4x8xi8>) -> vector<4x8xi8> - - // RHS test data - %rhs_cst = arith.constant dense<[[-17, -50, -1, 48, -13, 22, 39, 33], - [-35, -24, 37, -32, 33, 30, -11, -17], - [-28, 31, 3, -44, -15, -27, 22, 35], - [-23, 39, 48, 26, -23, 32, -39, -38]]> : vector<4x8xi8> - %rhs_flat = func.call @prepareRHSTestData(%rhs_cst) : (vector<4x8xi8>) -> vector<[32]xi8> - %rhs = vector.shape_cast %rhs_flat : vector<[32]xi8> to vector<[4]x8xi8> - -// CHECK-IR-COUNT-4: arm_sve.intr.smmla - - // Matrix multiplication - %0 = arith.extsi %lhs : vector<4x8xi8> to vector<4x8xi32> - %1 = arith.extsi %rhs : vector<[4]x8xi8> to vector<[4]x8xi32> - %2 = vector.contract {indexing_maps = #packed_maps, - iterator_types = ["parallel", "parallel", "reduction"], - kind = #vector.kind} %0, %1, %acc - : vector<4x8xi32>, vector<[4]x8xi32> into vector<4x[4]xi32> - - // Display the result of the multiplication - vector.print str "Result:\n" - %u0 = vector.extract %2[0] : vector<[4]xi32> from vector<4x[4]xi32> - %u1 = vector.extract %2[1] : vector<[4]xi32> from vector<4x[4]xi32> - %u2 = vector.extract %2[2] : vector<[4]xi32> from vector<4x[4]xi32> - %u3 = vector.extract %2[3] : vector<[4]xi32> from vector<4x[4]xi32> - vector.print %u0 : vector<[4]xi32> - vector.print %u1 : vector<[4]xi32> - vector.print %u2 : vector<[4]xi32> - vector.print %u3 : vector<[4]xi32> - -// CHECK: ( -1999, 1941, 685, -2879 ) -// CHECK: ( -3705, 2952, 987, -685 ) -// CHECK: ( 2565, 4157, -1589, -357 ) -// CHECK: ( 2383, -2252, 32, -1365 ) - return -} diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-smmla-8x8x8-vs2.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-smmla-8x8x8-vs2.mlir deleted file mode 100644 index 4941ba43769a3..0000000000000 --- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-smmla-8x8x8-vs2.mlir +++ /dev/null @@ -1,159 +0,0 @@ -// REQUIRES: arm-emulator - -// DEFINE: %{compile} = mlir-opt %s \ -// DEFINE: --convert-vector-to-scf --convert-scf-to-cf --convert-vector-to-llvm='enable-arm-sve enable-arm-i8mm' \ -// DEFINE: --expand-strided-metadata --convert-to-llvm --finalize-memref-to-llvm --reconcile-unrealized-casts \ -// DEFINE: -o %t - -// DEFINE: %{entry_point} = main - -// DEFINE: %{run} = %mcr_aarch64_cmd %t -e %{entry_point} -entry-point-result=void --march=aarch64 --mattr="+sve,+i8mm" \ -// DEFINE: 
-shared-libs=%mlir_runner_utils,%mlir_c_runner_utils,%native_mlir_arm_runner_utils - -// RUN: rm -f %t && %{compile} && FileCheck %s --input-file=%t -check-prefix CHECK-IR && %{run} | FileCheck %s - -#packed_maps = [ - affine_map<(d0, d1, d2) -> (d0, d2)>, - affine_map<(d0, d1, d2) -> (d1, d2)>, - affine_map<(d0, d1, d2) -> (d0, d1)> -] - -func.func private @setArmVLBits(%bits : i32) - -func.func @main() { - %c256 = arith.constant 256 : i32 - func.call @setArmVLBits(%c256) : (i32) -> () - - %c0 = arith.constant 0 : index - %c0_i32 = arith.constant 0 : i32 - %c0_i8 = arith.constant 0 : i8 - - // Accumulator test data - %acc_cst = arith.constant dense<[[-44, 20, 44, -46, -8, 25, -34, 26], - [-20, -36, -3, 39, -48, -31, -25, -21], - [-35, -27, -36, -31, 23, -34, -8, -33], - [-20, 17, -32, -47, 37, 22, -7, -21], - [ -7, -35, 20, -4, 39, 46, -23, 40], - [ 40, 27, 37, 43, 38, -6, 37, 49], - [-17, -50, -1, 48, -13, 22, 39, 33], - [-35, -24, 37, -32, 33, 30, -11, -17]]> : vector<8x8xi32> - %acc_m = memref.alloca() : memref<8x8xi32> - vector.transfer_write %acc_cst, %acc_m[%c0, %c0] : vector<8x8xi32>, memref<8x8xi32> - - %acc_m1 = memref.collapse_shape %acc_m [[0, 1]] : memref<8x8xi32> into memref<64xi32> - %acc_flat = vector.transfer_read %acc_m1[%c0], %c0_i32 {in_bounds = [true]} : memref<64xi32>, vector<[32]xi32> - %acc = vector.shape_cast %acc_flat : vector<[32]xi32> to vector<8x[4]xi32> - - vector.print str "ACC:\n" - %acc0 = vector.extract %acc[0] : vector<[4]xi32> from vector<8x[4]xi32> - %acc1 = vector.extract %acc[1] : vector<[4]xi32> from vector<8x[4]xi32> - %acc2 = vector.extract %acc[2] : vector<[4]xi32> from vector<8x[4]xi32> - %acc3 = vector.extract %acc[3] : vector<[4]xi32> from vector<8x[4]xi32> - %acc4 = vector.extract %acc[4] : vector<[4]xi32> from vector<8x[4]xi32> - %acc5 = vector.extract %acc[5] : vector<[4]xi32> from vector<8x[4]xi32> - %acc6 = vector.extract %acc[6] : vector<[4]xi32> from vector<8x[4]xi32> - %acc7 = vector.extract %acc[7] : vector<[4]xi32> from vector<8x[4]xi32> - vector.print %acc0 : vector<[4]xi32> - vector.print %acc1 : vector<[4]xi32> - vector.print %acc2 : vector<[4]xi32> - vector.print %acc3 : vector<[4]xi32> - vector.print %acc4 : vector<[4]xi32> - vector.print %acc5 : vector<[4]xi32> - vector.print %acc6 : vector<[4]xi32> - vector.print %acc7 : vector<[4]xi32> - - // LHS test data - %lhs_cst = arith.constant dense<[[-28, 31, 3, -44, -15, -27, 22, 35], - [-23, 39, 48, 26, -23, 32, -39, -38], - [ -3, 9, 43, -30, -32, 39, 41, -39], - [-13, -21, -25, 27, 47, -36, -11, -11], - [ -4, -20, 36, 11, 13, -23, 24, -13], - [-20, 30, -5, 1, 42, -37, -22, 35], - [-22, 38, -4, 44, 25, -31, 23, -39], - [-45, -4, -31, -24, 14, -41, -47, 22]]> : vector<8x8xi8> - - %lhs_m = memref.alloca() : memref<8x8xi8> - vector.transfer_write %lhs_cst, %lhs_m[%c0, %c0] : vector<8x8xi8>, memref<8x8xi8> - %lhs = vector.transfer_read %lhs_m[%c0, %c0], %c0_i8 : memref<8x8xi8>, vector<8x8xi8> - - vector.print str "LHS:\n" - %lhs0 = vector.extract %lhs[0] : vector<8xi8> from vector<8x8xi8> - %lhs1 = vector.extract %lhs[1] : vector<8xi8> from vector<8x8xi8> - %lhs2 = vector.extract %lhs[2] : vector<8xi8> from vector<8x8xi8> - %lhs3 = vector.extract %lhs[3] : vector<8xi8> from vector<8x8xi8> - %lhs4 = vector.extract %lhs[4] : vector<8xi8> from vector<8x8xi8> - %lhs5 = vector.extract %lhs[5] : vector<8xi8> from vector<8x8xi8> - %lhs6 = vector.extract %lhs[6] : vector<8xi8> from vector<8x8xi8> - %lhs7 = vector.extract %lhs[7] : vector<8xi8> from vector<8x8xi8> - vector.print %lhs0 : 
vector<8xi8>
-  vector.print %lhs1 : vector<8xi8>
-  vector.print %lhs2 : vector<8xi8>
-  vector.print %lhs3 : vector<8xi8>
-  vector.print %lhs4 : vector<8xi8>
-  vector.print %lhs5 : vector<8xi8>
-  vector.print %lhs6 : vector<8xi8>
-  vector.print %lhs7 : vector<8xi8>
-
-  // RHS test data
-  %rhs_cst = arith.constant dense<[[-40, -11, -36,  36,  -1,  20,  14, -32],
-                                   [ 46, -45, -48, -46, -24,  31, -36,  22],
-                                   [  2,  36,  45, -29, -37, -49, -20, -35],
-                                   [ -6,  23,  23,  15,  20,   4,  -8,  -2],
-                                   [-35,  -6,  16,  49, -50,   9, -44,  13],
-                                   [ 24,   1,  -4, -44,  41,  15, -43,  44],
-                                   [ 44,   0, -10,  41,  22,  44, -40,   0],
-                                   [-33,  19,  27,  22,  38, -17,  23,  -9]]> : vector<8x8xi8>
-
-  %rhs_m = memref.alloca() : memref<8x8xi8>
-  vector.transfer_write %rhs_cst, %rhs_m[%c0, %c0] : vector<8x8xi8>, memref<8x8xi8>
-
-  %rhs_m1 = memref.collapse_shape %rhs_m [[0, 1]] : memref<8x8xi8> into memref<64xi8>
-  %rhs_flat = vector.transfer_read %rhs_m1[%c0], %c0_i8 {in_bounds = [true]} : memref<64xi8>, vector<[32]xi8>
-
-  vector.print str "RHS:\n"
-  %rhs0 = vector.scalable.extract %rhs_flat[ 0] : vector<[16]xi8> from vector<[32]xi8>
-  %rhs1 = vector.scalable.extract %rhs_flat[16] : vector<[16]xi8> from vector<[32]xi8>
-  vector.print %rhs0 : vector<[16]xi8>
-  vector.print %rhs1 : vector<[16]xi8>
-
-  %rhs = vector.shape_cast %rhs_flat : vector<[32]xi8> to vector<[4]x8xi8>
-
-// CHECK-IR-COUNT-8: arm_sve.intr.smmla
-
-  // Matrix multiplication
-  %0 = arith.extsi %lhs : vector<8x8xi8> to vector<8x8xi32>
-  %1 = arith.extsi %rhs : vector<[4]x8xi8> to vector<[4]x8xi32>
-  %2 = vector.contract {indexing_maps = #packed_maps,
-                        iterator_types = ["parallel", "parallel", "reduction"],
-                        kind = #vector.kind} %0, %1, %acc
-    : vector<8x8xi32>, vector<[4]x8xi32> into vector<8x[4]xi32>
-
-  // Display the result of the multiplication
-  vector.print str "Result:\n"
-  %u0 = vector.extract %2[0] : vector<[4]xi32> from vector<8x[4]xi32>
-  %u1 = vector.extract %2[1] : vector<[4]xi32> from vector<8x[4]xi32>
-  %u2 = vector.extract %2[2] : vector<[4]xi32> from vector<8x[4]xi32>
-  %u3 = vector.extract %2[3] : vector<[4]xi32> from vector<8x[4]xi32>
-  %u4 = vector.extract %2[4] : vector<[4]xi32> from vector<8x[4]xi32>
-  %u5 = vector.extract %2[5] : vector<[4]xi32> from vector<8x[4]xi32>
-  %u6 = vector.extract %2[6] : vector<[4]xi32> from vector<8x[4]xi32>
-  %u7 = vector.extract %2[7] : vector<[4]xi32> from vector<8x[4]xi32>
-  vector.print %u0 : vector<[4]xi32>
-  vector.print %u1 : vector<[4]xi32>
-  vector.print %u2 : vector<[4]xi32>
-  vector.print %u3 : vector<[4]xi32>
-  vector.print %u4 : vector<[4]xi32>
-  vector.print %u5 : vector<[4]xi32>
-  vector.print %u6 : vector<[4]xi32>
-  vector.print %u7 : vector<[4]xi32>
-
-// CHECK: ( -2294, -1282, 2728, -410, -1328, 882, -5498, 732 )
-// CHECK: ( 1012, -4237, 4154, 2624, 5225, -2338, 2011, 1374 )
-// CHECK: ( -8, -1611, 2905, -1, -1068, -3155, -2428, 153 )
-// CHECK: ( 2034, -1768, -2092, 284, -792, -23, 668, 2172 )
-// CHECK: ( -248, -3728, 1214, 555, -668, -2114, -1794, 2560 )
-// CHECK: ( -1484, -2642, 297, 1551, -483, 3173, -576, 2570 )
-// CHECK: ( 3098, -7851, 1366, 1892, -427, -4533, -819, 4698 )
-// CHECK: ( -135, 1247, 765, -479, 1245, 3074, -2281, -23 )
-  return
-}
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-summla-4x8x4.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-summla-4x8x4.mlir
deleted file mode 100644
index e5621dc6171d6..0000000000000
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-summla-4x8x4.mlir
+++ /dev/null
@@ -1,120 +0,0 @@
-// REQUIRES: 
arm-emulator - -// DEFINE: %{compile} = mlir-opt %s \ -// DEFINE: --convert-vector-to-scf --convert-scf-to-cf --convert-vector-to-llvm='enable-arm-sve enable-arm-i8mm' \ -// DEFINE: --expand-strided-metadata --convert-to-llvm --finalize-memref-to-llvm --reconcile-unrealized-casts \ -// DEFINE: -o %t - -// DEFINE: %{entry_point} = main - -// DEFINE: %{run} = %mcr_aarch64_cmd %t -e %{entry_point} -entry-point-result=void --march=aarch64 --mattr="+sve,+i8mm" \ -// DEFINE: -shared-libs=%mlir_runner_utils,%mlir_c_runner_utils,%native_mlir_arm_runner_utils - -// RUN: rm -f %t && %{compile} && FileCheck %s --input-file=%t -check-prefix CHECK-IR && %{run} | FileCheck %s - -#packed_maps = [ - affine_map<(d0, d1, d2) -> (d0, d2)>, - affine_map<(d0, d1, d2) -> (d1, d2)>, - affine_map<(d0, d1, d2) -> (d0, d1)> -] - -func.func private @setArmVLBits(%bits : i32) - -func.func @main() { - %c128 = arith.constant 128 : i32 - func.call @setArmVLBits(%c128) : (i32) -> () - - %c0 = arith.constant 0 : index - %c0_i32 = arith.constant 0 : i32 - %c0_i8 = arith.constant 0 : i8 - - // Accumulator test data - %acc_cst = arith.constant dense<[[-44, 20, 44, -46], - [ -8, 25, -34, 26], - [-20, -36, -3, 39], - [-48, -31, -25, -21]]> : vector<4x4xi32> - %acc_m = memref.alloca() : memref<4x4xi32> - vector.transfer_write %acc_cst, %acc_m[%c0, %c0] : vector<4x4xi32>, memref<4x4xi32> - - %acc_m1 = memref.collapse_shape %acc_m [[0, 1]] : memref<4x4xi32> into memref<16xi32> - %acc_flat = vector.transfer_read %acc_m1[%c0], %c0_i32 {in_bounds = [true]} : memref<16xi32>, vector<[16]xi32> - %acc = vector.shape_cast %acc_flat : vector<[16]xi32> to vector<4x[4]xi32> - - vector.print str "ACC:\n" - %acc0 = vector.extract %acc[0] : vector<[4]xi32> from vector<4x[4]xi32> - %acc1 = vector.extract %acc[1] : vector<[4]xi32> from vector<4x[4]xi32> - %acc2 = vector.extract %acc[2] : vector<[4]xi32> from vector<4x[4]xi32> - %acc3 = vector.extract %acc[3] : vector<[4]xi32> from vector<4x[4]xi32> - vector.print %acc0 : vector<[4]xi32> - vector.print %acc1 : vector<[4]xi32> - vector.print %acc2 : vector<[4]xi32> - vector.print %acc3 : vector<[4]xi32> - - // LHS test data - %lhs_cst = arith.constant dense<[[-35, -27, -36, -31, 23, -34, -8, -33], - [-20, 17, -32, -47, 37, 22, -7, -21], - [ -7, -35, 20, -4, 39, 46, -23, 40], - [ 40, 27, 37, 43, 38, -6, 37, 49]]> : vector<4x8xi8> - - %lhs_m = memref.alloca() : memref<4x8xi8> - vector.transfer_write %lhs_cst, %lhs_m[%c0, %c0] : vector<4x8xi8>, memref<4x8xi8> - %lhs = vector.transfer_read %lhs_m[%c0, %c0], %c0_i8 : memref<4x8xi8>, vector<4x8xi8> - - vector.print str "LHS:\n" - %lhs0 = vector.extract %lhs[0] : vector<8xi8> from vector<4x8xi8> - %lhs1 = vector.extract %lhs[1] : vector<8xi8> from vector<4x8xi8> - %lhs2 = vector.extract %lhs[2] : vector<8xi8> from vector<4x8xi8> - %lhs3 = vector.extract %lhs[3] : vector<8xi8> from vector<4x8xi8> - vector.print %lhs0 : vector<8xi8> - vector.print %lhs1 : vector<8xi8> - vector.print %lhs2 : vector<8xi8> - vector.print %lhs3 : vector<8xi8> - - // RHS test data - %rhs_cst = arith.constant dense<[[125, 171, 138, 187, 108, 175, 82, 99], - [221, 25, 164, 97, 156, 221, 218, 177], - [171, 160, 219, 191, 144, 45, 161, 210], - [223, 165, 123, 99, 108, 86, 37, 92]]> : vector<4x8xi8> - - %rhs_m = memref.alloca() : memref<4x8xi8> - vector.transfer_write %rhs_cst, %rhs_m[%c0, %c0] : vector<4x8xi8>, memref<4x8xi8> - - %rhs_m1 = memref.collapse_shape %rhs_m [[0, 1]] : memref<4x8xi8> into memref<32xi8> - %rhs_flat = vector.transfer_read %rhs_m1[%c0], %c0_i8 {in_bounds = 
[true]} : memref<32xi8>, vector<[32]xi8> - - vector.print str "RHS:\n" - %rhs0 = vector.scalable.extract %rhs_flat[0] : vector<[16]xi8> from vector<[32]xi8> - %rhs1 = vector.scalable.extract %rhs_flat[16] : vector<[16]xi8> from vector<[32]xi8> - vector.print %rhs0 : vector<[16]xi8> - vector.print %rhs1 : vector<[16]xi8> - - %rhs = vector.shape_cast %rhs_flat : vector<[32]xi8> to vector<[4]x8xi8> - -// CHECK-IR-COUNT-4: arm_sve.intr.usmmla - - // Matrix multiplication - %0 = arith.extsi %lhs : vector<4x8xi8> to vector<4x8xi32> - %1 = arith.extui %rhs : vector<[4]x8xi8> to vector<[4]x8xi32> - %2 = vector.contract {indexing_maps = #packed_maps, - iterator_types = ["parallel", "parallel", "reduction"], - kind = #vector.kind} %0, %1, %acc - : vector<4x8xi32>, vector<[4]x8xi32> into vector<4x[4]xi32> - - // Display the result of the multiplication - vector.print str "Result:\n" - %u0 = vector.extract %2[0] : vector<[4]xi32> from vector<4x[4]xi32> - %u1 = vector.extract %2[1] : vector<[4]xi32> from vector<4x[4]xi32> - %u2 = vector.extract %2[2] : vector<[4]xi32> from vector<4x[4]xi32> - %u3 = vector.extract %2[3] : vector<[4]xi32> from vector<4x[4]xi32> - vector.print %u0 : vector<[4]xi32> - vector.print %u1 : vector<[4]xi32> - vector.print %u2 : vector<[4]xi32> - vector.print %u3 : vector<[4]xi32> - -// CHECK: ( -27190, -28812, -30502, -23575 ) -// CHECK: ( -7613, -8386, -15938, -6521 ) -// CHECK: ( 9468, 18750, 9199, 5764 ) -// CHECK: ( 33655, 41064, 48900, 31627 ) - return -} - diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-ummla-4x8x4.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-ummla-4x8x4.mlir deleted file mode 100644 index 60b3271a77b70..0000000000000 --- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-ummla-4x8x4.mlir +++ /dev/null @@ -1,120 +0,0 @@ -// REQUIRES: arm-emulator - -// DEFINE: %{compile} = mlir-opt %s \ -// DEFINE: --convert-vector-to-scf --convert-scf-to-cf --convert-vector-to-llvm='enable-arm-sve enable-arm-i8mm' \ -// DEFINE: --expand-strided-metadata --convert-to-llvm --finalize-memref-to-llvm --reconcile-unrealized-casts \ -// DEFINE: -o %t - -// DEFINE: %{entry_point} = main - -// DEFINE: %{run} = %mcr_aarch64_cmd %t -e %{entry_point} -entry-point-result=void --march=aarch64 --mattr="+sve,+i8mm" \ -// DEFINE: -shared-libs=%mlir_runner_utils,%mlir_c_runner_utils,%native_mlir_arm_runner_utils - -// RUN: rm -f %t && %{compile} && FileCheck %s --input-file=%t -check-prefix CHECK-IR && %{run} | FileCheck %s - -#packed_maps = [ - affine_map<(d0, d1, d2) -> (d0, d2)>, - affine_map<(d0, d1, d2) -> (d1, d2)>, - affine_map<(d0, d1, d2) -> (d0, d1)> -] - -func.func private @setArmVLBits(%bits : i32) - -func.func @main() { - - %c128 = arith.constant 128 : i32 - func.call @setArmVLBits(%c128) : (i32) -> () - - %c0 = arith.constant 0 : index - %c0_i32 = arith.constant 0 : i32 - %c0_i8 = arith.constant 0 : i8 - - // Accumulator test data - %acc_cst = arith.constant dense<[[16, 16, 48, 40], - [40, 24, 35, 12], - [33, 24, 29, 19], - [28, 13, 33, 18]]> : vector<4x4xi32> - %acc_m = memref.alloca() : memref<4x4xi32> - vector.transfer_write %acc_cst, %acc_m[%c0, %c0] : vector<4x4xi32>, memref<4x4xi32> - - %acc_m1 = memref.collapse_shape %acc_m [[0, 1]] : memref<4x4xi32> into memref<16xi32> - %acc_flat = vector.transfer_read %acc_m1[%c0], %c0_i32 {in_bounds = [true]} : memref<16xi32>, vector<[16]xi32> - %acc = vector.shape_cast %acc_flat : vector<[16]xi32> to vector<4x[4]xi32> - - vector.print str "ACC:\n" - %acc0 = vector.extract 
%acc[0] : vector<[4]xi32> from vector<4x[4]xi32> - %acc1 = vector.extract %acc[1] : vector<[4]xi32> from vector<4x[4]xi32> - %acc2 = vector.extract %acc[2] : vector<[4]xi32> from vector<4x[4]xi32> - %acc3 = vector.extract %acc[3] : vector<[4]xi32> from vector<4x[4]xi32> - vector.print %acc0 : vector<[4]xi32> - vector.print %acc1 : vector<[4]xi32> - vector.print %acc2 : vector<[4]xi32> - vector.print %acc3 : vector<[4]xi32> - - // LHS test data - %lhs_cst = arith.constant dense<[[35, 42, 37, 49, 36, 36, 23, 33], - [39, 34, 33, 45, 43, 10, 44, 47], - [18, 35, 29, 25, 36, 33, 28, 29], - [26, 49, 43, 32, 27, 16, 45, 33]]> : vector<4x8xi8> - - %lhs_m = memref.alloca() : memref<4x8xi8> - vector.transfer_write %lhs_cst, %lhs_m[%c0, %c0] : vector<4x8xi8>, memref<4x8xi8> - %lhs = vector.transfer_read %lhs_m[%c0, %c0], %c0_i8 : memref<4x8xi8>, vector<4x8xi8> - - vector.print str "LHS:\n" - %lhs0 = vector.extract %lhs[0] : vector<8xi8> from vector<4x8xi8> - %lhs1 = vector.extract %lhs[1] : vector<8xi8> from vector<4x8xi8> - %lhs2 = vector.extract %lhs[2] : vector<8xi8> from vector<4x8xi8> - %lhs3 = vector.extract %lhs[3] : vector<8xi8> from vector<4x8xi8> - vector.print %lhs0 : vector<8xi8> - vector.print %lhs1 : vector<8xi8> - vector.print %lhs2 : vector<8xi8> - vector.print %lhs3 : vector<8xi8> - - // RHS test data - %rhs_cst = arith.constant dense<[[18, 31, 37, 35, 44, 22, 37, 28], - [21, 22, 49, 39, 30, 28, 35, 37], - [21, 47, 39, 35, 23, 43, 24, 49], - [49, 49, 40, 32, 37, 20, 47, 40]]> : vector<4x8xi8> - - %rhs_m = memref.alloca() : memref<4x8xi8> - vector.transfer_write %rhs_cst, %rhs_m[%c0, %c0] : vector<4x8xi8>, memref<4x8xi8> - - %rhs_m1 = memref.collapse_shape %rhs_m [[0, 1]] : memref<4x8xi8> into memref<32xi8> - %rhs_flat = vector.transfer_read %rhs_m1[%c0], %c0_i8 {in_bounds = [true]} : memref<32xi8>, vector<[32]xi8> - - vector.print str "RHS:\n" - %rhs0 = vector.scalable.extract %rhs_flat[0] : vector<[16]xi8> from vector<[32]xi8> - %rhs1 = vector.scalable.extract %rhs_flat[16] : vector<[16]xi8> from vector<[32]xi8> - vector.print %rhs0 : vector<[16]xi8> - vector.print %rhs1 : vector<[16]xi8> - - %rhs = vector.shape_cast %rhs_flat : vector<[32]xi8> to vector<[4]x8xi8> - -// CHECK-IR-COUNT-4: arm_sve.intr.ummla - - // Matrix multiplication - %0 = arith.extui %lhs : vector<4x8xi8> to vector<4x8xi32> - %1 = arith.extui %rhs : vector<[4]x8xi8> to vector<[4]x8xi32> - %2 = vector.contract {indexing_maps = #packed_maps, - iterator_types = ["parallel", "parallel", "reduction"], - kind = #vector.kind} %0, %1, %acc - : vector<4x8xi32>, vector<[4]x8xi32> into vector<4x[4]xi32> - - // Display the result of the multiplication - vector.print str "Result:\n" - %u0 = vector.extract %2[0] : vector<[4]xi32> from vector<4x[4]xi32> - %u1 = vector.extract %2[1] : vector<[4]xi32> from vector<4x[4]xi32> - %u2 = vector.extract %2[2] : vector<[4]xi32> from vector<4x[4]xi32> - %u3 = vector.extract %2[3] : vector<[4]xi32> from vector<4x[4]xi32> - vector.print %u0 : vector<[4]xi32> - vector.print %u1 : vector<[4]xi32> - vector.print %u2 : vector<[4]xi32> - vector.print %u3 : vector<[4]xi32> - -// CHECK: ( 9183, 9513, 10460, 11314 ) -// CHECK: ( 9648, 9812, 10092, 12088 ) -// CHECK: ( 7548, 7625, 8398, 9044 ) -// CHECK: ( 8855, 9046, 9685, 11191 ) - return -} diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-usmmla-4x8x4.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-usmmla-4x8x4.mlir deleted file mode 100644 index 319539b5cfc54..0000000000000 --- 
a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction-usmmla-4x8x4.mlir +++ /dev/null @@ -1,119 +0,0 @@ -// REQUIRES: arm-emulator - -// DEFINE: %{compile} = mlir-opt %s \ -// DEFINE: --convert-vector-to-scf --convert-scf-to-cf --convert-vector-to-llvm='enable-arm-sve enable-arm-i8mm' \ -// DEFINE: --expand-strided-metadata --convert-to-llvm --finalize-memref-to-llvm --reconcile-unrealized-casts \ -// DEFINE: -o %t - -// DEFINE: %{entry_point} = main - -// DEFINE: %{run} = %mcr_aarch64_cmd %t -e %{entry_point} -entry-point-result=void --march=aarch64 --mattr="+sve,+i8mm" \ -// DEFINE: -shared-libs=%mlir_runner_utils,%mlir_c_runner_utils,%native_mlir_arm_runner_utils - -// RUN: rm -f %t && %{compile} && FileCheck %s --input-file=%t -check-prefix CHECK-IR && %{run} | FileCheck %s - -#packed_maps = [ - affine_map<(d0, d1, d2) -> (d0, d2)>, - affine_map<(d0, d1, d2) -> (d1, d2)>, - affine_map<(d0, d1, d2) -> (d0, d1)> -] - -func.func private @setArmVLBits(%bits : i32) - -func.func @main() { - %c128 = arith.constant 128 : i32 - func.call @setArmVLBits(%c128) : (i32) -> () - - %c0 = arith.constant 0 : index - %c0_i32 = arith.constant 0 : i32 - %c0_i8 = arith.constant 0 : i8 - - // Accumulator test data - %acc_cst = arith.constant dense<[[-44, 20, 44, -46], - [ -8, 25, -34, 26], - [-20, -36, -3, 39], - [-48, -31, -25, -21]]> : vector<4x4xi32> - %acc_m = memref.alloca() : memref<4x4xi32> - vector.transfer_write %acc_cst, %acc_m[%c0, %c0] : vector<4x4xi32>, memref<4x4xi32> - - %acc_m1 = memref.collapse_shape %acc_m [[0, 1]] : memref<4x4xi32> into memref<16xi32> - %acc_flat = vector.transfer_read %acc_m1[%c0], %c0_i32 {in_bounds = [true]} : memref<16xi32>, vector<[16]xi32> - %acc = vector.shape_cast %acc_flat : vector<[16]xi32> to vector<4x[4]xi32> - - vector.print str "ACC:\n" - %acc0 = vector.extract %acc[0] : vector<[4]xi32> from vector<4x[4]xi32> - %acc1 = vector.extract %acc[1] : vector<[4]xi32> from vector<4x[4]xi32> - %acc2 = vector.extract %acc[2] : vector<[4]xi32> from vector<4x[4]xi32> - %acc3 = vector.extract %acc[3] : vector<[4]xi32> from vector<4x[4]xi32> - vector.print %acc0 : vector<[4]xi32> - vector.print %acc1 : vector<[4]xi32> - vector.print %acc2 : vector<[4]xi32> - vector.print %acc3 : vector<[4]xi32> - - // LHS test data - %lhs_cst = arith.constant dense<[[153, 161, 24, 157, 211, 154, 52, 27], - [168, 77, 136, 124, 249, 28, 13, 122], - [ 97, 82, 181, 39, 53, 25, 80, 240], - [184, 227, 106, 165, 126, 113, 121, 228]]> : vector<4x8xi8> - - %lhs_m = memref.alloca() : memref<4x8xi8> - vector.transfer_write %lhs_cst, %lhs_m[%c0, %c0] : vector<4x8xi8>, memref<4x8xi8> - %lhs = vector.transfer_read %lhs_m[%c0, %c0], %c0_i8 : memref<4x8xi8>, vector<4x8xi8> - - vector.print str "LHS:\n" - %lhs0 = vector.extract %lhs[0] : vector<8xi8> from vector<4x8xi8> - %lhs1 = vector.extract %lhs[1] : vector<8xi8> from vector<4x8xi8> - %lhs2 = vector.extract %lhs[2] : vector<8xi8> from vector<4x8xi8> - %lhs3 = vector.extract %lhs[3] : vector<8xi8> from vector<4x8xi8> - vector.print %lhs0 : vector<8xi8> - vector.print %lhs1 : vector<8xi8> - vector.print %lhs2 : vector<8xi8> - vector.print %lhs3 : vector<8xi8> - - // RHS test data - %rhs_cst = arith.constant dense<[[ 40, 27, 37, 43, 38, -6, 37, 49], - [-17, -50, -1, 48, -13, 22, 39, 33], - [-35, -24, 37, -32, 33, 30, -11, -17], - [-28, 31, 3, -44, -15, -27, 22, 35]]> : vector<4x8xi8> - - %rhs_m = memref.alloca() : memref<4x8xi8> - vector.transfer_write %rhs_cst, %rhs_m[%c0, %c0] : vector<4x8xi8>, memref<4x8xi8> - - %rhs_m1 = memref.collapse_shape 
%rhs_m [[0, 1]] : memref<4x8xi8> into memref<32xi8> - %rhs_flat = vector.transfer_read %rhs_m1[%c0], %c0_i8 {in_bounds = [true]} : memref<32xi8>, vector<[32]xi8> - - vector.print str "RHS:\n" - %rhs0 = vector.scalable.extract %rhs_flat[0] : vector<[16]xi8> from vector<[32]xi8> - %rhs1 = vector.scalable.extract %rhs_flat[16] : vector<[16]xi8> from vector<[32]xi8> - vector.print %rhs0 : vector<[16]xi8> - vector.print %rhs1 : vector<[16]xi8> - - %rhs = vector.shape_cast %rhs_flat : vector<[32]xi8> to vector<[4]x8xi8> - -// CHECK-IR-COUNT-4: arm_sve.intr.usmmla - - // Matrix multiplication - %0 = arith.extui %lhs : vector<4x8xi8> to vector<4x8xi32> - %1 = arith.extsi %rhs : vector<[4]x8xi8> to vector<[4]x8xi32> - %2 = vector.contract {indexing_maps = #packed_maps, - iterator_types = ["parallel", "parallel", "reduction"], - kind = #vector.kind} %0, %1, %acc - : vector<4x8xi32>, vector<[4]x8xi32> into vector<4x[4]xi32> - - // Display the result of the multiplication - vector.print str "Result:\n" - %u0 = vector.extract %2[0] : vector<[4]xi32> from vector<4x[4]xi32> - %u1 = vector.extract %2[1] : vector<[4]xi32> from vector<4x[4]xi32> - %u2 = vector.extract %2[2] : vector<[4]xi32> from vector<4x[4]xi32> - %u3 = vector.extract %2[3] : vector<[4]xi32> from vector<4x[4]xi32> - vector.print %u0 : vector<[4]xi32> - vector.print %u1 : vector<[4]xi32> - vector.print %u2 : vector<[4]xi32> - vector.print %u3 : vector<[4]xi32> - -// CHECK: ( 28403, 445, -2759, -11409 ) -// CHECK: ( 34908, 1047, 142, -7274 ) -// CHECK: ( 31032, 6807, -2378, 7382 ) -// CHECK: ( 44217, 6396, -10930, 623 ) - return -} diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/vector-contract-i8mm.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/vector-contract-i8mm.mlir new file mode 100644 index 0000000000000..d70a9709aefa4 --- /dev/null +++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/vector-contract-i8mm.mlir @@ -0,0 +1,375 @@ +// REQUIRES: arm-emulator + +// DEFINE: %{compile} = mlir-opt %s \ +// DEFINE: --convert-vector-to-scf --convert-scf-to-cf --convert-vector-to-llvm='enable-arm-sve enable-arm-i8mm' \ +// DEFINE: --expand-strided-metadata --convert-to-llvm --finalize-memref-to-llvm \ +// DEFINE: --lower-affine --convert-arith-to-llvm --reconcile-unrealized-casts \ +// DEFINE: -o %t + +// DEFINE: %{entry_point} = main + +// DEFINE: %{run} = %mcr_aarch64_cmd %t -e %{entry_point} -entry-point-result=void --march=aarch64 --mattr="+sve,+i8mm" \ +// DEFINE: -shared-libs=%mlir_runner_utils,%mlir_c_runner_utils,%native_mlir_arm_runner_utils + +// RUN: rm -f %t && %{compile} && FileCheck %s --input-file=%t -check-prefix CHECK-IR && %{run} | FileCheck %s + +#packed_maps = [ + affine_map<(d0, d1, d2) -> (d0, d2)>, + affine_map<(d0, d1, d2) -> (d1, d2)>, + affine_map<(d0, d1, d2) -> (d0, d1)> +] + +func.func private @setArmVLBits(%bits : i32) +func.func private @printMemrefI32(%ptr : memref<*xi32>) + +func.func private @prepareAccTestData(%in: vector<4x4xi32>) -> memref<4x?xi32> { + %c0 = arith.constant 0 : index + %c1 = arith.constant 1 : index + %c4 = arith.constant 4 : index + %c0_i32 = arith.constant 0 : i32 + + %vs = vector.vscale + %d = arith.muli %c4, %vs : index + %mem = memref.alloc(%d) : memref<4x?xi32> + + scf.for %j = %c0 to %d step %c4 { + vector.transfer_write %in, %mem[%c0, %j] {in_bounds = [true, true]} : vector<4x4xi32>, memref<4x?xi32> + } + + return %mem : memref<4x?xi32> +} + +func.func private @prepareLHSTestData(%in: vector<4x8xi8>) -> memref<4x8xi8> { + %c0 = arith.constant 0 : index + %c0_i8 = 
arith.constant 0 : i8 + + %mem = memref.alloc() : memref<4x8xi8> + vector.transfer_write %in, %mem[%c0, %c0] {in_bounds = [true, true]} : vector<4x8xi8>, memref<4x8xi8> + + return %mem : memref<4x8xi8> +} + +func.func private @prepareRHSTestData(%in: vector<4x8xi8>) -> memref { + %c0 = arith.constant 0 : index + %c1 = arith.constant 1 : index + %c4 = arith.constant 4 : index + %c0_i8 = arith.constant 0 : i8 + + %vs = vector.vscale + %d = arith.muli %c4, %vs : index + %mem = memref.alloc(%d) : memref + + scf.for %i = %c0 to %d step %c4 { + vector.transfer_write %in, %mem[%i, %c0] {in_bounds = [true, true]} : vector<4x8xi8>, memref + } + + %mem_out = memref.collapse_shape %mem [[0, 1]] : memref into memref + return %mem_out : memref +} + +// CHECK-IR-LABEL: llvm.func @test_smmla +// CHECK-IR-COUNT-4: arm_sve.intr.smmla +func.func @test_smmla() { + + %c0 = arith.constant 0 : index + %c0_i32 = arith.constant 0 : i32 + %c0_i8 = arith.constant 0 : i8 + + // Accumulator test data + %acc_cst = arith.constant dense<[[-44, 20, 44, -46], + [ -8, 25, -34, 26], + [-20, -36, -3, 39], + [-48, -31, -25, -21]]> : vector<4x4xi32> + + %acc_mem = func.call @prepareAccTestData(%acc_cst) : (vector<4x4xi32>) -> memref<4x?xi32> + %acc = vector.transfer_read %acc_mem[%c0, %c0], %c0_i32 {in_bounds = [true, true]} : memref<4x?xi32>, vector<4x[4]xi32> + + // Workaround for a crash, see https://github.com/llvm/llvm-project/issues/143670 + %acc_cast = memref.cast %acc_mem : memref<4x?xi32> to memref<*xi32> + call @printMemrefI32(%acc_cast) : (memref<*xi32>) -> () + + // LHS test data + %lhs_cst = arith.constant dense<[[-35, -27, -36, -31, 23, -34, -8, -33], + [-20, 17, -32, -47, 37, 22, -7, -21], + [ -7, -35, 20, -4, 39, 46, -23, 40], + [ 40, 27, 37, 43, 38, -6, 37, 49]]> : vector<4x8xi8> + + %lhs_mem = func.call @prepareLHSTestData(%lhs_cst) : (vector<4x8xi8>) -> memref<4x8xi8> + %lhs = vector.transfer_read %lhs_mem[%c0, %c0], %c0_i8 {in_bounds = [true, true]} : memref<4x8xi8>, vector<4x8xi8> + + // RHS test data + %rhs_cst = arith.constant dense<[[-17, -50, -1, 48, -13, 22, 39, 33], + [-35, -24, 37, -32, 33, 30, -11, -17], + [-28, 31, 3, -44, -15, -27, 22, 35], + [-23, 39, 48, 26, -23, 32, -39, -38]]> : vector<4x8xi8> + + %rhs_mem = func.call @prepareRHSTestData(%rhs_cst) : (vector<4x8xi8>) -> memref + %rhs_flat = vector.transfer_read %rhs_mem[%c0], %c0_i8 {in_bounds = [true]} : memref, vector<[32]xi8> + %rhs = vector.shape_cast %rhs_flat : vector<[32]xi8> to vector<[4]x8xi8> + + // Matrix multiplication + %0 = arith.extsi %lhs : vector<4x8xi8> to vector<4x8xi32> + %1 = arith.extsi %rhs : vector<[4]x8xi8> to vector<[4]x8xi32> + %2 = vector.contract {indexing_maps = #packed_maps, + iterator_types = ["parallel", "parallel", "reduction"], + kind = #vector.kind} %0, %1, %acc + : vector<4x8xi32>, vector<[4]x8xi32> into vector<4x[4]xi32> + + // Display the result of the multiplication + vector.print str "Result(SMMLA):\n" + %u0 = vector.extract %2[0] : vector<[4]xi32> from vector<4x[4]xi32> + %u1 = vector.extract %2[1] : vector<[4]xi32> from vector<4x[4]xi32> + %u2 = vector.extract %2[2] : vector<[4]xi32> from vector<4x[4]xi32> + %u3 = vector.extract %2[3] : vector<[4]xi32> from vector<4x[4]xi32> + vector.print %u0 : vector<[4]xi32> + vector.print %u1 : vector<[4]xi32> + vector.print %u2 : vector<[4]xi32> + vector.print %u3 : vector<[4]xi32> + + return +} + +// CHECK-IR-LABEL: llvm.func @test_ummla +// CHECK-IR-COUNT-4: arm_sve.intr.ummla +func.func @test_ummla() { + + %c0 = arith.constant 0 : index + %c0_i32 = 
arith.constant 0 : i32 + %c0_i8 = arith.constant 0 : i8 + + // Accumulator test data + %acc_cst = arith.constant dense<[[16, 16, 48, 40], + [40, 24, 35, 12], + [33, 24, 29, 19], + [28, 13, 33, 18]]> : vector<4x4xi32> + + %acc_mem = func.call @prepareAccTestData(%acc_cst) : (vector<4x4xi32>) -> memref<4x?xi32> + %acc = vector.transfer_read %acc_mem[%c0, %c0], %c0_i32 {in_bounds = [true, true]} : memref<4x?xi32>, vector<4x[4]xi32> + + // LHS test data + %lhs_cst = arith.constant dense<[[35, 42, 37, 49, 36, 36, 23, 33], + [39, 34, 33, 45, 43, 10, 44, 47], + [18, 35, 29, 25, 36, 33, 28, 29], + [26, 49, 43, 32, 27, 16, 45, 33]]> : vector<4x8xi8> + + %lhs_mem = func.call @prepareLHSTestData(%lhs_cst) : (vector<4x8xi8>) -> memref<4x8xi8> + %lhs = vector.transfer_read %lhs_mem[%c0, %c0], %c0_i8 {in_bounds = [true, true]} : memref<4x8xi8>, vector<4x8xi8> + + // RHS test data + %rhs_cst = arith.constant dense<[[18, 31, 37, 35, 44, 22, 37, 28], + [21, 22, 49, 39, 30, 28, 35, 37], + [21, 47, 39, 35, 23, 43, 24, 49], + [49, 49, 40, 32, 37, 20, 47, 40]]> : vector<4x8xi8> + + %rhs_mem = func.call @prepareRHSTestData(%rhs_cst) : (vector<4x8xi8>) -> memref + %rhs_flat = vector.transfer_read %rhs_mem[%c0], %c0_i8 {in_bounds = [true]} : memref, vector<[32]xi8> + %rhs = vector.shape_cast %rhs_flat : vector<[32]xi8> to vector<[4]x8xi8> + + // Matrix multiplication + %0 = arith.extui %lhs : vector<4x8xi8> to vector<4x8xi32> + %1 = arith.extui %rhs : vector<[4]x8xi8> to vector<[4]x8xi32> + %2 = vector.contract {indexing_maps = #packed_maps, + iterator_types = ["parallel", "parallel", "reduction"], + kind = #vector.kind} %0, %1, %acc + : vector<4x8xi32>, vector<[4]x8xi32> into vector<4x[4]xi32> + + // Display the result of the multiplication + vector.print str "Result(UMMLA):\n" + %u0 = vector.extract %2[0] : vector<[4]xi32> from vector<4x[4]xi32> + %u1 = vector.extract %2[1] : vector<[4]xi32> from vector<4x[4]xi32> + %u2 = vector.extract %2[2] : vector<[4]xi32> from vector<4x[4]xi32> + %u3 = vector.extract %2[3] : vector<[4]xi32> from vector<4x[4]xi32> + vector.print %u0 : vector<[4]xi32> + vector.print %u1 : vector<[4]xi32> + vector.print %u2 : vector<[4]xi32> + vector.print %u3 : vector<[4]xi32> + + return +} + +// CHECK-IR-LABEL: llvm.func @test_usmmla +// CHECK-IR-COUNT-4: arm_sve.intr.usmmla +func.func @test_usmmla() { + + %c0 = arith.constant 0 : index + %c0_i32 = arith.constant 0 : i32 + %c0_i8 = arith.constant 0 : i8 + + // Accumulator test data + %acc_cst = arith.constant dense<[[-44, 20, 44, -46], + [ -8, 25, -34, 26], + [-20, -36, -3, 39], + [-48, -31, -25, -21]]> : vector<4x4xi32> + + %acc_mem = func.call @prepareAccTestData(%acc_cst) : (vector<4x4xi32>) -> memref<4x?xi32> + %acc = vector.transfer_read %acc_mem[%c0, %c0], %c0_i32 {in_bounds = [true, true]} : memref<4x?xi32>, vector<4x[4]xi32> + + // LHS test data + %lhs_cst = arith.constant dense<[[153, 161, 24, 157, 211, 154, 52, 27], + [168, 77, 136, 124, 249, 28, 13, 122], + [ 97, 82, 181, 39, 53, 25, 80, 240], + [184, 227, 106, 165, 126, 113, 121, 228]]> : vector<4x8xi8> + + %lhs_mem = func.call @prepareLHSTestData(%lhs_cst) : (vector<4x8xi8>) -> memref<4x8xi8> + %lhs = vector.transfer_read %lhs_mem[%c0, %c0], %c0_i8 {in_bounds = [true, true]} : memref<4x8xi8>, vector<4x8xi8> + + // RHS test data + %rhs_cst = arith.constant dense<[[ 40, 27, 37, 43, 38, -6, 37, 49], + [-17, -50, -1, 48, -13, 22, 39, 33], + [-35, -24, 37, -32, 33, 30, -11, -17], + [-28, 31, 3, -44, -15, -27, 22, 35]]> : vector<4x8xi8> + + %rhs_mem = func.call 
@prepareRHSTestData(%rhs_cst) : (vector<4x8xi8>) -> memref + %rhs_flat = vector.transfer_read %rhs_mem[%c0], %c0_i8 {in_bounds = [true]} : memref, vector<[32]xi8> + %rhs = vector.shape_cast %rhs_flat : vector<[32]xi8> to vector<[4]x8xi8> + + // Matrix multiplication + %0 = arith.extui %lhs : vector<4x8xi8> to vector<4x8xi32> + %1 = arith.extsi %rhs : vector<[4]x8xi8> to vector<[4]x8xi32> + %2 = vector.contract {indexing_maps = #packed_maps, + iterator_types = ["parallel", "parallel", "reduction"], + kind = #vector.kind} %0, %1, %acc + : vector<4x8xi32>, vector<[4]x8xi32> into vector<4x[4]xi32> + + // Display the result of the multiplication + vector.print str "Result(USMMLA):\n" + %u0 = vector.extract %2[0] : vector<[4]xi32> from vector<4x[4]xi32> + %u1 = vector.extract %2[1] : vector<[4]xi32> from vector<4x[4]xi32> + %u2 = vector.extract %2[2] : vector<[4]xi32> from vector<4x[4]xi32> + %u3 = vector.extract %2[3] : vector<[4]xi32> from vector<4x[4]xi32> + vector.print %u0 : vector<[4]xi32> + vector.print %u1 : vector<[4]xi32> + vector.print %u2 : vector<[4]xi32> + vector.print %u3 : vector<[4]xi32> + + return +} + +// CHECK-IR-LABEL: llvm.func @test_summla +// CHECK-IR-COUNT-4: arm_sve.intr.usmmla +func.func @test_summla() { + + %c0 = arith.constant 0 : index + %c0_i32 = arith.constant 0 : i32 + %c0_i8 = arith.constant 0 : i8 + + // Accumulator test data + %acc_cst = arith.constant dense<[[-44, 20, 44, -46], + [ -8, 25, -34, 26], + [-20, -36, -3, 39], + [-48, -31, -25, -21]]> : vector<4x4xi32> + + %acc_mem = func.call @prepareAccTestData(%acc_cst) : (vector<4x4xi32>) -> memref<4x?xi32> + %acc = vector.transfer_read %acc_mem[%c0, %c0], %c0_i32 {in_bounds = [true, true]} : memref<4x?xi32>, vector<4x[4]xi32> + + // LHS test data + %lhs_cst = arith.constant dense<[[-35, -27, -36, -31, 23, -34, -8, -33], + [-20, 17, -32, -47, 37, 22, -7, -21], + [ -7, -35, 20, -4, 39, 46, -23, 40], + [ 40, 27, 37, 43, 38, -6, 37, 49]]> : vector<4x8xi8> + + %lhs_mem = func.call @prepareLHSTestData(%lhs_cst) : (vector<4x8xi8>) -> memref<4x8xi8> + %lhs = vector.transfer_read %lhs_mem[%c0, %c0], %c0_i8 {in_bounds = [true, true]} : memref<4x8xi8>, vector<4x8xi8> + + // RHS test data + %rhs_cst = arith.constant dense<[[125, 171, 138, 187, 108, 175, 82, 99], + [221, 25, 164, 97, 156, 221, 218, 177], + [171, 160, 219, 191, 144, 45, 161, 210], + [223, 165, 123, 99, 108, 86, 37, 92]]> : vector<4x8xi8> + + %rhs_mem = func.call @prepareRHSTestData(%rhs_cst) : (vector<4x8xi8>) -> memref + %rhs_flat = vector.transfer_read %rhs_mem[%c0], %c0_i8 {in_bounds = [true]} : memref, vector<[32]xi8> + %rhs = vector.shape_cast %rhs_flat : vector<[32]xi8> to vector<[4]x8xi8> + + // Matrix multiplication + %0 = arith.extsi %lhs : vector<4x8xi8> to vector<4x8xi32> + %1 = arith.extui %rhs : vector<[4]x8xi8> to vector<[4]x8xi32> + %2 = vector.contract {indexing_maps = #packed_maps, + iterator_types = ["parallel", "parallel", "reduction"], + kind = #vector.kind} %0, %1, %acc + : vector<4x8xi32>, vector<[4]x8xi32> into vector<4x[4]xi32> + + // Display the result of the multiplication + vector.print str "Result(SUMMLA (i.e. 
USMMLA transposed)):\n" + %u0 = vector.extract %2[0] : vector<[4]xi32> from vector<4x[4]xi32> + %u1 = vector.extract %2[1] : vector<[4]xi32> from vector<4x[4]xi32> + %u2 = vector.extract %2[2] : vector<[4]xi32> from vector<4x[4]xi32> + %u3 = vector.extract %2[3] : vector<[4]xi32> from vector<4x[4]xi32> + vector.print %u0 : vector<[4]xi32> + vector.print %u1 : vector<[4]xi32> + vector.print %u2 : vector<[4]xi32> + vector.print %u3 : vector<[4]xi32> + + return +} + +func.func @main() { + %c128 = arith.constant 128 : i32 + %c256 = arith.constant 256 : i32 + +// CHECK-LABEL: Result(SMMLA): +// CHECK: ( -1999, 1941, 685, -2879 ) +// CHECK: ( -3705, 2952, 987, -685 ) +// CHECK: ( 2565, 4157, -1589, -357 ) +// CHECK: ( 2383, -2252, 32, -1365 ) + func.call @setArmVLBits(%c128) : (i32) -> () + func.call @test_smmla() : () -> () + +// CHECK: Result(SMMLA): +// CHECK: ( -1999, 1941, 685, -2879, -1999, 1941, 685, -2879 ) +// CHECK: ( -3705, 2952, 987, -685, -3705, 2952, 987, -685 ) +// CHECK: ( 2565, 4157, -1589, -357, 2565, 4157, -1589, -357 ) +// CHECK: ( 2383, -2252, 32, -1365, 2383, -2252, 32, -1365 ) + func.call @setArmVLBits(%c256) : (i32) -> () + func.call @test_smmla() : () -> () + +// CHECK-LABEL: Result(UMMLA): +// CHECK: ( 9183, 9513, 10460, 11314 ) +// CHECK: ( 9648, 9812, 10092, 12088 ) +// CHECK: ( 7548, 7625, 8398, 9044 ) +// CHECK: ( 8855, 9046, 9685, 11191 ) + func.call @setArmVLBits(%c128) : (i32) -> () + func.call @test_ummla() : () -> () + +// CHECK: Result(UMMLA): +// CHECK: ( 9183, 9513, 10460, 11314, 9183, 9513, 10460, 11314 ) +// CHECK: ( 9648, 9812, 10092, 12088, 9648, 9812, 10092, 12088 ) +// CHECK: ( 7548, 7625, 8398, 9044, 7548, 7625, 8398, 9044 ) +// CHECK: ( 8855, 9046, 9685, 11191, 8855, 9046, 9685, 11191 ) + func.call @setArmVLBits(%c256) : (i32) -> () + func.call @test_ummla() : () -> () + +// CHECK-LABEL: Result(USMMLA): +// CHECK: ( 28403, 445, -2759, -11409 ) +// CHECK: ( 34908, 1047, 142, -7274 ) +// CHECK: ( 31032, 6807, -2378, 7382 ) +// CHECK: ( 44217, 6396, -10930, 623 ) + func.call @setArmVLBits(%c128) : (i32) -> () + func.call @test_usmmla() : () -> () + +// CHECK: Result(USMMLA): +// CHECK: ( 28403, 445, -2759, -11409, 28403, 445, -2759, -11409 ) +// CHECK: ( 34908, 1047, 142, -7274, 34908, 1047, 142, -7274 ) +// CHECK: ( 31032, 6807, -2378, 7382, 31032, 6807, -2378, 7382 ) +// CHECK: ( 44217, 6396, -10930, 623, 44217, 6396, -10930, 623 ) + func.call @setArmVLBits(%c256) : (i32) -> () + func.call @test_usmmla() : () -> () + +// CHECK-LABEL: Result(SUMMLA (i.e. USMMLA transposed)): +// CHECK: ( -27190, -28812, -30502, -23575 ) +// CHECK: ( -7613, -8386, -15938, -6521 ) +// CHECK: ( 9468, 18750, 9199, 5764 ) +// CHECK: ( 33655, 41064, 48900, 31627 ) + func.call @setArmVLBits(%c128) : (i32) -> () + func.call @test_summla() : () -> () + +// CHECK: Result(SUMMLA (i.e. 
USMMLA transposed)): +// CHECK: ( -27190, -28812, -30502, -23575, -27190, -28812, -30502, -23575 ) +// CHECK: ( -7613, -8386, -15938, -6521, -7613, -8386, -15938, -6521 ) +// CHECK: ( 9468, 18750, 9199, 5764, 9468, 18750, 9199, 5764 ) +// CHECK: ( 33655, 41064, 48900, 31627, 33655, 41064, 48900, 31627 ) + func.call @setArmVLBits(%c256) : (i32) -> () + func.call @test_summla() : () -> () + + return +} From a4564e109e59a5cb577a5f745aafcef7dbbae24f Mon Sep 17 00:00:00 2001 From: Momchil Velikov Date: Fri, 13 Jun 2025 12:31:00 +0000 Subject: [PATCH 4/5] [fixup] More commenting --- .../CPU/ArmSVE/vector-contract-i8mm.mlir | 92 ++++++++++++++++++- 1 file changed, 88 insertions(+), 4 deletions(-) diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/vector-contract-i8mm.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/vector-contract-i8mm.mlir index d70a9709aefa4..bf912d7c86fd9 100644 --- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/vector-contract-i8mm.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/vector-contract-i8mm.mlir @@ -19,9 +19,37 @@ affine_map<(d0, d1, d2) -> (d0, d1)> ] -func.func private @setArmVLBits(%bits : i32) -func.func private @printMemrefI32(%ptr : memref<*xi32>) - +// +// Test the lowering of `vector.contract` using the `LowerContractionToSVEI8MMPattern` +// +// The operation that the `vector.contract` in this test performs is matrix +// multiplication with accumulate +// OUT = ACC + LHS * RHS +// of two 8-bit integer matrices LHS and RHS, and a 32-bit integer matrix ACC +// into a 32-bit integer matrix OUT. The LHS and RHS can be sign- or zero-extended; +// this test covers all the possible variants. +// +// Tested are both the computed values and that the relevant `ArmSVE` dialect +// operations (`arm_sve.smmla`, `arm_sve.ummla`, etc.) are emitted. +// +// The pattern above handles (therefore this test prepares) input/output vectors with +// specific shapes: +// * LHS: vector<Mx8xi8> +// * RHS: vector<[N]x8xi8> +// * ACC, OUT: vector<Mx[N]xi32> +// Note that the RHS is transposed. +// See mlir/lib/Dialect/ArmSVE/Transforms/LowerContractionToSVEI8MMPattern.cpp +// for more information and rationale about these shapes. +// +// In this specific test we use M == 4 and N == 4. +// + +// Allocate and initialise a memref containing test data for use as the ACC +// operand. The memref has one dynamic dimension whose extent depends on the +// runtime value of VSCALE. +// +// The input parameter `%in` is a vector that is replicated VSCALE times +// across the columns of the memref. func.func private @prepareAccTestData(%in: vector<4x4xi32>) -> memref<4x?xi32> { %c0 = arith.constant 0 : index %c1 = arith.constant 1 : index @@ -39,6 +67,9 @@ func.func private @prepareAccTestData(%in: vector<4x4xi32>) -> memref<4x?xi32> { return %mem : memref<4x?xi32> } +// Allocate and initialise a memref containing test data for use as the LHS
// operand. This function just writes the parameter `%in` into the memref. +// The size of the LHS does not depend on VSCALE. func.func private @prepareLHSTestData(%in: vector<4x8xi8>) -> memref<4x8xi8> { %c0 = arith.constant 0 : index %c0_i8 = arith.constant 0 : i8 @@ -49,6 +80,15 @@ func.func private @prepareLHSTestData(%in: vector<4x8xi8>) -> memref<4x8xi8> { return %mem : memref<4x8xi8> } +// Allocate and initialise a memref containing test data for use as the RHS +// operand. The memref has one dynamic dimension whose extent depends on the +// runtime value of VSCALE. 
+// +// The input parameter `%in` is a vector that is replicated VSCALE times +// across the rows of the memref. +// +// For convenience, flatten the memref, since the RHS vector is read first as a +// single-dimensional scalable vector and then cast into the [N]x8 shape. func.func private @prepareRHSTestData(%in: vector<4x8xi8>) -> memref<?xi8> { %c0 = arith.constant 0 : index %c1 = arith.constant 1 : index @@ -67,6 +107,9 @@ func.func private @prepareRHSTestData(%in: vector<4x8xi8>) -> memref<?xi8> { return %mem_out : memref<?xi8> } +// Test the operation where both LHS and RHS are interpreted as signed, hence +// we ultimately emit and execute the `smmla` instruction. + // CHECK-IR-LABEL: llvm.func @test_smmla // CHECK-IR-COUNT-4: arm_sve.intr.smmla func.func @test_smmla() { @@ -84,7 +127,7 @@ func.func @test_smmla() { %acc_mem = func.call @prepareAccTestData(%acc_cst) : (vector<4x4xi32>) -> memref<4x?xi32> %acc = vector.transfer_read %acc_mem[%c0, %c0], %c0_i32 {in_bounds = [true, true]} : memref<4x?xi32>, vector<4x[4]xi32> - // Workaround for a crash, see https://github.com/llvm/llvm-project/issues/143670 + // FIXME: Workaround for a crash, see https://github.com/llvm/llvm-project/issues/143670 %acc_cast = memref.cast %acc_mem : memref<4x?xi32> to memref<*xi32> call @printMemrefI32(%acc_cast) : (memref<*xi32>) -> () @@ -126,9 +169,17 @@ func.func @test_smmla() { vector.print %u2 : vector<[4]xi32> vector.print %u3 : vector<[4]xi32> + // Deallocate the buffers. + memref.dealloc %acc_mem : memref<4x?xi32> + memref.dealloc %lhs_mem : memref<4x8xi8> + memref.dealloc %rhs_mem : memref<?xi8> + return } +// Test the operation where both LHS and RHS are interpreted as unsigned, hence +// we ultimately emit and execute the `ummla` instruction. + // CHECK-IR-LABEL: llvm.func @test_ummla // CHECK-IR-COUNT-4: arm_sve.intr.ummla func.func @test_ummla() { @@ -184,9 +235,18 @@ func.func @test_ummla() { vector.print %u2 : vector<[4]xi32> vector.print %u3 : vector<[4]xi32> + // Deallocate the buffers. + memref.dealloc %acc_mem : memref<4x?xi32> + memref.dealloc %lhs_mem : memref<4x8xi8> + memref.dealloc %rhs_mem : memref<?xi8> + return } +// Test the operation where LHS is interpreted as unsigned and RHS is +// interpreted as signed, hence we ultimately emit and execute the `usmmla` +// instruction. + // CHECK-IR-LABEL: llvm.func @test_usmmla // CHECK-IR-COUNT-4: arm_sve.intr.usmmla func.func @test_usmmla() { @@ -242,9 +302,19 @@ func.func @test_usmmla() { vector.print %u2 : vector<[4]xi32> vector.print %u3 : vector<[4]xi32> + // Deallocate the buffers. + memref.dealloc %acc_mem : memref<4x?xi32> + memref.dealloc %lhs_mem : memref<4x8xi8> + memref.dealloc %rhs_mem : memref<?xi8> + return } +// Test the operation where LHS is interpreted as signed and RHS is interpreted +// as unsigned. In this test we ultimately emit and execute the `usmmla` +// instruction with reversed operands; see `LowerContractionToSVEI8MMPattern.cpp` +// for more details. + // CHECK-IR-LABEL: llvm.func @test_summla // CHECK-IR-COUNT-4: arm_sve.intr.usmmla func.func @test_summla() { @@ -300,9 +370,20 @@ func.func @test_summla() { vector.print %u2 : vector<[4]xi32> vector.print %u3 : vector<[4]xi32> + // Deallocate the buffers. + memref.dealloc %acc_mem : memref<4x?xi32> + memref.dealloc %lhs_mem : memref<4x8xi8> + memref.dealloc %rhs_mem : memref<?xi8> + return } +// Perform each test with SVE vector lengths 128 bits and 256 bits (i.e. VSCALEs +// 1 and 2, respectively). The vector length is set via the `setArmVLBits` +// function. 
The effect of setting a different vector length is that the tests +// allocate and operate on different-sized buffers (see the `prepare*TestData` +// functions). + func.func @main() { %c128 = arith.constant 128 : i32 %c256 = arith.constant 256 : i32 @@ -373,3 +454,6 @@ func.func @main() { return } + +func.func private @setArmVLBits(%bits : i32) +func.func private @printMemrefI32(%ptr : memref<*xi32>) From 4de82c1f027aeb843c53b8b638ee249f46a95398 Mon Sep 17 00:00:00 2001 From: Momchil Velikov Date: Mon, 16 Jun 2025 15:00:55 +0000 Subject: [PATCH 5/5] [fixup] Commenting --- .../CPU/ArmSVE/vector-contract-i8mm.mlir | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/vector-contract-i8mm.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/vector-contract-i8mm.mlir index bf912d7c86fd9..5f6e8e4c30892 100644 --- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/vector-contract-i8mm.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/vector-contract-i8mm.mlir @@ -14,9 +14,9 @@ // RUN: rm -f %t && %{compile} && FileCheck %s --input-file=%t -check-prefix CHECK-IR && %{run} | FileCheck %s #packed_maps = [ - affine_map<(d0, d1, d2) -> (d0, d2)>, - affine_map<(d0, d1, d2) -> (d1, d2)>, - affine_map<(d0, d1, d2) -> (d0, d1)> + affine_map<(m, n, k) -> (m, k)>, + affine_map<(m, n, k) -> (n, k)>, + affine_map<(m, n, k) -> (m, n)> ] // @@ -38,6 +38,10 @@ // * RHS: vector<[N]x8xi8> // * ACC, OUT: vector<Mx[N]xi32> // Note that the RHS is transposed. +// This data layout makes it efficient to load data into SVE +// registers in the layout expected by FEAT_I8MM instructions. +// Such a `vector.contract` is representative of the code we aim to generate +// by scalable vectorisation of `linalg.mmt4d`. // See mlir/lib/Dialect/ArmSVE/Transforms/LowerContractionToSVEI8MMPattern.cpp // for more information and rationale about these shapes. // @@ -150,7 +154,7 @@ func.func @test_smmla() { %rhs_flat = vector.transfer_read %rhs_mem[%c0], %c0_i8 {in_bounds = [true]} : memref<?xi8>, vector<[32]xi8> %rhs = vector.shape_cast %rhs_flat : vector<[32]xi8> to vector<[4]x8xi8> - // Matrix multiplication + // Matrix multiplication and accumulate with transposed RHS. %0 = arith.extsi %lhs : vector<4x8xi8> to vector<4x8xi32> %1 = arith.extsi %rhs : vector<[4]x8xi8> to vector<[4]x8xi32> %2 = vector.contract {indexing_maps = #packed_maps, @@ -216,7 +220,7 @@ func.func @test_ummla() { %rhs_flat = vector.transfer_read %rhs_mem[%c0], %c0_i8 {in_bounds = [true]} : memref<?xi8>, vector<[32]xi8> %rhs = vector.shape_cast %rhs_flat : vector<[32]xi8> to vector<[4]x8xi8> - // Matrix multiplication + // Matrix multiplication and accumulate with transposed RHS. %0 = arith.extui %lhs : vector<4x8xi8> to vector<4x8xi32> %1 = arith.extui %rhs : vector<[4]x8xi8> to vector<[4]x8xi32> %2 = vector.contract {indexing_maps = #packed_maps, @@ -283,7 +287,7 @@ func.func @test_usmmla() { %rhs_flat = vector.transfer_read %rhs_mem[%c0], %c0_i8 {in_bounds = [true]} : memref<?xi8>, vector<[32]xi8> %rhs = vector.shape_cast %rhs_flat : vector<[32]xi8> to vector<[4]x8xi8> - // Matrix multiplication + // Matrix multiplication and accumulate with transposed RHS. 
%0 = arith.extui %lhs : vector<4x8xi8> to vector<4x8xi32> %1 = arith.extsi %rhs : vector<[4]x8xi8> to vector<[4]x8xi32> %2 = vector.contract {indexing_maps = #packed_maps, @@ -351,7 +355,7 @@ func.func @test_summla() { %rhs_flat = vector.transfer_read %rhs_mem[%c0], %c0_i8 {in_bounds = [true]} : memref, vector<[32]xi8> %rhs = vector.shape_cast %rhs_flat : vector<[32]xi8> to vector<[4]x8xi8> - // Matrix multiplication + // Matrix multiplication and accumulate with transposed RHS. %0 = arith.extsi %lhs : vector<4x8xi8> to vector<4x8xi32> %1 = arith.extui %rhs : vector<[4]x8xi8> to vector<[4]x8xi32> %2 = vector.contract {indexing_maps = #packed_maps,