From c695a263d90229fb32a375e19d532a11770de42d Mon Sep 17 00:00:00 2001 From: James Newling Date: Tue, 5 Aug 2025 17:39:33 -0700 Subject: [PATCH] miscellaneous replacements of splat with broadcast (ongoing burndown) --- mlir/test/Dialect/Arith/canonicalize.mlir | 14 ++++----- mlir/test/Dialect/Vector/vector-sink.mlir | 30 +++++++++---------- .../vector-transfer-to-vector-load-store.mlir | 8 ++--- .../Vector/CPU/ArmSME/outerproduct-f32.mlir | 4 +-- .../Vector/CPU/ArmSME/outerproduct-f64.mlir | 4 +-- .../Vector/CPU/ArmSME/transfer-write-2d.mlir | 4 +-- .../Vector/CPU/ArmSVE/contraction.mlir | 2 +- .../CPU/ArmSVE/scalable-interleave.mlir | 4 +-- .../Dialect/Vector/CPU/interleave.mlir | 4 +-- .../Dialect/Vector/CPU/outerproduct-f32.mlir | 6 ++-- .../Dialect/Vector/CPU/outerproduct-i64.mlir | 6 ++-- .../Dialect/Vector/CPU/transfer-read-1d.mlir | 4 +-- .../Dialect/Vector/CPU/transfer-read-2d.mlir | 4 +-- .../Dialect/Vector/CPU/transfer-read-3d.mlir | 2 +- .../Dialect/Vector/CPU/transfer-read.mlir | 2 +- .../Dialect/Vector/CPU/transfer-write.mlir | 8 ++--- 16 files changed, 53 insertions(+), 53 deletions(-) diff --git a/mlir/test/Dialect/Arith/canonicalize.mlir b/mlir/test/Dialect/Arith/canonicalize.mlir index 3d5a46d13e59d..78f67821da138 100644 --- a/mlir/test/Dialect/Arith/canonicalize.mlir +++ b/mlir/test/Dialect/Arith/canonicalize.mlir @@ -654,7 +654,7 @@ func.func @signExtendConstant() -> i16 { // CHECK: return %[[cres]] func.func @signExtendConstantSplat() -> vector<4xi16> { %c-2 = arith.constant -2 : i8 - %splat = vector.splat %c-2 : vector<4xi8> + %splat = vector.broadcast %c-2 : i8 to vector<4xi8> %ext = arith.extsi %splat : vector<4xi8> to vector<4xi16> return %ext : vector<4xi16> } @@ -682,7 +682,7 @@ func.func @unsignedExtendConstant() -> i16 { // CHECK: return %[[cres]] func.func @unsignedExtendConstantSplat() -> vector<4xi16> { %c2 = arith.constant 2 : i8 - %splat = vector.splat %c2 : vector<4xi8> + %splat = vector.broadcast %c2 : i8 to vector<4xi8> %ext = 
arith.extui %splat : vector<4xi8> to vector<4xi16> return %ext : vector<4xi16> } @@ -866,7 +866,7 @@ func.func @truncExtsiVector(%arg0: vector<2xi32>) -> vector<2xi16> { // CHECK: return %[[cres]] func.func @truncConstantSplat() -> vector<4xi8> { %c-2 = arith.constant -2 : i16 - %splat = vector.splat %c-2 : vector<4xi16> + %splat = vector.broadcast %c-2 : i16 to vector<4xi16> %trunc = arith.trunci %splat : vector<4xi16> to vector<4xi8> return %trunc : vector<4xi8> } @@ -2334,7 +2334,7 @@ func.func @constant_FPtoUI_splat() -> vector<4xi32> { // CHECK: %[[C0:.+]] = arith.constant dense<2> : vector<4xi32> // CHECK: return %[[C0]] %c0 = arith.constant 2.0 : f32 - %splat = vector.splat %c0 : vector<4xf32> + %splat = vector.broadcast %c0 : f32 to vector<4xf32> %res = arith.fptoui %splat : vector<4xf32> to vector<4xi32> return %res : vector<4xi32> } @@ -2374,7 +2374,7 @@ func.func @constant_FPtoSI_splat() -> vector<4xi32> { // CHECK: %[[C0:.+]] = arith.constant dense<-2> : vector<4xi32> // CHECK: return %[[C0]] %c0 = arith.constant -2.0 : f32 - %splat = vector.splat %c0 : vector<4xf32> + %splat = vector.broadcast %c0 : f32 to vector<4xf32> %res = arith.fptosi %splat : vector<4xf32> to vector<4xi32> return %res : vector<4xi32> } @@ -2413,7 +2413,7 @@ func.func @constant_SItoFP_splat() -> vector<4xf32> { // CHECK: %[[C0:.+]] = arith.constant dense<2.000000e+00> : vector<4xf32> // CHECK: return %[[C0]] %c0 = arith.constant 2 : i32 - %splat = vector.splat %c0 : vector<4xi32> + %splat = vector.broadcast %c0 : i32 to vector<4xi32> %res = arith.sitofp %splat : vector<4xi32> to vector<4xf32> return %res : vector<4xf32> } @@ -2442,7 +2442,7 @@ func.func @constant_UItoFP_splat() -> vector<4xf32> { // CHECK: %[[C0:.+]] = arith.constant dense<2.000000e+00> : vector<4xf32> // CHECK: return %[[C0]] %c0 = arith.constant 2 : i32 - %splat = vector.splat %c0 : vector<4xi32> + %splat = vector.broadcast %c0 : i32 to vector<4xi32> %res = arith.uitofp %splat : vector<4xi32> to vector<4xf32> 
return %res : vector<4xf32> } diff --git a/mlir/test/Dialect/Vector/vector-sink.mlir b/mlir/test/Dialect/Vector/vector-sink.mlir index ef881ba05a416..577b06df42929 100644 --- a/mlir/test/Dialect/Vector/vector-sink.mlir +++ b/mlir/test/Dialect/Vector/vector-sink.mlir @@ -40,7 +40,7 @@ func.func @broadcast_scalar_with_bcast_scalable(%arg1: index, %arg2: index) -> v // CHECK: %[[BCAST:.*]] = vector.broadcast %[[ADD]] : index to vector<1x4xindex> // CHECK: return %[[BCAST]] : vector<1x4xindex> func.func @broadcast_scalar_with_bcast_and_splat(%arg1: index, %arg2: index) -> vector<1x4xindex> { - %0 = vector.splat %arg1 : vector<1x4xindex> + %0 = vector.broadcast %arg1 : index to vector<1x4xindex> %1 = vector.broadcast %arg2 : index to vector<1x4xindex> %2 = arith.addi %0, %1 : vector<1x4xindex> return %2 : vector<1x4xindex> @@ -53,7 +53,7 @@ func.func @broadcast_scalar_with_bcast_and_splat(%arg1: index, %arg2: index) -> // CHECK: %[[BCAST:.*]] = vector.broadcast %[[ADD]] : index to vector<1x[4]xindex> // CHECK: return %[[BCAST]] : vector<1x[4]xindex> func.func @broadcast_scalar_with_bcast_and_splat_scalable(%arg1: index, %arg2: index) -> vector<1x[4]xindex> { - %0 = vector.splat %arg1 : vector<1x[4]xindex> + %0 = vector.broadcast %arg1 : index to vector<1x[4]xindex> %1 = vector.broadcast %arg2 : index to vector<1x[4]xindex> %2 = arith.addi %0, %1 : vector<1x[4]xindex> return %2 : vector<1x[4]xindex> @@ -94,12 +94,12 @@ func.func @broadcast_vector_scalable(%arg1: vector<[4]xf32>, %arg2: vector<[4]xf // CHECK-LABEL: func.func @broadcast_scalar_and_vec( // CHECK-SAME: %[[ARG1:.*]]: index, // CHECK-SAME: %[[ARG2:.*]]: vector<4xindex>) -> vector<1x4xindex> { -// CHECK: %[[SPLAT:.*]] = vector.splat %[[ARG1]] : vector<1x4xindex> +// CHECK: %[[SPLAT:.*]] = vector.broadcast %[[ARG1]] : index to vector<1x4xindex> // CHECK: %[[BCAST:.*]] = vector.broadcast %[[ARG2]] : vector<4xindex> to vector<1x4xindex> // CHECK: %[[ADD:.*]] = arith.addi %[[SPLAT]], %[[BCAST]] : vector<1x4xindex> 
// CHECK: return %[[ADD]] : vector<1x4xindex> func.func @broadcast_scalar_and_vec(%arg1: index, %arg2: vector<4xindex>) -> vector<1x4xindex> { - %0 = vector.splat %arg1 : vector<1x4xindex> + %0 = vector.broadcast %arg1 : index to vector<1x4xindex> %1 = vector.broadcast %arg2 : vector<4xindex> to vector<1x4xindex> %2 = arith.addi %0, %1 : vector<1x4xindex> return %2 : vector<1x4xindex> @@ -108,12 +108,12 @@ func.func @broadcast_scalar_and_vec(%arg1: index, %arg2: vector<4xindex>) -> vec // CHECK-LABEL: func.func @broadcast_scalar_and_vec_scalable( // CHECK-SAME: %[[ARG1:.*]]: index, // CHECK-SAME: %[[ARG2:.*]]: vector<[4]xindex>) -> vector<1x[4]xindex> { -// CHECK: %[[SPLAT:.*]] = vector.splat %[[ARG1]] : vector<1x[4]xindex> +// CHECK: %[[SPLAT:.*]] = vector.broadcast %[[ARG1]] : index to vector<1x[4]xindex> // CHECK: %[[BCAST:.*]] = vector.broadcast %[[ARG2]] : vector<[4]xindex> to vector<1x[4]xindex> // CHECK: %[[ADD:.*]] = arith.addi %[[SPLAT]], %[[BCAST]] : vector<1x[4]xindex> // CHECK: return %[[ADD]] : vector<1x[4]xindex> func.func @broadcast_scalar_and_vec_scalable(%arg1: index, %arg2: vector<[4]xindex>) -> vector<1x[4]xindex> { - %0 = vector.splat %arg1 : vector<1x[4]xindex> + %0 = vector.broadcast %arg1 : index to vector<1x[4]xindex> %1 = vector.broadcast %arg2 : vector<[4]xindex> to vector<1x[4]xindex> %2 = arith.addi %0, %1 : vector<1x[4]xindex> return %2 : vector<1x[4]xindex> @@ -787,7 +787,7 @@ func.func @negative_extract_load_scalable(%arg0: memref, %arg1: index) -> // CHECK-SAME: (%[[ARG0:.*]]: memref, %[[ARG1:.*]]: index, %[[ARG2:.*]]: f32) func.func @store_splat(%arg0: memref, %arg1: index, %arg2: f32) { // CHECK: memref.store %[[ARG2]], %[[ARG0]][%[[ARG1]]] : memref - %0 = vector.splat %arg2 : vector<1xf32> + %0 = vector.broadcast %arg2 : f32 to vector<1xf32> vector.store %0, %arg0[%arg1] : memref, vector<1xf32> return } @@ -813,9 +813,9 @@ func.func @store_broadcast_1d_to_2d(%arg0: memref, %arg1: index, %arg2: // CHECK-LABEL: 
@negative_store_scalable // CHECK-SAME: (%[[ARG0:.*]]: memref, %[[ARG1:.*]]: index, %[[ARG2:.*]]: f32) func.func @negative_store_scalable(%arg0: memref, %arg1: index, %arg2: f32) { -// CHECK: %[[RES:.*]] = vector.splat %[[ARG2]] : vector<[1]xf32> +// CHECK: %[[RES:.*]] = vector.broadcast %[[ARG2]] : f32 to vector<[1]xf32> // CHECK: vector.store %[[RES]], %[[ARG0]][%[[ARG1]]] : memref, vector<[1]xf32> - %0 = vector.splat %arg2 : vector<[1]xf32> + %0 = vector.broadcast %arg2 : f32 to vector<[1]xf32> vector.store %0, %arg0[%arg1] : memref, vector<[1]xf32> return } @@ -823,9 +823,9 @@ func.func @negative_store_scalable(%arg0: memref, %arg1: index, %arg2: f3 // CHECK-LABEL: @negative_store_memref_of_vec // CHECK-SAME: (%[[ARG0:.*]]: memref>, %[[ARG1:.*]]: index, %[[ARG2:.*]]: f32) func.func @negative_store_memref_of_vec(%arg0: memref>, %arg1: index, %arg2: f32) { -// CHECK: %[[RES:.*]] = vector.splat %[[ARG2]] : vector<1xf32> +// CHECK: %[[RES:.*]] = vector.broadcast %[[ARG2]] : f32 to vector<1xf32> // CHECK: vector.store %[[RES]], %[[ARG0]][%[[ARG1]]] : memref>, vector<1xf32> - %0 = vector.splat %arg2 : vector<1xf32> + %0 = vector.broadcast %arg2 : f32 to vector<1xf32> vector.store %0, %arg0[%arg1] : memref>, vector<1xf32> return } @@ -833,9 +833,9 @@ func.func @negative_store_memref_of_vec(%arg0: memref>, %arg1: i // CHECK-LABEL: @negative_store_more_than_one_element // CHECK-SAME: (%[[ARG0:.*]]: memref, %[[ARG1:.*]]: index, %[[ARG2:.*]]: f32) func.func @negative_store_more_than_one_element(%arg0: memref, %arg1: index, %arg2: f32) { -// CHECK: %[[RES:.*]] = vector.splat %[[ARG2]] : vector<4xf32> +// CHECK: %[[RES:.*]] = vector.broadcast %[[ARG2]] : f32 to vector<4xf32> // CHECK: vector.store %[[RES]], %[[ARG0]][%[[ARG1]]] : memref, vector<4xf32> - %0 = vector.splat %arg2 : vector<4xf32> + %0 = vector.broadcast %arg2 : f32 to vector<4xf32> vector.store %0, %arg0[%arg1] : memref, vector<4xf32> return } @@ -843,10 +843,10 @@ func.func 
@negative_store_more_than_one_element(%arg0: memref, %arg1: ind // CHECK-LABEL: @negative_store_no_single_use // CHECK-SAME: (%[[ARG0:.*]]: memref, %[[ARG1:.*]]: index, %[[ARG2:.*]]: f32) func.func @negative_store_no_single_use(%arg0: memref, %arg1: index, %arg2: f32) -> vector<1xf32> { -// CHECK: %[[RES:.*]] = vector.splat %[[ARG2]] : vector<1xf32> +// CHECK: %[[RES:.*]] = vector.broadcast %[[ARG2]] : f32 to vector<1xf32> // CHECK: vector.store %[[RES]], %[[ARG0]][%[[ARG1]]] : memref, vector<1xf32> // CHECK: return %[[RES:.*]] : vector<1xf32> - %0 = vector.splat %arg2 : vector<1xf32> + %0 = vector.broadcast %arg2 : f32 to vector<1xf32> vector.store %0, %arg0[%arg1] : memref, vector<1xf32> return %0 : vector<1xf32> } diff --git a/mlir/test/Dialect/Vector/vector-transfer-to-vector-load-store.mlir b/mlir/test/Dialect/Vector/vector-transfer-to-vector-load-store.mlir index 1b54d54ffbd9f..45afbffc1be48 100644 --- a/mlir/test/Dialect/Vector/vector-transfer-to-vector-load-store.mlir +++ b/mlir/test/Dialect/Vector/vector-transfer-to-vector-load-store.mlir @@ -285,19 +285,19 @@ func.func @transfer_read_permutations(%mem_0 : memref, %mem_1 : memref< %c0 = arith.constant 0 : index // CHECK: %[[MASK0:.*]] = vector.broadcast %{{.*}} : i1 to vector<14x7xi1> - %mask0 = vector.splat %m : vector<14x7xi1> + %mask0 = vector.broadcast %m : i1 to vector<14x7xi1> %0 = vector.transfer_read %mem_1[%c0, %c0, %c0, %c0], %cst, %mask0 {in_bounds = [true, false, true, true], permutation_map = #map0} : memref, vector<7x14x8x16xf32> // CHECK: vector.transfer_read {{.*}} %[[MASK0]] {in_bounds = [false, true, true, true], permutation_map = #[[$MAP0]]} : memref, vector<14x7x8x16xf32> // CHECK: vector.transpose %{{.*}}, [1, 0, 2, 3] : vector<14x7x8x16xf32> to vector<7x14x8x16xf32> // CHECK: %[[MASK1:.*]] = vector.broadcast %{{.*}} : i1 to vector<16x14xi1> - %mask1 = vector.splat %m : vector<16x14xi1> + %mask1 = vector.broadcast %m : i1 to vector<16x14xi1> %1 = vector.transfer_read %mem_1[%c0, %c0, 
%c0, %c0], %cst, %mask1 {in_bounds = [true, false, true, false], permutation_map = #map1} : memref, vector<7x14x8x16xf32> // CHECK: vector.transfer_read {{.*}} %[[MASK1]] {in_bounds = [false, false, true, true], permutation_map = #[[$MAP0]]} : memref, vector<16x14x7x8xf32> // CHECK: vector.transpose %{{.*}}, [2, 1, 3, 0] : vector<16x14x7x8xf32> to vector<7x14x8x16xf32> // CHECK: %[[MASK3:.*]] = vector.broadcast %{{.*}} : i1 to vector<14x7xi1> - %mask2 = vector.splat %m : vector<14x7xi1> + %mask2 = vector.broadcast %m : i1 to vector<14x7xi1> %2 = vector.transfer_read %mem_1[%c0, %c0, %c0, %c0], %cst, %mask2 {in_bounds = [true, false, true, true], permutation_map = #map2} : memref, vector<7x14x8x16xf32> // CHECK: vector.transfer_read {{.*}} %[[MASK3]] {in_bounds = [false, true, true], permutation_map = #[[$MAP1]]} : memref, vector<14x16x7xf32> // CHECK: vector.broadcast %{{.*}} : vector<14x16x7xf32> to vector<8x14x16x7xf32> @@ -337,7 +337,7 @@ func.func @transfer_write_permutations_tensor_masked( %c0 = arith.constant 0 : index // CHECK: %[[MASK:.*]] = vector.broadcast %[[M]] : i1 to vector<16x14x7x8xi1> - %mask0 = vector.splat %m : vector<16x14x7x8xi1> + %mask0 = vector.broadcast %m : i1 to vector<16x14x7x8xi1> %res = vector.transfer_write %vec, %dst[%c0, %c0, %c0, %c0], %mask0 {in_bounds = [true, false, false, true], permutation_map = affine_map<(d0, d1, d2, d3) -> (d2, d1, d3, d0)>} : vector<7x14x8x16xf32>, tensor // CHECK: %[[NEW_VEC0:.*]] = vector.transpose %{{.*}} [3, 1, 0, 2] : vector<7x14x8x16xf32> to vector<16x14x7x8xf32> // CHECK: %[[NEW_RES0:.*]] = vector.transfer_write %[[NEW_VEC0]], %[[DST]][%c0, %c0, %c0, %c0], %[[MASK]] {in_bounds = [true, false, true, false]} : vector<16x14x7x8xf32>, tensor diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/outerproduct-f32.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/outerproduct-f32.mlir index 0ee016627440f..219367a41d51a 100644 --- 
a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/outerproduct-f32.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/outerproduct-f32.mlir @@ -46,7 +46,7 @@ func.func @test_outerproduct_with_accumulator_4x4xf32() { %c0 = arith.constant 0 : index %f10 = arith.constant 10.0 : f32 - %acc = vector.splat %f10 : vector<[4]x[4]xf32> + %acc = vector.broadcast %f10 : f32 to vector<[4]x[4]xf32> %vector_i32 = llvm.intr.stepvector : vector<[4]xi32> %vector = arith.sitofp %vector_i32 : vector<[4]xi32> to vector<[4]xf32> %tile = vector.outerproduct %vector, %vector, %acc : vector<[4]xf32>, vector<[4]xf32> @@ -103,7 +103,7 @@ func.func @test_masked_outerproduct_with_accumulator_4x4xf32() { %ones = arith.constant dense<1> : vector<[4]xi32> %f10 = arith.constant 10.0 : f32 - %acc = vector.splat %f10 : vector<[4]x[4]xf32> + %acc = vector.broadcast %f10 : f32 to vector<[4]x[4]xf32> %step_vector = llvm.intr.stepvector : vector<[4]xi32> %vector_i32 = arith.addi %step_vector, %ones : vector<[4]xi32> %vector = arith.sitofp %vector_i32 : vector<[4]xi32> to vector<[4]xf32> diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/outerproduct-f64.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/outerproduct-f64.mlir index 8e812108c6055..059f24adbe721 100644 --- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/outerproduct-f64.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/outerproduct-f64.mlir @@ -52,7 +52,7 @@ func.func @test_outerproduct_with_accumulator_2x2xf64() { %ones = arith.constant dense<1> : vector<[2]xi32> %f10 = arith.constant 10.0 : f64 - %acc = vector.splat %f10 : vector<[2]x[2]xf64> + %acc = vector.broadcast %f10 : f64 to vector<[2]x[2]xf64> %step_vector = llvm.intr.stepvector : vector<[2]xi32> %vector_i32 = arith.addi %step_vector, %ones : vector<[2]xi32> %vector = arith.sitofp %vector_i32 : vector<[2]xi32> to vector<[2]xf64> @@ -108,7 +108,7 @@ func.func @test_masked_outerproduct_with_accumulator_2x2xf64() { %ones = arith.constant dense<1> : 
vector<[2]xi32> %f10 = arith.constant 10.0 : f64 - %acc = vector.splat %f10 : vector<[2]x[2]xf64> + %acc = vector.broadcast %f10 : f64 to vector<[2]x[2]xf64> %step_vector = llvm.intr.stepvector : vector<[2]xi32> %vector_i32 = arith.addi %step_vector, %ones : vector<[2]xi32> %vector = arith.sitofp %vector_i32 : vector<[2]xi32> to vector<[2]xf64> diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/transfer-write-2d.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/transfer-write-2d.mlir index c3bf379cde617..bf6900ca810c2 100644 --- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/transfer-write-2d.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/transfer-write-2d.mlir @@ -10,7 +10,7 @@ // Vector store. func.func @transfer_write_2d(%A : memref, %base1: index, %base2: index) { %c0 = arith.constant 0.0 : f32 - %zero = vector.splat %c0 : vector<[4]x[4]xf32> + %zero = vector.broadcast %c0 : f32 to vector<[4]x[4]xf32> vector.transfer_write %zero, %A[%base1, %base2] {in_bounds=[true, true]} : vector<[4]x[4]xf32>, memref return @@ -22,7 +22,7 @@ func.func @transfer_write_2d_mask(%A : memref, %base1: index, %base2: i %c2 = arith.constant 2 : index %c3 = arith.constant 3 : index %mask = vector.create_mask %c2, %c3 : vector<[4]x[4]xi1> - %zero = vector.splat %c0 : vector<[4]x[4]xf32> + %zero = vector.broadcast %c0 : f32 to vector<[4]x[4]xf32> vector.transfer_write %zero, %A[%base1, %base2], %mask {in_bounds=[true, true]} : vector<[4]x[4]xf32>, memref return diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction.mlir index c9904328763db..192f2911a3ee2 100644 --- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/contraction.mlir @@ -106,7 +106,7 @@ func.func @matvec_i32() { // val = (123 * 314) * 4 * vscale // so ... 
%vscale = vector.vscale - %vscale_v = vector.splat %vscale : vector<3xindex> + %vscale_v = vector.broadcast %vscale : index to vector<3xindex> %vscale_i32 = arith.index_cast %vscale_v : vector<3xindex> to vector<3xi32> %mv1_div = arith.divui %mv1, %vscale_i32 : vector<3xi32> // ... val / vscale = 123 * 314 * 4 = 154488 diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/scalable-interleave.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/scalable-interleave.mlir index d3b1fa4bbbc37..2d8180abdfec9 100644 --- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/scalable-interleave.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/scalable-interleave.mlir @@ -7,8 +7,8 @@ func.func @entry() { %f1 = arith.constant 1.0 : f32 %f2 = arith.constant 2.0 : f32 - %v1 = vector.splat %f1 : vector<[4]xf32> - %v2 = vector.splat %f2 : vector<[4]xf32> + %v1 = vector.broadcast %f1 : f32 to vector<[4]xf32> + %v2 = vector.broadcast %f2 : f32 to vector<[4]xf32> vector.print %v1 : vector<[4]xf32> vector.print %v2 : vector<[4]xf32> // diff --git a/mlir/test/Integration/Dialect/Vector/CPU/interleave.mlir b/mlir/test/Integration/Dialect/Vector/CPU/interleave.mlir index f812c25c0352f..740c742863919 100644 --- a/mlir/test/Integration/Dialect/Vector/CPU/interleave.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/interleave.mlir @@ -6,8 +6,8 @@ func.func @entry() { %f1 = arith.constant 1.0 : f32 %f2 = arith.constant 2.0 : f32 - %v1 = vector.splat %f1 : vector<2x4xf32> - %v2 = vector.splat %f2 : vector<2x4xf32> + %v1 = vector.broadcast %f1 : f32 to vector<2x4xf32> + %v2 = vector.broadcast %f2 : f32 to vector<2x4xf32> vector.print %v1 : vector<2x4xf32> vector.print %v2 : vector<2x4xf32> // diff --git a/mlir/test/Integration/Dialect/Vector/CPU/outerproduct-f32.mlir b/mlir/test/Integration/Dialect/Vector/CPU/outerproduct-f32.mlir index f7e2229321c00..e25795ab5f14d 100644 --- a/mlir/test/Integration/Dialect/Vector/CPU/outerproduct-f32.mlir +++ 
b/mlir/test/Integration/Dialect/Vector/CPU/outerproduct-f32.mlir @@ -14,9 +14,9 @@ !vector_type_R = vector<7xf32> func.func @vector_outerproduct_splat_8x8(%fa: f32, %fb: f32, %fc: f32) -> !vector_type_C { - %a = vector.splat %fa: !vector_type_A - %b = vector.splat %fb: !vector_type_B - %c = vector.splat %fc: !vector_type_C + %a = vector.broadcast %fa: f32 to !vector_type_A + %b = vector.broadcast %fb: f32 to !vector_type_B + %c = vector.broadcast %fc: f32 to !vector_type_C %d = vector.outerproduct %a, %b, %c : !vector_type_A, !vector_type_B return %d: !vector_type_C } diff --git a/mlir/test/Integration/Dialect/Vector/CPU/outerproduct-i64.mlir b/mlir/test/Integration/Dialect/Vector/CPU/outerproduct-i64.mlir index a19dfa1ce818e..0675102af2759 100644 --- a/mlir/test/Integration/Dialect/Vector/CPU/outerproduct-i64.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/outerproduct-i64.mlir @@ -14,9 +14,9 @@ !vector_type_R = vector<7xi64> func.func @vector_outerproduct_splat_8x8(%ia: i64, %ib: i64, %ic: i64) -> !vector_type_C { - %a = vector.splat %ia: !vector_type_A - %b = vector.splat %ib: !vector_type_B - %c = vector.splat %ic: !vector_type_C + %a = vector.broadcast %ia: i64 to !vector_type_A + %b = vector.broadcast %ib: i64 to !vector_type_B + %c = vector.broadcast %ic: i64 to !vector_type_C %d = vector.outerproduct %a, %b, %c : !vector_type_A, !vector_type_B return %d: !vector_type_C } diff --git a/mlir/test/Integration/Dialect/Vector/CPU/transfer-read-1d.mlir b/mlir/test/Integration/Dialect/Vector/CPU/transfer-read-1d.mlir index 639eed49e0d20..895b8818de767 100644 --- a/mlir/test/Integration/Dialect/Vector/CPU/transfer-read-1d.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/transfer-read-1d.mlir @@ -137,7 +137,7 @@ func.func @transfer_read_1d_mask_in_bounds( // Non-contiguous, strided store. 
func.func @transfer_write_1d(%A : memref, %base1 : index, %base2 : index) { %fn1 = arith.constant -1.0 : f32 - %vf0 = vector.splat %fn1 : vector<7xf32> + %vf0 = vector.broadcast %fn1 : f32 to vector<7xf32> vector.transfer_write %vf0, %A[%base1, %base2] {permutation_map = affine_map<(d0, d1) -> (d0)>} : vector<7xf32>, memref @@ -147,7 +147,7 @@ func.func @transfer_write_1d(%A : memref, %base1 : index, %base2 : inde // Non-contiguous, strided store. func.func @transfer_write_1d_mask(%A : memref, %base1 : index, %base2 : index) { %fn1 = arith.constant -2.0 : f32 - %vf0 = vector.splat %fn1 : vector<7xf32> + %vf0 = vector.broadcast %fn1 : f32 to vector<7xf32> %mask = arith.constant dense<[1, 0, 1, 0, 1, 1, 1]> : vector<7xi1> vector.transfer_write %vf0, %A[%base1, %base2], %mask {permutation_map = affine_map<(d0, d1) -> (d0)>} diff --git a/mlir/test/Integration/Dialect/Vector/CPU/transfer-read-2d.mlir b/mlir/test/Integration/Dialect/Vector/CPU/transfer-read-2d.mlir index 009c1375beaab..80dff9d791f4d 100644 --- a/mlir/test/Integration/Dialect/Vector/CPU/transfer-read-2d.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/transfer-read-2d.mlir @@ -100,7 +100,7 @@ func.func @transfer_read_2d_broadcast( // Vector store. 
func.func @transfer_write_2d(%A : memref, %base1: index, %base2: index) { %fn1 = arith.constant -1.0 : f32 - %vf0 = vector.splat %fn1 : vector<1x4xf32> + %vf0 = vector.broadcast %fn1 : f32 to vector<1x4xf32> vector.transfer_write %vf0, %A[%base1, %base2] {permutation_map = affine_map<(d0, d1) -> (d0, d1)>} : vector<1x4xf32>, memref @@ -111,7 +111,7 @@ func.func @transfer_write_2d(%A : memref, %base1: index, %base2: index) func.func @transfer_write_2d_mask(%A : memref, %base1: index, %base2: index) { %fn1 = arith.constant -2.0 : f32 %mask = arith.constant dense<[[1, 0, 1, 0]]> : vector<1x4xi1> - %vf0 = vector.splat %fn1 : vector<1x4xf32> + %vf0 = vector.broadcast %fn1 : f32 to vector<1x4xf32> vector.transfer_write %vf0, %A[%base1, %base2], %mask {permutation_map = affine_map<(d0, d1) -> (d0, d1)>} : vector<1x4xf32>, memref diff --git a/mlir/test/Integration/Dialect/Vector/CPU/transfer-read-3d.mlir b/mlir/test/Integration/Dialect/Vector/CPU/transfer-read-3d.mlir index d41d9c93bc2b3..93e6a12365268 100644 --- a/mlir/test/Integration/Dialect/Vector/CPU/transfer-read-3d.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/transfer-read-3d.mlir @@ -62,7 +62,7 @@ func.func @transfer_read_3d_transposed(%A : memref, func.func @transfer_write_3d(%A : memref, %o: index, %a: index, %b: index, %c: index) { %fn1 = arith.constant -1.0 : f32 - %vf0 = vector.splat %fn1 : vector<2x9x3xf32> + %vf0 = vector.broadcast %fn1 : f32 to vector<2x9x3xf32> vector.transfer_write %vf0, %A[%o, %a, %b, %c] : vector<2x9x3xf32>, memref return diff --git a/mlir/test/Integration/Dialect/Vector/CPU/transfer-read.mlir b/mlir/test/Integration/Dialect/Vector/CPU/transfer-read.mlir index d1a2790a7db61..18084e31c0ea0 100644 --- a/mlir/test/Integration/Dialect/Vector/CPU/transfer-read.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/transfer-read.mlir @@ -45,7 +45,7 @@ func.func @transfer_read_mask_inbounds_4(%A : memref, %base: index) { func.func @transfer_write_1d(%A : memref, %base: index) { %f0 = 
arith.constant 0.0 : f32 - %vf0 = vector.splat %f0 : vector<4xf32> + %vf0 = vector.broadcast %f0 : f32 to vector<4xf32> vector.transfer_write %vf0, %A[%base] {permutation_map = affine_map<(d0) -> (d0)>} : vector<4xf32>, memref diff --git a/mlir/test/Integration/Dialect/Vector/CPU/transfer-write.mlir b/mlir/test/Integration/Dialect/Vector/CPU/transfer-write.mlir index def708103fab1..225173869edb9 100644 --- a/mlir/test/Integration/Dialect/Vector/CPU/transfer-write.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/transfer-write.mlir @@ -5,7 +5,7 @@ func.func @transfer_write16_inbounds_1d(%A : memref, %base: index) { %f = arith.constant 16.0 : f32 - %v = vector.splat %f : vector<16xf32> + %v = vector.broadcast %f : f32 to vector<16xf32> vector.transfer_write %v, %A[%base] {permutation_map = affine_map<(d0) -> (d0)>, in_bounds = [true]} : vector<16xf32>, memref @@ -14,7 +14,7 @@ func.func @transfer_write16_inbounds_1d(%A : memref, %base: index) { func.func @transfer_write13_1d(%A : memref, %base: index) { %f = arith.constant 13.0 : f32 - %v = vector.splat %f : vector<13xf32> + %v = vector.broadcast %f : f32 to vector<13xf32> vector.transfer_write %v, %A[%base] {permutation_map = affine_map<(d0) -> (d0)>} : vector<13xf32>, memref @@ -23,7 +23,7 @@ func.func @transfer_write13_1d(%A : memref, %base: index) { func.func @transfer_write17_1d(%A : memref, %base: index) { %f = arith.constant 17.0 : f32 - %v = vector.splat %f : vector<17xf32> + %v = vector.broadcast %f : f32 to vector<17xf32> vector.transfer_write %v, %A[%base] {permutation_map = affine_map<(d0) -> (d0)>} : vector<17xf32>, memref @@ -42,7 +42,7 @@ func.func @transfer_read_1d(%A : memref) -> vector<32xf32> { func.func @transfer_write_inbounds_3d(%A : memref<4x4x4xf32>) { %c0 = arith.constant 0: index %f = arith.constant 0.0 : f32 - %v0 = vector.splat %f : vector<2x3x4xf32> + %v0 = vector.broadcast %f : f32 to vector<2x3x4xf32> %f1 = arith.constant 1.0 : f32 %f2 = arith.constant 2.0 : f32 %f3 = arith.constant 
3.0 : f32