From 45d715a7e6776f3469ede0781c4729fd56eb4688 Mon Sep 17 00:00:00 2001
From: Rishabh Bali
Date: Fri, 21 Feb 2025 14:14:06 +0530
Subject: [PATCH] Add IndexBitWidth option to vector-to-llvm pass

Change-Id: I1ad6f77183f1f1faf25e935131de4ef3a4334150
---
 mlir/include/mlir/Conversion/Passes.td        |   3 +
 .../VectorToLLVM/ConvertVectorToLLVM.cpp      |  23 +-
 .../VectorToLLVM/ConvertVectorToLLVMPass.cpp  |  12 +-
 .../vector-datalayout-bitwidth.mlir           |  25 ++
 .../VectorToLLVM/vector-index-bitwidth.mlir   | 375 ++++++++++++++++++
 .../vector-to-llvm-interface.mlir             |   6 +-
 6 files changed, 427 insertions(+), 17 deletions(-)
 create mode 100644 mlir/test/Conversion/VectorToLLVM/vector-datalayout-bitwidth.mlir
 create mode 100644 mlir/test/Conversion/VectorToLLVM/vector-index-bitwidth.mlir

diff --git a/mlir/include/mlir/Conversion/Passes.td b/mlir/include/mlir/Conversion/Passes.td
index cccdf0a8518bf..20eb6392daf49 100644
--- a/mlir/include/mlir/Conversion/Passes.td
+++ b/mlir/include/mlir/Conversion/Passes.td
@@ -1414,6 +1414,9 @@ def ConvertVectorToLLVMPass : Pass<"convert-vector-to-llvm"> {
            "vector::VectorTransformsOptions",
            /*default=*/"vector::VectorTransformsOptions()",
            "Options to lower some operations like contractions and transposes.">,
+    Option<"indexBitwidth", "index-bitwidth", "unsigned",
+           /*default=kDeriveIndexBitwidthFromDataLayout*/"0",
+           "Bitwidth of the index type, 0 to use size of machine word">,
   ];
 }
 
diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
index c9d637ce81f93..ba4b60e4157cf 100644
--- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
+++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
@@ -1439,8 +1439,6 @@ class VectorTypeCastOpConversion
     if (llvm::any_of(*targetStrides, ShapedType::isDynamic))
       return failure();
 
-    auto int64Ty = IntegerType::get(rewriter.getContext(), 64);
-
     // Create descriptor.
     auto desc = MemRefDescriptor::poison(rewriter, loc, llvmTargetDescriptorTy);
     // Set allocated ptr.
@@ -1451,21 +1449,26 @@ class VectorTypeCastOpConversion
     Value ptr = sourceMemRef.alignedPtr(rewriter, loc);
     desc.setAlignedPtr(rewriter, loc, ptr);
     // Fill offset 0.
-    auto attr = rewriter.getIntegerAttr(rewriter.getIndexType(), 0);
-    auto zero = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, attr);
+
+    auto idxType = rewriter.getIndexType();
+    auto zero = rewriter.create<LLVM::ConstantOp>(
+        loc, typeConverter->convertType(idxType),
+        rewriter.getIntegerAttr(idxType, 0));
     desc.setOffset(rewriter, loc, zero);
 
     // Fill size and stride descriptors in memref.
     for (const auto &indexedSize :
          llvm::enumerate(targetMemRefType.getShape())) {
       int64_t index = indexedSize.index();
-      auto sizeAttr =
-          rewriter.getIntegerAttr(rewriter.getIndexType(), indexedSize.value());
-      auto size = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, sizeAttr);
+
+      auto size = rewriter.create<LLVM::ConstantOp>(
+          loc, typeConverter->convertType(idxType),
+          rewriter.getIntegerAttr(idxType, indexedSize.value()));
       desc.setSize(rewriter, loc, index, size);
-      auto strideAttr = rewriter.getIntegerAttr(rewriter.getIndexType(),
-                                                (*targetStrides)[index]);
-      auto stride = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, strideAttr);
+
+      auto stride = rewriter.create<LLVM::ConstantOp>(
+          loc, typeConverter->convertType(idxType),
+          rewriter.getIntegerAttr(idxType, (*targetStrides)[index]));
       desc.setStride(rewriter, loc, index, stride);
     }
 
diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVMPass.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVMPass.cpp
index e3a81bd20212d..77a7d2b991df8 100644
--- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVMPass.cpp
+++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVMPass.cpp
@@ -7,7 +7,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVMPass.h"
-
+#include "mlir/Analysis/DataLayoutAnalysis.h"
 #include "mlir/Conversion/LLVMCommon/ConversionTarget.h"
 #include "mlir/Conversion/LLVMCommon/TypeConverter.h"
 #include "mlir/Dialect/AMX/AMXDialect.h"
@@ -64,6 +64,8 @@ void ConvertVectorToLLVMPass::runOnOperation() {
   // Perform progressive lowering of operations on slices and all contraction
   // operations. Also materializes masks, lowers vector.step, rank-reduces FMA,
   // applies folding and DCE.
+  Operation *op = getOperation();
+  const auto &dataLayoutAnalysis = getAnalysis<DataLayoutAnalysis>();
   {
     RewritePatternSet patterns(&getContext());
     populateVectorToVectorCanonicalizationPatterns(patterns);
@@ -83,10 +85,12 @@ void ConvertVectorToLLVMPass::runOnOperation() {
     populateVectorRankReducingFMAPattern(patterns);
     (void)applyPatternsGreedily(getOperation(), std::move(patterns));
   }
-
   // Convert to the LLVM IR dialect.
-  LowerToLLVMOptions options(&getContext());
-  LLVMTypeConverter converter(&getContext(), options);
+  LowerToLLVMOptions options(&getContext(),
+                             dataLayoutAnalysis.getAtOrAbove(op));
+  if (indexBitwidth != kDeriveIndexBitwidthFromDataLayout)
+    options.overrideIndexBitwidth(indexBitwidth);
+  LLVMTypeConverter converter(&getContext(), options, &dataLayoutAnalysis);
   RewritePatternSet patterns(&getContext());
   populateVectorTransferLoweringPatterns(patterns);
   populateVectorToLLVMMatrixConversionPatterns(converter, patterns);
diff --git a/mlir/test/Conversion/VectorToLLVM/vector-datalayout-bitwidth.mlir b/mlir/test/Conversion/VectorToLLVM/vector-datalayout-bitwidth.mlir
new file mode 100644
index 0000000000000..edf1524558eb0
--- /dev/null
+++ b/mlir/test/Conversion/VectorToLLVM/vector-datalayout-bitwidth.mlir
@@ -0,0 +1,25 @@
+// RUN: mlir-opt %s -convert-vector-to-llvm -split-input-file | FileCheck %s
+
+module attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<index, 32 : i32>>} {
+// CHECK-LABEL: func.func @broadcast_vec2d_from_vec0d(
+// CHECK-SAME: %[[VAL_0:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: vector<f32>) -> vector<3x2xf32> {
+// CHECK: %[[VAL_1:.*]] = builtin.unrealized_conversion_cast %[[VAL_0]] : vector<f32> to vector<1xf32>
+// CHECK: %[[VAL_2:.*]] = ub.poison : vector<3x2xf32>
+// CHECK: %[[VAL_3:.*]] = builtin.unrealized_conversion_cast %[[VAL_2]] : vector<3x2xf32> to !llvm.array<3 x vector<2xf32>>
+// CHECK: %[[VAL_4:.*]] = llvm.mlir.constant(0 : index) : i32
+// CHECK: %[[VAL_5:.*]] = llvm.extractelement %[[VAL_1]]{{\[}}%[[VAL_4]] : i32] : vector<1xf32>
+// CHECK: %[[VAL_6:.*]] = llvm.mlir.poison : vector<2xf32>
+// CHECK: %[[VAL_7:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[VAL_8:.*]] = llvm.insertelement %[[VAL_5]], %[[VAL_6]]{{\[}}%[[VAL_7]] : i32] : vector<2xf32>
+// CHECK: %[[VAL_9:.*]] = llvm.shufflevector %[[VAL_8]], %[[VAL_6]] [0, 0] : vector<2xf32>
+// CHECK: %[[VAL_10:.*]] = llvm.insertvalue %[[VAL_9]], %[[VAL_3]][0] : !llvm.array<3 x vector<2xf32>>
+// CHECK: %[[VAL_11:.*]] = llvm.insertvalue %[[VAL_9]], %[[VAL_10]][1] : !llvm.array<3 x vector<2xf32>>
+// CHECK: %[[VAL_12:.*]] = llvm.insertvalue %[[VAL_9]], %[[VAL_11]][2] : !llvm.array<3 x vector<2xf32>>
+// CHECK: %[[VAL_13:.*]] = builtin.unrealized_conversion_cast %[[VAL_12]] : !llvm.array<3 x vector<2xf32>> to vector<3x2xf32>
+// CHECK: return %[[VAL_13]] : vector<3x2xf32>
+// CHECK: }
+func.func @broadcast_vec2d_from_vec0d(%arg0: vector<f32>) -> vector<3x2xf32> {
+  %0 = vector.broadcast %arg0 : vector<f32> to vector<3x2xf32>
+  return %0 : vector<3x2xf32>
+}
+}
diff --git a/mlir/test/Conversion/VectorToLLVM/vector-index-bitwidth.mlir b/mlir/test/Conversion/VectorToLLVM/vector-index-bitwidth.mlir
new file mode 100644
index 0000000000000..696113d22ade3
--- /dev/null
+++ b/mlir/test/Conversion/VectorToLLVM/vector-index-bitwidth.mlir
@@ -0,0 +1,375 @@
+// RUN: mlir-opt %s -convert-vector-to-llvm='index-bitwidth=32' -split-input-file | FileCheck %s
+
+// CHECK-LABEL: func.func @masked_reduce_add_f32_scalable(
+// CHECK-SAME: %[[VAL_0:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: vector<[16]xf32>,
+// CHECK-SAME: %[[VAL_1:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: vector<[16]xi1>) -> f32 {
+// CHECK: %[[VAL_2:.*]] = llvm.mlir.constant(0.000000e+00 : f32) : f32
+// CHECK: %[[VAL_3:.*]] = llvm.mlir.constant(16 : i32) : i32
+// CHECK: %[[VAL_4:.*]] = "llvm.intr.vscale"() : () -> i32
+// CHECK: %[[VAL_5:.*]] = builtin.unrealized_conversion_cast %[[VAL_4]] : i32 to index
+// CHECK: %[[VAL_6:.*]] = arith.index_cast %[[VAL_5]] : index to i32
+// CHECK: %[[VAL_7:.*]] = arith.muli %[[VAL_3]], %[[VAL_6]] : i32
+// CHECK: %[[VAL_8:.*]] = "llvm.intr.vp.reduce.fadd"(%[[VAL_2]], %[[VAL_0]], %[[VAL_1]], %[[VAL_7]]) : (f32, vector<[16]xf32>, vector<[16]xi1>, i32) -> f32
+// CHECK: return %[[VAL_8]] : f32
+// CHECK: }
+func.func @masked_reduce_add_f32_scalable(%arg0: vector<[16]xf32>, %mask : vector<[16]xi1>) -> f32 {
+  %0 = vector.mask %mask { vector.reduction <add>, %arg0 : vector<[16]xf32> into f32 } : vector<[16]xi1> -> f32
+  return %0 : f32
+}
+
+// -----
+
+// CHECK-LABEL: func.func @shuffle_1D(
+// CHECK-SAME: %[[VAL_0:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: vector<2xf32>,
+// CHECK-SAME: %[[VAL_1:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: vector<3xf32>) -> vector<5xf32> {
+// CHECK: %[[VAL_2:.*]] = llvm.mlir.poison : vector<5xf32>
+// CHECK: %[[VAL_3:.*]] = llvm.mlir.constant(2 : index) : i32
+// CHECK: %[[VAL_4:.*]] = llvm.extractelement %[[VAL_1]]{{\[}}%[[VAL_3]] : i32] : vector<3xf32>
+// CHECK: %[[VAL_5:.*]] = llvm.mlir.constant(0 : index) : i32
+// CHECK: %[[VAL_6:.*]] = llvm.insertelement %[[VAL_4]], %[[VAL_2]]{{\[}}%[[VAL_5]] : i32] : vector<5xf32>
+// CHECK: %[[VAL_7:.*]] = llvm.mlir.constant(1 : index) : i32
+// CHECK: %[[VAL_8:.*]] = llvm.extractelement %[[VAL_1]]{{\[}}%[[VAL_7]] : i32] : vector<3xf32>
+// CHECK: %[[VAL_9:.*]] = llvm.mlir.constant(1 : index) : i32
+// CHECK: %[[VAL_10:.*]] = llvm.insertelement %[[VAL_8]], %[[VAL_6]]{{\[}}%[[VAL_9]] : i32] : vector<5xf32>
+// CHECK: %[[VAL_11:.*]] = llvm.mlir.constant(0 : index) : i32
+// CHECK: %[[VAL_12:.*]] = llvm.extractelement %[[VAL_1]]{{\[}}%[[VAL_11]] : i32] : vector<3xf32>
+// CHECK: %[[VAL_13:.*]] = llvm.mlir.constant(2 : index) : i32
+// CHECK: %[[VAL_14:.*]] = llvm.insertelement %[[VAL_12]], %[[VAL_10]]{{\[}}%[[VAL_13]] : i32] : vector<5xf32>
+// CHECK: %[[VAL_15:.*]] = llvm.mlir.constant(1 : index) : i32
+// CHECK: %[[VAL_16:.*]] = llvm.extractelement %[[VAL_0]]{{\[}}%[[VAL_15]] : i32] : vector<2xf32>
+// CHECK: %[[VAL_17:.*]] = llvm.mlir.constant(3 : index) : i32
+// CHECK: %[[VAL_18:.*]] = llvm.insertelement %[[VAL_16]], %[[VAL_14]]{{\[}}%[[VAL_17]] : i32] : vector<5xf32>
+// CHECK: %[[VAL_19:.*]] = llvm.mlir.constant(0 : index) : i32
+// CHECK: %[[VAL_20:.*]] = llvm.extractelement %[[VAL_0]]{{\[}}%[[VAL_19]] : i32] : vector<2xf32>
+// CHECK: %[[VAL_21:.*]] = llvm.mlir.constant(4 : index) : i32
+// CHECK: %[[VAL_22:.*]] = llvm.insertelement %[[VAL_20]], %[[VAL_18]]{{\[}}%[[VAL_21]] : i32] : vector<5xf32>
+// CHECK: return %[[VAL_22]] : vector<5xf32>
+// CHECK: }
+func.func @shuffle_1D(%arg0: vector<2xf32>, %arg1: vector<3xf32>) -> vector<5xf32> {
+  %1 = vector.shuffle %arg0, %arg1 [4, 3, 2, 1, 0] : vector<2xf32>, vector<3xf32>
+  return %1 : vector<5xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func.func @extractelement_from_vec_0d_f32(
+// CHECK-SAME: %[[VAL_0:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: vector<f32>) -> f32 {
+// CHECK: %[[VAL_1:.*]] = builtin.unrealized_conversion_cast %[[VAL_0]] : vector<f32> to vector<1xf32>
+// CHECK: %[[VAL_2:.*]] = llvm.mlir.constant(0 : index) : i32
+// CHECK: %[[VAL_3:.*]] = llvm.extractelement %[[VAL_1]]{{\[}}%[[VAL_2]] : i32] : vector<1xf32>
+// CHECK: return %[[VAL_3]] : f32
+// CHECK: }
+func.func @extractelement_from_vec_0d_f32(%arg0: vector<f32>) -> f32 {
+  %1 = vector.extractelement %arg0[] : vector<f32>
+  return %1 : f32
+}
+
+// -----
+
+// CHECK-LABEL: func.func @insertelement_into_vec_0d_f32(
+// CHECK-SAME: %[[VAL_0:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: f32,
+// CHECK-SAME: %[[VAL_1:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: vector<f32>) -> vector<f32> {
+// CHECK: %[[VAL_2:.*]] = builtin.unrealized_conversion_cast %[[VAL_1]] : vector<f32> to vector<1xf32>
+// CHECK: %[[VAL_3:.*]] = llvm.mlir.constant(0 : index) : i32
+// CHECK: %[[VAL_4:.*]] = llvm.insertelement %[[VAL_0]], %[[VAL_2]]{{\[}}%[[VAL_3]] : i32] : vector<1xf32>
+// CHECK: %[[VAL_5:.*]] = builtin.unrealized_conversion_cast %[[VAL_4]] : vector<1xf32> to vector<f32>
+// CHECK: return %[[VAL_5]] : vector<f32>
+// CHECK: }
+func.func @insertelement_into_vec_0d_f32(%arg0: f32, %arg1: vector<f32>) -> vector<f32> {
+  %1 = vector.insertelement %arg0, %arg1[] : vector<f32>
+  return %1 : vector<f32>
+}
+
+// -----
+
+// CHECK-LABEL: func.func @type_cast_f32(
+// CHECK-SAME: %[[VAL_0:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: memref<8x8x8xf32>) -> memref<vector<8x8x8xf32>> {
+// CHECK: %[[VAL_1:.*]] = builtin.unrealized_conversion_cast %[[VAL_0]] : memref<8x8x8xf32> to !llvm.struct<(ptr, ptr, i32, array<3 x i32>, array<3 x i32>)>
+// CHECK: %[[VAL_2:.*]] = llvm.mlir.poison : !llvm.struct<(ptr, ptr, i32)>
+// CHECK: %[[VAL_3:.*]] = llvm.extractvalue %[[VAL_1]][0] : !llvm.struct<(ptr, ptr, i32, array<3 x i32>, array<3 x i32>)>
+// CHECK: %[[VAL_4:.*]] = llvm.insertvalue %[[VAL_3]], %[[VAL_2]][0] : !llvm.struct<(ptr, ptr, i32)>
+// CHECK: %[[VAL_5:.*]] = llvm.extractvalue %[[VAL_1]][1] : !llvm.struct<(ptr, ptr, i32, array<3 x i32>, array<3 x i32>)>
+// CHECK: %[[VAL_6:.*]] = llvm.insertvalue %[[VAL_5]], %[[VAL_4]][1] : !llvm.struct<(ptr, ptr, i32)>
+// CHECK: %[[VAL_7:.*]] = llvm.mlir.constant(0 : index) : i32
+// CHECK: %[[VAL_8:.*]] = llvm.insertvalue %[[VAL_7]], %[[VAL_6]][2] : !llvm.struct<(ptr, ptr, i32)>
+// CHECK: %[[VAL_9:.*]] = builtin.unrealized_conversion_cast %[[VAL_8]] : !llvm.struct<(ptr, ptr, i32)> to memref<vector<8x8x8xf32>>
+// CHECK: return %[[VAL_9]] : memref<vector<8x8x8xf32>>
+// CHECK: }
+func.func @type_cast_f32(%arg0: memref<8x8x8xf32>) -> memref<vector<8x8x8xf32>> {
+  %0 = vector.type_cast %arg0: memref<8x8x8xf32> to memref<vector<8x8x8xf32>>
+  return %0 : memref<vector<8x8x8xf32>>
+}
+
+// -----
+
+// CHECK-LABEL: func.func @type_cast_non_zero_addrspace(
+// CHECK-SAME: %[[VAL_0:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: memref<8x8x8xf32, 3>) -> memref<vector<8x8x8xf32>, 3> {
+// CHECK: %[[VAL_1:.*]] = builtin.unrealized_conversion_cast %[[VAL_0]] : memref<8x8x8xf32, 3> to !llvm.struct<(ptr<3>, ptr<3>, i32, array<3 x i32>, array<3 x i32>)>
+// CHECK: %[[VAL_2:.*]] = llvm.mlir.poison : !llvm.struct<(ptr<3>, ptr<3>, i32)>
+// CHECK: %[[VAL_3:.*]] = llvm.extractvalue %[[VAL_1]][0] : !llvm.struct<(ptr<3>, ptr<3>, i32, array<3 x i32>, array<3 x i32>)>
+// CHECK: %[[VAL_4:.*]] = llvm.insertvalue %[[VAL_3]], %[[VAL_2]][0] : !llvm.struct<(ptr<3>, ptr<3>, i32)>
+// CHECK: %[[VAL_5:.*]] = llvm.extractvalue %[[VAL_1]][1] : !llvm.struct<(ptr<3>, ptr<3>, i32, array<3 x i32>, array<3 x i32>)>
+// CHECK: %[[VAL_6:.*]] = llvm.insertvalue %[[VAL_5]], %[[VAL_4]][1] : !llvm.struct<(ptr<3>, ptr<3>, i32)>
+// CHECK: %[[VAL_7:.*]] = llvm.mlir.constant(0 : index) : i32
+// CHECK: %[[VAL_8:.*]] = llvm.insertvalue %[[VAL_7]], %[[VAL_6]][2] : !llvm.struct<(ptr<3>, ptr<3>, i32)>
+// CHECK: %[[VAL_9:.*]] = builtin.unrealized_conversion_cast %[[VAL_8]] : !llvm.struct<(ptr<3>, ptr<3>, i32)> to memref<vector<8x8x8xf32>, 3>
+// CHECK: return %[[VAL_9]] : memref<vector<8x8x8xf32>, 3>
+// CHECK: }
+func.func @type_cast_non_zero_addrspace(%arg0: memref<8x8x8xf32, 3>) -> memref<vector<8x8x8xf32>, 3> {
+  %0 = vector.type_cast %arg0: memref<8x8x8xf32, 3> to memref<vector<8x8x8xf32>, 3>
+  return %0 : memref<vector<8x8x8xf32>, 3>
+}
+
+// -----
+
+// CHECK-LABEL: func.func @broadcast_vec1d_from_index(
+// CHECK-SAME: %[[VAL_0:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: index) -> vector<2xindex> {
+// CHECK: %[[VAL_1:.*]] = builtin.unrealized_conversion_cast %[[VAL_0]] : index to i32
+// CHECK: %[[VAL_2:.*]] = llvm.mlir.poison : vector<2xi32>
+// CHECK: %[[VAL_3:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[VAL_4:.*]] = llvm.insertelement %[[VAL_1]], %[[VAL_2]]{{\[}}%[[VAL_3]] : i32] : vector<2xi32>
+// CHECK: %[[VAL_5:.*]] = llvm.shufflevector %[[VAL_4]], %[[VAL_2]] [0, 0] : vector<2xi32>
+// CHECK: %[[VAL_6:.*]] = builtin.unrealized_conversion_cast %[[VAL_5]] : vector<2xi32> to vector<2xindex>
+// CHECK: return %[[VAL_6]] : vector<2xindex>
+// CHECK: }
+func.func @broadcast_vec1d_from_index(%arg0: index) -> vector<2xindex> {
+  %0 = vector.broadcast %arg0 : index to vector<2xindex>
+  return %0 : vector<2xindex>
+}
+
+// -----
+
+// CHECK-LABEL: func.func @broadcast_vec2d_from_vec0d(
+// CHECK-SAME: %[[VAL_0:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: vector<f32>) -> vector<3x2xf32> {
+// CHECK: %[[VAL_1:.*]] = builtin.unrealized_conversion_cast %[[VAL_0]] : vector<f32> to vector<1xf32>
+// CHECK: %[[VAL_2:.*]] = ub.poison : vector<3x2xf32>
+// CHECK: %[[VAL_3:.*]] = builtin.unrealized_conversion_cast %[[VAL_2]] : vector<3x2xf32> to !llvm.array<3 x vector<2xf32>>
+// CHECK: %[[VAL_4:.*]] = llvm.mlir.constant(0 : index) : i32
+// CHECK: %[[VAL_5:.*]] = llvm.extractelement %[[VAL_1]]{{\[}}%[[VAL_4]] : i32] : vector<1xf32>
+// CHECK: %[[VAL_6:.*]] = llvm.mlir.poison : vector<2xf32>
+// CHECK: %[[VAL_7:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[VAL_8:.*]] = llvm.insertelement %[[VAL_5]], %[[VAL_6]]{{\[}}%[[VAL_7]] : i32] : vector<2xf32>
+// CHECK: %[[VAL_9:.*]] = llvm.shufflevector %[[VAL_8]], %[[VAL_6]] [0, 0] : vector<2xf32>
+// CHECK: %[[VAL_10:.*]] = llvm.insertvalue %[[VAL_9]], %[[VAL_3]][0] : !llvm.array<3 x vector<2xf32>>
+// CHECK: %[[VAL_11:.*]] = llvm.insertvalue %[[VAL_9]], %[[VAL_10]][1] : !llvm.array<3 x vector<2xf32>>
+// CHECK: %[[VAL_12:.*]] = llvm.insertvalue %[[VAL_9]], %[[VAL_11]][2] : !llvm.array<3 x vector<2xf32>>
+// CHECK: %[[VAL_13:.*]] = builtin.unrealized_conversion_cast %[[VAL_12]] : !llvm.array<3 x vector<2xf32>> to vector<3x2xf32>
+// CHECK: return %[[VAL_13]] : vector<3x2xf32>
+// CHECK: }
+func.func @broadcast_vec2d_from_vec0d(%arg0: vector<f32>) -> vector<3x2xf32> {
+  %0 = vector.broadcast %arg0 : vector<f32> to vector<3x2xf32>
+  return %0 : vector<3x2xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func.func @broadcast_vec2d_from_index_vec1d(
+// CHECK-SAME: %[[VAL_0:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: vector<2xindex>) -> vector<3x2xindex> {
+// CHECK: %[[VAL_1:.*]] = builtin.unrealized_conversion_cast %[[VAL_0]] : vector<2xindex> to vector<2xi32>
+// CHECK: %[[VAL_2:.*]] = ub.poison : vector<3x2xindex>
+// CHECK: %[[VAL_3:.*]] = builtin.unrealized_conversion_cast %[[VAL_2]] : vector<3x2xindex> to !llvm.array<3 x vector<2xi32>>
+// CHECK: %[[VAL_4:.*]] = llvm.insertvalue %[[VAL_1]], %[[VAL_3]][0] : !llvm.array<3 x vector<2xi32>>
+// CHECK: %[[VAL_5:.*]] = llvm.insertvalue %[[VAL_1]], %[[VAL_4]][1] : !llvm.array<3 x vector<2xi32>>
+// CHECK: %[[VAL_6:.*]] = llvm.insertvalue %[[VAL_1]], %[[VAL_5]][2] : !llvm.array<3 x vector<2xi32>>
+// CHECK: %[[VAL_7:.*]] = builtin.unrealized_conversion_cast %[[VAL_6]] : !llvm.array<3 x vector<2xi32>> to vector<3x2xindex>
+// CHECK: return %[[VAL_7]] : vector<3x2xindex>
+// CHECK: }
+func.func @broadcast_vec2d_from_index_vec1d(%arg0: vector<2xindex>) -> vector<3x2xindex> {
+  %0 = vector.broadcast %arg0 : vector<2xindex> to vector<3x2xindex>
+  return %0 : vector<3x2xindex>
+}
+
+// -----
+
+// CHECK-LABEL: func.func @outerproduct_index(
+// CHECK-SAME: %[[VAL_0:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: vector<2xindex>,
+// CHECK-SAME: %[[VAL_1:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: vector<3xindex>) -> vector<2x3xindex> {
+// CHECK: %[[VAL_2:.*]] = builtin.unrealized_conversion_cast %[[VAL_0]] : vector<2xindex> to vector<2xi32>
+// CHECK: %[[VAL_3:.*]] = arith.constant dense<0> : vector<2x3xindex>
+// CHECK: %[[VAL_4:.*]] = builtin.unrealized_conversion_cast %[[VAL_3]] : vector<2x3xindex> to !llvm.array<2 x vector<3xi32>>
+// CHECK: %[[VAL_5:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[VAL_6:.*]] = llvm.extractelement %[[VAL_2]]{{\[}}%[[VAL_5]] : i64] : vector<2xi32>
+// CHECK: %[[VAL_7:.*]] = llvm.mlir.poison : vector<3xi32>
+// CHECK: %[[VAL_8:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[VAL_9:.*]] = llvm.insertelement %[[VAL_6]], %[[VAL_7]]{{\[}}%[[VAL_8]] : i32] : vector<3xi32>
+// CHECK: %[[VAL_10:.*]] = llvm.shufflevector %[[VAL_9]], %[[VAL_7]] [0, 0, 0] : vector<3xi32>
+// CHECK: %[[VAL_11:.*]] = builtin.unrealized_conversion_cast %[[VAL_10]] : vector<3xi32> to vector<3xindex>
+// CHECK: %[[VAL_12:.*]] = arith.muli %[[VAL_11]], %[[VAL_1]] : vector<3xindex>
+// CHECK: %[[VAL_13:.*]] = builtin.unrealized_conversion_cast %[[VAL_12]] : vector<3xindex> to vector<3xi32>
+// CHECK: %[[VAL_14:.*]] = llvm.insertvalue %[[VAL_13]], %[[VAL_4]][0] : !llvm.array<2 x vector<3xi32>>
+// CHECK: %[[VAL_15:.*]] = llvm.mlir.constant(1 : i64) : i64
+// CHECK: %[[VAL_16:.*]] = llvm.extractelement %[[VAL_2]]{{\[}}%[[VAL_15]] : i64] : vector<2xi32>
+// CHECK: %[[VAL_17:.*]] = llvm.mlir.poison : vector<3xi32>
+// CHECK: %[[VAL_18:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[VAL_19:.*]] = llvm.insertelement %[[VAL_16]], %[[VAL_17]]{{\[}}%[[VAL_18]] : i32] : vector<3xi32>
+// CHECK: %[[VAL_20:.*]] = llvm.shufflevector %[[VAL_19]], %[[VAL_17]] [0, 0, 0] : vector<3xi32>
+// CHECK: %[[VAL_21:.*]] = builtin.unrealized_conversion_cast %[[VAL_20]] : vector<3xi32> to vector<3xindex>
+// CHECK: %[[VAL_22:.*]] = arith.muli %[[VAL_21]], %[[VAL_1]] : vector<3xindex>
+// CHECK: %[[VAL_23:.*]] = builtin.unrealized_conversion_cast %[[VAL_22]] : vector<3xindex> to vector<3xi32>
+// CHECK: %[[VAL_24:.*]] = llvm.insertvalue %[[VAL_23]], %[[VAL_14]][1] : !llvm.array<2 x vector<3xi32>>
+// CHECK: %[[VAL_25:.*]] = builtin.unrealized_conversion_cast %[[VAL_24]] : !llvm.array<2 x vector<3xi32>> to vector<2x3xindex>
+// CHECK: return %[[VAL_25]] : vector<2x3xindex>
+// CHECK: }
+func.func @outerproduct_index(%arg0: vector<2xindex>, %arg1: vector<3xindex>) -> vector<2x3xindex> {
+  %2 = vector.outerproduct %arg0, %arg1 : vector<2xindex>, vector<3xindex>
+  return %2 : vector<2x3xindex>
+}
+
+// -----
+
+// CHECK-LABEL: func.func @extract_strided_slice_index_1d_from_1d(
+// CHECK-SAME: %[[VAL_0:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: vector<4xindex>) -> vector<2xindex> {
+// CHECK: %[[VAL_1:.*]] = builtin.unrealized_conversion_cast %[[VAL_0]] : vector<4xindex> to vector<4xi32>
+// CHECK: %[[VAL_2:.*]] = llvm.shufflevector %[[VAL_1]], %[[VAL_1]] [2, 3] : vector<4xi32>
+// CHECK: %[[VAL_3:.*]] = builtin.unrealized_conversion_cast %[[VAL_2]] : vector<2xi32> to vector<2xindex>
+// CHECK: return %[[VAL_3]] : vector<2xindex>
+// CHECK: }
+func.func @extract_strided_slice_index_1d_from_1d(%arg0: vector<4xindex>) -> vector<2xindex> {
+  %0 = vector.extract_strided_slice %arg0 {offsets = [2], sizes = [2], strides = [1]} : vector<4xindex> to vector<2xindex>
+  return %0 : vector<2xindex>
+}
+
+// -----
+
+// CHECK-LABEL: func.func @insert_strided_index_slice_index_2d_into_3d(
+// CHECK-SAME: %[[VAL_0:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: vector<4x4xindex>,
+// CHECK-SAME: %[[VAL_1:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: vector<4x4x4xindex>) -> vector<4x4x4xindex> {
+// CHECK: %[[VAL_2:.*]] = builtin.unrealized_conversion_cast %[[VAL_1]] : vector<4x4x4xindex> to !llvm.array<4 x array<4 x vector<4xi32>>>
+// CHECK: %[[VAL_3:.*]] = builtin.unrealized_conversion_cast %[[VAL_0]] : vector<4x4xindex> to !llvm.array<4 x vector<4xi32>>
+// CHECK: %[[VAL_4:.*]] = llvm.insertvalue %[[VAL_3]], %[[VAL_2]][2] : !llvm.array<4 x array<4 x vector<4xi32>>>
+// CHECK: %[[VAL_5:.*]] = builtin.unrealized_conversion_cast %[[VAL_4]] : !llvm.array<4 x array<4 x vector<4xi32>>> to vector<4x4x4xindex>
+// CHECK: return %[[VAL_5]] : vector<4x4x4xindex>
+// CHECK: }
+func.func @insert_strided_index_slice_index_2d_into_3d(%b: vector<4x4xindex>, %c: vector<4x4x4xindex>) -> vector<4x4x4xindex> {
+  %0 = vector.insert_strided_slice %b, %c {offsets = [2, 0, 0], strides = [1, 1]} : vector<4x4xindex> into vector<4x4x4xindex>
+  return %0 : vector<4x4x4xindex>
+}
+
+// -----
+
+// CHECK-LABEL: func.func @matrix_ops_index(
+// CHECK-SAME: %[[VAL_0:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: vector<64xindex>,
+// CHECK-SAME: %[[VAL_1:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: vector<48xindex>) -> vector<12xindex> {
+// CHECK: %[[VAL_2:.*]] = builtin.unrealized_conversion_cast %[[VAL_1]] : vector<48xindex> to vector<48xi32>
+// CHECK: %[[VAL_3:.*]] = builtin.unrealized_conversion_cast %[[VAL_0]] : vector<64xindex> to vector<64xi32>
+// CHECK: %[[VAL_4:.*]] = llvm.intr.matrix.multiply %[[VAL_3]], %[[VAL_2]] {lhs_columns = 16 : i32, lhs_rows = 4 : i32, rhs_columns = 3 : i32} : (vector<64xi32>, vector<48xi32>) -> vector<12xi32>
+// CHECK: %[[VAL_5:.*]] = builtin.unrealized_conversion_cast %[[VAL_4]] : vector<12xi32> to vector<12xindex>
+// CHECK: return %[[VAL_5]] : vector<12xindex>
+// CHECK: }
+func.func @matrix_ops_index(%A: vector<64xindex>, %B: vector<48xindex>) -> vector<12xindex> {
+  %C = vector.matrix_multiply %A, %B
+    { lhs_rows = 4: i32, lhs_columns = 16: i32 , rhs_columns = 3: i32 } :
+    (vector<64xindex>, vector<48xindex>) -> vector<12xindex>
+  return %C: vector<12xindex>
+}
+
+// -----
+
+// CHECK-LABEL: func.func @transfer_read_write_index_1d(
+// CHECK-SAME: %[[VAL_0:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: memref<?xindex>,
+// CHECK-SAME: %[[VAL_1:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: index) -> vector<17xindex> {
+// CHECK: %[[VAL_2:.*]] = builtin.unrealized_conversion_cast %[[VAL_1]] : index to i32
+// CHECK: %[[VAL_3:.*]] = builtin.unrealized_conversion_cast %[[VAL_0]] : memref<?xindex> to !llvm.struct<(ptr, ptr, i32, array<1 x i32>, array<1 x i32>)>
+// CHECK: %[[VAL_4:.*]] = arith.constant dense<7> : vector<17xindex>
+// CHECK: %[[VAL_5:.*]] = builtin.unrealized_conversion_cast %[[VAL_4]] : vector<17xindex> to vector<17xi32>
+// CHECK: %[[VAL_6:.*]] = arith.constant dense<[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]> : vector<17xi32>
+// CHECK: %[[VAL_7:.*]] = arith.constant 0 : index
+// CHECK: %[[VAL_8:.*]] = memref.dim %[[VAL_0]], %[[VAL_7]] : memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = arith.subi %[[VAL_8]], %[[VAL_1]] : index
+// CHECK: %[[VAL_10:.*]] = arith.index_cast %[[VAL_9]] : index to i32
+// CHECK: %[[VAL_11:.*]] = llvm.mlir.poison : vector<17xi32>
+// CHECK: %[[VAL_12:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[VAL_13:.*]] = llvm.insertelement %[[VAL_10]], %[[VAL_11]]{{\[}}%[[VAL_12]] : i32] : vector<17xi32>
+// CHECK: %[[VAL_14:.*]] = llvm.shufflevector %[[VAL_13]], %[[VAL_11]] [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] : vector<17xi32>
+// CHECK: %[[VAL_15:.*]] = arith.cmpi sgt, %[[VAL_14]], %[[VAL_6]] : vector<17xi32>
+// CHECK: %[[VAL_16:.*]] = llvm.extractvalue %[[VAL_3]][1] : !llvm.struct<(ptr, ptr, i32, array<1 x i32>, array<1 x i32>)>
+// CHECK: %[[VAL_17:.*]] = llvm.getelementptr %[[VAL_16]]{{\[}}%[[VAL_2]]] : (!llvm.ptr, i32) -> !llvm.ptr, i32
+// CHECK: %[[VAL_18:.*]] = llvm.intr.masked.load %[[VAL_17]], %[[VAL_15]], %[[VAL_5]] {alignment = 4 : i32} : (!llvm.ptr, vector<17xi1>, vector<17xi32>) -> vector<17xi32>
+// CHECK: %[[VAL_19:.*]] = builtin.unrealized_conversion_cast %[[VAL_18]] : vector<17xi32> to vector<17xindex>
+// CHECK: %[[VAL_20:.*]] = memref.dim %[[VAL_0]], %[[VAL_7]] : memref<?xindex>
+// CHECK: %[[VAL_21:.*]] = arith.subi %[[VAL_20]], %[[VAL_1]] : index
+// CHECK: %[[VAL_22:.*]] = arith.index_cast %[[VAL_21]] : index to i32
+// CHECK: %[[VAL_23:.*]] = llvm.mlir.poison : vector<17xi32>
+// CHECK: %[[VAL_24:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[VAL_25:.*]] = llvm.insertelement %[[VAL_22]], %[[VAL_23]]{{\[}}%[[VAL_24]] : i32] : vector<17xi32>
+// CHECK: %[[VAL_26:.*]] = llvm.shufflevector %[[VAL_25]], %[[VAL_23]] [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] : vector<17xi32>
+// CHECK: %[[VAL_27:.*]] = arith.cmpi sgt, %[[VAL_26]], %[[VAL_6]] : vector<17xi32>
+// CHECK: %[[VAL_28:.*]] = llvm.extractvalue %[[VAL_3]][1] : !llvm.struct<(ptr, ptr, i32, array<1 x i32>, array<1 x i32>)>
+// CHECK: %[[VAL_29:.*]] = llvm.getelementptr %[[VAL_28]]{{\[}}%[[VAL_2]]] : (!llvm.ptr, i32) -> !llvm.ptr, i32
+// CHECK: llvm.intr.masked.store %[[VAL_18]], %[[VAL_29]], %[[VAL_27]] {alignment = 4 : i32} : vector<17xi32>, vector<17xi1> into !llvm.ptr
+// CHECK: return %[[VAL_19]] : vector<17xindex>
+// CHECK: }
+func.func @transfer_read_write_index_1d(%A : memref<?xindex>, %base: index) -> vector<17xindex> {
+  %f7 = arith.constant 7: index
+  %f = vector.transfer_read %A[%base], %f7
+      {permutation_map = affine_map<(d0) -> (d0)>} :
+    memref<?xindex>, vector<17xindex>
+  vector.transfer_write %f, %A[%base]
+      {permutation_map = affine_map<(d0) -> (d0)>} :
+    vector<17xindex>, memref<?xindex>
+  return %f: vector<17xindex>
+}
+
+// -----
+
+// CHECK-LABEL: func.func @transfer_read_write_1d_non_zero_addrspace(
+// CHECK-SAME: %[[VAL_0:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: memref<?xf32, 3>,
+// CHECK-SAME: %[[VAL_1:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: index) -> vector<17xf32> {
+// CHECK: %[[VAL_2:.*]] = builtin.unrealized_conversion_cast %[[VAL_1]] : index to i32
+// CHECK: %[[VAL_3:.*]] = builtin.unrealized_conversion_cast %[[VAL_0]] : memref<?xf32, 3> to !llvm.struct<(ptr<3>, ptr<3>, i32, array<1 x i32>, array<1 x i32>)>
+// CHECK: %[[VAL_4:.*]] = arith.constant dense<7.000000e+00> : vector<17xf32>
+// CHECK: %[[VAL_5:.*]] = arith.constant dense<[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]> : vector<17xi32>
+// CHECK: %[[VAL_6:.*]] = arith.constant 0 : index
+// CHECK: %[[VAL_7:.*]] = memref.dim %[[VAL_0]], %[[VAL_6]] : memref<?xf32, 3>
+// CHECK: %[[VAL_8:.*]] = arith.subi %[[VAL_7]], %[[VAL_1]] : index
+// CHECK: %[[VAL_9:.*]] = arith.index_cast %[[VAL_8]] : index to i32
+// CHECK: %[[VAL_10:.*]] = llvm.mlir.poison : vector<17xi32>
+// CHECK: %[[VAL_11:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[VAL_12:.*]] = llvm.insertelement %[[VAL_9]], %[[VAL_10]]{{\[}}%[[VAL_11]] : i32] : vector<17xi32>
+// CHECK: %[[VAL_13:.*]] = llvm.shufflevector %[[VAL_12]], %[[VAL_10]] [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] : vector<17xi32>
+// CHECK: %[[VAL_14:.*]] = arith.cmpi sgt, %[[VAL_13]], %[[VAL_5]] : vector<17xi32>
+// CHECK: %[[VAL_15:.*]] = llvm.extractvalue %[[VAL_3]][1] : !llvm.struct<(ptr<3>, ptr<3>, i32, array<1 x i32>, array<1 x i32>)>
+// CHECK: %[[VAL_16:.*]] = llvm.getelementptr %[[VAL_15]]{{\[}}%[[VAL_2]]] : (!llvm.ptr<3>, i32) -> !llvm.ptr<3>, f32
+// CHECK: %[[VAL_17:.*]] = llvm.intr.masked.load %[[VAL_16]], %[[VAL_14]], %[[VAL_4]] {alignment = 4 : i32} : (!llvm.ptr<3>, vector<17xi1>, vector<17xf32>) -> vector<17xf32>
+// CHECK: %[[VAL_18:.*]] = memref.dim %[[VAL_0]], %[[VAL_6]] : memref<?xf32, 3>
+// CHECK: %[[VAL_19:.*]] = arith.subi %[[VAL_18]], %[[VAL_1]] : index
+// CHECK: %[[VAL_20:.*]] = arith.index_cast %[[VAL_19]] : index to i32
+// CHECK: %[[VAL_21:.*]] = llvm.mlir.poison : vector<17xi32>
+// CHECK: %[[VAL_22:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[VAL_23:.*]] = llvm.insertelement %[[VAL_20]], %[[VAL_21]]{{\[}}%[[VAL_22]] : i32] : vector<17xi32>
+// CHECK: %[[VAL_24:.*]] = llvm.shufflevector %[[VAL_23]], %[[VAL_21]] [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] : vector<17xi32>
+// CHECK: %[[VAL_25:.*]] = arith.cmpi sgt, %[[VAL_24]], %[[VAL_5]] : vector<17xi32>
+// CHECK: %[[VAL_26:.*]] = llvm.extractvalue %[[VAL_3]][1] : !llvm.struct<(ptr<3>, ptr<3>, i32, array<1 x i32>, array<1 x i32>)>
+// CHECK: %[[VAL_27:.*]] = llvm.getelementptr %[[VAL_26]]{{\[}}%[[VAL_2]]] : (!llvm.ptr<3>, i32) -> !llvm.ptr<3>, f32
+// CHECK: llvm.intr.masked.store %[[VAL_17]], %[[VAL_27]], %[[VAL_25]] {alignment = 4 : i32} : vector<17xf32>, vector<17xi1> into !llvm.ptr<3>
+// CHECK: return %[[VAL_17]] : vector<17xf32>
+// CHECK: }
+func.func @transfer_read_write_1d_non_zero_addrspace(%A : memref<?xf32, 3>, %base: index) -> vector<17xf32> {
+  %f7 = arith.constant 7.0: f32
+  %f = vector.transfer_read %A[%base], %f7
+      {permutation_map = affine_map<(d0) -> (d0)>} :
+    memref<?xf32, 3>, vector<17xf32>
+  vector.transfer_write %f, %A[%base]
+      {permutation_map = affine_map<(d0) -> (d0)>} :
+    vector<17xf32>, memref<?xf32, 3>
+  return %f: vector<17xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func.func @transfer_read_1d_inbounds(
+// CHECK-SAME: %[[VAL_0:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: memref<?xf32>,
+// CHECK-SAME: %[[VAL_1:[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*]]: index) -> vector<17xf32> {
+// CHECK: %[[VAL_2:.*]] = builtin.unrealized_conversion_cast %[[VAL_1]] : index to i32
+// CHECK: %[[VAL_3:.*]] = builtin.unrealized_conversion_cast %[[VAL_0]] : memref<?xf32> to !llvm.struct<(ptr, ptr, i32, array<1 x i32>, array<1 x i32>)>
+// CHECK: %[[VAL_4:.*]] = llvm.extractvalue %[[VAL_3]][1] : !llvm.struct<(ptr, ptr, i32, array<1 x i32>, array<1 x i32>)>
+// CHECK: %[[VAL_5:.*]] = llvm.getelementptr %[[VAL_4]]{{\[}}%[[VAL_2]]] : (!llvm.ptr, i32) -> !llvm.ptr, f32
+// CHECK: %[[VAL_6:.*]] = llvm.load %[[VAL_5]] {alignment = 4 : i64} : !llvm.ptr -> vector<17xf32>
+// CHECK: return %[[VAL_6]] : vector<17xf32>
+// CHECK: }
+func.func @transfer_read_1d_inbounds(%A : memref<?xf32>, %base: index) -> vector<17xf32> {
+  %f7 = arith.constant 7.0: f32
+  %f = vector.transfer_read %A[%base], %f7 {in_bounds = [true]} :
+    memref<?xf32>, vector<17xf32>
+  return %f: vector<17xf32>
+}
diff --git a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm-interface.mlir b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm-interface.mlir
index fa7c030538401..ae33dae501ea1 100644
--- a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm-interface.mlir
+++ b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm-interface.mlir
@@ -839,7 +839,7 @@ func.func @type_cast_f32(%arg0: memref<8x8x8xf32>) -> memref<vector<8x8x8xf32>>
 // CHECK: llvm.insertvalue %[[allocated]], {{.*}}[0] : !llvm.struct<(ptr, ptr, i64)>
 // CHECK: %[[aligned:.*]] = llvm.extractvalue {{.*}}[1] : !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)>
 // CHECK: llvm.insertvalue %[[aligned]], {{.*}}[1] : !llvm.struct<(ptr, ptr, i64)>
-// CHECK: llvm.mlir.constant(0 : index
+// CHECK: llvm.mlir.constant(0 : index) : i64
 // CHECK: llvm.insertvalue {{.*}}[2] : !llvm.struct<(ptr, ptr, i64)>
 
 // NOTE: No test for scalable vectors - the input memref is fixed size.
@@ -870,7 +870,7 @@ func.func @type_cast_non_zero_addrspace(%arg0: memref<8x8x8xf32, 3>) -> memref<vector<8x8x8xf32>, 3>
 // CHECK: llvm.insertvalue %[[allocated]], {{.*}}[0] : !llvm.struct<(ptr<3>, ptr<3>, i64)>
 // CHECK: %[[aligned:.*]] = llvm.extractvalue {{.*}}[1] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<3 x i64>, array<3 x i64>)>
 // CHECK: llvm.insertvalue %[[aligned]], {{.*}}[1] : !llvm.struct<(ptr<3>, ptr<3>, i64)>
-// CHECK: llvm.mlir.constant(0 : index
+// CHECK: llvm.mlir.constant(0 : index) : i64
 // CHECK: llvm.insertvalue {{.*}}[2] : !llvm.struct<(ptr<3>, ptr<3>, i64)>
 
 // NOTE: No test for scalable vectors - the input memref is fixed size.
@@ -1849,7 +1849,7 @@ func.func @store_0d(%memref : memref<200x100xf32>, %i : index, %j : index) {
 // CHECK: %[[CAST_MEMREF:.*]] = builtin.unrealized_conversion_cast %{{.*}} : memref<200x100xf32> to !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
 // CHECK: %[[CST:.*]] = arith.constant dense<1.100000e+01> : vector<f32>
 // CHECK: %[[VAL:.*]] = builtin.unrealized_conversion_cast %[[CST]] : vector<f32> to vector<1xf32>
-// CHECK: %[[REF:.*]] = llvm.extractvalue %[[CAST_MEMREF]][1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
+// CHECK: %[[REF:.*]] = llvm.extractvalue %[[CAST_MEMREF]][1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
 // CHECK: %[[C100:.*]] = llvm.mlir.constant(100 : index) : i64
 // CHECK: %[[MUL:.*]] = llvm.mul %[[I]], %[[C100]] : i64
 // CHECK: %[[ADD:.*]] = llvm.add %[[MUL]], %[[J]] : i64