From 05e187da6973ba79418daf833a379ae702b1b2af Mon Sep 17 00:00:00 2001 From: "Golubev, Andrey" Date: Thu, 24 Apr 2025 13:41:15 +0000 Subject: [PATCH] [mlir][bufferization][NFC] Rename to_memref to to_buffer As part of the work on transitioning the bufferization dialect, ops, and associated logic to operate on the newly added type interfaces (see 00eaff3e9c897c263a879416d0f151d7ca7eeaff), rename bufferization.to_memref to bufferization.to_buffer to highlight the generic nature of the op: the bufferization process produces buffers, whereas memref is a builtin type rather than a generic term. Preserve the current API (to_buffer still produces a memref), however, as the new type interfaces are not used yet. --- mlir/docs/Bufferization.md | 14 +-- .../IR/BufferizableOpInterface.h | 4 +- .../Dialect/Bufferization/IR/Bufferization.h | 8 +- .../Bufferization/IR/BufferizationOps.td | 12 +-- .../Bufferization/Transforms/Bufferize.h | 2 +- .../Bufferization/Transforms/Passes.td | 6 +- .../SparseTensor/IR/SparseTensorOps.td | 16 ++-- mlir/lib/Conversion/MeshToMPI/MeshToMPI.cpp | 2 +- .../IR/BufferizableOpInterface.cpp | 16 ++-- .../Bufferization/IR/BufferizationOps.cpp | 88 +++++++++---------- .../Bufferization/Transforms/Bufferize.cpp | 38 ++++---- .../FuncBufferizableOpInterfaceImpl.cpp | 6 +- .../Transforms/OneShotAnalysis.cpp | 2 +- .../Transforms/ConvertToDestinationStyle.cpp | 8 +- .../Transforms/SparseGPUCodegen.cpp | 2 +- .../Transforms/Utils/CodegenUtils.cpp | 2 +- .../Transforms/Utils/LoopEmitter.cpp | 2 +- .../BufferizableOpInterfaceImpl.cpp | 6 +- .../MeshToMPI/convert-mesh-to-mpi.mlir | 2 +- mlir/test/Dialect/Affine/loop-fusion-4.mlir | 2 +- mlir/test/Dialect/Arith/bufferize.mlir | 6 +- .../dealloc-other.mlir | 4 +- ...ne-shot-bufferize-allow-return-allocs.mlir | 4 +- .../one-shot-bufferize-analysis.mlir | 20 ++--- .../one-shot-bufferize-encodings.mlir | 12 +-- .../one-shot-bufferize-partial.mlir | 22 ++--- .../Transforms/one-shot-bufferize.mlir | 16 ++-- ...ule-bufferize-force-copy-before-write.mlir | 12 +-- .../Transforms/one-shot-module-bufferize.mlir | 10 +-- .../Transforms/tensorlike-bufferlike.mlir | 4 +- .../Transforms/transform-ops.mlir | 6 +- .../Dialect/Bufferization/canonicalize.mlir | 32 +++---- mlir/test/Dialect/Bufferization/ops.mlir | 8 +- .../ControlFlow/one-shot-bufferize.mlir | 4 +- mlir/test/Dialect/Linalg/bufferize.mlir | 14 +-- mlir/test/Dialect/Linalg/hoisting.mlir | 4 +- .../transform-op-bufferize-to-allocation.mlir | 4 +- .../Dialect/MemRef/normalize-memrefs.mlir | 2 +- mlir/test/Dialect/SCF/bufferize.mlir | 12 +-- .../SCF/one-shot-bufferize-encodings.mlir | 8 +- mlir/test/Dialect/Shape/bufferize.mlir | 2 +- .../SparseTensor/GPU/gpu_matmul24_lib.mlir | 6 +- .../SparseTensor/GPU/gpu_matmul_lib.mlir | 4 +- .../SparseTensor/GPU/gpu_matvec_lib.mlir | 4 +- .../GPU/gpu_sampled_matmul_lib.mlir | 4 +- .../SparseTensor/GPU/gpu_sddmm_lib.mlir | 4 +- .../SparseTensor/constant_index_map.mlir | 4 +- mlir/test/Dialect/SparseTensor/dense.mlir | 6 +- .../fuse_sparse_pad_with_consumer.mlir | 2 +- .../test/Dialect/SparseTensor/sorted_coo.mlir | 4 +- mlir/test/Dialect/SparseTensor/sparse_1d.mlir | 60 ++++++------- mlir/test/Dialect/SparseTensor/sparse_2d.mlir | 78 ++++++++-------- mlir/test/Dialect/SparseTensor/sparse_3d.mlir | 82 ++++++++--------- .../Dialect/SparseTensor/sparse_affine.mlir | 16 ++-- .../Dialect/SparseTensor/sparse_batch.mlir | 2 +- .../Dialect/SparseTensor/sparse_fp_ops.mlir | 22 ++--- .../Dialect/SparseTensor/sparse_fusion.mlir | 2 +- .../Dialect/SparseTensor/sparse_int_ops.mlir | 34 +++----
.../Dialect/SparseTensor/sparse_kernels.mlir | 18 ++-- .../sparse_kernels_to_iterator.mlir | 2 +- .../Dialect/SparseTensor/sparse_lower.mlir | 8 +- .../SparseTensor/sparse_lower_col.mlir | 8 +- .../SparseTensor/sparse_lower_inplace.mlir | 8 +- mlir/test/Dialect/SparseTensor/sparse_nd.mlir | 4 +- .../Dialect/SparseTensor/sparse_outbuf.mlir | 6 +- .../Dialect/SparseTensor/sparse_pack.mlir | 12 +-- .../SparseTensor/sparse_parallel_reduce.mlir | 4 +- .../Dialect/SparseTensor/sparse_perm.mlir | 4 +- .../SparseTensor/sparse_perm_lower.mlir | 4 +- .../Dialect/SparseTensor/sparse_scalars.mlir | 4 +- .../Dialect/SparseTensor/sparse_sddmm.mlir | 10 +-- .../SparseTensor/sparse_sddmm_org.mlir | 4 +- .../SparseTensor/sparse_vector_chain.mlir | 2 +- .../SparseTensor/sparse_vector_index.mlir | 4 +- mlir/test/Dialect/SparseTensor/spy_sddmm.mlir | 4 +- .../Dialect/SparseTensor/spy_sddmm_bsr.mlir | 4 +- .../Dialect/SparseTensor/unused-tensor.mlir | 4 +- .../SparseTensor/vectorize_reduction.mlir | 28 +++--- mlir/test/Dialect/Tensor/bufferize.mlir | 42 ++++----- mlir/test/Dialect/Vector/bufferize.mlir | 6 +- .../Tosa/CPU/test-maxpool-dynamic.mlir | 4 +- .../Dialect/Vector/CPU/AMX/mulf-full.mlir | 4 +- .../Dialect/Vector/CPU/AMX/muli-full.mlir | 4 +- .../tree-sitter-mlir/dialect/bufferization.js | 45 +++++----- .../tree-sitter-mlir/queries/highlights.scm | 2 +- 85 files changed, 511 insertions(+), 516 deletions(-) diff --git a/mlir/docs/Bufferization.md b/mlir/docs/Bufferization.md index 02cfee5f2b8dc..e04934a120a00 100644 --- a/mlir/docs/Bufferization.md +++ b/mlir/docs/Bufferization.md @@ -202,13 +202,13 @@ e.g.: %2 = "my_dialect.yet_another_op"(%0) : (tensor<?xf32>) -> (tensor<?xf32>) ``` -## Tensor / MemRef Boundary +## Tensor / Buffer Boundary The bufferization dialect provides a few helper ops to connect tensor IR (that should be bufferized) with existing buffers (that may be allocated/provided by a different runtime/library/etc.). -`bufferization.to_memref %t` returns the future buffer of a tensor SSA value. +`bufferization.to_buffer %t` returns the future buffer of a tensor SSA value. `bufferization.to_tensor %m` returns a tensor SSA value for a given MemRef buffer. `bufferization.materialize_in_destination` indicates that a tensor value should materialize in a certain buffer. @@ -268,7 +268,7 @@ By default, One-Shot Bufferize fails when it encounters an op with tensor semantics (i.e., tensor result or tensor operand) that is not bufferizable (i.e., does not implement `BufferizableOpInterface`). This can be avoided with `allow-unknown-ops`. In that case, One-Shot Bufferize inserts -`to_memref`/`to_tensor` ops around the bufferization boundary. +`to_buffer`/`to_tensor` ops around the bufferization boundary. One-Shot Bufferize can be configured to bufferize only ops from a set of dialects with `dialect-filter`. @@ -291,7 +291,7 @@ memref. The layout map of the memref type can be controlled with One-Shot Bufferize bufferizes ops from top to bottom. This works well when all ops are bufferizable. However, when encountering a non-bufferizable tensor with -`allow-unknown-ops`, One-Shot Bufferize must insert `to_memref` ops at the +`allow-unknown-ops`, One-Shot Bufferize must insert `to_buffer` ops at the bufferization boundary and decide on a memref type. By default, One-Shot Bufferize choose the most dynamic memref type wrt. layout maps. @@ -300,12 +300,12 @@ Bufferize choose the most dynamic memref type wrt. layout maps.
E.g.: ```mlir %0 = "my_dialect.unbufferizable_op(%t) : (tensor<?x?xf32>) -> (tensor<?x?xf32>) %1 = tensor.extract %0[%idx1, %idx2] : tensor<?x?xf32> ``` -When bufferizing the above IR, One-Shot Bufferize inserts a `to_memref` ops with +When bufferizing the above IR, One-Shot Bufferize inserts `to_buffer` ops with dynamic offset and strides: ```mlir %0 = "my_dialect.unbufferizable_op(%t) : (tensor<?x?xf32>) -> (tensor<?x?xf32>) -%0_m = bufferization.to_memref %0 : memref<?x?xf32, strided<[?, ?], offset: ?>> +%0_m = bufferization.to_buffer %0 : memref<?x?xf32, strided<[?, ?], offset: ?>> %1 = memref.load %0_m[%idx1, %idx2] : memref<?x?xf32, strided<[?, ?], offset: ?>> ``` @@ -335,7 +335,7 @@ generation of layout maps when no precise layout can be inferred: * `identity-layout-map` uses static identity layout maps. This option can be useful for legacy code that cannot handle memref types with layout maps. Note that this setting can lead to additional buffer copies when folding a - `to_tensor`/`to_memref` pair with memref types that are not cast-compatible. + `to_tensor`/`to_buffer` pair with memref types that are not cast-compatible. Note: The `unknown-type-conversion` option does not affect layout maps of function signatures. There is a separate `function-signature-type-conversion` diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h index ada9539e87121..cb6ef8bc17220 100644 --- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h +++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h @@ -302,7 +302,7 @@ struct BufferizationOptions { Value to) const; /// Specifies whether not bufferizable ops are allowed in the input. If so, - /// bufferization.to_memref and bufferization.to_tensor ops are inserted at + /// bufferization.to_buffer and bufferization.to_tensor ops are inserted at /// the boundaries. bool allowUnknownOps = false; @@ -587,7 +587,7 @@ allocateTensorForShapedValue(OpBuilder &b, Location loc, Value shapedValue, bool copy = true); /// Lookup the buffer for the given value. If the value was not bufferized -/// yet, wrap it in a ToMemrefOp. Otherwise, it is the result of a ToTensorOp, +/// yet, wrap it in a ToBufferOp. Otherwise, it is the result of a ToTensorOp, /// from which the memref operand is returned. FailureOr<Value> getBuffer(RewriterBase &rewriter, Value value, const BufferizationOptions &options); diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/Bufferization.h b/mlir/include/mlir/Dialect/Bufferization/IR/Bufferization.h index 6f19dca2e8222..1ef5370802953 100644 --- a/mlir/include/mlir/Dialect/Bufferization/IR/Bufferization.h +++ b/mlir/include/mlir/Dialect/Bufferization/IR/Bufferization.h @@ -56,10 +56,10 @@ FailureOr<Value> castOrReallocMemRefValue(OpBuilder &b, Value value, MemRefType type, const BufferizationOptions &options); -/// Try to fold to_memref(to_tensor(x)). If x's type and the result type of the -/// to_memref op are different, a memref.cast is needed.
+LogicalResult foldToBufferToTensorPair(RewriterBase &rewriter, + ToBufferOp toBuffer, const BufferizationOptions &options); /// Add the canonicalization patterns for bufferization.dealloc to the given diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td index fad78a63444b9..7a1a701bea6dc 100644 --- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td +++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td @@ -394,7 +394,7 @@ def Bufferization_ToTensorOp : Bufferization_Op<"to_tensor", [ An operation that creates a tensor from a `memref`. The result value is a tensor whose shape and element type match the memref operand. - The opposite of this op is `to_memref`. Together, these two ops are + The opposite of this op is `to_buffer`. Together, these two ops are useful for source/target materializations when doing type conversions involving tensors and memrefs. @@ -459,7 +459,7 @@ def Bufferization_ToTensorOp : Bufferization_Op<"to_tensor", [ LogicalResult bufferize(RewriterBase &rewriter, const BufferizationOptions &options) const { - // to_tensor/to_memref pairs fold away after bufferization. + // to_tensor/to_buffer pairs fold away after bufferization. return success(); } @@ -490,10 +490,10 @@ def Bufferization_ToTensorOp : Bufferization_Op<"to_tensor", [ //===----------------------------------------------------------------------===// -// ToMemrefOp +// ToBufferOp //===----------------------------------------------------------------------===// -def Bufferization_ToMemrefOp : Bufferization_Op<"to_memref", [ +def Bufferization_ToBufferOp : Bufferization_Op<"to_buffer", [ BufferizableOpInterface, SameOperandsAndResultShape, SameOperandsAndResultElementType, @@ -507,7 +507,7 @@ def Bufferization_ToMemrefOp : Bufferization_Op<"to_memref", [ ```mlir // Result type is memref<4x?xf32, #layout, 0> - %m = bufferization.to_memref %t : tensor<4x?xf32> to memref<4x?xf32, #layout, 0> + %m = bufferization.to_buffer %t : tensor<4x?xf32> to memref<4x?xf32, #layout, 0> ``` This operation is a specialized variant of the built-in @@ -527,7 +527,7 @@ def Bufferization_ToMemrefOp : Bufferization_Op<"to_memref", [ // BufferizableOpInterface implementation //===------------------------------------------------------------------===// - // Note: ToMemrefOp / ToTensorOp are temporary ops that are inserted at the + // Note: ToBufferOp / ToTensorOp are temporary ops that are inserted at the // bufferization boundary. When One-Shot bufferization is complete, there // should be no such ops left over. If `allowUnknownOps` (or after running a // partial bufferization pass), such ops may be part of the resulting IR, diff --git a/mlir/include/mlir/Dialect/Bufferization/Transforms/Bufferize.h b/mlir/include/mlir/Dialect/Bufferization/Transforms/Bufferize.h index 2f495d304b4a5..d5cb8d8eb673c 100644 --- a/mlir/include/mlir/Dialect/Bufferization/Transforms/Bufferize.h +++ b/mlir/include/mlir/Dialect/Bufferization/Transforms/Bufferize.h @@ -50,7 +50,7 @@ LogicalResult bufferizeOp(Operation *op, const BufferizationOptions &options, /// Bufferize the signature of `block` and its callers (i.e., ops that have the /// given block as a successor). All block argument types are changed to memref /// types. All corresponding operands of all callers are wrapped in -/// bufferization.to_memref ops. All uses of bufferized tensor block arguments +/// bufferization.to_buffer ops. 
All uses of bufferized tensor block arguments /// are wrapped in bufferization.to_tensor ops. /// /// It is expected that all callers implement the `BranchOpInterface`. diff --git a/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td b/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td index ee33476f441ee..a0d113c150c5e 100644 --- a/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td +++ b/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td @@ -47,7 +47,7 @@ def OwnershipBasedBufferDeallocationPass Otherwise, the pass that bufferizes the remaining tensors is responsible to add the corresponding deallocation operations. Note that this pass does not consider any values of tensor type and assumes that MemRef values defined by - `bufferization.to_memref` do not return ownership and do not have to be + `bufferization.to_buffer` do not return ownership and do not have to be deallocated. `bufferization.to_tensor` operations are handled similarly to `bufferization.clone` operations with the exception that the result value is not handled because it's a tensor (not a MemRef). @@ -321,7 +321,7 @@ def OneShotBufferizePass : Pass<"one-shot-bufferize", "ModuleOp"> { One-Shot Bufferize will by default reject IR that contains non-bufferizable op, i.e., ops that do not implemement BufferizableOpInterface. Such IR can - be allowed with `allow-unknown-ops=1`. In that case, to_memref and to_tensor + be allowed with `allow-unknown-ops=1`. In that case, to_buffer and to_tensor ops will be generated at the bufferization boundary. This is useful for compatibility with existing partial bufferization passes: These can bufferize the remaining IR after running One-Shot Bufferize. @@ -341,7 +341,7 @@ def OneShotBufferizePass : Pass<"one-shot-bufferize", "ModuleOp"> { One-Shot Bufferize will by default assume memref types with fully dynamic layout maps when a precise layout cannot be inferred. E.g., this is the case - when wrapping a non-bufferizable op in to_memref/to_tensor ops. This + when wrapping a non-bufferizable op in to_buffer/to_tensor ops. This behavior can be overridden with `unknown-type-conversion`. Valid values are `fully-dynamic-layout-map` and `identity-layout-map`. diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td index 2c281c9f6aa85..a61d90a0c39b1 100644 --- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td +++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td @@ -266,9 +266,9 @@ def SparseTensor_ToPositionsOp : SparseTensor_Op<"positions", let summary = "Extracts the `level`-th positions array of the `tensor`"; let description = [{ Returns the positions array of the tensor's storage at the given - level. This is similar to the `bufferization.to_memref` operation + level. This is similar to the `bufferization.to_buffer` operation in the sense that it provides a bridge between a tensor world view - and a bufferized world view. Unlike the `bufferization.to_memref` + and a bufferized world view. Unlike the `bufferization.to_buffer` operation, however, this sparse operation actually lowers into code that extracts the positions array from the sparse storage itself (either by calling a support library or through direct code). 
@@ -295,9 +295,9 @@ def SparseTensor_ToCoordinatesOp : SparseTensor_Op<"coordinates", let summary = "Extracts the `level`-th coordinates array of the `tensor`"; let description = [{ Returns the coordinates array of the tensor's storage at the given - level. This is similar to the `bufferization.to_memref` operation + level. This is similar to the `bufferization.to_buffer` operation in the sense that it provides a bridge between a tensor world view - and a bufferized world view. Unlike the `bufferization.to_memref` + and a bufferized world view. Unlike the `bufferization.to_buffer` operation, however, this sparse operation actually lowers into code that extracts the coordinates array from the sparse storage itself (either by calling a support library or through direct code). @@ -326,9 +326,9 @@ def SparseTensor_ToCoordinatesBufferOp : SparseTensor_Op<"coordinates_buffer", Returns the linear coordinates array for a sparse tensor with a trailing COO region with at least two levels. It is an error if the tensor doesn't contain such a COO region. This is similar - to the `bufferization.to_memref` operation in the sense that it + to the `bufferization.to_buffer` operation in the sense that it provides a bridge between a tensor world view and a bufferized - world view. Unlike the `bufferization.to_memref` operation, + world view. Unlike the `bufferization.to_buffer` operation, however, this operation actually lowers into code that extracts the linear coordinates array from the sparse storage scheme that stores the coordinates for the COO region as an array of structures. @@ -359,9 +359,9 @@ def SparseTensor_ToValuesOp : SparseTensor_Op<"values", let description = [{ Returns the values array of the sparse storage format for the given sparse tensor, independent of the actual dimension. This is similar to - the `bufferization.to_memref` operation in the sense that it provides a bridge + the `bufferization.to_buffer` operation in the sense that it provides a bridge between a tensor world view and a bufferized world view. Unlike the - `bufferization.to_memref` operation, however, this sparse operation actually + `bufferization.to_buffer` operation, however, this sparse operation actually lowers into code that extracts the values array from the sparse storage scheme (either by calling a support library or through direct code). diff --git a/mlir/lib/Conversion/MeshToMPI/MeshToMPI.cpp b/mlir/lib/Conversion/MeshToMPI/MeshToMPI.cpp index cafbf835de22f..823d4d644f586 100644 --- a/mlir/lib/Conversion/MeshToMPI/MeshToMPI.cpp +++ b/mlir/lib/Conversion/MeshToMPI/MeshToMPI.cpp @@ -576,7 +576,7 @@ struct ConvertUpdateHaloOp : public OpConversionPattern<UpdateHaloOp> { auto tensorType = MemRefType::get( dstShape, cast<ShapedType>(array.getType()).getElementType()); array = - rewriter.create<bufferization::ToMemrefOp>(loc, tensorType, array); + rewriter.create<bufferization::ToBufferOp>(loc, tensorType, array); } auto rank = cast<ShapedType>(array.getType()).getRank(); auto opSplitAxes = adaptor.getSplitAxes().getAxes(); diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp index 99ffa62c41a4d..1fc34051680f1 100644 --- a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp +++ b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp @@ -624,8 +624,8 @@ bool AnalysisState::canOmitTensorCopy(OpOperand &opOperand) const { } bool AnalysisState::isInPlace(OpOperand &opOperand) const { - // ToMemrefOps are always in-place. - if (isa<ToMemrefOp>(opOperand.getOwner())) + // ToBufferOps are always in-place.
+ if (isa<ToBufferOp>(opOperand.getOwner())) return true; // In the absence of analysis information, OpOperands that bufferize to a @@ -650,13 +650,13 @@ bool AnalysisState::hasUndefinedContents(OpOperand *opOperand) const { return false; } -// bufferization.to_memref is not allowed to change the rank. -static void ensureToMemrefOpIsValid(Value tensor, Type memrefType) { +// bufferization.to_buffer is not allowed to change the rank. +static void ensureToBufferOpIsValid(Value tensor, Type memrefType) { #ifndef NDEBUG auto rankedTensorType = llvm::dyn_cast<RankedTensorType>(tensor.getType()); assert((!rankedTensorType || llvm::cast<BaseMemRefType>(memrefType).getRank() == rankedTensorType.getRank()) && - "to_memref would be invalid: mismatching ranks"); + "to_buffer would be invalid: mismatching ranks"); #endif } @@ -671,15 +671,15 @@ FailureOr<Value> bufferization::getBuffer(RewriterBase &rewriter, Value value, if (auto toTensorOp = value.getDefiningOp<ToTensorOp>()) return toTensorOp.getMemref(); - // Insert to_memref op. + // Insert to_buffer op. OpBuilder::InsertionGuard g(rewriter); setInsertionPointAfter(rewriter, value); FailureOr<BaseMemRefType> memrefType = getBufferType(value, options); if (failed(memrefType)) return failure(); - ensureToMemrefOpIsValid(value, *memrefType); + ensureToBufferOpIsValid(value, *memrefType); return rewriter - .create<ToMemrefOp>(value.getLoc(), *memrefType, value) + .create<ToBufferOp>(value.getLoc(), *memrefType, value) .getResult(); } diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp index 4fce9be390bd6..ecd2ef15546a4 100644 --- a/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp +++ b/mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp @@ -81,21 +81,21 @@ FailureOr<Value> mlir::bufferization::castOrReallocMemRefValue( return copy; } -/// Try to fold to_memref(to_tensor(x)). If x's type and the result type of the -/// to_memref op are different, a memref.cast is needed. -LogicalResult mlir::bufferization::foldToMemrefToTensorPair( - RewriterBase &rewriter, ToMemrefOp toMemref, +/// Try to fold to_buffer(to_tensor(x)). If x's type and the result type of the +/// to_buffer op are different, a memref.cast is needed. +LogicalResult mlir::bufferization::foldToBufferToTensorPair( + RewriterBase &rewriter, ToBufferOp toBuffer, const BufferizationOptions &options) { - auto memrefToTensor = toMemref.getTensor().getDefiningOp<ToTensorOp>(); - if (!memrefToTensor) + auto bufferToTensor = toBuffer.getTensor().getDefiningOp<ToTensorOp>(); + if (!bufferToTensor) return failure(); - Type srcType = memrefToTensor.getMemref().getType(); - Type destType = toMemref.getType(); + Type srcType = bufferToTensor.getMemref().getType(); + Type destType = toBuffer.getType(); // Directly rewrite if the type did not change. if (srcType == destType) { - rewriter.replaceOp(toMemref, memrefToTensor.getMemref()); + rewriter.replaceOp(toBuffer, bufferToTensor.getMemref()); return success(); } @@ -106,11 +106,11 @@ LogicalResult mlir::bufferization::foldToMemrefToTensorPair( // Ranked memref -> Ranked memref cast. if (rankedSrcType && rankedDestType) { FailureOr<Value> replacement = castOrReallocMemRefValue( - rewriter, memrefToTensor.getMemref(), rankedDestType, options); + rewriter, bufferToTensor.getMemref(), rankedDestType, options); if (failed(replacement)) return failure(); - rewriter.replaceOp(toMemref, *replacement); + rewriter.replaceOp(toBuffer, *replacement); return success(); } @@ -123,8 +123,8 @@ LogicalResult mlir::bufferization::foldToMemrefToTensorPair( // Ranked memref -> unranked memref cast: No copy needed.
assert(memref::CastOp::areCastCompatible(srcType, destType) && "expected that types are cast compatible"); - rewriter.replaceOpWithNewOp<memref::CastOp>(toMemref, destType, - memrefToTensor.getMemref()); + rewriter.replaceOpWithNewOp<memref::CastOp>(toBuffer, destType, + bufferToTensor.getMemref()); return success(); } @@ -738,12 +738,12 @@ bool ToTensorOp::isWritable(Value value, const AnalysisState &state) { } OpFoldResult ToTensorOp::fold(FoldAdaptor) { - if (auto toMemref = getMemref().getDefiningOp<ToMemrefOp>()) + if (auto toBuffer = getMemref().getDefiningOp<ToBufferOp>()) // Approximate alias analysis by conservatively folding only when there // is no interleaved operation. - if (toMemref->getBlock() == this->getOperation()->getBlock() && - toMemref->getNextNode() == this->getOperation()) - return toMemref.getTensor(); + if (toBuffer->getBlock() == this->getOperation()->getBlock() && + toBuffer->getNextNode() == this->getOperation()) + return toBuffer.getTensor(); return {}; } @@ -770,10 +770,10 @@ void ToTensorOp::getCanonicalizationPatterns(RewritePatternSet &results, } //===----------------------------------------------------------------------===// -// ToMemrefOp +// ToBufferOp //===----------------------------------------------------------------------===// -OpFoldResult ToMemrefOp::fold(FoldAdaptor) { +OpFoldResult ToBufferOp::fold(FoldAdaptor) { if (auto memrefToTensor = getTensor().getDefiningOp<ToTensorOp>()) if (memrefToTensor.getMemref().getType() == getType()) return memrefToTensor.getMemref(); @@ -782,14 +782,14 @@ OpFoldResult ToMemrefOp::fold(FoldAdaptor) { namespace { -/// Replace tensor.cast + to_memref by to_memref + memref.cast. -struct ToMemrefOfCast : public OpRewritePattern<ToMemrefOp> { - using OpRewritePattern<ToMemrefOp>::OpRewritePattern; +/// Replace tensor.cast + to_buffer by to_buffer + memref.cast. +struct ToBufferOfCast : public OpRewritePattern<ToBufferOp> { + using OpRewritePattern<ToBufferOp>::OpRewritePattern; - LogicalResult matchAndRewrite(ToMemrefOp toMemref, + LogicalResult matchAndRewrite(ToBufferOp toBuffer, PatternRewriter &rewriter) const final { auto tensorCastOperand = - toMemref.getOperand().getDefiningOp<tensor::CastOp>(); + toBuffer.getOperand().getDefiningOp<tensor::CastOp>(); if (!tensorCastOperand) return failure(); auto srcTensorType = llvm::dyn_cast<RankedTensorType>( tensorCastOperand.getOperand().getType()); if (!srcTensorType) return failure(); auto memrefType = MemRefType::get(srcTensorType.getShape(), srcTensorType.getElementType()); - Value memref = rewriter.create<ToMemrefOp>(toMemref.getLoc(), memrefType, + Value memref = rewriter.create<ToBufferOp>(toBuffer.getLoc(), memrefType, tensorCastOperand.getOperand()); - rewriter.replaceOpWithNewOp<memref::CastOp>(toMemref, toMemref.getType(), + rewriter.replaceOpWithNewOp<memref::CastOp>(toBuffer, toBuffer.getType(), memref); return success(); } }; -/// Canonicalize bufferization.to_tensor + bufferization.to_memref. Insert a +/// Canonicalize bufferization.to_tensor + bufferization.to_buffer. Insert a /// cast if necessary.
-struct ToMemrefToTensorFolding : public OpRewritePattern<ToMemrefOp> { - using OpRewritePattern<ToMemrefOp>::OpRewritePattern; +struct ToBufferToTensorFolding : public OpRewritePattern<ToBufferOp> { + using OpRewritePattern<ToBufferOp>::OpRewritePattern; - LogicalResult matchAndRewrite(ToMemrefOp toMemref, + LogicalResult matchAndRewrite(ToBufferOp toBuffer, PatternRewriter &rewriter) const final { BufferizationOptions options; options.bufferAlignment = 0; - return foldToMemrefToTensorPair(rewriter, toMemref, options); + return foldToBufferToTensorPair(rewriter, toBuffer, options); } }; -/// Fold a load on a to_memref operation into an tensor.extract on the +/// Fold a load on a to_buffer operation into a tensor.extract on the /// corresponding tensor. -struct LoadOfToMemref : public OpRewritePattern<memref::LoadOp> { +struct LoadOfToBuffer : public OpRewritePattern<memref::LoadOp> { using OpRewritePattern<memref::LoadOp>::OpRewritePattern; LogicalResult matchAndRewrite(memref::LoadOp load, PatternRewriter &rewriter) const override { - auto toMemref = load.getMemref().getDefiningOp<ToMemrefOp>(); - if (!toMemref) + auto toBuffer = load.getMemref().getDefiningOp<ToBufferOp>(); + if (!toBuffer) return failure(); - rewriter.replaceOpWithNewOp<tensor::ExtractOp>(load, toMemref.getTensor(), + rewriter.replaceOpWithNewOp<tensor::ExtractOp>(load, toBuffer.getTensor(), load.getIndices()); return success(); } }; -/// Fold dim of a to_memref into the dim of the tensor. +/// Fold dim of a to_buffer into the dim of the tensor. struct DimOfCastOp : public OpRewritePattern<memref::DimOp> { using OpRewritePattern<memref::DimOp>::OpRewritePattern; LogicalResult matchAndRewrite(memref::DimOp dimOp, PatternRewriter &rewriter) const override { - auto castOp = dimOp.getSource().getDefiningOp<ToMemrefOp>(); + auto castOp = dimOp.getSource().getDefiningOp<ToBufferOp>(); if (!castOp) return failure(); Value newSource = castOp.getOperand(); @@ -854,16 +854,16 @@ struct DimOfCastOp : public OpRewritePattern<memref::DimOp> { } // namespace -void ToMemrefOp::getCanonicalizationPatterns(RewritePatternSet &results, +void ToBufferOp::getCanonicalizationPatterns(RewritePatternSet &results, MLIRContext *context) { - results.add<DimOfCastOp, LoadOfToMemref, ToMemrefOfCast, ToMemrefToTensorFolding>(context); + results.add<DimOfCastOp, LoadOfToBuffer, ToBufferOfCast, ToBufferToTensorFolding>(context); } -LogicalResult ToMemrefOp::bufferize(RewriterBase &rewriter, +LogicalResult ToBufferOp::bufferize(RewriterBase &rewriter, const BufferizationOptions &options) { - // Fold to_memref(to_tensor(x)) to x. Insert a cast if necessary. - (void)foldToMemrefToTensorPair(rewriter, *this, options); + // Fold to_buffer(to_tensor(x)) to x. Insert a cast if necessary. + (void)foldToBufferToTensorPair(rewriter, *this, options); // Note: The return value of `bufferize` indicates whether there was an error // or not. (And not whether the pattern matched or not.)
return success(); diff --git a/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp b/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp index 0b60c44ece5fd..824b505517119 100644 --- a/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp +++ b/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp @@ -201,11 +201,11 @@ namespace { class BufferizationRewriter : public IRRewriter, public RewriterBase::Listener { public: BufferizationRewriter(MLIRContext *ctx, DenseSet<Operation *> &erasedOps, - DenseSet<Operation *> &toMemrefOps, + DenseSet<Operation *> &toBufferOps, SmallVector<Operation *> &worklist, const BufferizationOptions &options, BufferizationStatistics *statistics) - : IRRewriter(ctx), erasedOps(erasedOps), toMemrefOps(toMemrefOps), + : IRRewriter(ctx), erasedOps(erasedOps), toBufferOps(toBufferOps), worklist(worklist), analysisState(options), statistics(statistics) { setListener(this); } @@ -214,7 +214,7 @@ class BufferizationRewriter : public IRRewriter, public RewriterBase::Listener { void notifyOperationErased(Operation *op) override { erasedOps.insert(op); // Erase if present. - toMemrefOps.erase(op); + toBufferOps.erase(op); } void notifyOperationInserted(Operation *op, InsertPoint previous) override { @@ -231,9 +231,9 @@ class BufferizationRewriter : public IRRewriter, public RewriterBase::Listener { sideEffectingOp.hasEffect<MemoryEffects::Allocate>()); } - // Keep track of to_memref ops. - if (isa<ToMemrefOp>(op)) { - toMemrefOps.insert(op); + // Keep track of to_buffer ops. + if (isa<ToBufferOp>(op)) { + toBufferOps.insert(op); return; } @@ -258,8 +258,8 @@ class BufferizationRewriter : public IRRewriter, public RewriterBase::Listener { /// A set of all erased ops. DenseSet<Operation *> &erasedOps; - /// A set of all to_memref ops. - DenseSet<Operation *> &toMemrefOps; + /// A set of all to_buffer ops. + DenseSet<Operation *> &toBufferOps; /// The worklist of ops to be bufferized. SmallVector<Operation *> &worklist; @@ -282,9 +282,9 @@ LogicalResult bufferization::bufferizeOp(Operation *op, return failure(); } - // Keep track of to_memref ops. - DenseSet<Operation *> toMemrefOps; - op->walk([&](ToMemrefOp toMemrefOp) { toMemrefOps.insert(toMemrefOp); }); + // Keep track of to_buffer ops. + DenseSet<Operation *> toBufferOps; + op->walk([&](ToBufferOp toBufferOp) { toBufferOps.insert(toBufferOp); }); // Gather all bufferizable ops in top-to-bottom order. // @@ -303,7 +303,7 @@ LogicalResult bufferization::bufferizeOp(Operation *op, DenseSet<Operation *> erasedOps; // Bufferize all ops. - BufferizationRewriter rewriter(op->getContext(), erasedOps, toMemrefOps, + BufferizationRewriter rewriter(op->getContext(), erasedOps, toBufferOps, worklist, options, statistics); for (unsigned i = 0; i < worklist.size(); ++i) { Operation *nextOp = worklist[i]; @@ -346,11 +346,11 @@ LogicalResult bufferization::bufferizeOp(Operation *op, if (erasedOps.contains(op)) return success(); - // Fold all to_memref(to_tensor(x)) pairs. - for (Operation *op : toMemrefOps) { + // Fold all to_buffer(to_tensor(x)) pairs. + for (Operation *op : toBufferOps) { rewriter.setInsertionPoint(op); - (void)bufferization::foldToMemrefToTensorPair( - rewriter, cast<ToMemrefOp>(op), options); + (void)bufferization::foldToBufferToTensorPair( + rewriter, cast<ToBufferOp>(op), options); } // Remove all dead to_tensor ops. @@ -381,8 +381,8 @@ LogicalResult bufferization::bufferizeOp(Operation *op, // Ops without any uses and no side effects will fold away. if (op->getUses().empty() && isMemoryEffectFree(op)) continue; - // ToTensorOps/ToMemrefOps are allowed in the output. - if (isa<ToTensorOp, ToMemrefOp>(op)) + // ToTensorOps/ToBufferOps are allowed in the output.
+ if (isa<ToTensorOp, ToBufferOp>(op)) continue; return op->emitError("op was not bufferized"); } @@ -463,7 +463,7 @@ bufferization::bufferizeBlockSignature(Block *block, RewriterBase &rewriter, if (failed(operandBufferType)) return failure(); rewriter.setInsertionPointAfterValue(operand); - Value bufferizedOperand = rewriter.create<bufferization::ToMemrefOp>( + Value bufferizedOperand = rewriter.create<bufferization::ToBufferOp>( operand.getLoc(), *operandBufferType, operand); // A cast is needed if the operand and the block argument have different // bufferized types. diff --git a/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp index 0b0dcc9162a9a..3bdd2c35d414d 100644 --- a/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp +++ b/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp @@ -275,7 +275,7 @@ struct CallOpInterface memRefType = *maybeMemRefType; } - // Since we don't yet have a clear layout story, to_memref may + // Since we don't yet have a clear layout story, to_buffer may // conservatively turn tensors into more dynamic memref than necessary. // If the memref type of the callee fails, introduce an extra memref.cast // that will either canonicalize away or fail compilation until we can do @@ -456,9 +456,9 @@ struct FuncOpInterface // Note: If `inferFunctionResultLayout = true`, casts are later folded // away. - Value toMemrefOp = rewriter.create<bufferization::ToMemrefOp>( + Value toBufferOp = rewriter.create<bufferization::ToBufferOp>( returnOp.getLoc(), bufferizedType, returnVal); - returnValues.push_back(toMemrefOp); + returnValues.push_back(toBufferOp); } returnOp.getOperandsMutable().assign(returnValues); diff --git a/mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp b/mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp index 1eaf999d11c08..6e93b36d2d5a2 100644 --- a/mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp +++ b/mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp @@ -31,7 +31,7 @@ // Ops that do not implement `BufferizableOpInterface` can be analyzed but are // treated conservatively. E.g., the analysis has to assume that their tensor // OpOperands bufferize to memory writes. While such ops can be analyzed, they -// are not bufferized and remain in the IR. to_tensor and to_memref ops are +// are not bufferized and remain in the IR. to_tensor and to_buffer ops are // inserted at the bufferization boundary. // // This analysis caters to high-performance codegen where buffer reuse is deemed diff --git a/mlir/lib/Dialect/Linalg/Transforms/ConvertToDestinationStyle.cpp b/mlir/lib/Dialect/Linalg/Transforms/ConvertToDestinationStyle.cpp index 6c1087730ebba..b1340be04e011 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/ConvertToDestinationStyle.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/ConvertToDestinationStyle.cpp @@ -75,19 +75,19 @@ static void createMemcpy(OpBuilder &b, Location loc, Value tensorSource, // TODO: Support custom memory space on source. // We do not know the layout map of the source yet, so use a fully dynamic // layout for best compatibility. - Value toMemref = b.create<bufferization::ToMemrefOp>( + Value toBuffer = b.create<bufferization::ToBufferOp>( loc, bufferization::getMemRefTypeWithFullyDynamicLayout(tensorType), tensorSource, /*readOnly=*/true); - b.create<memref::CopyOp>(loc, toMemref, memrefDest); + b.create<memref::CopyOp>(loc, toBuffer, memrefDest); } break; case linalg::BufferizeToAllocationOptions::MemcpyOp::LinalgCopy: { // TODO: Support custom memory space on source.
// We do not know the layout map of the source yet, so use a fully dynamic // layout for best compatibility. - Value toMemref = b.create<bufferization::ToMemrefOp>( + Value toBuffer = b.create<bufferization::ToBufferOp>( loc, bufferization::getMemRefTypeWithFullyDynamicLayout(tensorType), tensorSource, /*readOnly=*/true); - b.create<linalg::CopyOp>(loc, toMemref, memrefDest); + b.create<linalg::CopyOp>(loc, toBuffer, memrefDest); } break; }; } diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp index 01651b1f0ac9c..e5f2418367a58 100644 --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp @@ -212,7 +212,7 @@ static Value genTensorToMemref(PatternRewriter &rewriter, Location loc, auto tensorType = llvm::cast<ShapedType>(tensor.getType()); auto memrefType = MemRefType::get(tensorType.getShape(), tensorType.getElementType()); - return rewriter.create<bufferization::ToMemrefOp>(loc, memrefType, tensor); + return rewriter.create<bufferization::ToBufferOp>(loc, memrefType, tensor); } /// Prepares the outlined arguments, passing scalars and buffers in. Here we diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.cpp index 0ebdc3a54e61b..ffa06bc0e2071 100644 --- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/CodegenUtils.cpp @@ -550,7 +550,7 @@ TypedValue<BaseMemRefType> sparse_tensor::genToMemref(OpBuilder &builder, Location loc, Value tensor) { auto tTp = llvm::cast<TensorType>(tensor.getType()); auto mTp = MemRefType::get(tTp.getShape(), tTp.getElementType()); - return builder.create<bufferization::ToMemrefOp>(loc, mTp, tensor) + return builder.create<bufferization::ToBufferOp>(loc, mTp, tensor) .getResult(); } diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.cpp index a2c6314d2a61e..3a77ce347b1c0 100644 --- a/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/Utils/LoopEmitter.cpp @@ -263,7 +263,7 @@ void LoopEmitter::initializeLoopEmit( denseTp = bufferization::getMemRefTypeWithFullyDynamicLayout(rtp); Value denseVal = - builder.create<bufferization::ToMemrefOp>(loc, denseTp, tensor); + builder.create<bufferization::ToBufferOp>(loc, denseTp, tensor); // Dense outputs need special handling. if (isOutput && updater) denseVal = updater(builder, loc, denseVal, tensor); diff --git a/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp index 31014172a9555..c0e697292d2a0 100644 --- a/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp +++ b/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp @@ -215,7 +215,7 @@ struct CollapseShapeOpInterface MemRefType::get(collapseShapeOp.getSrcType().getShape(), collapseShapeOp.getSrcType().getElementType(), AffineMap(), bufferType.getMemorySpace()); - buffer = rewriter.create<bufferization::ToMemrefOp>( + buffer = rewriter.create<bufferization::ToBufferOp>( op->getLoc(), memrefType, *tensorAlloc); } @@ -491,7 +491,7 @@ struct FromElementsOpInterface bufferization::getBufferType(*tensorAlloc, options); if (failed(memrefType)) return failure(); - Value buffer = rewriter.create<bufferization::ToMemrefOp>( + Value buffer = rewriter.create<bufferization::ToBufferOp>( op->getLoc(), *memrefType, *tensorAlloc); // Case: tensor<0xelem_type>.
@@ -894,7 +894,7 @@ struct ReshapeOpInterface srcType.getShape(), srcType.getElementType(), AffineMap(), cast<BaseMemRefType>(srcBuffer->getType()).getMemorySpace()); srcBuffer = rewriter - .create<bufferization::ToMemrefOp>( + .create<bufferization::ToBufferOp>( op->getLoc(), memrefType, *tensorAlloc) .getResult(); } diff --git a/mlir/test/Conversion/MeshToMPI/convert-mesh-to-mpi.mlir b/mlir/test/Conversion/MeshToMPI/convert-mesh-to-mpi.mlir index 23756bb66928d..d314ad3ac30fd 100644 --- a/mlir/test/Conversion/MeshToMPI/convert-mesh-to-mpi.mlir +++ b/mlir/test/Conversion/MeshToMPI/convert-mesh-to-mpi.mlir @@ -193,7 +193,7 @@ module attributes { mpi.dlti = #dlti.map<"MPI:comm_world_rank" = 24> } { // CHECK-NEXT: [[vc44_i32:%.*]] = arith.constant 44 : i32 // CHECK-NEXT: [[vc4_i32:%.*]] = arith.constant 4 : i32 // CHECK-NEXT: [[vc91_i32:%.*]] = arith.constant 91 : i32 - // CHECK-NEXT: [[v0:%.*]] = bufferization.to_memref [[varg0]] : tensor<120x120x120xi8> to memref<120x120x120xi8> + // CHECK-NEXT: [[v0:%.*]] = bufferization.to_buffer [[varg0]] : tensor<120x120x120xi8> to memref<120x120x120xi8> // CHECK-NEXT: [[v1:%.*]] = mpi.comm_world : !mpi.comm // CHECK-NEXT: [[valloc:%.*]] = memref.alloc() : memref<117x113x5xi8> // CHECK-NEXT: [[vsubview:%.*]] = memref.subview [[v0]][1, 3, 109] [117, 113, 5] [1, 1, 1] : memref<120x120x120xi8> to memref<117x113x5xi8, strided<[14400, 120, 1], offset: 14869>> diff --git a/mlir/test/Dialect/Affine/loop-fusion-4.mlir b/mlir/test/Dialect/Affine/loop-fusion-4.mlir index 4b9eca45492fb..ca8099b9bb51f 100644 --- a/mlir/test/Dialect/Affine/loop-fusion-4.mlir +++ b/mlir/test/Dialect/Affine/loop-fusion-4.mlir @@ -247,7 +247,7 @@ module { ^bb0(%arg1: index, %arg2: index, %arg3: index, %arg4: index): tensor.yield %cst_f32 : f32 } : tensor<1x32x32x8xf32> to tensor<1x40x8229x8xf32> - %1 = bufferization.to_memref %padded : tensor<1x40x8229x8xf32> to memref<1x40x8229x8xf32> + %1 = bufferization.to_buffer %padded : tensor<1x40x8229x8xf32> to memref<1x40x8229x8xf32> %alloc_0 = memref.alloc() {alignment = 64 : i64} : memref<1x32x32x8xf32> affine.for %arg1 = 0 to 1 { affine.for %arg2 = 0 to 32 { diff --git a/mlir/test/Dialect/Arith/bufferize.mlir b/mlir/test/Dialect/Arith/bufferize.mlir index 0b7838e1471d3..d9d0cde642bef 100644 --- a/mlir/test/Dialect/Arith/bufferize.mlir +++ b/mlir/test/Dialect/Arith/bufferize.mlir @@ -7,7 +7,7 @@ func.func @index_cast(%tensor: tensor<i32>, %scalar: i32) -> (tensor<index>, index) { %index_scalar = arith.index_cast %scalar : i32 to index return %index_tensor, %index_scalar : tensor<index>, index } -// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : tensor<i32> +// CHECK: %[[MEMREF:.*]] = bufferization.to_buffer %[[TENSOR]] : tensor<i32> // CHECK-NEXT: %[[INDEX_MEMREF:.*]] = arith.index_cast %[[MEMREF]] // CHECK-SAME: memref<i32> to memref<index> // CHECK-NEXT: %[[INDEX_TENSOR:.*]] = bufferization.to_tensor %[[INDEX_MEMREF]] @@ -83,8 +83,8 @@ func.func @non_tensor() { // CHECK-SAME: %[[PRED:.*]]: i1, // CHECK-SAME: %[[TRUE_VAL:.*]]: tensor<f32>, // CHECK-SAME: %[[FALSE_VAL:.*]]: tensor<f32>) -> tensor<f32> { -// CHECK-DAG: %[[TRUE_VAL_MEMREF:.*]] = bufferization.to_memref %[[TRUE_VAL]] : tensor<f32> -// CHECK-DAG: %[[FALSE_VAL_MEMREF:.*]] = bufferization.to_memref %[[FALSE_VAL]] : tensor<f32> +// CHECK-DAG: %[[TRUE_VAL_MEMREF:.*]] = bufferization.to_buffer %[[TRUE_VAL]] : tensor<f32> +// CHECK-DAG: %[[FALSE_VAL_MEMREF:.*]] = bufferization.to_buffer %[[FALSE_VAL]] : tensor<f32> // CHECK: %[[RET_MEMREF:.*]] = arith.select %[[PRED]], %[[TRUE_VAL_MEMREF]], %[[FALSE_VAL_MEMREF]] : memref<f32> // CHECK: %[[RET:.*]] = bufferization.to_tensor %[[RET_MEMREF]] : memref<f32> // CHECK: return %[[RET]]
: tensor<f32> diff --git a/mlir/test/Dialect/Bufferization/Transforms/OwnershipBasedBufferDeallocation/dealloc-other.mlir b/mlir/test/Dialect/Bufferization/Transforms/OwnershipBasedBufferDeallocation/dealloc-other.mlir index 5d0657eb38baa..2204c6fae50d0 100644 --- a/mlir/test/Dialect/Bufferization/Transforms/OwnershipBasedBufferDeallocation/dealloc-other.mlir +++ b/mlir/test/Dialect/Bufferization/Transforms/OwnershipBasedBufferDeallocation/dealloc-other.mlir @@ -5,11 +5,11 @@ // no memref operands. // CHECK-LABEL: func private @no_interface_no_operands( -// CHECK-NEXT: %[[m:.*]] = bufferization.to_memref +// CHECK-NEXT: %[[m:.*]] = bufferization.to_buffer // CHECK-NEXT: %[[clone:.*]] = bufferization.clone %[[m]] // CHECK-NEXT: return %[[clone]] func.func private @no_interface_no_operands(%t : tensor<?xf32>) -> memref<?xf32> { - %0 = bufferization.to_memref %t : tensor<?xf32> to memref<?xf32> + %0 = bufferization.to_buffer %t : tensor<?xf32> to memref<?xf32> return %0 : memref<?xf32> } diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-allow-return-allocs.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-allow-return-allocs.mlir index 8f0170b17381a..4c7683ec211e4 100644 --- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-allow-return-allocs.mlir +++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-allow-return-allocs.mlir @@ -8,7 +8,7 @@ // CHECK-LABEL: func @buffer_not_deallocated( // CHECK-SAME: %[[t:.*]]: tensor<?xf32> func.func @buffer_not_deallocated(%t : tensor<?xf32>, %c : i1) -> tensor<?xf32> { - // CHECK: %[[m:.*]] = bufferization.to_memref %[[t]] + // CHECK: %[[m:.*]] = bufferization.to_buffer %[[t]] // CHECK: %[[r:.*]] = scf.if %{{.*}} { %r = scf.if %c -> tensor<?xf32> { // CHECK: %[[some_op:.*]] = "test.some_op" // CHECK: scf.yield %[[some_op]] @@ -37,7 +37,7 @@ func.func @write_to_alloc_tensor_or_readonly_tensor(%arg0: tensor<i32>, %cond: i1, %val: i32) -> tensor<i32> { - // CHECK: %[[arg0_m:.*]] = bufferization.to_memref %[[arg0]] + // CHECK: %[[arg0_m:.*]] = bufferization.to_buffer %[[arg0]] // CHECK: %[[r:.*]] = scf.if {{.*}} { // CHECK: scf.yield %[[arg0_m]] // CHECK: } else { diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-analysis.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-analysis.mlir index 7d429e4840114..454c17aef4d8a 100644 --- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-analysis.mlir +++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-analysis.mlir @@ -87,32 +87,32 @@ func.func @read_of_alloc_tensor_is_not_a_conflict(%f: f32, %idx: index) -> f32 { // ----- -// CHECK-LABEL: func @to_memref_not_read_only( -func.func @to_memref_not_read_only(%idx : index, %f: f32) -> f32 { +// CHECK-LABEL: func @to_buffer_not_read_only( +func.func @to_buffer_not_read_only(%idx : index, %f: f32) -> f32 { %t = tensor.generate { ^bb0(%i : index): tensor.yield %f : f32 } : tensor<5xf32> - // Some op may write into the result of to_memref later. - // CHECK: bufferization.to_memref + // Some op may write into the result of to_buffer later.
+ // CHECK: bufferization.to_buffer // CHECK-SAME: {__inplace_operands_attr__ = ["false"]} - %m = bufferization.to_memref %t : tensor<5xf32> to memref<5xf32> + %m = bufferization.to_buffer %t : tensor<5xf32> to memref<5xf32> %2 = tensor.extract %t[%idx] : tensor<5xf32> return %2 : f32 } // ----- -// CHECK-LABEL: func @to_memref_read_only( -func.func @to_memref_read_only(%idx : index, %f: f32) -> f32 { +// CHECK-LABEL: func @to_buffer_read_only( +func.func @to_buffer_read_only(%idx : index, %f: f32) -> f32 { %t = tensor.generate { ^bb0(%i : index): tensor.yield %f : f32 } : tensor<5xf32> - // Some op may write into the result of to_memref later. - // CHECK: bufferization.to_memref + // Some op may write into the result of to_buffer later. + // CHECK: bufferization.to_buffer // CHECK-SAME: {__inplace_operands_attr__ = ["true"]} - %m = bufferization.to_memref %t {read_only} : tensor<5xf32> to memref<5xf32> + %m = bufferization.to_buffer %t {read_only} : tensor<5xf32> to memref<5xf32> %2 = tensor.extract %t[%idx] : tensor<5xf32> return %2 : f32 } diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-encodings.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-encodings.mlir index c26f1681e4d96..e97777c3e3d13 100644 --- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-encodings.mlir +++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-encodings.mlir @@ -47,7 +47,7 @@ func.func @alloc_tesor_copy_from_default_space(%arg0: tensor<128xf32>) -> tensor // CHECK-LABEL: @alloc_tesor_copy_from_default_space // CHECK-SAME: (%[[arg0:.+]]: tensor<128xf32>) -> tensor<128xf32> { -// CHECK: %[[v0:.+]] = bufferization.to_memref %[[arg0]] : tensor<128xf32> to memref<128xf32, strided<[?], offset: ?>> +// CHECK: %[[v0:.+]] = bufferization.to_buffer %[[arg0]] : tensor<128xf32> to memref<128xf32, strided<[?], offset: ?>> // CHECK: %[[alloc:.+]] = memref.alloc() {alignment = 64 : i64} : memref<128xf32, 1> // CHECK: memref.copy %[[v0]], %[[alloc]] : memref<128xf32, strided<[?], offset: ?>> to memref<128xf32, 1> // CHECK: %[[v1:.+]] = bufferization.to_tensor %[[alloc]] : memref<128xf32, 1> to tensor<128xf32> @@ -63,7 +63,7 @@ func.func @alloc_tesor_copy_from_non_default_space(%arg0: tensor<128xf32, 1>) -> // CHECK-LABEL: @alloc_tesor_copy_from_non_default_space // CHECK-SAME: (%[[arg0:.+]]: tensor<128xf32, 1 : i64>) -> tensor<128xf32, 2 : i64> { -// CHECK: %[[v0:.+]] = bufferization.to_memref %[[arg0]] : tensor<128xf32, 1 : i64> to memref<128xf32, strided<[?], offset: ?>, 1> +// CHECK: %[[v0:.+]] = bufferization.to_buffer %[[arg0]] : tensor<128xf32, 1 : i64> to memref<128xf32, strided<[?], offset: ?>, 1> // CHECK: %[[alloc:.+]] = memref.alloc() {alignment = 64 : i64} : memref<128xf32, 2> // CHECK: memref.copy %[[v0]], %[[alloc]] : memref<128xf32, strided<[?], offset: ?>, 1> to memref<128xf32, 2> // CHECK: %[[v1:.+]] = bufferization.to_tensor %[[alloc]] : memref<128xf32, 2> to tensor<128xf32, 2 : i64> @@ -82,9 +82,9 @@ func.func @alloc_tesor_copy_from_non_default_space_no_cast(%arg0: tensor<128xf32 // CHECK-LABEL: @alloc_tesor_copy_from_non_default_space_no_cast // CHECK-SAME: (%[[arg0:.+]]: tensor<128xf32, 1 : i64>, %[[arg1:.+]]: tensor<4xf32, 1 : i64>) -> tensor<128xf32, 1 : i64> { -// CHECK: %[[v0:.+]] = bufferization.to_memref %[[arg1]] : tensor<4xf32, 1 : i64> to memref<4xf32, strided<[?], offset: ?>, 1> -// CHECK: %[[v1:.+]] = bufferization.to_memref %[[arg0]] : tensor<128xf32, 1 : i64> to memref<128xf32, strided<[?], offset: ?>, 1> -// CHECK: 
%[[v2:.+]] = bufferization.to_memref %[[arg0]] : tensor<128xf32, 1 : i64> to memref<128xf32, strided<[?], offset: ?>, 1> +// CHECK: %[[v0:.+]] = bufferization.to_buffer %[[arg1]] : tensor<4xf32, 1 : i64> to memref<4xf32, strided<[?], offset: ?>, 1> +// CHECK: %[[v1:.+]] = bufferization.to_buffer %[[arg0]] : tensor<128xf32, 1 : i64> to memref<128xf32, strided<[?], offset: ?>, 1> +// CHECK: %[[v2:.+]] = bufferization.to_buffer %[[arg0]] : tensor<128xf32, 1 : i64> to memref<128xf32, strided<[?], offset: ?>, 1> // CHECK: %[[alloc:.+]] = memref.alloc() {alignment = 64 : i64} : memref<128xf32, 2> // CHECK: memref.copy %[[v2]], %[[alloc]] : memref<128xf32, strided<[?], offset: ?>, 1> to memref<128xf32, 2> // CHECK: %[[v3:.+]] = bufferization.to_tensor %[[alloc]] : memref<128xf32, 2> to tensor<128xf32, 1 : i64> @@ -104,7 +104,7 @@ func.func @materialize_in_destination(%arg0: tensor<128xf32, 1>) -> tensor<128xf // CHECK-LABEL: @materialize_in_destination // CHECK-SAME: (%[[arg0:.+]]: tensor<128xf32, 1 : i64>) -> tensor<128xf32, 2 : i64> { -// CHECK: %[[v0:.+]] = bufferization.to_memref %[[arg0]] : tensor<128xf32, 1 : i64> to memref<128xf32, strided<[?], offset: ?>, 1> +// CHECK: %[[v0:.+]] = bufferization.to_buffer %[[arg0]] : tensor<128xf32, 1 : i64> to memref<128xf32, strided<[?], offset: ?>, 1> // CHECK: %[[alloc:.+]] = memref.alloc() {alignment = 64 : i64} : memref<128xf32, 2> // CHECK: memref.copy %[[v0]], %[[alloc]] : memref<128xf32, strided<[?], offset: ?>, 1> to memref<128xf32, 2> // CHECK: %[[v1:.+]] = bufferization.to_tensor %[[alloc]] : memref<128xf32, 2> to tensor<128xf32, 2 : i64> diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-partial.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-partial.mlir index 194c3278c78a1..908c760d9a0cd 100644 --- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-partial.mlir +++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-partial.mlir @@ -25,9 +25,9 @@ func.func @use_of_unknown_op_1(%t1: tensor<?xf32>) %idx = arith.constant 0 : index %cst = arith.constant 0.0 : f32 - // CHECK: %[[dummy_memref:.*]] = bufferization.to_memref %[[dummy]] : tensor<?xf32> to memref<?xf32, strided<[?], offset: ?>> + // CHECK: %[[dummy_memref:.*]] = bufferization.to_buffer %[[dummy]] : tensor<?xf32> to memref<?xf32, strided<[?], offset: ?>> // CHECK: vector.transfer_read %[[dummy_memref]][%{{.*}}], %{{.*}} : memref<?xf32, strided<[?], offset: ?>> - // CHECK-NO-LAYOUT-MAP: %[[dummy_memref:.*]] = bufferization.to_memref %[[dummy]] : tensor<?xf32> to memref<?xf32> + // CHECK-NO-LAYOUT-MAP: %[[dummy_memref:.*]] = bufferization.to_buffer %[[dummy]] : tensor<?xf32> to memref<?xf32> // CHECK-NO-LAYOUT-MAP: vector.transfer_read %[[dummy_memref]][%{{.*}}], %{{.*}} : memref<?xf32> %1 = vector.transfer_read %0[%idx], %cst : tensor<?xf32>, vector<5xf32> return %1 : vector<5xf32> } @@ -55,13 +55,13 @@ func.func @use_of_unknown_op_3(%t1: tensor<?xf32>) -> (vector<5xf32>, vector<5xf32>) { %idx = arith.constant 0 : index %cst = arith.constant 0.0 : f32 - // CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] + // CHECK: %[[m1:.*]] = bufferization.to_buffer %[[t1]] // CHECK: %[[v1:.*]] = vector.transfer_read %[[m1]] %1 = vector.transfer_read %t1[%idx], %cst : tensor<?xf32>, vector<5xf32> // CHECK: %[[dummy:.*]] = "test.dummy_op"(%[[t1]]) %0 = "test.dummy_op"(%t1) : (tensor<?xf32>) -> tensor<?xf32> - // CHECK: %[[dummy_memref:.*]] = bufferization.to_memref %[[dummy]] : tensor<?xf32> to memref<?xf32, strided<[?], offset: ?>> + // CHECK: %[[dummy_memref:.*]] = bufferization.to_buffer %[[dummy]] : tensor<?xf32> to memref<?xf32, strided<[?], offset: ?>> // CHECK: %[[v2:.*]] = vector.transfer_read %[[dummy_memref]] %2 = vector.transfer_read %0[%idx], %cst : tensor<?xf32>, vector<5xf32> @@
-81,7 +81,7 @@ func.func @use_of_unknown_op_4(%t1: tensor<?xf32>) // CHECK: %[[dummy:.*]] = "test.dummy_op"(%[[t1]]) %0 = "test.dummy_op"(%t1) : (tensor<?xf32>) -> tensor<?xf32> - // CHECK: %[[dummy_memref:.*]] = bufferization.to_memref %[[dummy]] + // CHECK: %[[dummy_memref:.*]] = bufferization.to_buffer %[[dummy]] // CHECK: %[[v1:.*]] = vector.transfer_read %[[dummy_memref]] %1 = vector.transfer_read %0[%idx], %cst : tensor<?xf32>, vector<5xf32> @@ -98,7 +98,7 @@ func.func @use_of_unknown_op_4(%t1: tensor<?xf32>) // CHECK-SAME: %[[t1:.*]]: tensor<?xf32> func.func @use_of_bufferizable_op_in_unbufferizable_op( %t1: tensor<?xf32>, %o: index, %s: index) -> (tensor<?xf32>, tensor<?xf32>) { - // CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] + // CHECK: %[[m1:.*]] = bufferization.to_buffer %[[t1]] // CHECK: %[[subview:.*]] = memref.subview %[[m1]] // The op must alloc because "test.dummy" may bufferize to a memory write. // CHECK: %[[alloc:.*]] = memref.alloc @@ -119,7 +119,7 @@ func.func @unused_unknown_op(%t1 : tensor<?xf32>) -> vector<5xf32> { %idx = arith.constant 0 : index %cst = arith.constant 0.0 : f32 - // CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] + // CHECK: %[[m1:.*]] = bufferization.to_buffer %[[t1]] // CHECK: vector.transfer_read %[[m1]] %1 = vector.transfer_read %t1[%idx], %cst : tensor<?xf32>, vector<5xf32> @@ -166,7 +166,7 @@ func.func @unknown_op_may_read(%v: vector<5xf32>) func.func @unknown_op_not_writable( %t1 : tensor<?xf32>, %v : vector<5xf32>, %idx : index) -> tensor<?xf32> { // CHECK: %[[dummy:.*]] = "test.dummy_op"(%[[t1]]) - // CHECK: %[[dummy_memref:.*]] = bufferization.to_memref %[[dummy]] + // CHECK: %[[dummy_memref:.*]] = bufferization.to_buffer %[[dummy]] %0 = "test.dummy_op"(%t1) : (tensor<?xf32>) -> (tensor<?xf32>) // The result of an unknown op is not writable. Always generate a copy. @@ -186,7 +186,7 @@ func.func @unknown_op_not_writable( // CHECK-TENSOR-LABEL: func @simple_tensor_test( // CHECK-TENSOR-SAME: %[[t1:.*]]: tensor<?xf32> func.func @simple_tensor_test(%t1 : tensor<?xf32>, %f : f32) -> tensor<?xf32> { - // CHECK-TENSOR: %[[t1_memref:.*]] = bufferization.to_memref %[[t1]] + // CHECK-TENSOR: %[[t1_memref:.*]] = bufferization.to_buffer %[[t1]] %c0 = arith.constant 0 : index // CHECK-TENSOR: %[[alloc:.*]] = memref.alloc // CHECK-TENSOR: memref.copy %[[t1_memref]], %[[alloc]] @@ -203,7 +203,7 @@ func.func @simple_tensor_test(%t1 : tensor<?xf32>, %f : f32) -> tensor<?xf32> { // CHECK-SCF-SAME: %[[t1:.*]]: tensor<?xf32> {bufferization.writable = true}, %[[c:.*]]: i1, %[[pos:.*]]: index func.func @simple_scf_if(%t1: tensor<?xf32> {bufferization.writable = true}, %c: i1, %pos: index, %f: f32) -> (tensor<?xf32>, index) { - // CHECK-SCF: %[[t1_memref:.*]] = bufferization.to_memref %[[t1]] + // CHECK-SCF: %[[t1_memref:.*]] = bufferization.to_buffer %[[t1]] // CHECK-SCF: %[[r:.*]] = scf.if %[[c]] -> (memref<?xf32, strided<[?], offset: ?>>) { %r1, %r2 = scf.if %c -> (tensor<?xf32>, index) { // CHECK-SCF: scf.yield %[[t1_memref]] @@ -211,7 +211,7 @@ func.func @simple_scf_if(%t1: tensor<?xf32> {bufferization.writable = true}, %c: // CHECK-SCF: } else { } else { // CHECK-SCF: %[[insert:.*]] = tensor.insert %{{.*}} into %[[t1]][{{.*}}] - // CHECK-SCF: %[[insert_memref:.*]] = bufferization.to_memref %[[insert]] + // CHECK-SCF: %[[insert_memref:.*]] = bufferization.to_buffer %[[insert]] %1 = tensor.insert %f into %t1[%pos] : tensor<?xf32> // CHECK-SCF: scf.yield %[[insert_memref]] scf.yield %1, %pos : tensor<?xf32>, index diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir index e65c5b92949f6..cd19e3a5e82aa 100644 ---
a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir +++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir @@ -39,7 +39,7 @@ func.func @use_tensor_func_arg(%A : tensor<?xf32>) -> (vector<4xf32>) { %c0 = arith.constant 0 : index %f0 = arith.constant 0.0 : f32 - // CHECK: %[[A_memref:.*]] = bufferization.to_memref %[[A]] + // CHECK: %[[A_memref:.*]] = bufferization.to_buffer %[[A]] // CHECK: %[[res:.*]] = vector.transfer_read %[[A_memref]] %0 = vector.transfer_read %A[%c0], %f0 : tensor<?xf32>, vector<4xf32> @@ -54,7 +54,7 @@ func.func @use_tensor_func_arg(%A : tensor<?xf32>) -> (vector<4xf32>) { func.func @return_tensor(%A : tensor<?xf32>, %v : vector<4xf32>) -> (tensor<?xf32>) { %c0 = arith.constant 0 : index - // CHECK: %[[A_memref:.*]] = bufferization.to_memref %[[A]] + // CHECK: %[[A_memref:.*]] = bufferization.to_buffer %[[A]] // CHECK: %[[dim:.*]] = memref.dim %[[A_memref]] // CHECK: %[[alloc:.*]] = memref.alloc(%[[dim]]) // CHECK: memref.copy %[[A_memref]], %[[alloc]] @@ -102,7 +102,7 @@ func.func @read_after_write_conflict(%cst : f32, %idx : index, %idx2 : index) -> (f32, f32) { // CHECK-DAG: %[[alloc:.*]] = memref.alloc // CHECK-DAG: %[[dummy:.*]] = "test.dummy_op" - // CHECK-DAG: %[[dummy_m:.*]] = bufferization.to_memref %[[dummy]] + // CHECK-DAG: %[[dummy_m:.*]] = bufferization.to_buffer %[[dummy]] %t = "test.dummy_op"() : () -> (tensor<10xf32>) // CHECK: memref.copy %[[dummy_m]], %[[alloc]] @@ -134,7 +134,7 @@ func.func @copy_deallocated() -> tensor<10xf32> { // CHECK-LABEL: func @select_different_tensors( // CHECK-SAME: %[[t:.*]]: tensor<?xf32> func.func @select_different_tensors(%t: tensor<?xf32>, %sz: index, %pos: index, %c: i1) -> f32 { - // CHECK-DAG: %[[m:.*]] = bufferization.to_memref %[[t]] : tensor<?xf32> to memref<?xf32> + // CHECK-DAG: %[[m:.*]] = bufferization.to_buffer %[[t]] : tensor<?xf32> to memref<?xf32> // CHECK-DAG: %[[alloc:.*]] = memref.alloc(%{{.*}}) {{.*}} : memref<?xf32> %0 = bufferization.alloc_tensor(%sz) : tensor<?xf32> @@ -154,7 +154,7 @@ func.func @select_different_tensors(%t: tensor<?xf32>, %sz: index, %pos: index, // moment because this would create a tensor op during bufferization. That is // currently forbidden.
func.func @alloc_tensor_with_copy(%t: tensor<5xf32>) -> tensor<5xf32> { - // CHECK: %[[m:.*]] = bufferization.to_memref %[[t]] + // CHECK: %[[m:.*]] = bufferization.to_buffer %[[t]] // CHECK: %[[alloc:.*]] = memref.alloc() {{.*}} : memref<5xf32> // CHECK: memref.copy %[[m]], %[[alloc]] %0 = bufferization.alloc_tensor() copy(%t) : tensor<5xf32> @@ -200,7 +200,7 @@ func.func @read_of_alias(%t: tensor<100xf32>, %pos1: index, %pos2: index, // CHECK-LABEL: func @from_unranked_to_unranked( // CHECK-SAME: %[[arg0:.*]]: tensor<*xi32> func.func @from_unranked_to_unranked(%arg0: tensor<*xi32>) -> tensor<*xi32> { - // CHECK: %[[m:.*]] = bufferization.to_memref %[[arg0]] : tensor<*xi32> to memref<*xi32> + // CHECK: %[[m:.*]] = bufferization.to_buffer %[[arg0]] : tensor<*xi32> to memref<*xi32> // CHECK: %[[t:.*]] = bufferization.to_tensor %[[m]] // CHECK: return %[[t]] : tensor<*xi32> %0 = tensor.cast %arg0 : tensor<*xi32> to tensor<*xi32> @@ -212,7 +212,7 @@ func.func @from_unranked_to_unranked(%arg0: tensor<*xi32>) -> tensor<*xi32> { // CHECK-LABEL: func @tensor_copy( // CHECK-SAME: %[[arg0:.*]]: tensor<5xf32>) func.func @tensor_copy(%arg0: tensor<5xf32>) -> tensor<5xf32> { - // CHECK: %[[m:.*]] = bufferization.to_memref %[[arg0]] + // CHECK: %[[m:.*]] = bufferization.to_buffer %[[arg0]] // CHECK: %[[alloc:.*]] = memref.alloc() {{.*}} : memref<5xf32> // CHECK: memref.copy %[[m]], %[[alloc]] // CHECK: %[[r:.*]] = bufferization.to_tensor %[[alloc]] @@ -227,7 +227,7 @@ func.func @tensor_copy(%arg0: tensor<5xf32>) -> tensor<5xf32> { // CHECK-LABEL: func @materialize_in_destination_buffer( // CHECK-SAME: %[[t:.*]]: tensor<5xf32>, %[[m:.*]]: memref<5xf32>) -// CHECK: %[[b:.*]] = bufferization.to_memref %[[t]] : tensor<5xf32> to memref<5xf32, strided<[?], offset: ?>> +// CHECK: %[[b:.*]] = bufferization.to_buffer %[[t]] : tensor<5xf32> to memref<5xf32, strided<[?], offset: ?>> // CHECK: memref.copy %[[b]], %[[m]] func.func @materialize_in_destination_buffer(%t: tensor<5xf32>, %m: memref<5xf32>) { bufferization.materialize_in_destination %t in restrict writable %m diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-force-copy-before-write.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-force-copy-before-write.mlir index 230a0ed429489..3fb89aa1a0021 100644 --- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-force-copy-before-write.mlir +++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-force-copy-before-write.mlir @@ -1,7 +1,7 @@ -// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries=1 no-analysis-func-filter=contains_to_memref_op" -drop-equivalent-buffer-results --split-input-file | FileCheck %s +// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries=1 no-analysis-func-filter=contains_to_buffer_op" -drop-equivalent-buffer-results --split-input-file | FileCheck %s // ToMemref ops do not pass analysis step. CopyBeforeWrite will be true only for the -// FuncOp "contains_to_memref_op" since it is specified in no-analysis-func-filter. +// FuncOp "contains_to_buffer_op" since it is specified in no-analysis-func-filter. 
// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries=1 copy-before-write=1" -drop-equivalent-buffer-results --split-input-file | FileCheck %s --check-prefix=CHECK_COPY @@ -21,14 +21,14 @@ module { return %inserted : tensor<?xf32> } - // CHECK-LABEL: func.func @contains_to_memref_op( + // CHECK-LABEL: func.func @contains_to_buffer_op( // CHECK: memref.copy - // CHECK_COPY-LABEL: func.func @contains_to_memref_op( + // CHECK_COPY-LABEL: func.func @contains_to_buffer_op( // CHECK_COPY: memref.copy - func.func @contains_to_memref_op(%arg0: tensor<?xf32> {bufferization.writable = true}, %arg1: index) -> vector<5xf32> { - %0 = bufferization.to_memref %arg0 : tensor<?xf32> to memref<?xf32> + func.func @contains_to_buffer_op(%arg0: tensor<?xf32> {bufferization.writable = true}, %arg1: index) -> vector<5xf32> { + %0 = bufferization.to_buffer %arg0 : tensor<?xf32> to memref<?xf32> %cst = arith.constant 0.000000e+00 : f32 %1 = vector.transfer_read %0[%arg1], %cst : memref<?xf32>, vector<5xf32> return %1 : vector<5xf32> diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize.mlir index e7797d4bc50a9..2efb5893c8511 100644 --- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize.mlir +++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize.mlir @@ -70,7 +70,7 @@ func.func @call_to_unknown_tensor_returning_func(%t : tensor<?xf32>) { // CHECK-NO-LAYOUT-MAP: %[[alloc_no_layout:.*]] = memref.alloc(%{{.*}}) {{.*}} : memref<2x?xf32> // CHECK-NO-LAYOUT-MAP: memref.copy %[[subview]], %[[alloc_no_layout]] // TODO: %alloc should be deallocated here, but we currently do not dealloc -// buffers that are inserted due to to_tensor/to_memref canonicalization (when +// buffers that are inserted due to to_tensor/to_buffer canonicalization (when // the buffer types have different layout maps). // CHECK-NO-LAYOUT-MAP: return %[[alloc_no_layout]] @@ -669,17 +669,17 @@ func.func @call_llvm_func() { // ----- -// CHECK-LABEL: func @to_memref_op_unsupported( +// CHECK-LABEL: func @to_buffer_op_unsupported( // CHECK-SAME: %[[arg0:.*]]: memref<?xf32, -func.func @to_memref_op_unsupported( +func.func @to_buffer_op_unsupported( %t1: tensor<?xf32> {bufferization.writable = true}, %idx1: index, %idx2: index, %idx3: index, %v1: vector<5xf32>) -> (vector<5xf32>) { // Insert a copy because we cannot analyze what happens with the result of a - // to_memref op. + // to_buffer op.
// CHECK: %[[alloc:.*]] = memref.alloc // CHECK: memref.copy %[[arg0]], %[[alloc]] - %0 = bufferization.to_memref %t1 : tensor<?xf32> to memref<?xf32> + %0 = bufferization.to_buffer %t1 : tensor<?xf32> to memref<?xf32> // CHECK: "test.foo"(%[[alloc]]) "test.foo"(%0) : (memref<?xf32>) -> () diff --git a/mlir/test/Dialect/Bufferization/Transforms/tensorlike-bufferlike.mlir b/mlir/test/Dialect/Bufferization/Transforms/tensorlike-bufferlike.mlir index f8691e110aad1..d8b1a00522ab6 100644 --- a/mlir/test/Dialect/Bufferization/Transforms/tensorlike-bufferlike.mlir +++ b/mlir/test/Dialect/Bufferization/Transforms/tensorlike-bufferlike.mlir @@ -4,7 +4,7 @@ // CHECK-SAME: {found = {operand_0 = "is_tensor_like", result_0 = "is_buffer_like"}} func.func @builtin_unranked(%t: tensor<*xf32>) -> (memref<*xf32>) { - %0 = bufferization.to_memref %t : tensor<*xf32> to memref<*xf32> + %0 = bufferization.to_buffer %t : tensor<*xf32> to memref<*xf32> return %0 : memref<*xf32> } @@ -14,7 +14,7 @@ func.func @builtin_unranked(%t: tensor<*xf32>) -> (memref<*xf32>) // CHECK-SAME: {found = {operand_0 = "is_tensor_like", result_0 = "is_buffer_like"}} func.func @builtin_ranked(%t: tensor<42xf32>) -> (memref<42xf32>) { - %0 = bufferization.to_memref %t : tensor<42xf32> to memref<42xf32> + %0 = bufferization.to_buffer %t : tensor<42xf32> to memref<42xf32> return %0 : memref<42xf32> } diff --git a/mlir/test/Dialect/Bufferization/Transforms/transform-ops.mlir b/mlir/test/Dialect/Bufferization/Transforms/transform-ops.mlir index a2741abbda3b0..5e9ccc9c19074 100644 --- a/mlir/test/Dialect/Bufferization/Transforms/transform-ops.mlir +++ b/mlir/test/Dialect/Bufferization/Transforms/transform-ops.mlir @@ -15,7 +15,7 @@ module attributes {transform.with_named_sequence} { func.func @test_function(%A : tensor<?xf32>, %v : vector<4xf32>) -> (tensor<?xf32>) { %c0 = arith.constant 0 : index - // CHECK: %[[A_memref:.*]] = bufferization.to_memref %[[A]] + // CHECK: %[[A_memref:.*]] = bufferization.to_buffer %[[A]] // CHECK: %[[dim:.*]] = memref.dim %[[A_memref]] // CHECK: %[[alloc:.*]] = memref.alloc(%[[dim]]) // CHECK: memref.copy %[[A_memref]], %[[alloc]] @@ -45,7 +45,7 @@ module attributes {transform.with_named_sequence} { func.func @test_function(%A : tensor<?xf32>, %v : vector<4xf32>) -> (tensor<?xf32>) { %c0 = arith.constant 0 : index - // CHECK: %[[A_memref:.*]] = bufferization.to_memref %[[A]] + // CHECK: %[[A_memref:.*]] = bufferization.to_buffer %[[A]] // CHECK: %[[dim:.*]] = memref.dim %[[A_memref]] // CHECK: %[[alloc:.*]] = memref.alloc(%[[dim]]) // CHECK: linalg.copy ins(%[[A_memref]] : memref<{{.*}}>) outs(%[[alloc]] @@ -116,7 +116,7 @@ module attributes {transform.with_named_sequence} { func.func @test_function(%A : tensor<?xf32>, %v : vector<4xf32>) -> (tensor<?xf32>) { %c0 = arith.constant 0 : index - // CHECK: %[[A_memref:.*]] = bufferization.to_memref %[[A]] + // CHECK: %[[A_memref:.*]] = bufferization.to_buffer %[[A]] // CHECK: %[[dim:.*]] = memref.dim %[[A_memref]] // CHECK: %[[alloc:.*]] = memref.alloc(%[[dim]]) // CHECK: memref.copy %[[A_memref]], %[[alloc]] diff --git a/mlir/test/Dialect/Bufferization/canonicalize.mlir b/mlir/test/Dialect/Bufferization/canonicalize.mlir index b662e713e189c..f44e29071796d 100644 --- a/mlir/test/Dialect/Bufferization/canonicalize.mlir +++ b/mlir/test/Dialect/Bufferization/canonicalize.mlir @@ -3,10 +3,10 @@ // RUN: --split-input-file -allow-unregistered-dialect | \ // RUN: FileCheck %s -// Basic folding of to_tensor(to_memref(t)) -> t +// Basic folding of to_tensor(to_buffer(t)) -> t // CHECK-LABEL: func @tensor_load_of_buffer_cast( func.func
@tensor_load_of_buffer_cast(%arg0: tensor<?xf32>) -> tensor<?xf32> { - %0 = bufferization.to_memref %arg0 : tensor<?xf32> to memref<?xf32> + %0 = bufferization.to_buffer %arg0 : tensor<?xf32> to memref<?xf32> %1 = bufferization.to_tensor %0 : memref<?xf32> to tensor<?xf32> return %1 : tensor<?xf32> } @@ -15,11 +15,11 @@ func.func @tensor_load_of_buffer_cast(%arg0: tensor<?xf32>) -> tensor<?xf32> { // ----- -// Basic folding of to_memref(to_tensor(m)) -> m +// Basic folding of to_buffer(to_tensor(m)) -> m // CHECK-LABEL: func @buffer_cast_of_tensor_load( func.func @buffer_cast_of_tensor_load(%arg0: memref<?xf32>) -> memref<?xf32> { %0 = bufferization.to_tensor %arg0 : memref<?xf32> to tensor<?xf32> - %1 = bufferization.to_memref %0 : tensor<?xf32> to memref<?xf32> + %1 = bufferization.to_buffer %0 : tensor<?xf32> to memref<?xf32> return %1 : memref<?xf32> } // CHECK-SAME: %[[MEMREF:.*]]: memref<?xf32>) -> memref<?xf32> { @@ -34,7 +34,7 @@ func.func @buffer_cast_of_tensor_load(%arg0: memref<?xf32>) -> memref<?xf32> { // CHECK-SAME: %[[MEMREF_ADDRSPACE2:.*]]: memref<?xf32, 2>) // CHECK-SAME: -> memref<?xf32, 7> { // CHECK-NOT: bufferization.to_tensor -// CHECK-NOT: bufferization.to_memref +// CHECK-NOT: bufferization.to_buffer // CHECK: %[[C0:.*]] = arith.constant 0 : index // CHECK: %[[DIM:.*]] = memref.dim %[[MEMREF_ADDRSPACE2]], %[[C0]] : memref<?xf32, 2> // CHECK: %[[MEMREF_ADDRSPACE7:.*]] = memref.alloc(%[[DIM]]) : memref<?xf32, 7> @@ -44,7 +44,7 @@ func.func @buffer_cast_of_tensor_load(%arg0: memref<?xf32>) -> memref<?xf32> { func.func @canonicalize_buffer_cast_of_tensor_load_different_address_space(%arg0: memref<?xf32, 2>) -> memref<?xf32, 7> { %0 = bufferization.to_tensor %arg0 : memref<?xf32, 2> to tensor<?xf32> - %1 = bufferization.to_memref %0 : tensor<?xf32> to memref<?xf32, 7> + %1 = bufferization.to_buffer %0 : tensor<?xf32> to memref<?xf32, 7> return %1 : memref<?xf32, 7> } @@ -56,7 +56,7 @@ func.func @canonicalize_buffer_cast_of_tensor_load_different_address_space(%arg0: memref<?xf32, 2>) // CHECK-SAME: %[[M:.*]]: memref<?xf32, strided<[1], offset: 3>>) // CHECK-SAME: -> memref<?xf32, strided<[1], offset: ?>> { // CHECK-NOT: bufferization.to_tensor -// CHECK-NOT: bufferization.to_memref +// CHECK-NOT: bufferization.to_buffer // CHECK: %[[R:.*]] = memref.cast %[[M]] // CHECK-SAME: memref<?xf32, strided<[1], offset: 3>> to memref<?xf32, strided<[1], offset: ?>> // CHECK: return %[[R]] func.func @canonicalize_buffer_cast_of_tensor_load( %arg0: memref<?xf32, strided<[1], offset: 3>>) -> memref<?xf32, strided<[1], offset: ?>> { %0 = bufferization.to_tensor %arg0 : memref<?xf32, strided<[1], offset: 3>> to tensor<?xf32> - %1 = bufferization.to_memref %0 : tensor<?xf32> to memref<?xf32, strided<[1], offset: ?>> + %1 = bufferization.to_buffer %0 : tensor<?xf32> to memref<?xf32, strided<[1], offset: ?>> return %1 : memref<?xf32, strided<[1], offset: ?>> } @@ -78,13 +78,13 @@ func.func @canonicalize_buffer_cast_of_tensor_load_to_copy( %arg0: memref<?xf32, strided<[1], offset: 3>>) -> memref<?xf32, strided<[1], offset: 7>> { %0 = bufferization.to_tensor %arg0 : memref<?xf32, strided<[1], offset: 3>> to tensor<?xf32> - %1 = bufferization.to_memref %0 : tensor<?xf32> to memref<?xf32, strided<[1], offset: 7>> + %1 = bufferization.to_buffer %0 : tensor<?xf32> to memref<?xf32, strided<[1], offset: 7>> return %1 : memref<?xf32, strided<[1], offset: 7>> } // CHECK-SAME: %[[M:.*]]: memref<?xf32, strided<[1], offset: 3>>) // CHECK-SAME: -> memref<?xf32, strided<[1], offset: 7>> { // CHECK-NOT: bufferization.to_tensor -// CHECK-NOT: bufferization.to_memref +// CHECK-NOT: bufferization.to_buffer // CHECK: %[[C0:.*]] = arith.constant 0 : index // CHECK: %[[DIM:.*]] = memref.dim %[[M]], %[[C0]] : memref<?xf32, strided<[1], offset: 3>> // CHECK: %[[ALLOC:.*]] = memref.alloc(%[[DIM]]) : memref<?xf32, strided<[1], offset: 7>> @@ -250,26 +250,26 @@ func.func @clone_and_preceding_dealloc(%arg0: memref<?xf32>) -> memref<32xf32> { // ----- -// CHECK-LABEL: func @tensor_cast_to_memref +// CHECK-LABEL: func @tensor_cast_to_buffer // CHECK-SAME: %[[ARG0:.+]]: tensor<4x6x16x32xi8> -func.func @tensor_cast_to_memref(%arg0 : tensor<4x6x16x32xi8>) -> +func.func @tensor_cast_to_buffer(%arg0 : tensor<4x6x16x32xi8>) -> memref<?x6x16x32xi8> { %0 = tensor.cast %arg0 : tensor<4x6x16x32xi8> to tensor<?x6x16x32xi8> - %1 = bufferization.to_memref %0 : tensor<?x6x16x32xi8> to memref<?x6x16x32xi8> + %1 = bufferization.to_buffer %0 : tensor<?x6x16x32xi8> to memref<?x6x16x32xi8> return %1 : memref<?x6x16x32xi8> } -// CHECK: %[[M:.+]] = bufferization.to_memref %[[ARG0]] : tensor<4x6x16x32xi8>
+// CHECK: %[[M:.+]] = bufferization.to_buffer %[[ARG0]] : tensor<4x6x16x32xi8> // CHECK: %[[M1:.+]] = memref.cast %[[M]] // CHECK-SAME: memref<4x6x16x32xi8> to memref<?x6x16x32xi8> // CHECK: return %[[M1]] : memref<?x6x16x32xi8> // ----- -// Folding of memref.load(to_memref(%v, %idxs)) -> tensor.extract(%v, %idx) +// Folding of memref.load(to_buffer(%v, %idxs)) -> tensor.extract(%v, %idx) // CHECK-LABEL: func @load_from_buffer_cast( func.func @load_from_buffer_cast(%arg0: index, %arg1: index, %arg2: tensor<?x?xf32>) -> f32 { - %0 = bufferization.to_memref %arg2 : tensor<?x?xf32> to memref<?x?xf32> + %0 = bufferization.to_buffer %arg2 : tensor<?x?xf32> to memref<?x?xf32> %1 = memref.load %0[%arg0, %arg1] : memref<?x?xf32> return %1 : f32 } diff --git a/mlir/test/Dialect/Bufferization/ops.mlir b/mlir/test/Dialect/Bufferization/ops.mlir index 7b6a6f492d069..fc6df4a09f706 100644 --- a/mlir/test/Dialect/Bufferization/ops.mlir +++ b/mlir/test/Dialect/Bufferization/ops.mlir @@ -11,12 +11,12 @@ func.func @test_clone(%buf : memref<*xf32>) -> memref<*xf32> { return %clone : memref<*xf32> } -// CHECK-LABEL: test_to_memref -func.func @test_to_memref(%arg0: tensor<?xi64>, %arg1: tensor<*xi64>) +// CHECK-LABEL: test_to_buffer +func.func @test_to_buffer(%arg0: tensor<?xi64>, %arg1: tensor<*xi64>) -> (memref<?xi64, affine_map<(d0) -> (d0 + 7)>>, memref<*xi64, 1>) { - %0 = bufferization.to_memref %arg0 + %0 = bufferization.to_buffer %arg0 : tensor<?xi64> to memref<?xi64, affine_map<(d0) -> (d0 + 7)>> - %1 = bufferization.to_memref %arg1 + %1 = bufferization.to_buffer %arg1 : tensor<*xi64> to memref<*xi64, 1> return %0, %1 : memref<?xi64, affine_map<(d0) -> (d0 + 7)>>, memref<*xi64, 1> } diff --git a/mlir/test/Dialect/ControlFlow/one-shot-bufferize.mlir b/mlir/test/Dialect/ControlFlow/one-shot-bufferize.mlir index f5c9f81a18997..e37b63d01378b 100644 --- a/mlir/test/Dialect/ControlFlow/one-shot-bufferize.mlir +++ b/mlir/test/Dialect/ControlFlow/one-shot-bufferize.mlir @@ -3,7 +3,7 @@ // CHECK-NO-FUNC-LABEL: func @br( // CHECK-NO-FUNC-SAME: %[[t:.*]]: tensor<5xf32>) -// CHECK-NO-FUNC: %[[m:.*]] = bufferization.to_memref %[[t]] : tensor<5xf32> to memref<5xf32, strided<[?], offset: ?>> +// CHECK-NO-FUNC: %[[m:.*]] = bufferization.to_buffer %[[t]] : tensor<5xf32> to memref<5xf32, strided<[?], offset: ?>> // CHECK-NO-FUNC: %[[r:.*]] = scf.execute_region -> memref<5xf32, strided<[?], offset: ?>> { // CHECK-NO-FUNC: cf.br ^[[block:.*]](%[[m]] // CHECK-NO-FUNC: ^[[block]](%[[arg1:.*]]: memref<5xf32, strided<[?], offset: ?>>): @@ -23,7 +23,7 @@ func.func @br(%t: tensor<5xf32>) { // CHECK-NO-FUNC-LABEL: func @cond_br( // CHECK-NO-FUNC-SAME: %[[t1:.*]]: tensor<5xf32>, -// CHECK-NO-FUNC: %[[m1:.*]] = bufferization.to_memref %[[t1]] : tensor<5xf32> to memref<5xf32, strided<[?], offset: ?>> +// CHECK-NO-FUNC: %[[m1:.*]] = bufferization.to_buffer %[[t1]] : tensor<5xf32> to memref<5xf32, strided<[?], offset: ?>> // CHECK-NO-FUNC: %[[alloc:.*]] = memref.alloc() {{.*}} : memref<5xf32> // CHECK-NO-FUNC: %[[r:.*]] = scf.execute_region -> memref<5xf32, strided<[?], offset: ?>> { // CHECK-NO-FUNC: cf.cond_br %{{.*}}, ^[[block1:.*]](%[[m1]] : {{.*}}), ^[[block2:.*]](%[[alloc]] : {{.*}}) diff --git a/mlir/test/Dialect/Linalg/bufferize.mlir b/mlir/test/Dialect/Linalg/bufferize.mlir index 530badebd5c70..1c6cb88fa028b 100644 --- a/mlir/test/Dialect/Linalg/bufferize.mlir +++ b/mlir/test/Dialect/Linalg/bufferize.mlir @@ -3,7 +3,7 @@ #map0 = affine_map<(d0) -> (d0)> // In-depth checking of a basic case, this is testing -// - bufferization.to_memref / bufferization.to_tensor materializations are +// - bufferization.to_buffer / bufferization.to_tensor materializations are // properly inserted // - payload is
correctly carried over // - affine maps are correctly carried over @@ -12,7 +12,7 @@ // CHECK: #map = affine_map<(d0) -> (d0)> // CHECK-LABEL: func @basic( // CHECK-SAME: %[[TENSOR:.*]]: tensor<4xf32>) -> tensor<4xf32> { -// CHECK-DAG: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : tensor<4xf32> to memref<4xf32> +// CHECK-DAG: %[[MEMREF:.*]] = bufferization.to_buffer %[[TENSOR]] : tensor<4xf32> to memref<4xf32> // CHECK-DAG: %[[RESULT_MEMREF:.*]] = memref.alloc() {{.*}} : memref<4xf32> // CHECK: linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel"]} // CHECK-SAME: ins(%[[MEMREF]] : memref<4xf32>) @@ -46,7 +46,7 @@ func.func @basic(%arg0: tensor<4xf32>) -> tensor<4xf32> { // CHECK: #map = affine_map<(d0) -> (d0)> // CHECK-LABEL: func @empty_tensor( // CHECK-SAME: %[[IN:.*]]: tensor<?xf32>, %[[SIZE:.*]]: index) -// CHECK-DAG: %[[MEMREF:.*]] = bufferization.to_memref %[[IN]] : tensor<?xf32> to memref<?xf32> +// CHECK-DAG: %[[MEMREF:.*]] = bufferization.to_buffer %[[IN]] : tensor<?xf32> to memref<?xf32> // CHECK-DAG: %[[OUT_BUF:.*]] = memref.alloc(%[[SIZE]]) {{.*}} : memref<?xf32> // CHECK: linalg.generic // CHECK-SAME: ins(%[[MEMREF]] : memref<?xf32>) @@ -105,7 +105,7 @@ func.func @multiple_results(%arg0: tensor<4xf32>) -> (tensor<4xf32>, tensor<4xf3 // CHECK-DAG: %[[DIM1:.*]] = tensor.dim %[[ARG]], %[[C1]] : tensor<?x?xf32> // CHECK-DAG: %[[RESULT0:.*]] = memref.alloc(%[[DIM0]], %[[DIM1]]) {{.*}} : memref<?x?xf32> // CHECK-DAG: %[[RESULT1:.*]] = memref.alloc(%[[DIM0]], %[[DIM1]]) {{.*}} : memref<?x?xf32> -// CHECK-DAG: %[[MEMREF_ARG:.*]] = bufferization.to_memref %[[ARG]] : tensor<?x?xf32> to memref<?x?xf32> +// CHECK-DAG: %[[MEMREF_ARG:.*]] = bufferization.to_buffer %[[ARG]] : tensor<?x?xf32> to memref<?x?xf32> // CHECK: linalg.generic // CHECK-SAME: ins(%[[MEMREF_ARG]] : memref<?x?xf32>) // CHECK-SAME: outs(%[[RESULT0]], %[[RESULT1]] : memref<?x?xf32>, memref<?x?xf32>) @@ -141,8 +141,8 @@ func.func @dynamic_results(%arg0: tensor<?x?xf32>) // CHECK-SAME: %[[ARG0_TENSOR:.*]]: tensor<2x3x4xvector<3x4xi4>>, // CHECK-SAME: %[[ARG1_TENSOR:.*]]: tensor<3x2xf32>) -> tensor<3x2xf32> { // CHECK-DAG: %[[INIT_BUFFER:.*]] = memref.alloc() {{.*}} : memref<3x2xf32> -// CHECK-DAG: %[[ARG0_MEMREF:.*]] = bufferization.to_memref %[[ARG0_TENSOR]] : tensor<2x3x4xvector<3x4xi4>> -// CHECK-DAG: %[[ARG1_MEMREF:.*]] = bufferization.to_memref %[[ARG1_TENSOR]] : tensor<3x2xf32> +// CHECK-DAG: %[[ARG0_MEMREF:.*]] = bufferization.to_buffer %[[ARG0_TENSOR]] : tensor<2x3x4xvector<3x4xi4>> +// CHECK-DAG: %[[ARG1_MEMREF:.*]] = bufferization.to_buffer %[[ARG1_TENSOR]] : tensor<3x2xf32> // CHECK: memref.copy %[[ARG1_MEMREF]], %[[INIT_BUFFER]] : memref<3x2xf32> to memref<3x2xf32> // CHECK: linalg.generic // CHECK-SAME: ins(%[[ARG0_MEMREF]] : memref<2x3x4xvector<3x4xi4>>) @@ -194,7 +194,7 @@ func.func @bufferize_dot(%in: tensor<4xf32>, %out: tensor<f32>) -> tensor<f32> { // CHECK-LABEL: func @bufferize_softmax( // CHECK-SAME: %[[arg0:.*]]: tensor<2x16x32xf32>, %[[arg1:.*]]: tensor<2x16x32xf32> -// CHECK: %[[m0:.*]] = bufferization.to_memref %[[arg0]] +// CHECK: %[[m0:.*]] = bufferization.to_buffer %[[arg0]] // CHECK: %[[alloc:.*]] = memref.alloc() // CHECK-NOT: memref.copy // CHECK: linalg.softmax dimension(2) ins(%[[m0]] : {{.*}}) outs(%[[alloc:.*]] : {{.*}}) diff --git a/mlir/test/Dialect/Linalg/hoisting.mlir b/mlir/test/Dialect/Linalg/hoisting.mlir index 4e1035e038ca5..318edca73cce1 100644 --- a/mlir/test/Dialect/Linalg/hoisting.mlir +++ b/mlir/test/Dialect/Linalg/hoisting.mlir @@ -519,7 +519,7 @@ module attributes {transform.with_named_sequence} { // memory (i.e.
`%collapsed_1` and `%collapsed_2` alias): // %acc = vector.transfer_read %collapsed_2[%c0] -// CHECK-LABEL: func.func @no_hoisting_write_to_memref +// CHECK-LABEL: func.func @no_hoisting_write_to_buffer // CHECK: scf.for {{.*}} { // CHECK: vector.transfer_read {{.*}} : memref<2xi32>, vector<1xi32> // CHECK-NEXT: vector.transfer_read {{.*}} : memref<2xi32>, vector<1xi32> @@ -527,7 +527,7 @@ module attributes {transform.with_named_sequence} { // CHECK-NEXT: vector.transfer_write {{.*}} : vector<1xi32>, memref<2xi32> // CHECK-NEXT: } -func.func @no_hoisting_write_to_memref(%rhs: i32, %arg1: vector<1xi32>) { +func.func @no_hoisting_write_to_buffer(%rhs: i32, %arg1: vector<1xi32>) { %c0_i32 = arith.constant 0 : i32 %c0 = arith.constant 0 : index %c1 = arith.constant 1 : index diff --git a/mlir/test/Dialect/Linalg/transform-op-bufferize-to-allocation.mlir b/mlir/test/Dialect/Linalg/transform-op-bufferize-to-allocation.mlir index 35cbd7725ec50..4d7ddc8a513c4 100644 --- a/mlir/test/Dialect/Linalg/transform-op-bufferize-to-allocation.mlir +++ b/mlir/test/Dialect/Linalg/transform-op-bufferize-to-allocation.mlir @@ -101,7 +101,7 @@ module attributes {transform.with_named_sequence} { // CHECK-LABEL: func @tensor_pad_constant( // CHECK-SAME: %[[t:.*]]: tensor<?x?xf32> -// CHECK: %[[src:.*]] = bufferization.to_memref %[[t]] +// CHECK: %[[src:.*]] = bufferization.to_buffer %[[t]] // CHECK: %[[alloc:.*]] = memref.alloc // CHECK: %[[subview:.*]] = memref.subview %[[alloc]] // CHECK: memref.copy %[[src]], %[[subview]] @@ -130,7 +130,7 @@ module attributes {transform.with_named_sequence} { // CHECK-LABEL: func @tensor_insert( // CHECK-SAME: %[[t:.*]]: tensor<?x3xf32> -// CHECK: %[[m:.*]] = bufferization.to_memref %[[t]] +// CHECK: %[[m:.*]] = bufferization.to_buffer %[[t]] // CHECK: %[[alloc:.*]] = memref.alloc(%{{.*}}) : memref<?x3xf32> // CHECK: memref.copy %[[m]], %[[alloc]] // CHECK: memref.store %{{.*}}, %[[alloc]] diff --git a/mlir/test/Dialect/MemRef/normalize-memrefs.mlir b/mlir/test/Dialect/MemRef/normalize-memrefs.mlir index 440f4776424cc..d2924fb1ecf77 100644 --- a/mlir/test/Dialect/MemRef/normalize-memrefs.mlir +++ b/mlir/test/Dialect/MemRef/normalize-memrefs.mlir @@ -374,7 +374,7 @@ func.func @neg_map() -> memref<2x3xf32, #neg> { // CHECK-LABEL: func @memref_with_strided_offset func.func @memref_with_strided_offset(%arg0: tensor<128x512xf32>, %arg1: index, %arg2: index) -> tensor<16x512xf32> { %c0 = arith.constant 0 : index - %0 = bufferization.to_memref %arg0 : tensor<128x512xf32> to memref<128x512xf32, strided<[?, ?], offset: ?>> + %0 = bufferization.to_buffer %arg0 : tensor<128x512xf32> to memref<128x512xf32, strided<[?, ?], offset: ?>> %subview = memref.subview %0[%arg2, 0] [%arg1, 512] [1, 1] : memref<128x512xf32, strided<[?, ?], offset: ?>> to memref<?x512xf32, strided<[?, ?], offset: ?>> // CHECK: %{{.*}} = memref.cast %{{.*}} : memref<?x512xf32, strided<[?, ?], offset: ?>> to memref<16x512xf32, strided<[?, ?], offset: ?>> %cast = memref.cast %subview : memref<?x512xf32, strided<[?, ?], offset: ?>> to memref<16x512xf32, strided<[?, ?], offset: ?>> diff --git a/mlir/test/Dialect/SCF/bufferize.mlir b/mlir/test/Dialect/SCF/bufferize.mlir index 6c08d9f68e8a9..20a640776b561 100644 --- a/mlir/test/Dialect/SCF/bufferize.mlir +++ b/mlir/test/Dialect/SCF/bufferize.mlir @@ -4,8 +4,8 @@ // CHECK-SAME: %[[PRED:.*]]: i1, // CHECK-SAME: %[[TRUE_TENSOR:.*]]: tensor<?xf32>, // CHECK-SAME: %[[FALSE_TENSOR:.*]]: tensor<?xf32>) -> tensor<?xf32> { -// CHECK-DAG: %[[TRUE_MEMREF:.*]] = bufferization.to_memref %[[TRUE_TENSOR]] : tensor<?xf32> to memref<?xf32> -// CHECK-DAG: %[[FALSE_MEMREF:.*]] = bufferization.to_memref %[[FALSE_TENSOR]] : tensor<?xf32> to memref<?xf32> +// CHECK-DAG:
%[[TRUE_MEMREF:.*]] = bufferization.to_buffer %[[TRUE_TENSOR]] : tensor<?xf32> to memref<?xf32> +// CHECK-DAG: %[[FALSE_MEMREF:.*]] = bufferization.to_buffer %[[FALSE_TENSOR]] : tensor<?xf32> to memref<?xf32> // CHECK: %[[RESULT_MEMREF:.*]] = scf.if %[[PRED]] -> (memref<?xf32>) { // CHECK: scf.yield %[[TRUE_MEMREF]] : memref<?xf32> // CHECK: } else { @@ -29,7 +29,7 @@ func.func @if(%pred: i1, %true_val: tensor<?xf32>, %false_val: tensor<?xf32>) -> // CHECK-SAME: %[[TENSOR:.*]]: tensor<f32>, // CHECK-SAME: %[[LB:.*]]: index, %[[UB:.*]]: index, // CHECK-SAME: %[[STEP:.*]]: index) -> tensor<f32> { -// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : tensor<f32> to memref<f32> +// CHECK: %[[MEMREF:.*]] = bufferization.to_buffer %[[TENSOR]] : tensor<f32> to memref<f32> // Note: scf.for iter_args always bufferize to a memory write. This could be // optimized by analyzing the loop body. // CHECK: %[[MEMREF_COPY:.*]] = memref.alloc() @@ -70,7 +70,7 @@ func.func @if_correct_recursive_legalization_behavior(%pred: i1, %tensor: tensor<f32> // CHECK-LABEL: func @for_correct_recursive_legalization_behavior( // CHECK-SAME: %[[TENSOR:.*]]: tensor<f32>, // CHECK-SAME: %[[INDEX:.*]]: index) -> tensor<f32> { -// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : tensor<f32> to memref<f32> +// CHECK: %[[MEMREF:.*]] = bufferization.to_buffer %[[TENSOR]] : tensor<f32> to memref<f32> // Note: scf.for iter_args always bufferize to a memory write. This could be // optimized by analyzing the loop body. // CHECK: %[[MEMREF_COPY:.*]] = memref.alloc() @@ -78,7 +78,7 @@ func.func @if_correct_recursive_legalization_behavior(%pred: i1, %tensor: tensor<f32> // CHECK: %[[RESULT:.*]] = scf.for %{{.*}} = %[[INDEX]] to %[[INDEX]] step %[[INDEX]] iter_args(%[[MEMREF_ITER:.*]] = %[[MEMREF_COPY]]) -> (memref<f32>) { // CHECK: %[[TENSOR_ITER:.*]] = bufferization.to_tensor %[[MEMREF_ITER]] : memref<f32> // CHECK: %[[TENSOR_MUNGED:.*]] = "test.munge_tensor"(%[[TENSOR_ITER]]) : (tensor<f32>) -> tensor<f32> -// CHECK: %[[MEMREF_MUNGED:.*]] = bufferization.to_memref %[[TENSOR_MUNGED]] : tensor<f32> to memref<f32> +// CHECK: %[[MEMREF_MUNGED:.*]] = bufferization.to_buffer %[[TENSOR_MUNGED]] : tensor<f32> to memref<f32> // CHECK: scf.yield %[[MEMREF_MUNGED]] : memref<f32> // CHECK: } // CHECK: %[[TENSOR:.*]] = bufferization.to_tensor %[[RESULT]] : memref<f32> @@ -96,7 +96,7 @@ func.func @for_correct_recursive_legalization_behavior(%arg0: tensor<f32>, %inde // CHECK-LABEL: func @bufferize_while( // CHECK-SAME: %[[ARG0:.*]]: i64, %[[ARG1:.*]]: i64, %[[ARG2:.*]]: tensor<f32> -// CHECK: %[[M:.*]] = bufferization.to_memref %[[ARG2]] : tensor<f32> to memref<f32> +// CHECK: %[[M:.*]] = bufferization.to_buffer %[[ARG2]] : tensor<f32> to memref<f32> // Note: scf.while iter_args always bufferize to a memory write. This could be // optimized by analyzing the loop body.
// CHECK: %[[MEMREF_COPY:.*]] = memref.alloc() diff --git a/mlir/test/Dialect/SCF/one-shot-bufferize-encodings.mlir b/mlir/test/Dialect/SCF/one-shot-bufferize-encodings.mlir index 709943e596585..6b6207395f14e 100644 --- a/mlir/test/Dialect/SCF/one-shot-bufferize-encodings.mlir +++ b/mlir/test/Dialect/SCF/one-shot-bufferize-encodings.mlir @@ -13,14 +13,14 @@ func.func @scf_for_iter_arg(%arg0: tensor<128xf32, 1>, %arg1: index, %arg2: inde // CHECK-LABEL: func.func @scf_for_iter_arg // CHECK-SAME: (%[[arg0:.+]]: tensor<128xf32, 1 : i64>, %[[arg1:.+]]: index, %[[arg2:.+]]: index, %[[arg3:.+]]: index) -// CHECK: %[[v0:.+]] = bufferization.to_memref %[[arg0]] : tensor<128xf32, 1 : i64> to memref<128xf32, strided<[?], offset: ?>, 1> +// CHECK: %[[v0:.+]] = bufferization.to_buffer %[[arg0]] : tensor<128xf32, 1 : i64> to memref<128xf32, strided<[?], offset: ?>, 1> // CHECK: %[[alloc:.+]] = memref.alloc() {alignment = 64 : i64} : memref<128xf32, 1> // CHECK: memref.copy %[[v0]], %[[alloc]] : memref<128xf32, strided<[?], offset: ?>, 1> to memref<128xf32, 1> // CHECK: %[[cast:.+]] = memref.cast %[[alloc]] : memref<128xf32, 1> to memref<128xf32, strided<[?], offset: ?>, 1> // CHECK: %[[v1:.+]] = scf.for %{{.+}} = %[[arg1]] to %[[arg2]] step %[[arg3]] iter_args(%[[arg6:.+]] = %[[cast]]) -> (memref<128xf32, strided<[?], offset: ?>, 1>) // CHECK-NEXT: %[[v3:.+]] = bufferization.to_tensor %[[arg6]] : memref<128xf32, strided<[?], offset: ?>, 1> to tensor<128xf32, 1 : i64> // CHECK-NEXT: %[[v4:.+]] = "some.use"(%[[v3]]) : (tensor<128xf32, 1 : i64>) -> tensor<128xf32, 1 : i64> -// CHECK-NEXT: %[[v5:.+]] = bufferization.to_memref %[[v4]] : tensor<128xf32, 1 : i64> to memref<128xf32, strided<[?], offset: ?>, 1> +// CHECK-NEXT: %[[v5:.+]] = bufferization.to_buffer %[[v4]] : tensor<128xf32, 1 : i64> to memref<128xf32, strided<[?], offset: ?>, 1> // CHECK-NEXT: scf.yield %[[v5]] : memref<128xf32, strided<[?], offset: ?>, 1> // CHECK: %[[v2:.+]] = bufferization.to_tensor %[[v1]] : memref<128xf32, strided<[?], offset: ?>, 1> to tensor<128xf32, 1 : i64> // CHECK: return %[[v2]] : tensor<128xf32, 1 : i64> @@ -49,7 +49,7 @@ func.func @scf_forall( // CHECK: scf.forall // CHECK: %[[v2:.+]] = bufferization.to_tensor %{{.+}} : memref to tensor // CHECK: %[[v3:.+]] = "some.use"(%[[v2]]) : (tensor) -> tensor -// CHECK: bufferization.to_memref %[[v3]] : tensor to memref, 1> +// CHECK: bufferization.to_buffer %[[v3]] : tensor to memref, 1> // CHECK: %[[v1:.+]] = bufferization.to_tensor %{{.+}} : memref to tensor // CHECK: return %[[v1]] : tensor @@ -65,7 +65,7 @@ func.func @scf_execute_region(%arg0: tensor<128xf32, 1>) -> tensor<128xf32, 1> { // CHECK-LABEL: func.func @scf_execute_region // CHECK-SAME: (%[[arg0:.+]]: tensor<128xf32, 1 : i64>) -// CHECK: %[[v0:.+]] = bufferization.to_memref %[[arg0]] : tensor<128xf32, 1 : i64> to memref<128xf32, strided<[?], offset: ?>, 1> +// CHECK: %[[v0:.+]] = bufferization.to_buffer %[[arg0]] : tensor<128xf32, 1 : i64> to memref<128xf32, strided<[?], offset: ?>, 1> // CHECK: %[[v1:.+]] = scf.execute_region -> memref<128xf32, strided<[?], offset: ?>, 1> // CHECK: scf.yield %[[v0]] : memref<128xf32, strided<[?], offset: ?>, 1> // CHECK: %[[v2:.+]] = bufferization.to_tensor %[[v1]] : memref<128xf32, strided<[?], offset: ?>, 1> to tensor<128xf32, 1 : i64> diff --git a/mlir/test/Dialect/Shape/bufferize.mlir b/mlir/test/Dialect/Shape/bufferize.mlir index 02e147d917d0f..f6788f845a833 100644 --- a/mlir/test/Dialect/Shape/bufferize.mlir +++ b/mlir/test/Dialect/Shape/bufferize.mlir @@ -6,7 +6,7 @@ // 
CHECK: %[[WTRUE:.*]] = shape.const_witness true // CHECK: %[[MEMREF:.*]] = shape.assuming %[[WTRUE]] -> (memref<2xf16>) { // CHECK: %[[TENSOR_VAL:.*]] = "test.source"() : () -> tensor<2xf16> -// CHECK: %[[YIELDED_MEMREF:.*]] = bufferization.to_memref %[[TENSOR_VAL]] : tensor<2xf16> to memref<2xf16> +// CHECK: %[[YIELDED_MEMREF:.*]] = bufferization.to_buffer %[[TENSOR_VAL]] : tensor<2xf16> to memref<2xf16> // CHECK: shape.assuming_yield %[[YIELDED_MEMREF]] : memref<2xf16> // CHECK: } // CHECK: %[[TENSOR:.*]] = bufferization.to_tensor %[[MEMREF:.*]] : memref<2xf16> diff --git a/mlir/test/Dialect/SparseTensor/GPU/gpu_matmul24_lib.mlir b/mlir/test/Dialect/SparseTensor/GPU/gpu_matmul24_lib.mlir index 6d98667e77563..e20345f27b11a 100644 --- a/mlir/test/Dialect/SparseTensor/GPU/gpu_matmul24_lib.mlir +++ b/mlir/test/Dialect/SparseTensor/GPU/gpu_matmul24_lib.mlir @@ -14,19 +14,19 @@ // CHECK-SAME: %[[VAL_2:.*2]]: tensor) -> tensor { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index -// CHECK: %[[VAL_5:.*]] = bufferization.to_memref %[[VAL_0]] : tensor to memref +// CHECK: %[[VAL_5:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor to memref // CHECK: %[[VAL_6:.*]] = gpu.wait async // CHECK: %[[VAL_7:.*]] = memref.dim %[[VAL_5]], %[[VAL_3]] : memref // CHECK: %[[VAL_8:.*]] = memref.dim %[[VAL_5]], %[[VAL_4]] : memref // CHECK: %[[VAL_9:.*]], %[[VAL_10:.*]] = gpu.alloc async {{\[}}%[[VAL_6]]] (%[[VAL_7]], %[[VAL_8]]) : memref // CHECK: %[[VAL_11:.*]] = gpu.memcpy async {{\[}}%[[VAL_10]]] %[[VAL_9]], %[[VAL_5]] : memref, memref -// CHECK: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_1]] : tensor to memref +// CHECK: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor to memref // CHECK: %[[VAL_13:.*]] = gpu.wait async // CHECK: %[[VAL_14:.*]] = memref.dim %[[VAL_12]], %[[VAL_3]] : memref // CHECK: %[[VAL_15:.*]] = memref.dim %[[VAL_12]], %[[VAL_4]] : memref // CHECK: %[[VAL_16:.*]], %[[VAL_17:.*]] = gpu.alloc async {{\[}}%[[VAL_13]]] (%[[VAL_14]], %[[VAL_15]]) : memref // CHECK: %[[VAL_18:.*]] = gpu.memcpy async {{\[}}%[[VAL_17]]] %[[VAL_16]], %[[VAL_12]] : memref, memref -// CHECK: %[[VAL_19:.*]] = bufferization.to_memref %[[VAL_2]] : tensor to memref +// CHECK: %[[VAL_19:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor to memref // CHECK: %[[VAL_20:.*]] = gpu.wait async // CHECK: %[[VAL_21:.*]] = memref.dim %[[VAL_19]], %[[VAL_3]] : memref // CHECK: %[[VAL_22:.*]] = memref.dim %[[VAL_19]], %[[VAL_4]] : memref diff --git a/mlir/test/Dialect/SparseTensor/GPU/gpu_matmul_lib.mlir b/mlir/test/Dialect/SparseTensor/GPU/gpu_matmul_lib.mlir index 63c308a3d5e6f..01906f4c45171 100644 --- a/mlir/test/Dialect/SparseTensor/GPU/gpu_matmul_lib.mlir +++ b/mlir/test/Dialect/SparseTensor/GPU/gpu_matmul_lib.mlir @@ -30,13 +30,13 @@ // CHECK: %[[VAL_23:.*]] = memref.dim %[[VAL_11]], %[[VAL_3]] : memref // CHECK: %[[VAL_24:.*]], %[[VAL_25:.*]] = gpu.alloc async {{\[}}%[[VAL_22]]] (%[[VAL_23]]) : memref // CHECK: %[[VAL_26:.*]] = gpu.memcpy async {{\[}}%[[VAL_25]]] %[[VAL_24]], %[[VAL_11]] : memref, memref -// CHECK: %[[VAL_27:.*]] = bufferization.to_memref %[[VAL_1]] : tensor to memref +// CHECK: %[[VAL_27:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor to memref // CHECK: %[[VAL_28:.*]] = gpu.wait async // CHECK: %[[VAL_29:.*]] = memref.dim %[[VAL_27]], %[[VAL_3]] : memref // CHECK: %[[VAL_30:.*]] = memref.dim %[[VAL_27]], %[[VAL_4]] : memref // CHECK: %[[VAL_31:.*]], %[[VAL_32:.*]] = gpu.alloc async {{\[}}%[[VAL_28]]] (%[[VAL_29]], 
%[[VAL_30]]) : memref // CHECK: %[[VAL_33:.*]] = gpu.memcpy async {{\[}}%[[VAL_32]]] %[[VAL_31]], %[[VAL_27]] : memref, memref -// CHECK: %[[VAL_34:.*]] = bufferization.to_memref %[[VAL_2]] : tensor to memref +// CHECK: %[[VAL_34:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor to memref // CHECK: %[[VAL_35:.*]] = gpu.wait async // CHECK: %[[VAL_36:.*]] = memref.dim %[[VAL_34]], %[[VAL_3]] : memref // CHECK: %[[VAL_37:.*]] = memref.dim %[[VAL_34]], %[[VAL_4]] : memref diff --git a/mlir/test/Dialect/SparseTensor/GPU/gpu_matvec_lib.mlir b/mlir/test/Dialect/SparseTensor/GPU/gpu_matvec_lib.mlir index 088e468cee795..dea71fa03c777 100644 --- a/mlir/test/Dialect/SparseTensor/GPU/gpu_matvec_lib.mlir +++ b/mlir/test/Dialect/SparseTensor/GPU/gpu_matvec_lib.mlir @@ -30,12 +30,12 @@ module { // CHECK: %[[VAL_22:.*]] = memref.dim %[[VAL_10]], %[[VAL_3]] : memref // CHECK: %[[VAL_23:.*]], %[[VAL_24:.*]] = gpu.alloc async {{\[}}%[[VAL_21]]] (%[[VAL_22]]) : memref // CHECK: %[[VAL_25:.*]] = gpu.memcpy async {{\[}}%[[VAL_24]]] %[[VAL_23]], %[[VAL_10]] : memref, memref -// CHECK: %[[VAL_26:.*]] = bufferization.to_memref %[[VAL_1]] : tensor to memref +// CHECK: %[[VAL_26:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor to memref // CHECK: %[[VAL_27:.*]] = gpu.wait async // CHECK: %[[VAL_28:.*]] = memref.dim %[[VAL_26]], %[[VAL_3]] : memref // CHECK: %[[VAL_29:.*]], %[[VAL_30:.*]] = gpu.alloc async {{\[}}%[[VAL_27]]] (%[[VAL_28]]) : memref // CHECK: %[[VAL_31:.*]] = gpu.memcpy async {{\[}}%[[VAL_30]]] %[[VAL_29]], %[[VAL_26]] : memref, memref -// CHECK: %[[VAL_32:.*]] = bufferization.to_memref %[[VAL_2]] : tensor to memref +// CHECK: %[[VAL_32:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor to memref // CHECK: %[[VAL_33:.*]] = gpu.wait async // CHECK: %[[VAL_34:.*]] = memref.dim %[[VAL_32]], %[[VAL_3]] : memref // CHECK: %[[VAL_35:.*]], %[[VAL_36:.*]] = gpu.alloc async {{\[}}%[[VAL_33]]] (%[[VAL_34]]) : memref diff --git a/mlir/test/Dialect/SparseTensor/GPU/gpu_sampled_matmul_lib.mlir b/mlir/test/Dialect/SparseTensor/GPU/gpu_sampled_matmul_lib.mlir index 1058bc03fe9cb..6675df2be0c53 100644 --- a/mlir/test/Dialect/SparseTensor/GPU/gpu_sampled_matmul_lib.mlir +++ b/mlir/test/Dialect/SparseTensor/GPU/gpu_sampled_matmul_lib.mlir @@ -28,11 +28,11 @@ // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 8 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK: %[[VAL_5:.*]] = sparse_tensor.number_of_entries %[[VAL_0]] : tensor<8x8xf64, #sparse{{[0-9]*}}> -// CHECK: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<8x8xf64> to memref<8x8xf64> +// CHECK: %[[VAL_6:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<8x8xf64> to memref<8x8xf64> // CHECK: %[[VAL_7:.*]] = gpu.wait async // CHECK: %[[VAL_8:.*]], %[[VAL_9:.*]] = gpu.alloc async {{\[}}%[[VAL_7]]] () : memref<8x8xf64> // CHECK: %[[VAL_10:.*]] = gpu.memcpy async {{\[}}%[[VAL_9]]] %[[VAL_8]], %[[VAL_6]] : memref<8x8xf64>, memref<8x8xf64> -// CHECK: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<8x8xf64> to memref<8x8xf64> +// CHECK: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<8x8xf64> to memref<8x8xf64> // CHECK: %[[VAL_12:.*]] = gpu.wait async // CHECK: %[[VAL_13:.*]], %[[VAL_14:.*]] = gpu.alloc async {{\[}}%[[VAL_12]]] () : memref<8x8xf64> // CHECK: %[[VAL_15:.*]] = gpu.memcpy async {{\[}}%[[VAL_14]]] %[[VAL_13]], %[[VAL_11]] : memref<8x8xf64>, memref<8x8xf64> diff --git a/mlir/test/Dialect/SparseTensor/GPU/gpu_sddmm_lib.mlir b/mlir/test/Dialect/SparseTensor/GPU/gpu_sddmm_lib.mlir index 
32741086b9e6e..7b7657a0e9ba5 100644 --- a/mlir/test/Dialect/SparseTensor/GPU/gpu_sddmm_lib.mlir +++ b/mlir/test/Dialect/SparseTensor/GPU/gpu_sddmm_lib.mlir @@ -30,13 +30,13 @@ // CHECK: %[[VAL_8:.*]] = tensor.dim %[[VAL_1]], %[[VAL_3]] : tensor // CHECK: %[[VAL_9:.*]] = tensor.dim %[[VAL_1]], %[[VAL_4]] : tensor // CHECK: %[[VAL_10:.*]] = tensor.dim %[[VAL_2]], %[[VAL_4]] : tensor -// CHECK: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : tensor to memref +// CHECK: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor to memref // CHECK: %[[VAL_12:.*]] = gpu.wait async // CHECK: %[[VAL_13:.*]] = memref.dim %[[VAL_11]], %[[VAL_3]] : memref // CHECK: %[[VAL_14:.*]] = memref.dim %[[VAL_11]], %[[VAL_4]] : memref // CHECK: %[[VAL_15:.*]], %[[VAL_16:.*]] = gpu.alloc async {{\[}}%[[VAL_12]]] (%[[VAL_13]], %[[VAL_14]]) : memref // CHECK: %[[VAL_17:.*]] = gpu.memcpy async {{\[}}%[[VAL_16]]] %[[VAL_15]], %[[VAL_11]] : memref, memref -// CHECK: %[[VAL_18:.*]] = bufferization.to_memref %[[VAL_2]] : tensor to memref +// CHECK: %[[VAL_18:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor to memref // CHECK: %[[VAL_19:.*]] = gpu.wait async // CHECK: %[[VAL_20:.*]] = memref.dim %[[VAL_18]], %[[VAL_3]] : memref // CHECK: %[[VAL_21:.*]] = memref.dim %[[VAL_18]], %[[VAL_4]] : memref diff --git a/mlir/test/Dialect/SparseTensor/constant_index_map.mlir b/mlir/test/Dialect/SparseTensor/constant_index_map.mlir index 857967bcf521a..cf1eb3e9e44f5 100644 --- a/mlir/test/Dialect/SparseTensor/constant_index_map.mlir +++ b/mlir/test/Dialect/SparseTensor/constant_index_map.mlir @@ -14,8 +14,8 @@ // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_5:.*]] = tensor.empty() : tensor<77xi1, #{{.*}}> -// CHECK-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<1x77xi1> -// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<1x77xi1> +// CHECK-DAG: %[[VAL_6:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor<1x77xi1> +// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<1x77xi1> // CHECK: %[[VAL_8:.*]] = scf.for %[[VAL_9:.*]] = %[[VAL_3]] to %[[VAL_2]] step %[[VAL_4]] iter_args(%[[VAL_10:.*]] = %[[VAL_5]]) -> (tensor<77xi1, #{{.*}}>) { // CHECK: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_3]], %[[VAL_9]]] : memref<1x77xi1> // CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_3]], %[[VAL_9]]] : memref<1x77xi1> diff --git a/mlir/test/Dialect/SparseTensor/dense.mlir b/mlir/test/Dialect/SparseTensor/dense.mlir index 5ed1558a53163..c7022706f1e05 100644 --- a/mlir/test/Dialect/SparseTensor/dense.mlir +++ b/mlir/test/Dialect/SparseTensor/dense.mlir @@ -40,7 +40,7 @@ // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index // CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref -// CHECK: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> // CHECK: scf.for %[[VAL_9:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] { // CHECK: %[[VAL_11:.*]] = arith.muli %[[VAL_9]], %[[VAL_4]] : index // CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_5]] to %[[VAL_4]] step %[[VAL_6]] { @@ -79,7 +79,7 @@ func.func @dense1(%arga: tensor<32x16xf32, #DenseMatrix>, // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 16 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 
0 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index -// CHECK: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK: %[[VAL_7:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor<32x16xf32> to memref<32x16xf32> // CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK: scf.for %[[VAL_9:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] { // CHECK: %[[VAL_11:.*]] = arith.muli %[[VAL_9]], %[[VAL_4]] : index @@ -122,7 +122,7 @@ func.func @dense2(%arga: tensor<32x16xf32>, // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 16 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index -// CHECK: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK: %[[VAL_7:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor<32x16x8xf32> to memref<32x16x8xf32> // CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK: scf.for %[[VAL_9:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] { // CHECK: %[[VAL_11:.*]] = arith.muli %[[VAL_9]], %[[VAL_4]] : index diff --git a/mlir/test/Dialect/SparseTensor/fuse_sparse_pad_with_consumer.mlir b/mlir/test/Dialect/SparseTensor/fuse_sparse_pad_with_consumer.mlir index 275f7f2ff25f7..d828afe13c622 100644 --- a/mlir/test/Dialect/SparseTensor/fuse_sparse_pad_with_consumer.mlir +++ b/mlir/test/Dialect/SparseTensor/fuse_sparse_pad_with_consumer.mlir @@ -30,7 +30,7 @@ // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<4x4xf32, #sparse> to memref // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<4x4xf32, #sparse> to memref // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<4x4xf32, #sparse> to memref -// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_10]] : +// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_buffer %[[VAL_10]] : // CHECK-DAG: linalg.fill ins(%[[VAL_8]] : f32) outs(%[[VAL_14]] : memref<8x8xf32>) // CHECK: scf.for %[[VAL_15:.*]] = %[[VAL_6]] to %[[VAL_4]] step %[[VAL_5]] { // CHECK: %[[VAL_16:.*]] = arith.subi %[[VAL_15]], %[[VAL_7]] : index diff --git a/mlir/test/Dialect/SparseTensor/sorted_coo.mlir b/mlir/test/Dialect/SparseTensor/sorted_coo.mlir index 58f182dbdc44d..81d300e851ec1 100644 --- a/mlir/test/Dialect/SparseTensor/sorted_coo.mlir +++ b/mlir/test/Dialect/SparseTensor/sorted_coo.mlir @@ -101,7 +101,7 @@ func.func @sparse_scale(%argx: tensor) -> tensor to memref> // C_HECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse{{[0-9]*}}> to memref> // C_HECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x64xf64, #sparse{{[0-9]*}}> to memref -// C_HECK: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xf64> to memref<32xf64> +// C_HECK: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32xf64> to memref<32xf64> // C_HECK: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref // C_HECK: %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref // C_HECK: %[[VAL_13:.*]] = scf.while (%[[VAL_14:.*]] = %[[VAL_11]]) : (index) -> index { @@ -170,7 +170,7 @@ func.func @matvec(%arga: tensor<32x64xf64, #SortedCOO>, // C_HECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32x64xf64, #sparse{{[0-9]*}}> to memref> // C_HECK-DAG: 
%[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<32x64xf64, #sparse{{[0-9]*}}> to memref> // C_HECK-DAG: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x64xf64, #sparse{{[0-9]*}}> to memref -// C_HECK: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x64xf64> to memref<32x64xf64> +// C_HECK: %[[VAL_15:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x64xf64> to memref<32x64xf64> // C_HECK: linalg.fill ins(%[[VAL_4]] : f64) outs(%[[VAL_15]] : memref<32x64xf64>) // C_HECK: %[[VAL_16:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_5]]] : memref // C_HECK: %[[VAL_17:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_1d.mlir b/mlir/test/Dialect/SparseTensor/sparse_1d.mlir index 003dcc6708d63..a2f3f7704ddde 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_1d.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_1d.mlir @@ -21,7 +21,7 @@ // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_2]] +// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_2]] // CHECK: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_8]] : memref<32xf32>) // CHECK: scf.for %[[VAL_9:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] { // CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_9]]] : memref @@ -51,7 +51,7 @@ func.func @add_d(%arga: tensor<32xf32, #DV>, %argb: f32, %argx: tensor<32xf32>) // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index // CHECK: %[[VAL_INITTENSOR:.*]] = tensor.empty() : tensor<32xf32> // CHECK: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse{{[0-9]*}}> to memref -// CHECK: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_INITTENSOR]] : tensor<32xf32> to memref<32xf32> +// CHECK: %[[VAL_7:.*]] = bufferization.to_buffer %[[VAL_INITTENSOR]] : tensor<32xf32> to memref<32xf32> // CHECK: linalg.fill ins(%[[VAL_3]] : f32) outs(%[[VAL_7]] : memref<32xf32>) // CHECK: scf.for %[[VAL_8:.*]] = %[[VAL_4]] to %[[VAL_2]] step %[[VAL_5]] { // CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_8]]] : memref @@ -81,7 +81,7 @@ func.func @add_d_init(%arga: tensor<32xf32, #DV>, %argb: f32) -> tensor<32xf32> // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_2]] +// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_2]] // CHECK: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_8]] : memref<32xf32>) // CHECK: scf.for %[[VAL_9:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] { // CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_9]]] : memref @@ -115,7 +115,7 @@ func.func @mul_d(%arga: tensor<32xf32, #DV>, %argb: f32, %argx: tensor<32xf32>) // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_12:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref // CHECK-DAG: %[[VAL_13:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_11]] : 
memref<32xf32>) // CHECK: %[[VAL_14:.*]]:2 = scf.while (%[[VAL_15:.*]] = %[[VAL_12]], %[[VAL_16:.*]] = %[[VAL_4]]) : (index, index) -> (index, index) { // CHECK: %[[VAL_17:.*]] = arith.cmpi ult, %[[VAL_15]], %[[VAL_13]] : index @@ -165,7 +165,7 @@ func.func @add_s(%arga: tensor<32xf32, #SV>, %argb: f32, %argx: tensor<32xf32>) // CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] +// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_1]] // CHECK-DAG: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref // CHECK-DAG: %[[VAL_10:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_8]] : memref<32xf32>) @@ -205,7 +205,7 @@ func.func @repeated_add_s(%arga: tensor<32xf32, #SV>, %argx: tensor<32xf32>) -> // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_2]] +// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_2]] // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_9]] : memref<32xf32>) // CHECK-DAG: %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref // CHECK-DAG: %[[VAL_11:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref @@ -247,8 +247,8 @@ func.func @mul_s(%arga: tensor<32xf32, #SV>, %argb: f32, %argx: tensor<32xf32>) // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf32> to memref<32xf32> -// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_2]] +// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xf32> to memref<32xf32> +// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_2]] // CHECK: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_9]] : memref<32xf32>) // CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] { // CHECK: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_10]]] : memref @@ -278,8 +278,8 @@ func.func @add_dd(%arga: tensor<32xf32, #DV>, %argb: tensor<32xf32>, %argx: tens // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf32> to memref<32xf32> -// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_2]] +// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xf32> to memref<32xf32> +// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_2]] // CHECK: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_9]] : memref<32xf32>) // CHECK: scf.for 
%[[VAL_10:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] { // CHECK: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_10]]] : memref @@ -309,11 +309,11 @@ func.func @mul_dd(%arga: tensor<32xf32, #DV>, %argb: tensor<32xf32>, %argx: tens // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant true // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<32xf32> to memref<32xf32> +// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor<32xf32> to memref<32xf32> // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] +// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_2]] // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_12]] : memref<32xf32>) // CHECK-DAG: %[[VAL_13:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_4]]] : memref // CHECK-DAG: %[[VAL_14:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_6]]] : memref @@ -366,11 +366,11 @@ func.func @add_ds(%arga: tensor<32xf32>, %argb: tensor<32xf32, #SV>, %argx: tens // CHECK-SAME: %[[VAL_2:.*]]: tensor<32xf32>) -> tensor<32xf32> { // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_5:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<32xf32> to memref<32xf32> +// CHECK-DAG: %[[VAL_5:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor<32xf32> to memref<32xf32> // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] +// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_2]] // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_10]] : memref<32xf32>) // CHECK-DAG: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_3]]] : memref // CHECK-DAG: %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref @@ -406,8 +406,8 @@ func.func @mul_ds(%arga: tensor<32xf32>, %argb: tensor<32xf32, #SV>, %argx: tens // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf32> to memref<32xf32> -// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] +// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xf32> to memref<32xf32> +// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_2]] // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_12]] : memref<32xf32>) // CHECK-DAG: %[[VAL_13:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref // 
CHECK-DAG: %[[VAL_14:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref @@ -463,8 +463,8 @@ func.func @add_sd(%arga: tensor<32xf32, #SV>, %argb: tensor<32xf32>, %argx: tens // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf32> to memref<32xf32> -// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] +// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xf32> to memref<32xf32> +// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_2]] // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_10]] : memref<32xf32>) // CHECK-DAG: %[[VAL_11:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref // CHECK-DAG: %[[VAL_12:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref @@ -500,7 +500,7 @@ func.func @mul_sd(%arga: tensor<32xf32, #SV>, %argb: tensor<32xf32>, %argx: tens // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] +// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_2]] // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_12]] : memref<32xf32>) // CHECK-DAG: %[[VAL_13:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref // CHECK-DAG: %[[VAL_14:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref @@ -582,7 +582,7 @@ func.func @add_ss(%arga: tensor<32xf32, #SV>, %argb: tensor<32xf32, #SV>, %argx: // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] +// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_2]] // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_12]] : memref<32xf32>) // CHECK-DAG: %[[VAL_13:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref // CHECK-DAG: %[[VAL_14:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref @@ -643,7 +643,7 @@ func.func @mul_ss(%arga: tensor<32xf32, #SV>, %argb: tensor<32xf32, #SV>, %argx: // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_3]] +// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_buffer %[[VAL_3]] // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_13]] : memref<16xf32>) // CHECK-DAG: %[[VAL_14:.*]] = memref.load 
%[[VAL_6]]{{\[}}%[[VAL_4]]] : memref // CHECK-DAG: %[[VAL_15:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref @@ -735,7 +735,7 @@ func.func @two_way_inv(%arga: tensor<16xf32, #SV>, %argb: tensor<16xf32, #SV>, % // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_3]] +// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_buffer %[[VAL_3]] // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_13]] : memref<16xf32>) // CHECK-DAG: %[[VAL_14:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref // CHECK-DAG: %[[VAL_15:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref @@ -830,7 +830,7 @@ func.func @two_way_inv_alt(%arga: tensor<16xf32, #SV>, // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor to memref // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_0]] : tensor to memref -// CHECK-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_1]] : tensor to memref +// CHECK-DAG: %[[VAL_6:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor to memref // CHECK-DAG: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref // CHECK-DAG: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref // CHECK-DAG: %[[VAL_10:.*]] = memref.load %[[VAL_6]][] : memref @@ -875,7 +875,7 @@ func.func @sum_reduction(%arga: tensor, %argx: tensor) -> tenso // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor to memref +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor to memref // CHECK-DAG: %[[VAL_13:.*]] = memref.load %[[VAL_11]][] : memref // CHECK-DAG: %[[VAL_14:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref // CHECK-DAG: %[[VAL_15:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref @@ -977,11 +977,11 @@ func.func @sum_reduction_ss(%arga: tensor<16xf32, #SV>, // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor to memref +// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_2]] {level = 0 : index} : tensor<16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_2]] {level = 0 : index} : tensor<16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_2]] : tensor<16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_13:.*]] 
= bufferization.to_memref %[[VAL_3]] : tensor to memref +// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_buffer %[[VAL_3]] : tensor to memref // CHECK-DAG: %[[VAL_15:.*]] = memref.load %[[VAL_13]][] : memref // CHECK-DAG: %[[VAL_16:.*]] = memref.load %[[VAL_9]][] : memref // CHECK-DAG: %[[VAL_17:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref @@ -1089,16 +1089,16 @@ func.func @sum_reduction_inv(%arga: tensor<16xf32, #SV>, // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant true // CHECK-DAG: %[[VAL_7:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : tensor to memref +// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor to memref -// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : tensor to memref +// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor to memref // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.positions %[[VAL_3]] {level = 0 : index} : tensor to memref // CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.coordinates %[[VAL_3]] {level = 0 : index} : tensor to memref // CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.values %[[VAL_3]] : tensor to memref // CHECK-DAG: %[[VAL_16:.*]] = tensor.dim %[[VAL_0]], %[[VAL_5]] : tensor -// CHECK-DAG: %[[VAL_18:.*]] = bufferization.to_memref %[[VAL_4]] +// CHECK-DAG: %[[VAL_18:.*]] = bufferization.to_buffer %[[VAL_4]] // CHECK-DAG: linalg.fill ins(%{{.*}} : f64) outs(%[[VAL_18]] : memref) // CHECK-DAG: %[[VAL_19:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_5]]] : memref // CHECK-DAG: %[[VAL_20:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_7]]] : memref @@ -1272,7 +1272,7 @@ func.func @four_tensors_op(%arga: tensor, // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_2]] {level = 0 : index} : tensor to memref // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_2]] {level = 0 : index} : tensor to memref // CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_2]] : tensor to memref -// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_3]] : tensor to memref +// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_buffer %[[VAL_3]] : tensor to memref // CHECK-DAG: %[[VAL_17:.*]] = memref.load %[[VAL_15]][] : memref // CHECK-DAG: %[[VAL_18:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref // CHECK-DAG: %[[VAL_19:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_2d.mlir b/mlir/test/Dialect/SparseTensor/sparse_2d.mlir index 9c34e54db6c85..faf6404a96564 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_2d.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_2d.mlir @@ -25,8 +25,8 @@ // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> -// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: 
%[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> // CHECK: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_10]] : memref<32x16xf32>) // CHECK: scf.for %[[VAL_11:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] { // CHECK: %[[VAL_13:.*]] = arith.muli %[[VAL_11]], %[[VAL_4]] : index @@ -62,8 +62,8 @@ func.func @add_dd(%arga: tensor<32x16xf32, #Tdd>, %argb: tensor<32x16xf32>, %arg // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_7:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> -// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xi1> to memref<32x16xi1> +// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16xi1> to memref<32x16xi1> // CHECK: linalg.fill ins(%[[VAL_5]] : i1) outs(%[[VAL_10]] : memref<32x16xi1>) // CHECK: scf.for %[[VAL_11:.*]] = %[[VAL_6]] to %[[VAL_3]] step %[[VAL_7]] { // CHECK: %[[VAL_13:.*]] = arith.muli %[[VAL_11]], %[[VAL_4]] : index @@ -98,8 +98,8 @@ func.func @cmp_dd(%arga: tensor<32x16xf32, #Tdd>, %argb: tensor<32x16xf32>, %arg // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> -// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> // CHECK: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_10]] : memref<32x16xf32>) // CHECK: scf.for %[[VAL_11:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] { // CHECK: %[[VAL_13:.*]] = arith.muli %[[VAL_11]], %[[VAL_4]] : index @@ -137,8 +137,8 @@ func.func @mul_dd(%arga: tensor<32x16xf32, #Tdd>, %argb: tensor<32x16xf32>, %arg // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> -// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_13]] : memref<32x16xf32>) // CHECK: scf.for %[[VAL_14:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_7]] { // CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_14]]] : memref @@ -202,8 +202,8 @@ func.func @add_ds(%arga: tensor<32x16xf32, #Tds>, %argb: tensor<32x16xf32>, %arg // CHECK-DAG: %[[VAL_10:.*]] 
= sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> -// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xi1> to memref<32x16xi1> +// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16xi1> to memref<32x16xi1> // CHECK-DAG: linalg.fill ins(%[[VAL_5]] : i1) outs(%[[VAL_14]] : memref<32x16xi1>) // CHECK: scf.for %[[VAL_15:.*]] = %[[VAL_6]] to %[[VAL_3]] step %[[VAL_7]] { // CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_15]]] : memref @@ -265,8 +265,8 @@ func.func @cmp_ds(%arga: tensor<32x16xf32, #Tds>, %argb: tensor<32x16xf32>, %arg // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_11]] : memref<32x16xf32>) // CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] { // CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_12]]] : memref @@ -306,8 +306,8 @@ func.func @mul_ds(%arga: tensor<32x16xf32, #Tds>, %argb: tensor<32x16xf32>, %arg // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> -// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_13]] : memref<32x16xf32>) // CHECK: %[[VAL_14:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_6]]] : memref // CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_7]]] : memref @@ -376,8 +376,8 @@ func.func @add_sd(%arga: tensor<32x16xf32, #Tsd>, %argb: tensor<32x16xf32>, %arg // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to 
memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> -// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xi1> to memref<32x16xi1> +// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16xi1> to memref<32x16xi1> // CHECK-DAG: linalg.fill ins(%[[VAL_5]] : i1) outs(%[[VAL_14]] : memref<32x16xi1>) // CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_6]]] : memref // CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_7]]] : memref @@ -444,8 +444,8 @@ func.func @cmp_sd(%arga: tensor<32x16xf32, #Tsd>, %argb: tensor<32x16xf32>, %arg // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_11]] : memref<32x16xf32>) // CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref // CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref @@ -488,8 +488,8 @@ func.func @mul_sd(%arga: tensor<32x16xf32, #Tsd>, %argb: tensor<32x16xf32>, %arg // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> -// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_15]] : memref<32x16xf32>) // CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_6]]] : memref // CHECK: %[[VAL_17:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_7]]] : memref @@ -584,8 +584,8 @@ func.func @add_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32>, %arg // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : 
tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> -// CHECK-DAG: %[[VAL_16:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xi1> to memref<32x16xi1> +// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_16:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16xi1> to memref<32x16xi1> // CHECK-DAG: linalg.fill ins(%[[VAL_5]] : i1) outs(%[[VAL_16]] : memref<32x16xi1>) // CHECK: %[[VAL_17:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_6]]] : memref // CHECK: %[[VAL_18:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_7]]] : memref @@ -679,8 +679,8 @@ func.func @cmp_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32>, %arg // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> -// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_12]] : memref<32x16xf32>) // CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref // CHECK: %[[VAL_14:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref @@ -726,7 +726,7 @@ func.func @mul_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32>, %arg // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_16:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_16:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_16]] : memref<32x16xf32>) // CHECK: %[[VAL_17:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref // CHECK: %[[VAL_18:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref @@ -891,7 +891,7 @@ func.func @add_ss_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32, #T // CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_16:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_17:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xi1> to memref<32x16xi1> +// CHECK-DAG: %[[VAL_17:.*]] = 
bufferization.to_buffer %[[VAL_2]] : tensor<32x16xi1> to memref<32x16xi1> // CHECK-DAG: linalg.fill ins(%[[VAL_3]] : i1) outs(%[[VAL_17]] : memref<32x16xi1>) // CHECK: %[[VAL_18:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref // CHECK: %[[VAL_19:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_5]]] : memref @@ -1166,7 +1166,7 @@ func.func @sub_ss_batched(%0: tensor<2x3xf64, #BatchedVector>, %1: tensor<2x3xf6 // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_16:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_16:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_16]] : memref<32x16xf32>) // CHECK: %[[VAL_17:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref // CHECK: %[[VAL_18:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref @@ -1260,7 +1260,7 @@ func.func @mul_ss_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32, #T // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_15]] : memref<32x16xf32>) // CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_5]]] : memref // CHECK: %[[VAL_17:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_7]]] : memref @@ -1362,7 +1362,7 @@ func.func @add_sd_ds(%arga: tensor<32x16xf32, #Tsd>, %argb: tensor<32x16xf32, #T // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16xf32> to memref<32x16xf32> // CHECK-DAG: linalg.fill ins(%{{.*}} : f32) outs(%[[VAL_13]] : memref<32x16xf32>) // CHECK: %[[VAL_14:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref // CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref @@ -1415,8 +1415,8 @@ func.func @mul_sd_ds(%arga: tensor<32x16xf32, #Tsd>, %argb: tensor<32x16xf32, #T // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<16x32xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<16x32xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values 
%[[VAL_0]] : tensor<16x32xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf32> to memref<32xf32> -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<16xf32> to memref<16xf32> +// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xf32> to memref<32xf32> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<16xf32> to memref<16xf32> // CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] { // CHECK-DAG: %[[VAL_13:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_12]]] : memref // CHECK-DAG: %[[VAL_14:.*]] = arith.addi %[[VAL_12]], %[[VAL_5]] : index @@ -1464,7 +1464,7 @@ func.func @matvec(%argA: tensor<16x32xf32, #Tds>, %argb: tensor<32xf32>, %argx: // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<10x20xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10x20xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor to memref +// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor to memref // CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref // CHECK: %[[VAL_10:.*]] = scf.for %[[VAL_11:.*]] = %[[VAL_4]] to %[[VAL_2]] step %[[VAL_3]] iter_args(%[[VAL_12:.*]] = %[[VAL_9]]) -> (f32) { // CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_11]]] : memref @@ -1511,7 +1511,7 @@ func.func @sum_reduction(%arga: tensor<10x20xf32, #Tds>, %argx: tensor) -> // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor to memref // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor to memref // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.lvl %[[VAL_0]], %[[VAL_3]] : tensor -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : tensor to memref +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor to memref // CHECK-DAG: linalg.fill ins(%{{.*}} : f64) outs(%[[VAL_11]] : memref) // CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_3]] to %[[VAL_8]] step %[[VAL_4]] { // CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_12]]] : memref @@ -1563,9 +1563,9 @@ func.func @scale(%arga: tensor, %argx: tensor) -> tensor // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor to memref // CHECK-DAG: %[[VAL_11:.*]] = tensor.dim %[[VAL_1]], %[[VAL_4]] : tensor -// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_1]] : tensor to memref -// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : tensor to memref -// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_3]] : tensor to memref +// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor to memref +// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor to memref +// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_buffer %[[VAL_3]] : tensor to memref // CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref // CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref // CHECK: scf.for %[[VAL_17:.*]] = %[[VAL_15]] to %[[VAL_16]] step %[[VAL_4]] { @@ -1638,10 +1638,10 @@ func.func @sampled_dense_dense(%args: tensor, // CHECK-DAG: %[[VAL_17:.*]] = sparse_tensor.positions %[[VAL_2]] {level = 1 : 
index} : tensor to memref // CHECK-DAG: %[[VAL_18:.*]] = sparse_tensor.coordinates %[[VAL_2]] {level = 1 : index} : tensor to memref // CHECK-DAG: %[[VAL_19:.*]] = sparse_tensor.values %[[VAL_2]] : tensor to memref -// CHECK-DAG: %[[VAL_20:.*]] = bufferization.to_memref %[[VAL_3]] : tensor to memref -// CHECK-DAG: %[[VAL_21:.*]] = bufferization.to_memref %[[VAL_4]] : tensor to memref +// CHECK-DAG: %[[VAL_20:.*]] = bufferization.to_buffer %[[VAL_3]] : tensor to memref +// CHECK-DAG: %[[VAL_21:.*]] = bufferization.to_buffer %[[VAL_4]] : tensor to memref // CHECK-DAG: %[[VAL_22:.*]] = sparse_tensor.lvl %[[VAL_2]], %[[VAL_6]] : tensor -// CHECK-DAG: %[[VAL_24:.*]] = bufferization.to_memref %[[VAL_5]] : tensor to memref +// CHECK-DAG: %[[VAL_24:.*]] = bufferization.to_buffer %[[VAL_5]] : tensor to memref // CHECK: %[[VAL_25:.*]] = memref.load %[[VAL_21]][] : memref // CHECK: %[[VAL_26:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_6]]] : memref // CHECK: %[[VAL_27:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_7]]] : memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_3d.mlir b/mlir/test/Dialect/SparseTensor/sparse_3d.mlir index 9158ac427763b..f6ecfa0beba26 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_3d.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_3d.mlir @@ -33,8 +33,8 @@ // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_7:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> // CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_11]] : memref<32x16x8xf32>) // CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_6]] to %[[VAL_3]] step %[[VAL_7]] { // CHECK: %[[VAL_14:.*]] = arith.muli %[[VAL_12]], %[[VAL_4]] : index @@ -75,8 +75,8 @@ func.func @add_ddd(%arga: tensor<32x16x8xf32, #Tddd>, %argb: tensor<32x16x8xf32> // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_7:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> // CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_11]] : memref<32x16x8xf32>) // CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_6]] to %[[VAL_3]] step %[[VAL_7]] { // CHECK: %[[VAL_14:.*]] = arith.muli %[[VAL_12]], %[[VAL_4]] : index @@ -120,8 +120,8 @@ func.func @mul_ddd(%arga: tensor<32x16x8xf32, #Tddd>, %argb: tensor<32x16x8xf32> // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_12:.*]] =
sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> -// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> // CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_15]] : memref<32x16x8xf32>) // CHECK: scf.for %[[VAL_16:.*]] = %[[VAL_7]] to %[[VAL_4]] step %[[VAL_9]] { // CHECK: %[[VAL_18:.*]] = arith.muli %[[VAL_16]], %[[VAL_5]] : index @@ -187,8 +187,8 @@ func.func @add_dds(%arga: tensor<32x16x8xf32, #Tdds>, %argb: tensor<32x16x8xf32> // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> -// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> // CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_13]] : memref<32x16x8xf32>) // CHECK: scf.for %[[VAL_14:.*]] = %[[VAL_6]] to %[[VAL_4]] step %[[VAL_7]] { // CHECK: %[[VAL_16:.*]] = arith.muli %[[VAL_14]], %[[VAL_5]] : index @@ -234,8 +234,8 @@ func.func @mul_dds(%arga: tensor<32x16x8xf32, #Tdds>, %argb: tensor<32x16x8xf32> // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> -// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> // CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_14]] : memref<32x16x8xf32>) // CHECK: scf.for %[[VAL_15:.*]] = %[[VAL_7]] to %[[VAL_3]] step %[[VAL_8]] { // CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_15]]] : memref @@ -305,8 +305,8 @@ func.func @add_dsd(%arga: tensor<32x16x8xf32, #Tdsd>, %argb: tensor<32x16x8xf32> // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] 
: tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> -// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> // CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_12]] : memref<32x16x8xf32>) // CHECK: scf.for %[[VAL_13:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] { // CHECK: %[[VAL_14:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_13]]] : memref @@ -354,8 +354,8 @@ func.func @mul_dsd(%arga: tensor<32x16x8xf32, #Tdsd>, %argb: tensor<32x16x8xf32> // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> -// CHECK-DAG: %[[VAL_17:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_17:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> // CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_17]] : memref<32x16x8xf32>) // CHECK: scf.for %[[VAL_18:.*]] = %[[VAL_8]] to %[[VAL_4]] step %[[VAL_9]] { // CHECK: %[[VAL_19:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_18]]] : memref @@ -450,8 +450,8 @@ func.func @add_dss(%arga: tensor<32x16x8xf32, #Tdss>, %argb: tensor<32x16x8xf32> // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> -// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> // CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_14]] : memref<32x16x8xf32>) // CHECK: scf.for %[[VAL_15:.*]] = %[[VAL_5]] to %[[VAL_4]] step %[[VAL_6]] { // CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_15]]] : memref @@ -499,8 +499,8 @@ func.func @mul_dss(%arga: tensor<32x16x8xf32, #Tdss>, %argb: tensor<32x16x8xf32> // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : 
tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> -// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> // CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_14]] : memref<32x16x8xf32>) // CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_7]]] : memref // CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_8]]] : memref @@ -575,8 +575,8 @@ func.func @add_sdd(%arga: tensor<32x16x8xf32, #Tsdd>, %argb: tensor<32x16x8xf32> // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> -// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> // CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_12]] : memref<32x16x8xf32>) // CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_5]]] : memref // CHECK: %[[VAL_14:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref @@ -625,8 +625,8 @@ func.func @mul_sdd(%arga: tensor<32x16x8xf32, #Tsdd>, %argb: tensor<32x16x8xf32> // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> -// CHECK-DAG: %[[VAL_17:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_17:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> // CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_17]] : memref<32x16x8xf32>) // CHECK: %[[VAL_18:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_8]]] : memref // CHECK: %[[VAL_19:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_9]]] : memref @@ -726,8 +726,8 @@ func.func @add_sds(%arga: tensor<32x16x8xf32, #Tsds>, %argb: tensor<32x16x8xf32> // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, 
#sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> -// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> // CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_14]] : memref<32x16x8xf32>) // CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_5]]] : memref // CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref @@ -778,8 +778,8 @@ func.func @mul_sds(%arga: tensor<32x16x8xf32, #Tsds>, %argb: tensor<32x16x8xf32> // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> -// CHECK-DAG: %[[VAL_16:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_16:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> // CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_16]] : memref<32x16x8xf32>) // CHECK: %[[VAL_17:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_7]]] : memref // CHECK: %[[VAL_18:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_8]]] : memref @@ -883,8 +883,8 @@ func.func @add_ssd(%arga: tensor<32x16x8xf32, #Tssd>, %argb: tensor<32x16x8xf32> // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> -// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> // CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_13]] : memref<32x16x8xf32>) // CHECK: %[[VAL_14:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref // CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref @@ -937,8 +937,8 @@ func.func @mul_ssd(%arga: tensor<32x16x8xf32, #Tssd>, %argb: tensor<32x16x8xf32> // CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_16:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to 
memref -// CHECK-DAG: %[[VAL_17:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> -// CHECK-DAG: %[[VAL_19:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_17:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_19:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> // CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_19]] : memref<32x16x8xf32>) // CHECK: %[[VAL_20:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_8]]] : memref // CHECK: %[[VAL_21:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_9]]] : memref @@ -1067,8 +1067,8 @@ func.func @add_sss(%arga: tensor<32x16x8xf32, #Tsss>, %argb: tensor<32x16x8xf32> // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> -// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32x16x8xf32> to memref<32x16x8xf32> +// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16x8xf32> to memref<32x16x8xf32> // CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_15]] : memref<32x16x8xf32>) // CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref // CHECK: %[[VAL_17:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref @@ -1127,11 +1127,11 @@ func.func @mul_sss(%arga: tensor<32x16x8xf32, #Tsss>, %argb: tensor<32x16x8xf32> // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 2 : index} : tensor to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_1]] : tensor to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.lvl %[[VAL_1]], %[[VAL_6]] : tensor -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor to memref -// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_3]] : tensor to memref +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor to memref +// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_3]] : tensor to memref // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.lvl %[[VAL_1]], %[[VAL_5]] : tensor // CHECK-DAG: %[[VAL_14:.*]] = tensor.dim %[[VAL_2]], %[[VAL_6]] : tensor -// CHECK-DAG: %[[VAL_16:.*]] = bufferization.to_memref %[[VAL_0]] : tensor to memref +// CHECK-DAG: %[[VAL_16:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor to memref // CHECK: scf.for %[[VAL_17:.*]] = %[[VAL_5]] to %[[VAL_13]] step %[[VAL_6]] { // CHECK: %[[VAL_19:.*]] = arith.muli %[[VAL_17]], %[[VAL_10]] : index // CHECK: scf.for %[[VAL_18:.*]] = %[[VAL_5]] to %[[VAL_10]] step %[[VAL_6]] { @@ -1191,7 +1191,7 @@ func.func @kernel_3d(%arga: tensor, // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<10x20x30xf32, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<10x20x30xf32, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10x20x30xf32, 
#sparse{{[0-9]*}}> -// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor to memref +// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor to memref // CHECK: %[[VAL_11:.*]] = memref.load %[[VAL_10]][] : memref // CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_2]]] : memref // CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref @@ -1246,10 +1246,10 @@ func.func @sum_reduction(%arga: tensor<10x20x30xf32, #Tsss>, %argx: tensor) // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_6:.*]] = tensor.dim %[[VAL_0]], %[[VAL_3]] : tensor // CHECK-DAG: %[[VAL_7:.*]] = tensor.dim %[[VAL_0]], %[[VAL_4]] : tensor -// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : tensor to memref +// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor to memref // CHECK-DAG: %[[VAL_9:.*]] = tensor.dim %[[VAL_0]], %[[VAL_5]] : tensor // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor -// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : tensor to memref +// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor to memref // CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_12]][] : memref // CHECK: %[[VAL_14:.*]] = scf.for %[[VAL_15:.*]] = %[[VAL_5]] to %[[VAL_9]] step %[[VAL_3]] iter_args(%[[VAL_16:.*]] = %[[VAL_13]]) -> (f32) { // CHECK: %[[VAL_17:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_15]]] : memref @@ -1305,9 +1305,9 @@ func.func @sum_reduction_inv(%arga: tensor, // CHECK-DAG: %[[VAL_7:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_8:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<20xf32> to memref<20xf32> -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<30xf32> to memref<30xf32> -// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_3]] : tensor<10x20x30xf32> to memref<10x20x30xf32> +// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<20xf32> to memref<20xf32> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<30xf32> to memref<30xf32> +// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_buffer %[[VAL_3]] : tensor<10x20x30xf32> to memref<10x20x30xf32> // CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_13]] : memref<10x20x30xf32>) // CHECK: scf.for %[[VAL_14:.*]] = %[[VAL_7]] to %[[VAL_4]] step %[[VAL_8]] { // CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_14]]] : memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_affine.mlir b/mlir/test/Dialect/SparseTensor/sparse_affine.mlir index e2dbadc4db5bf..973f8f575ed7d 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_affine.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_affine.mlir @@ -25,8 +25,8 @@ // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse{{[0-9]*}}> -// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<4xf32> to memref<4xf32> -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xf32> to memref<32xf32> +// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<4xf32> to 
memref<4xf32> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32xf32> to memref<32xf32> // CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_4]]] : memref<4xf32> // CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_3]]] : memref // CHECK: %[[VAL_14:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref @@ -112,8 +112,8 @@ func.func @mul_inv_enc_dense1d(%arga: tensor<32xf32, #EncDenseVec>, // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi32, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi32, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi32, #sparse{{[0-9]*}}> -// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<34xi32> to memref<34xi32> -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xi32> to memref<32xi32> +// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<34xi32> to memref<34xi32> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32xi32> to memref<32xi32> // CHECK-DAG: linalg.fill ins(%[[ZERO]] : i32) outs(%[[VAL_11]] : memref<32xi32>) // CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_3]]] : memref // CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref @@ -163,8 +163,8 @@ func.func @and_affine_dense1d(%arga: tensor<32xi32, #SpVec>, // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf64, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf64, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf64, #sparse{{[0-9]*}}> -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<34x19xf64> to memref<34x19xf64> -// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf64> to memref<32x16xf64> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<34x19xf64> to memref<34x19xf64> +// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16xf64> to memref<32x16xf64> // CHECK: scf.for %[[VAL_14:.*]] = %[[VAL_5]] to %[[VAL_4]] step %[[VAL_3]] { // CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_14]]] : memref // CHECK: %[[VAL_16:.*]] = arith.addi %[[VAL_14]], %[[VAL_3]] : index @@ -223,7 +223,7 @@ func.func @mul_affine_dense2d(%arga: tensor<32x16xf64, #CSR>, // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32x19xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32x19xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x19xf64, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf64> to memref<32x16xf64> +// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16xf64> to memref<32x16xf64> // CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_4]]] : memref // CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_5]]] : memref // CHECK: scf.for %[[VAL_17:.*]] = %[[VAL_15]] to %[[VAL_16]] step %[[VAL_5]] { @@ -287,7 +287,7 @@ func.func @mul_affine_dense_dim_2d(%arga: tensor<34x16xf64, #CSR>, // CHECK-DAG: %[[VAL_11:.*]] = 
sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32x19xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32x19xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x19xf64, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf64> to memref<32x16xf64> +// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32x16xf64> to memref<32x16xf64> // CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_5]]] : memref // CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_6]]] : memref // CHECK: scf.for %[[VAL_17:.*]] = %[[VAL_15]] to %[[VAL_16]] step %[[VAL_6]] { diff --git a/mlir/test/Dialect/SparseTensor/sparse_batch.mlir b/mlir/test/Dialect/SparseTensor/sparse_batch.mlir index cfddef743cf28..88e93be62a9e6 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_batch.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_batch.mlir @@ -14,7 +14,7 @@ // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 2 : index} : tensor<8x4x2xf32, #sparse{{[0-9]*}}> to memref<8x?xindex> // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<8x4x2xf32, #sparse{{[0-9]*}}> to memref<8x?xindex> // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<8x4x2xf32, #sparse{{[0-9]*}}> to memref<8x?xf32> -// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_6]] : tensor<8x4x2xf32> +// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_6]] : tensor<8x4x2xf32> // CHECK-DAG: linalg.fill ins(%[[VAL_3]] : f32) outs(%[[VAL_10]] : memref<8x4x2xf32>) // CHECK: scf.for %[[VAL_11:.*]] = %[[VAL_2]] to %[[VAL_5]] step %[[VAL_1]] { // CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_2]] to %[[VAL_4]] step %[[VAL_1]] { diff --git a/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir b/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir index d1d8276f8daef..69c0d8c84abbe 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir @@ -38,7 +38,7 @@ // CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf64> to memref<32xf64> +// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xf64> to memref<32xf64> // CHECK: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref // CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref // CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] { @@ -70,7 +70,7 @@ func.func @abs(%arga: tensor<32xf64, #SV>, // CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf64> to memref<32xf64> +// CHECK-DAG: 
%[[VAL_7:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xf64> to memref<32xf64> // CHECK: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref // CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref // CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] { @@ -102,7 +102,7 @@ func.func @ceil(%arga: tensor<32xf64, #SV>, // CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf64> to memref<32xf64> +// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xf64> to memref<32xf64> // CHECK: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref // CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref // CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] { @@ -134,7 +134,7 @@ func.func @floor(%arga: tensor<32xf64, #SV>, // CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf64> to memref<32xf64> +// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xf64> to memref<32xf64> // CHECK: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref // CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref // CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] { @@ -169,8 +169,8 @@ func.func @neg(%arga: tensor<32xf64, #SV>, // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse{{[0-9]*}}> -// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf64> to memref<32xf64> -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xf64> to memref<32xf64> +// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xf64> to memref<32xf64> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32xf64> to memref<32xf64> // CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref // CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref // CHECK: %[[VAL_14:.*]]:2 = scf.while (%[[VAL_15:.*]] = %[[VAL_12]], %[[VAL_16:.*]] = %[[VAL_4]]) : (index, index) -> (index, index) { @@ -229,8 +229,8 @@ func.func @add(%arga: tensor<32xf64, #SV>, // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, 
#sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf64> to memref<32xf64> -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xf64> to memref<32xf64> +// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xf64> to memref<32xf64> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32xf64> to memref<32xf64> // CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref // CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref // CHECK: %[[VAL_14:.*]]:2 = scf.while (%[[VAL_15:.*]] = %[[VAL_12]], %[[VAL_16:.*]] = %[[VAL_4]]) : (index, index) -> (index, index) { @@ -289,8 +289,8 @@ func.func @sub(%arga: tensor<32xf64, #SV>, // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse{{[0-9]*}}> -// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf64> to memref<32xf64> -// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xf64> to memref<32xf64> +// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xf64> to memref<32xf64> +// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32xf64> to memref<32xf64> // CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref // CHECK: %[[VAL_11:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref // CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_10]] to %[[VAL_11]] step %[[VAL_4]] { @@ -325,7 +325,7 @@ func.func @mul(%arga: tensor<32xf64, #SV>, // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse{{[0-9]*}}> -// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf64> to memref<32xf64> +// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xf64> to memref<32xf64> // CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref // CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref // CHECK: scf.for %[[VAL_11:.*]] = %[[VAL_9]] to %[[VAL_10]] step %[[VAL_4]] { diff --git a/mlir/test/Dialect/SparseTensor/sparse_fusion.mlir b/mlir/test/Dialect/SparseTensor/sparse_fusion.mlir index d9f48afef4810..352a0fa242300 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_fusion.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_fusion.mlir @@ -25,7 +25,7 @@ // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<100xf64, #sparse> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<100xf64, #sparse> to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<100xf64, #sparse> to memref -// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_8]] : +// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_8]] : // CHECK-DAG: linalg.fill ins(%[[VAL_4]] : f64) outs(%[[VAL_12]] : memref<100xf64>) // CHECK-DAG: %[[VAL_13:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_3]]] : memref // CHECK-DAG: 
%[[VAL_14:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_2]]] : memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_int_ops.mlir b/mlir/test/Dialect/SparseTensor/sparse_int_ops.mlir index 3a33a200f8279..be96dbf10242e 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_int_ops.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_int_ops.mlir @@ -33,8 +33,8 @@ // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse{{[0-9]*}}> -// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xi64> to memref<32xi64> -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xi64> to memref<32xi64> +// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xi64> to memref<32xi64> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32xi64> to memref<32xi64> // CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref // CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref // CHECK: %[[VAL_14:.*]]:2 = scf.while (%[[VAL_15:.*]] = %[[VAL_12]], %[[VAL_16:.*]] = %[[VAL_4]]) : (index, index) -> (index, index) { @@ -94,8 +94,8 @@ func.func @add(%arga: tensor<32xi64, #SV>, // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse{{[0-9]*}}> -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xi64> to memref<32xi64> -// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xi64> to memref<32xi64> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xi64> to memref<32xi64> +// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32xi64> to memref<32xi64> // CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_4]]] : memref // CHECK: %[[VAL_14:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_6]]] : memref // CHECK: %[[VAL_15:.*]]:2 = scf.while (%[[VAL_16:.*]] = %[[VAL_13]], %[[VAL_17:.*]] = %[[VAL_4]]) : (index, index) -> (index, index) { @@ -154,8 +154,8 @@ func.func @sub(%arga: tensor<32xi64, #SV>, // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse{{[0-9]*}}> -// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xi64> to memref<32xi64> -// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xi64> to memref<32xi64> +// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xi64> to memref<32xi64> +// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32xi64> to memref<32xi64> // CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref // CHECK: %[[VAL_11:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref // CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_10]] to %[[VAL_11]] step %[[VAL_4]] { @@ -190,7 +190,7 
+190,7 @@ func.func @mul(%arga: tensor<32xi64, #SV>, // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse{{[0-9]*}}> -// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xi64> to memref<32xi64> +// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xi64> to memref<32xi64> // CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref // CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref // CHECK: scf.for %[[VAL_11:.*]] = %[[VAL_9]] to %[[VAL_10]] step %[[VAL_4]] { @@ -224,7 +224,7 @@ func.func @divsbyc(%arga: tensor<32xi64, #SV>, // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse{{[0-9]*}}> -// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xi64> to memref<32xi64> +// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xi64> to memref<32xi64> // CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref // CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref // CHECK: scf.for %[[VAL_11:.*]] = %[[VAL_9]] to %[[VAL_10]] step %[[VAL_4]] { @@ -258,8 +258,8 @@ func.func @divubyc(%arga: tensor<32xi64, #SV>, // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xi64> to memref<32xi64> -// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xi64> to memref<32xi64> +// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xi64> to memref<32xi64> +// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32xi64> to memref<32xi64> // CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref // CHECK: %[[VAL_11:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref // CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_10]] to %[[VAL_11]] step %[[VAL_4]] { @@ -296,8 +296,8 @@ func.func @and(%arga: tensor<32xi64, #SV>, // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xi64> to memref<32xi64> -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xi64> to memref<32xi64> +// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xi64> to memref<32xi64> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] :
tensor<32xi64> to memref<32xi64> // CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref // CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref // CHECK: %[[VAL_14:.*]]:2 = scf.while (%[[VAL_15:.*]] = %[[VAL_12]], %[[VAL_16:.*]] = %[[VAL_4]]) : (index, index) -> (index, index) { @@ -356,8 +356,8 @@ func.func @or(%arga: tensor<32xi64, #SV>, // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xi64> to memref<32xi64> -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xi64> to memref<32xi64> +// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xi64> to memref<32xi64> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32xi64> to memref<32xi64> // CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref // CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref // CHECK: %[[VAL_14:.*]]:2 = scf.while (%[[VAL_15:.*]] = %[[VAL_12]], %[[VAL_16:.*]] = %[[VAL_4]]) : (index, index) -> (index, index) { @@ -414,7 +414,7 @@ func.func @xor(%arga: tensor<32xi64, #SV>, // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xi64> to memref<32xi64> +// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xi64> to memref<32xi64> // CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref // CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref // CHECK: scf.for %[[VAL_11:.*]] = %[[VAL_9]] to %[[VAL_10]] step %[[VAL_4]] { @@ -448,7 +448,7 @@ func.func @ashrbyc(%arga: tensor<32xi64, #SV>, // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xi64> to memref<32xi64> +// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xi64> to memref<32xi64> // CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref // CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref // CHECK: scf.for %[[VAL_11:.*]] = %[[VAL_9]] to %[[VAL_10]] step %[[VAL_4]] { @@ -482,7 +482,7 @@ func.func @lsrbyc(%arga: tensor<32xi64, #SV>, // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_7:.*]] = 
sparse_tensor.values %[[VAL_0]] : tensor<32xi64, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xi64> to memref<32xi64> +// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<32xi64> to memref<32xi64> // CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref // CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref // CHECK: scf.for %[[VAL_11:.*]] = %[[VAL_9]] to %[[VAL_10]] step %[[VAL_4]] { diff --git a/mlir/test/Dialect/SparseTensor/sparse_kernels.mlir b/mlir/test/Dialect/SparseTensor/sparse_kernels.mlir index d215ebb1c0c6f..5f2aa5e3a2736 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_kernels.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_kernels.mlir @@ -18,8 +18,8 @@ // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<10x20xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<10x20xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10x20xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<20x30xf32> to memref<20x30xf32> -// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<10x30xf32> to memref<10x30xf32> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<20x30xf32> to memref<20x30xf32> +// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<10x30xf32> to memref<10x30xf32> // CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref // CHECK: %[[VAL_14:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref // CHECK: scf.for %[[VAL_15:.*]] = %[[VAL_13]] to %[[VAL_14]] step %[[VAL_5]] { @@ -58,13 +58,13 @@ func.func @matmul1(%a: tensor<10x20xf32, #DCSR>, // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 10 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<10x20xf32> to memref<10x20xf32> +// CHECK-DAG: %[[VAL_6:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor<10x20xf32> to memref<10x20xf32> // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] -// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<10x30xf32> to memref<10x30xf32> +// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<10x30xf32> to memref<10x30xf32> // CHECK: scf.for %[[VAL_13:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] { // CHECK: %[[VAL_14:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref // CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_5]]] : memref @@ -203,13 +203,13 @@ func.func @matmul2(%A: tensor<4x8xf64, #DCSR>, // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 6 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<8x8xi32> to memref<8x8xi32> +// CHECK-DAG: %[[VAL_6:.*]] = bufferization.to_buffer %[[VAL_0]] : 
tensor<8x8xi32> to memref<8x8xi32> // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<3x3xi32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<3x3xi32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<3x3xi32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<3x3xi32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<3x3xi32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<6x6xi32> to memref<6x6xi32> +// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<6x6xi32> to memref<6x6xi32> // CHECK: scf.for %[[VAL_13:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] { // CHECK: scf.for %[[VAL_14:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] { // CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_13]], %[[VAL_14]]] : memref<6x6xi32> @@ -255,13 +255,13 @@ func.func @conv2d(%input: tensor<8x8xi32>, // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 2 : i64 -// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<5x3xi8> to memref<5x3xi8> +// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor<5x3xi8> to memref<5x3xi8> // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<3x6xi8, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<3x6xi8, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 1 : index} : tensor<3x6xi8, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<3x6xi8, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<3x6xi8, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<5x6xi64> to memref<5x6xi64> +// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<5x6xi64> to memref<5x6xi64> // CHECK: scf.for %[[VAL_14:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] { // CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_4]]] : memref // CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_5]]] : memref @@ -309,7 +309,7 @@ func.func @quantized_matmul(%input1: tensor<5x3xi8>, // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<1024xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<1024xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<1024xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor to memref +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor to memref // CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_11]][] : memref // CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref // CHECK: %[[VAL_14:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref diff --git 
a/mlir/test/Dialect/SparseTensor/sparse_kernels_to_iterator.mlir b/mlir/test/Dialect/SparseTensor/sparse_kernels_to_iterator.mlir index 836e26b51f7c1..f6f7f396adab5 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_kernels_to_iterator.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_kernels_to_iterator.mlir @@ -85,7 +85,7 @@ func.func @sqsum(%arg0: tensor) -> tensor { // CHECK: %[[VAL_3:.*]] = arith.constant 0 : index // CHECK: %[[VAL_4:.*]] = arith.constant 0 : i32 // CHECK: %[[VAL_5:.*]] = arith.constant dense<0> : tensor<10xi32> -// CHECK: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_5]] : tensor<10xi32> to memref<10xi32> +// CHECK: %[[VAL_6:.*]] = bufferization.to_buffer %[[VAL_5]] : tensor<10xi32> to memref<10xi32> // CHECK: linalg.fill ins(%[[VAL_4]] : i32) outs(%[[VAL_6]] : memref<10xi32>) // CHECK: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<10xi32, #sparse{{.*}}> to memref // CHECK: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<10xi32, #sparse{{.*}}> to memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_lower.mlir b/mlir/test/Dialect/SparseTensor/sparse_lower.mlir index cab57389f032e..2866e115065d2 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_lower.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_lower.mlir @@ -29,8 +29,8 @@ // CHECK-HIR-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse{{[0-9]*}}> // CHECK-HIR-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse{{[0-9]*}}> // CHECK-HIR-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x64xf64, #sparse{{[0-9]*}}> -// CHECK-HIR-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<64xf64> to memref<64xf64> -// CHECK-HIR-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xf64> to memref<32xf64> +// CHECK-HIR-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<64xf64> to memref<64xf64> +// CHECK-HIR-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32xf64> to memref<32xf64> // CHECK-HIR: scf.for %[[VAL_12:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] { // CHECK-HIR-DAG: %[[VAL_13:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_12]]] : memref // CHECK-HIR-DAG: %[[VAL_14:.*]] = arith.addi %[[VAL_12]], %[[VAL_5]] : index @@ -60,8 +60,8 @@ // CHECK-MIR-DAG: %[[VAL_6:.*]] = call @sparsePositions0(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr, index) -> memref // CHECK-MIR-DAG: %[[VAL_7:.*]] = call @sparseCoordinates0(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr, index) -> memref // CHECK-MIR-DAG: %[[VAL_8:.*]] = call @sparseValuesF64(%[[VAL_0]]) : (!llvm.ptr) -> memref -// CHECK-MIR-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<64xf64> to memref<64xf64> -// CHECK-MIR-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xf64> to memref<32xf64> +// CHECK-MIR-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<64xf64> to memref<64xf64> +// CHECK-MIR-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32xf64> to memref<32xf64> // CHECK-MIR: scf.for %[[VAL_14:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] { // CHECK-MIR-DAG: %[[VAL_15:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_14]]] : memref // CHECK-MIR-DAG: %[[VAL_16:.*]] = arith.addi %[[VAL_14]], %[[VAL_5]] : index diff --git a/mlir/test/Dialect/SparseTensor/sparse_lower_col.mlir b/mlir/test/Dialect/SparseTensor/sparse_lower_col.mlir index b998eeb0d3944..17c3c29cf5211 100644 --- 
a/mlir/test/Dialect/SparseTensor/sparse_lower_col.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_lower_col.mlir @@ -32,8 +32,8 @@ // CHECK-HIR-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[DEMAP]] {level = 1 : index} // CHECK-HIR-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[DEMAP]] {level = 1 : index} // CHECK-HIR-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[DEMAP]] -// CHECK-HIR-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<64xf64> to memref<64xf64> -// CHECK-HIR-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xf64> to memref<32xf64> +// CHECK-HIR-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<64xf64> to memref<64xf64> +// CHECK-HIR-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32xf64> to memref<32xf64> // CHECK-HIR: scf.for %[[VAL_12:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] { // CHECK-HIR-DAG: %[[VAL_13:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_12]]] : memref<64xf64> // CHECK-HIR-DAG: %[[VAL_14:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_12]]] : memref @@ -62,8 +62,8 @@ // CHECK-MIR-DAG: %[[VAL_7:.*]] = call @sparsePositions0(%[[VAL_0]], %[[VAL_6]]) : (!llvm.ptr, index) -> memref // CHECK-MIR-DAG: %[[VAL_8:.*]] = call @sparseCoordinates0(%[[VAL_0]], %[[VAL_6]]) : (!llvm.ptr, index) -> memref // CHECK-MIR-DAG: %[[VAL_9:.*]] = call @sparseValuesF64(%[[VAL_0]]) : (!llvm.ptr) -> memref -// CHECK-MIR-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<64xf64> to memref<64xf64> -// CHECK-MIR-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xf64> to memref<32xf64> +// CHECK-MIR-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<64xf64> to memref<64xf64> +// CHECK-MIR-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32xf64> to memref<32xf64> // CHECK-MIR: scf.for %[[VAL_15:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] { // CHECK-MIR: %[[VAL_16:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_15]]] : memref<64xf64> // CHECK-MIR: %[[VAL_17:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_15]]] : memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_lower_inplace.mlir b/mlir/test/Dialect/SparseTensor/sparse_lower_inplace.mlir index e1e1953779fa8..f2a29a550ed01 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_lower_inplace.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_lower_inplace.mlir @@ -29,8 +29,8 @@ // CHECK-HIR-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse{{[0-9]*}}> // CHECK-HIR-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse{{[0-9]*}}> // CHECK-HIR-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x64xf64, #sparse{{[0-9]*}}> -// CHECK-HIR-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<64xf64> to memref<64xf64> -// CHECK-HIR-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xf64> to memref<32xf64> +// CHECK-HIR-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<64xf64> to memref<64xf64> +// CHECK-HIR-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32xf64> to memref<32xf64> // CHECK-HIR: scf.for %[[VAL_11:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] { // CHECK-HIR-DAG: %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_11]]] : memref // CHECK-HIR-DAG: %[[VAL_13:.*]] = arith.addi %[[VAL_11]], %[[VAL_5]] : index @@ -60,8 +60,8 @@ // CHECK-MIR-DAG: %[[VAL_6:.*]] = call @sparsePositions0(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr, index) -> memref // 
CHECK-MIR-DAG: %[[VAL_7:.*]] = call @sparseCoordinates0(%[[VAL_0]], %[[VAL_5]]) : (!llvm.ptr, index) -> memref // CHECK-MIR-DAG: %[[VAL_8:.*]] = call @sparseValuesF64(%[[VAL_0]]) : (!llvm.ptr) -> memref -// CHECK-MIR-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<64xf64> to memref<64xf64> -// CHECK-MIR-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xf64> to memref<32xf64> +// CHECK-MIR-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<64xf64> to memref<64xf64> +// CHECK-MIR-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<32xf64> to memref<32xf64> // CHECK-MIR: scf.for %[[VAL_11:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] { // CHECK-MIR-DAG: %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_11]]] : memref // CHECK-MIR-DAG: %[[VAL_13:.*]] = arith.addi %[[VAL_11]], %[[VAL_5]] : index diff --git a/mlir/test/Dialect/SparseTensor/sparse_nd.mlir b/mlir/test/Dialect/SparseTensor/sparse_nd.mlir index b80a48363773f..8f06df3c9b98d 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_nd.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_nd.mlir @@ -35,13 +35,13 @@ // CHECK-DAG: %[[VAL_10:.*]] = arith.constant 80 : index // CHECK-DAG: %[[VAL_11:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_12:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<10x20x30x40x50x60x70x80xf32> to memref<10x20x30x40x50x60x70x80xf32> +// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor<10x20x30x40x50x60x70x80xf32> to memref<10x20x30x40x50x60x70x80xf32> // CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 3 : index} : tensor<80x70x60x50x40x30x20x10xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 3 : index} : tensor<80x70x60x50x40x30x20x10xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_16:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 4 : index} : tensor<80x70x60x50x40x30x20x10xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_17:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 4 : index} : tensor<80x70x60x50x40x30x20x10xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_18:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<80x70x60x50x40x30x20x10xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_20:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<10x20x30x40x50x60x70x80xf32> to memref<10x20x30x40x50x60x70x80xf32> +// CHECK-DAG: %[[VAL_20:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<10x20x30x40x50x60x70x80xf32> to memref<10x20x30x40x50x60x70x80xf32> // CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_20]] : memref<10x20x30x40x50x60x70x80xf32> // CHECK: scf.for %[[VAL_21:.*]] = %[[VAL_11]] to %[[VAL_10]] step %[[VAL_12]] { // CHECK: %[[VAL_23:.*]] = arith.muli %[[VAL_21]], %[[VAL_9]] : index diff --git a/mlir/test/Dialect/SparseTensor/sparse_outbuf.mlir b/mlir/test/Dialect/SparseTensor/sparse_outbuf.mlir index ab7a30e2f96a5..ebb5ab6075da2 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_outbuf.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_outbuf.mlir @@ -19,7 +19,7 @@ // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<10xi32, #{{.*}}> to memref // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<10xi32, #{{.*}}> to memref // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10xi32, #{{.*}}> to memref -// CHECK-DAG: %[[VAL_8:.*]] = 
bufferization.to_memref %[[VAL_1]] : tensor<10xf32> to memref<10xf32> +// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<10xf32> to memref<10xf32> // CHECK-DAG: linalg.fill ins(%[[VAL_3]] : f32) outs(%[[VAL_8]] : memref<10xf32>) // CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_2]]] : memref // CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref @@ -53,7 +53,7 @@ func.func @allout_inplace(%arga: tensor<10xi32, #SV>, // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<10xi32, #{{.*}}> to memref // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<10xi32, #{{.*}}> to memref // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10xi32, #{{.*}}> to memref -// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_4]] : tensor<10xf32> to memref<10xf32> +// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_4]] : tensor<10xf32> to memref<10xf32> // CHECK-DAG: linalg.fill ins(%[[VAL_2]] : f32) outs(%[[VAL_8]] : memref<10xf32>) // CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_1]]] : memref // CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref @@ -86,7 +86,7 @@ func.func @allout_materialize(%arga: tensor<10xi32, #SV>) -> tensor<10xf32> { // CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<10xf32, #{{.*}}> to memref // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<10xf32, #{{.*}}> to memref // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10xf32, #{{.*}}> to memref -// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<10xf32> to memref<10xf32> +// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<10xf32> to memref<10xf32> // CHECK-DAG: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref // CHECK-DAG: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref // CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] { diff --git a/mlir/test/Dialect/SparseTensor/sparse_pack.mlir b/mlir/test/Dialect/SparseTensor/sparse_pack.mlir index 91e3842bdd367..4546d3367b16d 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_pack.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_pack.mlir @@ -12,12 +12,12 @@ // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 2 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 100 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<2xindex> to memref<2xindex> +// CHECK-DAG: %[[VAL_6:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<2xindex> to memref<2xindex> // CHECK-DAG: %[[VAL_7:.*]] = memref.cast %[[VAL_6]] : memref<2xindex> to memref -// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<6x2xi32> to memref<6x2xi32> +// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<6x2xi32> to memref<6x2xi32> // CHECK-DAG: %[[VAL_9:.*]] = memref.collapse_shape %[[VAL_8]] {{\[\[}}0, 1]] : memref<6x2xi32> into memref<12xi32> // CHECK-DAG: %[[VAL_10:.*]] = memref.cast %[[VAL_9]] : memref<12xi32> to memref -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<6xf64> to memref<6xf64> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor<6xf64> to memref<6xf64> // CHECK-DAG: %[[VAL_12:.*]] = memref.cast %[[VAL_11]] : memref<6xf64> to memref // 
CHECK: %[[VAL_13:.*]] = sparse_tensor.storage_specifier.init // CHECK: %[[VAL_14:.*]] = sparse_tensor.storage_specifier.set %[[VAL_13]] lvl_sz at 0 with %[[VAL_4]] @@ -45,18 +45,18 @@ func.func @sparse_pack(%values: tensor<6xf64>, %pos:tensor<2xindex>, %coordinate // CHECK-SAME: %[[VAL_5:.*]]: tensor<2xindex>, // CHECK-SAME: %[[VAL_6:.*]]: tensor<6x2xi32>) -> (tensor<6xf64>, tensor<2xindex>, tensor<6x2xi32>) { // CHECK: %[[VAL_7:.*]] = sparse_tensor.storage_specifier.get %[[VAL_3]] pos_mem_sz at 0 -// CHECK: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_5]] : tensor<2xindex> to memref<2xindex> +// CHECK: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_5]] : tensor<2xindex> to memref<2xindex> // CHECK: %[[VAL_9:.*]] = memref.subview %[[VAL_8]][0] {{\[}}%[[VAL_7]]] [1] : memref<2xindex> to memref // CHECK: %[[VAL_10:.*]] = memref.subview %[[VAL_0]][0] {{\[}}%[[VAL_7]]] [1] : memref to memref // CHECK: memref.copy %[[VAL_10]], %[[VAL_9]] : memref to memref // CHECK: %[[VAL_11:.*]] = sparse_tensor.storage_specifier.get %[[VAL_3]] crd_mem_sz at 0 -// CHECK: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_6]] : tensor<6x2xi32> to memref<6x2xi32> +// CHECK: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_6]] : tensor<6x2xi32> to memref<6x2xi32> // CHECK: %[[VAL_13:.*]] = memref.collapse_shape %[[VAL_12]] {{\[\[}}0, 1]] : memref<6x2xi32> into memref<12xi32> // CHECK: %[[VAL_14:.*]] = memref.subview %[[VAL_13]][0] {{\[}}%[[VAL_11]]] [1] : memref<12xi32> to memref // CHECK: %[[VAL_15:.*]] = memref.subview %[[VAL_1]][0] {{\[}}%[[VAL_11]]] [1] : memref to memref // CHECK: memref.copy %[[VAL_15]], %[[VAL_14]] : memref to memref // CHECK: %[[VAL_16:.*]] = sparse_tensor.storage_specifier.get %[[VAL_3]] val_mem_sz -// CHECK: %[[VAL_17:.*]] = bufferization.to_memref %[[VAL_4]] : tensor<6xf64> to memref<6xf64> +// CHECK: %[[VAL_17:.*]] = bufferization.to_buffer %[[VAL_4]] : tensor<6xf64> to memref<6xf64> // CHECK: %[[VAL_18:.*]] = memref.subview %[[VAL_17]][0] {{\[}}%[[VAL_16]]] [1] : memref<6xf64> to memref // CHECK: %[[VAL_19:.*]] = memref.subview %[[VAL_2]][0] {{\[}}%[[VAL_16]]] [1] : memref to memref // CHECK: memref.copy %[[VAL_19]], %[[VAL_18]] : memref to memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_parallel_reduce.mlir b/mlir/test/Dialect/SparseTensor/sparse_parallel_reduce.mlir index c2cabd4351112..1cfa8571a9f0f 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_parallel_reduce.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_parallel_reduce.mlir @@ -24,8 +24,8 @@ // CHECK-DAG: %[[TMP_0:.*]] = sparse_tensor.positions %[[TMP_arg0]] {level = 1 : index} // CHECK-DAG: %[[TMP_1:.*]] = sparse_tensor.coordinates %[[TMP_arg0]] {level = 1 : index} // CHECK-DAG: %[[TMP_2:.*]] = sparse_tensor.values %[[TMP_arg0]] -// CHECK-DAG: %[[TMP_3:.*]] = bufferization.to_memref %[[TMP_arg1]] : tensor<32xf32> to memref<32xf32> -// CHECK-DAG: %[[TMP_4:.*]] = bufferization.to_memref %[[TMP_arg2]] : tensor<16xf32> to memref<16xf32> +// CHECK-DAG: %[[TMP_3:.*]] = bufferization.to_buffer %[[TMP_arg1]] : tensor<32xf32> to memref<32xf32> +// CHECK-DAG: %[[TMP_4:.*]] = bufferization.to_buffer %[[TMP_arg2]] : tensor<16xf32> to memref<16xf32> // CHECK: scf.parallel (%[[TMP_arg3:.*]]) = (%[[TMP_c0]]) to (%[[TMP_c16]]) step (%[[TMP_c1]]) { // CHECK: %[[TMP_6:.*]] = memref.load %[[TMP_4]][%[[TMP_arg3]]] : memref<16xf32> // CHECK: %[[TMP_7:.*]] = memref.load %[[TMP_0]][%[[TMP_arg3]]] : memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_perm.mlir b/mlir/test/Dialect/SparseTensor/sparse_perm.mlir index 
5f8002b5b6d31..289939fbfc16b 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_perm.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_perm.mlir @@ -24,7 +24,7 @@ // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index // CHECK: %[[DEMAP:.*]] = sparse_tensor.reinterpret_map %[[VAL_0]] // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[DEMAP]] : tensor<30x10x20xf32, #sparse{{[0-9]*}}> -// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<20x30x10xf32> to memref<20x30x10xf32> +// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<20x30x10xf32> to memref<20x30x10xf32> // CHECK: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_9]] : memref<20x30x10xf32>) // CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] { // CHECK: %[[VAL_12:.*]] = arith.muli %[[VAL_10]], %[[VAL_4]] : index @@ -64,7 +64,7 @@ func.func @sparse_static_dims(%arga: tensor<10x20x30xf32, #X>, // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.lvl %[[DEMAP]], %[[VAL_2]] : tensor // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.lvl %[[DEMAP]], %[[VAL_3]] : tensor // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.lvl %[[DEMAP]], %[[VAL_4]] : tensor -// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor to memref +// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor to memref // CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_10]] : memref) // CHECK: scf.for %[[VAL_11:.*]] = %[[VAL_3]] to %[[VAL_7]] step %[[VAL_4]] { // CHECK: %[[VAL_13:.*]] = arith.muli %[[VAL_11]], %[[VAL_8]] : index diff --git a/mlir/test/Dialect/SparseTensor/sparse_perm_lower.mlir b/mlir/test/Dialect/SparseTensor/sparse_perm_lower.mlir index 93b5da41fc7f9..4abaf03dff50f 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_perm_lower.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_perm_lower.mlir @@ -26,7 +26,7 @@ // CHECK-HIR-DAG: %[[VAL_6:.*]] = sparse_tensor.lvl %[[DEMAP]], %[[VAL_2]] : tensor // CHECK-HIR-DAG: %[[VAL_7:.*]] = sparse_tensor.lvl %[[DEMAP]], %[[VAL_4]] : tensor // CHECK-HIR-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[DEMAP]] : tensor -// CHECK-HIR-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor to memref +// CHECK-HIR-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor to memref // CHECK-HIR: %[[VAL_11:.*]] = tensor.extract %[[VAL_1]][] : tensor // CHECK-HIR: %[[VAL_12:.*]] = scf.for %[[VAL_13:.*]] = %[[VAL_3]] to %[[VAL_5]] step %[[VAL_2]] iter_args(%[[VAL_14:.*]] = %[[VAL_11]]) -> (f32) { // CHECK-HIR: %[[VAL_18:.*]] = arith.muli %[[VAL_13]], %[[VAL_6]] : index @@ -58,7 +58,7 @@ // CHECK-MIR-DAG: %[[DimSize1:.*]] = call @sparseLvlSize(%[[ARGA]], %[[I1]]) // CHECK-MIR-DAG: %[[DimSize2:.*]] = call @sparseLvlSize(%[[ARGA]], %[[I2]]) // CHECK-MIR-DAG: %[[VAL_8:.*]] = call @sparseValuesF32(%[[ARGA]]) : (!llvm.ptr) -> memref -// CHECK-MIR-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[ARGX]] : tensor to memref +// CHECK-MIR-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[ARGX]] : tensor to memref // CHECK-MIR: %[[VAL_11:.*]] = tensor.extract %[[ARGX]][] : tensor // CHECK-MIR: %[[VAL_12:.*]] = scf.for %[[D2:.*]] = %[[I0]] to %[[DimSize0]] step %[[I1]] iter_args(%[[VAL_14:.*]] = %[[VAL_11]]) -> (f32) { // CHECK-MIR: %[[VAL_18:.*]] = arith.muli %[[D2]], %[[DimSize1]] : index diff --git a/mlir/test/Dialect/SparseTensor/sparse_scalars.mlir b/mlir/test/Dialect/SparseTensor/sparse_scalars.mlir index e5df646851d43..8d1f62f69f0f6 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_scalars.mlir +++ 
b/mlir/test/Dialect/SparseTensor/sparse_scalars.mlir @@ -33,8 +33,8 @@ // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_1]] : tensor to memref -// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_4]] : tensor<32x16xf32> to memref<32x16xf32> +// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor to memref +// CHECK-DAG: %[[VAL_15:.*]] = bufferization.to_buffer %[[VAL_4]] : tensor<32x16xf32> to memref<32x16xf32> // CHECK-DAG: %[[VAL_16:.*]] = memref.load %[[VAL_14]][] : memref // CHECK-DAG: %[[VAL_17:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_6]]] : memref // CHECK-DAG: %[[VAL_18:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_7]]] : memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir b/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir index e769534641ec8..d653e144fb3bd 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir @@ -64,14 +64,14 @@ func.func @fold_yield_direct_zero() -> tensor<32xf64> { // CHECK-DAG: %[[VAL_6:.*]] = arith.constant dense<0.000000e+00> : tensor<8x8xf64> // CHECK-DAG: %[[VAL_7:.*]] = bufferization.alloc_tensor() copy(%[[VAL_6]]) : tensor<8x8xf64> // CHECK-DAG: %[[VAL_8:.*]] = bufferization.alloc_tensor() copy(%[[VAL_6]]) : tensor<8x8xf64> -// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<8x8xf64> to memref<8x8xf64> -// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<8x8xf64> to memref<8x8xf64> +// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<8x8xf64> to memref<8x8xf64> +// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<8x8xf64> to memref<8x8xf64> // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_16:.*]] = bufferization.to_memref %[[VAL_8]] : tensor<8x8xf64> to memref<8x8xf64> +// CHECK-DAG: %[[VAL_16:.*]] = bufferization.to_buffer %[[VAL_8]] : tensor<8x8xf64> to memref<8x8xf64> // CHECK: %[[VAL_17:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_4]]] : memref // CHECK: %[[VAL_18:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_5]]] : memref // CHECK: scf.for %[[VAL_19:.*]] = %[[VAL_17]] to %[[VAL_18]] step %[[VAL_5]] { @@ -132,8 +132,8 @@ func.func @sampled_dd_unfused(%args: tensor<8x8xf64, #SM>, // CHECK-DAG: %[[VAL_8:.*]] = arith.constant dense<0.000000e+00> : tensor<8x8xf64> // CHECK-DAG: %[[VAL_9:.*]] = bufferization.alloc_tensor() copy(%[[VAL_8]]) : tensor<8x8xf64> // CHECK-DAG: %[[VAL_10:.*]] = tensor.empty() : tensor<8x8xf64, #sparse{{[0-9]*}}> -// CHECK-DAG: %[[VAL_11:.*]] = 
bufferization.to_memref %[[VAL_1]] : tensor<8x8xf64> to memref<8x8xf64> -// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<8x8xf64> to memref<8x8xf64> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<8x8xf64> to memref<8x8xf64> +// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<8x8xf64> to memref<8x8xf64> // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_sddmm_org.mlir b/mlir/test/Dialect/SparseTensor/sparse_sddmm_org.mlir index 3cc0aa26c8bc2..39962b46d5d51 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_sddmm_org.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_sddmm_org.mlir @@ -30,8 +30,8 @@ // CHECK-DAG: %[[VAL_6:.*]] = arith.constant false // CHECK-DAG: %[[VAL_7:.*]] = arith.constant true // CHECK-DAG: %[[VAL_8:.*]] = tensor.empty() : tensor<8x8xf64, #sparse{{[0-9]*}}> -// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<8x8xf64> to memref<8x8xf64> -// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<8x8xf64> to memref<8x8xf64> +// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<8x8xf64> to memref<8x8xf64> +// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<8x8xf64> to memref<8x8xf64> // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir b/mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir index c99d5d25f7b4a..f4b565c7f9c8a 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_vector_chain.mlir @@ -31,7 +31,7 @@ // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_2]] {level = 1 : index} : tensor<64x32xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_2]] {level = 1 : index} : tensor<64x32xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_2]] : tensor<64x32xf64, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_0]] : tensor to memref +// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor to memref // CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_14]][] : memref // CHECK: %[[VAL_16:.*]] = scf.for %[[VAL_17:.*]] = %[[VAL_6]] to %[[VAL_5]] step %[[VAL_7]] iter_args(%[[VAL_18:.*]] = %[[VAL_15]]) -> (f64) { // CHECK: %[[VAL_19:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_17]]] : memref diff --git a/mlir/test/Dialect/SparseTensor/sparse_vector_index.mlir b/mlir/test/Dialect/SparseTensor/sparse_vector_index.mlir index d88372276989d..e9587edef4678 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_vector_index.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_vector_index.mlir @@ -28,7 +28,7 @@ // 
CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<8xi64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<8xi64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<8xi64, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_7]] : tensor<8xi64> to memref<8xi64> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_7]] : tensor<8xi64> to memref<8xi64> // CHECK-DAG: linalg.fill ins(%[[VAL_4]] : i64) outs(%[[VAL_11]] : memref<8xi64>) // CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_5]]] : memref // CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_6]]] : memref @@ -70,7 +70,7 @@ func.func @sparse_index_1d_conj(%arga: tensor<8xi64, #SparseVector>) -> tensor<8 // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<8xi64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<8xi64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<8xi64, #sparse{{[0-9]*}}> to memref -// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_7]] : tensor<8xi64> to memref<8xi64> +// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_buffer %[[VAL_7]] : tensor<8xi64> to memref<8xi64> // CHECK-DAG: linalg.fill ins(%[[VAL_3]] : i64) outs(%[[VAL_11]] : memref<8xi64>) // CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_4]]] : memref // CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_5]]] : memref diff --git a/mlir/test/Dialect/SparseTensor/spy_sddmm.mlir b/mlir/test/Dialect/SparseTensor/spy_sddmm.mlir index 6c3acf43f241e..0c73d2fe8a079 100644 --- a/mlir/test/Dialect/SparseTensor/spy_sddmm.mlir +++ b/mlir/test/Dialect/SparseTensor/spy_sddmm.mlir @@ -24,8 +24,8 @@ // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 8 : index // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<8x8xf64> to memref<8x8xf64> -// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<8x8xf64> to memref<8x8xf64> +// CHECK-DAG: %[[VAL_6:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor<8x8xf64> to memref<8x8xf64> +// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor<8x8xf64> to memref<8x8xf64> // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_2]] {level = 1 : index} : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_2]] {level = 1 : index} : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_2]] : tensor<8x8xf64, #sparse{{[0-9]*}}> to memref diff --git a/mlir/test/Dialect/SparseTensor/spy_sddmm_bsr.mlir b/mlir/test/Dialect/SparseTensor/spy_sddmm_bsr.mlir index df1e564c06231..a673b0dacf4af 100755 --- a/mlir/test/Dialect/SparseTensor/spy_sddmm_bsr.mlir +++ b/mlir/test/Dialect/SparseTensor/spy_sddmm_bsr.mlir @@ -37,8 +37,8 @@ // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 0.000000e+00 : f32 // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.reinterpret_map %[[VAL_0]] : tensor to tensor // CHECK-DAG: %[[VAL_8:.*]] = tensor.dim %[[VAL_1]], %[[VAL_3]] : tensor -// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor to memref -// CHECK-DAG: 
%[[VAL_10:.*]] = bufferization.to_memref %[[VAL_2]] : tensor to memref +// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_1]] : tensor to memref +// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor to memref // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.lvl %[[VAL_7]], %[[VAL_4]] : tensor // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.positions %[[VAL_7]] {level = 1 : index} : tensor to memref // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_7]] {level = 1 : index} : tensor to memref diff --git a/mlir/test/Dialect/SparseTensor/unused-tensor.mlir b/mlir/test/Dialect/SparseTensor/unused-tensor.mlir index 7e8b9f83fac79..526c3f4f8830c 100644 --- a/mlir/test/Dialect/SparseTensor/unused-tensor.mlir +++ b/mlir/test/Dialect/SparseTensor/unused-tensor.mlir @@ -28,8 +28,8 @@ // CHECK-DAG: %[[VAL_5:.*]] = arith.constant 4 : index // CHECK-DAG: %[[VAL_6:.*]] = arith.constant 0 : index // CHECK-DAG: %[[VAL_7:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<2x4xf64> -// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<2x4xf64> +// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor<2x4xf64> +// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_buffer %[[VAL_2]] : tensor<2x4xf64> // CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_6]] to %[[VAL_4]] step %[[VAL_7]] { // CHECK: scf.for %[[VAL_11:.*]] = %[[VAL_6]] to %[[VAL_3]] step %[[VAL_7]] { // CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_6]] to %[[VAL_5]] step %[[VAL_7]] { diff --git a/mlir/test/Dialect/SparseTensor/vectorize_reduction.mlir b/mlir/test/Dialect/SparseTensor/vectorize_reduction.mlir index 15228c6a5f79a..01b717090e87a 100644 --- a/mlir/test/Dialect/SparseTensor/vectorize_reduction.mlir +++ b/mlir/test/Dialect/SparseTensor/vectorize_reduction.mlir @@ -16,7 +16,7 @@ // CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant 1 : index // CHECK-ON-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor to memref // CHECK-ON-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor to memref -// CHECK-ON-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : tensor to memref +// CHECK-ON-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor to memref // CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref // CHECK-ON: %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref // CHECK-ON: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref @@ -42,7 +42,7 @@ // CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index // CHECK-OFF-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor to memref // CHECK-OFF-DAG: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor to memref -// CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : tensor to memref +// CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor to memref // CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref // CHECK-OFF: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref // CHECK-OFF: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref @@ -93,7 +93,7 @@ func.func @sparse_reduction_ori(%argx: tensor, // CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant 1 : index // CHECK-ON-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor to memref // CHECK-ON-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor to memref -// CHECK-ON-DAG: %[[VAL_8:.*]] = 
bufferization.to_memref %[[VAL_0]] : tensor to memref +// CHECK-ON-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor to memref // CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref // CHECK-ON: %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref // CHECK-ON: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref @@ -119,7 +119,7 @@ func.func @sparse_reduction_ori(%argx: tensor, // CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index // CHECK-OFF-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor to memref // CHECK-OFF-DAG: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor to memref -// CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : tensor to memref +// CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor to memref // CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref // CHECK-OFF: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref // CHECK-OFF: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref @@ -168,7 +168,7 @@ func.func @sparse_reduction_ori_accumulator_on_rhs(%argx: tensor, // CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant 1 : index // CHECK-ON-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor to memref // CHECK-ON-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor to memref -// CHECK-ON-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : tensor to memref +// CHECK-ON-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor to memref // CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref // CHECK-ON: %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_3]]] : memref // CHECK-ON: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref @@ -194,7 +194,7 @@ func.func @sparse_reduction_ori_accumulator_on_rhs(%argx: tensor, // CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index // CHECK-OFF-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor to memref // CHECK-OFF-DAG: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor to memref -// CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : tensor to memref +// CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor to memref // CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref // CHECK-OFF: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref // CHECK-OFF: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref @@ -243,7 +243,7 @@ func.func @sparse_reduction_subi(%argx: tensor, // CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant 1 : index // CHECK-ON-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor to memref // CHECK-ON-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor to memref -// CHECK-ON-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : tensor to memref +// CHECK-ON-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor to memref // CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref // CHECK-ON: %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref // CHECK-ON: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref @@ -269,7 +269,7 @@ func.func @sparse_reduction_subi(%argx: tensor, // CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index // CHECK-OFF-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor to memref // CHECK-OFF-DAG: %[[VAL_5:.*]] = 
sparse_tensor.values %[[VAL_1]] : tensor to memref -// CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : tensor to memref +// CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor to memref // CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref // CHECK-OFF: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref // CHECK-OFF: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref @@ -319,7 +319,7 @@ func.func @sparse_reduction_xor(%argx: tensor, // CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant 1 : index // CHECK-ON-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor to memref // CHECK-ON-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor to memref -// CHECK-ON-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : tensor to memref +// CHECK-ON-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor to memref // CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref // CHECK-ON: %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref // CHECK-ON: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref @@ -345,7 +345,7 @@ func.func @sparse_reduction_xor(%argx: tensor, // CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index // CHECK-OFF-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor to memref // CHECK-OFF-DAG: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor to memref -// CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : tensor to memref +// CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor to memref // CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref // CHECK-OFF: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref // CHECK-OFF: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref @@ -395,7 +395,7 @@ func.func @sparse_reduction_addi(%argx: tensor, // CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant 1 : index // CHECK-ON-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor to memref // CHECK-ON-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor to memref -// CHECK-ON-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : tensor to memref +// CHECK-ON-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor to memref // CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref // CHECK-ON: %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref // CHECK-ON: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref @@ -421,7 +421,7 @@ func.func @sparse_reduction_addi(%argx: tensor, // CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index // CHECK-OFF-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor to memref // CHECK-OFF-DAG: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor to memref -// CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : tensor to memref +// CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor to memref // CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref // CHECK-OFF: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref // CHECK-OFF: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref @@ -471,7 +471,7 @@ func.func @sparse_reduction_subf(%argx: tensor, // CHECK-ON-DAG: %[[VAL_5:.*]] = arith.constant 1 : index // CHECK-ON-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor to memref // 
CHECK-ON-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor to memref -// CHECK-ON-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : tensor to memref +// CHECK-ON-DAG: %[[VAL_8:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor to memref // CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref // CHECK-ON: %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref // CHECK-ON: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref @@ -497,7 +497,7 @@ func.func @sparse_reduction_subf(%argx: tensor, // CHECK-OFF-DAG: %[[VAL_3:.*]] = arith.constant 1 : index // CHECK-OFF-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor to memref // CHECK-OFF-DAG: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor to memref -// CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : tensor to memref +// CHECK-OFF-DAG: %[[VAL_6:.*]] = bufferization.to_buffer %[[VAL_0]] : tensor to memref // CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref // CHECK-OFF: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref // CHECK-OFF: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref diff --git a/mlir/test/Dialect/Tensor/bufferize.mlir b/mlir/test/Dialect/Tensor/bufferize.mlir index c1beed95f2006..567c4abea488e 100644 --- a/mlir/test/Dialect/Tensor/bufferize.mlir +++ b/mlir/test/Dialect/Tensor/bufferize.mlir @@ -3,7 +3,7 @@ // CHECK-LABEL: func @dim( // CHECK-SAME: %[[TENSOR:.*]]: tensor<*xf32>, // CHECK-SAME: %[[INDEX:.*]]: index) -> index { -// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : tensor<*xf32> to memref<*xf32> +// CHECK: %[[MEMREF:.*]] = bufferization.to_buffer %[[TENSOR]] : tensor<*xf32> to memref<*xf32> // CHECK: %[[EXTENT:.*]] = memref.dim %[[MEMREF]], %[[INDEX]] : memref<*xf32> // CHECK: return %[[EXTENT]] : index func.func @dim(%arg0: tensor<*xf32>, %arg1: index) -> index { @@ -15,7 +15,7 @@ func.func @dim(%arg0: tensor<*xf32>, %arg1: index) -> index { // CHECK-LABEL: func @rank( // CHECK-SAME: %[[TENSOR:.*]]: tensor<*xf32>) -> index { -// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] +// CHECK: %[[MEMREF:.*]] = bufferization.to_buffer %[[TENSOR]] // CHECK: %[[EXTENT:.*]] = memref.rank %[[MEMREF]] : memref<*xf32> func.func @rank(%arg0: tensor<*xf32>) -> index { %0 = tensor.rank %arg0 : tensor<*xf32> @@ -26,7 +26,7 @@ func.func @rank(%arg0: tensor<*xf32>) -> index { // CHECK-LABEL: func @tensor.cast( // CHECK-SAME: %[[TENSOR:.*]]: tensor) -> tensor<2xindex> { -// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] +// CHECK: %[[MEMREF:.*]] = bufferization.to_buffer %[[TENSOR]] // CHECK: %[[CASTED:.*]] = memref.cast %[[MEMREF]] : memref to memref<2xindex> // CHECK: %[[RET:.*]] = bufferization.to_tensor %[[CASTED]] // CHECK: return %[[RET]] : tensor<2xindex> @@ -39,7 +39,7 @@ func.func @tensor.cast(%arg0: tensor) -> tensor<2xindex> { // CHECK-LABEL: func @tensor.cast_from_unranked( // CHECK-SAME: %[[TENSOR:.*]]: tensor<*xf32>) -> tensor<2xf32> { -// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : tensor<*xf32> to memref<*xf32> +// CHECK: %[[MEMREF:.*]] = bufferization.to_buffer %[[TENSOR]] : tensor<*xf32> to memref<*xf32> // CHECK: %[[CASTED_MEMREF:.*]] = memref.cast %[[MEMREF]] : memref<*xf32> to memref<2xf32, strided<[?], offset: ?>> // CHECK: %[[RET:.*]] = bufferization.to_tensor %[[CASTED_MEMREF]] : memref<2xf32, strided<[?], offset: ?>> // CHECK: return %[[RET]] : tensor<2xf32> @@ -52,7 +52,7 @@ func.func 
@tensor.cast_from_unranked(%arg0: tensor<*xf32>) -> tensor<2xf32> { // CHECK-LABEL: func @tensor.cast_to_unranked( // CHECK-SAME: %[[TENSOR:.*]]: tensor<2xf32>) -> tensor<*xf32> { -// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : tensor<2xf32> to memref<2xf32> +// CHECK: %[[MEMREF:.*]] = bufferization.to_buffer %[[TENSOR]] : tensor<2xf32> to memref<2xf32> // CHECK: %[[CASTED_MEMREF:.*]] = memref.cast %[[MEMREF]] : memref<2xf32> to memref<*xf32> // CHECK: %[[RET:.*]] = bufferization.to_tensor %[[CASTED_MEMREF]] : memref<*xf32> // CHECK: return %[[RET]] : tensor<*xf32> @@ -77,7 +77,7 @@ func.func @tensor.empty() -> tensor<5xf32> { // CHECK-LABEL: func @tensor.extract( // CHECK-SAME: %[[TENSOR:.*]]: tensor, // CHECK-SAME: %[[IDX:.*]]: index) -> f32 { -// CHECK: %[[MEMREF:.*]] = bufferization.to_memref %[[TENSOR]] : tensor to memref +// CHECK: %[[MEMREF:.*]] = bufferization.to_buffer %[[TENSOR]] : tensor to memref // CHECK: %[[RET:.*]] = memref.load %[[MEMREF]][%[[IDX]]] : memref // CHECK: return %[[RET]] : f32 // CHECK: } @@ -199,7 +199,7 @@ func.func @tensor.from_elements_3d(%f0 : f32) -> tensor<3x2x2xf32> { // CHECK-LABEL: func @tensor.generate( // CHECK-SAME: %[[ARG:.*]]: tensor<*xf32>, // CHECK-SAME: %[[DYNAMIC_EXTENT:.*]]: index) -> tensor { -// CHECK-DAG: %[[ARG_M:.*]] = bufferization.to_memref %[[ARG]] : tensor<*xf32> to memref<*xf32> +// CHECK-DAG: %[[ARG_M:.*]] = bufferization.to_buffer %[[ARG]] : tensor<*xf32> to memref<*xf32> // CHECK-DAG: %[[ALLOC:.*]] = memref.alloc(%[[DYNAMIC_EXTENT]]) {{.*}} : memref // CHECK: %[[ALLOC_T:.*]] = bufferization.to_tensor %[[ALLOC]] // CHECK: %[[MAPPED:.*]] = linalg.map @@ -266,7 +266,7 @@ func.func @tensor.generate_unknown_ops_in_body(%arg0: index) -> tensor // CHECK-SAME: %[[t1:.*]]: tensor, %[[idx1:.*]]: index, %[[idx2:.*]]: index func.func @tensor.extract_slice( %t1: tensor, %idx1: index, %idx2: index) -> tensor { - // CHECK: %[[m:.*]] = bufferization.to_memref %[[t1]] : tensor to memref + // CHECK: %[[m:.*]] = bufferization.to_buffer %[[t1]] : tensor to memref // CHECK: %[[r:.*]] = memref.subview %[[m]][5, %[[idx2]]] [%[[idx1]], 10] [1, 1] : memref to memref> %0 = tensor.extract_slice %t1[5, %idx2][%idx1, 10][1, 1] : tensor to tensor @@ -282,7 +282,7 @@ func.func @tensor.extract_slice( // CHECK-SAME: %[[idx2:.*]]: index func.func @tensor.extract_slice_rank_reducing( %t1: tensor, %idx1: index, %idx2: index) -> tensor { - // CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] : tensor to memref + // CHECK: %[[m1:.*]] = bufferization.to_buffer %[[t1]] : tensor to memref // CHECK: %[[r:.*]] = memref.subview %[[m1]][5, %[[idx1]], 10] [%[[idx2]], 1, 15] [1, 1, 1] : memref to memref> %0 = tensor.extract_slice %t1[5, %idx1, 10][%idx2, 1, 15][1, 1, 1] : tensor to tensor @@ -300,8 +300,8 @@ func.func @tensor.insert_slice(%t1: tensor, %t2: tensor, %idx1: index, %idx2: index) -> tensor { // CHECK-DAG: %[[c0:.*]] = arith.constant 0 : index // CHECK-DAG: %[[c1:.*]] = arith.constant 1 : index - // CHECK-DAG: %[[m1:.*]] = bufferization.to_memref %[[t1]] : tensor to memref - // CHECK-DAG: %[[m2:.*]] = bufferization.to_memref %[[t2]] : tensor to memref + // CHECK-DAG: %[[m1:.*]] = bufferization.to_buffer %[[t1]] : tensor to memref + // CHECK-DAG: %[[m2:.*]] = bufferization.to_buffer %[[t2]] : tensor to memref // CHECK-DAG: %[[dim0:.*]] = memref.dim %[[m1]], %[[c0]] // CHECK-DAG: %[[dim1:.*]] = memref.dim %[[m1]], %[[c1]] // CHECK: %[[alloc:.*]] = memref.alloc(%[[dim0]], %[[dim1]]) @@ -353,7 +353,7 @@ func.func 
@tensor.insert_slice_rank_reducing_2( // CHECK-SAME: %[[f:.*]]: f32 func.func @tensor.insert(%t1: tensor<5xf32>, %idx1: index, %f: f32) -> tensor<5xf32> { // CHECK-DAG: %[[alloc:.*]] = memref.alloc() {{.*}} : memref<5xf32> - // CHECK-DAG: %[[m1:.*]] = bufferization.to_memref %[[t1]] : tensor<5xf32> to memref<5xf32> + // CHECK-DAG: %[[m1:.*]] = bufferization.to_buffer %[[t1]] : tensor<5xf32> to memref<5xf32> // CHECK: memref.copy %[[m1]], %[[alloc]] // CHECK: memref.store %[[f]], %[[alloc]][%[[idx1]]] %0 = tensor.insert %f into %t1[%idx1] : tensor<5xf32> @@ -368,7 +368,7 @@ func.func @tensor.insert(%t1: tensor<5xf32>, %idx1: index, %f: f32) -> tensor<5x // CHECK-LABEL: func @tensor.expand_shape( // CHECK-SAME: %[[t1:.*]]: tensor, %[[sz0:.*]]: index func.func @tensor.expand_shape(%t1: tensor, %sz0: index) -> tensor<2x?x10xf32> { - // CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] + // CHECK: %[[m1:.*]] = bufferization.to_buffer %[[t1]] // CHECK: %[[expanded:.*]] = memref.expand_shape %[[m1]] {{\[\[}}0, 1], [2]] output_shape [2, %[[sz0]], 10] : memref into memref<2x?x10xf32> %0 = tensor.expand_shape %t1 [[0, 1], [2]] output_shape [2, %sz0, 10] : tensor into tensor<2x?x10xf32> @@ -384,7 +384,7 @@ func.func @tensor.expand_shape(%t1: tensor, %sz0: index) -> tensor<2x? // CHECK-SAME: %[[t1:.*]]: tensor, %{{.*}}: index, %{{.*}}: index, %[[sz0:.*]]: index func.func @tensor.expand_shape_of_slice( %t1: tensor, %o1: index, %s1: index, %sz0: index) -> tensor { - // CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] : + // CHECK: %[[m1:.*]] = bufferization.to_buffer %[[t1]] : // CHECK: %[[subview:.*]] = memref.subview %[[m1]][%{{.*}}, 5] [%{{.*}}, 10] [1, 1] : memref to memref> %0 = tensor.extract_slice %t1[%o1, 5][%s1, 10][1, 1] : tensor to tensor @@ -401,7 +401,7 @@ func.func @tensor.expand_shape_of_slice( // CHECK-SAME: %[[t1:.*]]: tensor func.func @tensor.expand_shape_of_scalar_slice( %t1: tensor, %o1: index, %s1: index) -> tensor<1xf32> { - // CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] : tensor to memref + // CHECK: %[[m1:.*]] = bufferization.to_buffer %[[t1]] : tensor to memref // CHECK: %[[subview:.*]] = memref.subview %[[m1]][%{{.*}}] [1] [1] : memref to memref> %0 = tensor.extract_slice %t1[%o1][1][1] : tensor to tensor // CHECK: %[[expanded:.*]] = memref.expand_shape %[[subview]] [] output_shape [1] : memref into memref<1xf32, strided<[1], offset: ?>> @@ -415,7 +415,7 @@ func.func @tensor.expand_shape_of_scalar_slice( // CHECK-LABEL: func @tensor.expand_shape_multiple_dynamic_indices( // CHECK-SAME: %[[t1:.*]]: tensor, %[[sz0:.*]]: index, %[[sz1:.*]]: index, %[[sz2:.*]]: index func.func @tensor.expand_shape_multiple_dynamic_indices(%t1: tensor, %sz0: index, %sz1: index, %sz2: index) -> tensor { - // CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] + // CHECK: %[[m1:.*]] = bufferization.to_buffer %[[t1]] // CHECK: %[[expanded:.*]] = memref.expand_shape %[[m1]] {{\[\[}}0, 1, 2], [3]] output_shape [%[[sz0]], %[[sz1]], %[[sz2]], 256] : memref into memref %0 = tensor.expand_shape %t1 [[0, 1, 2], [3]] output_shape [%sz0, %sz1, %sz2, 256] : tensor into tensor @@ -429,7 +429,7 @@ func.func @tensor.expand_shape_multiple_dynamic_indices(%t1: tensor, // CHECK-LABEL: func @tensor.collapse_shape( // CHECK-SAME: %[[t1:.*]]: tensor<2x?x?xf32> func.func @tensor.collapse_shape(%t1: tensor<2x?x?xf32>) -> tensor { - // CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] : tensor<2x?x?xf32> to memref<2x?x?xf32> + // CHECK: %[[m1:.*]] = bufferization.to_buffer %[[t1]] : tensor<2x?x?xf32> to 
memref<2x?x?xf32> // CHECK: %[[collapsed:.*]] = memref.collapse_shape %[[m1]] [ // CHECK-SAME: [0, 1], [2]] : memref<2x?x?xf32> into memref %0 = tensor.collapse_shape %t1 [[0, 1], [2]] @@ -445,7 +445,7 @@ func.func @tensor.collapse_shape(%t1: tensor<2x?x?xf32>) -> tensor { // CHECK-LABEL: func @tensor.collapse_shape_to_scalar( // CHECK-SAME: %[[t1:.*]]: tensor<1x1x1xf32> func.func @tensor.collapse_shape_to_scalar(%t1: tensor<1x1x1xf32>) -> tensor { - // CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] : tensor<1x1x1xf32> to memref<1x1x1xf32> + // CHECK: %[[m1:.*]] = bufferization.to_buffer %[[t1]] : tensor<1x1x1xf32> to memref<1x1x1xf32> // CHECK: %[[collapsed:.*]] = memref.collapse_shape %[[m1]] [] : memref<1x1x1xf32> into memref %0 = tensor.collapse_shape %t1 [] : tensor<1x1x1xf32> into tensor @@ -534,7 +534,7 @@ func.func @tensor.collapse_shape_of_slice5(%arg0: tensor<2x2x2xi64>) -> tensor<4 // CHECK-LABEL: func @tensor.reshape( // CHECK-SAME: %[[t1:.*]]: tensor func.func @tensor.reshape(%t1: tensor) -> tensor<2x2x5xf32> { - // CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] : tensor to memref + // CHECK: %[[m1:.*]] = bufferization.to_buffer %[[t1]] : tensor to memref // CHECK: %[[two:.*]] = arith.constant 2 : i64 %two = arith.constant 2 : i64 @@ -566,7 +566,7 @@ func.func @tensor.reshape(%t1: tensor) -> tensor<2x2x5xf32> { // CHECK-SAME: %[[t1:.*]]: tensor, %[[l2:.*]]: index, %[[h1:.*]]: index, %[[h2:.*]]: index func.func @tensor.pad(%t1: tensor, %l2: index, %h1: index, %h2: index) -> tensor { - // CHECK-DAG: %[[m1:.*]] = bufferization.to_memref %[[t1]] : tensor to memref + // CHECK-DAG: %[[m1:.*]] = bufferization.to_buffer %[[t1]] : tensor to memref // CHECK-DAG: %[[c0:.*]] = arith.constant 0 : index // CHECK-DAG: %[[c1:.*]] = arith.constant 1 : index // CHECK-DAG: %[[dim0:.*]] = memref.dim %[[m1]], %[[c0]] @@ -582,7 +582,7 @@ func.func @tensor.pad(%t1: tensor, %l2: index, %h1: index, // CHECK: %[[mul:.*]] = arith.muli %[[index0]], %[[index1]] // CHECK: linalg.yield %[[mul]] // CHECK: } - // CHECK: %[[mapped_m:.*]] = bufferization.to_memref %[[mapped]] + // CHECK: %[[mapped_m:.*]] = bufferization.to_buffer %[[mapped]] // CHECK: %[[subview:.*]] = memref.subview %[[mapped_m]][5, %[[l2]]] [%[[dim0]], 10] [1, 1] // CHECK: memref.copy %[[m1]], %[[subview]] %0 = tensor.pad %t1 low[5, %l2] high[%h1, %h2] { diff --git a/mlir/test/Dialect/Vector/bufferize.mlir b/mlir/test/Dialect/Vector/bufferize.mlir index c2abebe706ac0..887fb941cc651 100644 --- a/mlir/test/Dialect/Vector/bufferize.mlir +++ b/mlir/test/Dialect/Vector/bufferize.mlir @@ -2,7 +2,7 @@ // CHECK-LABEL: func @transfer_read( // CHECK-SAME: %[[t:.*]]: tensor, %[[o1:.*]]: index, %[[o2:.*]]: index, %[[pad:.*]]: f32) -// CHECK: %[[m:.*]] = bufferization.to_memref %[[t]] : tensor to memref +// CHECK: %[[m:.*]] = bufferization.to_buffer %[[t]] : tensor to memref // CHECK: %[[r:.*]] = vector.transfer_read %[[m]][%[[o1]], %[[o2]]], %[[pad]] {in_bounds = [true, false]} : memref, vector<5x6xf32> // CHECK: return %[[r]] func.func @transfer_read(%t: tensor, %o1: index, @@ -16,7 +16,7 @@ func.func @transfer_read(%t: tensor, %o1: index, // CHECK-LABEL: func @transfer_write( // CHECK-SAME: %[[t:.*]]: tensor, %[[o1:.*]]: index, %[[o2:.*]]: index, %[[vec:.*]]: vector<5x6xf32>, %[[mask:.*]]: vector<5x6xi1>) -// CHECK: %[[m:.*]] = bufferization.to_memref %[[t]] : tensor to memref +// CHECK: %[[m:.*]] = bufferization.to_buffer %[[t]] : tensor to memref // CHECK: %[[alloc:.*]] = memref.alloc(%{{.*}}, %{{.*}}) {{.*}} : memref // CHECK: memref.copy 
%[[m]], %[[alloc]] // CHECK: vector.transfer_write %[[vec]], %[[alloc]][%[[o1]], %[[o2]]], %[[mask]] {in_bounds = [true, false]} : vector<5x6xf32>, memref @@ -35,7 +35,7 @@ func.func @transfer_write(%t: tensor, %o1: index, // CHECK-LABEL: func @gather( // CHECK-SAME: %[[base:.*]]: tensor, %[[v:.*]]: vector<16xi32>, // CHECK-SAME: %[[mask:.*]]: vector<16xi1>, %[[pass_thru:.*]]: vector<16xf32>) -// CHECK: %[[m:.*]] = bufferization.to_memref %[[base]] : tensor to memref +// CHECK: %[[m:.*]] = bufferization.to_buffer %[[base]] : tensor to memref // CHECK: %[[c0:.*]] = arith.constant 0 : index // CHECK: %[[out:.*]] = vector.gather %[[m]][%[[c0]], %[[c0]]] [%[[v]]], %[[mask]], %[[pass_thru]] : memref, vector<16xi32>, vector<16xi1>, vector<16xf32> into vector<16xf32> func.func @gather(%base: tensor, %v: vector<16xi32>, %mask: vector<16xi1>, %pass_thru: vector<16xf32>) -> vector<16xf32> { diff --git a/mlir/test/Integration/Dialect/Tosa/CPU/test-maxpool-dynamic.mlir b/mlir/test/Integration/Dialect/Tosa/CPU/test-maxpool-dynamic.mlir index 3c508fbb67a11..06bc0e7ef44ec 100644 --- a/mlir/test/Integration/Dialect/Tosa/CPU/test-maxpool-dynamic.mlir +++ b/mlir/test/Integration/Dialect/Tosa/CPU/test-maxpool-dynamic.mlir @@ -54,7 +54,7 @@ func.func @main() { %result_static = func.call @max_pool_static(%A) : (!tensor_type) -> !tensor_type %result_dynamic = func.call @max_pool_dynamic(%A_dynamic) : (tensor) -> tensor - %static_buffer = bufferization.to_memref %result_static : !tensor_type to !memref_type + %static_buffer = bufferization.to_buffer %result_static : !tensor_type to !memref_type %unranked_static_buffer = memref.cast %static_buffer : !memref_type to memref<*xf32> // CHECK: Unranked Memref base@ = {{.*}} rank = 4 offset = 0 sizes = [1, 4, 4, 1] strides = [16, 4, 1, 1] data = @@ -81,7 +81,7 @@ func.func @main() { func.call @printMemrefF32(%unranked_static_buffer) : (memref<*xf32>) -> () - %dynamic_buffer = bufferization.to_memref %result_dynamic : tensor to memref + %dynamic_buffer = bufferization.to_buffer %result_dynamic : tensor to memref %unranked_dynamic_buffer = memref.cast %dynamic_buffer : memref to memref<*xf32> // CHECK: Unranked Memref base@ = {{.*}} rank = 4 offset = 0 sizes = [1, 4, 4, 1] strides = [16, 4, 1, 1] data = diff --git a/mlir/test/Integration/Dialect/Vector/CPU/AMX/mulf-full.mlir b/mlir/test/Integration/Dialect/Vector/CPU/AMX/mulf-full.mlir index 8cf15cd697868..8014bb7d2dcce 100644 --- a/mlir/test/Integration/Dialect/Vector/CPU/AMX/mulf-full.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/AMX/mulf-full.mlir @@ -100,8 +100,8 @@ func.func @entry() -> i32 { ]> : tensor<16x32xbf16> // Set up memory. - %a = bufferization.to_memref %0 : tensor<16x32xbf16> to memref<16x32xbf16> - %b = bufferization.to_memref %1 : tensor<16x32xbf16> to memref<16x32xbf16> + %a = bufferization.to_buffer %0 : tensor<16x32xbf16> to memref<16x32xbf16> + %b = bufferization.to_buffer %1 : tensor<16x32xbf16> to memref<16x32xbf16> %c = memref.alloc() : memref<16x16xf32> // Call kernel. diff --git a/mlir/test/Integration/Dialect/Vector/CPU/AMX/muli-full.mlir b/mlir/test/Integration/Dialect/Vector/CPU/AMX/muli-full.mlir index 652ba0698c4c9..a0076db6660d7 100644 --- a/mlir/test/Integration/Dialect/Vector/CPU/AMX/muli-full.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/AMX/muli-full.mlir @@ -100,8 +100,8 @@ func.func @entry() -> i32 { ]> : tensor<16x64xi8> // Set up memory. 
- %a = bufferization.to_memref %0 : tensor<16x64xi8> to memref<16x64xi8> - %b = bufferization.to_memref %1 : tensor<16x64xi8> to memref<16x64xi8> + %a = bufferization.to_buffer %0 : tensor<16x64xi8> to memref<16x64xi8> + %b = bufferization.to_buffer %1 : tensor<16x64xi8> to memref<16x64xi8> %c = memref.alloc() : memref<16x16xi32> // Call kernel. diff --git a/mlir/utils/tree-sitter-mlir/dialect/bufferization.js b/mlir/utils/tree-sitter-mlir/dialect/bufferization.js index 8d9fdb1fcfc39..d5c99263f8ec4 100644 --- a/mlir/utils/tree-sitter-mlir/dialect/bufferization.js +++ b/mlir/utils/tree-sitter-mlir/dialect/bufferization.js @@ -2,31 +2,26 @@ module.exports = { bufferization_dialect : $ => choice( - seq('bufferization.alloc_tensor', - field('in', $._value_use_list_parens), - field('copy', optional(seq(token('copy'), '(', - $.value_use, ')'))), - field('size_hint', - optional(seq(token('size_hint'), '=', - $.value_use))), - field('attributes', optional($.attribute)), - field('return', $._type_annotation)), + seq('bufferization.alloc_tensor', field('in', $._value_use_list_parens), + field('copy', optional(seq(token('copy'), '(', $.value_use, ')'))), + field('size_hint', + optional(seq(token('size_hint'), '=', $.value_use))), + field('attributes', optional($.attribute)), + field('return', $._type_annotation)), - // operation ::= `bufferization.to_memref` $tensor - // attr-dict `:` type($memref) - seq('bufferization.to_memref', - field('tensor', $.value_use), - field('attributes', optional($.attribute)), - field('return', $._type_annotation)), + // operation ::= `bufferization.to_buffer` $tensor + // attr-dict `:` type($memref) + seq('bufferization.to_buffer', field('tensor', $.value_use), + field('attributes', optional($.attribute)), + field('return', $._type_annotation)), - // operation ::= `bufferization.to_tensor` $memref - // (`restrict` $restrict^)? - // (`writable` $writable^)? attr-dict - // `:` type($memref) - seq('bufferization.to_tensor', - field('memref', $.value_use), - field('restrict', optional($.restrict_attr)), - field('writable', optional($.writable_attr)), - field('attributes', optional($.attribute)), - field('return', $._type_annotation))) + // operation ::= `bufferization.to_tensor` $memref + // (`restrict` $restrict^)? + // (`writable` $writable^)? attr-dict + // `:` type($memref) + seq('bufferization.to_tensor', field('memref', $.value_use), + field('restrict', optional($.restrict_attr)), + field('writable', optional($.writable_attr)), + field('attributes', optional($.attribute)), + field('return', $._type_annotation))) } diff --git a/mlir/utils/tree-sitter-mlir/queries/highlights.scm b/mlir/utils/tree-sitter-mlir/queries/highlights.scm index 97aba2b266eca..4cbea7bbca031 100644 --- a/mlir/utils/tree-sitter-mlir/queries/highlights.scm +++ b/mlir/utils/tree-sitter-mlir/queries/highlights.scm @@ -209,7 +209,7 @@ "tensor.yield" "bufferization.alloc_tensor" - "bufferization.to_memref" + "bufferization.to_buffer" "bufferization.to_tensor" "linalg.batch_matmul"