Merged
@@ -24,10 +24,46 @@
// CHECK-NOT: copy
// CHECK: %[[call:.*]]:2 = call @inner_func(%[[arg0]])
%0, %1 = call @inner_func(%t0) : (tensor<?xf32>) -> (tensor<?xf32>, f32)
-// CHECK: return %[[call]]#1, %[[call]]#0 : f32, memref<?xf32,{{.*}}>
+// CHECK: return %[[call]]#1, %[[call]]#0 : f32, memref<?xf32{{.*}}>
[Comment] @andrey-golubev (Contributor, Author), Oct 6, 2025:
note: I don't know why, but running the test locally failed for me without this fix. mlir-opt prints memref<?xf32> for me (no comma, no layout), so the old pattern memref<?xf32,{{.*}}>, which requires a literal comma after the element type, cannot match it. I'm not sure how this passes on main right now.

return %1, %0 : f32, tensor<?xf32>
}
"test.finish" () : () -> ()
}) : () -> ()

// -----

#enc1 = #test.tensor_encoding<"hello">
#enc2 = #test.tensor_encoding<"not hello">

"test.symbol_scope_isolated"() ({
// CHECK: func @inner_func(
// CHECK-SAME: %[[arg0:.*]]: memref<?xf32, #test.memref_layout<"hello">>)
// CHECK-SAME: -> memref<?xf32, #test.memref_layout<"hello">>
func.func @inner_func(%t: tensor<?xf32, #enc1>)
-> tensor<?xf32, #enc1> {
// CHECK: return %[[arg0]]
return %t : tensor<?xf32, #enc1>
}

// CHECK: func @outer_func(
// CHECK-SAME: %[[arg0:.*]]: memref<?xf32, #test.memref_layout<"hello">>)
// CHECK-SAME: -> (memref<?xf32, #test.memref_layout<"hello">>,
// CHECK-SAME: memref<?xf32, #test.memref_layout<"not hello">>)
func.func @outer_func(%t0: tensor<?xf32, #enc1>)
-> (tensor<?xf32, #enc1>, tensor<?xf32, #enc2>) {
// CHECK: %[[call:.*]] = call @inner_func(%[[arg0]])
%0 = call @inner_func(%t0)
: (tensor<?xf32, #enc1>) -> (tensor<?xf32, #enc1>)

// CHECK: %[[local:.*]] = "test.create_memref_op"() : ()
// CHECK-SAME: -> memref<?xf32, #test.memref_layout<"not hello">>
%local = "test.create_tensor_op"() : () -> tensor<?xf32, #enc2>
// CHECK: %[[dummy:.*]] = "test.dummy_memref_op"(%[[local]])
%1 = "test.dummy_tensor_op"(%local) : (tensor<?xf32, #enc2>)
-> tensor<?xf32, #enc2>

// CHECK: return %[[call]], %[[dummy]]
return %0, %1 : tensor<?xf32, #enc1>, tensor<?xf32, #enc2>
}
"test.finish" () : () -> ()
}) : () -> ()
26 changes: 26 additions & 0 deletions mlir/test/lib/Dialect/Bufferization/TestOneShotModuleBufferize.cpp
@@ -11,11 +11,25 @@
#include "mlir/Dialect/Bufferization/Transforms/Bufferize.h"
#include "mlir/Dialect/Bufferization/Transforms/OneShotModuleBufferize.h"
#include "mlir/Dialect/Bufferization/Transforms/Transforms.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Pass/Pass.h"

#include "TestAttributes.h" // TestTensorEncodingAttr, TestMemRefLayoutAttr
#include "TestDialect.h"

using namespace mlir;

namespace {
MemRefLayoutAttrInterface
getMemRefLayoutForTensorEncoding(RankedTensorType tensorType) {
if (auto encoding = dyn_cast_if_present<test::TestTensorEncodingAttr>(
tensorType.getEncoding())) {
return cast<MemRefLayoutAttrInterface>(test::TestMemRefLayoutAttr::get(
tensorType.getContext(), encoding.getDummy()));
}
return {};
}

struct TestOneShotModuleBufferizePass
: public PassWrapper<TestOneShotModuleBufferizePass, OperationPass<>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestOneShotModuleBufferizePass)
@@ -25,6 +39,7 @@ struct TestOneShotModuleBufferizePass
: PassWrapper(pass) {}

void getDependentDialects(DialectRegistry &registry) const override {
registry.insert<test::TestDialect>();
registry.insert<bufferization::BufferizationDialect>();
}
StringRef getArgument() const final {
@@ -41,6 +56,17 @@ struct TestOneShotModuleBufferizePass
bufferization::OneShotBufferizationOptions opt;

opt.bufferizeFunctionBoundaries = true;
opt.functionArgTypeConverterFn =
[&](bufferization::TensorLikeType tensor, Attribute memSpace,
func::FuncOp, const bufferization::BufferizationOptions &) {
assert(isa<RankedTensorType>(tensor) && "tests only builtin tensors");
auto tensorType = cast<RankedTensorType>(tensor);
auto layout = getMemRefLayoutForTensorEncoding(tensorType);
return cast<bufferization::BufferLikeType>(
MemRefType::get(tensorType.getShape(),
tensorType.getElementType(), layout, memSpace));
};

bufferization::BufferizationState bufferizationState;

if (failed(bufferization::runOneShotModuleBufferize(getOperation(), opt,
17 changes: 17 additions & 0 deletions mlir/test/lib/Dialect/Test/TestAttrDefs.td
@@ -22,6 +22,7 @@ include "mlir/IR/AttrTypeBase.td"
include "mlir/IR/BuiltinAttributeInterfaces.td"
include "mlir/IR/EnumAttr.td"
include "mlir/IR/OpAsmInterface.td"
include "mlir/IR/TensorEncoding.td"

// All of the attributes will extend this class.
class Test_Attr<string name, list<Trait> traits = []>
@@ -439,4 +440,20 @@ def TestCustomStorageCtorAttr : Test_Attr<"TestCustomStorageCtorAttr"> {
let hasStorageCustomConstructor = 1;
}

def TestTensorEncodingAttr : Test_Attr<"TestTensorEncoding",
[DeclareAttrInterfaceMethods<VerifiableTensorEncoding>]> {
let mnemonic = "tensor_encoding";

let parameters = (ins "mlir::StringAttr":$dummy);
let assemblyFormat = "`<` $dummy `>`";
}

def TestMemRefLayoutAttr : Test_Attr<"TestMemRefLayout",
[DeclareAttrInterfaceMethods<MemRefLayoutAttrInterface>]> {
let mnemonic = "memref_layout";

let parameters = (ins "mlir::StringAttr":$dummy);
let assemblyFormat = "`<` $dummy `>`";
}

#endif // TEST_ATTRDEFS
18 changes: 18 additions & 0 deletions mlir/test/lib/Dialect/Test/TestAttributes.cpp
@@ -541,6 +541,24 @@ test::detail::TestCustomStorageCtorAttrAttrStorage::construct(
return nullptr;
}

//===----------------------------------------------------------------------===//
// TestTensorEncodingAttr
//===----------------------------------------------------------------------===//

::llvm::LogicalResult TestTensorEncodingAttr::verifyEncoding(
mlir::ArrayRef<int64_t> shape, mlir::Type elementType,
llvm::function_ref<::mlir::InFlightDiagnostic()> emitError) const {
return mlir::success();
}

//===----------------------------------------------------------------------===//
// TestMemRefLayoutAttr
//===----------------------------------------------------------------------===//

mlir::AffineMap TestMemRefLayoutAttr::getAffineMap() const {
return mlir::AffineMap::getMultiDimIdentityMap(1, getContext());
}

//===----------------------------------------------------------------------===//
// TestDialect
//===----------------------------------------------------------------------===//
1 change: 1 addition & 0 deletions mlir/test/lib/Dialect/Test/TestAttributes.h
@@ -24,6 +24,7 @@
#include "mlir/IR/Dialect.h"
#include "mlir/IR/DialectImplementation.h"
#include "mlir/IR/DialectResourceBlobManager.h"
#include "mlir/IR/TensorEncoding.h"

// generated files require above includes to come first
#include "TestAttrInterfaces.h.inc"
1 change: 1 addition & 0 deletions mlir/test/lib/Dialect/Test/TestDialect.h
@@ -18,6 +18,7 @@
#include "TestInterfaces.h"
#include "TestTypes.h"
#include "mlir/Bytecode/BytecodeImplementation.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/DLTI/DLTI.h"
#include "mlir/Dialect/DLTI/Traits.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
5 changes: 4 additions & 1 deletion mlir/test/lib/Dialect/Test/TestDialect.td
@@ -24,7 +24,10 @@ def Test_Dialect : Dialect {
let useDefaultTypePrinterParser = 0;
let useDefaultAttributePrinterParser = 1;
let isExtensible = 1;
-let dependentDialects = ["::mlir::DLTIDialect"];
+let dependentDialects = [
+  "::mlir::DLTIDialect",
+  "::mlir::bufferization::BufferizationDialect"
[Comment] @andrey-golubev (Contributor, Author):
Seems required; otherwise some verifyInvariantsImpl checks fail internally when checking whether a ranked tensor is a tensor-like type.
+];
let discardableAttrs = (ins
"mlir::IntegerAttr":$discardable_attr_key,
"SimpleAAttr":$other_discardable_attr_key
44 changes: 38 additions & 6 deletions mlir/test/lib/Dialect/Test/TestOpDefs.cpp
@@ -1425,6 +1425,39 @@ TestMultiSlotAlloca::handleDestructuringComplete(
return createNewMultiAllocaWithoutSlot(slot, builder, *this);
}

namespace {
/// Returns test dialect's memref layout for test dialect's tensor encoding when
/// applicable.
MemRefLayoutAttrInterface
getMemRefLayoutForTensorEncoding(RankedTensorType tensorType) {
if (auto encoding =
dyn_cast<test::TestTensorEncodingAttr>(tensorType.getEncoding())) {
return cast<MemRefLayoutAttrInterface>(test::TestMemRefLayoutAttr::get(
tensorType.getContext(), encoding.getDummy()));
}
return {};
}

/// Auxiliary bufferization function for test and builtin tensors.
bufferization::BufferLikeType
convertTensorToBuffer(mlir::Operation *op,
const bufferization::BufferizationOptions &options,
bufferization::TensorLikeType tensorLike) {
auto buffer =
*tensorLike.getBufferType(options, [&]() { return op->emitError(); });
if (auto memref = dyn_cast<MemRefType>(buffer)) {
[Comment] @andrey-golubev (Contributor, Author):
note: if there were an options callback providing customizable layout inference, this branch could be avoided. Instead, the one-shot bufferization options would be configured once, and this whole function would become just: return tensorLike.getBufferType(...).
// Note: For the sake of testing, we want to ensure that encoding -> layout
// bufferization happens. This is currently achieved manually.
auto layout =
getMemRefLayoutForTensorEncoding(cast<RankedTensorType>(tensorLike));
return cast<bufferization::BufferLikeType>(
MemRefType::get(memref.getShape(), memref.getElementType(), layout,
memref.getMemorySpace()));
}
return buffer;
}
} // namespace

::mlir::LogicalResult test::TestDummyTensorOp::bufferize(
::mlir::RewriterBase &rewriter,
const ::mlir::bufferization::BufferizationOptions &options,
@@ -1435,8 +1468,8 @@ ::mlir::LogicalResult test::TestDummyTensorOp::bufferize(
return failure();

const auto outType = getOutput().getType();
-const auto bufferizedOutType = test::TestMemrefType::get(
-    getContext(), outType.getShape(), outType.getElementType(), nullptr);
+const auto bufferizedOutType =
+    convertTensorToBuffer(getOperation(), options, outType);
[Comment] Member:
It's better to call getBufferType here, so that the two functions cannot get out of sync.

[Reply] @andrey-golubev (Contributor, Author):
Do you mean Op::getBufferType? Note that this is a different op from the one below (dummy vs. create): the dummy op does not override ::getBufferType(). The "create tensor" op already does what you suggest here.
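For illustration, the suggestion would amount to roughly the following inside bufferize() (a sketch; it assumes this op gains a getBufferType override, that the free helper bufferization::getBufferType(Value, options, state) is available at this revision, and that a BufferizationState named state is in scope):

  // Sketch only: derive the result type through getBufferType so that
  // bufferize() and getBufferType() share one source of truth.
  FailureOr<bufferization::BufferLikeType> bufferizedOutType =
      bufferization::getBufferType(getOutput(), options, state);
  if (failed(bufferizedOutType))
    return failure();
  // *bufferizedOutType would then be passed to TestDummyMemrefOp::create below.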

// replace op with memref analogy
auto dummyMemrefOp = test::TestDummyMemrefOp::create(
rewriter, getLoc(), bufferizedOutType, *buffer);
@@ -1470,13 +1503,12 @@ ::mlir::LogicalResult test::TestCreateTensorOp::bufferize(

mlir::FailureOr<mlir::bufferization::BufferLikeType>
test::TestCreateTensorOp::getBufferType(
-mlir::Value value, const mlir::bufferization::BufferizationOptions &,
+mlir::Value value, const mlir::bufferization::BufferizationOptions &options,
const mlir::bufferization::BufferizationState &,
llvm::SmallVector<::mlir::Value> &) {
-const auto type = dyn_cast<test::TestTensorType>(value.getType());
+const auto type = dyn_cast<bufferization::TensorLikeType>(value.getType());
if (type == nullptr)
return failure();

-return cast<mlir::bufferization::BufferLikeType>(test::TestMemrefType::get(
-    getContext(), type.getShape(), type.getElementType(), nullptr));
+return convertTensorToBuffer(getOperation(), options, type);
}
15 changes: 8 additions & 7 deletions mlir/test/lib/Dialect/Test/TestOps.td
@@ -32,6 +32,7 @@ include "mlir/Interfaces/MemorySlotInterfaces.td"
include "mlir/Interfaces/SideEffectInterfaces.td"
include "mlir/Interfaces/ValueBoundsOpInterface.td"
include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td"
include "mlir/Dialect/Bufferization/IR/BufferizationTypeInterfaces.td"

// Include the attribute definitions.
include "TestAttrDefs.td"
@@ -2322,7 +2323,7 @@ def SideEffectWithRegionOp : TEST_Op<"side_effect_with_region_op",
}

//===----------------------------------------------------------------------===//
-// Copy Operation Test 
+// Copy Operation Test
//===----------------------------------------------------------------------===//

def CopyOp : TEST_Op<"copy", []> {
@@ -3663,10 +3664,10 @@ def TestDummyTensorOp : TEST_Op<"dummy_tensor_op",
["bufferize", "bufferizesToMemoryRead",
"bufferizesToMemoryWrite", "getAliasingValues"]>]> {
let arguments = (ins
-Arg<TestTensorType>:$input
+Arg<Bufferization_TensorLikeTypeInterface>:$input
);
let results = (outs
-Arg<TestTensorType>:$output
+Arg<Bufferization_TensorLikeTypeInterface>:$output
);

let extraClassDefinition = [{
@@ -3688,10 +3689,10 @@

def TestDummyMemrefOp : TEST_Op<"dummy_memref_op", []> {
let arguments = (ins
-Arg<TestMemrefType>:$input
+Arg<Bufferization_BufferLikeTypeInterface>:$input
);
let results = (outs
-Arg<TestMemrefType>:$output
+Arg<Bufferization_BufferLikeTypeInterface>:$output
);
}

@@ -3701,7 +3702,7 @@ def TestCreateTensorOp : TEST_Op<"create_tensor_op",
"bufferizesToMemoryWrite", "getAliasingValues",
"bufferizesToAllocation"]>]> {
let arguments = (ins);
-let results = (outs Arg<TestTensorType>:$output);
+let results = (outs Arg<Bufferization_TensorLikeTypeInterface>:$output);
let extraClassDefinition = [{
bool test::TestCreateTensorOp::bufferizesToMemoryRead(::mlir::OpOperand&,
const ::mlir::bufferization::AnalysisState&) {
@@ -3725,7 +3726,7 @@

def TestCreateMemrefOp : TEST_Op<"create_memref_op"> {
let arguments = (ins);
-let results = (outs Arg<TestMemrefType>:$output);
+let results = (outs Arg<Bufferization_BufferLikeTypeInterface>:$output);
}

//===----------------------------------------------------------------------===//
Expand Down