Commit 571596e

[LLVM Pulldown] Another LLVM version bump (#1084)
LLVM head is now at https://github.com/llvm/llvm-project/commits/306148b5412ab87b518becffa85908ba04611fc8
1 parent 5a388a3 commit 571596e

7 files changed: 37 additions, 138 deletions
build_tools/llvm_version.txt

Lines changed: 1 addition & 1 deletion
@@ -1 +1 @@
-c82e2f5c9ed08a270a1ec60bf7313af9c236ab98
+306148b5412ab87b518becffa85908ba04611fc8

build_tools/patches/0005-Add-memref.extract-aligned-pointer-as-index-to-spirv.patch

Lines changed: 0 additions & 72 deletions
This file was deleted.

build_tools/patches/0011-add-mem-copy-support-in-sycl-runtime.patch

Lines changed: 0 additions & 38 deletions
This file was deleted.

lib/Dialect/NDArray/Extensions/BufferizableOpInterfaceImpl.cpp

Lines changed: 16 additions & 10 deletions
@@ -39,8 +39,10 @@ struct SubviewOpInterface
     return {{op->getOpResult(0), BufferRelation::Unknown}};
   }

-  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
-                          const BufferizationOptions &options) const {
+  LogicalResult
+  bufferize(Operation *op, RewriterBase &rewriter,
+            const BufferizationOptions &options,
+            const mlir::bufferization::BufferizationState &state) const {
     auto subviewOp = cast<SubviewOp>(op);
     SmallVector<OpFoldResult> mixedOffsets = subviewOp.getMixedOffsets();
     SmallVector<OpFoldResult> mixedSizes = subviewOp.getMixedSizes();
@@ -49,13 +51,13 @@ struct SubviewOpInterface

     // Get source buffer.
     FailureOr<Value> srcMemref =
-        getBuffer(rewriter, subviewOp.getSource(), options);
+        getBuffer(rewriter, subviewOp.getSource(), options, state);
     if (failed(srcMemref))
       return failure();

     // Take a subview of the source buffer.
     auto resultMemrefType =
-        bufferization::getBufferType(subviewOp.getResult(), options);
+        bufferization::getBufferType(subviewOp.getResult(), options, state);
     if (failed(resultMemrefType))
       return failure();
     Value subView = rewriter.create<memref::SubViewOp>(
@@ -68,11 +70,12 @@ struct SubviewOpInterface

   FailureOr<BaseMemRefType>
   getBufferType(Operation *op, Value value, const BufferizationOptions &options,
+                const mlir::bufferization::BufferizationState &state,
                 SmallVector<Value> &invocationStack) const {
     auto subviewOp = cast<SubviewOp>(op);
     assert(value == subviewOp.getResult() && "invalid value");
-    auto srcMemrefType = bufferization::getBufferType(subviewOp.getSource(),
-                                                      options, invocationStack);
+    auto srcMemrefType = bufferization::getBufferType(
+        subviewOp.getSource(), options, state, invocationStack);
     if (failed(srcMemrefType))
       return failure();
     SmallVector<OpFoldResult> mixedOffsets = subviewOp.getMixedOffsets();
@@ -108,8 +111,10 @@ struct InsertSliceOpInterface
     return true;
   }

-  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
-                          const BufferizationOptions &options) const {
+  LogicalResult
+  bufferize(Operation *op, RewriterBase &rewriter,
+            const BufferizationOptions &options,
+            const mlir::bufferization::BufferizationState &state) const {
     // insert_slice ops arise from tiling and bufferizing them out-of-place is
     // generally a deal breaker. When used with loops, this ends up cloning the
     // whole tensor on every single iteration and is a symptom of a
@@ -123,10 +128,11 @@ struct InsertSliceOpInterface

     // Get destination buffer.
     auto dstMemref =
-        getBuffer(rewriter, insertSliceOp.getDestination(), options);
+        getBuffer(rewriter, insertSliceOp.getDestination(), options, state);
     if (failed(dstMemref))
       return failure();
-    auto srcMemref = getBuffer(rewriter, insertSliceOp.getSource(), options);
+    auto srcMemref =
+        getBuffer(rewriter, insertSliceOp.getSource(), options, state);
     if (failed(srcMemref))
       return failure();
     auto srcRank = mlir::cast<mlir::ShapedType>(srcMemref->getType()).getRank();
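
Across the interface implementations touched by this bump the change is mechanical: every bufferize and getBufferType hook gains an mlir::bufferization::BufferizationState parameter and forwards it to getBuffer and bufferization::getBufferType. A minimal sketch of the updated helper call, assuming current MLIR bufferization headers; the lookupBuffer wrapper is a hypothetical name used only for illustration:

#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/IR/PatternMatch.h"

// Hypothetical wrapper, for illustration only: shows the extra
// BufferizationState argument that the buffer-query helpers now require.
static mlir::FailureOr<mlir::Value>
lookupBuffer(mlir::RewriterBase &rewriter, mlir::Value tensorValue,
             const mlir::bufferization::BufferizationOptions &options,
             const mlir::bufferization::BufferizationState &state) {
  // Previously: getBuffer(rewriter, tensorValue, options)
  // Now the BufferizationState is threaded through as well.
  return mlir::bufferization::getBuffer(rewriter, tensorValue, options, state);
}

The Region dialect changes below apply the same signature update to convertToBuffers and its callers.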

lib/Dialect/Region/Transforms/BufferizableOpInterfaceImpl.cpp

Lines changed: 10 additions & 7 deletions
@@ -20,12 +20,13 @@ ::mlir::LogicalResult
 convertToBuffers(::mlir::ValueRange values,
                  ::mlir::SmallVector<::mlir::Value> &buffers,
                  ::mlir::RewriterBase &rewriter,
-                 const ::mlir::bufferization::BufferizationOptions &options) {
+                 const ::mlir::bufferization::BufferizationOptions &options,
+                 const mlir::bufferization::BufferizationState &state) {
   buffers.reserve(values.size());
   for (auto val : values) {
     if (::mlir::isa<::mlir::TensorType>(val.getType())) {
       ::mlir::FailureOr<::mlir::Value> maybeBuffer =
-          ::mlir::bufferization::getBuffer(rewriter, val, options);
+          ::mlir::bufferization::getBuffer(rewriter, val, options, state);
       if (failed(maybeBuffer)) {
         return ::mlir::failure();
       }
@@ -69,18 +70,19 @@ struct EnvironmentRegionOpInterface

   ::mlir::LogicalResult
   bufferize(::mlir::Operation *op, ::mlir::RewriterBase &rewriter,
-            const ::mlir::bufferization::BufferizationOptions &options) const {
+            const ::mlir::bufferization::BufferizationOptions &options,
+            mlir::bufferization::BufferizationState &bufState) const {
     auto envOp = ::mlir::cast<region::EnvironmentRegionOp>(op);
     // Convert op arguments to memrefs.
     ::mlir::SmallVector<::mlir::Value> newArguments;
     if (failed(convertToBuffers(envOp.getArgs(), newArguments, rewriter,
-                                options))) {
+                                options, bufState))) {
       return ::mlir::failure();
     }
     // Infer result memref types by converting yield op operands to memrefs
     ::mlir::SmallVector<::mlir::Value> newResults;
     if (failed(convertToBuffers(envOp.getBody()->getTerminator()->getOperands(),
-                                newResults, rewriter, options))) {
+                                newResults, rewriter, options, bufState))) {
       return ::mlir::failure();
     }
     ::mlir::TypeRange resTypes(newResults);
@@ -134,13 +136,14 @@ struct EnvironmentRegionYieldOpInterface

   ::mlir::LogicalResult
   bufferize(::mlir::Operation *op, ::mlir::RewriterBase &rewriter,
-            const ::mlir::bufferization::BufferizationOptions &options) const {
+            const ::mlir::bufferization::BufferizationOptions &options,
+            mlir::bufferization::BufferizationState &state) const {
     auto yieldOp = ::mlir::cast<region::EnvironmentRegionYieldOp>(op);

     // Create a new terminator with bufferized operands.
     ::mlir::SmallVector<::mlir::Value> newOperands;
     if (failed(convertToBuffers(yieldOp.getOperands(), newOperands, rewriter,
-                                options))) {
+                                options, state))) {
       return ::mlir::failure();
     }
     ::mlir::bufferization::replaceOpWithNewBufferizedOp<

lib/Utils/PassUtils.cpp

Lines changed: 1 addition & 1 deletion
@@ -483,7 +483,7 @@ ::mlir::Value createToMemRef(::mlir::Location loc, ::mlir::OpBuilder &builder,
   auto mrTyp = mlir::cast<::mlir::MemRefType>(toTyp);
   auto shapedMrTyp =
       mlir::cast<::mlir::ShapedType>(mrTyp).clone(iTyp.getShape());
-  ::mlir::Value shapedMr = builder.create<::mlir::bufferization::ToMemrefOp>(
+  ::mlir::Value shapedMr = builder.create<::mlir::bufferization::ToBufferOp>(
       loc, shapedMrTyp, input);
   if (clone) {
     shapedMr = builder.create<::mlir::bufferization::CloneOp>(loc, shapedMr);
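
This one-line change tracks the upstream MLIR rename of bufferization.to_memref to bufferization.to_buffer (ToMemrefOp becomes ToBufferOp on the C++ side); the builder operands are unchanged. A minimal sketch of the call under the new name, assuming an OpBuilder, Location, memref result type, and tensor-typed input are in scope; the createToBuffer wrapper is a hypothetical name for illustration:

#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/IR/Builders.h"

// Hypothetical helper, for illustration only: wraps a tensor value in the
// renamed bufferization.to_buffer op (formerly bufferization.to_memref).
static mlir::Value createToBuffer(mlir::OpBuilder &builder, mlir::Location loc,
                                  mlir::MemRefType memrefType,
                                  mlir::Value tensorInput) {
  return builder.create<mlir::bufferization::ToBufferOp>(loc, memrefType,
                                                         tensorInput);
}

The FileCheck updates in the test below switch their patterns from bufferization.to_memref to bufferization.to_buffer for the same reason.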

test/Conversion/NDArrayToLinalg/NDArrayToLinalg.mlir

Lines changed: 9 additions & 9 deletions
@@ -49,7 +49,7 @@ func.func @test_reshape2(%arg0: index) -> tensor<?x?xi64> {
 // CHECK: tensor.empty
 // CHECK: tensor.dim
 // CHECK: memref.alloc
-// CHECK: bufferization.to_memref
+// CHECK: bufferization.to_buffer
 // CHECK: region.env_region "protect_copy_op"
 // CHECK: memref.copy
 // CHECK: tensor.from_elements
@@ -92,24 +92,24 @@ func.func @test_env() -> (tensor<16x16xf32, #GPUENV>, tensor<256xf32, #GPUENV>)
 // COM: CHECK-NEXT: arith.constant 0 : index
 // COM: CHECK-NEXT: tensor.dim
 // COM: CHECK-NEXT: memref.alloc
-// COM: CHECK-NEXT: bufferization.to_memref
+// COM: CHECK-NEXT: bufferization.to_buffer
 // COM: CHECK-NEXT: region.env_region "protect_copy_op"
 // COM: CHECK-NEXT: memref.copy
 // COM: CHECK-NEXT: }
 // COM: CHECK-NEXT: bufferization.to_tensor
-// COM: CHECK-NEXT: bufferization.to_memref
+// COM: CHECK-NEXT: bufferization.to_buffer
 // COM: CHECK-NEXT: arith.constant 0 : index
 // COM: CHECK-NEXT: tensor.dim
 // COM: CHECK-NEXT: memref.alloc
-// COM: CHECK-NEXT: bufferization.to_memref
+// COM: CHECK-NEXT: bufferization.to_buffer
 // COM: CHECK-NEXT: region.env_region "protect_copy_op"
 // COM: CHECK-NEXT: memref.copy
 // COM: CHECK-NEXT: }
 // COM: CHECK-NEXT: bufferization.to_tensor
 // COM: CHECK-NEXT: arith.constant 0 : index
 // COM: CHECK-NEXT: tensor.dim
 // COM: CHECK-NEXT: memref.alloc
-// COM: CHECK-NEXT: bufferization.to_memref
+// COM: CHECK-NEXT: bufferization.to_buffer
 // COM: CHECK-NEXT: region.env_region "protect_copy_op"
 // COM: CHECK-NEXT: memref.copy
 // COM: CHECK-NEXT: }
@@ -129,21 +129,21 @@ func.func @test_copy(%a: tensor<?xi64>) -> tensor<?xi64> {
 // CHECK-NEXT: [[vc0:%.*]] = arith.constant 0 : index
 // CHECK-NEXT: [[vdim:%.*]] = tensor.dim [[varg0]], [[vc0]] : tensor<?xi64>
 // CHECK-NEXT: [[valloc:%.*]] = memref.alloc([[vdim]]) {alignment = 8 : i64} : memref<?xi64>
-// CHECK-NEXT: [[v0:%.*]] = bufferization.to_memref [[varg0]] : tensor<?xi64> to memref<?xi64, strided<[?], offset: ?>>
+// CHECK-NEXT: [[v0:%.*]] = bufferization.to_buffer [[varg0]] : tensor<?xi64> to memref<?xi64, strided<[?], offset: ?>>
 // CHECK-NEXT: region.env_region "protect_copy_op" {
 // CHECK-NEXT: memref.copy [[v0]], [[valloc]] : memref<?xi64, strided<[?], offset: ?>> to memref<?xi64>
 // CHECK: [[v1:%.*]] = bufferization.to_tensor [[valloc]] restrict writable : memref<?xi64> to tensor<?xi64>
 // CHECK-NEXT: [[vc0_0:%.*]] = arith.constant 0 : index
 // CHECK-NEXT: [[vdim_1:%.*]] = tensor.dim [[v1]], [[vc0_0]] : tensor<?xi64>
 // CHECK-NEXT: [[valloc_2:%.*]] = memref.alloc([[vdim_1]]) {alignment = 8 : i64} : memref<?xi64>
-// CHECK-NEXT: [[v2:%.*]] = bufferization.to_memref [[v1]] : tensor<?xi64> to memref<?xi64, strided<[?], offset: ?>>
+// CHECK-NEXT: [[v2:%.*]] = bufferization.to_buffer [[v1]] : tensor<?xi64> to memref<?xi64, strided<[?], offset: ?>>
 // CHECK-NEXT: region.env_region "protect_copy_op" {
 // CHECK-NEXT: memref.copy [[v2]], [[valloc_2]] : memref<?xi64, strided<[?], offset: ?>> to memref<?xi64>
 // CHECK: [[v3:%.*]] = bufferization.to_tensor [[valloc_2]] restrict writable : memref<?xi64> to tensor<?xi64, #region.gpu_env<device = "XeGPU">>
 // CHECK-NEXT: [[vc0_3:%.*]] = arith.constant 0 : index
 // CHECK-NEXT: [[vdim_4:%.*]] = tensor.dim [[v3]], [[vc0_3]] : tensor<?xi64, #region.gpu_env<device = "XeGPU">>
 // CHECK-NEXT: [[valloc_5:%.*]] = memref.alloc([[vdim_4]]) {alignment = 8 : i64} : memref<?xi64>
-// CHECK-NEXT: [[v4:%.*]] = bufferization.to_memref [[v3]] : tensor<?xi64, #region.gpu_env<device = "XeGPU">> to memref<?xi64, strided<[?], offset: ?>>
+// CHECK-NEXT: [[v4:%.*]] = bufferization.to_buffer [[v3]] : tensor<?xi64, #region.gpu_env<device = "XeGPU">> to memref<?xi64, strided<[?], offset: ?>>
 // CHECK-NEXT: region.env_region "protect_copy_op" {
 // CHECK-NEXT: memref.copy [[v4]], [[valloc_5]] : memref<?xi64, strided<[?], offset: ?>> to memref<?xi64>
 // CHECK: [[v5:%.*]] = bufferization.to_tensor [[valloc_5]] restrict writable : memref<?xi64> to tensor<?xi64>
@@ -223,7 +223,7 @@ func.func @test_cast_elemtype_copy(%arg0: tensor<16xi32>) -> tensor<16xi32> {
   return %0 : tensor<16xi32>
 }
 // CHECK-LABEL: @test_cast_elemtype_copy
-// CHECK: bufferization.to_memref
+// CHECK: bufferization.to_buffer
 // CHECK: region.env_region "protect_copy_op"
 // CHECK-NEXT: memref.copy
 // CHECK-NEXT: }
