Skip to content

Commit b3128ba

Browse files
authored
[NFC] Rename load_from/store_to_memref to load_from/store_to_buffer (#20897)
Renames `iree_codegen.load_from_memref` to `iree_codegen.load_from_buffer`, and `iree_codegen.store_to_memref` to `iree_codegen.store_to_buffer` for consistency with the op definition. Signed-off-by: Max Dawkins <[email protected]>
1 parent 922f751 commit b3128ba

18 files changed

+194
-194
lines changed

compiler/src/iree/compiler/Codegen/Common/BufferizeDispatchTensorLoadStore.cpp

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,7 @@ bufferizeDispatchTensorLoad(RewriterBase &rewriter,
5656
rewriter, loadOp.getLoc(), subspanOp, loadOp.getType(),
5757
loadOp.getMixedOffsets(), loadOp.getMixedSizes(),
5858
loadOp.getMixedStrides());
59-
rewriter.replaceOpWithNewOp<IREE::Codegen::LoadFromMemrefOp>(
59+
rewriter.replaceOpWithNewOp<IREE::Codegen::LoadFromBufferOp>(
6060
loadOp, loadOp.getType(), sourceBuffer);
6161
}
6262

@@ -68,10 +68,10 @@ bufferizeDispatchTensorStore(RewriterBase &rewriter,
6868
if (!subspanOp) {
6969
return;
7070
}
71-
// For the store_to_memref op, generate any subviews as early as possible in
72-
// the IR. This opens more opportunities for using the store_to_memref op's
71+
// For the store_to_buffer op, generate any subviews as early as possible in
72+
// the IR. This opens more opportunities for using the store_to_buffer op's
7373
// SubsetInsertionOpInterface, since equivalent subset extractions can only be
74-
// created after the store_to_memref op's output (the subspan or subview) in
74+
// created after the store_to_buffer op's output (the subspan or subview) in
7575
// the IR.
7676
OpBuilder::InsertionGuard g(rewriter);
7777
(void)setInsertionPointAfterLastNeededValue(
@@ -83,7 +83,7 @@ bufferizeDispatchTensorStore(RewriterBase &rewriter,
8383

8484
Value tensor = storeOp.getValue();
8585
rewriter.setInsertionPoint(storeOp);
86-
rewriter.replaceOpWithNewOp<IREE::Codegen::StoreToMemrefOp>(storeOp, tensor,
86+
rewriter.replaceOpWithNewOp<IREE::Codegen::StoreToBufferOp>(storeOp, tensor,
8787
outputBuffer);
8888
}
8989

compiler/src/iree/compiler/Codegen/Common/CombineLayoutTransformation.cpp

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -259,7 +259,7 @@ static void buildNestedDistributionLoops(
259259
/// the writing of padding values into a separate operation on the buffer that
260260
/// the map_scatter op is ultimately written into. The result buffer is taken
261261
/// from the direct consumer of the `mapScatterOp`, which is expected to be an
262-
/// `iree_codegen.store_to_memref` op. Return failure if the result buffer is
262+
/// `iree_codegen.store_to_buffer` op. Return failure if the result buffer is
263263
/// not found.
264264
static FailureOr<MapScatterOp>
265265
foldPadIntoMapScatter(RewriterBase &rewriter, tensor::PadOp padOp,
@@ -270,12 +270,12 @@ foldPadIntoMapScatter(RewriterBase &rewriter, tensor::PadOp padOp,
270270
return rewriter.notifyMatchFailure(
271271
mapScatterOp, "map_scatter does not have a single user");
272272
}
273-
auto storeOp = dyn_cast<IREE::Codegen::StoreToMemrefOp>(
273+
auto storeOp = dyn_cast<IREE::Codegen::StoreToBufferOp>(
274274
*mapScatterOp->getUsers().begin());
275275
if (!storeOp) {
276276
return rewriter.notifyMatchFailure(
277277
mapScatterOp,
278-
"map_scatter user is not an iree_codegen.store_to_memref op");
278+
"map_scatter user is not an iree_codegen.store_to_buffer op");
279279
}
280280

281281
rewriter.setInsertionPointAfter(storeOp);
@@ -420,7 +420,7 @@ combineRelayoutOpChain(RewriterBase &rewriter, MapScatterOp mapScatterOp,
420420

421421
static MapScatterOp
422422
insertIdentityMapScatter(RewriterBase &rewriter,
423-
IREE::Codegen::StoreToMemrefOp storeOp) {
423+
IREE::Codegen::StoreToBufferOp storeOp) {
424424
Location loc = storeOp->getLoc();
425425
OpBuilder::InsertionGuard g(rewriter);
426426
rewriter.setInsertionPoint(storeOp);
@@ -447,11 +447,11 @@ combineLayoutTransformation(MLIRContext *ctx, FunctionOpInterface funcOp,
447447
IRRewriter rewriter(ctx);
448448
simplifyComplexRelayoutOps(rewriter, funcOp);
449449

450-
// Start from iree_codegen.store_to_memref ops, and combine producer
450+
// Start from iree_codegen.store_to_buffer ops, and combine producer
451451
// relayout ops into a single map_scatter.
452-
SmallVector<IREE::Codegen::StoreToMemrefOp> dispatchResults(
453-
funcOp.getFunctionBody().getOps<IREE::Codegen::StoreToMemrefOp>());
454-
for (IREE::Codegen::StoreToMemrefOp dispatchResult : dispatchResults) {
452+
SmallVector<IREE::Codegen::StoreToBufferOp> dispatchResults(
453+
funcOp.getFunctionBody().getOps<IREE::Codegen::StoreToBufferOp>());
454+
for (IREE::Codegen::StoreToBufferOp dispatchResult : dispatchResults) {
455455
MapScatterOp mapScatterOp =
456456
insertIdentityMapScatter(rewriter, dispatchResult);
457457
combineRelayoutOpChain(rewriter, mapScatterOp, padDistributionConfigFn);

compiler/src/iree/compiler/Codegen/Common/CombineLayoutTransformation.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -32,15 +32,15 @@ using PadDistributionConfigFn = function_ref<SmallVector<DistributionConfig>(
3232
ArrayRef<int64_t> iterationBounds, MLIRContext *)>;
3333

3434
/// Combines any layout/indexing transformation ops at the ends of a dispatch.
35-
/// Finds `iree_codegen.store_to_memref` ops in the `funcOp`, and combines any
35+
/// Finds `iree_codegen.store_to_buffer` ops in the `funcOp`, and combines any
3636
/// layout transformation ops (like expand_shape, transpose, pack, etc.) that
3737
/// produce the tensor being stored into a single `iree_linalg_ext.map_scatter`
3838
/// op.
3939
///
4040
/// This transformation will also combine `tensor.pad` ops into the map_scatter
41-
/// op, by moving the writing of the padding values to after the store_to_memref
41+
/// op, by moving the writing of the padding values to after the store_to_buffer
4242
/// op, and writing the padding values directly to the output buffer of the
43-
/// store_to_memref. The writes of the pad values will be distributed based on
43+
/// store_to_buffer. The writes of the pad values will be distributed based on
4444
/// the `DistributionConfig`s returned by `padDistributionConfigFn`, and then
4545
/// the inner distributed tile will be tiled to a loop nest of memref.store ops.
4646
LogicalResult

compiler/src/iree/compiler/Codegen/Common/GPU/Passes.td

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -35,12 +35,12 @@ def GPUCombineLayoutTransformationPass :
3535
let summary =
3636
"Combines layout transformation operations into a single map_scatter operation.";
3737
let description = [{
38-
Starting from iree_codegen.store_to_memref ops, iteratively combine producer
38+
Starting from iree_codegen.store_to_buffer ops, iteratively combine producer
3939
layout/indexing transformation ops (linalg.transpose, tensor.collapse_shape,
4040
etc.) into a single iree_linalg_ext.map_scatter operation. For tensor.pad
4141
ops, the writing of pad values is distributed to workgroups and threads, and
4242
then the padding values are written directly to the output buffer of the
43-
store_to_memref op.
43+
store_to_buffer op.
4444
}];
4545
let dependentDialects = [
4646
"iree_compiler::IREE::LinalgExt::IREELinalgExtDialect",

compiler/src/iree/compiler/Codegen/Common/GPU/test/gpu_combine_layout_transformation.mlir

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@ func.func @fold_pad_op(%source : tensor<250xf32>, %result : memref<256xf32>) {
66
^bb0(%arg0: index):
77
tensor.yield %cst : f32
88
} : tensor<250xf32> to tensor<256xf32>
9-
iree_codegen.store_to_memref %padded, %result : tensor<256xf32> into memref<256xf32>
9+
iree_codegen.store_to_buffer %padded, %result : tensor<256xf32> into memref<256xf32>
1010
return
1111
}
1212
// CHECK: #[[$MAP:.+]] = affine_map<(d0) -> (256, d0 + 64)>
@@ -25,7 +25,7 @@ func.func @fold_pad_op(%source : tensor<250xf32>, %result : memref<256xf32>) {
2525
// CHECK-NEXT: ^bb0(%[[IDX0:.+]]: index):
2626
// CHECK: iree_linalg_ext.yield %[[IDX0]], %[[TRUE]]
2727
// CHECK: } : tensor<250xf32> into tensor<256xf32> -> tensor<256xf32>
28-
// CHECK: iree_codegen.store_to_memref %[[MAP_SCATTER]], %[[RESULT]] : tensor<256xf32> into memref<256xf32>
28+
// CHECK: iree_codegen.store_to_buffer %[[MAP_SCATTER]], %[[RESULT]] : tensor<256xf32> into memref<256xf32>
2929

3030
// CHECK: scf.forall (%[[WG_IV:.+]]) = (0) to (256) step (64) {
3131
// CHECK: %[[WG_TILE_UB:.+]] = affine.min #[[$MAP]](%[[WG_IV]])

compiler/src/iree/compiler/Codegen/Common/Passes.td

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -51,8 +51,8 @@ def BufferizeDispatchTensorLoadStorePass :
5151
"Bufferize the iree_tensor_ext.dispatch.tensor.load/store ops at dispatch boundaries";
5252
let description = [{
5353
Pass to bufferize the edges of dispatch regions, converting
54-
iree_tensor_ext.dispatch.tensor.load ops to iree_codegen.load_from_memref, and
55-
iree_tensor_ext.dispatch.tensor.store ops to iree_codegen.store_to_memref.
54+
iree_tensor_ext.dispatch.tensor.load ops to iree_codegen.load_from_buffer, and
55+
iree_tensor_ext.dispatch.tensor.store ops to iree_codegen.store_to_buffer.
5656
}];
5757
let dependentDialects = [
5858
"IREE::Codegen::IREECodegenDialect",
@@ -128,12 +128,12 @@ def CombineLayoutTransformationPass :
128128
let summary =
129129
"Combines layout transformation operations into a single map_scatter operation.";
130130
let description = [{
131-
Starting from iree_codegen.store_to_memref ops, iteratively combine producer
131+
Starting from iree_codegen.store_to_buffer ops, iteratively combine producer
132132
layout/indexing transformation ops (linalg.transpose, tensor.collapse_shape,
133133
etc.) into a single iree_linalg_ext.map_scatter operation. For tensor.pad
134134
ops, the writing of pad values is distributed to workgroups, and then the
135135
padding values are written directly to the output buffer of the
136-
store_to_memref op.
136+
store_to_buffer op.
137137
}];
138138
let dependentDialects = [
139139
"iree_compiler::IREE::LinalgExt::IREELinalgExtDialect",

compiler/src/iree/compiler/Codegen/Common/test/bufferize_dispatch_tensor_load_store.mlir

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -19,9 +19,9 @@ func.func @dispatch_tensor_load_and_store() {
1919
// CHECK-SAME: binding(0) : memref<16xf32, #hal.descriptor_type<storage_buffer>>
2020
// CHECK: %[[OUTPUT:.+]] = hal.interface.binding.subspan
2121
// CHECK-SAME: binding(1) : memref<16xf32, #hal.descriptor_type<storage_buffer>>
22-
// CHECK: %[[LOAD:.+]] = iree_codegen.load_from_memref %[[INPUT]]
22+
// CHECK: %[[LOAD:.+]] = iree_codegen.load_from_buffer %[[INPUT]]
2323
// CHECK-SAME: : memref<16xf32, #hal.descriptor_type<storage_buffer>> -> tensor<16xf32>
24-
// CHECK: iree_codegen.store_to_memref %[[LOAD]], %[[OUTPUT]]
24+
// CHECK: iree_codegen.store_to_buffer %[[LOAD]], %[[OUTPUT]]
2525
// CHECK-SAME: : tensor<16xf32> into memref<16xf32, #hal.descriptor_type<storage_buffer>>
2626

2727
// -----
@@ -51,9 +51,9 @@ func.func @dispatch_tensor_load_and_store_slices() {
5151
// CHECK: %[[INPUT_SUBVIEW:.+]] = memref.subview %[[INPUT]][2] [12] [1]
5252
// CHECK-SAME: : memref<16xf32, #hal.descriptor_type<storage_buffer>> to
5353
// CHECK-SAME: memref<12xf32, strided<[1], offset: 2>, #hal.descriptor_type<storage_buffer>>
54-
// CHECK: %[[LOAD:.+]] = iree_codegen.load_from_memref %[[INPUT_SUBVIEW]]
54+
// CHECK: %[[LOAD:.+]] = iree_codegen.load_from_buffer %[[INPUT_SUBVIEW]]
5555
// CHECK-SAME: : memref<12xf32, strided<[1], offset: 2>, #hal.descriptor_type<storage_buffer>> -> tensor<12xf32>
56-
// CHECK: iree_codegen.store_to_memref %[[LOAD]], %[[OUTPUT_SUBVIEW]]
56+
// CHECK: iree_codegen.store_to_buffer %[[LOAD]], %[[OUTPUT_SUBVIEW]]
5757
// CHECK-SAME: : tensor<12xf32> into memref<12xf32, strided<[1], offset: 4>, #hal.descriptor_type<storage_buffer>>
5858

5959
// -----
@@ -81,10 +81,10 @@ func.func @dispatch_tensor_load_and_store_with_compute_op() {
8181
// CHECK-SAME: binding(1) : memref<16xf32, #hal.descriptor_type<storage_buffer>>
8282
// CHECK: %[[OUTPUT_SUBVIEW:.+]] = memref.subview %[[OUTPUT]][4] [12] [1]
8383
// CHECK: %[[INPUT_SUBVIEW:.+]] = memref.subview %[[INPUT]][2] [12] [1]
84-
// CHECK: %[[LOAD:.+]] = iree_codegen.load_from_memref %[[INPUT_SUBVIEW]]
84+
// CHECK: %[[LOAD:.+]] = iree_codegen.load_from_buffer %[[INPUT_SUBVIEW]]
8585
// CHECK: %[[INIT:.+]] = tensor.empty() : tensor<12xf32>
8686
// CHECK: %[[COPY:.+]] = linalg.copy ins(%[[LOAD]]{{.*}} outs(%[[INIT]]
87-
// CHECK: iree_codegen.store_to_memref %[[COPY]], %[[OUTPUT_SUBVIEW]]
87+
// CHECK: iree_codegen.store_to_buffer %[[COPY]], %[[OUTPUT_SUBVIEW]]
8888

8989
// -----
9090

@@ -117,9 +117,9 @@ func.func @dynamic_dispatch_tensor_load_and_store(%offset: index, %size: index,
117117
// CHECK: %[[INPUT_SUBVIEW:.+]] = memref.subview %[[INPUT]][%[[OFFSET]]] [%[[SIZE]]] [%[[STRIDE]]]
118118
// CHECK-SAME: : memref<?xf32, #hal.descriptor_type<storage_buffer>> to
119119
// CHECK-SAME: memref<?xf32, strided<[?], offset: ?>, #hal.descriptor_type<storage_buffer>>
120-
// CHECK: %[[LOAD:.+]] = iree_codegen.load_from_memref %[[INPUT_SUBVIEW]]
120+
// CHECK: %[[LOAD:.+]] = iree_codegen.load_from_buffer %[[INPUT_SUBVIEW]]
121121
// CHECK-SAME: : memref<?xf32, strided<[?], offset: ?>, #hal.descriptor_type<storage_buffer>> -> tensor<?xf32>
122-
// CHECK: iree_codegen.store_to_memref %[[LOAD]], %[[OUTPUT_SUBVIEW]]
122+
// CHECK: iree_codegen.store_to_buffer %[[LOAD]], %[[OUTPUT_SUBVIEW]]
123123
// CHECK-SAME: : tensor<?xf32> into memref<?xf32, strided<[?], offset: ?>, #hal.descriptor_type<storage_buffer>>
124124

125125
// -----
@@ -149,7 +149,7 @@ func.func @rank_reducing_slices() {
149149
// CHECK: %[[INPUT_SUBVIEW:.+]] = memref.subview %[[INPUT]][0, 2] [1, 12] [1, 1]
150150
// CHECK-SAME: : memref<8x16xf32, #hal.descriptor_type<storage_buffer>> to
151151
// CHECK-SAME: memref<12xf32, strided<[1], offset: 2>, #hal.descriptor_type<storage_buffer>>
152-
// CHECK: %[[LOAD:.+]] = iree_codegen.load_from_memref %[[INPUT_SUBVIEW]]
152+
// CHECK: %[[LOAD:.+]] = iree_codegen.load_from_buffer %[[INPUT_SUBVIEW]]
153153
// CHECK-SAME: : memref<12xf32, strided<[1], offset: 2>, #hal.descriptor_type<storage_buffer>> -> tensor<12xf32>
154-
// CHECK: iree_codegen.store_to_memref %[[LOAD]], %[[OUTPUT_SUBVIEW]]
154+
// CHECK: iree_codegen.store_to_buffer %[[LOAD]], %[[OUTPUT_SUBVIEW]]
155155
// CHECK-SAME: : tensor<12xf32> into memref<12xf32, strided<[1], offset: 4>, #hal.descriptor_type<storage_buffer>>

0 commit comments

Comments (0)