@@ -49,7 +49,7 @@ func.func @test_reshape2(%arg0: index) -> tensor<?x?xi64> {
 // CHECK: tensor.empty
 // CHECK: tensor.dim
 // CHECK: memref.alloc
-// CHECK: bufferization.to_memref
+// CHECK: bufferization.to_buffer
 // CHECK: region.env_region "protect_copy_op"
 // CHECK: memref.copy
 // CHECK: tensor.from_elements
@@ -92,24 +92,24 @@ func.func @test_env() -> (tensor<16x16xf32, #GPUENV>, tensor<256xf32, #GPUENV>)
 // COM: CHECK-NEXT: arith.constant 0 : index
 // COM: CHECK-NEXT: tensor.dim
 // COM: CHECK-NEXT: memref.alloc
-// COM: CHECK-NEXT: bufferization.to_memref
+// COM: CHECK-NEXT: bufferization.to_buffer
 // COM: CHECK-NEXT: region.env_region "protect_copy_op"
 // COM: CHECK-NEXT: memref.copy
 // COM: CHECK-NEXT: }
 // COM: CHECK-NEXT: bufferization.to_tensor
-// COM: CHECK-NEXT: bufferization.to_memref
+// COM: CHECK-NEXT: bufferization.to_buffer
 // COM: CHECK-NEXT: arith.constant 0 : index
 // COM: CHECK-NEXT: tensor.dim
 // COM: CHECK-NEXT: memref.alloc
-// COM: CHECK-NEXT: bufferization.to_memref
+// COM: CHECK-NEXT: bufferization.to_buffer
 // COM: CHECK-NEXT: region.env_region "protect_copy_op"
 // COM: CHECK-NEXT: memref.copy
 // COM: CHECK-NEXT: }
 // COM: CHECK-NEXT: bufferization.to_tensor
 // COM: CHECK-NEXT: arith.constant 0 : index
 // COM: CHECK-NEXT: tensor.dim
 // COM: CHECK-NEXT: memref.alloc
-// COM: CHECK-NEXT: bufferization.to_memref
+// COM: CHECK-NEXT: bufferization.to_buffer
 // COM: CHECK-NEXT: region.env_region "protect_copy_op"
 // COM: CHECK-NEXT: memref.copy
 // COM: CHECK-NEXT: }
@@ -129,21 +129,21 @@ func.func @test_copy(%a: tensor<?xi64>) -> tensor<?xi64> {
 // CHECK-NEXT: [[vc0:%.*]] = arith.constant 0 : index
 // CHECK-NEXT: [[vdim:%.*]] = tensor.dim [[varg0]], [[vc0]] : tensor<?xi64>
 // CHECK-NEXT: [[valloc:%.*]] = memref.alloc([[vdim]]) {alignment = 8 : i64} : memref<?xi64>
-// CHECK-NEXT: [[v0:%.*]] = bufferization.to_memref [[varg0]] : tensor<?xi64> to memref<?xi64, strided<[?], offset: ?>>
+// CHECK-NEXT: [[v0:%.*]] = bufferization.to_buffer [[varg0]] : tensor<?xi64> to memref<?xi64, strided<[?], offset: ?>>
 // CHECK-NEXT: region.env_region "protect_copy_op" {
 // CHECK-NEXT: memref.copy [[v0]], [[valloc]] : memref<?xi64, strided<[?], offset: ?>> to memref<?xi64>
 // CHECK: [[v1:%.*]] = bufferization.to_tensor [[valloc]] restrict writable : memref<?xi64> to tensor<?xi64>
 // CHECK-NEXT: [[vc0_0:%.*]] = arith.constant 0 : index
 // CHECK-NEXT: [[vdim_1:%.*]] = tensor.dim [[v1]], [[vc0_0]] : tensor<?xi64>
 // CHECK-NEXT: [[valloc_2:%.*]] = memref.alloc([[vdim_1]]) {alignment = 8 : i64} : memref<?xi64>
-// CHECK-NEXT: [[v2:%.*]] = bufferization.to_memref [[v1]] : tensor<?xi64> to memref<?xi64, strided<[?], offset: ?>>
+// CHECK-NEXT: [[v2:%.*]] = bufferization.to_buffer [[v1]] : tensor<?xi64> to memref<?xi64, strided<[?], offset: ?>>
 // CHECK-NEXT: region.env_region "protect_copy_op" {
 // CHECK-NEXT: memref.copy [[v2]], [[valloc_2]] : memref<?xi64, strided<[?], offset: ?>> to memref<?xi64>
 // CHECK: [[v3:%.*]] = bufferization.to_tensor [[valloc_2]] restrict writable : memref<?xi64> to tensor<?xi64, #region.gpu_env<device = "XeGPU">>
 // CHECK-NEXT: [[vc0_3:%.*]] = arith.constant 0 : index
 // CHECK-NEXT: [[vdim_4:%.*]] = tensor.dim [[v3]], [[vc0_3]] : tensor<?xi64, #region.gpu_env<device = "XeGPU">>
 // CHECK-NEXT: [[valloc_5:%.*]] = memref.alloc([[vdim_4]]) {alignment = 8 : i64} : memref<?xi64>
-// CHECK-NEXT: [[v4:%.*]] = bufferization.to_memref [[v3]] : tensor<?xi64, #region.gpu_env<device = "XeGPU">> to memref<?xi64, strided<[?], offset: ?>>
+// CHECK-NEXT: [[v4:%.*]] = bufferization.to_buffer [[v3]] : tensor<?xi64, #region.gpu_env<device = "XeGPU">> to memref<?xi64, strided<[?], offset: ?>>
 // CHECK-NEXT: region.env_region "protect_copy_op" {
 // CHECK-NEXT: memref.copy [[v4]], [[valloc_5]] : memref<?xi64, strided<[?], offset: ?>> to memref<?xi64>
 // CHECK: [[v5:%.*]] = bufferization.to_tensor [[valloc_5]] restrict writable : memref<?xi64> to tensor<?xi64>
@@ -223,7 +223,7 @@ func.func @test_cast_elemtype_copy(%arg0: tensor<16xi32>) -> tensor<16xi32> {
   return %0 : tensor<16xi32>
 }
 // CHECK-LABEL: @test_cast_elemtype_copy
-// CHECK: bufferization.to_memref
+// CHECK: bufferization.to_buffer
 // CHECK: region.env_region "protect_copy_op"
 // CHECK-NEXT: memref.copy
 // CHECK-NEXT: }
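
COM: These check updates track the upstream MLIR rename of bufferization.to_memref to bufferization.to_buffer; the op's semantics are unchanged. A minimal sketch of the op pair in the new spelling, with operand and result types copied from the @test_copy checks above rather than from any authoritative example:

  %buf = bufferization.to_buffer %t : tensor<?xi64> to memref<?xi64, strided<[?], offset: ?>>
  %t2 = bufferization.to_tensor %buf restrict writable : memref<?xi64> to tensor<?xi64>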