Skip to content

Commit 96cb62b

Browse files
committed
address comments
1 parent 1d3d12c commit 96cb62b

File tree

3 files changed: +6 additions, −6 deletions

mlir/include/mlir/Dialect/XeGPU/Transforms/Transforms.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,9 +17,9 @@ namespace xegpu {
 /// Options to control the XeGPU unrolling. Its main purpose is to
 /// provide a way to customize the native shape of the operation.
 struct UnrollOptions {
-  using FilterConstraintFnType = std::function<LogicalResult(Operation *op)>;
   /// Callback function that indicates whether vector unrolling should be
   /// attempted on the operation.
+  using FilterConstraintFnType = std::function<LogicalResult(Operation *op)>;
   FilterConstraintFnType filterConstraint = nullptr;
   UnrollOptions &setFilterConstraint(FilterConstraintFnType constraint) {
     filterConstraint = std::move(constraint);

mlir/lib/Dialect/XeGPU/Transforms/XeGPUUnroll.cpp

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -136,9 +136,9 @@ struct UnrollPattern : public OpRewritePattern<SourceOp> {
   }

 private:
-  const char *const packAttrName = "__xetile_blocking_pack__";
-  const char *const unpackAttrName = "__xetile_blocking_unpack__";
-  const char *const blockAttrName = "__xetile_blocking_inner_block__";
+  const char *const packAttrName = "__xegpu_blocking_pack__";
+  const char *const unpackAttrName = "__xegpu_blocking_unpack__";
+  const char *const blockAttrName = "__xegpu_blocking_tile_shape__";

   xegpu::UnrollOptions options;
 };

mlir/test/Dialect/XeGPU/xegpu-unroll-patterns.mlir

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ gpu.module @test {
   // CHECK-SAME: !xegpu.tensor_desc<8x16xf32>, !xegpu.tensor_desc<8x16xf32>,
   // CHECK-SAME: !xegpu.tensor_desc<8x16xf32>, !xegpu.tensor_desc<8x16xf32>,
   // CHECK-SAME: !xegpu.tensor_desc<8x16xf32>, !xegpu.tensor_desc<8x16xf32>
-  // CHECK-SAME: to !xegpu.tensor_desc<24x32xf32, #xegpu.layout<inst_data = [8, 16]>> {__xetile_blocking_inner_block__ = array<i64: 8, 16>, __xetile_blocking_unpack__}
+  // CHECK-SAME: to !xegpu.tensor_desc<24x32xf32, #xegpu.layout<inst_data = [8, 16]>> {__xegpu_blocking_tile_shape__ = array<i64: 8, 16>, __xegpu_blocking_unpack__}
   gpu.func @test_create_nd_tdesc(%src: memref<24x32xf32>) -> !xegpu.tensor_desc<24x32xf32, #xegpu.layout<inst_data = [8, 16]>> {
     %tdesc = xegpu.create_nd_tdesc %src[0, 0] : memref<24x32xf32> -> !xegpu.tensor_desc<24x32xf32, #xegpu.layout<inst_data = [8, 16]>>
     gpu.return %tdesc : !xegpu.tensor_desc<24x32xf32, #xegpu.layout<inst_data = [8, 16]>>
@@ -22,7 +22,7 @@ gpu.module @test {
   // CHECK-COUNT-2: [[tdesc:%.+]] = xegpu.create_nd_tdesc [[arg0]][{{.*}}] : memref<64xf32> -> !xegpu.tensor_desc<16xf32>
   // CHECK: [[cast:%.+]] = builtin.unrealized_conversion_cast
   // CHECK-SAME: !xegpu.tensor_desc<16xf32>, !xegpu.tensor_desc<16xf32>
-  // CHECK-SAME: to !xegpu.tensor_desc<32xf32, #xegpu.layout<inst_data = [16]>> {__xetile_blocking_inner_block__ = array<i64: 16>, __xetile_blocking_unpack__}
+  // CHECK-SAME: to !xegpu.tensor_desc<32xf32, #xegpu.layout<inst_data = [16]>> {__xegpu_blocking_tile_shape__ = array<i64: 16>, __xegpu_blocking_unpack__}
   gpu.func @test_create_nd_tdesc_1d(%src: memref<64xf32>) -> !xegpu.tensor_desc<32xf32, #xegpu.layout<inst_data = [16]>> {
     %tdesc = xegpu.create_nd_tdesc %src[0] : memref<64xf32> -> !xegpu.tensor_desc<32xf32, #xegpu.layout<inst_data = [16]>>
     gpu.return %tdesc : !xegpu.tensor_desc<32xf32, #xegpu.layout<inst_data = [16]>>

0 commit comments

Comments (0)