[mlir][XeGPU] Add optional layout attribute to LoadGather StoreScatter ops #163414
@@ -843,7 +843,8 @@ def XeGPU_LoadGatherOp : XeGPU_Op<"load", [MemoryEffects<[MemRead]>]> {
       AnyTypeOf<[XeGPU_MaskType, I1]>:$mask, OptionalAttr<I64Attr>:$chunk_size,
       OptionalAttr<XeGPU_CacheHintAttr>:$l1_hint,
       OptionalAttr<XeGPU_CacheHintAttr>:$l2_hint,
-      OptionalAttr<XeGPU_CacheHintAttr>:$l3_hint);
+      OptionalAttr<XeGPU_CacheHintAttr>:$l3_hint,
+      OptionalAttr<DistributeLayoutAttr>:$layout);
   let results = (outs AnyTypeOf<[XeGPU_ValueType, XeGPU_ScalarType]>:$value);
 
   let extraClassDeclaration = extraBaseClassDeclaration # [{
@@ -852,6 +853,16 @@ def XeGPU_LoadGatherOp : XeGPU_Op<"load", [MemoryEffects<[MemRead]>]> {
       return getSource().getType();
     }
 
+    xegpu::DistributeLayoutAttr getDistributeLayout() {
+      xegpu::DistributeLayoutAttr layout = nullptr;
+      if (auto tdescType = getTensorDescType()) {
+        layout = tdescType.getLayoutAttr();
+      }
+      if (!layout)
+        layout = getLayoutAttr();
+      return layout;
+    }
+
     TypedValue<xegpu::TensorDescType> getTensorDesc() {
       if (auto tdescType = getTensorDescType()) {
         return llvm::cast<TypedValue<xegpu::TensorDescType>>(getSource());
@@ -895,7 +906,19 @@ def XeGPU_LoadGatherOp : XeGPU_Op<"load", [MemoryEffects<[MemRead]>]> {
                    "IntegerAttr": $chunk_size,
                    "xegpu::CachePolicyAttr": $l1_hint,
                    "xegpu::CachePolicyAttr": $l2_hint,
-                   "xegpu::CachePolicyAttr": $l3_hint)>
+                   "xegpu::CachePolicyAttr": $l3_hint)>,
+    OpBuilder<(ins "Type": $value, "Value": $source, "Value": $mask,
+                   "xegpu::CachePolicyAttr": $l1_hint,
+                   "xegpu::CachePolicyAttr": $l2_hint,
+                   "xegpu::CachePolicyAttr": $l3_hint,
+                   "xegpu::DistributeLayoutAttr": $layout)>,
+    OpBuilder<(ins "Type": $value, "Value": $source,
+                   "ArrayRef<OpFoldResult>": $offsets, "Value": $mask,
+                   "IntegerAttr": $chunk_size,
+                   "xegpu::CachePolicyAttr": $l1_hint,
+                   "xegpu::CachePolicyAttr": $l2_hint,
+                   "xegpu::CachePolicyAttr": $l3_hint,
+                   "xegpu::DistributeLayoutAttr": $layout)>
   ];
 
   let hasVerifier = 1;
@@ -979,7 +1002,8 @@ def XeGPU_StoreScatterOp : XeGPU_Op<"store", [MemoryEffects<[MemWrite]>]> {
       AnyTypeOf<[XeGPU_MaskType, I1]>:$mask, OptionalAttr<I64Attr>:$chunk_size,
       OptionalAttr<XeGPU_CacheHintAttr>:$l1_hint,
       OptionalAttr<XeGPU_CacheHintAttr>:$l2_hint,
-      OptionalAttr<XeGPU_CacheHintAttr>:$l3_hint);
+      OptionalAttr<XeGPU_CacheHintAttr>:$l3_hint,
+      OptionalAttr<DistributeLayoutAttr>:$layout);
 
   let extraClassDeclaration = extraBaseClassDeclaration#[{
     Type getDestType() {
@@ -993,6 +1017,16 @@ def XeGPU_StoreScatterOp : XeGPU_Op<"store", [MemoryEffects<[MemWrite]>]> {
       return TypedValue<xegpu::TensorDescType>();
     }
 
+    xegpu::DistributeLayoutAttr getDistributeLayout() {
+      xegpu::DistributeLayoutAttr layout = nullptr;
+      if (auto tdescType = getTensorDescType()) {
+        layout = tdescType.getLayoutAttr();
+      }
+      if (!layout)
+        layout = getLayoutAttr();
+      return layout;
+    }
+
     xegpu::TensorDescType getTensorDescType() {
       return dyn_cast<xegpu::TensorDescType>(getDestType());
     }

[Review comment on the tdescType branch] no need to support the tdesc form.
@@ -1030,7 +1064,19 @@ def XeGPU_StoreScatterOp : XeGPU_Op<"store", [MemoryEffects<[MemWrite]>]> {
                    "IntegerAttr": $chunk_size,
                    "xegpu::CachePolicyAttr": $l1_hint,
                    "xegpu::CachePolicyAttr": $l2_hint,
-                   "xegpu::CachePolicyAttr": $l3_hint)>
+                   "xegpu::CachePolicyAttr": $l3_hint)>,
+    OpBuilder<(ins "Value": $value, "Value": $dest, "Value": $mask,
+                   "xegpu::CachePolicyAttr": $l1_hint,
+                   "xegpu::CachePolicyAttr": $l2_hint,
+                   "xegpu::CachePolicyAttr": $l3_hint,
+                   "xegpu::DistributeLayoutAttr": $layout)>,
+    OpBuilder<(ins "Value": $value, "Value": $dest,
+                   "ArrayRef<OpFoldResult>": $offsets, "Value": $mask,
+                   "IntegerAttr": $chunk_size,
+                   "xegpu::CachePolicyAttr": $l1_hint,
+                   "xegpu::CachePolicyAttr": $l2_hint,
+                   "xegpu::CachePolicyAttr": $l3_hint,
+                   "xegpu::DistributeLayoutAttr": $layout)>
   ];
 
   let hasVerifier = 1;
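For illustration only (this snippet is not part of the diff): with the ODS changes above, the optional `layout` rides in the ops' inline attribute dictionary next to `chunk_size`, in exactly the form the updated tests below check for. The store line is an assumed analogous sketch for `StoreScatterOp`, which gains the same attribute.

```mlir
// Gather load from a plain memref carrying an explicit layout (as in the
// updated propagation tests below).
%v = xegpu.load %src[%offsets], %mask <{chunk_size = 8 : i64,
       layout = #xegpu.layout<lane_layout = [16, 1], lane_data = [1, 2]>}>
     : memref<256xf16>, vector<16xindex>, vector<16xi1> -> vector<16x8xf16>

// Assumed analogous form for the scatter store.
xegpu.store %v, %dst[%offsets], %mask <{chunk_size = 8 : i64,
    layout = #xegpu.layout<lane_layout = [16, 1], lane_data = [1, 2]>}>
  : vector<16x8xf16>, memref<256xf16>, vector<16xindex>, vector<16xi1>
```

Note that per `getDistributeLayout()`, a layout carried by a tensor_desc-typed source still wins; the new attribute is only consulted as a fallback.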
@@ -816,7 +816,7 @@ void LoadGatherOp::build(OpBuilder &builder, OperationState &state,
                          xegpu::CachePolicyAttr l2_hint,
                          xegpu::CachePolicyAttr l3_hint) {
   build(builder, state, valueType, source, Value(), mask, IntegerAttr(),
-        l1_hint, l2_hint, l3_hint);
+        l1_hint, l2_hint, l3_hint, /*layout=*/nullptr);
 }
 
 void LoadGatherOp::build(OpBuilder &builder, OperationState &state,
@@ -832,7 +832,34 @@ void LoadGatherOp::build(OpBuilder &builder, OperationState &state,
   auto offset = vector::FromElementsOp::create(builder, loc, type, values);
 
   build(builder, state, valueType, source, offset, mask, chunk_size, l1_hint,
-        l2_hint, l3_hint);
+        l2_hint, l3_hint, /*layout=*/nullptr);
+}
+
+void LoadGatherOp::build(OpBuilder &builder, OperationState &state,
+                         Type valueType, Value source, Value mask,
+                         xegpu::CachePolicyAttr l1_hint,
+                         xegpu::CachePolicyAttr l2_hint,
+                         xegpu::CachePolicyAttr l3_hint,
+                         DistributeLayoutAttr layout) {
+  build(builder, state, valueType, source, Value(), mask, IntegerAttr(),
+        l1_hint, l2_hint, l3_hint, layout);
+}
+
+void LoadGatherOp::build(OpBuilder &builder, OperationState &state,
+                         Type valueType, Value source,
+                         ArrayRef<OpFoldResult> offsets, Value mask,
+                         IntegerAttr chunk_size, xegpu::CachePolicyAttr l1_hint,
+                         xegpu::CachePolicyAttr l2_hint,
+                         xegpu::CachePolicyAttr l3_hint,
+                         DistributeLayoutAttr layout) {
+  auto loc = source.getLoc();
+  int64_t size = static_cast<int64_t>(offsets.size());
+  auto type = VectorType::get(size, builder.getIndexType());
+  auto values = getValueOrCreateConstantIndexOp(builder, loc, offsets);
+  auto offset = vector::FromElementsOp::create(builder, loc, type, values);
+
+  build(builder, state, valueType, source, offset, mask, chunk_size, l1_hint,
+        l2_hint, l3_hint, layout);
 }
 
 //===----------------------------------------------------------------------===//

[Review comment on the offset-less overload] why we have this form: load without offsets?
@@ -883,7 +910,7 @@ void StoreScatterOp::build(OpBuilder &builder, OperationState &state,
                            xegpu::CachePolicyAttr l2_hint,
                            xegpu::CachePolicyAttr l3_hint) {
   build(builder, state, value, dest, Value(), mask, IntegerAttr(), l1_hint,
-        l2_hint, l3_hint);
+        l2_hint, l3_hint, /*layout=*/nullptr);
 }
 
 void StoreScatterOp::build(OpBuilder &builder, OperationState &state,
@@ -901,7 +928,33 @@ void StoreScatterOp::build(OpBuilder &builder, OperationState &state,
 
   // Call the correct builder overload that does not expect result types.
   build(builder, state, value, dest, offset, mask, chunk_size, l1_hint, l2_hint,
-        l3_hint);
+        l3_hint, /*layout=*/nullptr);
+}
+
+void StoreScatterOp::build(OpBuilder &builder, OperationState &state,
+                           Value value, Value dest, Value mask,
+                           xegpu::CachePolicyAttr l1_hint,
+                           xegpu::CachePolicyAttr l2_hint,
+                           xegpu::CachePolicyAttr l3_hint,
+                           DistributeLayoutAttr layout) {
+  build(builder, state, value, dest, Value(), mask, IntegerAttr(), l1_hint,
+        l2_hint, l3_hint, layout);
+}
+
+void StoreScatterOp::build(
+    OpBuilder &builder, OperationState &state, Value value, Value dest,
+    ArrayRef<OpFoldResult> offsets, Value mask, IntegerAttr chunk_size,
+    xegpu::CachePolicyAttr l1_hint, xegpu::CachePolicyAttr l2_hint,
+    xegpu::CachePolicyAttr l3_hint, DistributeLayoutAttr layout) {
+  auto loc = dest.getLoc();
+  int64_t size = static_cast<int64_t>(offsets.size());
+  auto type = VectorType::get(size, builder.getIndexType());
+  auto values = getValueOrCreateConstantIndexOp(builder, loc, offsets);
+  auto offset = vector::FromElementsOp::create(builder, loc, type, values);
+
+  // Call the correct builder overload that does not expect result types.
+  build(builder, state, value, dest, offset, mask, chunk_size, l1_hint, l2_hint,
+        l3_hint, layout);
 }
 
 //===----------------------------------------------------------------------===//

[Review comment on the offset-less overload] also no offsets?
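A minimal call-site sketch (not from this PR; `rewriter`, `loc`, `resultType`, `src`, `offsets`, `mask`, and `tdescType` are all assumed to exist in a surrounding pattern) showing the new offset-taking overload with an explicit layout:

```cpp
// Hypothetical use of the new LoadGatherOp builder. Only the overload's
// signature comes from the diff above; every name here is assumed.
// Borrow a layout from an existing descriptor type; getLayoutAttr() is
// the same accessor the new getDistributeLayout() helper uses.
xegpu::DistributeLayoutAttr layout = tdescType.getLayoutAttr();
auto load = xegpu::LoadGatherOp::create(
    rewriter, loc, resultType, src, /*offsets=*/offsets, mask,
    /*chunk_size=*/rewriter.getI64IntegerAttr(8),
    /*l1_hint=*/nullptr, /*l2_hint=*/nullptr, /*l3_hint=*/nullptr, layout);
```

Passing `nullptr` for the layout, as the pre-existing overloads now do internally, leaves the attribute unset, so parsing and verification behave as before.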
@@ -97,7 +97,7 @@ func.func @extf_truncf(%arg0: !xegpu.tensor_desc<8x16xf16>, %arg1: !xegpu.tensor
 // CHECK-NEXT: %[[CST0:.*]] = arith.constant {layout_result_0 = #xegpu.layout<lane_layout = [16], lane_data = [1]>} dense<true> : vector<16xi1>
 // CHECK-NEXT: %[[T2:.*]] = xegpu.create_tdesc %[[ARG1]], %[[CST]] : memref<256xf16>, vector<16xindex> ->
 // CHECK-SAME: !xegpu.tensor_desc<16x16xf16, #xegpu.scatter_tdesc_attr<chunk_size = 16 : i64>, #xegpu.layout<lane_layout = [16, 1], lane_data = [1, 2]>>
-// CHECK-NEXT: %{{.*}} = xegpu.load %[[T2]], %[[CST0]] {layout_result_0 = #xegpu.layout<lane_layout = [16, 1], lane_data = [1, 2]>}
+// CHECK-NEXT: %{{.*}} = xegpu.load %[[T2]], %[[CST0]] <{layout = #xegpu.layout<lane_layout = [16, 1], lane_data = [1, 2]>}>
 // CHECK-SAME: !xegpu.tensor_desc<16x16xf16, #xegpu.scatter_tdesc_attr<chunk_size = 16 : i64>, #xegpu.layout<lane_layout = [16, 1], lane_data = [1, 2]>>, vector<16xi1> -> vector<16x16xf16>
 func.func @load_gather_with_chunksize(%arg0: memref<8x16xf16>, %arg1: memref<256xf16>, %arg2: memref<8x16xf32>) {
   %c0 = arith.constant 0 : index

[Review comment] I don't think you need to change the layout here.

[Review comment] You may create a separate test that checks how the propagation honors the user's setting: say, the user sets a different layout. Once the user has set it, the propagation should honor that setting instead of using its default one. Note that this xegpu.load variant is to be deprecated; please focus on the xegpu.load variant that takes a memref as input. (A sketch of such a test follows.)
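A hedged sketch of the separate test the reviewer asks for (hypothetical function name and layout values, using the memref-based xegpu.load form this PR focuses on): the user pins a non-default layout, and propagation is expected to keep it.

```mlir
// Hypothetical: lane_data = [1, 1] differs from the default [1, 2] the
// propagation would otherwise pick for this chunked f16 load.
// CHECK-LABEL: func.func @load_gather_user_layout(
// CHECK: xegpu.load {{.*}} <{chunk_size = 8 : i64,
// CHECK-SAME: layout = #xegpu.layout<lane_layout = [16, 1], lane_data = [1, 1]>}>
func.func @load_gather_user_layout(%src: memref<256xf16>) {
  %mask = arith.constant dense<true> : vector<16xi1>
  %off = arith.constant dense<12> : vector<16xindex>
  %v = xegpu.load %src[%off], %mask <{chunk_size = 8 : i64,
         layout = #xegpu.layout<lane_layout = [16, 1], lane_data = [1, 1]>}>
       : memref<256xf16>, vector<16xindex>, vector<16xi1> -> vector<16x8xf16>
  xegpu.store %v, %src[%off], %mask <{chunk_size = 8 : i64}>
       : vector<16x8xf16>, memref<256xf16>, vector<16xindex>, vector<16xi1>
  return
}
```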
@@ -122,7 +122,7 @@ func.func @load_gather_with_chunksize(%arg0: memref<8x16xf16>, %arg1: memref<256
 // CHECK-NEXT: %[[CST0:.*]] = arith.constant {layout_result_0 = #xegpu.layout<lane_layout = [16], lane_data = [1]>} dense<true> : vector<16xi1>
 // CHECK-NEXT: %[[T0:.*]] = xegpu.create_tdesc %[[ARG0]], %[[CST]] : memref<256xf32>, vector<16xindex> ->
 // CHECK-SAME: !xegpu.tensor_desc<16xf32, #xegpu.scatter_tdesc_attr<>, #xegpu.layout<lane_layout = [16], lane_data = [1]>>
-// CHECK-NEXT: %{{.*}} = xegpu.load %[[T0]], %[[CST0]] {layout_result_0 = #xegpu.layout<lane_layout = [16], lane_data = [1]>} :
+// CHECK-NEXT: %{{.*}} = xegpu.load %[[T0]], %[[CST0]] <{layout = #xegpu.layout<lane_layout = [16], lane_data = [1]>}> :
 // CHECK-SAME: !xegpu.tensor_desc<16xf32, #xegpu.scatter_tdesc_attr<>, #xegpu.layout<lane_layout = [16], lane_data = [1]>>, vector<16xi1> -> vector<16xf32>
 func.func @load_gather_1d(%arg0: memref<256xf32>, %arg1: !xegpu.tensor_desc<16xf32>) {
   %cst = arith.constant dense<[0, 16, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 240]> : vector<16xindex>
@@ -167,8 +167,8 @@ func.func @store_scatter_1d(%arg0: vector<16xf32>, %arg1: memref<256xf32>) {
 // CHECK-SAME: %[[ARG0:[0-9a-zA-Z]+]]: memref<256xf16>) {
 // CHECK: %[[MASK:.*]] = arith.constant {layout_result_0 = #xegpu.layout<lane_layout = [16], lane_data = [1]>} dense<true> : vector<16xi1>
 // CHECK: %[[OFFSETS:.*]] = arith.constant {layout_result_0 = #xegpu.layout<lane_layout = [16], lane_data = [1]>} dense<12> : vector<16xindex>
-// CHECK: %[[LOAD_VEC:.*]] = xegpu.load %[[ARG0]][%[[OFFSETS]]], %[[MASK]] <{chunk_size = 8 : i64}>
-// CHECK-SAME: {layout_result_0 = #xegpu.layout<lane_layout = [16, 1], lane_data = [1, 2]>} : memref<256xf16>, vector<16xindex>, vector<16xi1> -> vector<16x8xf16>
+// CHECK: %[[LOAD_VEC:.*]] = xegpu.load %[[ARG0]][%[[OFFSETS]]], %[[MASK]] <{chunk_size = 8 : i64,
+// CHECK-SAME: layout = #xegpu.layout<lane_layout = [16, 1], lane_data = [1, 2]>}> : memref<256xf16>, vector<16xindex>, vector<16xi1> -> vector<16x8xf16>
 // CHECK: xegpu.store %[[LOAD_VEC]], %[[ARG0]][%[[OFFSETS]]], %[[MASK]] <{chunk_size = 8 : i64}> : vector<16x8xf16>, memref<256xf16>, vector<16xindex>, vector<16xi1>
 func.func @scatter_ops_chunksize(%src: memref<256xf16>) {
   %1 = arith.constant dense<1>: vector<16xi1>
@@ -186,7 +186,7 @@ func.func @scatter_ops_chunksize(%src: memref<256xf16>) {
 // CHECK: %[[MASK:.*]] = arith.constant {layout_result_0 = #xegpu.layout<lane_layout = [16], lane_data = [1]>} dense<true> : vector<16xi1>
 // CHECK: %[[OFFSETS:.*]] = arith.constant {layout_result_0 = #xegpu.layout<lane_layout = [16], lane_data = [1]>} dense<12> : vector<16xindex>
 // CHECK: %[[LOAD_VEC:.*]] = xegpu.load %[[ARG0]][%[[OFFSETS]]], %[[MASK]]
-// CHECK-SAME: {layout_result_0 = #xegpu.layout<lane_layout = [16], lane_data = [1]>} : memref<256xf16>, vector<16xindex>, vector<16xi1> -> vector<16xf16>
+// CHECK-SAME: <{layout = #xegpu.layout<lane_layout = [16], lane_data = [1]>}> : memref<256xf16>, vector<16xindex>, vector<16xi1> -> vector<16xf16>
 // CHECK: xegpu.store %[[LOAD_VEC]], %[[ARG0]][%[[OFFSETS]]], %[[MASK]] : vector<16xf16>, memref<256xf16>, vector<16xindex>, vector<16xi1>
 func.func @scatter_ops(%src: memref<256xf16>) {
   %1 = arith.constant dense<1>: vector<16xi1>
@@ -136,9 +136,9 @@ gpu.module @xevm_module{
   %1 = arith.constant {layout_result_0 = #xegpu.layout<lane_layout = [16], lane_data = [1]>} dense<1>: vector<16xi1>
   %offset = arith.constant {layout_result_0 = #xegpu.layout<lane_layout = [16], lane_data = [1]>} dense<12> : vector<16xindex>
   %loaded = scf.if %pred -> (vector<16x8xf16>) {
-    %3 = xegpu.load %src[%offset], %1 <{chunk_size=8}> {
-      layout_result_0 = #xegpu.layout<lane_layout = [16, 1], lane_data = [1, 2]>
-    } : memref<256xf16>, vector<16xindex>, vector<16xi1> -> vector<16x8xf16>
+    %3 = xegpu.load %src[%offset], %1 <{chunk_size=8,
+      layout = #xegpu.layout<lane_layout = [16, 1], lane_data = [1, 2]>
+    }> : memref<256xf16>, vector<16xindex>, vector<16xi1> -> vector<16x8xf16>
     scf.yield %3 : vector<16x8xf16>
   } else {
     %3 = arith.constant {

[Review comment] I would leave these two tests as is.
@@ -168,9 +168,9 @@ gpu.module @xevm_module{
   %1 = arith.constant {layout_result_0 = #xegpu.layout<lane_layout = [16], lane_data = [1]>} dense<1>: vector<16xi1>
   %offset = arith.constant {layout_result_0 = #xegpu.layout<lane_layout = [16], lane_data = [1]>} dense<12> : vector<16xindex>
   scf.if %pred {
-    %3 = xegpu.load %src[%offset], %1 <{chunk_size=8}> {
-      layout_result_0 = #xegpu.layout<lane_layout = [16, 1], lane_data = [1, 2]>
-    } : memref<256xf16>, vector<16xindex>, vector<16xi1> -> vector<16x8xf16>
+    %3 = xegpu.load %src[%offset], %1 <{chunk_size=8,
+      layout = #xegpu.layout<lane_layout = [16, 1], lane_data = [1, 2]>
+    }> : memref<256xf16>, vector<16xindex>, vector<16xi1> -> vector<16x8xf16>
     xegpu.store %3, %src[%offset], %1 <{chunk_size=8}> : vector<16x8xf16>, memref<256xf16>, vector<16xindex>, vector<16xi1>
   }
   gpu.return
[Review comment] we are deprecating the load_gather w/ tdesc format, so no need to check it here.