mlir/include/mlir/Dialect/XeGPU/IR/XeGPUOps.td (3 changes: 1 addition & 2 deletions)

@@ -327,8 +327,7 @@ def XeGPU_LoadNdOp : XeGPU_Op<"load_nd", [AllElementTypesMatch<["value", "Tensor
let hasVerifier = 1;
}

-def XeGPU_StoreNdOp : XeGPU_Op<"store_nd", [AllShapesMatch<["value", "TensorDesc"]>,
-                                            AllElementTypesMatch<["value", "TensorDesc"]>]> {
+def XeGPU_StoreNdOp : XeGPU_Op<"store_nd", [AllElementTypesMatch<["value", "TensorDesc"]>]> {
let summary = "stores a n-D block register region back to memory, currently only supports 2D";

let description = [{

mlir/lib/Dialect/XeGPU/IR/XeGPUOps.cpp (76 changes: 56 additions & 20 deletions)

@@ -73,6 +73,32 @@ static bool isWriteHintOrNone(const CachePolicyAttr &attr) {
kind == CachePolicy::WRITE_BACK || kind == CachePolicy::WRITE_THROUGH;
}

+// Validation of nd instruction arguments succeeds if any of these are
+// true:
+// - the tensor descriptor and the output vector shapes exactly match.
+// - the tensor descriptor has a sg_map attribute and the distributed vector
+//   shape matches the tensor descriptor shape when scaled using sg_map
+//   factors on each dimension.
+static bool isArgShapesValid(ArrayRef<int64_t> descShape,
+                             ArrayRef<int64_t> valShape, SGMapAttr sgMap) {
+  if (descShape == valShape)

Contributor: What if descShape == valShape and sgMap is valid? Does it mean the sgMap will be discarded?

Contributor Author: Sorry, I didn't get the question. If the shapes are equal, that means the nd load/store is valid and not distributed (if there's an sg_map, only the sg_map's validity is checked by SGMapAttr::verify). If the shapes don't match, then we either have a distributed or an invalid case. For the distributed case we check that the ranks are the same, the sg_map is present, and the scaled values for each dimension match.
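
For example, here is a minimal sketch of the distributed check, mirroring the tests added in this PR (cache hints omitted; `%src` is an assumed `memref<24x32xf32>`):

```mlir
%td = xegpu.create_nd_tdesc %src[0, 0] : memref<24x32xf32>
  -> !xegpu.tensor_desc<8x16xf32, #xegpu.sg_map<wi_layout = [1, 16], wi_data = [1, 1]>>

// Valid distributed shape: wi_layout scales it back to the desc shape,
// since 1 * 8 == 8 and 16 * 1 == 16.
%ok = xegpu.load_nd %td : !xegpu.tensor_desc<8x16xf32,
  #xegpu.sg_map<wi_layout = [1, 16], wi_data = [1, 1]>> -> vector<8x1xf32>

// Rejected: 16 * 2 == 32 != 16, so the verifier emits
// "Result shape doesn't match TensorDesc shape."
%bad = xegpu.load_nd %td : !xegpu.tensor_desc<8x16xf32,
  #xegpu.sg_map<wi_layout = [1, 16], wi_data = [1, 1]>> -> vector<8x2xf32>
```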

Contributor: NVM, it seems a dumb question. It seems to me that there are currently 3 stages: 1) pure SIMD code: sgMap == null and descShape == valShape. 2) a valid sgMap is attached to guide the lowering, but the code is not rewritten yet, so we have descShape == valShape and sgMap != null, but the sgMap is not effective yet. 3) the code is rewritten, so descShape != valShape, sgMap != null, and the sgMap is now effective.
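
A hypothetical sketch of those three stages side by side (shapes borrowed from the tests in this PR; cache hints omitted):

```mlir
gpu.func @sg_map_stages(%src: memref<24x32xf32>) {
  // 1) Pure SIMD: sgMap == null, descShape == valShape.
  %plain = xegpu.create_nd_tdesc %src[0, 0] : memref<24x32xf32>
    -> !xegpu.tensor_desc<8x16xf32>
  %simd = xegpu.load_nd %plain : !xegpu.tensor_desc<8x16xf32> -> vector<8x16xf32>

  // 2) sg_map attached to guide lowering, code not rewritten yet:
  //    descShape == valShape, sgMap != null but not effective yet.
  %mapped = xegpu.create_nd_tdesc %src[0, 0] : memref<24x32xf32>
    -> !xegpu.tensor_desc<8x16xf32, #xegpu.sg_map<wi_layout = [1, 16], wi_data = [1, 1]>>
  %staged = xegpu.load_nd %mapped : !xegpu.tensor_desc<8x16xf32,
    #xegpu.sg_map<wi_layout = [1, 16], wi_data = [1, 1]>> -> vector<8x16xf32>

  // 3) Code rewritten to the per-work-item form: descShape != valShape,
  //    validated as wi_layout[d] * valShape[d] == descShape[d].
  %wi = xegpu.load_nd %mapped : !xegpu.tensor_desc<8x16xf32,
    #xegpu.sg_map<wi_layout = [1, 16], wi_data = [1, 1]>> -> vector<8x1xf32>
  gpu.return
}
```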

Contributor Author: Yup, those are all valid states.

+    return true;
+
+  if (!sgMap)
+    return false;
+
+  if (valShape.size() != descShape.size())
+    return false;
+
+  for (const auto &[factor, dim, expected] :
+       llvm::zip_equal(sgMap.getWiLayout(), valShape, descShape)) {
+    if (factor * dim != expected)
+      return false;
+  }
+
+  return true;
+}

//===----------------------------------------------------------------------===//
// XeGPU_CreateNdDescOp
//===----------------------------------------------------------------------===//
@@ -210,13 +236,13 @@ LogicalResult PrefetchNdOp::verify() {
return emitOpError("Expects a non-scattered TensorDesc.\n");

if (!isReadHintOrNone(getL1HintAttr()))
return emitOpError("invlid l1_hint: ") << getL1HintAttr();
return emitOpError("invalid l1_hint: ") << getL1HintAttr();

Contributor: Thanks for fixing these typos.

if (!isReadHintOrNone(getL2HintAttr()))
return emitOpError("invlid l2_hint: ") << getL2HintAttr();
return emitOpError("invalid l2_hint: ") << getL2HintAttr();

if (!isReadHintOrNone(getL3HintAttr()))
return emitOpError("invlid l3_hint: ") << getL3HintAttr();
return emitOpError("invalid l3_hint: ") << getL3HintAttr();

return success();
}
@@ -238,13 +264,13 @@ LogicalResult LoadNdOp::verify() {
return emitOpError("Invalid result, it should be a VectorType.\n");

if (!isReadHintOrNone(getL1HintAttr()))
return emitOpError("invlid l1_hint: ") << getL1HintAttr();
return emitOpError("invalid l1_hint: ") << getL1HintAttr();

if (!isReadHintOrNone(getL2HintAttr()))
return emitOpError("invlid l2_hint: ") << getL2HintAttr();
return emitOpError("invalid l2_hint: ") << getL2HintAttr();

if (!isReadHintOrNone(getL3HintAttr()))
return emitOpError("invlid l3_hint: ") << getL3HintAttr();
return emitOpError("invalid l3_hint: ") << getL3HintAttr();

auto array_len = tdescTy.getArrayLength();
auto tdescShape = getShapeOf(tdescTy);
@@ -280,8 +306,9 @@ LogicalResult LoadNdOp::verify() {
auto it = tdescShape.begin();
tdescShape.insert(it, array_len);
}
+auto sgMap = tdescTy.getSGMapAttr();

-if (tdescShape != valueShape)
+if (!isArgShapesValid(tdescShape, valueShape, sgMap))
return emitOpError() << "Result shape doesn't match TensorDesc shape."
<< "The expected shape is " << makeString(tdescShape)
<< ". But the given shape is "
@@ -303,17 +330,26 @@ LogicalResult StoreNdOp::verify() {
return emitOpError("Expects a non-scattered TensorDesc.\n");

if (!valTy)
return emitOpError("Exepcting a VectorType result.\n");
return emitOpError("Expecting a VectorType result.\n");

if (!isWriteHintOrNone(getL1HintAttr()))
return emitOpError("invlid l1_hint: ") << getL1HintAttr();
return emitOpError("invalid l1_hint: ") << getL1HintAttr();

if (!isWriteHintOrNone(getL2HintAttr()))
return emitOpError("invlid l2_hint: ") << getL2HintAttr();
return emitOpError("invalid l2_hint: ") << getL2HintAttr();

if (!isWriteHintOrNone(getL3HintAttr()))
return emitOpError("invlid l3_hint: ") << getL3HintAttr();
return emitOpError("invalid l3_hint: ") << getL3HintAttr();

+  auto tdescShape = getShapeOf(dstTy);
+  auto valueShape = getShapeOf(valTy);
+  auto sgMap = dstTy.getSGMapAttr();
+
+  if (!isArgShapesValid(tdescShape, valueShape, sgMap))
+    return emitOpError() << "Result shape doesn't match TensorDesc shape."
+                         << "The expected shape is " << makeString(tdescShape)
+                         << ". But the given shape is "
+                         << makeString(valueShape) << ".\n";
return success();
}

@@ -423,13 +459,13 @@ LogicalResult PrefetchOp::verify() {
return emitOpError("Expects a scattered TensorDesc.\n");

if (!isReadHintOrNone(getL1HintAttr()))
return emitOpError("invlid l1_hint: ") << getL1HintAttr();
return emitOpError("invalid l1_hint: ") << getL1HintAttr();

if (!isReadHintOrNone(getL2HintAttr()))
return emitOpError("invlid l2_hint: ") << getL2HintAttr();
return emitOpError("invalid l2_hint: ") << getL2HintAttr();

if (!isReadHintOrNone(getL3HintAttr()))
return emitOpError("invlid l3_hint: ") << getL3HintAttr();
return emitOpError("invalid l3_hint: ") << getL3HintAttr();

return success();
}
@@ -446,13 +482,13 @@ LogicalResult LoadGatherOp::verify() {
return emitOpError("Expects a scattered TensorDesc.\n");

if (!isReadHintOrNone(getL1HintAttr()))
return emitOpError("invlid l1_hint: ") << getL1HintAttr();
return emitOpError("invalid l1_hint: ") << getL1HintAttr();

if (!isReadHintOrNone(getL2HintAttr()))
return emitOpError("invlid l2_hint: ") << getL2HintAttr();
return emitOpError("invalid l2_hint: ") << getL2HintAttr();

if (!isReadHintOrNone(getL3HintAttr()))
return emitOpError("invlid l3_hint: ") << getL3HintAttr();
return emitOpError("invalid l3_hint: ") << getL3HintAttr();

auto tdescElemTy = tdescTy.getElementType();
auto valueElemTy = getElementType();
@@ -490,13 +526,13 @@ LogicalResult StoreScatterOp::verify() {
return emitOpError("Expects a scattered TensorDesc.\n");

if (!isWriteHintOrNone(getL1HintAttr()))
return emitOpError("invlid l1_hint: ") << getL1HintAttr();
return emitOpError("invalid l1_hint: ") << getL1HintAttr();

if (!isWriteHintOrNone(getL2HintAttr()))
return emitOpError("invlid l2_hint: ") << getL2HintAttr();
return emitOpError("invalid l2_hint: ") << getL2HintAttr();

if (!isWriteHintOrNone(getL3HintAttr()))
return emitOpError("invlid l3_hint: ") << getL3HintAttr();
return emitOpError("invalid l3_hint: ") << getL3HintAttr();

auto maskTy = getMaskType();
auto valueTy = getValueType();

mlir/test/Dialect/XeGPU/XeGPUOps.mlir (24 changes: 24 additions & 0 deletions)

@@ -86,6 +86,17 @@ gpu.func @test_load_nd_vc_2(%src: memref<8x16xf16>) {
gpu.return
}

+// load_nd args may have different shapes, validated against sg_map
+// CHECK: func @test_load_nd_vc_3(%[[arg0:.*]]: memref<24x32xf32>) {
+gpu.func @test_load_nd_vc_3(%src: memref<24x32xf32>) {

Comment: @chencha3 do you remember what the 'vc' suffix stands for? I feel like for SIMT-ish examples it is not relevant :)

+  // CHECK: %[[R0:.*]] = xegpu.create_nd_tdesc %arg0[0, 0] : memref<24x32xf32> -> !xegpu.tensor_desc<8x16xf32, #xegpu.sg_map<wi_layout = [1, 16], wi_data = [1, 1]>>
+  %1 = xegpu.create_nd_tdesc %src[0, 0] : memref<24x32xf32> ->
+    !xegpu.tensor_desc<8x16xf32, #xegpu.sg_map<wi_layout = [1, 16], wi_data = [1, 1]>>
+  // CHECK: %[[R1:.*]] = xegpu.load_nd %[[R0]] <{l1_hint = #xegpu.cache_hint<cached>, l2_hint = #xegpu.cache_hint<uncached>}> : !xegpu.tensor_desc<8x16xf32, #xegpu.sg_map<wi_layout = [1, 16], wi_data = [1, 1]>> -> vector<8x1xf32>
+  %2 = xegpu.load_nd %1 <{l1_hint = #xegpu.cache_hint<cached>, l2_hint = #xegpu.cache_hint<uncached>}> : !xegpu.tensor_desc<8x16xf32, #xegpu.sg_map<wi_layout = [1, 16], wi_data = [1, 1]>> -> vector<8x1xf32>
+  gpu.return
+}

// CHECK: func @test_store_nd_vc(%[[arg0:.*]]: memref<24x32xf16>) {
gpu.func @test_store_nd_vc(%dst: memref<24x32xf16>) {
// CHECK: %[[C:.*]] = arith.constant dense<1.000000e+00> : vector<24x32xf16>
@@ -108,6 +119,19 @@ gpu.func @test_store_nd_vc_2(%dst: memref<24x32xf16>) {
gpu.return
}

+// store_nd args may have different shapes, validated against sg_map
+// CHECK: func @test_store_nd_vc_3(%[[arg0:.*]]: memref<24x32xf16>) {
+gpu.func @test_store_nd_vc_3(%src: memref<24x32xf16>) {
+  // CHECK: %[[C:.*]] = arith.constant dense<1.000000e+00> : vector<24x2xf16>
+  %1 = arith.constant dense<1.0>: vector<24x2xf16>
+  // CHECK: %[[R0:.*]] = xegpu.create_nd_tdesc %arg0[0, 0] : memref<24x32xf16> -> !xegpu.tensor_desc<24x32xf16, #xegpu.sg_map<wi_layout = [1, 16], wi_data = [1, 1]>>
+  %2 = xegpu.create_nd_tdesc %src[0, 0] : memref<24x32xf16> ->
+    !xegpu.tensor_desc<24x32xf16, #xegpu.sg_map<wi_layout = [1, 16], wi_data = [1, 1]>>
+  // CHECK: xegpu.store_nd %[[C]], %[[R0]] <{l1_hint = #xegpu.cache_hint<write_back>, l2_hint = #xegpu.cache_hint<uncached>}> : vector<24x2xf16>, !xegpu.tensor_desc<24x32xf16, #xegpu.sg_map<wi_layout = [1, 16], wi_data = [1, 1]>>
+  xegpu.store_nd %1, %2 <{l1_hint = #xegpu.cache_hint<write_back>, l2_hint = #xegpu.cache_hint<uncached>}>: vector<24x2xf16>, !xegpu.tensor_desc<24x32xf16, #xegpu.sg_map<wi_layout = [1, 16], wi_data = [1, 1]>>
+  gpu.return
+}

// CHECK: gpu.func @test_create_update_nd_tdesc_vc(%[[arg0:.*]]: memref<24x32xf32>) {
gpu.func @test_create_update_nd_tdesc_vc(%src: memref<24x32xf32>) {
// CHECK: %[[REG:.*]] = xegpu.create_nd_tdesc %arg0[0, 0] : memref<24x32xf32> -> !xegpu.tensor_desc<8x16xf32>

mlir/test/Dialect/XeGPU/invalid.mlir (21 changes: 15 additions & 6 deletions)

@@ -32,7 +32,7 @@ func.func @test_create_nd_tdesc_vc_4(%src: memref<2x24x32xf32, 3>) {
// -----
func.func @test_prefetch_nd_vc_1(%src: memref<24x32xf16>) {
%1 = xegpu.create_nd_tdesc %src[0, 0] : memref<24x32xf16> -> !xegpu.tensor_desc<8x16xf16>
-// expected-error@+1 {{invlid l1_hint: #xegpu.cache_hint<write_back>}}
+// expected-error@+1 {{invalid l1_hint: #xegpu.cache_hint<write_back>}}
xegpu.prefetch_nd %1 <{l1_hint = #xegpu.cache_hint<write_back>}>: !xegpu.tensor_desc<8x16xf16>
return
}
@@ -51,7 +51,7 @@ func.func @test_prefetch_nd_vc_2(%src: memref<24xf16>) {
// -----
func.func @test_load_nd_vc_1(%src: memref<8x16xf16>) {
%1 = xegpu.create_nd_tdesc %src[0, 0] : memref<8x16xf16> -> !xegpu.tensor_desc<8x16xf16>
-// expected-error@+1 {{invlid l1_hint: #xegpu.cache_hint<write_back>}}
+// expected-error@+1 {{invalid l1_hint: #xegpu.cache_hint<write_back>}}
%2 = xegpu.load_nd %1 <{l1_hint = #xegpu.cache_hint<write_back>}>
: !xegpu.tensor_desc<8x16xf16> -> vector<4x16x2xf16>
return
Expand All @@ -77,11 +77,20 @@ func.func @test_load_nd_vc_3(%src: memref<8x16xf16>) {
return
}

+// -----
+func.func @test_load_nd_vc_4(%src: memref<24x32xf32>) {
+  %1 = xegpu.create_nd_tdesc %src[0, 0] : memref<24x32xf32> ->
+    !xegpu.tensor_desc<8x16xf32, #xegpu.sg_map<wi_layout = [1, 16], wi_data = [1, 1]>>
+  // expected-error@+1 {{Result shape doesn't match TensorDesc shape.}}
+  %2 = xegpu.load_nd %1 <{l1_hint = #xegpu.cache_hint<cached>, l2_hint = #xegpu.cache_hint<uncached>}> : !xegpu.tensor_desc<8x16xf32, #xegpu.sg_map<wi_layout = [1, 16], wi_data = [1, 1]>> -> vector<8x2xf32>
+  return
+}

// -----
func.func @test_store_nd_vc_1(%dst: memref<24x32xf16>) {
%1 = arith.constant dense<1.0>: vector<24x32xf16>
%2 = xegpu.create_nd_tdesc %dst[0, 0] : memref<24x32xf16> -> !xegpu.tensor_desc<24x32xf16>
-// expected-error@+1 {{invlid l1_hint: #xegpu.cache_hint<streaming>}}
+// expected-error@+1 {{invalid l1_hint: #xegpu.cache_hint<streaming>}}
xegpu.store_nd %1, %2 <{l1_hint = #xegpu.cache_hint<streaming>}>: vector<24x32xf16>, !xegpu.tensor_desc<24x32xf16>
return
}
@@ -147,7 +156,7 @@ func.func @test_prefetch_vc_2(%src: ui64) {
%0 = arith.constant dense<[0, 8, 16, 24]> : vector<4xindex>
%1 = xegpu.create_tdesc %src, %0 : ui64, vector<4xindex>
-> !xegpu.tensor_desc<4x2xf32, #xegpu.scatter_tdesc_attr<chunk_size = 2>>
-// expected-error@+1 {{invlid l1_hint: #xegpu.cache_hint<write_back>}}
+// expected-error@+1 {{invalid l1_hint: #xegpu.cache_hint<write_back>}}
xegpu.prefetch %1 <{l1_hint = #xegpu.cache_hint<write_back>}>: !xegpu.tensor_desc<4x2xf32, #xegpu.scatter_tdesc_attr<chunk_size = 2>>
return
}
@@ -168,7 +177,7 @@ func.func @test_load_gather_vc_2(%src: ui64) {
%0 = arith.constant dense<1>: vector<4xi1>
%1 = xegpu.create_tdesc %src, %cst : ui64, vector<4xindex>
-> !xegpu.tensor_desc<4x2xf32, #xegpu.scatter_tdesc_attr<chunk_size = 2>>
-// expected-error@+1 {{invlid l1_hint: #xegpu.cache_hint<write_back>}}
+// expected-error@+1 {{invalid l1_hint: #xegpu.cache_hint<write_back>}}
%2 = xegpu.load %1, %0 <{l1_hint = #xegpu.cache_hint<write_back>}>
: !xegpu.tensor_desc<4x2xf32, #xegpu.scatter_tdesc_attr<chunk_size = 2>>, vector<4xi1>
-> vector<4x2xf32>
@@ -193,7 +202,7 @@ func.func @test_store_scatter_vc_2(%src: ui64) {
%1 = arith.constant dense<2.9>: vector<4x2xf32>
%2 = xegpu.create_tdesc %src, %cst : ui64, vector<4xindex>
-> !xegpu.tensor_desc<4x2xf32, #xegpu.scatter_tdesc_attr<chunk_size = 2>>
-// expected-error@+1 {{invlid l1_hint: #xegpu.cache_hint<streaming>}}
+// expected-error@+1 {{invalid l1_hint: #xegpu.cache_hint<streaming>}}
xegpu.store %1, %2, %0 <{l1_hint = #xegpu.cache_hint<streaming>}> : vector<4x2xf32>,
!xegpu.tensor_desc<4x2xf32, #xegpu.scatter_tdesc_attr<chunk_size = 2>>, vector<4xi1>
return